gcc/config/ia64/ia64.c
1 /* Definitions of target machine for GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
3 Free Software Foundation, Inc.
4 Contributed by James E. Wilson <wilson@cygnus.com> and
5 David Mosberger <davidm@hpl.hp.com>.
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3, or (at your option)
12 any later version.
13
14 GCC is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "real.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "output.h"
35 #include "insn-attr.h"
36 #include "flags.h"
37 #include "recog.h"
38 #include "expr.h"
39 #include "optabs.h"
40 #include "except.h"
41 #include "function.h"
42 #include "ggc.h"
43 #include "basic-block.h"
44 #include "toplev.h"
45 #include "sched-int.h"
46 #include "timevar.h"
47 #include "target.h"
48 #include "target-def.h"
49 #include "tm_p.h"
50 #include "hashtab.h"
51 #include "langhooks.h"
52 #include "cfglayout.h"
53 #include "gimple.h"
54 #include "intl.h"
55 #include "df.h"
56 #include "debug.h"
57 #include "params.h"
58 #include "dbgcnt.h"
59 #include "tm-constrs.h"
60 #include "sel-sched.h"
61
62 /* This is used for communication between ASM_OUTPUT_LABEL and
63 ASM_OUTPUT_LABELREF. */
64 int ia64_asm_output_label = 0;
65
66 /* Define the information needed to generate branch and scc insns. This is
67 stored from the compare operation. */
68 struct rtx_def * ia64_compare_op0;
69 struct rtx_def * ia64_compare_op1;
70
71 /* Register names for ia64_expand_prologue. */
72 static const char * const ia64_reg_numbers[96] =
73 { "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
74 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
75 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
76 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
77 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
78 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
79 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
80 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
81 "r96", "r97", "r98", "r99", "r100","r101","r102","r103",
82 "r104","r105","r106","r107","r108","r109","r110","r111",
83 "r112","r113","r114","r115","r116","r117","r118","r119",
84 "r120","r121","r122","r123","r124","r125","r126","r127"};
85
86 /* ??? These strings could be shared with REGISTER_NAMES. */
87 static const char * const ia64_input_reg_names[8] =
88 { "in0", "in1", "in2", "in3", "in4", "in5", "in6", "in7" };
89
90 /* ??? These strings could be shared with REGISTER_NAMES. */
91 static const char * const ia64_local_reg_names[80] =
92 { "loc0", "loc1", "loc2", "loc3", "loc4", "loc5", "loc6", "loc7",
93 "loc8", "loc9", "loc10","loc11","loc12","loc13","loc14","loc15",
94 "loc16","loc17","loc18","loc19","loc20","loc21","loc22","loc23",
95 "loc24","loc25","loc26","loc27","loc28","loc29","loc30","loc31",
96 "loc32","loc33","loc34","loc35","loc36","loc37","loc38","loc39",
97 "loc40","loc41","loc42","loc43","loc44","loc45","loc46","loc47",
98 "loc48","loc49","loc50","loc51","loc52","loc53","loc54","loc55",
99 "loc56","loc57","loc58","loc59","loc60","loc61","loc62","loc63",
100 "loc64","loc65","loc66","loc67","loc68","loc69","loc70","loc71",
101 "loc72","loc73","loc74","loc75","loc76","loc77","loc78","loc79" };
102
103 /* ??? These strings could be shared with REGISTER_NAMES. */
104 static const char * const ia64_output_reg_names[8] =
105 { "out0", "out1", "out2", "out3", "out4", "out5", "out6", "out7" };
106
107 /* Which cpu we are scheduling for. */
108 enum processor_type ia64_tune = PROCESSOR_ITANIUM2;
109
110 /* Determines whether we run our final scheduling pass or not. We always
111 avoid the normal second scheduling pass. */
112 static int ia64_flag_schedule_insns2;
113
114 /* Determines whether we run variable tracking in machine dependent
115 reorganization. */
116 static int ia64_flag_var_tracking;
117
118 /* Variables which are this size or smaller are put in the sdata/sbss
119 sections. */
120
121 unsigned int ia64_section_threshold;
122
123 /* The following variable is used by the DFA insn scheduler. The value is
124 TRUE if we do insn bundling instead of insn scheduling. */
125 int bundling_p = 0;
126
127 enum ia64_frame_regs
128 {
129 reg_fp,
130 reg_save_b0,
131 reg_save_pr,
132 reg_save_ar_pfs,
133 reg_save_ar_unat,
134 reg_save_ar_lc,
135 reg_save_gp,
136 number_of_ia64_frame_regs
137 };
138
139 /* Structure to be filled in by ia64_compute_frame_size with register
140 save masks and offsets for the current function. */
141
142 struct ia64_frame_info
143 {
144 HOST_WIDE_INT total_size; /* size of the stack frame, not including
145 the caller's scratch area. */
146 HOST_WIDE_INT spill_cfa_off; /* top of the reg spill area from the cfa. */
147 HOST_WIDE_INT spill_size; /* size of the gr/br/fr spill area. */
148 HOST_WIDE_INT extra_spill_size; /* size of spill area for others. */
149 HARD_REG_SET mask; /* mask of saved registers. */
150 unsigned int gr_used_mask; /* mask of registers in use as gr spill
151 registers or long-term scratches. */
152 int n_spilled; /* number of spilled registers. */
153 int r[number_of_ia64_frame_regs]; /* Frame related registers. */
154 int n_input_regs; /* number of input registers used. */
155 int n_local_regs; /* number of local registers used. */
156 int n_output_regs; /* number of output registers used. */
157 int n_rotate_regs; /* number of rotating registers used. */
158
159 char need_regstk; /* true if a .regstk directive needed. */
160 char initialized; /* true if the data is finalized. */
161 };
162
163 /* Current frame information calculated by ia64_compute_frame_size. */
164 static struct ia64_frame_info current_frame_info;
165 /* The actual registers that are emitted. */
166 static int emitted_frame_related_regs[number_of_ia64_frame_regs];
167 \f
168 static int ia64_first_cycle_multipass_dfa_lookahead (void);
169 static void ia64_dependencies_evaluation_hook (rtx, rtx);
170 static void ia64_init_dfa_pre_cycle_insn (void);
171 static rtx ia64_dfa_pre_cycle_insn (void);
172 static int ia64_first_cycle_multipass_dfa_lookahead_guard (rtx);
173 static bool ia64_first_cycle_multipass_dfa_lookahead_guard_spec (const_rtx);
174 static int ia64_dfa_new_cycle (FILE *, int, rtx, int, int, int *);
175 static void ia64_h_i_d_extended (void);
176 static void * ia64_alloc_sched_context (void);
177 static void ia64_init_sched_context (void *, bool);
178 static void ia64_set_sched_context (void *);
179 static void ia64_clear_sched_context (void *);
180 static void ia64_free_sched_context (void *);
181 static int ia64_mode_to_int (enum machine_mode);
182 static void ia64_set_sched_flags (spec_info_t);
183 static ds_t ia64_get_insn_spec_ds (rtx);
184 static ds_t ia64_get_insn_checked_ds (rtx);
185 static bool ia64_skip_rtx_p (const_rtx);
186 static int ia64_speculate_insn (rtx, ds_t, rtx *);
187 static bool ia64_needs_block_p (int);
188 static rtx ia64_gen_spec_check (rtx, rtx, ds_t);
189 static int ia64_spec_check_p (rtx);
190 static int ia64_spec_check_src_p (rtx);
191 static rtx gen_tls_get_addr (void);
192 static rtx gen_thread_pointer (void);
193 static int find_gr_spill (enum ia64_frame_regs, int);
194 static int next_scratch_gr_reg (void);
195 static void mark_reg_gr_used_mask (rtx, void *);
196 static void ia64_compute_frame_size (HOST_WIDE_INT);
197 static void setup_spill_pointers (int, rtx, HOST_WIDE_INT);
198 static void finish_spill_pointers (void);
199 static rtx spill_restore_mem (rtx, HOST_WIDE_INT);
200 static void do_spill (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT, rtx);
201 static void do_restore (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT);
202 static rtx gen_movdi_x (rtx, rtx, rtx);
203 static rtx gen_fr_spill_x (rtx, rtx, rtx);
204 static rtx gen_fr_restore_x (rtx, rtx, rtx);
205
206 static enum machine_mode hfa_element_mode (const_tree, bool);
207 static void ia64_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
208 tree, int *, int);
209 static int ia64_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
210 tree, bool);
211 static bool ia64_function_ok_for_sibcall (tree, tree);
212 static bool ia64_return_in_memory (const_tree, const_tree);
213 static bool ia64_rtx_costs (rtx, int, int, int *, bool);
214 static int ia64_unspec_may_trap_p (const_rtx, unsigned);
215 static void fix_range (const char *);
216 static bool ia64_handle_option (size_t, const char *, int);
217 static struct machine_function * ia64_init_machine_status (void);
218 static void emit_insn_group_barriers (FILE *);
219 static void emit_all_insn_group_barriers (FILE *);
220 static void final_emit_insn_group_barriers (FILE *);
221 static void emit_predicate_relation_info (void);
222 static void ia64_reorg (void);
223 static bool ia64_in_small_data_p (const_tree);
224 static void process_epilogue (FILE *, rtx, bool, bool);
225 static int process_set (FILE *, rtx, rtx, bool, bool);
226
227 static bool ia64_assemble_integer (rtx, unsigned int, int);
228 static void ia64_output_function_prologue (FILE *, HOST_WIDE_INT);
229 static void ia64_output_function_epilogue (FILE *, HOST_WIDE_INT);
230 static void ia64_output_function_end_prologue (FILE *);
231
232 static int ia64_issue_rate (void);
233 static int ia64_adjust_cost_2 (rtx, int, rtx, int, dw_t);
234 static void ia64_sched_init (FILE *, int, int);
235 static void ia64_sched_init_global (FILE *, int, int);
236 static void ia64_sched_finish_global (FILE *, int);
237 static void ia64_sched_finish (FILE *, int);
238 static int ia64_dfa_sched_reorder (FILE *, int, rtx *, int *, int, int);
239 static int ia64_sched_reorder (FILE *, int, rtx *, int *, int);
240 static int ia64_sched_reorder2 (FILE *, int, rtx *, int *, int);
241 static int ia64_variable_issue (FILE *, int, rtx, int);
242
243 static struct bundle_state *get_free_bundle_state (void);
244 static void free_bundle_state (struct bundle_state *);
245 static void initiate_bundle_states (void);
246 static void finish_bundle_states (void);
247 static unsigned bundle_state_hash (const void *);
248 static int bundle_state_eq_p (const void *, const void *);
249 static int insert_bundle_state (struct bundle_state *);
250 static void initiate_bundle_state_table (void);
251 static void finish_bundle_state_table (void);
252 static int try_issue_nops (struct bundle_state *, int);
253 static int try_issue_insn (struct bundle_state *, rtx);
254 static void issue_nops_and_insn (struct bundle_state *, int, rtx, int, int);
255 static int get_max_pos (state_t);
256 static int get_template (state_t, int);
257
258 static rtx get_next_important_insn (rtx, rtx);
259 static bool important_for_bundling_p (rtx);
260 static void bundling (FILE *, int, rtx, rtx);
261
262 static void ia64_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
263 HOST_WIDE_INT, tree);
264 static void ia64_file_start (void);
265 static void ia64_globalize_decl_name (FILE *, tree);
266
267 static int ia64_hpux_reloc_rw_mask (void) ATTRIBUTE_UNUSED;
268 static int ia64_reloc_rw_mask (void) ATTRIBUTE_UNUSED;
269 static section *ia64_select_rtx_section (enum machine_mode, rtx,
270 unsigned HOST_WIDE_INT);
271 static void ia64_output_dwarf_dtprel (FILE *, int, rtx)
272 ATTRIBUTE_UNUSED;
273 static unsigned int ia64_section_type_flags (tree, const char *, int);
274 static void ia64_init_libfuncs (void)
275 ATTRIBUTE_UNUSED;
276 static void ia64_hpux_init_libfuncs (void)
277 ATTRIBUTE_UNUSED;
278 static void ia64_sysv4_init_libfuncs (void)
279 ATTRIBUTE_UNUSED;
280 static void ia64_vms_init_libfuncs (void)
281 ATTRIBUTE_UNUSED;
282 static void ia64_soft_fp_init_libfuncs (void)
283 ATTRIBUTE_UNUSED;
284
285 static tree ia64_handle_model_attribute (tree *, tree, tree, int, bool *);
286 static tree ia64_handle_version_id_attribute (tree *, tree, tree, int, bool *);
287 static void ia64_encode_section_info (tree, rtx, int);
288 static rtx ia64_struct_value_rtx (tree, int);
289 static tree ia64_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
290 static bool ia64_scalar_mode_supported_p (enum machine_mode mode);
291 static bool ia64_vector_mode_supported_p (enum machine_mode mode);
292 static bool ia64_cannot_force_const_mem (rtx);
293 static const char *ia64_mangle_type (const_tree);
294 static const char *ia64_invalid_conversion (const_tree, const_tree);
295 static const char *ia64_invalid_unary_op (int, const_tree);
296 static const char *ia64_invalid_binary_op (int, const_tree, const_tree);
297 static enum machine_mode ia64_c_mode_for_suffix (char);
298 \f
299 /* Table of valid machine attributes. */
300 static const struct attribute_spec ia64_attribute_table[] =
301 {
302 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
303 { "syscall_linkage", 0, 0, false, true, true, NULL },
304 { "model", 1, 1, true, false, false, ia64_handle_model_attribute },
305 { "version_id", 1, 1, true, false, false,
306 ia64_handle_version_id_attribute },
307 { NULL, 0, 0, false, false, false, NULL }
308 };
309
310 /* Initialize the GCC target structure. */
311 #undef TARGET_ATTRIBUTE_TABLE
312 #define TARGET_ATTRIBUTE_TABLE ia64_attribute_table
313
314 #undef TARGET_INIT_BUILTINS
315 #define TARGET_INIT_BUILTINS ia64_init_builtins
316
317 #undef TARGET_EXPAND_BUILTIN
318 #define TARGET_EXPAND_BUILTIN ia64_expand_builtin
319
320 #undef TARGET_ASM_BYTE_OP
321 #define TARGET_ASM_BYTE_OP "\tdata1\t"
322 #undef TARGET_ASM_ALIGNED_HI_OP
323 #define TARGET_ASM_ALIGNED_HI_OP "\tdata2\t"
324 #undef TARGET_ASM_ALIGNED_SI_OP
325 #define TARGET_ASM_ALIGNED_SI_OP "\tdata4\t"
326 #undef TARGET_ASM_ALIGNED_DI_OP
327 #define TARGET_ASM_ALIGNED_DI_OP "\tdata8\t"
328 #undef TARGET_ASM_UNALIGNED_HI_OP
329 #define TARGET_ASM_UNALIGNED_HI_OP "\tdata2.ua\t"
330 #undef TARGET_ASM_UNALIGNED_SI_OP
331 #define TARGET_ASM_UNALIGNED_SI_OP "\tdata4.ua\t"
332 #undef TARGET_ASM_UNALIGNED_DI_OP
333 #define TARGET_ASM_UNALIGNED_DI_OP "\tdata8.ua\t"
334 #undef TARGET_ASM_INTEGER
335 #define TARGET_ASM_INTEGER ia64_assemble_integer
336
337 #undef TARGET_ASM_FUNCTION_PROLOGUE
338 #define TARGET_ASM_FUNCTION_PROLOGUE ia64_output_function_prologue
339 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
340 #define TARGET_ASM_FUNCTION_END_PROLOGUE ia64_output_function_end_prologue
341 #undef TARGET_ASM_FUNCTION_EPILOGUE
342 #define TARGET_ASM_FUNCTION_EPILOGUE ia64_output_function_epilogue
343
344 #undef TARGET_IN_SMALL_DATA_P
345 #define TARGET_IN_SMALL_DATA_P ia64_in_small_data_p
346
347 #undef TARGET_SCHED_ADJUST_COST_2
348 #define TARGET_SCHED_ADJUST_COST_2 ia64_adjust_cost_2
349 #undef TARGET_SCHED_ISSUE_RATE
350 #define TARGET_SCHED_ISSUE_RATE ia64_issue_rate
351 #undef TARGET_SCHED_VARIABLE_ISSUE
352 #define TARGET_SCHED_VARIABLE_ISSUE ia64_variable_issue
353 #undef TARGET_SCHED_INIT
354 #define TARGET_SCHED_INIT ia64_sched_init
355 #undef TARGET_SCHED_FINISH
356 #define TARGET_SCHED_FINISH ia64_sched_finish
357 #undef TARGET_SCHED_INIT_GLOBAL
358 #define TARGET_SCHED_INIT_GLOBAL ia64_sched_init_global
359 #undef TARGET_SCHED_FINISH_GLOBAL
360 #define TARGET_SCHED_FINISH_GLOBAL ia64_sched_finish_global
361 #undef TARGET_SCHED_REORDER
362 #define TARGET_SCHED_REORDER ia64_sched_reorder
363 #undef TARGET_SCHED_REORDER2
364 #define TARGET_SCHED_REORDER2 ia64_sched_reorder2
365
366 #undef TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK
367 #define TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK ia64_dependencies_evaluation_hook
368
369 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
370 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD ia64_first_cycle_multipass_dfa_lookahead
371
372 #undef TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN
373 #define TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN ia64_init_dfa_pre_cycle_insn
374 #undef TARGET_SCHED_DFA_PRE_CYCLE_INSN
375 #define TARGET_SCHED_DFA_PRE_CYCLE_INSN ia64_dfa_pre_cycle_insn
376
377 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
378 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD\
379 ia64_first_cycle_multipass_dfa_lookahead_guard
380
381 #undef TARGET_SCHED_DFA_NEW_CYCLE
382 #define TARGET_SCHED_DFA_NEW_CYCLE ia64_dfa_new_cycle
383
384 #undef TARGET_SCHED_H_I_D_EXTENDED
385 #define TARGET_SCHED_H_I_D_EXTENDED ia64_h_i_d_extended
386
387 #undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
388 #define TARGET_SCHED_ALLOC_SCHED_CONTEXT ia64_alloc_sched_context
389
390 #undef TARGET_SCHED_INIT_SCHED_CONTEXT
391 #define TARGET_SCHED_INIT_SCHED_CONTEXT ia64_init_sched_context
392
393 #undef TARGET_SCHED_SET_SCHED_CONTEXT
394 #define TARGET_SCHED_SET_SCHED_CONTEXT ia64_set_sched_context
395
396 #undef TARGET_SCHED_CLEAR_SCHED_CONTEXT
397 #define TARGET_SCHED_CLEAR_SCHED_CONTEXT ia64_clear_sched_context
398
399 #undef TARGET_SCHED_FREE_SCHED_CONTEXT
400 #define TARGET_SCHED_FREE_SCHED_CONTEXT ia64_free_sched_context
401
402 #undef TARGET_SCHED_SET_SCHED_FLAGS
403 #define TARGET_SCHED_SET_SCHED_FLAGS ia64_set_sched_flags
404
405 #undef TARGET_SCHED_GET_INSN_SPEC_DS
406 #define TARGET_SCHED_GET_INSN_SPEC_DS ia64_get_insn_spec_ds
407
408 #undef TARGET_SCHED_GET_INSN_CHECKED_DS
409 #define TARGET_SCHED_GET_INSN_CHECKED_DS ia64_get_insn_checked_ds
410
411 #undef TARGET_SCHED_SPECULATE_INSN
412 #define TARGET_SCHED_SPECULATE_INSN ia64_speculate_insn
413
414 #undef TARGET_SCHED_NEEDS_BLOCK_P
415 #define TARGET_SCHED_NEEDS_BLOCK_P ia64_needs_block_p
416
417 #undef TARGET_SCHED_GEN_SPEC_CHECK
418 #define TARGET_SCHED_GEN_SPEC_CHECK ia64_gen_spec_check
419
420 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD_SPEC
421 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD_SPEC\
422 ia64_first_cycle_multipass_dfa_lookahead_guard_spec
423
424 #undef TARGET_SCHED_SKIP_RTX_P
425 #define TARGET_SCHED_SKIP_RTX_P ia64_skip_rtx_p
426
427 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
428 #define TARGET_FUNCTION_OK_FOR_SIBCALL ia64_function_ok_for_sibcall
429 #undef TARGET_ARG_PARTIAL_BYTES
430 #define TARGET_ARG_PARTIAL_BYTES ia64_arg_partial_bytes
431
432 #undef TARGET_ASM_OUTPUT_MI_THUNK
433 #define TARGET_ASM_OUTPUT_MI_THUNK ia64_output_mi_thunk
434 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
435 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
436
437 #undef TARGET_ASM_FILE_START
438 #define TARGET_ASM_FILE_START ia64_file_start
439
440 #undef TARGET_ASM_GLOBALIZE_DECL_NAME
441 #define TARGET_ASM_GLOBALIZE_DECL_NAME ia64_globalize_decl_name
442
443 #undef TARGET_RTX_COSTS
444 #define TARGET_RTX_COSTS ia64_rtx_costs
445 #undef TARGET_ADDRESS_COST
446 #define TARGET_ADDRESS_COST hook_int_rtx_bool_0
447
448 #undef TARGET_UNSPEC_MAY_TRAP_P
449 #define TARGET_UNSPEC_MAY_TRAP_P ia64_unspec_may_trap_p
450
451 #undef TARGET_MACHINE_DEPENDENT_REORG
452 #define TARGET_MACHINE_DEPENDENT_REORG ia64_reorg
453
454 #undef TARGET_ENCODE_SECTION_INFO
455 #define TARGET_ENCODE_SECTION_INFO ia64_encode_section_info
456
457 #undef TARGET_SECTION_TYPE_FLAGS
458 #define TARGET_SECTION_TYPE_FLAGS ia64_section_type_flags
459
460 #ifdef HAVE_AS_TLS
461 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
462 #define TARGET_ASM_OUTPUT_DWARF_DTPREL ia64_output_dwarf_dtprel
463 #endif
464
465 /* ??? ABI doesn't allow us to define this. */
466 #if 0
467 #undef TARGET_PROMOTE_FUNCTION_ARGS
468 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
469 #endif
470
471 /* ??? ABI doesn't allow us to define this. */
472 #if 0
473 #undef TARGET_PROMOTE_FUNCTION_RETURN
474 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
475 #endif
476
477 /* ??? Investigate. */
478 #if 0
479 #undef TARGET_PROMOTE_PROTOTYPES
480 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
481 #endif
482
483 #undef TARGET_STRUCT_VALUE_RTX
484 #define TARGET_STRUCT_VALUE_RTX ia64_struct_value_rtx
485 #undef TARGET_RETURN_IN_MEMORY
486 #define TARGET_RETURN_IN_MEMORY ia64_return_in_memory
487 #undef TARGET_SETUP_INCOMING_VARARGS
488 #define TARGET_SETUP_INCOMING_VARARGS ia64_setup_incoming_varargs
489 #undef TARGET_STRICT_ARGUMENT_NAMING
490 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
491 #undef TARGET_MUST_PASS_IN_STACK
492 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
493
494 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
495 #define TARGET_GIMPLIFY_VA_ARG_EXPR ia64_gimplify_va_arg
496
497 #undef TARGET_UNWIND_EMIT
498 #define TARGET_UNWIND_EMIT process_for_unwind_directive
499
500 #undef TARGET_SCALAR_MODE_SUPPORTED_P
501 #define TARGET_SCALAR_MODE_SUPPORTED_P ia64_scalar_mode_supported_p
502 #undef TARGET_VECTOR_MODE_SUPPORTED_P
503 #define TARGET_VECTOR_MODE_SUPPORTED_P ia64_vector_mode_supported_p
504
505 /* ia64 architecture manual 4.4.7: ... reads, writes, and flushes may occur
506 in an order different from the specified program order. */
507 #undef TARGET_RELAXED_ORDERING
508 #define TARGET_RELAXED_ORDERING true
509
510 #undef TARGET_DEFAULT_TARGET_FLAGS
511 #define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | TARGET_CPU_DEFAULT)
512 #undef TARGET_HANDLE_OPTION
513 #define TARGET_HANDLE_OPTION ia64_handle_option
514
515 #undef TARGET_CANNOT_FORCE_CONST_MEM
516 #define TARGET_CANNOT_FORCE_CONST_MEM ia64_cannot_force_const_mem
517
518 #undef TARGET_MANGLE_TYPE
519 #define TARGET_MANGLE_TYPE ia64_mangle_type
520
521 #undef TARGET_INVALID_CONVERSION
522 #define TARGET_INVALID_CONVERSION ia64_invalid_conversion
523 #undef TARGET_INVALID_UNARY_OP
524 #define TARGET_INVALID_UNARY_OP ia64_invalid_unary_op
525 #undef TARGET_INVALID_BINARY_OP
526 #define TARGET_INVALID_BINARY_OP ia64_invalid_binary_op
527
528 #undef TARGET_C_MODE_FOR_SUFFIX
529 #define TARGET_C_MODE_FOR_SUFFIX ia64_c_mode_for_suffix
530
531 struct gcc_target targetm = TARGET_INITIALIZER;
532 \f
533 typedef enum
534 {
535 ADDR_AREA_NORMAL, /* normal address area */
536 ADDR_AREA_SMALL /* addressable by "addl" (-2MB < addr < 2MB) */
537 }
538 ia64_addr_area;
539
540 static GTY(()) tree small_ident1;
541 static GTY(()) tree small_ident2;
542
543 static void
544 init_idents (void)
545 {
546 if (small_ident1 == 0)
547 {
548 small_ident1 = get_identifier ("small");
549 small_ident2 = get_identifier ("__small__");
550 }
551 }
552
553 /* Retrieve the address area that has been chosen for the given decl. */
554
555 static ia64_addr_area
556 ia64_get_addr_area (tree decl)
557 {
558 tree model_attr;
559
560 model_attr = lookup_attribute ("model", DECL_ATTRIBUTES (decl));
561 if (model_attr)
562 {
563 tree id;
564
565 init_idents ();
566 id = TREE_VALUE (TREE_VALUE (model_attr));
567 if (id == small_ident1 || id == small_ident2)
568 return ADDR_AREA_SMALL;
569 }
570 return ADDR_AREA_NORMAL;
571 }
572
573 static tree
574 ia64_handle_model_attribute (tree *node, tree name, tree args,
575 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
576 {
577 ia64_addr_area addr_area = ADDR_AREA_NORMAL;
578 ia64_addr_area area;
579 tree arg, decl = *node;
580
581 init_idents ();
582 arg = TREE_VALUE (args);
583 if (arg == small_ident1 || arg == small_ident2)
584 {
585 addr_area = ADDR_AREA_SMALL;
586 }
587 else
588 {
589 warning (OPT_Wattributes, "invalid argument of %qs attribute",
590 IDENTIFIER_POINTER (name));
591 *no_add_attrs = true;
592 }
593
594 switch (TREE_CODE (decl))
595 {
596 case VAR_DECL:
597 if ((DECL_CONTEXT (decl) && TREE_CODE (DECL_CONTEXT (decl))
598 == FUNCTION_DECL)
599 && !TREE_STATIC (decl))
600 {
601 error ("%Jan address area attribute cannot be specified for "
602 "local variables", decl);
603 *no_add_attrs = true;
604 }
605 area = ia64_get_addr_area (decl);
606 if (area != ADDR_AREA_NORMAL && addr_area != area)
607 {
608 error ("address area of %q+D conflicts with previous "
609 "declaration", decl);
610 *no_add_attrs = true;
611 }
612 break;
613
614 case FUNCTION_DECL:
615 error ("%Jaddress area attribute cannot be specified for functions",
616 decl);
617 *no_add_attrs = true;
618 break;
619
620 default:
621 warning (OPT_Wattributes, "%qs attribute ignored",
622 IDENTIFIER_POINTER (name));
623 *no_add_attrs = true;
624 break;
625 }
626
627 return NULL_TREE;
628 }
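/* A typical (illustrative) use of the attribute: it takes an identifier
   argument, e.g.

       static int counter __attribute__ ((model (small)));

   which requests the small address area described above, reachable
   via "addl".  */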
629
630 static void
631 ia64_encode_addr_area (tree decl, rtx symbol)
632 {
633 int flags;
634
635 flags = SYMBOL_REF_FLAGS (symbol);
636 switch (ia64_get_addr_area (decl))
637 {
638 case ADDR_AREA_NORMAL: break;
639 case ADDR_AREA_SMALL: flags |= SYMBOL_FLAG_SMALL_ADDR; break;
640 default: gcc_unreachable ();
641 }
642 SYMBOL_REF_FLAGS (symbol) = flags;
643 }
644
645 static void
646 ia64_encode_section_info (tree decl, rtx rtl, int first)
647 {
648 default_encode_section_info (decl, rtl, first);
649
650 /* Careful not to prod global register variables. */
651 if (TREE_CODE (decl) == VAR_DECL
652 && GET_CODE (DECL_RTL (decl)) == MEM
653 && GET_CODE (XEXP (DECL_RTL (decl), 0)) == SYMBOL_REF
654 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl)))
655 ia64_encode_addr_area (decl, XEXP (rtl, 0));
656 }
657 \f
658 /* Return 1 if the operands of a move are ok. */
659
660 int
661 ia64_move_ok (rtx dst, rtx src)
662 {
663 /* If we're under init_recog_no_volatile, we'll not be able to use
664 memory_operand. So check the code directly and don't worry about
665 the validity of the underlying address, which should have been
666 checked elsewhere anyway. */
667 if (GET_CODE (dst) != MEM)
668 return 1;
669 if (GET_CODE (src) == MEM)
670 return 0;
671 if (register_operand (src, VOIDmode))
672 return 1;
673
674 /* Otherwise, this must be a constant, and it must be either 0, 0.0, or 1.0. */
675 if (INTEGRAL_MODE_P (GET_MODE (dst)))
676 return src == const0_rtx;
677 else
678 return satisfies_constraint_G (src);
679 }
680
681 /* Return 1 if the operands are ok for a floating point load pair. */
682
683 int
684 ia64_load_pair_ok (rtx dst, rtx src)
685 {
686 if (GET_CODE (dst) != REG || !FP_REGNO_P (REGNO (dst)))
687 return 0;
688 if (GET_CODE (src) != MEM || MEM_VOLATILE_P (src))
689 return 0;
690 switch (GET_CODE (XEXP (src, 0)))
691 {
692 case REG:
693 case POST_INC:
694 break;
695 case POST_DEC:
696 return 0;
697 case POST_MODIFY:
698 {
699 rtx adjust = XEXP (XEXP (XEXP (src, 0), 1), 1);
700
701 if (GET_CODE (adjust) != CONST_INT
702 || INTVAL (adjust) != GET_MODE_SIZE (GET_MODE (src)))
703 return 0;
704 }
705 break;
706 default:
707 abort ();
708 }
709 return 1;
710 }
711
712 int
713 addp4_optimize_ok (rtx op1, rtx op2)
714 {
715 return (basereg_operand (op1, GET_MODE(op1)) !=
716 basereg_operand (op2, GET_MODE(op2)));
717 }
718
719 /* Check if OP is a mask suitable for use with SHIFT in a dep.z instruction.
720 Return the length of the field, or <= 0 on failure. */
721
722 int
723 ia64_depz_field_mask (rtx rop, rtx rshift)
724 {
725 unsigned HOST_WIDE_INT op = INTVAL (rop);
726 unsigned HOST_WIDE_INT shift = INTVAL (rshift);
727
728 /* Get rid of the zero bits we're shifting in. */
729 op >>= shift;
730
731 /* We must now have a solid block of 1's at bit 0. */
732 return exact_log2 (op + 1);
733 }
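/* Illustrative example for ia64_depz_field_mask: with ROP = 0x3f8 and
   RSHIFT = 3, the shift leaves OP = 0x7f, a solid block of seven 1s, and
   exact_log2 (0x7f + 1) returns the field length 7.  A mask that is not
   contiguous after the shift, e.g. 0x5, gives exact_log2 (0x6) == -1,
   i.e. failure.  */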
734
735 /* Return the TLS model to use for ADDR. */
736
737 static enum tls_model
738 tls_symbolic_operand_type (rtx addr)
739 {
740 enum tls_model tls_kind = 0;
741
742 if (GET_CODE (addr) == CONST)
743 {
744 if (GET_CODE (XEXP (addr, 0)) == PLUS
745 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF)
746 tls_kind = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (addr, 0), 0));
747 }
748 else if (GET_CODE (addr) == SYMBOL_REF)
749 tls_kind = SYMBOL_REF_TLS_MODEL (addr);
750
751 return tls_kind;
752 }
753
754 /* Return true if X is a constant that is valid for some immediate
755 field in an instruction. */
756
757 bool
758 ia64_legitimate_constant_p (rtx x)
759 {
760 switch (GET_CODE (x))
761 {
762 case CONST_INT:
763 case LABEL_REF:
764 return true;
765
766 case CONST_DOUBLE:
767 if (GET_MODE (x) == VOIDmode)
768 return true;
769 return satisfies_constraint_G (x);
770
771 case CONST:
772 case SYMBOL_REF:
773 /* ??? Short term workaround for PR 28490. We must make the code here
774 match the code in ia64_expand_move and move_operand, even though they
775 are both technically wrong. */
776 if (tls_symbolic_operand_type (x) == 0)
777 {
778 HOST_WIDE_INT addend = 0;
779 rtx op = x;
780
781 if (GET_CODE (op) == CONST
782 && GET_CODE (XEXP (op, 0)) == PLUS
783 && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT)
784 {
785 addend = INTVAL (XEXP (XEXP (op, 0), 1));
786 op = XEXP (XEXP (op, 0), 0);
787 }
788
789 if (any_offset_symbol_operand (op, GET_MODE (op))
790 || function_operand (op, GET_MODE (op)))
791 return true;
792 if (aligned_offset_symbol_operand (op, GET_MODE (op)))
793 return (addend & 0x3fff) == 0;
794 return false;
795 }
796 return false;
797
798 case CONST_VECTOR:
799 {
800 enum machine_mode mode = GET_MODE (x);
801
802 if (mode == V2SFmode)
803 return satisfies_constraint_Y (x);
804
805 return (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
806 && GET_MODE_SIZE (mode) <= 8);
807 }
808
809 default:
810 return false;
811 }
812 }
813
814 /* Don't allow TLS addresses to get spilled to memory. */
815
816 static bool
817 ia64_cannot_force_const_mem (rtx x)
818 {
819 if (GET_MODE (x) == RFmode)
820 return true;
821 return tls_symbolic_operand_type (x) != 0;
822 }
823
824 /* Expand a symbolic constant load. */
825
826 bool
827 ia64_expand_load_address (rtx dest, rtx src)
828 {
829 gcc_assert (GET_CODE (dest) == REG);
830
831 /* ILP32 mode still loads 64 bits of data from the GOT. This avoids
832 having to pointer-extend the value afterward. Other forms of address
833 computation below are also more natural to compute as 64-bit quantities.
834 If we've been given an SImode destination register, change it. */
835 if (GET_MODE (dest) != Pmode)
836 dest = gen_rtx_REG_offset (dest, Pmode, REGNO (dest),
837 byte_lowpart_offset (Pmode, GET_MODE (dest)));
838
839 if (TARGET_NO_PIC)
840 return false;
841 if (small_addr_symbolic_operand (src, VOIDmode))
842 return false;
843
844 if (TARGET_AUTO_PIC)
845 emit_insn (gen_load_gprel64 (dest, src));
846 else if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (src))
847 emit_insn (gen_load_fptr (dest, src));
848 else if (sdata_symbolic_operand (src, VOIDmode))
849 emit_insn (gen_load_gprel (dest, src));
850 else
851 {
852 HOST_WIDE_INT addend = 0;
853 rtx tmp;
854
855 /* We did split constant offsets in ia64_expand_move, and we did try
856 to keep them split in move_operand, but we also allowed reload to
857 rematerialize arbitrary constants rather than spill the value to
858 the stack and reload it. So we have to be prepared here to split
859 them apart again. */
860 if (GET_CODE (src) == CONST)
861 {
862 HOST_WIDE_INT hi, lo;
863
864 hi = INTVAL (XEXP (XEXP (src, 0), 1));
865 lo = ((hi & 0x3fff) ^ 0x2000) - 0x2000;
866 hi = hi - lo;
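	      /* That is, LO is the sign-extended low 14 bits of the offset,
		 in the range [-0x2000, 0x1fff], and HI becomes the remaining
		 multiple of 0x4000.  For example, an offset of 0x3000 splits
		 into HI = 0x4000 and LO = -0x1000.  */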
867
868 if (lo != 0)
869 {
870 addend = lo;
871 src = plus_constant (XEXP (XEXP (src, 0), 0), hi);
872 }
873 }
874
875 tmp = gen_rtx_HIGH (Pmode, src);
876 tmp = gen_rtx_PLUS (Pmode, tmp, pic_offset_table_rtx);
877 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
878
879 tmp = gen_rtx_LO_SUM (Pmode, dest, src);
880 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
881
882 if (addend)
883 {
884 tmp = gen_rtx_PLUS (Pmode, dest, GEN_INT (addend));
885 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
886 }
887 }
888
889 return true;
890 }
891
892 static GTY(()) rtx gen_tls_tga;
893 static rtx
894 gen_tls_get_addr (void)
895 {
896 if (!gen_tls_tga)
897 gen_tls_tga = init_one_libfunc ("__tls_get_addr");
898 return gen_tls_tga;
899 }
900
901 static GTY(()) rtx thread_pointer_rtx;
902 static rtx
903 gen_thread_pointer (void)
904 {
905 if (!thread_pointer_rtx)
906 thread_pointer_rtx = gen_rtx_REG (Pmode, 13);
907 return thread_pointer_rtx;
908 }
909
910 static rtx
911 ia64_expand_tls_address (enum tls_model tls_kind, rtx op0, rtx op1,
912 rtx orig_op1, HOST_WIDE_INT addend)
913 {
914 rtx tga_op1, tga_op2, tga_ret, tga_eqv, tmp, insns;
915 rtx orig_op0 = op0;
916 HOST_WIDE_INT addend_lo, addend_hi;
917
918 switch (tls_kind)
919 {
920 case TLS_MODEL_GLOBAL_DYNAMIC:
921 start_sequence ();
922
923 tga_op1 = gen_reg_rtx (Pmode);
924 emit_insn (gen_load_dtpmod (tga_op1, op1));
925
926 tga_op2 = gen_reg_rtx (Pmode);
927 emit_insn (gen_load_dtprel (tga_op2, op1));
928
929 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
930 LCT_CONST, Pmode, 2, tga_op1,
931 Pmode, tga_op2, Pmode);
932
933 insns = get_insns ();
934 end_sequence ();
935
936 if (GET_MODE (op0) != Pmode)
937 op0 = tga_ret;
938 emit_libcall_block (insns, op0, tga_ret, op1);
939 break;
940
941 case TLS_MODEL_LOCAL_DYNAMIC:
942 /* ??? This isn't the completely proper way to do local-dynamic.
943 If the call to __tls_get_addr is used only by a single symbol,
944 then we should (somehow) move the dtprel to the second arg
945 to avoid the extra add. */
946 start_sequence ();
947
948 tga_op1 = gen_reg_rtx (Pmode);
949 emit_insn (gen_load_dtpmod (tga_op1, op1));
950
951 tga_op2 = const0_rtx;
952
953 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
954 LCT_CONST, Pmode, 2, tga_op1,
955 Pmode, tga_op2, Pmode);
956
957 insns = get_insns ();
958 end_sequence ();
959
960 tga_eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
961 UNSPEC_LD_BASE);
962 tmp = gen_reg_rtx (Pmode);
963 emit_libcall_block (insns, tmp, tga_ret, tga_eqv);
964
965 if (!register_operand (op0, Pmode))
966 op0 = gen_reg_rtx (Pmode);
967 if (TARGET_TLS64)
968 {
969 emit_insn (gen_load_dtprel (op0, op1));
970 emit_insn (gen_adddi3 (op0, tmp, op0));
971 }
972 else
973 emit_insn (gen_add_dtprel (op0, op1, tmp));
974 break;
975
976 case TLS_MODEL_INITIAL_EXEC:
977 addend_lo = ((addend & 0x3fff) ^ 0x2000) - 0x2000;
978 addend_hi = addend - addend_lo;
979
980 op1 = plus_constant (op1, addend_hi);
981 addend = addend_lo;
982
983 tmp = gen_reg_rtx (Pmode);
984 emit_insn (gen_load_tprel (tmp, op1));
985
986 if (!register_operand (op0, Pmode))
987 op0 = gen_reg_rtx (Pmode);
988 emit_insn (gen_adddi3 (op0, tmp, gen_thread_pointer ()));
989 break;
990
991 case TLS_MODEL_LOCAL_EXEC:
992 if (!register_operand (op0, Pmode))
993 op0 = gen_reg_rtx (Pmode);
994
995 op1 = orig_op1;
996 addend = 0;
997 if (TARGET_TLS64)
998 {
999 emit_insn (gen_load_tprel (op0, op1));
1000 emit_insn (gen_adddi3 (op0, op0, gen_thread_pointer ()));
1001 }
1002 else
1003 emit_insn (gen_add_tprel (op0, op1, gen_thread_pointer ()));
1004 break;
1005
1006 default:
1007 gcc_unreachable ();
1008 }
1009
1010 if (addend)
1011 op0 = expand_simple_binop (Pmode, PLUS, op0, GEN_INT (addend),
1012 orig_op0, 1, OPTAB_DIRECT);
1013 if (orig_op0 == op0)
1014 return NULL_RTX;
1015 if (GET_MODE (orig_op0) == Pmode)
1016 return op0;
1017 return gen_lowpart (GET_MODE (orig_op0), op0);
1018 }
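/* Note that a NULL_RTX return from ia64_expand_tls_address means the
   destination has already been written by one of the sequences above;
   otherwise the returned value, converted to ORIG_OP0's mode, still has
   to be moved by the caller.  */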
1019
1020 rtx
1021 ia64_expand_move (rtx op0, rtx op1)
1022 {
1023 enum machine_mode mode = GET_MODE (op0);
1024
1025 if (!reload_in_progress && !reload_completed && !ia64_move_ok (op0, op1))
1026 op1 = force_reg (mode, op1);
1027
1028 if ((mode == Pmode || mode == ptr_mode) && symbolic_operand (op1, VOIDmode))
1029 {
1030 HOST_WIDE_INT addend = 0;
1031 enum tls_model tls_kind;
1032 rtx sym = op1;
1033
1034 if (GET_CODE (op1) == CONST
1035 && GET_CODE (XEXP (op1, 0)) == PLUS
1036 && GET_CODE (XEXP (XEXP (op1, 0), 1)) == CONST_INT)
1037 {
1038 addend = INTVAL (XEXP (XEXP (op1, 0), 1));
1039 sym = XEXP (XEXP (op1, 0), 0);
1040 }
1041
1042 tls_kind = tls_symbolic_operand_type (sym);
1043 if (tls_kind)
1044 return ia64_expand_tls_address (tls_kind, op0, sym, op1, addend);
1045
1046 if (any_offset_symbol_operand (sym, mode))
1047 addend = 0;
1048 else if (aligned_offset_symbol_operand (sym, mode))
1049 {
1050 HOST_WIDE_INT addend_lo, addend_hi;
1051
1052 addend_lo = ((addend & 0x3fff) ^ 0x2000) - 0x2000;
1053 addend_hi = addend - addend_lo;
1054
1055 if (addend_lo != 0)
1056 {
1057 op1 = plus_constant (sym, addend_hi);
1058 addend = addend_lo;
1059 }
1060 else
1061 addend = 0;
1062 }
1063 else
1064 op1 = sym;
1065
1066 if (reload_completed)
1067 {
1068 /* We really should have taken care of this offset earlier. */
1069 gcc_assert (addend == 0);
1070 if (ia64_expand_load_address (op0, op1))
1071 return NULL_RTX;
1072 }
1073
1074 if (addend)
1075 {
1076 rtx subtarget = !can_create_pseudo_p () ? op0 : gen_reg_rtx (mode);
1077
1078 emit_insn (gen_rtx_SET (VOIDmode, subtarget, op1));
1079
1080 op1 = expand_simple_binop (mode, PLUS, subtarget,
1081 GEN_INT (addend), op0, 1, OPTAB_DIRECT);
1082 if (op0 == op1)
1083 return NULL_RTX;
1084 }
1085 }
1086
1087 return op1;
1088 }
1089
1090 /* Split a move from OP1 to OP0 conditional on COND. */
1091
1092 void
1093 ia64_emit_cond_move (rtx op0, rtx op1, rtx cond)
1094 {
1095 rtx insn, first = get_last_insn ();
1096
1097 emit_move_insn (op0, op1);
1098
1099 for (insn = get_last_insn (); insn != first; insn = PREV_INSN (insn))
1100 if (INSN_P (insn))
1101 PATTERN (insn) = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond),
1102 PATTERN (insn));
1103 }
1104
1105 /* Split a post-reload TImode or TFmode reference into two DImode
1106 components. This is made extra difficult by the fact that we do
1107 not get any scratch registers to work with, because reload cannot
1108 be prevented from giving us a scratch that overlaps the register
1109 pair involved. So instead, when addressing memory, we tweak the
1110 pointer register up and back down with POST_INCs. Or up and not
1111 back down when we can get away with it.
1112
1113 REVERSED is true when the loads must be done in reversed order
1114 (high word first) for correctness. DEAD is true when the pointer
1115 dies with the second insn we generate and therefore the second
1116 address must not carry a postmodify.
1117
1118 May return an insn which is to be emitted after the moves. */
1119
1120 static rtx
1121 ia64_split_tmode (rtx out[2], rtx in, bool reversed, bool dead)
1122 {
1123 rtx fixup = 0;
1124
1125 switch (GET_CODE (in))
1126 {
1127 case REG:
1128 out[reversed] = gen_rtx_REG (DImode, REGNO (in));
1129 out[!reversed] = gen_rtx_REG (DImode, REGNO (in) + 1);
1130 break;
1131
1132 case CONST_INT:
1133 case CONST_DOUBLE:
1134 /* Cannot occur reversed. */
1135 gcc_assert (!reversed);
1136
1137 if (GET_MODE (in) != TFmode)
1138 split_double (in, &out[0], &out[1]);
1139 else
1140 /* split_double does not understand how to split a TFmode
1141 quantity into a pair of DImode constants. */
1142 {
1143 REAL_VALUE_TYPE r;
1144 unsigned HOST_WIDE_INT p[2];
1145 long l[4]; /* TFmode is 128 bits */
1146
1147 REAL_VALUE_FROM_CONST_DOUBLE (r, in);
1148 real_to_target (l, &r, TFmode);
1149
1150 if (FLOAT_WORDS_BIG_ENDIAN)
1151 {
1152 p[0] = (((unsigned HOST_WIDE_INT) l[0]) << 32) + l[1];
1153 p[1] = (((unsigned HOST_WIDE_INT) l[2]) << 32) + l[3];
1154 }
1155 else
1156 {
1157 p[0] = (((unsigned HOST_WIDE_INT) l[1]) << 32) + l[0];
1158 p[1] = (((unsigned HOST_WIDE_INT) l[3]) << 32) + l[2];
1159 }
1160 out[0] = GEN_INT (p[0]);
1161 out[1] = GEN_INT (p[1]);
1162 }
1163 break;
1164
1165 case MEM:
1166 {
1167 rtx base = XEXP (in, 0);
1168 rtx offset;
1169
1170 switch (GET_CODE (base))
1171 {
1172 case REG:
1173 if (!reversed)
1174 {
1175 out[0] = adjust_automodify_address
1176 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1177 out[1] = adjust_automodify_address
1178 (in, DImode, dead ? 0 : gen_rtx_POST_DEC (Pmode, base), 8);
1179 }
1180 else
1181 {
1182 /* Reversal requires a pre-increment, which can only
1183 be done as a separate insn. */
1184 emit_insn (gen_adddi3 (base, base, GEN_INT (8)));
1185 out[0] = adjust_automodify_address
1186 (in, DImode, gen_rtx_POST_DEC (Pmode, base), 8);
1187 out[1] = adjust_address (in, DImode, 0);
1188 }
1189 break;
1190
1191 case POST_INC:
1192 gcc_assert (!reversed && !dead);
1193
1194 /* Just do the increment in two steps. */
1195 out[0] = adjust_automodify_address (in, DImode, 0, 0);
1196 out[1] = adjust_automodify_address (in, DImode, 0, 8);
1197 break;
1198
1199 case POST_DEC:
1200 gcc_assert (!reversed && !dead);
1201
1202 /* Add 8, subtract 24. */
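	    /* That is, +8 from the POST_INC on the first half plus -24 from
	       the POST_MODIFY on the second gives the net -16 of the
	       original POST_DEC on this 16-byte quantity.  */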
1203 base = XEXP (base, 0);
1204 out[0] = adjust_automodify_address
1205 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1206 out[1] = adjust_automodify_address
1207 (in, DImode,
1208 gen_rtx_POST_MODIFY (Pmode, base, plus_constant (base, -24)),
1209 8);
1210 break;
1211
1212 case POST_MODIFY:
1213 gcc_assert (!reversed && !dead);
1214
1215 /* Extract and adjust the modification. This case is
1216 trickier than the others, because we might have an
1217 index register, or we might have a combined offset that
1218 doesn't fit a signed 9-bit displacement field. We can
1219 assume the incoming expression is already legitimate. */
1220 offset = XEXP (base, 1);
1221 base = XEXP (base, 0);
1222
1223 out[0] = adjust_automodify_address
1224 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1225
1226 if (GET_CODE (XEXP (offset, 1)) == REG)
1227 {
1228 /* Can't adjust the postmodify to match. Emit the
1229 original, then a separate addition insn. */
1230 out[1] = adjust_automodify_address (in, DImode, 0, 8);
1231 fixup = gen_adddi3 (base, base, GEN_INT (-8));
1232 }
1233 else
1234 {
1235 gcc_assert (GET_CODE (XEXP (offset, 1)) == CONST_INT);
1236 if (INTVAL (XEXP (offset, 1)) < -256 + 8)
1237 {
1238 /* Again the postmodify cannot be made to match,
1239 but in this case it's more efficient to get rid
1240 of the postmodify entirely and fix up with an
1241 add insn. */
1242 out[1] = adjust_automodify_address (in, DImode, base, 8);
1243 fixup = gen_adddi3
1244 (base, base, GEN_INT (INTVAL (XEXP (offset, 1)) - 8));
1245 }
1246 else
1247 {
1248 /* Combined offset still fits in the displacement field.
1249 (We cannot overflow it at the high end.) */
1250 out[1] = adjust_automodify_address
1251 (in, DImode, gen_rtx_POST_MODIFY
1252 (Pmode, base, gen_rtx_PLUS
1253 (Pmode, base,
1254 GEN_INT (INTVAL (XEXP (offset, 1)) - 8))),
1255 8);
1256 }
1257 }
1258 break;
1259
1260 default:
1261 gcc_unreachable ();
1262 }
1263 break;
1264 }
1265
1266 default:
1267 gcc_unreachable ();
1268 }
1269
1270 return fixup;
1271 }
1272
1273 /* Split a TImode or TFmode move instruction after reload.
1274 This is used by *movtf_internal and *movti_internal. */
1275 void
1276 ia64_split_tmode_move (rtx operands[])
1277 {
1278 rtx in[2], out[2], insn;
1279 rtx fixup[2];
1280 bool dead = false;
1281 bool reversed = false;
1282
1283 /* It is possible for reload to decide to overwrite a pointer with
1284 the value it points to. In that case we have to do the loads in
1285 the appropriate order so that the pointer is not destroyed too
1286 early. Also we must not generate a postmodify for that second
1287 load, or rws_access_regno will die. */
1288 if (GET_CODE (operands[1]) == MEM
1289 && reg_overlap_mentioned_p (operands[0], operands[1]))
1290 {
1291 rtx base = XEXP (operands[1], 0);
1292 while (GET_CODE (base) != REG)
1293 base = XEXP (base, 0);
1294
1295 if (REGNO (base) == REGNO (operands[0]))
1296 reversed = true;
1297 dead = true;
1298 }
1299 /* Another reason to do the moves in reversed order is if the first
1300 element of the target register pair is also the second element of
1301 the source register pair. */
1302 if (GET_CODE (operands[0]) == REG && GET_CODE (operands[1]) == REG
1303 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
1304 reversed = true;
1305
1306 fixup[0] = ia64_split_tmode (in, operands[1], reversed, dead);
1307 fixup[1] = ia64_split_tmode (out, operands[0], reversed, dead);
1308
1309 #define MAYBE_ADD_REG_INC_NOTE(INSN, EXP) \
1310 if (GET_CODE (EXP) == MEM \
1311 && (GET_CODE (XEXP (EXP, 0)) == POST_MODIFY \
1312 || GET_CODE (XEXP (EXP, 0)) == POST_INC \
1313 || GET_CODE (XEXP (EXP, 0)) == POST_DEC)) \
1314 REG_NOTES (INSN) = gen_rtx_EXPR_LIST (REG_INC, \
1315 XEXP (XEXP (EXP, 0), 0), \
1316 REG_NOTES (INSN))
1317
1318 insn = emit_insn (gen_rtx_SET (VOIDmode, out[0], in[0]));
1319 MAYBE_ADD_REG_INC_NOTE (insn, in[0]);
1320 MAYBE_ADD_REG_INC_NOTE (insn, out[0]);
1321
1322 insn = emit_insn (gen_rtx_SET (VOIDmode, out[1], in[1]));
1323 MAYBE_ADD_REG_INC_NOTE (insn, in[1]);
1324 MAYBE_ADD_REG_INC_NOTE (insn, out[1]);
1325
1326 if (fixup[0])
1327 emit_insn (fixup[0]);
1328 if (fixup[1])
1329 emit_insn (fixup[1]);
1330
1331 #undef MAYBE_ADD_REG_INC_NOTE
1332 }
1333
1334 /* ??? Fixing GR->FR XFmode moves during reload is hard. You need to go
1335 through memory plus an extra GR scratch register. Except that you can
1336 either get the first from SECONDARY_MEMORY_NEEDED or the second from
1337 SECONDARY_RELOAD_CLASS, but not both.
1338
1339 We got into problems in the first place by allowing a construct like
1340 (subreg:XF (reg:TI)), which we got from a union containing a long double.
1341 This solution attempts to prevent this situation from occurring. When
1342 we see something like the above, we spill the inner register to memory. */
1343
1344 static rtx
1345 spill_xfmode_rfmode_operand (rtx in, int force, enum machine_mode mode)
1346 {
1347 if (GET_CODE (in) == SUBREG
1348 && GET_MODE (SUBREG_REG (in)) == TImode
1349 && GET_CODE (SUBREG_REG (in)) == REG)
1350 {
1351 rtx memt = assign_stack_temp (TImode, 16, 0);
1352 emit_move_insn (memt, SUBREG_REG (in));
1353 return adjust_address (memt, mode, 0);
1354 }
1355 else if (force && GET_CODE (in) == REG)
1356 {
1357 rtx memx = assign_stack_temp (mode, 16, 0);
1358 emit_move_insn (memx, in);
1359 return memx;
1360 }
1361 else
1362 return in;
1363 }
1364
1365 /* Expand the movxf or movrf pattern (MODE says which) with the given
1366 OPERANDS, returning true if the pattern should then invoke
1367 DONE. */
1368
1369 bool
1370 ia64_expand_movxf_movrf (enum machine_mode mode, rtx operands[])
1371 {
1372 rtx op0 = operands[0];
1373
1374 if (GET_CODE (op0) == SUBREG)
1375 op0 = SUBREG_REG (op0);
1376
1377 /* We must support XFmode loads into general registers for stdarg/vararg,
1378 unprototyped calls, and a rare case where a long double is passed as
1379 an argument after a float HFA fills the FP registers. We split them into
1380 DImode loads for convenience. We also need to support XFmode stores
1381 for the last case. This case does not happen for stdarg/vararg routines,
1382 because we do a block store to memory of unnamed arguments. */
1383
1384 if (GET_CODE (op0) == REG && GR_REGNO_P (REGNO (op0)))
1385 {
1386 rtx out[2];
1387
1388 /* We're hoping to transform everything that deals with XFmode
1389 quantities and GR registers early in the compiler. */
1390 gcc_assert (can_create_pseudo_p ());
1391
1392 /* Struct to register can just use TImode instead. */
1393 if ((GET_CODE (operands[1]) == SUBREG
1394 && GET_MODE (SUBREG_REG (operands[1])) == TImode)
1395 || (GET_CODE (operands[1]) == REG
1396 && GR_REGNO_P (REGNO (operands[1]))))
1397 {
1398 rtx op1 = operands[1];
1399
1400 if (GET_CODE (op1) == SUBREG)
1401 op1 = SUBREG_REG (op1);
1402 else
1403 op1 = gen_rtx_REG (TImode, REGNO (op1));
1404
1405 emit_move_insn (gen_rtx_REG (TImode, REGNO (op0)), op1);
1406 return true;
1407 }
1408
1409 if (GET_CODE (operands[1]) == CONST_DOUBLE)
1410 {
1411 /* Don't word-swap when reading in the constant. */
1412 emit_move_insn (gen_rtx_REG (DImode, REGNO (op0)),
1413 operand_subword (operands[1], WORDS_BIG_ENDIAN,
1414 0, mode));
1415 emit_move_insn (gen_rtx_REG (DImode, REGNO (op0) + 1),
1416 operand_subword (operands[1], !WORDS_BIG_ENDIAN,
1417 0, mode));
1418 return true;
1419 }
1420
1421 /* If the quantity is in a register not known to be GR, spill it. */
1422 if (register_operand (operands[1], mode))
1423 operands[1] = spill_xfmode_rfmode_operand (operands[1], 1, mode);
1424
1425 gcc_assert (GET_CODE (operands[1]) == MEM);
1426
1427 /* Don't word-swap when reading in the value. */
1428 out[0] = gen_rtx_REG (DImode, REGNO (op0));
1429 out[1] = gen_rtx_REG (DImode, REGNO (op0) + 1);
1430
1431 emit_move_insn (out[0], adjust_address (operands[1], DImode, 0));
1432 emit_move_insn (out[1], adjust_address (operands[1], DImode, 8));
1433 return true;
1434 }
1435
1436 if (GET_CODE (operands[1]) == REG && GR_REGNO_P (REGNO (operands[1])))
1437 {
1438 /* We're hoping to transform everything that deals with XFmode
1439 quantities and GR registers early in the compiler. */
1440 gcc_assert (can_create_pseudo_p ());
1441
1442 /* Op0 can't be a GR_REG here, as that case is handled above.
1443 If op0 is a register, then we spill op1, so that we now have a
1444 MEM operand. This requires creating an XFmode subreg of a TImode reg
1445 to force the spill. */
1446 if (register_operand (operands[0], mode))
1447 {
1448 rtx op1 = gen_rtx_REG (TImode, REGNO (operands[1]));
1449 op1 = gen_rtx_SUBREG (mode, op1, 0);
1450 operands[1] = spill_xfmode_rfmode_operand (op1, 0, mode);
1451 }
1452
1453 else
1454 {
1455 rtx in[2];
1456
1457 gcc_assert (GET_CODE (operands[0]) == MEM);
1458
1459 /* Don't word-swap when writing out the value. */
1460 in[0] = gen_rtx_REG (DImode, REGNO (operands[1]));
1461 in[1] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
1462
1463 emit_move_insn (adjust_address (operands[0], DImode, 0), in[0]);
1464 emit_move_insn (adjust_address (operands[0], DImode, 8), in[1]);
1465 return true;
1466 }
1467 }
1468
1469 if (!reload_in_progress && !reload_completed)
1470 {
1471 operands[1] = spill_xfmode_rfmode_operand (operands[1], 0, mode);
1472
1473 if (GET_MODE (op0) == TImode && GET_CODE (op0) == REG)
1474 {
1475 rtx memt, memx, in = operands[1];
1476 if (CONSTANT_P (in))
1477 in = validize_mem (force_const_mem (mode, in));
1478 if (GET_CODE (in) == MEM)
1479 memt = adjust_address (in, TImode, 0);
1480 else
1481 {
1482 memt = assign_stack_temp (TImode, 16, 0);
1483 memx = adjust_address (memt, mode, 0);
1484 emit_move_insn (memx, in);
1485 }
1486 emit_move_insn (op0, memt);
1487 return true;
1488 }
1489
1490 if (!ia64_move_ok (operands[0], operands[1]))
1491 operands[1] = force_reg (mode, operands[1]);
1492 }
1493
1494 return false;
1495 }
1496
1497 /* Emit comparison instruction if necessary, returning the expression
1498 that holds the compare result in the proper mode. */
1499
1500 static GTY(()) rtx cmptf_libfunc;
1501
1502 rtx
1503 ia64_expand_compare (enum rtx_code code, enum machine_mode mode)
1504 {
1505 rtx op0 = ia64_compare_op0, op1 = ia64_compare_op1;
1506 rtx cmp;
1507
1508 /* If we have a BImode input, then we already have a compare result, and
1509 do not need to emit another comparison. */
1510 if (GET_MODE (op0) == BImode)
1511 {
1512 gcc_assert ((code == NE || code == EQ) && op1 == const0_rtx);
1513 cmp = op0;
1514 }
1515 /* HPUX TFmode compare requires a library call to _U_Qfcmp, which takes a
1516 magic number as its third argument, indicating what to do.
1517 The return value is an integer to be compared against zero. */
1518 else if (TARGET_HPUX && GET_MODE (op0) == TFmode)
1519 {
1520 enum qfcmp_magic {
1521 QCMP_INV = 1, /* Raise FP_INVALID on SNaN as a side effect. */
1522 QCMP_UNORD = 2,
1523 QCMP_EQ = 4,
1524 QCMP_LT = 8,
1525 QCMP_GT = 16
1526 } magic;
1527 enum rtx_code ncode;
1528 rtx ret, insns;
1529
1530 gcc_assert (cmptf_libfunc && GET_MODE (op1) == TFmode);
1531 switch (code)
1532 {
1533 /* 1 = equal, 0 = not equal. Equality operators do
1534 not raise FP_INVALID when given an SNaN operand. */
1535 case EQ: magic = QCMP_EQ; ncode = NE; break;
1536 case NE: magic = QCMP_EQ; ncode = EQ; break;
1537 /* isunordered() from C99. */
1538 case UNORDERED: magic = QCMP_UNORD; ncode = NE; break;
1539 case ORDERED: magic = QCMP_UNORD; ncode = EQ; break;
1540 /* Relational operators raise FP_INVALID when given
1541 an SNaN operand. */
1542 case LT: magic = QCMP_LT |QCMP_INV; ncode = NE; break;
1543 case LE: magic = QCMP_LT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1544 case GT: magic = QCMP_GT |QCMP_INV; ncode = NE; break;
1545 case GE: magic = QCMP_GT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1546 /* FUTURE: Implement UNEQ, UNLT, UNLE, UNGT, UNGE, LTGT.
1547 Expanders for buneq etc. would have to be added to ia64.md
1548 for this to be useful. */
1549 default: gcc_unreachable ();
1550 }
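	  /* For instance, a LE comparison passes
	     QCMP_LT | QCMP_EQ | QCMP_INV (13) to _U_Qfcmp, and the integer
	     result is then tested with NE against zero.  */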
1551
1552 start_sequence ();
1553
1554 ret = emit_library_call_value (cmptf_libfunc, 0, LCT_CONST, DImode, 3,
1555 op0, TFmode, op1, TFmode,
1556 GEN_INT (magic), DImode);
1557 cmp = gen_reg_rtx (BImode);
1558 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1559 gen_rtx_fmt_ee (ncode, BImode,
1560 ret, const0_rtx)));
1561
1562 insns = get_insns ();
1563 end_sequence ();
1564
1565 emit_libcall_block (insns, cmp, cmp,
1566 gen_rtx_fmt_ee (code, BImode, op0, op1));
1567 code = NE;
1568 }
1569 else
1570 {
1571 cmp = gen_reg_rtx (BImode);
1572 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1573 gen_rtx_fmt_ee (code, BImode, op0, op1)));
1574 code = NE;
1575 }
1576
1577 return gen_rtx_fmt_ee (code, mode, cmp, const0_rtx);
1578 }
1579
1580 /* Generate an integral vector comparison. Return true if the condition has
1581 been reversed, and so the sense of the comparison should be inverted. */
1582
1583 static bool
1584 ia64_expand_vecint_compare (enum rtx_code code, enum machine_mode mode,
1585 rtx dest, rtx op0, rtx op1)
1586 {
1587 bool negate = false;
1588 rtx x;
1589
1590 /* Canonicalize the comparison to EQ, GT, GTU. */
1591 switch (code)
1592 {
1593 case EQ:
1594 case GT:
1595 case GTU:
1596 break;
1597
1598 case NE:
1599 case LE:
1600 case LEU:
1601 code = reverse_condition (code);
1602 negate = true;
1603 break;
1604
1605 case GE:
1606 case GEU:
1607 code = reverse_condition (code);
1608 negate = true;
1609 /* FALLTHRU */
1610
1611 case LT:
1612 case LTU:
1613 code = swap_condition (code);
1614 x = op0, op0 = op1, op1 = x;
1615 break;
1616
1617 default:
1618 gcc_unreachable ();
1619 }
1620
1621 /* Unsigned parallel compare is not supported by the hardware. Play some
1622 tricks to turn this into a signed comparison against 0. */
1623 if (code == GTU)
1624 {
1625 switch (mode)
1626 {
1627 case V2SImode:
1628 {
1629 rtx t1, t2, mask;
1630
1631 /* Perform a parallel modulo subtraction. */
1632 t1 = gen_reg_rtx (V2SImode);
1633 emit_insn (gen_subv2si3 (t1, op0, op1));
1634
1635 /* Extract the original sign bit of op0. */
1636 mask = GEN_INT (-0x80000000);
1637 mask = gen_rtx_CONST_VECTOR (V2SImode, gen_rtvec (2, mask, mask));
1638 mask = force_reg (V2SImode, mask);
1639 t2 = gen_reg_rtx (V2SImode);
1640 emit_insn (gen_andv2si3 (t2, op0, mask));
1641
1642 /* XOR it back into the result of the subtraction. This results
1643 in the sign bit set iff we saw unsigned underflow. */
1644 x = gen_reg_rtx (V2SImode);
1645 emit_insn (gen_xorv2si3 (x, t1, t2));
1646
1647 code = GT;
1648 op0 = x;
1649 op1 = CONST0_RTX (mode);
1650 }
1651 break;
1652
1653 case V8QImode:
1654 case V4HImode:
1655 /* Perform a parallel unsigned saturating subtraction. */
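	  /* The saturating difference is zero exactly when op0 <=u op1, so
	     an EQ test against zero computes LEU; flipping NEGATE below
	     yields the requested GTU sense.  */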
1656 x = gen_reg_rtx (mode);
1657 emit_insn (gen_rtx_SET (VOIDmode, x,
1658 gen_rtx_US_MINUS (mode, op0, op1)));
1659
1660 code = EQ;
1661 op0 = x;
1662 op1 = CONST0_RTX (mode);
1663 negate = !negate;
1664 break;
1665
1666 default:
1667 gcc_unreachable ();
1668 }
1669 }
1670
1671 x = gen_rtx_fmt_ee (code, mode, op0, op1);
1672 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
1673
1674 return negate;
1675 }
1676
1677 /* Emit an integral vector conditional move. */
1678
1679 void
1680 ia64_expand_vecint_cmov (rtx operands[])
1681 {
1682 enum machine_mode mode = GET_MODE (operands[0]);
1683 enum rtx_code code = GET_CODE (operands[3]);
1684 bool negate;
1685 rtx cmp, x, ot, of;
1686
1687 cmp = gen_reg_rtx (mode);
1688 negate = ia64_expand_vecint_compare (code, mode, cmp,
1689 operands[4], operands[5]);
1690
1691 ot = operands[1+negate];
1692 of = operands[2-negate];
1693
1694 if (ot == CONST0_RTX (mode))
1695 {
1696 if (of == CONST0_RTX (mode))
1697 {
1698 emit_move_insn (operands[0], ot);
1699 return;
1700 }
1701
1702 x = gen_rtx_NOT (mode, cmp);
1703 x = gen_rtx_AND (mode, x, of);
1704 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1705 }
1706 else if (of == CONST0_RTX (mode))
1707 {
1708 x = gen_rtx_AND (mode, cmp, ot);
1709 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1710 }
1711 else
1712 {
1713 rtx t, f;
1714
1715 t = gen_reg_rtx (mode);
1716 x = gen_rtx_AND (mode, cmp, operands[1+negate]);
1717 emit_insn (gen_rtx_SET (VOIDmode, t, x));
1718
1719 f = gen_reg_rtx (mode);
1720 x = gen_rtx_NOT (mode, cmp);
1721 x = gen_rtx_AND (mode, x, operands[2-negate]);
1722 emit_insn (gen_rtx_SET (VOIDmode, f, x));
1723
1724 x = gen_rtx_IOR (mode, t, f);
1725 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1726 }
1727 }
1728
1729 /* Emit an integral vector min or max operation. Return true if all done. */
1730
1731 bool
1732 ia64_expand_vecint_minmax (enum rtx_code code, enum machine_mode mode,
1733 rtx operands[])
1734 {
1735 rtx xops[6];
1736
1737 /* These four combinations are supported directly. */
1738 if (mode == V8QImode && (code == UMIN || code == UMAX))
1739 return false;
1740 if (mode == V4HImode && (code == SMIN || code == SMAX))
1741 return false;
1742
1743 /* This combination can be implemented with only saturating subtraction. */
1744 if (mode == V4HImode && code == UMAX)
1745 {
1746 rtx x, tmp = gen_reg_rtx (mode);
1747
1748 x = gen_rtx_US_MINUS (mode, operands[1], operands[2]);
1749 emit_insn (gen_rtx_SET (VOIDmode, tmp, x));
1750
1751 emit_insn (gen_addv4hi3 (operands[0], tmp, operands[2]));
1752 return true;
1753 }
1754
1755 /* Everything else is implemented via vector comparisons.  */
1756 xops[0] = operands[0];
1757 xops[4] = xops[1] = operands[1];
1758 xops[5] = xops[2] = operands[2];
1759
1760 switch (code)
1761 {
1762 case UMIN:
1763 code = LTU;
1764 break;
1765 case UMAX:
1766 code = GTU;
1767 break;
1768 case SMIN:
1769 code = LT;
1770 break;
1771 case SMAX:
1772 code = GT;
1773 break;
1774 default:
1775 gcc_unreachable ();
1776 }
1777 xops[3] = gen_rtx_fmt_ee (code, VOIDmode, operands[1], operands[2]);
1778
1779 ia64_expand_vecint_cmov (xops);
1780 return true;
1781 }
1782
1783 /* Emit an integral vector widening sum operation.  */
1784
1785 void
1786 ia64_expand_widen_sum (rtx operands[3], bool unsignedp)
1787 {
1788 rtx l, h, x, s;
1789 enum machine_mode wmode, mode;
1790 rtx (*unpack_l) (rtx, rtx, rtx);
1791 rtx (*unpack_h) (rtx, rtx, rtx);
1792 rtx (*plus) (rtx, rtx, rtx);
1793
1794 wmode = GET_MODE (operands[0]);
1795 mode = GET_MODE (operands[1]);
1796
1797 switch (mode)
1798 {
1799 case V8QImode:
1800 unpack_l = gen_unpack1_l;
1801 unpack_h = gen_unpack1_h;
1802 plus = gen_addv4hi3;
1803 break;
1804 case V4HImode:
1805 unpack_l = gen_unpack2_l;
1806 unpack_h = gen_unpack2_h;
1807 plus = gen_addv2si3;
1808 break;
1809 default:
1810 gcc_unreachable ();
1811 }
1812
1813 /* Fill in x with the extension of each element in op1: zero if unsigned, otherwise a mask of the sign bits.  */
1814 if (unsignedp)
1815 x = CONST0_RTX (mode);
1816 else
1817 {
1818 bool neg;
1819
1820 x = gen_reg_rtx (mode);
1821
1822 neg = ia64_expand_vecint_compare (LT, mode, x, operands[1],
1823 CONST0_RTX (mode));
1824 gcc_assert (!neg);
1825 }
1826
1827 l = gen_reg_rtx (wmode);
1828 h = gen_reg_rtx (wmode);
1829 s = gen_reg_rtx (wmode);
1830
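/* Unpack the low and high halves of op1, widening each element with x,
   then accumulate both widened halves together with operands[2] into the
   result.  */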
1831 emit_insn (unpack_l (gen_lowpart (mode, l), operands[1], x));
1832 emit_insn (unpack_h (gen_lowpart (mode, h), operands[1], x));
1833 emit_insn (plus (s, l, operands[2]));
1834 emit_insn (plus (operands[0], h, s));
1835 }
1836
1837 /* Emit a signed or unsigned V8QI dot product operation. */
1838
1839 void
1840 ia64_expand_dot_prod_v8qi (rtx operands[4], bool unsignedp)
1841 {
1842 rtx l1, l2, h1, h2, x1, x2, p1, p2, p3, p4, s1, s2, s3;
1843
1844 /* Fill in x1 and x2 with the extension of each element: zero if unsigned, otherwise masks of the sign bits.  */
1845 if (unsignedp)
1846 x1 = x2 = CONST0_RTX (V8QImode);
1847 else
1848 {
1849 bool neg;
1850
1851 x1 = gen_reg_rtx (V8QImode);
1852 x2 = gen_reg_rtx (V8QImode);
1853
1854 neg = ia64_expand_vecint_compare (LT, V8QImode, x1, operands[1],
1855 CONST0_RTX (V8QImode));
1856 gcc_assert (!neg);
1857 neg = ia64_expand_vecint_compare (LT, V8QImode, x2, operands[2],
1858 CONST0_RTX (V8QImode));
1859 gcc_assert (!neg);
1860 }
1861
1862 l1 = gen_reg_rtx (V4HImode);
1863 l2 = gen_reg_rtx (V4HImode);
1864 h1 = gen_reg_rtx (V4HImode);
1865 h2 = gen_reg_rtx (V4HImode);
1866
1867 emit_insn (gen_unpack1_l (gen_lowpart (V8QImode, l1), operands[1], x1));
1868 emit_insn (gen_unpack1_l (gen_lowpart (V8QImode, l2), operands[2], x2));
1869 emit_insn (gen_unpack1_h (gen_lowpart (V8QImode, h1), operands[1], x1));
1870 emit_insn (gen_unpack1_h (gen_lowpart (V8QImode, h2), operands[2], x2));
1871
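/* Form the four V2SImode partial products of the widened halves.  */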
1872 p1 = gen_reg_rtx (V2SImode);
1873 p2 = gen_reg_rtx (V2SImode);
1874 p3 = gen_reg_rtx (V2SImode);
1875 p4 = gen_reg_rtx (V2SImode);
1876 emit_insn (gen_pmpy2_r (p1, l1, l2));
1877 emit_insn (gen_pmpy2_l (p2, l1, l2));
1878 emit_insn (gen_pmpy2_r (p3, h1, h2));
1879 emit_insn (gen_pmpy2_l (p4, h1, h2));
1880
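/* Sum the partial products and fold in the accumulator operand.  */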
1881 s1 = gen_reg_rtx (V2SImode);
1882 s2 = gen_reg_rtx (V2SImode);
1883 s3 = gen_reg_rtx (V2SImode);
1884 emit_insn (gen_addv2si3 (s1, p1, p2));
1885 emit_insn (gen_addv2si3 (s2, p3, p4));
1886 emit_insn (gen_addv2si3 (s3, s1, operands[3]));
1887 emit_insn (gen_addv2si3 (operands[0], s2, s3));
1888 }
1889
1890 /* Emit the appropriate sequence for a call. */
1891
1892 void
1893 ia64_expand_call (rtx retval, rtx addr, rtx nextarg ATTRIBUTE_UNUSED,
1894 int sibcall_p)
1895 {
1896 rtx insn, b0;
1897
1898 addr = XEXP (addr, 0);
1899 addr = convert_memory_address (DImode, addr);
1900 b0 = gen_rtx_REG (DImode, R_BR (0));
1901
1902 /* ??? Should do this for functions known to bind local too. */
1903 if (TARGET_NO_PIC || TARGET_AUTO_PIC)
1904 {
1905 if (sibcall_p)
1906 insn = gen_sibcall_nogp (addr);
1907 else if (! retval)
1908 insn = gen_call_nogp (addr, b0);
1909 else
1910 insn = gen_call_value_nogp (retval, addr, b0);
1911 insn = emit_call_insn (insn);
1912 }
1913 else
1914 {
1915 if (sibcall_p)
1916 insn = gen_sibcall_gp (addr);
1917 else if (! retval)
1918 insn = gen_call_gp (addr, b0);
1919 else
1920 insn = gen_call_value_gp (retval, addr, b0);
1921 insn = emit_call_insn (insn);
1922
1923 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
1924 }
1925
1926 if (sibcall_p)
1927 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), b0);
1928 }
1929
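/* Record that code referring to frame register R has been emitted; once
   recorded, later frame layouts must keep R in the same hard register.  */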
1930 static void
1931 reg_emitted (enum ia64_frame_regs r)
1932 {
1933 if (emitted_frame_related_regs[r] == 0)
1934 emitted_frame_related_regs[r] = current_frame_info.r[r];
1935 else
1936 gcc_assert (emitted_frame_related_regs[r] == current_frame_info.r[r]);
1937 }
1938
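/* Return the hard register currently assigned to frame register R, noting
   that it has now been emitted.  */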
1939 static int
1940 get_reg (enum ia64_frame_regs r)
1941 {
1942 reg_emitted (r);
1943 return current_frame_info.r[r];
1944 }
1945
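/* Return true if hard register REGNO has already been assigned to one of
   the frame-related registers.  */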
1946 static bool
1947 is_emitted (int regno)
1948 {
1949 enum ia64_frame_regs r;
1950
1951 for (r = reg_fp; r < number_of_ia64_frame_regs; r++)
1952 if (emitted_frame_related_regs[r] == regno)
1953 return true;
1954 return false;
1955 }
1956
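/* Restore the GP (r1) from where the prologue saved it: either the general
   register recorded in reg_save_gp, or its slot in the register save
   area.  */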
1957 void
1958 ia64_reload_gp (void)
1959 {
1960 rtx tmp;
1961
1962 if (current_frame_info.r[reg_save_gp])
1963 {
1964 tmp = gen_rtx_REG (DImode, get_reg (reg_save_gp));
1965 }
1966 else
1967 {
1968 HOST_WIDE_INT offset;
1969 rtx offset_r;
1970
1971 offset = (current_frame_info.spill_cfa_off
1972 + current_frame_info.spill_size);
1973 if (frame_pointer_needed)
1974 {
1975 tmp = hard_frame_pointer_rtx;
1976 offset = -offset;
1977 }
1978 else
1979 {
1980 tmp = stack_pointer_rtx;
1981 offset = current_frame_info.total_size - offset;
1982 }
1983
1984 offset_r = GEN_INT (offset);
1985 if (satisfies_constraint_I (offset_r))
1986 emit_insn (gen_adddi3 (pic_offset_table_rtx, tmp, offset_r));
1987 else
1988 {
1989 emit_move_insn (pic_offset_table_rtx, offset_r);
1990 emit_insn (gen_adddi3 (pic_offset_table_rtx,
1991 pic_offset_table_rtx, tmp));
1992 }
1993
1994 tmp = gen_rtx_MEM (DImode, pic_offset_table_rtx);
1995 }
1996
1997 emit_move_insn (pic_offset_table_rtx, tmp);
1998 }
1999
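/* Split a call after reload.  If ADDR is a general register, it addresses a
   function descriptor: load the entry point into SCRATCH_B (via SCRATCH_R)
   and the callee's GP from the descriptor, then call through SCRATCH_B.
   The GP is reloaded afterwards unless this is a sibcall, the call cannot
   return, or the GP is known to be unchanged.  */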
2000 void
2001 ia64_split_call (rtx retval, rtx addr, rtx retaddr, rtx scratch_r,
2002 rtx scratch_b, int noreturn_p, int sibcall_p)
2003 {
2004 rtx insn;
2005 bool is_desc = false;
2006
2007 /* If we find we're calling through a register, then we're actually
2008 calling through a descriptor, so load up the values. */
2009 if (REG_P (addr) && GR_REGNO_P (REGNO (addr)))
2010 {
2011 rtx tmp;
2012 bool addr_dead_p;
2013
2014 /* ??? We are currently constrained to *not* use peep2, because
2015 we can legitimately change the global lifetime of the GP
2016 (in the form of killing it where it was previously live).  This is
2017 because a call through a descriptor doesn't use the previous
2018 value of the GP, while a direct call does, and we do not
2019 commit to either form until the split here.
2020
2021 That said, this means that we lack precise life info for
2022 whether ADDR is dead after this call. This is not terribly
2023 important, since we can fix things up essentially for free
2024 with the POST_DEC below, but it's nice to not use it when we
2025 can immediately tell it's not necessary. */
2026 addr_dead_p = ((noreturn_p || sibcall_p
2027 || TEST_HARD_REG_BIT (regs_invalidated_by_call,
2028 REGNO (addr)))
2029 && !FUNCTION_ARG_REGNO_P (REGNO (addr)));
2030
2031 /* Load the code address into scratch_b. */
2032 tmp = gen_rtx_POST_INC (Pmode, addr);
2033 tmp = gen_rtx_MEM (Pmode, tmp);
2034 emit_move_insn (scratch_r, tmp);
2035 emit_move_insn (scratch_b, scratch_r);
2036
2037 /* Load the GP address. If ADDR is not dead here, then we must
2038 revert the change made above via the POST_INCREMENT. */
2039 if (!addr_dead_p)
2040 tmp = gen_rtx_POST_DEC (Pmode, addr);
2041 else
2042 tmp = addr;
2043 tmp = gen_rtx_MEM (Pmode, tmp);
2044 emit_move_insn (pic_offset_table_rtx, tmp);
2045
2046 is_desc = true;
2047 addr = scratch_b;
2048 }
2049
2050 if (sibcall_p)
2051 insn = gen_sibcall_nogp (addr);
2052 else if (retval)
2053 insn = gen_call_value_nogp (retval, addr, retaddr);
2054 else
2055 insn = gen_call_nogp (addr, retaddr);
2056 emit_call_insn (insn);
2057
2058 if ((!TARGET_CONST_GP || is_desc) && !noreturn_p && !sibcall_p)
2059 ia64_reload_gp ();
2060 }
2061
2062 /* Expand an atomic operation. We want to perform MEM <CODE>= VAL atomically.
2063
2064 This differs from the generic code in that we know about the zero-extending
2065 properties of cmpxchg, and the zero-extending requirements of ar.ccv. We
2066 also know that ld.acq+cmpxchg.rel equals a full barrier.
2067
2068 The loop we want to generate looks like
2069
2070 cmp_reg = mem;
2071 label:
2072 old_reg = cmp_reg;
2073 new_reg = cmp_reg op val;
2074 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
2075 if (cmp_reg != old_reg)
2076 goto label;
2077
2078 Note that we only do the plain load from memory once. Subsequent
2079 iterations use the value loaded by the compare-and-swap pattern. */
2080
2081 void
2082 ia64_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
2083 rtx old_dst, rtx new_dst)
2084 {
2085 enum machine_mode mode = GET_MODE (mem);
2086 rtx old_reg, new_reg, cmp_reg, ar_ccv, label;
2087 enum insn_code icode;
2088
2089 /* Special case for using fetchadd. */
2090 if ((mode == SImode || mode == DImode)
2091 && (code == PLUS || code == MINUS)
2092 && fetchadd_operand (val, mode))
2093 {
2094 if (code == MINUS)
2095 val = GEN_INT (-INTVAL (val));
2096
2097 if (!old_dst)
2098 old_dst = gen_reg_rtx (mode);
2099
2100 emit_insn (gen_memory_barrier ());
2101
2102 if (mode == SImode)
2103 icode = CODE_FOR_fetchadd_acq_si;
2104 else
2105 icode = CODE_FOR_fetchadd_acq_di;
2106 emit_insn (GEN_FCN (icode) (old_dst, mem, val));
2107
2108 if (new_dst)
2109 {
2110 new_reg = expand_simple_binop (mode, PLUS, old_dst, val, new_dst,
2111 true, OPTAB_WIDEN);
2112 if (new_reg != new_dst)
2113 emit_move_insn (new_dst, new_reg);
2114 }
2115 return;
2116 }
2117
2118 /* Because of the volatile mem read, we get an ld.acq, which is the
2119 front half of the full barrier. The end half is the cmpxchg.rel. */
2120 gcc_assert (MEM_VOLATILE_P (mem));
2121
2122 old_reg = gen_reg_rtx (DImode);
2123 cmp_reg = gen_reg_rtx (DImode);
2124 label = gen_label_rtx ();
2125
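/* The compare-and-swap loop below operates on DImode values; for narrower
   modes, zero-extend the initial load into CMP_REG.  */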
2126 if (mode != DImode)
2127 {
2128 val = simplify_gen_subreg (DImode, val, mode, 0);
2129 emit_insn (gen_extend_insn (cmp_reg, mem, DImode, mode, 1));
2130 }
2131 else
2132 emit_move_insn (cmp_reg, mem);
2133
2134 emit_label (label);
2135
2136 ar_ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
2137 emit_move_insn (old_reg, cmp_reg);
2138 emit_move_insn (ar_ccv, cmp_reg);
2139
2140 if (old_dst)
2141 emit_move_insn (old_dst, gen_lowpart (mode, cmp_reg));
2142
2143 new_reg = cmp_reg;
2144 if (code == NOT)
2145 {
2146 new_reg = expand_simple_binop (DImode, AND, new_reg, val, NULL_RTX,
2147 true, OPTAB_DIRECT);
2148 new_reg = expand_simple_unop (DImode, code, new_reg, NULL_RTX, true);
2149 }
2150 else
2151 new_reg = expand_simple_binop (DImode, code, new_reg, val, NULL_RTX,
2152 true, OPTAB_DIRECT);
2153
2154 if (mode != DImode)
2155 new_reg = gen_lowpart (mode, new_reg);
2156 if (new_dst)
2157 emit_move_insn (new_dst, new_reg);
2158
2159 switch (mode)
2160 {
2161 case QImode: icode = CODE_FOR_cmpxchg_rel_qi; break;
2162 case HImode: icode = CODE_FOR_cmpxchg_rel_hi; break;
2163 case SImode: icode = CODE_FOR_cmpxchg_rel_si; break;
2164 case DImode: icode = CODE_FOR_cmpxchg_rel_di; break;
2165 default:
2166 gcc_unreachable ();
2167 }
2168
2169 emit_insn (GEN_FCN (icode) (cmp_reg, mem, ar_ccv, new_reg));
2170
2171 emit_cmp_and_jump_insns (cmp_reg, old_reg, NE, NULL, DImode, true, label);
2172 }
2173 \f
2174 /* Begin the assembly file. */
2175
2176 static void
2177 ia64_file_start (void)
2178 {
2179 /* Variable tracking should be run after all optimizations which change order
2180 of insns. It also needs a valid CFG. This can't be done in
2181 ia64_override_options, because flag_var_tracking is finalized after
2182 that. */
2183 ia64_flag_var_tracking = flag_var_tracking;
2184 flag_var_tracking = 0;
2185
2186 default_file_start ();
2187 emit_safe_across_calls ();
2188 }
2189
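/* Emit a .pred.safe_across_calls directive listing the ranges of predicate
   registers that are not call-used and are therefore preserved across
   calls.  */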
2190 void
2191 emit_safe_across_calls (void)
2192 {
2193 unsigned int rs, re;
2194 int out_state;
2195
2196 rs = 1;
2197 out_state = 0;
2198 while (1)
2199 {
2200 while (rs < 64 && call_used_regs[PR_REG (rs)])
2201 rs++;
2202 if (rs >= 64)
2203 break;
2204 for (re = rs + 1; re < 64 && ! call_used_regs[PR_REG (re)]; re++)
2205 continue;
2206 if (out_state == 0)
2207 {
2208 fputs ("\t.pred.safe_across_calls ", asm_out_file);
2209 out_state = 1;
2210 }
2211 else
2212 fputc (',', asm_out_file);
2213 if (re == rs + 1)
2214 fprintf (asm_out_file, "p%u", rs);
2215 else
2216 fprintf (asm_out_file, "p%u-p%u", rs, re - 1);
2217 rs = re + 1;
2218 }
2219 if (out_state)
2220 fputc ('\n', asm_out_file);
2221 }
2222
2223 /* Globalize a declaration. */
2224
2225 static void
2226 ia64_globalize_decl_name (FILE * stream, tree decl)
2227 {
2228 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
2229 tree version_attr = lookup_attribute ("version_id", DECL_ATTRIBUTES (decl));
2230 if (version_attr)
2231 {
2232 tree v = TREE_VALUE (TREE_VALUE (version_attr));
2233 const char *p = TREE_STRING_POINTER (v);
2234 fprintf (stream, "\t.alias %s#, \"%s{%s}\"\n", name, name, p);
2235 }
2236 targetm.asm_out.globalize_label (stream, name);
2237 if (TREE_CODE (decl) == FUNCTION_DECL)
2238 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "function");
2239 }
2240
2241 /* Helper function for ia64_compute_frame_size: find an appropriate general
2242 register to spill the special frame register R to.  Registers in GR0 to GR31
2243 already allocated by this routine are tracked in current_frame_info.gr_used_mask.
2244 TRY_LOCALS is true if we should attempt to locate a local regnum.  */
2245
2246 static int
2247 find_gr_spill (enum ia64_frame_regs r, int try_locals)
2248 {
2249 int regno;
2250
2251 if (emitted_frame_related_regs[r] != 0)
2252 {
2253 regno = emitted_frame_related_regs[r];
2254 if (regno >= LOC_REG (0) && regno < LOC_REG (80 - frame_pointer_needed)
2255 && current_frame_info.n_local_regs < regno - LOC_REG (0) + 1)
2256 current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;
2257 else if (current_function_is_leaf
2258 && regno >= GR_REG (1) && regno <= GR_REG (31))
2259 current_frame_info.gr_used_mask |= 1 << regno;
2260
2261 return regno;
2262 }
2263
2264 /* If this is a leaf function, first try an otherwise unused
2265 call-clobbered register. */
2266 if (current_function_is_leaf)
2267 {
2268 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
2269 if (! df_regs_ever_live_p (regno)
2270 && call_used_regs[regno]
2271 && ! fixed_regs[regno]
2272 && ! global_regs[regno]
2273 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0
2274 && ! is_emitted (regno))
2275 {
2276 current_frame_info.gr_used_mask |= 1 << regno;
2277 return regno;
2278 }
2279 }
2280
2281 if (try_locals)
2282 {
2283 regno = current_frame_info.n_local_regs;
2284 /* If there is a frame pointer, then we can't use loc79, because
2285 that is HARD_FRAME_POINTER_REGNUM. In particular, see the
2286 reg_name switching code in ia64_expand_prologue. */
2287 while (regno < (80 - frame_pointer_needed))
2288 if (! is_emitted (LOC_REG (regno++)))
2289 {
2290 current_frame_info.n_local_regs = regno;
2291 return LOC_REG (regno - 1);
2292 }
2293 }
2294
2295 /* Failed to find a general register to spill to. Must use stack. */
2296 return 0;
2297 }
2298
2299 /* In order to make for nice schedules, we try to allocate every temporary
2300 to a different register. We must of course stay away from call-saved,
2301 fixed, and global registers. We must also stay away from registers
2302 allocated in current_frame_info.gr_used_mask, since those include regs
2303 used all through the prologue.
2304
2305 Any register allocated here must be used immediately. The idea is to
2306 aid scheduling, not to solve data flow problems. */
2307
2308 static int last_scratch_gr_reg;
2309
2310 static int
2311 next_scratch_gr_reg (void)
2312 {
2313 int i, regno;
2314
2315 for (i = 0; i < 32; ++i)
2316 {
2317 regno = (last_scratch_gr_reg + i + 1) & 31;
2318 if (call_used_regs[regno]
2319 && ! fixed_regs[regno]
2320 && ! global_regs[regno]
2321 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
2322 {
2323 last_scratch_gr_reg = regno;
2324 return regno;
2325 }
2326 }
2327
2328 /* There must be _something_ available. */
2329 gcc_unreachable ();
2330 }
2331
2332 /* Helper function for ia64_compute_frame_size, called through
2333 diddle_return_value. Mark REG in current_frame_info.gr_used_mask. */
2334
2335 static void
2336 mark_reg_gr_used_mask (rtx reg, void *data ATTRIBUTE_UNUSED)
2337 {
2338 unsigned int regno = REGNO (reg);
2339 if (regno < 32)
2340 {
2341 unsigned int i, n = hard_regno_nregs[regno][GET_MODE (reg)];
2342 for (i = 0; i < n; ++i)
2343 current_frame_info.gr_used_mask |= 1 << (regno + i);
2344 }
2345 }
2346
2347
2348 /* Compute the frame layout for the current function and record it in
2349 current_frame_info.  SIZE is the number of bytes of space
2350 needed for local variables.  */
2351
2352 static void
2353 ia64_compute_frame_size (HOST_WIDE_INT size)
2354 {
2355 HOST_WIDE_INT total_size;
2356 HOST_WIDE_INT spill_size = 0;
2357 HOST_WIDE_INT extra_spill_size = 0;
2358 HOST_WIDE_INT pretend_args_size;
2359 HARD_REG_SET mask;
2360 int n_spilled = 0;
2361 int spilled_gr_p = 0;
2362 int spilled_fr_p = 0;
2363 unsigned int regno;
2364 int min_regno;
2365 int max_regno;
2366 int i;
2367
2368 if (current_frame_info.initialized)
2369 return;
2370
2371 memset (&current_frame_info, 0, sizeof current_frame_info);
2372 CLEAR_HARD_REG_SET (mask);
2373
2374 /* Don't allocate scratches to the return register. */
2375 diddle_return_value (mark_reg_gr_used_mask, NULL);
2376
2377 /* Don't allocate scratches to the EH scratch registers. */
2378 if (cfun->machine->ia64_eh_epilogue_sp)
2379 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_sp, NULL);
2380 if (cfun->machine->ia64_eh_epilogue_bsp)
2381 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_bsp, NULL);
2382
2383 /* Find the size of the register stack frame. We have only 80 local
2384 registers, because we reserve 8 for the inputs and 8 for the
2385 outputs. */
2386
2387 /* Skip HARD_FRAME_POINTER_REGNUM (loc79) when frame_pointer_needed,
2388 since we'll be adjusting that down later. */
2389 regno = LOC_REG (78) + ! frame_pointer_needed;
2390 for (; regno >= LOC_REG (0); regno--)
2391 if (df_regs_ever_live_p (regno) && !is_emitted (regno))
2392 break;
2393 current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;
2394
2395 /* For functions marked with the syscall_linkage attribute, we must mark
2396 all eight input registers as in use, so that locals aren't visible to
2397 the caller. */
2398
2399 if (cfun->machine->n_varargs > 0
2400 || lookup_attribute ("syscall_linkage",
2401 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
2402 current_frame_info.n_input_regs = 8;
2403 else
2404 {
2405 for (regno = IN_REG (7); regno >= IN_REG (0); regno--)
2406 if (df_regs_ever_live_p (regno))
2407 break;
2408 current_frame_info.n_input_regs = regno - IN_REG (0) + 1;
2409 }
2410
2411 for (regno = OUT_REG (7); regno >= OUT_REG (0); regno--)
2412 if (df_regs_ever_live_p (regno))
2413 break;
2414 i = regno - OUT_REG (0) + 1;
2415
2416 #ifndef PROFILE_HOOK
2417 /* When -p profiling, we need one output register for the mcount argument.
2418 Likewise for -a profiling for the bb_init_func argument. For -ax
2419 profiling, we need two output registers for the two bb_init_trace_func
2420 arguments. */
2421 if (crtl->profile)
2422 i = MAX (i, 1);
2423 #endif
2424 current_frame_info.n_output_regs = i;
2425
2426 /* ??? No rotating register support yet. */
2427 current_frame_info.n_rotate_regs = 0;
2428
2429 /* Discover which registers need spilling, and how much room that
2430 will take. Begin with floating point and general registers,
2431 which will always wind up on the stack. */
2432
2433 for (regno = FR_REG (2); regno <= FR_REG (127); regno++)
2434 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2435 {
2436 SET_HARD_REG_BIT (mask, regno);
2437 spill_size += 16;
2438 n_spilled += 1;
2439 spilled_fr_p = 1;
2440 }
2441
2442 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
2443 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2444 {
2445 SET_HARD_REG_BIT (mask, regno);
2446 spill_size += 8;
2447 n_spilled += 1;
2448 spilled_gr_p = 1;
2449 }
2450
2451 for (regno = BR_REG (1); regno <= BR_REG (7); regno++)
2452 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2453 {
2454 SET_HARD_REG_BIT (mask, regno);
2455 spill_size += 8;
2456 n_spilled += 1;
2457 }
2458
2459 /* Now come all special registers that might get saved in other
2460 general registers. */
2461
2462 if (frame_pointer_needed)
2463 {
2464 current_frame_info.r[reg_fp] = find_gr_spill (reg_fp, 1);
2465 /* If we did not get a register, then we take LOC79. This is guaranteed
2466 to be free, even if regs_ever_live is already set, because this is
2467 HARD_FRAME_POINTER_REGNUM. This requires incrementing n_local_regs,
2468 as we don't count loc79 above. */
2469 if (current_frame_info.r[reg_fp] == 0)
2470 {
2471 current_frame_info.r[reg_fp] = LOC_REG (79);
2472 current_frame_info.n_local_regs = LOC_REG (79) - LOC_REG (0) + 1;
2473 }
2474 }
2475
2476 if (! current_function_is_leaf)
2477 {
2478 /* Emit a save of BR0 if we call other functions. Do this even
2479 if this function doesn't return, as EH depends on this to be
2480 able to unwind the stack. */
2481 SET_HARD_REG_BIT (mask, BR_REG (0));
2482
2483 current_frame_info.r[reg_save_b0] = find_gr_spill (reg_save_b0, 1);
2484 if (current_frame_info.r[reg_save_b0] == 0)
2485 {
2486 extra_spill_size += 8;
2487 n_spilled += 1;
2488 }
2489
2490 /* Similarly for ar.pfs. */
2491 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
2492 current_frame_info.r[reg_save_ar_pfs] = find_gr_spill (reg_save_ar_pfs, 1);
2493 if (current_frame_info.r[reg_save_ar_pfs] == 0)
2494 {
2495 extra_spill_size += 8;
2496 n_spilled += 1;
2497 }
2498
2499 /* Similarly for gp. Note that if we're calling setjmp, the stacked
2500 registers are clobbered, so we fall back to the stack. */
2501 current_frame_info.r[reg_save_gp]
2502 = (cfun->calls_setjmp ? 0 : find_gr_spill (reg_save_gp, 1));
2503 if (current_frame_info.r[reg_save_gp] == 0)
2504 {
2505 SET_HARD_REG_BIT (mask, GR_REG (1));
2506 spill_size += 8;
2507 n_spilled += 1;
2508 }
2509 }
2510 else
2511 {
2512 if (df_regs_ever_live_p (BR_REG (0)) && ! call_used_regs[BR_REG (0)])
2513 {
2514 SET_HARD_REG_BIT (mask, BR_REG (0));
2515 extra_spill_size += 8;
2516 n_spilled += 1;
2517 }
2518
2519 if (df_regs_ever_live_p (AR_PFS_REGNUM))
2520 {
2521 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
2522 current_frame_info.r[reg_save_ar_pfs]
2523 = find_gr_spill (reg_save_ar_pfs, 1);
2524 if (current_frame_info.r[reg_save_ar_pfs] == 0)
2525 {
2526 extra_spill_size += 8;
2527 n_spilled += 1;
2528 }
2529 }
2530 }
2531
2532 /* Unwind descriptor hackery: things are most efficient if we allocate
2533 consecutive GR save registers for RP, PFS, FP in that order. However,
2534 it is absolutely critical that FP get the only hard register that's
2535 guaranteed to be free, so we allocated it first. If all three did
2536 happen to be allocated hard regs, and are consecutive, rearrange them
2537 into the preferred order now.
2538
2539 If we have already emitted code for any of those registers,
2540 then it's already too late to change. */
2541 min_regno = MIN (current_frame_info.r[reg_fp],
2542 MIN (current_frame_info.r[reg_save_b0],
2543 current_frame_info.r[reg_save_ar_pfs]));
2544 max_regno = MAX (current_frame_info.r[reg_fp],
2545 MAX (current_frame_info.r[reg_save_b0],
2546 current_frame_info.r[reg_save_ar_pfs]));
2547 if (min_regno > 0
2548 && min_regno + 2 == max_regno
2549 && (current_frame_info.r[reg_fp] == min_regno + 1
2550 || current_frame_info.r[reg_save_b0] == min_regno + 1
2551 || current_frame_info.r[reg_save_ar_pfs] == min_regno + 1)
2552 && (emitted_frame_related_regs[reg_save_b0] == 0
2553 || emitted_frame_related_regs[reg_save_b0] == min_regno)
2554 && (emitted_frame_related_regs[reg_save_ar_pfs] == 0
2555 || emitted_frame_related_regs[reg_save_ar_pfs] == min_regno + 1)
2556 && (emitted_frame_related_regs[reg_fp] == 0
2557 || emitted_frame_related_regs[reg_fp] == min_regno + 2))
2558 {
2559 current_frame_info.r[reg_save_b0] = min_regno;
2560 current_frame_info.r[reg_save_ar_pfs] = min_regno + 1;
2561 current_frame_info.r[reg_fp] = min_regno + 2;
2562 }
2563
2564 /* See if we need to store the predicate register block. */
2565 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2566 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2567 break;
2568 if (regno <= PR_REG (63))
2569 {
2570 SET_HARD_REG_BIT (mask, PR_REG (0));
2571 current_frame_info.r[reg_save_pr] = find_gr_spill (reg_save_pr, 1);
2572 if (current_frame_info.r[reg_save_pr] == 0)
2573 {
2574 extra_spill_size += 8;
2575 n_spilled += 1;
2576 }
2577
2578 /* ??? Mark them all as used so that register renaming and such
2579 are free to use them. */
2580 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2581 df_set_regs_ever_live (regno, true);
2582 }
2583
2584 /* If we're forced to use st8.spill, we're forced to save and restore
2585 ar.unat as well. The check for existing liveness allows inline asm
2586 to touch ar.unat. */
2587 if (spilled_gr_p || cfun->machine->n_varargs
2588 || df_regs_ever_live_p (AR_UNAT_REGNUM))
2589 {
2590 df_set_regs_ever_live (AR_UNAT_REGNUM, true);
2591 SET_HARD_REG_BIT (mask, AR_UNAT_REGNUM);
2592 current_frame_info.r[reg_save_ar_unat]
2593 = find_gr_spill (reg_save_ar_unat, spill_size == 0);
2594 if (current_frame_info.r[reg_save_ar_unat] == 0)
2595 {
2596 extra_spill_size += 8;
2597 n_spilled += 1;
2598 }
2599 }
2600
2601 if (df_regs_ever_live_p (AR_LC_REGNUM))
2602 {
2603 SET_HARD_REG_BIT (mask, AR_LC_REGNUM);
2604 current_frame_info.r[reg_save_ar_lc]
2605 = find_gr_spill (reg_save_ar_lc, spill_size == 0);
2606 if (current_frame_info.r[reg_save_ar_lc] == 0)
2607 {
2608 extra_spill_size += 8;
2609 n_spilled += 1;
2610 }
2611 }
2612
2613 /* If we have an odd number of words of pretend arguments written to
2614 the stack, then the FR save area will be unaligned. We round the
2615 size of this area up to keep things 16 byte aligned. */
2616 if (spilled_fr_p)
2617 pretend_args_size = IA64_STACK_ALIGN (crtl->args.pretend_args_size);
2618 else
2619 pretend_args_size = crtl->args.pretend_args_size;
2620
2621 total_size = (spill_size + extra_spill_size + size + pretend_args_size
2622 + crtl->outgoing_args_size);
2623 total_size = IA64_STACK_ALIGN (total_size);
2624
2625 /* We always use the 16-byte scratch area provided by the caller, but
2626 if we are a leaf function, there's no one to which we need to provide
2627 a scratch area. */
2628 if (current_function_is_leaf)
2629 total_size = MAX (0, total_size - 16);
2630
2631 current_frame_info.total_size = total_size;
2632 current_frame_info.spill_cfa_off = pretend_args_size - 16;
2633 current_frame_info.spill_size = spill_size;
2634 current_frame_info.extra_spill_size = extra_spill_size;
2635 COPY_HARD_REG_SET (current_frame_info.mask, mask);
2636 current_frame_info.n_spilled = n_spilled;
2637 current_frame_info.initialized = reload_completed;
2638 }
2639
2640 /* Compute the initial difference between the specified pair of registers. */
2641
2642 HOST_WIDE_INT
2643 ia64_initial_elimination_offset (int from, int to)
2644 {
2645 HOST_WIDE_INT offset;
2646
2647 ia64_compute_frame_size (get_frame_size ());
2648 switch (from)
2649 {
2650 case FRAME_POINTER_REGNUM:
2651 switch (to)
2652 {
2653 case HARD_FRAME_POINTER_REGNUM:
2654 if (current_function_is_leaf)
2655 offset = -current_frame_info.total_size;
2656 else
2657 offset = -(current_frame_info.total_size
2658 - crtl->outgoing_args_size - 16);
2659 break;
2660
2661 case STACK_POINTER_REGNUM:
2662 if (current_function_is_leaf)
2663 offset = 0;
2664 else
2665 offset = 16 + crtl->outgoing_args_size;
2666 break;
2667
2668 default:
2669 gcc_unreachable ();
2670 }
2671 break;
2672
2673 case ARG_POINTER_REGNUM:
2674 /* Arguments start above the 16 byte save area, unless this is a stdarg
2675 function, in which case we store through the 16 byte save area.  */
2676 switch (to)
2677 {
2678 case HARD_FRAME_POINTER_REGNUM:
2679 offset = 16 - crtl->args.pretend_args_size;
2680 break;
2681
2682 case STACK_POINTER_REGNUM:
2683 offset = (current_frame_info.total_size
2684 + 16 - crtl->args.pretend_args_size);
2685 break;
2686
2687 default:
2688 gcc_unreachable ();
2689 }
2690 break;
2691
2692 default:
2693 gcc_unreachable ();
2694 }
2695
2696 return offset;
2697 }
2698
2699 /* If there are more than a trivial number of register spills, we use
2700 two interleaved iterators so that we can get two memory references
2701 per insn group.
2702
2703 In order to simplify things in the prologue and epilogue expanders,
2704 we use helper functions to fix up the memory references after the
2705 fact with the appropriate offsets to a POST_MODIFY memory mode.
2706 The following data structure tracks the state of the two iterators
2707 while insns are being emitted. */
2708
2709 struct spill_fill_data
2710 {
2711 rtx init_after; /* point at which to emit initializations */
2712 rtx init_reg[2]; /* initial base register */
2713 rtx iter_reg[2]; /* the iterator registers */
2714 rtx *prev_addr[2]; /* address of last memory use */
2715 rtx prev_insn[2]; /* the insn corresponding to prev_addr */
2716 HOST_WIDE_INT prev_off[2]; /* last offset */
2717 int n_iter; /* number of iterators in use */
2718 int next_iter; /* next iterator to use */
2719 unsigned int save_gr_used_mask;
2720 };
2721
2722 static struct spill_fill_data spill_fill_data;
2723
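/* Initialize the spill/fill iterators for N_SPILLS memory operations
   starting from INIT_REG at CFA offset CFA_OFF.  A second iterator
   register is allocated when there are more than two spills.  */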
2724 static void
2725 setup_spill_pointers (int n_spills, rtx init_reg, HOST_WIDE_INT cfa_off)
2726 {
2727 int i;
2728
2729 spill_fill_data.init_after = get_last_insn ();
2730 spill_fill_data.init_reg[0] = init_reg;
2731 spill_fill_data.init_reg[1] = init_reg;
2732 spill_fill_data.prev_addr[0] = NULL;
2733 spill_fill_data.prev_addr[1] = NULL;
2734 spill_fill_data.prev_insn[0] = NULL;
2735 spill_fill_data.prev_insn[1] = NULL;
2736 spill_fill_data.prev_off[0] = cfa_off;
2737 spill_fill_data.prev_off[1] = cfa_off;
2738 spill_fill_data.next_iter = 0;
2739 spill_fill_data.save_gr_used_mask = current_frame_info.gr_used_mask;
2740
2741 spill_fill_data.n_iter = 1 + (n_spills > 2);
2742 for (i = 0; i < spill_fill_data.n_iter; ++i)
2743 {
2744 int regno = next_scratch_gr_reg ();
2745 spill_fill_data.iter_reg[i] = gen_rtx_REG (DImode, regno);
2746 current_frame_info.gr_used_mask |= 1 << regno;
2747 }
2748 }
2749
2750 static void
2751 finish_spill_pointers (void)
2752 {
2753 current_frame_info.gr_used_mask = spill_fill_data.save_gr_used_mask;
2754 }
2755
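/* Return a MEM through the current spill iterator for a save or restore of
   REG at CFA offset CFA_OFF.  When the displacement from the previous access
   fits, that access is rewritten to a POST_MODIFY; otherwise the iterator
   register is adjusted with an explicit add.  */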
2756 static rtx
2757 spill_restore_mem (rtx reg, HOST_WIDE_INT cfa_off)
2758 {
2759 int iter = spill_fill_data.next_iter;
2760 HOST_WIDE_INT disp = spill_fill_data.prev_off[iter] - cfa_off;
2761 rtx disp_rtx = GEN_INT (disp);
2762 rtx mem;
2763
2764 if (spill_fill_data.prev_addr[iter])
2765 {
2766 if (satisfies_constraint_N (disp_rtx))
2767 {
2768 *spill_fill_data.prev_addr[iter]
2769 = gen_rtx_POST_MODIFY (DImode, spill_fill_data.iter_reg[iter],
2770 gen_rtx_PLUS (DImode,
2771 spill_fill_data.iter_reg[iter],
2772 disp_rtx));
2773 REG_NOTES (spill_fill_data.prev_insn[iter])
2774 = gen_rtx_EXPR_LIST (REG_INC, spill_fill_data.iter_reg[iter],
2775 REG_NOTES (spill_fill_data.prev_insn[iter]));
2776 }
2777 else
2778 {
2779 /* ??? Could use register post_modify for loads. */
2780 if (!satisfies_constraint_I (disp_rtx))
2781 {
2782 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
2783 emit_move_insn (tmp, disp_rtx);
2784 disp_rtx = tmp;
2785 }
2786 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
2787 spill_fill_data.iter_reg[iter], disp_rtx));
2788 }
2789 }
2790 /* Micro-optimization: if we've created a frame pointer, it's at
2791 CFA 0, which may allow the real iterator to be initialized lower,
2792 slightly increasing parallelism. Also, if there are few saves
2793 it may eliminate the iterator entirely. */
2794 else if (disp == 0
2795 && spill_fill_data.init_reg[iter] == stack_pointer_rtx
2796 && frame_pointer_needed)
2797 {
2798 mem = gen_rtx_MEM (GET_MODE (reg), hard_frame_pointer_rtx);
2799 set_mem_alias_set (mem, get_varargs_alias_set ());
2800 return mem;
2801 }
2802 else
2803 {
2804 rtx seq, insn;
2805
2806 if (disp == 0)
2807 seq = gen_movdi (spill_fill_data.iter_reg[iter],
2808 spill_fill_data.init_reg[iter]);
2809 else
2810 {
2811 start_sequence ();
2812
2813 if (!satisfies_constraint_I (disp_rtx))
2814 {
2815 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
2816 emit_move_insn (tmp, disp_rtx);
2817 disp_rtx = tmp;
2818 }
2819
2820 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
2821 spill_fill_data.init_reg[iter],
2822 disp_rtx));
2823
2824 seq = get_insns ();
2825 end_sequence ();
2826 }
2827
2828 /* Be careful in case this would be the first insn in the function.  */
2829 if (spill_fill_data.init_after)
2830 insn = emit_insn_after (seq, spill_fill_data.init_after);
2831 else
2832 {
2833 rtx first = get_insns ();
2834 if (first)
2835 insn = emit_insn_before (seq, first);
2836 else
2837 insn = emit_insn (seq);
2838 }
2839 spill_fill_data.init_after = insn;
2840 }
2841
2842 mem = gen_rtx_MEM (GET_MODE (reg), spill_fill_data.iter_reg[iter]);
2843
2844 /* ??? Not all of the spills are for varargs, but some of them are.
2845 The rest of the spills belong in an alias set of their own. But
2846 it doesn't actually hurt to include them here. */
2847 set_mem_alias_set (mem, get_varargs_alias_set ());
2848
2849 spill_fill_data.prev_addr[iter] = &XEXP (mem, 0);
2850 spill_fill_data.prev_off[iter] = cfa_off;
2851
2852 if (++iter >= spill_fill_data.n_iter)
2853 iter = 0;
2854 spill_fill_data.next_iter = iter;
2855
2856 return mem;
2857 }
2858
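/* Emit a spill of REG at CFA offset CFA_OFF using MOVE_FN.  If FRAME_REG is
   non-null, mark the insn frame related and attach a REG_FRAME_RELATED_EXPR
   note describing the save at its CFA-relative address for the unwinder.  */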
2859 static void
2860 do_spill (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off,
2861 rtx frame_reg)
2862 {
2863 int iter = spill_fill_data.next_iter;
2864 rtx mem, insn;
2865
2866 mem = spill_restore_mem (reg, cfa_off);
2867 insn = emit_insn ((*move_fn) (mem, reg, GEN_INT (cfa_off)));
2868 spill_fill_data.prev_insn[iter] = insn;
2869
2870 if (frame_reg)
2871 {
2872 rtx base;
2873 HOST_WIDE_INT off;
2874
2875 RTX_FRAME_RELATED_P (insn) = 1;
2876
2877 /* Don't even pretend that the unwind code can intuit its way
2878 through a pair of interleaved post_modify iterators. Just
2879 provide the correct answer. */
2880
2881 if (frame_pointer_needed)
2882 {
2883 base = hard_frame_pointer_rtx;
2884 off = - cfa_off;
2885 }
2886 else
2887 {
2888 base = stack_pointer_rtx;
2889 off = current_frame_info.total_size - cfa_off;
2890 }
2891
2892 REG_NOTES (insn)
2893 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2894 gen_rtx_SET (VOIDmode,
2895 gen_rtx_MEM (GET_MODE (reg),
2896 plus_constant (base, off)),
2897 frame_reg),
2898 REG_NOTES (insn));
2899 }
2900 }
2901
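/* Emit a restore of REG from its save slot at CFA offset CFA_OFF using
   MOVE_FN.  */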
2902 static void
2903 do_restore (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off)
2904 {
2905 int iter = spill_fill_data.next_iter;
2906 rtx insn;
2907
2908 insn = emit_insn ((*move_fn) (reg, spill_restore_mem (reg, cfa_off),
2909 GEN_INT (cfa_off)));
2910 spill_fill_data.prev_insn[iter] = insn;
2911 }
2912
2913 /* Wrapper functions that discard the CONST_INT spill offset.  These
2914 exist so that we can give gr_spill/gr_fill the offset they need and
2915 use a consistent function interface. */
2916
2917 static rtx
2918 gen_movdi_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2919 {
2920 return gen_movdi (dest, src);
2921 }
2922
2923 static rtx
2924 gen_fr_spill_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2925 {
2926 return gen_fr_spill (dest, src);
2927 }
2928
2929 static rtx
2930 gen_fr_restore_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2931 {
2932 return gen_fr_restore (dest, src);
2933 }
2934
2935 /* Called after register allocation to add any instructions needed for the
2936 prologue. Using a prologue insn is favored compared to putting all of the
2937 instructions in output_function_prologue(), since it allows the scheduler
2938 to intermix instructions with the saves of the caller saved registers. In
2939 some cases, it might be necessary to emit a barrier instruction as the last
2940 insn to prevent such scheduling.
2941
2942 Also any insns generated here should have RTX_FRAME_RELATED_P(insn) = 1
2943 so that the debug info generation code can handle them properly.
2944
2945 The register save area is laid out like so:
2946 cfa+16
2947 [ varargs spill area ]
2948 [ fr register spill area ]
2949 [ br register spill area ]
2950 [ ar register spill area ]
2951 [ pr register spill area ]
2952 [ gr register spill area ] */
2953
2954 /* ??? Get inefficient code when the frame size is larger than can fit in an
2955 adds instruction. */
2956
2957 void
2958 ia64_expand_prologue (void)
2959 {
2960 rtx insn, ar_pfs_save_reg, ar_unat_save_reg;
2961 int i, epilogue_p, regno, alt_regno, cfa_off, n_varargs;
2962 rtx reg, alt_reg;
2963
2964 ia64_compute_frame_size (get_frame_size ());
2965 last_scratch_gr_reg = 15;
2966
2967 if (dump_file)
2968 {
2969 fprintf (dump_file, "ia64 frame related registers "
2970 "recorded in current_frame_info.r[]:\n");
2971 #define PRINTREG(a) if (current_frame_info.r[a]) \
2972 fprintf(dump_file, "%s = %d\n", #a, current_frame_info.r[a])
2973 PRINTREG(reg_fp);
2974 PRINTREG(reg_save_b0);
2975 PRINTREG(reg_save_pr);
2976 PRINTREG(reg_save_ar_pfs);
2977 PRINTREG(reg_save_ar_unat);
2978 PRINTREG(reg_save_ar_lc);
2979 PRINTREG(reg_save_gp);
2980 #undef PRINTREG
2981 }
2982
2983 /* If there is no epilogue, then we don't need some prologue insns.
2984 We need to avoid emitting the dead prologue insns, because flow
2985 will complain about them. */
2986 if (optimize)
2987 {
2988 edge e;
2989 edge_iterator ei;
2990
2991 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
2992 if ((e->flags & EDGE_FAKE) == 0
2993 && (e->flags & EDGE_FALLTHRU) != 0)
2994 break;
2995 epilogue_p = (e != NULL);
2996 }
2997 else
2998 epilogue_p = 1;
2999
3000 /* Set the local, input, and output register names. We need to do this
3001 for GNU libc, which creates crti.S/crtn.S by splitting initfini.c in
3002 half. If we use in/loc/out register names, then we get assembler errors
3003 in crtn.S because there is no alloc insn or regstk directive in there. */
3004 if (! TARGET_REG_NAMES)
3005 {
3006 int inputs = current_frame_info.n_input_regs;
3007 int locals = current_frame_info.n_local_regs;
3008 int outputs = current_frame_info.n_output_regs;
3009
3010 for (i = 0; i < inputs; i++)
3011 reg_names[IN_REG (i)] = ia64_reg_numbers[i];
3012 for (i = 0; i < locals; i++)
3013 reg_names[LOC_REG (i)] = ia64_reg_numbers[inputs + i];
3014 for (i = 0; i < outputs; i++)
3015 reg_names[OUT_REG (i)] = ia64_reg_numbers[inputs + locals + i];
3016 }
3017
3018 /* Set the frame pointer register name. The regnum is logically loc79,
3019 but of course we'll not have allocated that many locals. Rather than
3020 worrying about renumbering the existing rtxs, we adjust the name. */
3021 /* ??? This code means that we can never use one local register when
3022 there is a frame pointer. loc79 gets wasted in this case, as it is
3023 renamed to a register that will never be used. See also the try_locals
3024 code in find_gr_spill. */
3025 if (current_frame_info.r[reg_fp])
3026 {
3027 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
3028 reg_names[HARD_FRAME_POINTER_REGNUM]
3029 = reg_names[current_frame_info.r[reg_fp]];
3030 reg_names[current_frame_info.r[reg_fp]] = tmp;
3031 }
3032
3033 /* We don't need an alloc instruction if we've used no outputs or locals. */
3034 if (current_frame_info.n_local_regs == 0
3035 && current_frame_info.n_output_regs == 0
3036 && current_frame_info.n_input_regs <= crtl->args.info.int_regs
3037 && !TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
3038 {
3039 /* If there is no alloc, but there are input registers used, then we
3040 need a .regstk directive. */
3041 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
3042 ar_pfs_save_reg = NULL_RTX;
3043 }
3044 else
3045 {
3046 current_frame_info.need_regstk = 0;
3047
3048 if (current_frame_info.r[reg_save_ar_pfs])
3049 {
3050 regno = current_frame_info.r[reg_save_ar_pfs];
3051 reg_emitted (reg_save_ar_pfs);
3052 }
3053 else
3054 regno = next_scratch_gr_reg ();
3055 ar_pfs_save_reg = gen_rtx_REG (DImode, regno);
3056
3057 insn = emit_insn (gen_alloc (ar_pfs_save_reg,
3058 GEN_INT (current_frame_info.n_input_regs),
3059 GEN_INT (current_frame_info.n_local_regs),
3060 GEN_INT (current_frame_info.n_output_regs),
3061 GEN_INT (current_frame_info.n_rotate_regs)));
3062 RTX_FRAME_RELATED_P (insn) = (current_frame_info.r[reg_save_ar_pfs] != 0);
3063 }
3064
3065 /* Set up frame pointer, stack pointer, and spill iterators. */
3066
3067 n_varargs = cfun->machine->n_varargs;
3068 setup_spill_pointers (current_frame_info.n_spilled + n_varargs,
3069 stack_pointer_rtx, 0);
3070
3071 if (frame_pointer_needed)
3072 {
3073 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
3074 RTX_FRAME_RELATED_P (insn) = 1;
3075 }
3076
3077 if (current_frame_info.total_size != 0)
3078 {
3079 rtx frame_size_rtx = GEN_INT (- current_frame_info.total_size);
3080 rtx offset;
3081
3082 if (satisfies_constraint_I (frame_size_rtx))
3083 offset = frame_size_rtx;
3084 else
3085 {
3086 regno = next_scratch_gr_reg ();
3087 offset = gen_rtx_REG (DImode, regno);
3088 emit_move_insn (offset, frame_size_rtx);
3089 }
3090
3091 insn = emit_insn (gen_adddi3 (stack_pointer_rtx,
3092 stack_pointer_rtx, offset));
3093
3094 if (! frame_pointer_needed)
3095 {
3096 RTX_FRAME_RELATED_P (insn) = 1;
3097 if (GET_CODE (offset) != CONST_INT)
3098 {
3099 REG_NOTES (insn)
3100 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3101 gen_rtx_SET (VOIDmode,
3102 stack_pointer_rtx,
3103 gen_rtx_PLUS (DImode,
3104 stack_pointer_rtx,
3105 frame_size_rtx)),
3106 REG_NOTES (insn));
3107 }
3108 }
3109
3110 /* ??? At this point we must generate a magic insn that appears to
3111 modify the stack pointer, the frame pointer, and all spill
3112 iterators. This would allow the most scheduling freedom. For
3113 now, just hard stop. */
3114 emit_insn (gen_blockage ());
3115 }
3116
3117 /* Must copy out ar.unat before doing any integer spills. */
3118 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3119 {
3120 if (current_frame_info.r[reg_save_ar_unat])
3121 {
3122 ar_unat_save_reg
3123 = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_unat]);
3124 reg_emitted (reg_save_ar_unat);
3125 }
3126 else
3127 {
3128 alt_regno = next_scratch_gr_reg ();
3129 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
3130 current_frame_info.gr_used_mask |= 1 << alt_regno;
3131 }
3132
3133 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3134 insn = emit_move_insn (ar_unat_save_reg, reg);
3135 RTX_FRAME_RELATED_P (insn) = (current_frame_info.r[reg_save_ar_unat] != 0);
3136
3137 /* Even if we're not going to generate an epilogue, we still
3138 need to save the register so that EH works. */
3139 if (! epilogue_p && current_frame_info.r[reg_save_ar_unat])
3140 emit_insn (gen_prologue_use (ar_unat_save_reg));
3141 }
3142 else
3143 ar_unat_save_reg = NULL_RTX;
3144
3145 /* Spill all varargs registers. Do this before spilling any GR registers,
3146 since we want the UNAT bits for the GR registers to override the UNAT
3147 bits from varargs, which we don't care about. */
3148
3149 cfa_off = -16;
3150 for (regno = GR_ARG_FIRST + 7; n_varargs > 0; --n_varargs, --regno)
3151 {
3152 reg = gen_rtx_REG (DImode, regno);
3153 do_spill (gen_gr_spill, reg, cfa_off += 8, NULL_RTX);
3154 }
3155
3156 /* Locate the bottom of the register save area. */
3157 cfa_off = (current_frame_info.spill_cfa_off
3158 + current_frame_info.spill_size
3159 + current_frame_info.extra_spill_size);
3160
3161 /* Save the predicate register block either in a register or in memory. */
3162 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
3163 {
3164 reg = gen_rtx_REG (DImode, PR_REG (0));
3165 if (current_frame_info.r[reg_save_pr] != 0)
3166 {
3167 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_pr]);
3168 reg_emitted (reg_save_pr);
3169 insn = emit_move_insn (alt_reg, reg);
3170
3171 /* ??? Denote pr spill/fill by a DImode move that modifies all
3172 64 hard registers. */
3173 RTX_FRAME_RELATED_P (insn) = 1;
3174 REG_NOTES (insn)
3175 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3176 gen_rtx_SET (VOIDmode, alt_reg, reg),
3177 REG_NOTES (insn));
3178
3179 /* Even if we're not going to generate an epilogue, we still
3180 need to save the register so that EH works. */
3181 if (! epilogue_p)
3182 emit_insn (gen_prologue_use (alt_reg));
3183 }
3184 else
3185 {
3186 alt_regno = next_scratch_gr_reg ();
3187 alt_reg = gen_rtx_REG (DImode, alt_regno);
3188 insn = emit_move_insn (alt_reg, reg);
3189 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3190 cfa_off -= 8;
3191 }
3192 }
3193
3194 /* Handle AR regs in numerical order. All of them get special handling. */
3195 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM)
3196 && current_frame_info.r[reg_save_ar_unat] == 0)
3197 {
3198 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3199 do_spill (gen_movdi_x, ar_unat_save_reg, cfa_off, reg);
3200 cfa_off -= 8;
3201 }
3202
3203 /* The alloc insn already copied ar.pfs into a general register. The
3204 only thing we have to do now is copy that register to a stack slot
3205 if we'd not allocated a local register for the job. */
3206 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM)
3207 && current_frame_info.r[reg_save_ar_pfs] == 0)
3208 {
3209 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3210 do_spill (gen_movdi_x, ar_pfs_save_reg, cfa_off, reg);
3211 cfa_off -= 8;
3212 }
3213
3214 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
3215 {
3216 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
3217 if (current_frame_info.r[reg_save_ar_lc] != 0)
3218 {
3219 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_lc]);
3220 reg_emitted (reg_save_ar_lc);
3221 insn = emit_move_insn (alt_reg, reg);
3222 RTX_FRAME_RELATED_P (insn) = 1;
3223
3224 /* Even if we're not going to generate an epilogue, we still
3225 need to save the register so that EH works. */
3226 if (! epilogue_p)
3227 emit_insn (gen_prologue_use (alt_reg));
3228 }
3229 else
3230 {
3231 alt_regno = next_scratch_gr_reg ();
3232 alt_reg = gen_rtx_REG (DImode, alt_regno);
3233 emit_move_insn (alt_reg, reg);
3234 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3235 cfa_off -= 8;
3236 }
3237 }
3238
3239 /* Save the return pointer. */
3240 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3241 {
3242 reg = gen_rtx_REG (DImode, BR_REG (0));
3243 if (current_frame_info.r[reg_save_b0] != 0)
3244 {
3245 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
3246 reg_emitted (reg_save_b0);
3247 insn = emit_move_insn (alt_reg, reg);
3248 RTX_FRAME_RELATED_P (insn) = 1;
3249
3250 /* Even if we're not going to generate an epilogue, we still
3251 need to save the register so that EH works. */
3252 if (! epilogue_p)
3253 emit_insn (gen_prologue_use (alt_reg));
3254 }
3255 else
3256 {
3257 alt_regno = next_scratch_gr_reg ();
3258 alt_reg = gen_rtx_REG (DImode, alt_regno);
3259 emit_move_insn (alt_reg, reg);
3260 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3261 cfa_off -= 8;
3262 }
3263 }
3264
3265 if (current_frame_info.r[reg_save_gp])
3266 {
3267 reg_emitted (reg_save_gp);
3268 insn = emit_move_insn (gen_rtx_REG (DImode,
3269 current_frame_info.r[reg_save_gp]),
3270 pic_offset_table_rtx);
3271 }
3272
3273 /* We should now be at the base of the gr/br/fr spill area. */
3274 gcc_assert (cfa_off == (current_frame_info.spill_cfa_off
3275 + current_frame_info.spill_size));
3276
3277 /* Spill all general registers. */
3278 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
3279 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3280 {
3281 reg = gen_rtx_REG (DImode, regno);
3282 do_spill (gen_gr_spill, reg, cfa_off, reg);
3283 cfa_off -= 8;
3284 }
3285
3286 /* Spill the rest of the BR registers. */
3287 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
3288 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3289 {
3290 alt_regno = next_scratch_gr_reg ();
3291 alt_reg = gen_rtx_REG (DImode, alt_regno);
3292 reg = gen_rtx_REG (DImode, regno);
3293 emit_move_insn (alt_reg, reg);
3294 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3295 cfa_off -= 8;
3296 }
3297
3298 /* Align the frame and spill all FR registers. */
3299 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
3300 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3301 {
3302 gcc_assert (!(cfa_off & 15));
3303 reg = gen_rtx_REG (XFmode, regno);
3304 do_spill (gen_fr_spill_x, reg, cfa_off, reg);
3305 cfa_off -= 16;
3306 }
3307
3308 gcc_assert (cfa_off == current_frame_info.spill_cfa_off);
3309
3310 finish_spill_pointers ();
3311 }
3312
3313 /* Called after register allocation to add any instructions needed for the
3314 epilogue. Using an epilogue insn is favored compared to putting all of the
3315 instructions in output_function_epilogue(), since it allows the scheduler
3316 to intermix instructions with the restores of the caller saved registers.  In
3317 some cases, it might be necessary to emit a barrier instruction as the last
3318 insn to prevent such scheduling. */
3319
3320 void
3321 ia64_expand_epilogue (int sibcall_p)
3322 {
3323 rtx insn, reg, alt_reg, ar_unat_save_reg;
3324 int regno, alt_regno, cfa_off;
3325
3326 ia64_compute_frame_size (get_frame_size ());
3327
3328 /* If there is a frame pointer, then we use it instead of the stack
3329 pointer, so that the stack pointer does not need to be valid when
3330 the epilogue starts. See EXIT_IGNORE_STACK. */
3331 if (frame_pointer_needed)
3332 setup_spill_pointers (current_frame_info.n_spilled,
3333 hard_frame_pointer_rtx, 0);
3334 else
3335 setup_spill_pointers (current_frame_info.n_spilled, stack_pointer_rtx,
3336 current_frame_info.total_size);
3337
3338 if (current_frame_info.total_size != 0)
3339 {
3340 /* ??? At this point we must generate a magic insn that appears to
3341 modify the spill iterators and the frame pointer. This would
3342 allow the most scheduling freedom. For now, just hard stop. */
3343 emit_insn (gen_blockage ());
3344 }
3345
3346 /* Locate the bottom of the register save area. */
3347 cfa_off = (current_frame_info.spill_cfa_off
3348 + current_frame_info.spill_size
3349 + current_frame_info.extra_spill_size);
3350
3351 /* Restore the predicate registers. */
3352 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
3353 {
3354 if (current_frame_info.r[reg_save_pr] != 0)
3355 {
3356 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_pr]);
3357 reg_emitted (reg_save_pr);
3358 }
3359 else
3360 {
3361 alt_regno = next_scratch_gr_reg ();
3362 alt_reg = gen_rtx_REG (DImode, alt_regno);
3363 do_restore (gen_movdi_x, alt_reg, cfa_off);
3364 cfa_off -= 8;
3365 }
3366 reg = gen_rtx_REG (DImode, PR_REG (0));
3367 emit_move_insn (reg, alt_reg);
3368 }
3369
3370 /* Restore the application registers. */
3371
3372 /* Load the saved unat from the stack, but do not restore it until
3373 after the GRs have been restored. */
3374 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3375 {
3376 if (current_frame_info.r[reg_save_ar_unat] != 0)
3377 {
3378 ar_unat_save_reg
3379 = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_unat]);
3380 reg_emitted (reg_save_ar_unat);
3381 }
3382 else
3383 {
3384 alt_regno = next_scratch_gr_reg ();
3385 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
3386 current_frame_info.gr_used_mask |= 1 << alt_regno;
3387 do_restore (gen_movdi_x, ar_unat_save_reg, cfa_off);
3388 cfa_off -= 8;
3389 }
3390 }
3391 else
3392 ar_unat_save_reg = NULL_RTX;
3393
3394 if (current_frame_info.r[reg_save_ar_pfs] != 0)
3395 {
3396 reg_emitted (reg_save_ar_pfs);
3397 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_pfs]);
3398 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3399 emit_move_insn (reg, alt_reg);
3400 }
3401 else if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
3402 {
3403 alt_regno = next_scratch_gr_reg ();
3404 alt_reg = gen_rtx_REG (DImode, alt_regno);
3405 do_restore (gen_movdi_x, alt_reg, cfa_off);
3406 cfa_off -= 8;
3407 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3408 emit_move_insn (reg, alt_reg);
3409 }
3410
3411 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
3412 {
3413 if (current_frame_info.r[reg_save_ar_lc] != 0)
3414 {
3415 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_lc]);
3416 reg_emitted (reg_save_ar_lc);
3417 }
3418 else
3419 {
3420 alt_regno = next_scratch_gr_reg ();
3421 alt_reg = gen_rtx_REG (DImode, alt_regno);
3422 do_restore (gen_movdi_x, alt_reg, cfa_off);
3423 cfa_off -= 8;
3424 }
3425 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
3426 emit_move_insn (reg, alt_reg);
3427 }
3428
3429 /* Restore the return pointer. */
3430 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3431 {
3432 if (current_frame_info.r[reg_save_b0] != 0)
3433 {
3434 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
3435 reg_emitted (reg_save_b0);
3436 }
3437 else
3438 {
3439 alt_regno = next_scratch_gr_reg ();
3440 alt_reg = gen_rtx_REG (DImode, alt_regno);
3441 do_restore (gen_movdi_x, alt_reg, cfa_off);
3442 cfa_off -= 8;
3443 }
3444 reg = gen_rtx_REG (DImode, BR_REG (0));
3445 emit_move_insn (reg, alt_reg);
3446 }
3447
3448 /* We should now be at the base of the gr/br/fr spill area. */
3449 gcc_assert (cfa_off == (current_frame_info.spill_cfa_off
3450 + current_frame_info.spill_size));
3451
3452 /* The GP may be stored on the stack in the prologue, but it's
3453 never restored in the epilogue. Skip the stack slot. */
3454 if (TEST_HARD_REG_BIT (current_frame_info.mask, GR_REG (1)))
3455 cfa_off -= 8;
3456
3457 /* Restore all general registers. */
3458 for (regno = GR_REG (2); regno <= GR_REG (31); ++regno)
3459 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3460 {
3461 reg = gen_rtx_REG (DImode, regno);
3462 do_restore (gen_gr_restore, reg, cfa_off);
3463 cfa_off -= 8;
3464 }
3465
3466 /* Restore the branch registers. */
3467 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
3468 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3469 {
3470 alt_regno = next_scratch_gr_reg ();
3471 alt_reg = gen_rtx_REG (DImode, alt_regno);
3472 do_restore (gen_movdi_x, alt_reg, cfa_off);
3473 cfa_off -= 8;
3474 reg = gen_rtx_REG (DImode, regno);
3475 emit_move_insn (reg, alt_reg);
3476 }
3477
3478 /* Restore floating point registers. */
3479 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
3480 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3481 {
3482 gcc_assert (!(cfa_off & 15));
3483 reg = gen_rtx_REG (XFmode, regno);
3484 do_restore (gen_fr_restore_x, reg, cfa_off);
3485 cfa_off -= 16;
3486 }
3487
3488 /* Restore ar.unat for real. */
3489 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3490 {
3491 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3492 emit_move_insn (reg, ar_unat_save_reg);
3493 }
3494
3495 gcc_assert (cfa_off == current_frame_info.spill_cfa_off);
3496
3497 finish_spill_pointers ();
3498
3499 if (current_frame_info.total_size
3500 || cfun->machine->ia64_eh_epilogue_sp
3501 || frame_pointer_needed)
3502 {
3503 /* ??? At this point we must generate a magic insn that appears to
3504 modify the spill iterators, the stack pointer, and the frame
3505 pointer. This would allow the most scheduling freedom. For now,
3506 just hard stop. */
3507 emit_insn (gen_blockage ());
3508 }
3509
3510 if (cfun->machine->ia64_eh_epilogue_sp)
3511 emit_move_insn (stack_pointer_rtx, cfun->machine->ia64_eh_epilogue_sp);
3512 else if (frame_pointer_needed)
3513 {
3514 insn = emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
3515 RTX_FRAME_RELATED_P (insn) = 1;
3516 }
3517 else if (current_frame_info.total_size)
3518 {
3519 rtx offset, frame_size_rtx;
3520
3521 frame_size_rtx = GEN_INT (current_frame_info.total_size);
3522 if (satisfies_constraint_I (frame_size_rtx))
3523 offset = frame_size_rtx;
3524 else
3525 {
3526 regno = next_scratch_gr_reg ();
3527 offset = gen_rtx_REG (DImode, regno);
3528 emit_move_insn (offset, frame_size_rtx);
3529 }
3530
3531 insn = emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
3532 offset));
3533
3534 RTX_FRAME_RELATED_P (insn) = 1;
3535 if (GET_CODE (offset) != CONST_INT)
3536 {
3537 REG_NOTES (insn)
3538 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3539 gen_rtx_SET (VOIDmode,
3540 stack_pointer_rtx,
3541 gen_rtx_PLUS (DImode,
3542 stack_pointer_rtx,
3543 frame_size_rtx)),
3544 REG_NOTES (insn));
3545 }
3546 }
3547
3548 if (cfun->machine->ia64_eh_epilogue_bsp)
3549 emit_insn (gen_set_bsp (cfun->machine->ia64_eh_epilogue_bsp));
3550
3551 if (! sibcall_p)
3552 emit_jump_insn (gen_return_internal (gen_rtx_REG (DImode, BR_REG (0))));
3553 else
3554 {
3555 int fp = GR_REG (2);
3556 /* We need a throw-away register here; r0 and r1 are reserved, so r2 is the
3557 first available call-clobbered register.  If there was a frame pointer
3558 register, we may have swapped the names of r2 and HARD_FRAME_POINTER_REGNUM,
3559 so we have to make sure we're using the string "r2" when emitting
3560 the register name for the assembler. */
3561 if (current_frame_info.r[reg_fp]
3562 && current_frame_info.r[reg_fp] == GR_REG (2))
3563 fp = HARD_FRAME_POINTER_REGNUM;
3564
3565 /* We must emit an alloc to force the input registers to become output
3566 registers. Otherwise, if the callee tries to pass its parameters
3567 through to another call without an intervening alloc, then these
3568 values get lost. */
3569 /* ??? We don't need to preserve all input registers. We only need to
3570 preserve those input registers used as arguments to the sibling call.
3571 It is unclear how to compute that number here. */
3572 if (current_frame_info.n_input_regs != 0)
3573 {
3574 rtx n_inputs = GEN_INT (current_frame_info.n_input_regs);
3575 insn = emit_insn (gen_alloc (gen_rtx_REG (DImode, fp),
3576 const0_rtx, const0_rtx,
3577 n_inputs, const0_rtx));
3578 RTX_FRAME_RELATED_P (insn) = 1;
3579 }
3580 }
3581 }
3582
3583 /* Return 1 if br.ret can do all the work required to return from a
3584 function. */
3585
3586 int
3587 ia64_direct_return (void)
3588 {
3589 if (reload_completed && ! frame_pointer_needed)
3590 {
3591 ia64_compute_frame_size (get_frame_size ());
3592
3593 return (current_frame_info.total_size == 0
3594 && current_frame_info.n_spilled == 0
3595 && current_frame_info.r[reg_save_b0] == 0
3596 && current_frame_info.r[reg_save_pr] == 0
3597 && current_frame_info.r[reg_save_ar_pfs] == 0
3598 && current_frame_info.r[reg_save_ar_unat] == 0
3599 && current_frame_info.r[reg_save_ar_lc] == 0);
3600 }
3601 return 0;
3602 }
3603
3604 /* Return the magic cookie that we use to hold the return address
3605 during early compilation. */
3606
3607 rtx
3608 ia64_return_addr_rtx (HOST_WIDE_INT count, rtx frame ATTRIBUTE_UNUSED)
3609 {
3610 if (count != 0)
3611 return NULL;
3612 return gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_RET_ADDR);
3613 }
3614
3615 /* Split this value after reload, now that we know where the return
3616 address is saved. */
3617
3618 void
3619 ia64_split_return_addr_rtx (rtx dest)
3620 {
3621 rtx src;
3622
3623 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3624 {
3625 if (current_frame_info.r[reg_save_b0] != 0)
3626 {
3627 src = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
3628 reg_emitted (reg_save_b0);
3629 }
3630 else
3631 {
3632 HOST_WIDE_INT off;
3633 unsigned int regno;
3634 rtx off_r;
3635
3636 /* Compute offset from CFA for BR0. */
3637 /* ??? Must be kept in sync with ia64_expand_prologue. */
3638 off = (current_frame_info.spill_cfa_off
3639 + current_frame_info.spill_size);
3640 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
3641 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3642 off -= 8;
3643
3644 /* Convert CFA offset to a register based offset. */
3645 if (frame_pointer_needed)
3646 src = hard_frame_pointer_rtx;
3647 else
3648 {
3649 src = stack_pointer_rtx;
3650 off += current_frame_info.total_size;
3651 }
3652
3653 /* Load address into scratch register. */
3654 off_r = GEN_INT (off);
3655 if (satisfies_constraint_I (off_r))
3656 emit_insn (gen_adddi3 (dest, src, off_r));
3657 else
3658 {
3659 emit_move_insn (dest, off_r);
3660 emit_insn (gen_adddi3 (dest, src, dest));
3661 }
3662
3663 src = gen_rtx_MEM (Pmode, dest);
3664 }
3665 }
3666 else
3667 src = gen_rtx_REG (DImode, BR_REG (0));
3668
3669 emit_move_insn (dest, src);
3670 }
3671
3672 int
3673 ia64_hard_regno_rename_ok (int from, int to)
3674 {
3675 /* Don't clobber any of the registers we reserved for the prologue. */
3676 enum ia64_frame_regs r;
3677
3678 for (r = reg_fp; r <= reg_save_ar_lc; r++)
3679 if (to == current_frame_info.r[r]
3680 || from == current_frame_info.r[r]
3681 || to == emitted_frame_related_regs[r]
3682 || from == emitted_frame_related_regs[r])
3683 return 0;
3684
3685 /* Don't use output registers outside the register frame. */
3686 if (OUT_REGNO_P (to) && to >= OUT_REG (current_frame_info.n_output_regs))
3687 return 0;
3688
3689 /* Retain even/oddness on predicate register pairs. */
3690 if (PR_REGNO_P (from) && PR_REGNO_P (to))
3691 return (from & 1) == (to & 1);
3692
3693 return 1;
3694 }
3695
3696 /* Target hook for assembling integer objects. Handle word-sized
3697 aligned objects and detect the cases when @fptr is needed. */
3698
3699 static bool
3700 ia64_assemble_integer (rtx x, unsigned int size, int aligned_p)
3701 {
3702 if (size == POINTER_SIZE / BITS_PER_UNIT
3703 && !(TARGET_NO_PIC || TARGET_AUTO_PIC)
3704 && GET_CODE (x) == SYMBOL_REF
3705 && SYMBOL_REF_FUNCTION_P (x))
3706 {
3707 static const char * const directive[2][2] = {
3708 /* 64-bit pointer */ /* 32-bit pointer */
3709 { "\tdata8.ua\t@fptr(", "\tdata4.ua\t@fptr("}, /* unaligned */
3710 { "\tdata8\t@fptr(", "\tdata4\t@fptr("} /* aligned */
3711 };
3712 fputs (directive[(aligned_p != 0)][POINTER_SIZE == 32], asm_out_file);
3713 output_addr_const (asm_out_file, x);
3714 fputs (")\n", asm_out_file);
3715 return true;
3716 }
3717 return default_assemble_integer (x, size, aligned_p);
3718 }
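/* For example, an aligned 64-bit pointer to a function `foo' is emitted
   here as "data8 @fptr(foo)", while an unaligned 32-bit function pointer
   would use the "data4.ua @fptr(...)" form from the table above.  */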
3719
3720 /* Emit the function prologue. */
3721
3722 static void
3723 ia64_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3724 {
3725 int mask, grsave, grsave_prev;
3726
3727 if (current_frame_info.need_regstk)
3728 fprintf (file, "\t.regstk %d, %d, %d, %d\n",
3729 current_frame_info.n_input_regs,
3730 current_frame_info.n_local_regs,
3731 current_frame_info.n_output_regs,
3732 current_frame_info.n_rotate_regs);
3733
3734 if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS))
3735 return;
3736
3737 /* Emit the .prologue directive. */
3738
3739 mask = 0;
3740 grsave = grsave_prev = 0;
3741 if (current_frame_info.r[reg_save_b0] != 0)
3742 {
3743 mask |= 8;
3744 grsave = grsave_prev = current_frame_info.r[reg_save_b0];
3745 }
3746 if (current_frame_info.r[reg_save_ar_pfs] != 0
3747 && (grsave_prev == 0
3748 || current_frame_info.r[reg_save_ar_pfs] == grsave_prev + 1))
3749 {
3750 mask |= 4;
3751 if (grsave_prev == 0)
3752 grsave = current_frame_info.r[reg_save_ar_pfs];
3753 grsave_prev = current_frame_info.r[reg_save_ar_pfs];
3754 }
3755 if (current_frame_info.r[reg_fp] != 0
3756 && (grsave_prev == 0
3757 || current_frame_info.r[reg_fp] == grsave_prev + 1))
3758 {
3759 mask |= 2;
3760 if (grsave_prev == 0)
3761 grsave = HARD_FRAME_POINTER_REGNUM;
3762 grsave_prev = current_frame_info.r[reg_fp];
3763 }
3764 if (current_frame_info.r[reg_save_pr] != 0
3765 && (grsave_prev == 0
3766 || current_frame_info.r[reg_save_pr] == grsave_prev + 1))
3767 {
3768 mask |= 1;
3769 if (grsave_prev == 0)
3770 grsave = current_frame_info.r[reg_save_pr];
3771 }
3772
3773 if (mask && TARGET_GNU_AS)
3774 fprintf (file, "\t.prologue %d, %d\n", mask,
3775 ia64_dbx_register_number (grsave));
3776 else
3777 fputs ("\t.prologue\n", file);
3778
3779 /* Emit a .spill directive, if necessary, to relocate the base of
3780 the register spill area. */
3781 if (current_frame_info.spill_cfa_off != -16)
3782 fprintf (file, "\t.spill %ld\n",
3783 (long) (current_frame_info.spill_cfa_off
3784 + current_frame_info.spill_size));
3785 }
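/* For example, if b0 and ar.pfs were saved in two consecutive general
   registers by the prologue, the mask above is 8|4 == 12 and grsave is
   the first of those registers, yielding a directive of the form
   ".prologue 12, <grsave>" with <grsave> given its DWARF number.  */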
3786
3787 /* Emit the .body directive at the scheduled end of the prologue. */
3788
3789 static void
3790 ia64_output_function_end_prologue (FILE *file)
3791 {
3792 if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS))
3793 return;
3794
3795 fputs ("\t.body\n", file);
3796 }
3797
3798 /* Emit the function epilogue. */
3799
3800 static void
3801 ia64_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
3802 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3803 {
3804 int i;
3805
3806 if (current_frame_info.r[reg_fp])
3807 {
3808 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
3809 reg_names[HARD_FRAME_POINTER_REGNUM]
3810 = reg_names[current_frame_info.r[reg_fp]];
3811 reg_names[current_frame_info.r[reg_fp]] = tmp;
3812 reg_emitted (reg_fp);
3813 }
3814 if (! TARGET_REG_NAMES)
3815 {
3816 for (i = 0; i < current_frame_info.n_input_regs; i++)
3817 reg_names[IN_REG (i)] = ia64_input_reg_names[i];
3818 for (i = 0; i < current_frame_info.n_local_regs; i++)
3819 reg_names[LOC_REG (i)] = ia64_local_reg_names[i];
3820 for (i = 0; i < current_frame_info.n_output_regs; i++)
3821 reg_names[OUT_REG (i)] = ia64_output_reg_names[i];
3822 }
3823
3824 current_frame_info.initialized = 0;
3825 }
3826
3827 int
3828 ia64_dbx_register_number (int regno)
3829 {
3830 /* In ia64_expand_prologue we quite literally renamed the frame pointer
3831 from its home at loc79 to something inside the register frame. We
3832 must perform the same renumbering here for the debug info. */
3833 if (current_frame_info.r[reg_fp])
3834 {
3835 if (regno == HARD_FRAME_POINTER_REGNUM)
3836 regno = current_frame_info.r[reg_fp];
3837 else if (regno == current_frame_info.r[reg_fp])
3838 regno = HARD_FRAME_POINTER_REGNUM;
3839 }
3840
3841 if (IN_REGNO_P (regno))
3842 return 32 + regno - IN_REG (0);
3843 else if (LOC_REGNO_P (regno))
3844 return 32 + current_frame_info.n_input_regs + regno - LOC_REG (0);
3845 else if (OUT_REGNO_P (regno))
3846 return (32 + current_frame_info.n_input_regs
3847 + current_frame_info.n_local_regs + regno - OUT_REG (0));
3848 else
3849 return regno;
3850 }
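/* For example, with two input registers, IN_REG (0) and IN_REG (1) map
   to DWARF numbers 32 and 33, and LOC_REG (0) maps to 34; registers
   outside the stacked register frame keep their hard register numbers.  */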
3851
3852 void
3853 ia64_initialize_trampoline (rtx addr, rtx fnaddr, rtx static_chain)
3854 {
3855 rtx addr_reg, eight = GEN_INT (8);
3856
3857 /* The Intel assembler requires that the global __ia64_trampoline symbol
3858 be declared explicitly. */
3859 if (!TARGET_GNU_AS)
3860 {
3861 static bool declared_ia64_trampoline = false;
3862
3863 if (!declared_ia64_trampoline)
3864 {
3865 declared_ia64_trampoline = true;
3866 (*targetm.asm_out.globalize_label) (asm_out_file,
3867 "__ia64_trampoline");
3868 }
3869 }
3870
3871 /* Make sure addresses are Pmode even if we are in ILP32 mode. */
3872 addr = convert_memory_address (Pmode, addr);
3873 fnaddr = convert_memory_address (Pmode, fnaddr);
3874 static_chain = convert_memory_address (Pmode, static_chain);
3875
3876 /* Load up our iterator. */
3877 addr_reg = gen_reg_rtx (Pmode);
3878 emit_move_insn (addr_reg, addr);
3879
3880 /* The first two words are the fake descriptor:
3881 __ia64_trampoline, ADDR+16. */
3882 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg),
3883 gen_rtx_SYMBOL_REF (Pmode, "__ia64_trampoline"));
3884 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3885
3886 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg),
3887 copy_to_reg (plus_constant (addr, 16)));
3888 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3889
3890 /* The third word is the target descriptor. */
3891 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), fnaddr);
3892 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3893
3894 /* The fourth word is the static chain. */
3895 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), static_chain);
3896 }
3897 \f
3898 /* Do any needed setup for a variadic function. CUM has not been updated
3899 for the last named argument which has type TYPE and mode MODE.
3900
3901 We generate the actual spill instructions during prologue generation. */
3902
3903 static void
3904 ia64_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3905 tree type, int * pretend_size,
3906 int second_time ATTRIBUTE_UNUSED)
3907 {
3908 CUMULATIVE_ARGS next_cum = *cum;
3909
3910 /* Skip the current argument. */
3911 ia64_function_arg_advance (&next_cum, mode, type, 1);
3912
3913 if (next_cum.words < MAX_ARGUMENT_SLOTS)
3914 {
3915 int n = MAX_ARGUMENT_SLOTS - next_cum.words;
3916 *pretend_size = n * UNITS_PER_WORD;
3917 cfun->machine->n_varargs = n;
3918 }
3919 }
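/* For example, for a function declared as f (int a, ...), the single
   named argument leaves next_cum.words at 1, so (assuming 8 argument
   slots of 8 bytes each) *pretend_size is 56 and the prologue spills
   the remaining seven argument registers.  */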
3920
3921 /* Check whether TYPE is a homogeneous floating point aggregate. If
3922 it is, return the mode of the floating point type that appears
3923 in all leaves. If it is not, return VOIDmode.
3924
3925 An aggregate is a homogeneous floating point aggregate if all
3926 fields/elements in it have the same floating point type (e.g.,
3927 SFmode). 128-bit quad-precision floats are excluded.
3928
3929 Variable sized aggregates should never arrive here, since we should
3930 have already decided to pass them by reference. Top-level zero-sized
3931 aggregates are excluded because our parallels crash the middle-end. */
3932
3933 static enum machine_mode
3934 hfa_element_mode (const_tree type, bool nested)
3935 {
3936 enum machine_mode element_mode = VOIDmode;
3937 enum machine_mode mode;
3938 enum tree_code code = TREE_CODE (type);
3939 int know_element_mode = 0;
3940 tree t;
3941
3942 if (!nested && (!TYPE_SIZE (type) || integer_zerop (TYPE_SIZE (type))))
3943 return VOIDmode;
3944
3945 switch (code)
3946 {
3947 case VOID_TYPE: case INTEGER_TYPE: case ENUMERAL_TYPE:
3948 case BOOLEAN_TYPE: case POINTER_TYPE:
3949 case OFFSET_TYPE: case REFERENCE_TYPE: case METHOD_TYPE:
3950 case LANG_TYPE: case FUNCTION_TYPE:
3951 return VOIDmode;
3952
3953 /* Fortran complex types are supposed to be HFAs, so we need to handle
3954 gcc's COMPLEX_TYPEs as HFAs. We need to exclude the integral complex
3955 types though. */
3956 case COMPLEX_TYPE:
3957 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_COMPLEX_FLOAT
3958 && TYPE_MODE (type) != TCmode)
3959 return GET_MODE_INNER (TYPE_MODE (type));
3960 else
3961 return VOIDmode;
3962
3963 case REAL_TYPE:
3964 /* We want to return VOIDmode for raw REAL_TYPEs, but the actual
3965 mode if this is contained within an aggregate. */
3966 if (nested && TYPE_MODE (type) != TFmode)
3967 return TYPE_MODE (type);
3968 else
3969 return VOIDmode;
3970
3971 case ARRAY_TYPE:
3972 return hfa_element_mode (TREE_TYPE (type), 1);
3973
3974 case RECORD_TYPE:
3975 case UNION_TYPE:
3976 case QUAL_UNION_TYPE:
3977 for (t = TYPE_FIELDS (type); t; t = TREE_CHAIN (t))
3978 {
3979 if (TREE_CODE (t) != FIELD_DECL)
3980 continue;
3981
3982 mode = hfa_element_mode (TREE_TYPE (t), 1);
3983 if (know_element_mode)
3984 {
3985 if (mode != element_mode)
3986 return VOIDmode;
3987 }
3988 else if (GET_MODE_CLASS (mode) != MODE_FLOAT)
3989 return VOIDmode;
3990 else
3991 {
3992 know_element_mode = 1;
3993 element_mode = mode;
3994 }
3995 }
3996 return element_mode;
3997
3998 default:
3999 /* If we reach here, we probably have some front-end specific type
4000 that the backend doesn't know about. This can happen via the
4001 aggregate_value_p call in init_function_start. All we can do is
4002 ignore unknown tree types. */
4003 return VOIDmode;
4004 }
4005
4006 return VOIDmode;
4007 }
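/* For example, struct { float x, y, z; } is an HFA and yields SFmode,
   and _Complex double yields DFmode, while struct { float x; double y; }
   mixes element modes and yields VOIDmode, as does any aggregate
   containing a quad-precision (TFmode) field.  */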
4008
4009 /* Return the number of words required to hold a quantity of TYPE and MODE
4010 when passed as an argument. */
4011 static int
4012 ia64_function_arg_words (tree type, enum machine_mode mode)
4013 {
4014 int words;
4015
4016 if (mode == BLKmode)
4017 words = int_size_in_bytes (type);
4018 else
4019 words = GET_MODE_SIZE (mode);
4020
4021 return (words + UNITS_PER_WORD - 1) / UNITS_PER_WORD; /* round up */
4022 }
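/* For example, assuming UNITS_PER_WORD is 8, a 12-byte BLKmode struct
   needs (12 + 7) / 8 == 2 argument words, and a DImode scalar needs 1.  */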
4023
4024 /* Return the number of registers that should be skipped so the current
4025 argument (described by TYPE and WORDS) will be properly aligned.
4026
4027 Integer and float arguments larger than 8 bytes start at the next
4028 even boundary. Aggregates larger than 8 bytes start at the next
4029 even boundary if the aggregate has 16 byte alignment. Note that
4030 in the 32-bit ABI, TImode and TFmode have only 8-byte alignment
4031 but are still to be aligned in registers.
4032
4033 ??? The ABI does not specify how to handle aggregates with
4034 alignment from 9 to 15 bytes, or greater than 16. We handle them
4035 all as if they had 16 byte alignment. Such aggregates can occur
4036 only if gcc extensions are used. */
4037 static int
4038 ia64_function_arg_offset (CUMULATIVE_ARGS *cum, tree type, int words)
4039 {
4040 if ((cum->words & 1) == 0)
4041 return 0;
4042
4043 if (type
4044 && TREE_CODE (type) != INTEGER_TYPE
4045 && TREE_CODE (type) != REAL_TYPE)
4046 return TYPE_ALIGN (type) > 8 * BITS_PER_UNIT;
4047 else
4048 return words > 1;
4049 }
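/* For example, if three argument slots are already in use (cum->words
   is odd) and the next argument is an aggregate with 16-byte alignment,
   this returns 1 so that it starts at the next even slot; when
   cum->words is even it always returns 0.  */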
4050
4051 /* Return rtx for register where argument is passed, or zero if it is passed
4052 on the stack. */
4053 /* ??? 128-bit quad-precision floats are always passed in general
4054 registers. */
4055
4056 rtx
4057 ia64_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, tree type,
4058 int named, int incoming)
4059 {
4060 int basereg = (incoming ? GR_ARG_FIRST : AR_ARG_FIRST);
4061 int words = ia64_function_arg_words (type, mode);
4062 int offset = ia64_function_arg_offset (cum, type, words);
4063 enum machine_mode hfa_mode = VOIDmode;
4064
4065 /* If all argument slots are used, then it must go on the stack. */
4066 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
4067 return 0;
4068
4069 /* Check for and handle homogeneous FP aggregates. */
4070 if (type)
4071 hfa_mode = hfa_element_mode (type, 0);
4072
4073 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
4074 and unprototyped hfas are passed specially. */
4075 if (hfa_mode != VOIDmode && (! cum->prototype || named))
4076 {
4077 rtx loc[16];
4078 int i = 0;
4079 int fp_regs = cum->fp_regs;
4080 int int_regs = cum->words + offset;
4081 int hfa_size = GET_MODE_SIZE (hfa_mode);
4082 int byte_size;
4083 int args_byte_size;
4084
4085 /* If prototyped, pass it in FR regs then GR regs.
4086 If not prototyped, pass it in both FR and GR regs.
4087
4088 If this is an SFmode aggregate, then it is possible to run out of
4089 FR regs while GR regs are still left. In that case, we pass the
4090 remaining part in the GR regs. */
4091
4092 /* Fill the FP regs. We do this always. We stop if we reach the end
4093 of the argument, the last FP register, or the last argument slot. */
4094
4095 byte_size = ((mode == BLKmode)
4096 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4097 args_byte_size = int_regs * UNITS_PER_WORD;
4098 offset = 0;
4099 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
4100 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD)); i++)
4101 {
4102 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4103 gen_rtx_REG (hfa_mode, (FR_ARG_FIRST
4104 + fp_regs)),
4105 GEN_INT (offset));
4106 offset += hfa_size;
4107 args_byte_size += hfa_size;
4108 fp_regs++;
4109 }
4110
4111 /* If no prototype, then the whole thing must go in GR regs. */
4112 if (! cum->prototype)
4113 offset = 0;
4114 /* If this is an SFmode aggregate, then we might have some left over
4115 that needs to go in GR regs. */
4116 else if (byte_size != offset)
4117 int_regs += offset / UNITS_PER_WORD;
4118
4119 /* Fill in the GR regs. We must use DImode here, not the hfa mode. */
4120
4121 for (; offset < byte_size && int_regs < MAX_ARGUMENT_SLOTS; i++)
4122 {
4123 enum machine_mode gr_mode = DImode;
4124 unsigned int gr_size;
4125
4126 /* If we have an odd 4 byte hunk because we ran out of FR regs,
4127 then this goes in a GR reg left adjusted/little endian, right
4128 adjusted/big endian. */
4129 /* ??? Currently this is handled wrong, because 4-byte hunks are
4130 always right adjusted/little endian. */
4131 if (offset & 0x4)
4132 gr_mode = SImode;
4133 /* If we have an even 4 byte hunk because the aggregate is a
4134 multiple of 4 bytes in size, then this goes in a GR reg right
4135 adjusted/little endian. */
4136 else if (byte_size - offset == 4)
4137 gr_mode = SImode;
4138
4139 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4140 gen_rtx_REG (gr_mode, (basereg
4141 + int_regs)),
4142 GEN_INT (offset));
4143
4144 gr_size = GET_MODE_SIZE (gr_mode);
4145 offset += gr_size;
4146 if (gr_size == UNITS_PER_WORD
4147 || (gr_size < UNITS_PER_WORD && offset % UNITS_PER_WORD == 0))
4148 int_regs++;
4149 else if (gr_size > UNITS_PER_WORD)
4150 int_regs += gr_size / UNITS_PER_WORD;
4151 }
4152 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4153 }
4154
4155 /* Integral and aggregates go in general registers. If we have run out of
4156 FR registers, then FP values must also go in general registers. This can
4157 happen when we have a SFmode HFA. */
4158 else if (mode == TFmode || mode == TCmode
4159 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
4160 {
4161 int byte_size = ((mode == BLKmode)
4162 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4163 if (BYTES_BIG_ENDIAN
4164 && (mode == BLKmode || (type && AGGREGATE_TYPE_P (type)))
4165 && byte_size < UNITS_PER_WORD
4166 && byte_size > 0)
4167 {
4168 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
4169 gen_rtx_REG (DImode,
4170 (basereg + cum->words
4171 + offset)),
4172 const0_rtx);
4173 return gen_rtx_PARALLEL (mode, gen_rtvec (1, gr_reg));
4174 }
4175 else
4176 return gen_rtx_REG (mode, basereg + cum->words + offset);
4177
4178 }
4179
4180 /* If there is a prototype, then FP values go in a FR register when
4181 named, and in a GR register when unnamed. */
4182 else if (cum->prototype)
4183 {
4184 if (named)
4185 return gen_rtx_REG (mode, FR_ARG_FIRST + cum->fp_regs);
4186 /* In big-endian mode, an anonymous SFmode value must be represented
4187 as (parallel:SF [(expr_list (reg:DI n) (const_int 0))]) to force
4188 the value into the high half of the general register. */
4189 else if (BYTES_BIG_ENDIAN && mode == SFmode)
4190 return gen_rtx_PARALLEL (mode,
4191 gen_rtvec (1,
4192 gen_rtx_EXPR_LIST (VOIDmode,
4193 gen_rtx_REG (DImode, basereg + cum->words + offset),
4194 const0_rtx)));
4195 else
4196 return gen_rtx_REG (mode, basereg + cum->words + offset);
4197 }
4198 /* If there is no prototype, then FP values go in both FR and GR
4199 registers. */
4200 else
4201 {
4202 /* See comment above. */
4203 enum machine_mode inner_mode =
4204 (BYTES_BIG_ENDIAN && mode == SFmode) ? DImode : mode;
4205
4206 rtx fp_reg = gen_rtx_EXPR_LIST (VOIDmode,
4207 gen_rtx_REG (mode, (FR_ARG_FIRST
4208 + cum->fp_regs)),
4209 const0_rtx);
4210 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
4211 gen_rtx_REG (inner_mode,
4212 (basereg + cum->words
4213 + offset)),
4214 const0_rtx);
4215
4216 return gen_rtx_PARALLEL (mode, gen_rtvec (2, fp_reg, gr_reg));
4217 }
4218 }
4219
4220 /* Return number of bytes, at the beginning of the argument, that must be
4221 put in registers. 0 if the argument is entirely in registers or entirely
4222 in memory. */
4223
4224 static int
4225 ia64_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4226 tree type, bool named ATTRIBUTE_UNUSED)
4227 {
4228 int words = ia64_function_arg_words (type, mode);
4229 int offset = ia64_function_arg_offset (cum, type, words);
4230
4231 /* If all argument slots are used, then it must go on the stack. */
4232 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
4233 return 0;
4234
4235 /* It doesn't matter whether the argument goes in FR or GR regs. If
4236 it fits within the 8 argument slots, then it goes entirely in
4237 registers. If it extends past the last argument slot, then the rest
4238 goes on the stack. */
4239
4240 if (words + cum->words + offset <= MAX_ARGUMENT_SLOTS)
4241 return 0;
4242
4243 return (MAX_ARGUMENT_SLOTS - cum->words - offset) * UNITS_PER_WORD;
4244 }
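/* For example, assuming 8 argument slots of 8 bytes each, a 3-word
   argument starting at slot 6 has its first two words passed in
   registers and the last word passed on the stack, so this returns 16.  */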
4245
4246 /* Update CUM to point after this argument. This is patterned after
4247 ia64_function_arg. */
4248
4249 void
4250 ia64_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4251 tree type, int named)
4252 {
4253 int words = ia64_function_arg_words (type, mode);
4254 int offset = ia64_function_arg_offset (cum, type, words);
4255 enum machine_mode hfa_mode = VOIDmode;
4256
4257 /* If all arg slots are already full, then there is nothing to do. */
4258 if (cum->words >= MAX_ARGUMENT_SLOTS)
4259 return;
4260
4261 cum->words += words + offset;
4262
4263 /* Check for and handle homogeneous FP aggregates. */
4264 if (type)
4265 hfa_mode = hfa_element_mode (type, 0);
4266
4267 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
4268 and unprototyped hfas are passed specially. */
4269 if (hfa_mode != VOIDmode && (! cum->prototype || named))
4270 {
4271 int fp_regs = cum->fp_regs;
4272 /* This is the original value of cum->words + offset. */
4273 int int_regs = cum->words - words;
4274 int hfa_size = GET_MODE_SIZE (hfa_mode);
4275 int byte_size;
4276 int args_byte_size;
4277
4278 /* If prototyped, pass it in FR regs then GR regs.
4279 If not prototyped, pass it in both FR and GR regs.
4280
4281 If this is an SFmode aggregate, then it is possible to run out of
4282 FR regs while GR regs are still left. In that case, we pass the
4283 remaining part in the GR regs. */
4284
4285 /* Fill the FP regs. We do this always. We stop if we reach the end
4286 of the argument, the last FP register, or the last argument slot. */
4287
4288 byte_size = ((mode == BLKmode)
4289 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4290 args_byte_size = int_regs * UNITS_PER_WORD;
4291 offset = 0;
4292 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
4293 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD));)
4294 {
4295 offset += hfa_size;
4296 args_byte_size += hfa_size;
4297 fp_regs++;
4298 }
4299
4300 cum->fp_regs = fp_regs;
4301 }
4302
4303 /* Integral and aggregates go in general registers. So do TFmode FP values.
4304 If we have run out of FR registers, then other FP values must also go in
4305 general registers. This can happen when we have a SFmode HFA. */
4306 else if (mode == TFmode || mode == TCmode
4307 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
4308 cum->int_regs = cum->words;
4309
4310 /* If there is a prototype, then FP values go in a FR register when
4311 named, and in a GR register when unnamed. */
4312 else if (cum->prototype)
4313 {
4314 if (! named)
4315 cum->int_regs = cum->words;
4316 else
4317 /* ??? Complex types should not reach here. */
4318 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
4319 }
4320 /* If there is no prototype, then FP values go in both FR and GR
4321 registers. */
4322 else
4323 {
4324 /* ??? Complex types should not reach here. */
4325 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
4326 cum->int_regs = cum->words;
4327 }
4328 }
4329
4330 /* Arguments with alignment larger than 8 bytes start at the next even
4331 boundary. On ILP32 HPUX, TFmode arguments start on the next even boundary
4332 even though their normal alignment is 8 bytes. See ia64_function_arg. */
4333
4334 int
4335 ia64_function_arg_boundary (enum machine_mode mode, tree type)
4336 {
4337
4338 if (mode == TFmode && TARGET_HPUX && TARGET_ILP32)
4339 return PARM_BOUNDARY * 2;
4340
4341 if (type)
4342 {
4343 if (TYPE_ALIGN (type) > PARM_BOUNDARY)
4344 return PARM_BOUNDARY * 2;
4345 else
4346 return PARM_BOUNDARY;
4347 }
4348
4349 if (GET_MODE_BITSIZE (mode) > PARM_BOUNDARY)
4350 return PARM_BOUNDARY * 2;
4351 else
4352 return PARM_BOUNDARY;
4353 }
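/* For example, a DImode integer is aligned to PARM_BOUNDARY, while a
   TImode value or a 16-byte-aligned aggregate gets PARM_BOUNDARY * 2;
   TFmode under HP-UX ILP32 also gets the doubled boundary even though
   its natural alignment is only 8 bytes.  */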
4354
4355 /* True if it is OK to do sibling call optimization for the specified
4356 call expression EXP. DECL will be the called function, or NULL if
4357 this is an indirect call. */
4358 static bool
4359 ia64_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
4360 {
4361 /* We can't perform a sibcall if the current function has the syscall_linkage
4362 attribute. */
4363 if (lookup_attribute ("syscall_linkage",
4364 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
4365 return false;
4366
4367 /* We must always return with our current GP. This means we can
4368 only sibcall to functions defined in the current module. */
4369 return decl && (*targetm.binds_local_p) (decl);
4370 }
4371 \f
4372
4373 /* Implement va_arg. */
4374
4375 static tree
4376 ia64_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
4377 gimple_seq *post_p)
4378 {
4379 /* Variable sized types are passed by reference. */
4380 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
4381 {
4382 tree ptrtype = build_pointer_type (type);
4383 tree addr = std_gimplify_va_arg_expr (valist, ptrtype, pre_p, post_p);
4384 return build_va_arg_indirect_ref (addr);
4385 }
4386
4387 /* Aggregate arguments with alignment larger than 8 bytes start at
4388 the next even boundary. Integer and floating point arguments
4389 do so if they are larger than 8 bytes, whether or not they are
4390 also aligned larger than 8 bytes. */
4391 if ((TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == INTEGER_TYPE)
4392 ? int_size_in_bytes (type) > 8 : TYPE_ALIGN (type) > 8 * BITS_PER_UNIT)
4393 {
4394 tree t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (valist), valist,
4395 size_int (2 * UNITS_PER_WORD - 1));
4396 t = fold_convert (sizetype, t);
4397 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
4398 size_int (-2 * UNITS_PER_WORD));
4399 t = fold_convert (TREE_TYPE (valist), t);
4400 gimplify_assign (unshare_expr (valist), t, pre_p);
4401 }
4402
4403 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
4404 }
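/* The rounding above computes, assuming 8-byte words,
   valist = (valist + 15) & -16, i.e. it bumps the va_list pointer up to
   the next 16-byte boundary before the standard expansion fetches the
   value.  */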
4405 \f
4406 /* Return true if the function return value is returned in memory, and
4407 false if it is in a register. */
4408
4409 static bool
4410 ia64_return_in_memory (const_tree valtype, const_tree fntype ATTRIBUTE_UNUSED)
4411 {
4412 enum machine_mode mode;
4413 enum machine_mode hfa_mode;
4414 HOST_WIDE_INT byte_size;
4415
4416 mode = TYPE_MODE (valtype);
4417 byte_size = GET_MODE_SIZE (mode);
4418 if (mode == BLKmode)
4419 {
4420 byte_size = int_size_in_bytes (valtype);
4421 if (byte_size < 0)
4422 return true;
4423 }
4424
4425 /* HFAs with up to 8 elements are returned in the FP argument registers. */
4426
4427 hfa_mode = hfa_element_mode (valtype, 0);
4428 if (hfa_mode != VOIDmode)
4429 {
4430 int hfa_size = GET_MODE_SIZE (hfa_mode);
4431
4432 if (byte_size / hfa_size > MAX_ARGUMENT_SLOTS)
4433 return true;
4434 else
4435 return false;
4436 }
4437 else if (byte_size > UNITS_PER_WORD * MAX_INT_RETURN_SLOTS)
4438 return true;
4439 else
4440 return false;
4441 }
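/* For example, an HFA of eight doubles (64 bytes) fits in the FP
   argument registers and is returned there, whereas an HFA with more
   elements than MAX_ARGUMENT_SLOTS, or a non-HFA aggregate larger than
   UNITS_PER_WORD * MAX_INT_RETURN_SLOTS bytes, is returned in memory.  */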
4442
4443 /* Return rtx for register that holds the function return value. */
4444
4445 rtx
4446 ia64_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED)
4447 {
4448 enum machine_mode mode;
4449 enum machine_mode hfa_mode;
4450
4451 mode = TYPE_MODE (valtype);
4452 hfa_mode = hfa_element_mode (valtype, 0);
4453
4454 if (hfa_mode != VOIDmode)
4455 {
4456 rtx loc[8];
4457 int i;
4458 int hfa_size;
4459 int byte_size;
4460 int offset;
4461
4462 hfa_size = GET_MODE_SIZE (hfa_mode);
4463 byte_size = ((mode == BLKmode)
4464 ? int_size_in_bytes (valtype) : GET_MODE_SIZE (mode));
4465 offset = 0;
4466 for (i = 0; offset < byte_size; i++)
4467 {
4468 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4469 gen_rtx_REG (hfa_mode, FR_ARG_FIRST + i),
4470 GEN_INT (offset));
4471 offset += hfa_size;
4472 }
4473 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4474 }
4475 else if (FLOAT_TYPE_P (valtype) && mode != TFmode && mode != TCmode)
4476 return gen_rtx_REG (mode, FR_ARG_FIRST);
4477 else
4478 {
4479 bool need_parallel = false;
4480
4481 /* In big-endian mode, we need to manage the layout of aggregates
4482 in the registers so that we get the bits properly aligned in
4483 the highpart of the registers. */
4484 if (BYTES_BIG_ENDIAN
4485 && (mode == BLKmode || (valtype && AGGREGATE_TYPE_P (valtype))))
4486 need_parallel = true;
4487
4488 /* Something like struct S { long double x; char a[0] } is not an
4489 HFA structure, and therefore doesn't go in fp registers. But
4490 the middle-end will give it XFmode anyway, and XFmode values
4491 don't normally fit in integer registers. So we need to smuggle
4492 the value inside a parallel. */
4493 else if (mode == XFmode || mode == XCmode || mode == RFmode)
4494 need_parallel = true;
4495
4496 if (need_parallel)
4497 {
4498 rtx loc[8];
4499 int offset;
4500 int bytesize;
4501 int i;
4502
4503 offset = 0;
4504 bytesize = int_size_in_bytes (valtype);
4505 /* An empty PARALLEL is invalid here, but the return value
4506 doesn't matter for empty structs. */
4507 if (bytesize == 0)
4508 return gen_rtx_REG (mode, GR_RET_FIRST);
4509 for (i = 0; offset < bytesize; i++)
4510 {
4511 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4512 gen_rtx_REG (DImode,
4513 GR_RET_FIRST + i),
4514 GEN_INT (offset));
4515 offset += UNITS_PER_WORD;
4516 }
4517 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4518 }
4519
4520 return gen_rtx_REG (mode, GR_RET_FIRST);
4521 }
4522 }
4523
4524 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
4525 We need to emit DTP-relative relocations. */
4526
4527 static void
4528 ia64_output_dwarf_dtprel (FILE *file, int size, rtx x)
4529 {
4530 gcc_assert (size == 4 || size == 8);
4531 if (size == 4)
4532 fputs ("\tdata4.ua\t@dtprel(", file);
4533 else
4534 fputs ("\tdata8.ua\t@dtprel(", file);
4535 output_addr_const (file, x);
4536 fputs (")", file);
4537 }
4538
4539 /* Print a memory address as an operand to reference that memory location. */
4540
4541 /* ??? Do we need this? It gets used only for 'a' operands. We could perhaps
4542 also call this from ia64_print_operand for memory addresses. */
4543
4544 void
4545 ia64_print_operand_address (FILE * stream ATTRIBUTE_UNUSED,
4546 rtx address ATTRIBUTE_UNUSED)
4547 {
4548 }
4549
4550 /* Print an operand to an assembler instruction.
4551 C Swap and print a comparison operator.
4552 D Print an FP comparison operator.
4553 E Print 32 - constant, for SImode shifts as extract.
4554 e Print 64 - constant, for DImode rotates.
4555 F A floating point constant 0.0 emitted as f0, or 1.0 emitted as f1, or
4556 a floating point register emitted normally.
4557 I Invert a predicate register by adding 1.
4558 J Select the proper predicate register for a condition.
4559 j Select the inverse predicate register for a condition.
4560 O Append .acq for volatile load.
4561 P Postincrement of a MEM.
4562 Q Append .rel for volatile store.
4563 R Print .s .d or nothing for a single, double or no truncation.
4564 S Shift amount for shladd instruction.
4565 T Print an 8-bit sign extended number (K) as a 32-bit unsigned number
4566 for Intel assembler.
4567 U Print an 8-bit sign extended number (K) as a 64-bit unsigned number
4568 for Intel assembler.
4569 X A pair of floating point registers.
4570 r Print register name, or constant 0 as r0. HP compatibility for
4571 Linux kernel.
4572 v Print vector constant value as an 8-byte integer value. */
4573
4574 void
4575 ia64_print_operand (FILE * file, rtx x, int code)
4576 {
4577 const char *str;
4578
4579 switch (code)
4580 {
4581 case 0:
4582 /* Handled below. */
4583 break;
4584
4585 case 'C':
4586 {
4587 enum rtx_code c = swap_condition (GET_CODE (x));
4588 fputs (GET_RTX_NAME (c), file);
4589 return;
4590 }
4591
4592 case 'D':
4593 switch (GET_CODE (x))
4594 {
4595 case NE:
4596 str = "neq";
4597 break;
4598 case UNORDERED:
4599 str = "unord";
4600 break;
4601 case ORDERED:
4602 str = "ord";
4603 break;
4604 case UNLT:
4605 str = "nge";
4606 break;
4607 case UNLE:
4608 str = "ngt";
4609 break;
4610 case UNGT:
4611 str = "nle";
4612 break;
4613 case UNGE:
4614 str = "nlt";
4615 break;
4616 default:
4617 str = GET_RTX_NAME (GET_CODE (x));
4618 break;
4619 }
4620 fputs (str, file);
4621 return;
4622
4623 case 'E':
4624 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - INTVAL (x));
4625 return;
4626
4627 case 'e':
4628 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - INTVAL (x));
4629 return;
4630
4631 case 'F':
4632 if (x == CONST0_RTX (GET_MODE (x)))
4633 str = reg_names [FR_REG (0)];
4634 else if (x == CONST1_RTX (GET_MODE (x)))
4635 str = reg_names [FR_REG (1)];
4636 else
4637 {
4638 gcc_assert (GET_CODE (x) == REG);
4639 str = reg_names [REGNO (x)];
4640 }
4641 fputs (str, file);
4642 return;
4643
4644 case 'I':
4645 fputs (reg_names [REGNO (x) + 1], file);
4646 return;
4647
4648 case 'J':
4649 case 'j':
4650 {
4651 unsigned int regno = REGNO (XEXP (x, 0));
4652 if (GET_CODE (x) == EQ)
4653 regno += 1;
4654 if (code == 'j')
4655 regno ^= 1;
4656 fputs (reg_names [regno], file);
4657 }
4658 return;
4659
4660 case 'O':
4661 if (MEM_VOLATILE_P (x))
4662 fputs(".acq", file);
4663 return;
4664
4665 case 'P':
4666 {
4667 HOST_WIDE_INT value;
4668
4669 switch (GET_CODE (XEXP (x, 0)))
4670 {
4671 default:
4672 return;
4673
4674 case POST_MODIFY:
4675 x = XEXP (XEXP (XEXP (x, 0), 1), 1);
4676 if (GET_CODE (x) == CONST_INT)
4677 value = INTVAL (x);
4678 else
4679 {
4680 gcc_assert (GET_CODE (x) == REG);
4681 fprintf (file, ", %s", reg_names[REGNO (x)]);
4682 return;
4683 }
4684 break;
4685
4686 case POST_INC:
4687 value = GET_MODE_SIZE (GET_MODE (x));
4688 break;
4689
4690 case POST_DEC:
4691 value = - (HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (x));
4692 break;
4693 }
4694
4695 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC, value);
4696 return;
4697 }
4698
4699 case 'Q':
4700 if (MEM_VOLATILE_P (x))
4701 fputs(".rel", file);
4702 return;
4703
4704 case 'R':
4705 if (x == CONST0_RTX (GET_MODE (x)))
4706 fputs(".s", file);
4707 else if (x == CONST1_RTX (GET_MODE (x)))
4708 fputs(".d", file);
4709 else if (x == CONST2_RTX (GET_MODE (x)))
4710 ;
4711 else
4712 output_operand_lossage ("invalid %%R value");
4713 return;
4714
4715 case 'S':
4716 fprintf (file, "%d", exact_log2 (INTVAL (x)));
4717 return;
4718
4719 case 'T':
4720 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
4721 {
4722 fprintf (file, "0x%x", (int) INTVAL (x) & 0xffffffff);
4723 return;
4724 }
4725 break;
4726
4727 case 'U':
4728 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
4729 {
4730 const char *prefix = "0x";
4731 if (INTVAL (x) & 0x80000000)
4732 {
4733 fprintf (file, "0xffffffff");
4734 prefix = "";
4735 }
4736 fprintf (file, "%s%x", prefix, (int) INTVAL (x) & 0xffffffff);
4737 return;
4738 }
4739 break;
4740
4741 case 'X':
4742 {
4743 unsigned int regno = REGNO (x);
4744 fprintf (file, "%s, %s", reg_names [regno], reg_names [regno + 1]);
4745 }
4746 return;
4747
4748 case 'r':
4749 /* If this operand is the constant zero, write it as register zero.
4750 Any register, zero, or CONST_INT value is OK here. */
4751 if (GET_CODE (x) == REG)
4752 fputs (reg_names[REGNO (x)], file);
4753 else if (x == CONST0_RTX (GET_MODE (x)))
4754 fputs ("r0", file);
4755 else if (GET_CODE (x) == CONST_INT)
4756 output_addr_const (file, x);
4757 else
4758 output_operand_lossage ("invalid %%r value");
4759 return;
4760
4761 case 'v':
4762 gcc_assert (GET_CODE (x) == CONST_VECTOR);
4763 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
4764 break;
4765
4766 case '+':
4767 {
4768 const char *which;
4769
4770 /* For conditional branches, returns or calls, substitute
4771 sptk, dptk, dpnt, or spnt for %s. */
4772 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
4773 if (x)
4774 {
4775 int pred_val = INTVAL (XEXP (x, 0));
4776
4777 /* Guess top and bottom 10% statically predicted. */
4778 if (pred_val < REG_BR_PROB_BASE / 50
4779 && br_prob_note_reliable_p (x))
4780 which = ".spnt";
4781 else if (pred_val < REG_BR_PROB_BASE / 2)
4782 which = ".dpnt";
4783 else if (pred_val < REG_BR_PROB_BASE / 100 * 98
4784 || !br_prob_note_reliable_p (x))
4785 which = ".dptk";
4786 else
4787 which = ".sptk";
4788 }
4789 else if (GET_CODE (current_output_insn) == CALL_INSN)
4790 which = ".sptk";
4791 else
4792 which = ".dptk";
4793
4794 fputs (which, file);
4795 return;
4796 }
4797
4798 case ',':
4799 x = current_insn_predicate;
4800 if (x)
4801 {
4802 unsigned int regno = REGNO (XEXP (x, 0));
4803 if (GET_CODE (x) == EQ)
4804 regno += 1;
4805 fprintf (file, "(%s) ", reg_names [regno]);
4806 }
4807 return;
4808
4809 default:
4810 output_operand_lossage ("ia64_print_operand: unknown code");
4811 return;
4812 }
4813
4814 switch (GET_CODE (x))
4815 {
4816 /* This happens for the spill/restore instructions. */
4817 case POST_INC:
4818 case POST_DEC:
4819 case POST_MODIFY:
4820 x = XEXP (x, 0);
4821 /* ... fall through ... */
4822
4823 case REG:
4824 fputs (reg_names [REGNO (x)], file);
4825 break;
4826
4827 case MEM:
4828 {
4829 rtx addr = XEXP (x, 0);
4830 if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC)
4831 addr = XEXP (addr, 0);
4832 fprintf (file, "[%s]", reg_names [REGNO (addr)]);
4833 break;
4834 }
4835
4836 default:
4837 output_addr_const (file, x);
4838 break;
4839 }
4840
4841 return;
4842 }
4843 \f
4844 /* Compute a (partial) cost for rtx X. Return true if the complete
4845 cost has been computed, and false if subexpressions should be
4846 scanned. In either case, *TOTAL contains the cost result. */
4847 /* ??? This is incomplete. */
4848
4849 static bool
4850 ia64_rtx_costs (rtx x, int code, int outer_code, int *total,
4851 bool speed ATTRIBUTE_UNUSED)
4852 {
4853 switch (code)
4854 {
4855 case CONST_INT:
4856 switch (outer_code)
4857 {
4858 case SET:
4859 *total = satisfies_constraint_J (x) ? 0 : COSTS_N_INSNS (1);
4860 return true;
4861 case PLUS:
4862 if (satisfies_constraint_I (x))
4863 *total = 0;
4864 else if (satisfies_constraint_J (x))
4865 *total = 1;
4866 else
4867 *total = COSTS_N_INSNS (1);
4868 return true;
4869 default:
4870 if (satisfies_constraint_K (x) || satisfies_constraint_L (x))
4871 *total = 0;
4872 else
4873 *total = COSTS_N_INSNS (1);
4874 return true;
4875 }
4876
4877 case CONST_DOUBLE:
4878 *total = COSTS_N_INSNS (1);
4879 return true;
4880
4881 case CONST:
4882 case SYMBOL_REF:
4883 case LABEL_REF:
4884 *total = COSTS_N_INSNS (3);
4885 return true;
4886
4887 case MULT:
4888 /* For multiplies wider than HImode, we have to go to the FPU,
4889 which normally involves copies. Plus there's the latency
4890 of the multiply itself, and the latency of the instructions to
4891 transfer integer regs to FP regs. */
4892 /* ??? Check for FP mode. */
4893 if (GET_MODE_SIZE (GET_MODE (x)) > 2)
4894 *total = COSTS_N_INSNS (10);
4895 else
4896 *total = COSTS_N_INSNS (2);
4897 return true;
4898
4899 case PLUS:
4900 case MINUS:
4901 case ASHIFT:
4902 case ASHIFTRT:
4903 case LSHIFTRT:
4904 *total = COSTS_N_INSNS (1);
4905 return true;
4906
4907 case DIV:
4908 case UDIV:
4909 case MOD:
4910 case UMOD:
4911 /* We make divide expensive, so that divide-by-constant will be
4912 optimized to a multiply. */
4913 *total = COSTS_N_INSNS (60);
4914 return true;
4915
4916 default:
4917 return false;
4918 }
4919 }
4920
4921 /* Calculate the cost of moving data from a register in class FROM to
4922 one in class TO, using MODE. */
4923
4924 int
4925 ia64_register_move_cost (enum machine_mode mode, enum reg_class from,
4926 enum reg_class to)
4927 {
4928 /* ADDL_REGS is the same as GR_REGS for movement purposes. */
4929 if (to == ADDL_REGS)
4930 to = GR_REGS;
4931 if (from == ADDL_REGS)
4932 from = GR_REGS;
4933
4934 /* All costs are symmetric, so reduce cases by putting the
4935 lower number class as the destination. */
4936 if (from < to)
4937 {
4938 enum reg_class tmp = to;
4939 to = from, from = tmp;
4940 }
4941
4942 /* Moving from FR<->GR in XFmode must be more expensive than 2,
4943 so that we get secondary memory reloads. Between FR_REGS,
4944 we have to make this at least as expensive as MEMORY_MOVE_COST
4945 to avoid spectacularly poor register class preferencing. */
4946 if (mode == XFmode || mode == RFmode)
4947 {
4948 if (to != GR_REGS || from != GR_REGS)
4949 return MEMORY_MOVE_COST (mode, to, 0);
4950 else
4951 return 3;
4952 }
4953
4954 switch (to)
4955 {
4956 case PR_REGS:
4957 /* Moving between PR registers takes two insns. */
4958 if (from == PR_REGS)
4959 return 3;
4960 /* Moving between PR and anything but GR is impossible. */
4961 if (from != GR_REGS)
4962 return MEMORY_MOVE_COST (mode, to, 0);
4963 break;
4964
4965 case BR_REGS:
4966 /* Moving between BR and anything but GR is impossible. */
4967 if (from != GR_REGS && from != GR_AND_BR_REGS)
4968 return MEMORY_MOVE_COST (mode, to, 0);
4969 break;
4970
4971 case AR_I_REGS:
4972 case AR_M_REGS:
4973 /* Moving between AR and anything but GR is impossible. */
4974 if (from != GR_REGS)
4975 return MEMORY_MOVE_COST (mode, to, 0);
4976 break;
4977
4978 case GR_REGS:
4979 case FR_REGS:
4980 case FP_REGS:
4981 case GR_AND_FR_REGS:
4982 case GR_AND_BR_REGS:
4983 case ALL_REGS:
4984 break;
4985
4986 default:
4987 gcc_unreachable ();
4988 }
4989
4990 return 2;
4991 }
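/* For example, a DImode copy between GR_REGS and FR_REGS costs 2, a
   PR_REGS to PR_REGS copy costs 3, and any XFmode move that is not
   GR to GR is priced at MEMORY_MOVE_COST so that reload uses a memory
   intermediate instead.  */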
4992
4993 /* Implement PREFERRED_RELOAD_CLASS. Place additional restrictions on RCLASS
4994 to use when copying X into that class. */
4995
4996 enum reg_class
4997 ia64_preferred_reload_class (rtx x, enum reg_class rclass)
4998 {
4999 switch (rclass)
5000 {
5001 case FR_REGS:
5002 case FP_REGS:
5003 /* Don't allow volatile mem reloads into floating point registers.
5004 This is defined to force reload to choose the r/m case instead
5005 of the f/f case when reloading (set (reg fX) (mem/v)). */
5006 if (MEM_P (x) && MEM_VOLATILE_P (x))
5007 return NO_REGS;
5008
5009 /* Force all unrecognized constants into the constant pool. */
5010 if (CONSTANT_P (x))
5011 return NO_REGS;
5012 break;
5013
5014 case AR_M_REGS:
5015 case AR_I_REGS:
5016 if (!OBJECT_P (x))
5017 return NO_REGS;
5018 break;
5019
5020 default:
5021 break;
5022 }
5023
5024 return rclass;
5025 }
5026
5027 /* This function returns the register class required for a secondary
5028 register when copying between one of the registers in RCLASS and X,
5029 using MODE. A return value of NO_REGS means that no secondary register
5030 is required. */
5031
5032 enum reg_class
5033 ia64_secondary_reload_class (enum reg_class rclass,
5034 enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
5035 {
5036 int regno = -1;
5037
5038 if (GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
5039 regno = true_regnum (x);
5040
5041 switch (rclass)
5042 {
5043 case BR_REGS:
5044 case AR_M_REGS:
5045 case AR_I_REGS:
5046 /* ??? BR<->BR register copies can happen due to a bad gcse/cse/global
5047 interaction. We end up with two pseudos with overlapping lifetimes
5048 both of which are equiv to the same constant, and both which need
5049 to be in BR_REGS. This seems to be a cse bug. cse_basic_block_end
5050 changes depending on the path length, which means the qty_first_reg
5051 check in make_regs_eqv can give different answers at different times.
5052 At some point I'll probably need a reload_indi pattern to handle
5053 this.
5054
5055 We can also get GR_AND_FR_REGS to BR_REGS/AR_REGS copies, where we
5056 wound up with a FP register from GR_AND_FR_REGS. Extend that to all
5057 non-general registers for good measure. */
5058 if (regno >= 0 && ! GENERAL_REGNO_P (regno))
5059 return GR_REGS;
5060
5061 /* This is needed if a pseudo used as a call_operand gets spilled to a
5062 stack slot. */
5063 if (GET_CODE (x) == MEM)
5064 return GR_REGS;
5065 break;
5066
5067 case FR_REGS:
5068 case FP_REGS:
5069 /* Need to go through general registers to get to other class regs. */
5070 if (regno >= 0 && ! (FR_REGNO_P (regno) || GENERAL_REGNO_P (regno)))
5071 return GR_REGS;
5072
5073 /* This can happen when a paradoxical subreg is an operand to the
5074 muldi3 pattern. */
5075 /* ??? This shouldn't be necessary after instruction scheduling is
5076 enabled, because paradoxical subregs are not accepted by
5077 register_operand when INSN_SCHEDULING is defined. Or alternatively,
5078 stop the paradoxical subreg stupidity in the *_operand functions
5079 in recog.c. */
5080 if (GET_CODE (x) == MEM
5081 && (GET_MODE (x) == SImode || GET_MODE (x) == HImode
5082 || GET_MODE (x) == QImode))
5083 return GR_REGS;
5084
5085 /* This can happen because of the ior/and/etc patterns that accept FP
5086 registers as operands. If the third operand is a constant, then it
5087 needs to be reloaded into a FP register. */
5088 if (GET_CODE (x) == CONST_INT)
5089 return GR_REGS;
5090
5091 /* This can happen because of register elimination in a muldi3 insn.
5092 E.g. `26107 * (unsigned long)&u'. */
5093 if (GET_CODE (x) == PLUS)
5094 return GR_REGS;
5095 break;
5096
5097 case PR_REGS:
5098 /* ??? This happens if we cse/gcse a BImode value across a call,
5099 and the function has a nonlocal goto. This is because global
5100 does not allocate call crossing pseudos to hard registers when
5101 crtl->has_nonlocal_goto is true. This is relatively
5102 common for C++ programs that use exceptions. To reproduce,
5103 return NO_REGS and compile libstdc++. */
5104 if (GET_CODE (x) == MEM)
5105 return GR_REGS;
5106
5107 /* This can happen when we take a BImode subreg of a DImode value,
5108 and that DImode value winds up in some non-GR register. */
5109 if (regno >= 0 && ! GENERAL_REGNO_P (regno) && ! PR_REGNO_P (regno))
5110 return GR_REGS;
5111 break;
5112
5113 default:
5114 break;
5115 }
5116
5117 return NO_REGS;
5118 }
5119
5120 \f
5121 /* Implement targetm.unspec_may_trap_p hook. */
5122 static int
5123 ia64_unspec_may_trap_p (const_rtx x, unsigned flags)
5124 {
5125 if (GET_CODE (x) == UNSPEC)
5126 {
5127 switch (XINT (x, 1))
5128 {
5129 case UNSPEC_LDA:
5130 case UNSPEC_LDS:
5131 case UNSPEC_LDSA:
5132 case UNSPEC_LDCCLR:
5133 case UNSPEC_CHKACLR:
5134 case UNSPEC_CHKS:
5135 /* These unspecs are just wrappers. */
5136 return may_trap_p_1 (XVECEXP (x, 0, 0), flags);
5137 }
5138 }
5139
5140 return default_unspec_may_trap_p (x, flags);
5141 }
5142
5143 \f
5144 /* Parse the -mfixed-range= option string. */
5145
5146 static void
5147 fix_range (const char *const_str)
5148 {
5149 int i, first, last;
5150 char *str, *dash, *comma;
5151
5152 /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
5153 REG2 are either register names or register numbers. The effect
5154 of this option is to mark the registers in the range from REG1 to
5155 REG2 as ``fixed'' so they won't be used by the compiler. This is
5156 used, e.g., to ensure that kernel mode code doesn't use f32-f127. */
5157
5158 i = strlen (const_str);
5159 str = (char *) alloca (i + 1);
5160 memcpy (str, const_str, i + 1);
5161
5162 while (1)
5163 {
5164 dash = strchr (str, '-');
5165 if (!dash)
5166 {
5167 warning (0, "value of -mfixed-range must have form REG1-REG2");
5168 return;
5169 }
5170 *dash = '\0';
5171
5172 comma = strchr (dash + 1, ',');
5173 if (comma)
5174 *comma = '\0';
5175
5176 first = decode_reg_name (str);
5177 if (first < 0)
5178 {
5179 warning (0, "unknown register name: %s", str);
5180 return;
5181 }
5182
5183 last = decode_reg_name (dash + 1);
5184 if (last < 0)
5185 {
5186 warning (0, "unknown register name: %s", dash + 1);
5187 return;
5188 }
5189
5190 *dash = '-';
5191
5192 if (first > last)
5193 {
5194 warning (0, "%s-%s is an empty range", str, dash + 1);
5195 return;
5196 }
5197
5198 for (i = first; i <= last; ++i)
5199 fixed_regs[i] = call_used_regs[i] = 1;
5200
5201 if (!comma)
5202 break;
5203
5204 *comma = ',';
5205 str = comma + 1;
5206 }
5207 }
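/* For example, -mfixed-range=f32-f127 marks f32 through f127 as fixed,
   and multiple ranges may be given separated by commas, as in
   -mfixed-range=f12-f15,f32-f127.  */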
5208
5209 /* Implement TARGET_HANDLE_OPTION. */
5210
5211 static bool
5212 ia64_handle_option (size_t code, const char *arg, int value)
5213 {
5214 switch (code)
5215 {
5216 case OPT_mfixed_range_:
5217 fix_range (arg);
5218 return true;
5219
5220 case OPT_mtls_size_:
5221 if (value != 14 && value != 22 && value != 64)
5222 error ("bad value %<%s%> for -mtls-size= switch", arg);
5223 return true;
5224
5225 case OPT_mtune_:
5226 {
5227 static struct pta
5228 {
5229 const char *name; /* processor name or nickname. */
5230 enum processor_type processor;
5231 }
5232 const processor_alias_table[] =
5233 {
5234 {"itanium", PROCESSOR_ITANIUM},
5235 {"itanium1", PROCESSOR_ITANIUM},
5236 {"merced", PROCESSOR_ITANIUM},
5237 {"itanium2", PROCESSOR_ITANIUM2},
5238 {"mckinley", PROCESSOR_ITANIUM2},
5239 };
5240 int const pta_size = ARRAY_SIZE (processor_alias_table);
5241 int i;
5242
5243 for (i = 0; i < pta_size; i++)
5244 if (!strcmp (arg, processor_alias_table[i].name))
5245 {
5246 ia64_tune = processor_alias_table[i].processor;
5247 break;
5248 }
5249 if (i == pta_size)
5250 error ("bad value %<%s%> for -mtune= switch", arg);
5251 return true;
5252 }
5253
5254 default:
5255 return true;
5256 }
5257 }
5258
5259 /* Implement OVERRIDE_OPTIONS. */
5260
5261 void
5262 ia64_override_options (void)
5263 {
5264 if (TARGET_AUTO_PIC)
5265 target_flags |= MASK_CONST_GP;
5266
5267 if (TARGET_INLINE_SQRT == INL_MIN_LAT)
5268 {
5269 warning (0, "not yet implemented: latency-optimized inline square root");
5270 TARGET_INLINE_SQRT = INL_MAX_THR;
5271 }
5272
5273 ia64_flag_schedule_insns2 = flag_schedule_insns_after_reload;
5274 flag_schedule_insns_after_reload = 0;
5275
5276 if (optimize >= 3
5277 && ! sel_sched_switch_set)
5278 {
5279 flag_selective_scheduling2 = 1;
5280 flag_sel_sched_pipelining = 1;
5281 }
5282 if (mflag_sched_control_spec == 2)
5283 {
5284 /* Control speculation is on by default for the selective scheduler,
5285 but not for the Haifa scheduler. */
5286 mflag_sched_control_spec = flag_selective_scheduling2 ? 1 : 0;
5287 }
5288 if (flag_sel_sched_pipelining && flag_auto_inc_dec)
5289 {
5290 /* FIXME: remove this once breaking auto-inc insns apart is implemented
5291 as a transformation. */
5292 flag_auto_inc_dec = 0;
5293 }
5294
5295 ia64_section_threshold = g_switch_set ? g_switch_value : IA64_DEFAULT_GVALUE;
5296
5297 init_machine_status = ia64_init_machine_status;
5298
5299 if (align_functions <= 0)
5300 align_functions = 64;
5301 if (align_loops <= 0)
5302 align_loops = 32;
5303 }
5304
5305 /* Initialize the record of emitted frame related registers. */
5306
5307 void ia64_init_expanders (void)
5308 {
5309 memset (&emitted_frame_related_regs, 0, sizeof (emitted_frame_related_regs));
5310 }
5311
5312 static struct machine_function *
5313 ia64_init_machine_status (void)
5314 {
5315 return GGC_CNEW (struct machine_function);
5316 }
5317 \f
5318 static enum attr_itanium_class ia64_safe_itanium_class (rtx);
5319 static enum attr_type ia64_safe_type (rtx);
5320
5321 static enum attr_itanium_class
5322 ia64_safe_itanium_class (rtx insn)
5323 {
5324 if (recog_memoized (insn) >= 0)
5325 return get_attr_itanium_class (insn);
5326 else
5327 return ITANIUM_CLASS_UNKNOWN;
5328 }
5329
5330 static enum attr_type
5331 ia64_safe_type (rtx insn)
5332 {
5333 if (recog_memoized (insn) >= 0)
5334 return get_attr_type (insn);
5335 else
5336 return TYPE_UNKNOWN;
5337 }
5338 \f
5339 /* The following collection of routines emit instruction group stop bits as
5340 necessary to avoid dependencies. */
5341
5342 /* Need to track some additional registers as far as serialization is
5343 concerned so we can properly handle br.call and br.ret. We could
5344 make these registers visible to gcc, but since these registers are
5345 never explicitly used in gcc generated code, it seems wasteful to
5346 do so (plus it would make the call and return patterns needlessly
5347 complex). */
5348 #define REG_RP (BR_REG (0))
5349 #define REG_AR_CFM (FIRST_PSEUDO_REGISTER + 1)
5350 /* This is used for volatile asms which may require a stop bit immediately
5351 before and after them. */
5352 #define REG_VOLATILE (FIRST_PSEUDO_REGISTER + 2)
5353 #define AR_UNAT_BIT_0 (FIRST_PSEUDO_REGISTER + 3)
5354 #define NUM_REGS (AR_UNAT_BIT_0 + 64)
5355
5356 /* For each register, we keep track of how it has been written in the
5357 current instruction group.
5358
5359 If a register is written unconditionally (no qualifying predicate),
5360 WRITE_COUNT is set to 2 and FIRST_PRED is ignored.
5361
5362 If a register is written if its qualifying predicate P is true, we
5363 set WRITE_COUNT to 1 and FIRST_PRED to P. Later on, the same register
5364 may be written again by the complement of P (P^1) and when this happens,
5365 WRITE_COUNT gets set to 2.
5366
5367 The result of this is that whenever an insn attempts to write a register
5368 whose WRITE_COUNT is two, we need to issue an insn group barrier first.
5369
5370 If a predicate register is written by a floating-point insn, we set
5371 WRITTEN_BY_FP to true.
5372
5373 If a predicate register is written by an AND.ORCM we set WRITTEN_BY_AND
5374 to true; if it was written by an OR.ANDCM we set WRITTEN_BY_OR to true. */
5375
5376 #if GCC_VERSION >= 4000
5377 #define RWS_FIELD_TYPE __extension__ unsigned short
5378 #else
5379 #define RWS_FIELD_TYPE unsigned int
5380 #endif
5381 struct reg_write_state
5382 {
5383 RWS_FIELD_TYPE write_count : 2;
5384 RWS_FIELD_TYPE first_pred : 10;
5385 RWS_FIELD_TYPE written_by_fp : 1;
5386 RWS_FIELD_TYPE written_by_and : 1;
5387 RWS_FIELD_TYPE written_by_or : 1;
5388 };
5389
5390 /* Cumulative info for the current instruction group. */
5391 struct reg_write_state rws_sum[NUM_REGS];
5392 #ifdef ENABLE_CHECKING
5393 /* Bitmap whether a register has been written in the current insn. */
5394 HARD_REG_ELT_TYPE rws_insn[(NUM_REGS + HOST_BITS_PER_WIDEST_FAST_INT - 1)
5395 / HOST_BITS_PER_WIDEST_FAST_INT];
5396
5397 static inline void
5398 rws_insn_set (int regno)
5399 {
5400 gcc_assert (!TEST_HARD_REG_BIT (rws_insn, regno));
5401 SET_HARD_REG_BIT (rws_insn, regno);
5402 }
5403
5404 static inline int
5405 rws_insn_test (int regno)
5406 {
5407 return TEST_HARD_REG_BIT (rws_insn, regno);
5408 }
5409 #else
5410 /* When not checking, track just REG_AR_CFM and REG_VOLATILE. */
5411 unsigned char rws_insn[2];
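/* Two entries suffice because rws_insn_test is only ever consulted for
   REG_AR_CFM (in the CALL case of rtx_needs_barrier) and REG_VOLATILE
   (in the ASM_OPERANDS case).  */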
5412
5413 static inline void
5414 rws_insn_set (int regno)
5415 {
5416 if (regno == REG_AR_CFM)
5417 rws_insn[0] = 1;
5418 else if (regno == REG_VOLATILE)
5419 rws_insn[1] = 1;
5420 }
5421
5422 static inline int
5423 rws_insn_test (int regno)
5424 {
5425 if (regno == REG_AR_CFM)
5426 return rws_insn[0];
5427 if (regno == REG_VOLATILE)
5428 return rws_insn[1];
5429 return 0;
5430 }
5431 #endif
5432
5433 /* Indicates whether this is the first instruction after a stop bit,
5434 in which case we don't need another stop bit. Without this,
5435 ia64_variable_issue will die when scheduling an alloc. */
5436 static int first_instruction;
5437
5438 /* Misc flags needed to compute RAW/WAW dependencies while we are traversing
5439 RTL for one instruction. */
5440 struct reg_flags
5441 {
5442 unsigned int is_write : 1; /* Is register being written? */
5443 unsigned int is_fp : 1; /* Is register used as part of an fp op? */
5444 unsigned int is_branch : 1; /* Is register used as part of a branch? */
5445 unsigned int is_and : 1; /* Is register used as part of and.orcm? */
5446 unsigned int is_or : 1; /* Is register used as part of or.andcm? */
5447 unsigned int is_sibcall : 1; /* Is this a sibling or normal call? */
5448 };
5449
5450 static void rws_update (int, struct reg_flags, int);
5451 static int rws_access_regno (int, struct reg_flags, int);
5452 static int rws_access_reg (rtx, struct reg_flags, int);
5453 static void update_set_flags (rtx, struct reg_flags *);
5454 static int set_src_needs_barrier (rtx, struct reg_flags, int);
5455 static int rtx_needs_barrier (rtx, struct reg_flags, int);
5456 static void init_insn_group_barriers (void);
5457 static int group_barrier_needed (rtx);
5458 static int safe_group_barrier_needed (rtx);
5459 static int in_safe_group_barrier;
5460
5461 /* Update *RWS for REGNO, which is being written by the current instruction,
5462 with predicate PRED, and associated register flags in FLAGS. */
5463
5464 static void
5465 rws_update (int regno, struct reg_flags flags, int pred)
5466 {
5467 if (pred)
5468 rws_sum[regno].write_count++;
5469 else
5470 rws_sum[regno].write_count = 2;
5471 rws_sum[regno].written_by_fp |= flags.is_fp;
5472 /* ??? Not tracking and/or across differing predicates. */
5473 rws_sum[regno].written_by_and = flags.is_and;
5474 rws_sum[regno].written_by_or = flags.is_or;
5475 rws_sum[regno].first_pred = pred;
5476 }
5477
5478 /* Handle an access to register REGNO of type FLAGS using predicate register
5479 PRED. Update rws_sum array. Return 1 if this access creates
5480 a dependency with an earlier instruction in the same group. */
5481
5482 static int
5483 rws_access_regno (int regno, struct reg_flags flags, int pred)
5484 {
5485 int need_barrier = 0;
5486
5487 gcc_assert (regno < NUM_REGS);
5488
5489 if (! PR_REGNO_P (regno))
5490 flags.is_and = flags.is_or = 0;
5491
5492 if (flags.is_write)
5493 {
5494 int write_count;
5495
5496 rws_insn_set (regno);
5497 write_count = rws_sum[regno].write_count;
5498
5499 switch (write_count)
5500 {
5501 case 0:
5502 /* The register has not been written yet. */
5503 if (!in_safe_group_barrier)
5504 rws_update (regno, flags, pred);
5505 break;
5506
5507 case 1:
5508 /* The register has been written via a predicate. If this is
5509 not a complementary predicate, then we need a barrier. */
5510 /* ??? This assumes that P and P+1 are always complementary
5511 predicates for P even. */
5512 if (flags.is_and && rws_sum[regno].written_by_and)
5513 ;
5514 else if (flags.is_or && rws_sum[regno].written_by_or)
5515 ;
5516 else if ((rws_sum[regno].first_pred ^ 1) != pred)
5517 need_barrier = 1;
5518 if (!in_safe_group_barrier)
5519 rws_update (regno, flags, pred);
5520 break;
5521
5522 case 2:
5523 /* The register has been unconditionally written already. We
5524 need a barrier. */
5525 if (flags.is_and && rws_sum[regno].written_by_and)
5526 ;
5527 else if (flags.is_or && rws_sum[regno].written_by_or)
5528 ;
5529 else
5530 need_barrier = 1;
5531 if (!in_safe_group_barrier)
5532 {
5533 rws_sum[regno].written_by_and = flags.is_and;
5534 rws_sum[regno].written_by_or = flags.is_or;
5535 }
5536 break;
5537
5538 default:
5539 gcc_unreachable ();
5540 }
5541 }
5542 else
5543 {
5544 if (flags.is_branch)
5545 {
5546 /* Branches have several RAW exceptions that allow us to avoid
5547 barriers. */
5548
5549 if (REGNO_REG_CLASS (regno) == BR_REGS || regno == AR_PFS_REGNUM)
5550 /* RAW dependencies on branch regs are permissible as long
5551 as the writer is a non-branch instruction. Since we
5552 never generate code that uses a branch register written
5553 by a branch instruction, handling this case is
5554 easy. */
5555 return 0;
5556
5557 if (REGNO_REG_CLASS (regno) == PR_REGS
5558 && ! rws_sum[regno].written_by_fp)
5559 /* The predicates of a branch are available within the
5560 same insn group as long as the predicate was written by
5561 something other than a floating-point instruction. */
5562 return 0;
5563 }
5564
5565 if (flags.is_and && rws_sum[regno].written_by_and)
5566 return 0;
5567 if (flags.is_or && rws_sum[regno].written_by_or)
5568 return 0;
5569
5570 switch (rws_sum[regno].write_count)
5571 {
5572 case 0:
5573 /* The register has not been written yet. */
5574 break;
5575
5576 case 1:
5577 /* The register has been written via a predicate. If this is
5578 not a complementary predicate, then we need a barrier. */
5579 /* ??? This assumes that P and P+1 are always complementary
5580 predicates for P even. */
5581 if ((rws_sum[regno].first_pred ^ 1) != pred)
5582 need_barrier = 1;
5583 break;
5584
5585 case 2:
5586 /* The register has been unconditionally written already. We
5587 need a barrier. */
5588 need_barrier = 1;
5589 break;
5590
5591 default:
5592 gcc_unreachable ();
5593 }
5594 }
5595
5596 return need_barrier;
5597 }
5598
5599 static int
5600 rws_access_reg (rtx reg, struct reg_flags flags, int pred)
5601 {
5602 int regno = REGNO (reg);
5603 int n = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg));
5604
5605 if (n == 1)
5606 return rws_access_regno (regno, flags, pred);
5607 else
5608 {
5609 int need_barrier = 0;
5610 while (--n >= 0)
5611 need_barrier |= rws_access_regno (regno + n, flags, pred);
5612 return need_barrier;
5613 }
5614 }
5615
5616 /* Examine X, which is a SET rtx, and update the register flags
5617 stored in *PFLAGS. */
5618
5619 static void
5620 update_set_flags (rtx x, struct reg_flags *pflags)
5621 {
5622 rtx src = SET_SRC (x);
5623
5624 switch (GET_CODE (src))
5625 {
5626 case CALL:
5627 return;
5628
5629 case IF_THEN_ELSE:
5630 /* There are four cases here:
5631 (1) The destination is (pc), in which case this is a branch,
5632 nothing here applies.
5633 (2) The destination is ar.lc, in which case this is a
5634 doloop_end_internal,
5635 (3) The destination is an fp register, in which case this is
5636 an fselect instruction.
5637 (4) The condition has (unspec [(reg)] UNSPEC_LDC), in which case
5638 this is a check load.
5639 In all cases, nothing we do in this function applies. */
5640 return;
5641
5642 default:
5643 if (COMPARISON_P (src)
5644 && SCALAR_FLOAT_MODE_P (GET_MODE (XEXP (src, 0))))
5645 /* Set pflags->is_fp to 1 so that we know we're dealing
5646 with a floating point comparison when processing the
5647 destination of the SET. */
5648 pflags->is_fp = 1;
5649
5650 /* Discover if this is a parallel comparison. We only handle
5651 and.orcm and or.andcm at present, since we must retain a
5652 strict inverse on the predicate pair. */
5653 else if (GET_CODE (src) == AND)
5654 pflags->is_and = 1;
5655 else if (GET_CODE (src) == IOR)
5656 pflags->is_or = 1;
5657
5658 break;
5659 }
5660 }
5661
5662 /* Subroutine of rtx_needs_barrier; this function determines whether the
5663 source of a given SET rtx found in X needs a barrier. FLAGS and PRED
5664 are as in rtx_needs_barrier. */
5665
5666
5667 static int
5668 set_src_needs_barrier (rtx x, struct reg_flags flags, int pred)
5669 {
5670 int need_barrier = 0;
5671 rtx dst;
5672 rtx src = SET_SRC (x);
5673
5674 if (GET_CODE (src) == CALL)
5675 /* We don't need to worry about the result registers that
5676 get written by subroutine call. */
5677 return rtx_needs_barrier (src, flags, pred);
5678 else if (SET_DEST (x) == pc_rtx)
5679 {
5680 /* X is a conditional branch. */
5681 /* ??? This seems redundant, as the caller sets this bit for
5682 all JUMP_INSNs. */
5683 if (!ia64_spec_check_src_p (src))
5684 flags.is_branch = 1;
5685 return rtx_needs_barrier (src, flags, pred);
5686 }
5687
5688 if (ia64_spec_check_src_p (src))
5689 /* Avoid checking one register twice (in condition
5690 and in 'then' section) for ldc pattern. */
5691 {
5692 gcc_assert (REG_P (XEXP (src, 2)));
5693 need_barrier = rtx_needs_barrier (XEXP (src, 2), flags, pred);
5694
5695 /* We process MEM below. */
5696 src = XEXP (src, 1);
5697 }
5698
5699 need_barrier |= rtx_needs_barrier (src, flags, pred);
5700
5701 dst = SET_DEST (x);
5702 if (GET_CODE (dst) == ZERO_EXTRACT)
5703 {
5704 need_barrier |= rtx_needs_barrier (XEXP (dst, 1), flags, pred);
5705 need_barrier |= rtx_needs_barrier (XEXP (dst, 2), flags, pred);
5706 }
5707 return need_barrier;
5708 }
5709
5710 /* Handle an access to rtx X of type FLAGS using predicate register
5711 PRED. Return 1 if this access creates a dependency with an earlier
5712 instruction in the same group. */
5713
5714 static int
5715 rtx_needs_barrier (rtx x, struct reg_flags flags, int pred)
5716 {
5717 int i, j;
5718 int is_complemented = 0;
5719 int need_barrier = 0;
5720 const char *format_ptr;
5721 struct reg_flags new_flags;
5722 rtx cond;
5723
5724 if (! x)
5725 return 0;
5726
5727 new_flags = flags;
5728
5729 switch (GET_CODE (x))
5730 {
5731 case SET:
5732 update_set_flags (x, &new_flags);
5733 need_barrier = set_src_needs_barrier (x, new_flags, pred);
5734 if (GET_CODE (SET_SRC (x)) != CALL)
5735 {
5736 new_flags.is_write = 1;
5737 need_barrier |= rtx_needs_barrier (SET_DEST (x), new_flags, pred);
5738 }
5739 break;
5740
5741 case CALL:
5742 new_flags.is_write = 0;
5743 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
5744
5745 /* Avoid multiple register writes, in case this is a pattern with
5746 multiple CALL rtx. This avoids a failure in rws_access_reg. */
5747 if (! flags.is_sibcall && ! rws_insn_test (REG_AR_CFM))
5748 {
5749 new_flags.is_write = 1;
5750 need_barrier |= rws_access_regno (REG_RP, new_flags, pred);
5751 need_barrier |= rws_access_regno (AR_PFS_REGNUM, new_flags, pred);
5752 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
5753 }
5754 break;
5755
5756 case COND_EXEC:
5757 /* X is a predicated instruction. */
5758
5759 cond = COND_EXEC_TEST (x);
5760 gcc_assert (!pred);
5761 need_barrier = rtx_needs_barrier (cond, flags, 0);
5762
5763 if (GET_CODE (cond) == EQ)
5764 is_complemented = 1;
5765 cond = XEXP (cond, 0);
5766 gcc_assert (GET_CODE (cond) == REG
5767 && REGNO_REG_CLASS (REGNO (cond)) == PR_REGS);
5768 pred = REGNO (cond);
5769 if (is_complemented)
5770 ++pred;
5771
5772 need_barrier |= rtx_needs_barrier (COND_EXEC_CODE (x), flags, pred);
5773 return need_barrier;
5774
5775 case CLOBBER:
5776 case USE:
5777 /* CLOBBER and USE are for earlier compiler phases only. */
5778 break;
5779
5780 case ASM_OPERANDS:
5781 case ASM_INPUT:
5782 /* We always emit stop bits for traditional asms. We emit stop bits
5783 for volatile extended asms if TARGET_VOL_ASM_STOP is true. */
5784 if (GET_CODE (x) != ASM_OPERANDS
5785 || (MEM_VOLATILE_P (x) && TARGET_VOL_ASM_STOP))
5786 {
5787 /* Avoid writing the register multiple times if we have multiple
5788 asm outputs. This avoids a failure in rws_access_reg. */
5789 if (! rws_insn_test (REG_VOLATILE))
5790 {
5791 new_flags.is_write = 1;
5792 rws_access_regno (REG_VOLATILE, new_flags, pred);
5793 }
5794 return 1;
5795 }
5796
5797 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
5798 We cannot just fall through here since then we would be confused
5799 by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
5800 a traditional asm, unlike its normal usage. */
5801
5802 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; --i)
5803 if (rtx_needs_barrier (ASM_OPERANDS_INPUT (x, i), flags, pred))
5804 need_barrier = 1;
5805 break;
5806
5807 case PARALLEL:
5808 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
5809 {
5810 rtx pat = XVECEXP (x, 0, i);
5811 switch (GET_CODE (pat))
5812 {
5813 case SET:
5814 update_set_flags (pat, &new_flags);
5815 need_barrier |= set_src_needs_barrier (pat, new_flags, pred);
5816 break;
5817
5818 case USE:
5819 case CALL:
5820 case ASM_OPERANDS:
5821 need_barrier |= rtx_needs_barrier (pat, flags, pred);
5822 break;
5823
5824 case CLOBBER:
5825 case RETURN:
5826 break;
5827
5828 default:
5829 gcc_unreachable ();
5830 }
5831 }
5832 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
5833 {
5834 rtx pat = XVECEXP (x, 0, i);
5835 if (GET_CODE (pat) == SET)
5836 {
5837 if (GET_CODE (SET_SRC (pat)) != CALL)
5838 {
5839 new_flags.is_write = 1;
5840 need_barrier |= rtx_needs_barrier (SET_DEST (pat), new_flags,
5841 pred);
5842 }
5843 }
5844 else if (GET_CODE (pat) == CLOBBER || GET_CODE (pat) == RETURN)
5845 need_barrier |= rtx_needs_barrier (pat, flags, pred);
5846 }
5847 break;
5848
5849 case SUBREG:
5850 need_barrier |= rtx_needs_barrier (SUBREG_REG (x), flags, pred);
5851 break;
5852 case REG:
5853 if (REGNO (x) == AR_UNAT_REGNUM)
5854 {
5855 for (i = 0; i < 64; ++i)
5856 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + i, flags, pred);
5857 }
5858 else
5859 need_barrier = rws_access_reg (x, flags, pred);
5860 break;
5861
5862 case MEM:
5863 /* Find the regs used in memory address computation. */
5864 new_flags.is_write = 0;
5865 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
5866 break;
5867
5868 case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR:
5869 case SYMBOL_REF: case LABEL_REF: case CONST:
5870 break;
5871
5872 /* Operators with side-effects. */
5873 case POST_INC: case POST_DEC:
5874 gcc_assert (GET_CODE (XEXP (x, 0)) == REG);
5875
5876 new_flags.is_write = 0;
5877 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
5878 new_flags.is_write = 1;
5879 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
5880 break;
5881
5882 case POST_MODIFY:
5883 gcc_assert (GET_CODE (XEXP (x, 0)) == REG);
5884
5885 new_flags.is_write = 0;
5886 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
5887 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
5888 new_flags.is_write = 1;
5889 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
5890 break;
5891
5892 /* Handle common unary and binary ops for efficiency. */
5893 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
5894 case MOD: case UDIV: case UMOD: case AND: case IOR:
5895 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
5896 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
5897 case NE: case EQ: case GE: case GT: case LE:
5898 case LT: case GEU: case GTU: case LEU: case LTU:
5899 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
5900 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
5901 break;
5902
5903 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
5904 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
5905 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
5906 case SQRT: case FFS: case POPCOUNT:
5907 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
5908 break;
5909
5910 case VEC_SELECT:
5911 /* VEC_SELECT's second argument is a PARALLEL with integers that
5912 describe the elements selected. On ia64, those integers are
5913 always constants. Avoid walking the PARALLEL so that we don't
5914 get confused with "normal" parallels and then die. */
5915 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
5916 break;
5917
5918 case UNSPEC:
5919 switch (XINT (x, 1))
5920 {
5921 case UNSPEC_LTOFF_DTPMOD:
5922 case UNSPEC_LTOFF_DTPREL:
5923 case UNSPEC_DTPREL:
5924 case UNSPEC_LTOFF_TPREL:
5925 case UNSPEC_TPREL:
5926 case UNSPEC_PRED_REL_MUTEX:
5927 case UNSPEC_PIC_CALL:
5928 case UNSPEC_MF:
5929 case UNSPEC_FETCHADD_ACQ:
5930 case UNSPEC_BSP_VALUE:
5931 case UNSPEC_FLUSHRS:
5932 case UNSPEC_BUNDLE_SELECTOR:
5933 break;
5934
5935 case UNSPEC_GR_SPILL:
5936 case UNSPEC_GR_RESTORE:
5937 {
5938 HOST_WIDE_INT offset = INTVAL (XVECEXP (x, 0, 1));
5939 HOST_WIDE_INT bit = (offset >> 3) & 63;
5940
5941 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5942 new_flags.is_write = (XINT (x, 1) == UNSPEC_GR_SPILL);
5943 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + bit,
5944 new_flags, pred);
5945 break;
5946 }
5947
5948 case UNSPEC_FR_SPILL:
5949 case UNSPEC_FR_RESTORE:
5950 case UNSPEC_GETF_EXP:
5951 case UNSPEC_SETF_EXP:
5952 case UNSPEC_ADDP4:
5953 case UNSPEC_FR_SQRT_RECIP_APPROX:
5954 case UNSPEC_FR_SQRT_RECIP_APPROX_RES:
5955 case UNSPEC_LDA:
5956 case UNSPEC_LDS:
5957 case UNSPEC_LDS_A:
5958 case UNSPEC_LDSA:
5959 case UNSPEC_CHKACLR:
5960 case UNSPEC_CHKS:
5961 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5962 break;
5963
5964 case UNSPEC_FR_RECIP_APPROX:
5965 case UNSPEC_SHRP:
5966 case UNSPEC_COPYSIGN:
5967 case UNSPEC_FR_RECIP_APPROX_RES:
5968 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5969 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
5970 break;
5971
5972 case UNSPEC_CMPXCHG_ACQ:
5973 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
5974 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 2), flags, pred);
5975 break;
5976
5977 default:
5978 gcc_unreachable ();
5979 }
5980 break;
5981
5982 case UNSPEC_VOLATILE:
5983 switch (XINT (x, 1))
5984 {
5985 case UNSPECV_ALLOC:
5986 /* Alloc must always be the first instruction of a group.
5987 We force this by always returning true. */
5988 /* ??? We might get better scheduling if we explicitly check for
5989 input/local/output register dependencies, and modify the
5990 scheduler so that alloc is always reordered to the start of
5991 the current group. We could then eliminate all of the
5992 first_instruction code. */
5993 rws_access_regno (AR_PFS_REGNUM, flags, pred);
5994
5995 new_flags.is_write = 1;
5996 rws_access_regno (REG_AR_CFM, new_flags, pred);
5997 return 1;
5998
5999 case UNSPECV_SET_BSP:
6000 need_barrier = 1;
6001 break;
6002
6003 case UNSPECV_BLOCKAGE:
6004 case UNSPECV_INSN_GROUP_BARRIER:
6005 case UNSPECV_BREAK:
6006 case UNSPECV_PSAC_ALL:
6007 case UNSPECV_PSAC_NORMAL:
6008 return 0;
6009
6010 default:
6011 gcc_unreachable ();
6012 }
6013 break;
6014
6015 case RETURN:
6016 new_flags.is_write = 0;
6017 need_barrier = rws_access_regno (REG_RP, flags, pred);
6018 need_barrier |= rws_access_regno (AR_PFS_REGNUM, flags, pred);
6019
6020 new_flags.is_write = 1;
6021 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
6022 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
6023 break;
6024
6025 default:
6026 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
6027 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6028 switch (format_ptr[i])
6029 {
6030 case '0': /* unused field */
6031 case 'i': /* integer */
6032 case 'n': /* note */
6033 case 'w': /* wide integer */
6034 case 's': /* pointer to string */
6035 case 'S': /* optional pointer to string */
6036 break;
6037
6038 case 'e':
6039 if (rtx_needs_barrier (XEXP (x, i), flags, pred))
6040 need_barrier = 1;
6041 break;
6042
6043 case 'E':
6044 for (j = XVECLEN (x, i) - 1; j >= 0; --j)
6045 if (rtx_needs_barrier (XVECEXP (x, i, j), flags, pred))
6046 need_barrier = 1;
6047 break;
6048
6049 default:
6050 gcc_unreachable ();
6051 }
6052 break;
6053 }
6054 return need_barrier;
6055 }
6056
6057 /* Clear out the state for group_barrier_needed at the start of a
6058 sequence of insns. */
6059
6060 static void
6061 init_insn_group_barriers (void)
6062 {
6063 memset (rws_sum, 0, sizeof (rws_sum));
6064 first_instruction = 1;
6065 }
6066
6067 /* Given the current state, determine whether a group barrier (a stop bit) is
6068 necessary before INSN. Return nonzero if so. This modifies the state to
6069 include the effects of INSN as a side-effect. */
6070
6071 static int
6072 group_barrier_needed (rtx insn)
6073 {
6074 rtx pat;
6075 int need_barrier = 0;
6076 struct reg_flags flags;
6077
6078 memset (&flags, 0, sizeof (flags));
6079 switch (GET_CODE (insn))
6080 {
6081 case NOTE:
6082 break;
6083
6084 case BARRIER:
6085 /* A barrier doesn't imply an instruction group boundary. */
6086 break;
6087
6088 case CODE_LABEL:
6089 memset (rws_insn, 0, sizeof (rws_insn));
6090 return 1;
6091
6092 case CALL_INSN:
6093 flags.is_branch = 1;
6094 flags.is_sibcall = SIBLING_CALL_P (insn);
6095 memset (rws_insn, 0, sizeof (rws_insn));
6096
6097 /* Don't bundle a call following another call. */
6098 if ((pat = prev_active_insn (insn))
6099 && GET_CODE (pat) == CALL_INSN)
6100 {
6101 need_barrier = 1;
6102 break;
6103 }
6104
6105 need_barrier = rtx_needs_barrier (PATTERN (insn), flags, 0);
6106 break;
6107
6108 case JUMP_INSN:
6109 if (!ia64_spec_check_p (insn))
6110 flags.is_branch = 1;
6111
6112 /* Don't bundle a jump following a call. */
6113 if ((pat = prev_active_insn (insn))
6114 && GET_CODE (pat) == CALL_INSN)
6115 {
6116 need_barrier = 1;
6117 break;
6118 }
6119 /* FALLTHRU */
6120
6121 case INSN:
6122 if (GET_CODE (PATTERN (insn)) == USE
6123 || GET_CODE (PATTERN (insn)) == CLOBBER)
6124 /* Don't care about USE and CLOBBER "insns"---those are used to
6125 indicate to the optimizer that it shouldn't get rid of
6126 certain operations. */
6127 break;
6128
6129 pat = PATTERN (insn);
6130
6131 /* Ug. Hack hacks hacked elsewhere. */
6132 switch (recog_memoized (insn))
6133 {
6134 /* We play dependency tricks with the epilogue in order
6135 to get proper schedules. Undo this for dv analysis. */
6136 case CODE_FOR_epilogue_deallocate_stack:
6137 case CODE_FOR_prologue_allocate_stack:
6138 pat = XVECEXP (pat, 0, 0);
6139 break;
6140
6141 /* The pattern we use for br.cloop confuses the code above.
6142 The second element of the vector is representative. */
6143 case CODE_FOR_doloop_end_internal:
6144 pat = XVECEXP (pat, 0, 1);
6145 break;
6146
6147 /* Doesn't generate code. */
6148 case CODE_FOR_pred_rel_mutex:
6149 case CODE_FOR_prologue_use:
6150 return 0;
6151
6152 default:
6153 break;
6154 }
6155
6156 memset (rws_insn, 0, sizeof (rws_insn));
6157 need_barrier = rtx_needs_barrier (pat, flags, 0);
6158
6159 /* Check to see if the previous instruction was a volatile
6160 asm. */
6161 if (! need_barrier)
6162 need_barrier = rws_access_regno (REG_VOLATILE, flags, 0);
6163
6164 break;
6165
6166 default:
6167 gcc_unreachable ();
6168 }
6169
6170 if (first_instruction && INSN_P (insn)
6171 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
6172 && GET_CODE (PATTERN (insn)) != USE
6173 && GET_CODE (PATTERN (insn)) != CLOBBER)
6174 {
6175 need_barrier = 0;
6176 first_instruction = 0;
6177 }
6178
6179 return need_barrier;
6180 }
6181
6182 /* Like group_barrier_needed, but do not clobber the current state. */
6183
6184 static int
6185 safe_group_barrier_needed (rtx insn)
6186 {
6187 int saved_first_instruction;
6188 int t;
6189
6190 saved_first_instruction = first_instruction;
6191 in_safe_group_barrier = 1;
6192
6193 t = group_barrier_needed (insn);
6194
6195 first_instruction = saved_first_instruction;
6196 in_safe_group_barrier = 0;
6197
6198 return t;
6199 }
6200
6201 /* Scan the current function and insert stop bits as necessary to
6202 eliminate dependencies. This function assumes that a final
6203 instruction scheduling pass has been run which has already
6204 inserted most of the necessary stop bits. This function only
6205 inserts new ones at basic block boundaries, since these are
6206 invisible to the scheduler. */
6207
6208 static void
6209 emit_insn_group_barriers (FILE *dump)
6210 {
6211 rtx insn;
6212 rtx last_label = 0;
6213 int insns_since_last_label = 0;
6214
6215 init_insn_group_barriers ();
6216
6217 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6218 {
6219 if (GET_CODE (insn) == CODE_LABEL)
6220 {
6221 if (insns_since_last_label)
6222 last_label = insn;
6223 insns_since_last_label = 0;
6224 }
6225 else if (GET_CODE (insn) == NOTE
6226 && NOTE_KIND (insn) == NOTE_INSN_BASIC_BLOCK)
6227 {
6228 if (insns_since_last_label)
6229 last_label = insn;
6230 insns_since_last_label = 0;
6231 }
6232 else if (GET_CODE (insn) == INSN
6233 && GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
6234 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
6235 {
6236 init_insn_group_barriers ();
6237 last_label = 0;
6238 }
6239 else if (INSN_P (insn))
6240 {
6241 insns_since_last_label = 1;
6242
6243 if (group_barrier_needed (insn))
6244 {
6245 if (last_label)
6246 {
6247 if (dump)
6248 fprintf (dump, "Emitting stop before label %d\n",
6249 INSN_UID (last_label));
6250 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), last_label);
6251 insn = last_label;
6252
6253 init_insn_group_barriers ();
6254 last_label = 0;
6255 }
6256 }
6257 }
6258 }
6259 }
6260
6261 /* Like emit_insn_group_barriers, but used when no final scheduling pass
6262 has been run. This function has to emit all necessary group barriers. */
6263
6264 static void
6265 emit_all_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
6266 {
6267 rtx insn;
6268
6269 init_insn_group_barriers ();
6270
6271 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6272 {
6273 if (GET_CODE (insn) == BARRIER)
6274 {
6275 rtx last = prev_active_insn (insn);
6276
6277 if (! last)
6278 continue;
6279 if (GET_CODE (last) == JUMP_INSN
6280 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
6281 last = prev_active_insn (last);
6282 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
6283 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
6284
6285 init_insn_group_barriers ();
6286 }
6287 else if (INSN_P (insn))
6288 {
6289 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
6290 init_insn_group_barriers ();
6291 else if (group_barrier_needed (insn))
6292 {
6293 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
6294 init_insn_group_barriers ();
6295 group_barrier_needed (insn);
6296 }
6297 }
6298 }
6299 }
6300
6301 \f
6302
6303 /* Instruction scheduling support. */
6304
6305 #define NR_BUNDLES 10
6306
6307 /* A list of names of all available bundles. */
6308
6309 static const char *bundle_name [NR_BUNDLES] =
6310 {
6311 ".mii",
6312 ".mmi",
6313 ".mfi",
6314 ".mmf",
6315 #if NR_BUNDLES == 10
6316 ".bbb",
6317 ".mbb",
6318 #endif
6319 ".mib",
6320 ".mmb",
6321 ".mfb",
6322 ".mlx"
6323 };
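/* Each name spells out the slot types of the bundle: m = memory,
   i = integer, f = floating point, b = branch, and lx = a long-immediate
   pair occupying the last two slots.  For instance ".mfi" holds an M-unit,
   an F-unit and an I-unit instruction.  */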
6324
6325 /* Nonzero if we should insert stop bits into the schedule. */
6326
6327 int ia64_final_schedule = 0;
6328
6329 /* Codes of the corresponding queried units: */
6330
6331 static int _0mii_, _0mmi_, _0mfi_, _0mmf_;
6332 static int _0bbb_, _0mbb_, _0mib_, _0mmb_, _0mfb_, _0mlx_;
6333
6334 static int _1mii_, _1mmi_, _1mfi_, _1mmf_;
6335 static int _1bbb_, _1mbb_, _1mib_, _1mmb_, _1mfb_, _1mlx_;
6336
6337 static int pos_1, pos_2, pos_3, pos_4, pos_5, pos_6;
6338
6339 /* The following variable value is an insn group barrier. */
6340
6341 static rtx dfa_stop_insn;
6342
6343 /* The following variable value is the last issued insn. */
6344
6345 static rtx last_scheduled_insn;
6346
6347 /* The following variable value is a pointer to a DFA state used as
6348 a temporary variable. */
6349
6350 static state_t temp_dfa_state = NULL;
6351
6352 /* The following variable value is the DFA state after issuing the last
6353 insn. */
6354
6355 static state_t prev_cycle_state = NULL;
6356
6357 /* The following array element values are TRUE if the corresponding
6358 insn requires stop bits to be added before it. */
6359
6360 static char *stops_p = NULL;
6361
6362 /* The following variable is used to set up the array mentioned above. */
6363
6364 static int stop_before_p = 0;
6365
6366 /* The following variable value is the length of the arrays `clocks' and
6367 `add_cycles'. */
6368
6369 static int clocks_length;
6370
6371 /* The following array element values are cycles on which the
6372 corresponding insn will be issued. The array is used only for
6373 Itanium1. */
6374
6375 static int *clocks;
6376
6377 /* The following array element values are the numbers of cycles that should
6378 be added to improve insn scheduling for MM_insns for Itanium1. */
6379
6380 static int *add_cycles;
6381
6382 /* The following variable is the number of data speculations in progress. */
6383 static int pending_data_specs = 0;
6384
6385 /* Number of memory references on current and three future processor cycles. */
6386 static char mem_ops_in_group[4];
6387
6388 /* Number of the current processor cycle (from the scheduler's point of view). */
6389 static int current_cycle;
6390
6391 static rtx ia64_single_set (rtx);
6392 static void ia64_emit_insn_before (rtx, rtx);
6393
6394 /* Map a bundle number to its pseudo-op. */
6395
6396 const char *
6397 get_bundle_name (int b)
6398 {
6399 return bundle_name[b];
6400 }
6401
6402
6403 /* Return the maximum number of instructions a cpu can issue. */
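/* Six corresponds to the two three-slot bundles an Itanium core can
   disperse per clock.  */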
6404
6405 static int
6406 ia64_issue_rate (void)
6407 {
6408 return 6;
6409 }
6410
6411 /* Helper function - like single_set, but look inside COND_EXEC. */
6412
6413 static rtx
6414 ia64_single_set (rtx insn)
6415 {
6416 rtx x = PATTERN (insn), ret;
6417 if (GET_CODE (x) == COND_EXEC)
6418 x = COND_EXEC_CODE (x);
6419 if (GET_CODE (x) == SET)
6420 return x;
6421
6422 /* Special-case prologue_allocate_stack and epilogue_deallocate_stack here.
6423 Although they are not classical single sets, the second set is there just
6424 to protect the insn from moving past FP-relative stack accesses. */
6425 switch (recog_memoized (insn))
6426 {
6427 case CODE_FOR_prologue_allocate_stack:
6428 case CODE_FOR_epilogue_deallocate_stack:
6429 ret = XVECEXP (x, 0, 0);
6430 break;
6431
6432 default:
6433 ret = single_set_2 (insn, x);
6434 break;
6435 }
6436
6437 return ret;
6438 }
6439
6440 /* Adjust the cost of a scheduling dependency.
6441 Return the new cost of a dependency of type DEP_TYPE of INSN on DEP_INSN.
6442 COST is the current cost, DW is the dependency weakness. */
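/* For example, a load whose producing store is likely to alias
   (DW == MIN_DEP_WEAK) is charged PARAM_SCHED_MEM_TRUE_DEP_COST rather than
   the plain DFA latency, while an apparent true dependence of a branch or
   sibcall on a store (through the MEM inside the call's SYMBOL_REF) costs
   nothing.  */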
6443 static int
6444 ia64_adjust_cost_2 (rtx insn, int dep_type1, rtx dep_insn, int cost, dw_t dw)
6445 {
6446 enum reg_note dep_type = (enum reg_note) dep_type1;
6447 enum attr_itanium_class dep_class;
6448 enum attr_itanium_class insn_class;
6449
6450 insn_class = ia64_safe_itanium_class (insn);
6451 dep_class = ia64_safe_itanium_class (dep_insn);
6452
6453 /* Treat true memory dependencies separately. Ignore apparent true
6454 dependence between store and call (call has a MEM inside a SYMBOL_REF). */
6455 if (dep_type == REG_DEP_TRUE
6456 && (dep_class == ITANIUM_CLASS_ST || dep_class == ITANIUM_CLASS_STF)
6457 && (insn_class == ITANIUM_CLASS_BR || insn_class == ITANIUM_CLASS_SCALL))
6458 return 0;
6459
6460 if (dw == MIN_DEP_WEAK)
6461 /* Store and load are likely to alias, use higher cost to avoid stall. */
6462 return PARAM_VALUE (PARAM_SCHED_MEM_TRUE_DEP_COST);
6463 else if (dw > MIN_DEP_WEAK)
6464 {
6465 /* Store and load are less likely to alias. */
6466 if (mflag_sched_fp_mem_deps_zero_cost && dep_class == ITANIUM_CLASS_STF)
6467 /* Assume there will be no cache conflict for floating-point data.
6468 For integer data, L1 conflict penalty is huge (17 cycles), so we
6469 never assume it will not cause a conflict. */
6470 return 0;
6471 else
6472 return cost;
6473 }
6474
6475 if (dep_type != REG_DEP_OUTPUT)
6476 return cost;
6477
6478 if (dep_class == ITANIUM_CLASS_ST || dep_class == ITANIUM_CLASS_STF
6479 || insn_class == ITANIUM_CLASS_ST || insn_class == ITANIUM_CLASS_STF)
6480 return 0;
6481
6482 return cost;
6483 }
6484
6485 /* Like emit_insn_before, but skip cycle_display notes.
6486 ??? When cycle display notes are implemented, update this. */
6487
6488 static void
6489 ia64_emit_insn_before (rtx insn, rtx before)
6490 {
6491 emit_insn_before (insn, before);
6492 }
6493
6494 /* The following function marks insns that produce addresses for load
6495 and store insns. Such insns will be placed into M slots because this
6496 decreases latency time for Itanium1 (see function
6497 `ia64_produce_address_p' and the DFA descriptions). */
6498
6499 static void
6500 ia64_dependencies_evaluation_hook (rtx head, rtx tail)
6501 {
6502 rtx insn, next, next_tail;
6503
6504 /* Before reload, which_alternative is not set, which means that
6505 ia64_safe_itanium_class will produce wrong results for (at least)
6506 move instructions. */
6507 if (!reload_completed)
6508 return;
6509
6510 next_tail = NEXT_INSN (tail);
6511 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
6512 if (INSN_P (insn))
6513 insn->call = 0;
6514 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
6515 if (INSN_P (insn)
6516 && ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IALU)
6517 {
6518 sd_iterator_def sd_it;
6519 dep_t dep;
6520 bool has_mem_op_consumer_p = false;
6521
6522 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
6523 {
6524 enum attr_itanium_class c;
6525
6526 if (DEP_TYPE (dep) != REG_DEP_TRUE)
6527 continue;
6528
6529 next = DEP_CON (dep);
6530 c = ia64_safe_itanium_class (next);
6531 if ((c == ITANIUM_CLASS_ST
6532 || c == ITANIUM_CLASS_STF)
6533 && ia64_st_address_bypass_p (insn, next))
6534 {
6535 has_mem_op_consumer_p = true;
6536 break;
6537 }
6538 else if ((c == ITANIUM_CLASS_LD
6539 || c == ITANIUM_CLASS_FLD
6540 || c == ITANIUM_CLASS_FLDP)
6541 && ia64_ld_address_bypass_p (insn, next))
6542 {
6543 has_mem_op_consumer_p = true;
6544 break;
6545 }
6546 }
6547
6548 insn->call = has_mem_op_consumer_p;
6549 }
6550 }
6551
6552 /* We're beginning a new block. Initialize data structures as necessary. */
6553
6554 static void
6555 ia64_sched_init (FILE *dump ATTRIBUTE_UNUSED,
6556 int sched_verbose ATTRIBUTE_UNUSED,
6557 int max_ready ATTRIBUTE_UNUSED)
6558 {
6559 #ifdef ENABLE_CHECKING
6560 rtx insn;
6561
6562 if (!sel_sched_p () && reload_completed)
6563 for (insn = NEXT_INSN (current_sched_info->prev_head);
6564 insn != current_sched_info->next_tail;
6565 insn = NEXT_INSN (insn))
6566 gcc_assert (!SCHED_GROUP_P (insn));
6567 #endif
6568 last_scheduled_insn = NULL_RTX;
6569 init_insn_group_barriers ();
6570
6571 current_cycle = 0;
6572 memset (mem_ops_in_group, 0, sizeof (mem_ops_in_group));
6573 }
6574
6575 /* We're beginning a scheduling pass. Check assertion. */
6576
6577 static void
6578 ia64_sched_init_global (FILE *dump ATTRIBUTE_UNUSED,
6579 int sched_verbose ATTRIBUTE_UNUSED,
6580 int max_ready ATTRIBUTE_UNUSED)
6581 {
6582 gcc_assert (pending_data_specs == 0);
6583 }
6584
6585 /* Scheduling pass is now finished. Free/reset static variable. */
6586 static void
6587 ia64_sched_finish_global (FILE *dump ATTRIBUTE_UNUSED,
6588 int sched_verbose ATTRIBUTE_UNUSED)
6589 {
6590 gcc_assert (pending_data_specs == 0);
6591 }
6592
6593 /* Return TRUE if INSN is a load (either normal or speculative, but not a
6594 speculation check), FALSE otherwise. */
6595 static bool
6596 is_load_p (rtx insn)
6597 {
6598 enum attr_itanium_class insn_class = ia64_safe_itanium_class (insn);
6599
6600 return
6601 ((insn_class == ITANIUM_CLASS_LD || insn_class == ITANIUM_CLASS_FLD)
6602 && get_attr_check_load (insn) == CHECK_LOAD_NO);
6603 }
6604
6605 /* If INSN is a memory reference, memoize it in the MEM_OPS_IN_GROUP global
6606 array (taking into account the 3-cycle cache reference postponing for
6607 stores: Intel Itanium 2 Reference Manual for Software Development and
6608 Optimization, 6.7.3.1). */
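/* For example, a store issued on cycle N is charged to the slot for cycle
   N + 3, since that is when its cache reference is assumed to occur; loads
   are charged to the current cycle directly.  */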
6609 static void
6610 record_memory_reference (rtx insn)
6611 {
6612 enum attr_itanium_class insn_class = ia64_safe_itanium_class (insn);
6613
6614 switch (insn_class) {
6615 case ITANIUM_CLASS_FLD:
6616 case ITANIUM_CLASS_LD:
6617 mem_ops_in_group[current_cycle % 4]++;
6618 break;
6619 case ITANIUM_CLASS_STF:
6620 case ITANIUM_CLASS_ST:
6621 mem_ops_in_group[(current_cycle + 3) % 4]++;
6622 break;
6623 default:;
6624 }
6625 }
6626
6627 /* We are about to begin issuing insns for this clock cycle.
6628 Override the default sort algorithm to better slot instructions. */
6629
6630 static int
6631 ia64_dfa_sched_reorder (FILE *dump, int sched_verbose, rtx *ready,
6632 int *pn_ready, int clock_var,
6633 int reorder_type)
6634 {
6635 int n_asms;
6636 int n_ready = *pn_ready;
6637 rtx *e_ready = ready + n_ready;
6638 rtx *insnp;
6639
6640 if (sched_verbose)
6641 fprintf (dump, "// ia64_dfa_sched_reorder (type %d):\n", reorder_type);
6642
6643 if (reorder_type == 0)
6644 {
6645 /* First, move all USEs, CLOBBERs and other crud out of the way. */
6646 n_asms = 0;
6647 for (insnp = ready; insnp < e_ready; insnp++)
6648 if (insnp < e_ready)
6649 {
6650 rtx insn = *insnp;
6651 enum attr_type t = ia64_safe_type (insn);
6652 if (t == TYPE_UNKNOWN)
6653 {
6654 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
6655 || asm_noperands (PATTERN (insn)) >= 0)
6656 {
6657 rtx lowest = ready[n_asms];
6658 ready[n_asms] = insn;
6659 *insnp = lowest;
6660 n_asms++;
6661 }
6662 else
6663 {
6664 rtx highest = ready[n_ready - 1];
6665 ready[n_ready - 1] = insn;
6666 *insnp = highest;
6667 return 1;
6668 }
6669 }
6670 }
6671
6672 if (n_asms < n_ready)
6673 {
6674 /* Some normal insns to process. Skip the asms. */
6675 ready += n_asms;
6676 n_ready -= n_asms;
6677 }
6678 else if (n_ready > 0)
6679 return 1;
6680 }
6681
6682 if (ia64_final_schedule)
6683 {
6684 int deleted = 0;
6685 int nr_need_stop = 0;
6686
6687 for (insnp = ready; insnp < e_ready; insnp++)
6688 if (safe_group_barrier_needed (*insnp))
6689 nr_need_stop++;
6690
6691 if (reorder_type == 1 && n_ready == nr_need_stop)
6692 return 0;
6693 if (reorder_type == 0)
6694 return 1;
6695 insnp = e_ready;
6696 /* Move down everything that needs a stop bit, preserving
6697 relative order. */
6698 while (insnp-- > ready + deleted)
6699 while (insnp >= ready + deleted)
6700 {
6701 rtx insn = *insnp;
6702 if (! safe_group_barrier_needed (insn))
6703 break;
6704 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
6705 *ready = insn;
6706 deleted++;
6707 }
6708 n_ready -= deleted;
6709 ready += deleted;
6710 }
6711
6712 current_cycle = clock_var;
6713 if (reload_completed && mem_ops_in_group[clock_var % 4] >= ia64_max_memory_insns)
6714 {
6715 int moved = 0;
6716
6717 insnp = e_ready;
6718 /* Move down loads/stores, preserving relative order. */
6719 while (insnp-- > ready + moved)
6720 while (insnp >= ready + moved)
6721 {
6722 rtx insn = *insnp;
6723 if (! is_load_p (insn))
6724 break;
6725 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
6726 *ready = insn;
6727 moved++;
6728 }
6729 n_ready -= moved;
6730 ready += moved;
6731 }
6732
6733 return 1;
6734 }
6735
6736 /* We are about to begin issuing insns for this clock cycle. Override
6737 the default sort algorithm to better slot instructions. */
6738
6739 static int
6740 ia64_sched_reorder (FILE *dump, int sched_verbose, rtx *ready, int *pn_ready,
6741 int clock_var)
6742 {
6743 return ia64_dfa_sched_reorder (dump, sched_verbose, ready,
6744 pn_ready, clock_var, 0);
6745 }
6746
6747 /* Like ia64_sched_reorder, but called after issuing each insn.
6748 Override the default sort algorithm to better slot instructions. */
6749
6750 static int
6751 ia64_sched_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
6752 int sched_verbose ATTRIBUTE_UNUSED, rtx *ready,
6753 int *pn_ready, int clock_var)
6754 {
6755 if (ia64_tune == PROCESSOR_ITANIUM && reload_completed && last_scheduled_insn)
6756 clocks [INSN_UID (last_scheduled_insn)] = clock_var;
6757 return ia64_dfa_sched_reorder (dump, sched_verbose, ready, pn_ready,
6758 clock_var, 1);
6759 }
6760
6761 /* We are about to issue INSN. Return the number of insns left on the
6762 ready queue that can be issued this cycle. */
6763
6764 static int
6765 ia64_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
6766 int sched_verbose ATTRIBUTE_UNUSED,
6767 rtx insn ATTRIBUTE_UNUSED,
6768 int can_issue_more ATTRIBUTE_UNUSED)
6769 {
6770 if (sched_deps_info->generate_spec_deps && !sel_sched_p ())
6771 /* Modulo scheduling does not extend h_i_d when emitting
6772 new instructions. Don't use h_i_d if we don't have to. */
6773 {
6774 if (DONE_SPEC (insn) & BEGIN_DATA)
6775 pending_data_specs++;
6776 if (CHECK_SPEC (insn) & BEGIN_DATA)
6777 pending_data_specs--;
6778 }
6779
6780 last_scheduled_insn = insn;
6781 memcpy (prev_cycle_state, curr_state, dfa_state_size);
6782 if (reload_completed)
6783 {
6784 int needed = group_barrier_needed (insn);
6785
6786 gcc_assert (!needed);
6787 if (GET_CODE (insn) == CALL_INSN)
6788 init_insn_group_barriers ();
6789 stops_p [INSN_UID (insn)] = stop_before_p;
6790 stop_before_p = 0;
6791
6792 record_memory_reference (insn);
6793 }
6794 return 1;
6795 }
6796
6797 /* We are choosing insn from the ready queue. Return nonzero if INSN
6798 can be chosen. */
6799
6800 static int
6801 ia64_first_cycle_multipass_dfa_lookahead_guard (rtx insn)
6802 {
6803 gcc_assert (insn && INSN_P (insn));
6804 return ((!reload_completed
6805 || !safe_group_barrier_needed (insn))
6806 && ia64_first_cycle_multipass_dfa_lookahead_guard_spec (insn)
6807 && (!mflag_sched_mem_insns_hard_limit
6808 || !is_load_p (insn)
6809 || mem_ops_in_group[current_cycle % 4] < ia64_max_memory_insns));
6810 }
6811
6812 /* We are choosing insn from the ready queue. Return nonzero if INSN
6813 can be chosen. */
6814
6815 static bool
6816 ia64_first_cycle_multipass_dfa_lookahead_guard_spec (const_rtx insn)
6817 {
6818 gcc_assert (insn && INSN_P (insn));
6819 /* The size of the ALAT is 32. Since we perform conservative data
6820 speculation, we keep the ALAT half-empty. */
6821 return (pending_data_specs < 16
6822 || !(TODO_SPEC (insn) & BEGIN_DATA));
6823 }
6824
6825 /* The following variable value is pseudo-insn used by the DFA insn
6826 scheduler to change the DFA state when the simulated clock is
6827 increased. */
6828
6829 static rtx dfa_pre_cycle_insn;
6830
6831 /* Returns 1 when a meaningful insn was scheduled between the last group
6832 barrier and LAST. */
6833 static int
6834 scheduled_good_insn (rtx last)
6835 {
6836 if (last && recog_memoized (last) >= 0)
6837 return 1;
6838
6839 for ( ;
6840 last != NULL && !NOTE_INSN_BASIC_BLOCK_P (last)
6841 && !stops_p[INSN_UID (last)];
6842 last = PREV_INSN (last))
6843 /* We could hit a NOTE_INSN_DELETED here which is actually outside
6844 the ebb we're scheduling. */
6845 if (INSN_P (last) && recog_memoized (last) >= 0)
6846 return 1;
6847
6848 return 0;
6849 }
6850
6851 /* We are about to begin issuing INSN. Return nonzero if we cannot
6852 issue it on the given cycle CLOCK, and return zero if we should not sort
6853 the ready queue on the next clock start. */
6854
6855 static int
6856 ia64_dfa_new_cycle (FILE *dump, int verbose, rtx insn, int last_clock,
6857 int clock, int *sort_p)
6858 {
6859 int setup_clocks_p = FALSE;
6860
6861 gcc_assert (insn && INSN_P (insn));
6862 /* When a group barrier is needed for insn, last_scheduled_insn
6863 should be set. */
6864 gcc_assert (!(reload_completed && safe_group_barrier_needed (insn))
6865 || last_scheduled_insn);
6866
6867 if ((reload_completed
6868 && (safe_group_barrier_needed (insn)
6869 || (mflag_sched_stop_bits_after_every_cycle
6870 && last_clock != clock
6871 && last_scheduled_insn
6872 && scheduled_good_insn (last_scheduled_insn))))
6873 || (last_scheduled_insn
6874 && (GET_CODE (last_scheduled_insn) == CALL_INSN
6875 || GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
6876 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)))
6877 {
6878 init_insn_group_barriers ();
6879
6880 if (verbose && dump)
6881 fprintf (dump, "// Stop should be before %d%s\n", INSN_UID (insn),
6882 last_clock == clock ? " + cycle advance" : "");
6883
6884 stop_before_p = 1;
6885 current_cycle = clock;
6886 mem_ops_in_group[current_cycle % 4] = 0;
6887
6888 if (last_clock == clock)
6889 {
6890 state_transition (curr_state, dfa_stop_insn);
6891 if (TARGET_EARLY_STOP_BITS)
6892 *sort_p = (last_scheduled_insn == NULL_RTX
6893 || GET_CODE (last_scheduled_insn) != CALL_INSN);
6894 else
6895 *sort_p = 0;
6896 return 1;
6897 }
6898 else if (reload_completed)
6899 setup_clocks_p = TRUE;
6900
6901 if (last_scheduled_insn)
6902 {
6903 if (GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
6904 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)
6905 state_reset (curr_state);
6906 else
6907 {
6908 memcpy (curr_state, prev_cycle_state, dfa_state_size);
6909 state_transition (curr_state, dfa_stop_insn);
6910 state_transition (curr_state, dfa_pre_cycle_insn);
6911 state_transition (curr_state, NULL);
6912 }
6913 }
6914 }
6915 else if (reload_completed)
6916 setup_clocks_p = TRUE;
6917
6918 if (setup_clocks_p && ia64_tune == PROCESSOR_ITANIUM
6919 && GET_CODE (PATTERN (insn)) != ASM_INPUT
6920 && asm_noperands (PATTERN (insn)) < 0)
6921 {
6922 enum attr_itanium_class c = ia64_safe_itanium_class (insn);
6923
6924 if (c != ITANIUM_CLASS_MMMUL && c != ITANIUM_CLASS_MMSHF)
6925 {
6926 sd_iterator_def sd_it;
6927 dep_t dep;
6928 int d = -1;
6929
6930 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
6931 if (DEP_TYPE (dep) == REG_DEP_TRUE)
6932 {
6933 enum attr_itanium_class dep_class;
6934 rtx dep_insn = DEP_PRO (dep);
6935
6936 dep_class = ia64_safe_itanium_class (dep_insn);
6937 if ((dep_class == ITANIUM_CLASS_MMMUL
6938 || dep_class == ITANIUM_CLASS_MMSHF)
6939 && last_clock - clocks [INSN_UID (dep_insn)] < 4
6940 && (d < 0
6941 || last_clock - clocks [INSN_UID (dep_insn)] < d))
6942 d = last_clock - clocks [INSN_UID (dep_insn)];
6943 }
6944 if (d >= 0)
6945 add_cycles [INSN_UID (insn)] = 3 - d;
6946 }
6947 }
6948
6949 return 0;
6950 }
6951
6952 /* Implement targetm.sched.h_i_d_extended hook.
6953 Extend internal data structures. */
6954 static void
6955 ia64_h_i_d_extended (void)
6956 {
6957 if (stops_p != NULL)
6958 {
6959 int new_clocks_length = get_max_uid () * 3 / 2;
6960
6961 stops_p = (char *) xrecalloc (stops_p, new_clocks_length, clocks_length, 1);
6962
6963 if (ia64_tune == PROCESSOR_ITANIUM)
6964 {
6965 clocks = (int *) xrecalloc (clocks, new_clocks_length, clocks_length,
6966 sizeof (int));
6967 add_cycles = (int *) xrecalloc (add_cycles, new_clocks_length,
6968 clocks_length, sizeof (int));
6969 }
6970
6971 clocks_length = new_clocks_length;
6972 }
6973 }
6974 \f
6975
6976 /* This structure describes the data used by the backend to guide scheduling.
6977 When the current scheduling point is switched, this data should be saved
6978 and restored later, if the scheduler returns to this point. */
6979 struct _ia64_sched_context
6980 {
6981 state_t prev_cycle_state;
6982 rtx last_scheduled_insn;
6983 struct reg_write_state rws_sum[NUM_REGS];
6984 struct reg_write_state rws_insn[NUM_REGS];
6985 int first_instruction;
6986 int pending_data_specs;
6987 int current_cycle;
6988 char mem_ops_in_group[4];
6989 };
6990 typedef struct _ia64_sched_context *ia64_sched_context_t;
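/* The fields above mirror, one for one, the file-scope scheduling state
   (prev_cycle_state, last_scheduled_insn, rws_sum, rws_insn,
   first_instruction, pending_data_specs, current_cycle and
   mem_ops_in_group) that ia64_init_sched_context and ia64_set_sched_context
   save and restore below.  */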
6991
6992 /* Allocates a scheduling context. */
6993 static void *
6994 ia64_alloc_sched_context (void)
6995 {
6996 return xmalloc (sizeof (struct _ia64_sched_context));
6997 }
6998
6999 /* Initializes the _SC context with clean data, if CLEAN_P, and from
7000 the global context otherwise. */
7001 static void
7002 ia64_init_sched_context (void *_sc, bool clean_p)
7003 {
7004 ia64_sched_context_t sc = (ia64_sched_context_t) _sc;
7005
7006 sc->prev_cycle_state = xmalloc (dfa_state_size);
7007 if (clean_p)
7008 {
7009 state_reset (sc->prev_cycle_state);
7010 sc->last_scheduled_insn = NULL_RTX;
7011 memset (sc->rws_sum, 0, sizeof (rws_sum));
7012 memset (sc->rws_insn, 0, sizeof (rws_insn));
7013 sc->first_instruction = 1;
7014 sc->pending_data_specs = 0;
7015 sc->current_cycle = 0;
7016 memset (sc->mem_ops_in_group, 0, sizeof (mem_ops_in_group));
7017 }
7018 else
7019 {
7020 memcpy (sc->prev_cycle_state, prev_cycle_state, dfa_state_size);
7021 sc->last_scheduled_insn = last_scheduled_insn;
7022 memcpy (sc->rws_sum, rws_sum, sizeof (rws_sum));
7023 memcpy (sc->rws_insn, rws_insn, sizeof (rws_insn));
7024 sc->first_instruction = first_instruction;
7025 sc->pending_data_specs = pending_data_specs;
7026 sc->current_cycle = current_cycle;
7027 memcpy (sc->mem_ops_in_group, mem_ops_in_group, sizeof (mem_ops_in_group));
7028 }
7029 }
7030
7031 /* Sets the global scheduling context to the one pointed to by _SC. */
7032 static void
7033 ia64_set_sched_context (void *_sc)
7034 {
7035 ia64_sched_context_t sc = (ia64_sched_context_t) _sc;
7036
7037 gcc_assert (sc != NULL);
7038
7039 memcpy (prev_cycle_state, sc->prev_cycle_state, dfa_state_size);
7040 last_scheduled_insn = sc->last_scheduled_insn;
7041 memcpy (rws_sum, sc->rws_sum, sizeof (rws_sum));
7042 memcpy (rws_insn, sc->rws_insn, sizeof (rws_insn));
7043 first_instruction = sc->first_instruction;
7044 pending_data_specs = sc->pending_data_specs;
7045 current_cycle = sc->current_cycle;
7046 memcpy (mem_ops_in_group, sc->mem_ops_in_group, sizeof (mem_ops_in_group));
7047 }
7048
7049 /* Clears the data in the _SC scheduling context. */
7050 static void
7051 ia64_clear_sched_context (void *_sc)
7052 {
7053 ia64_sched_context_t sc = (ia64_sched_context_t) _sc;
7054
7055 free (sc->prev_cycle_state);
7056 sc->prev_cycle_state = NULL;
7057 }
7058
7059 /* Frees the _SC scheduling context. */
7060 static void
7061 ia64_free_sched_context (void *_sc)
7062 {
7063 gcc_assert (_sc != NULL);
7064
7065 free (_sc);
7066 }
7067
7068 typedef rtx (* gen_func_t) (rtx, rtx);
7069
7070 /* Return a function that will generate a load of mode MODE_NO
7071 with speculation types TS. */
7072 static gen_func_t
7073 get_spec_load_gen_function (ds_t ts, int mode_no)
7074 {
7075 static gen_func_t gen_ld_[] = {
7076 gen_movbi,
7077 gen_movqi_internal,
7078 gen_movhi_internal,
7079 gen_movsi_internal,
7080 gen_movdi_internal,
7081 gen_movsf_internal,
7082 gen_movdf_internal,
7083 gen_movxf_internal,
7084 gen_movti_internal,
7085 gen_zero_extendqidi2,
7086 gen_zero_extendhidi2,
7087 gen_zero_extendsidi2,
7088 };
7089
7090 static gen_func_t gen_ld_a[] = {
7091 gen_movbi_advanced,
7092 gen_movqi_advanced,
7093 gen_movhi_advanced,
7094 gen_movsi_advanced,
7095 gen_movdi_advanced,
7096 gen_movsf_advanced,
7097 gen_movdf_advanced,
7098 gen_movxf_advanced,
7099 gen_movti_advanced,
7100 gen_zero_extendqidi2_advanced,
7101 gen_zero_extendhidi2_advanced,
7102 gen_zero_extendsidi2_advanced,
7103 };
7104 static gen_func_t gen_ld_s[] = {
7105 gen_movbi_speculative,
7106 gen_movqi_speculative,
7107 gen_movhi_speculative,
7108 gen_movsi_speculative,
7109 gen_movdi_speculative,
7110 gen_movsf_speculative,
7111 gen_movdf_speculative,
7112 gen_movxf_speculative,
7113 gen_movti_speculative,
7114 gen_zero_extendqidi2_speculative,
7115 gen_zero_extendhidi2_speculative,
7116 gen_zero_extendsidi2_speculative,
7117 };
7118 static gen_func_t gen_ld_sa[] = {
7119 gen_movbi_speculative_advanced,
7120 gen_movqi_speculative_advanced,
7121 gen_movhi_speculative_advanced,
7122 gen_movsi_speculative_advanced,
7123 gen_movdi_speculative_advanced,
7124 gen_movsf_speculative_advanced,
7125 gen_movdf_speculative_advanced,
7126 gen_movxf_speculative_advanced,
7127 gen_movti_speculative_advanced,
7128 gen_zero_extendqidi2_speculative_advanced,
7129 gen_zero_extendhidi2_speculative_advanced,
7130 gen_zero_extendsidi2_speculative_advanced,
7131 };
7132 static gen_func_t gen_ld_s_a[] = {
7133 gen_movbi_speculative_a,
7134 gen_movqi_speculative_a,
7135 gen_movhi_speculative_a,
7136 gen_movsi_speculative_a,
7137 gen_movdi_speculative_a,
7138 gen_movsf_speculative_a,
7139 gen_movdf_speculative_a,
7140 gen_movxf_speculative_a,
7141 gen_movti_speculative_a,
7142 gen_zero_extendqidi2_speculative_a,
7143 gen_zero_extendhidi2_speculative_a,
7144 gen_zero_extendsidi2_speculative_a,
7145 };
7146
7147 gen_func_t *gen_ld;
7148
7149 if (ts & BEGIN_DATA)
7150 {
7151 if (ts & BEGIN_CONTROL)
7152 gen_ld = gen_ld_sa;
7153 else
7154 gen_ld = gen_ld_a;
7155 }
7156 else if (ts & BEGIN_CONTROL)
7157 {
7158 if ((spec_info->flags & SEL_SCHED_SPEC_DONT_CHECK_CONTROL)
7159 || ia64_needs_block_p (ts))
7160 gen_ld = gen_ld_s;
7161 else
7162 gen_ld = gen_ld_s_a;
7163 }
7164 else if (ts == 0)
7165 gen_ld = gen_ld_;
7166 else
7167 gcc_unreachable ();
7168
7169 return gen_ld[mode_no];
7170 }
7171
7172 /* Constants that help map 'enum machine_mode' to int. */
7173 enum SPEC_MODES
7174 {
7175 SPEC_MODE_INVALID = -1,
7176 SPEC_MODE_FIRST = 0,
7177 SPEC_MODE_FOR_EXTEND_FIRST = 1,
7178 SPEC_MODE_FOR_EXTEND_LAST = 3,
7179 SPEC_MODE_LAST = 8
7180 };
7181
7182 enum
7183 {
7184 /* Offset to reach ZERO_EXTEND patterns. */
7185 SPEC_GEN_EXTEND_OFFSET = SPEC_MODE_LAST - SPEC_MODE_FOR_EXTEND_FIRST + 1
7186 };
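/* For example, a QImode load whose result is zero-extended to DImode maps
   to mode_no 1 + SPEC_GEN_EXTEND_OFFSET == 9, which selects
   gen_zero_extendqidi2 (and its speculative variants) in
   get_spec_load_gen_function above; see get_mode_no_for_insn below, which
   applies the offset.  */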
7187
7188 /* Return the index of MODE. */
7189 static int
7190 ia64_mode_to_int (enum machine_mode mode)
7191 {
7192 switch (mode)
7193 {
7194 case BImode: return 0; /* SPEC_MODE_FIRST */
7195 case QImode: return 1; /* SPEC_MODE_FOR_EXTEND_FIRST */
7196 case HImode: return 2;
7197 case SImode: return 3; /* SPEC_MODE_FOR_EXTEND_LAST */
7198 case DImode: return 4;
7199 case SFmode: return 5;
7200 case DFmode: return 6;
7201 case XFmode: return 7;
7202 case TImode:
7203 /* ??? This mode needs testing. Bypasses for ldfp8 instruction are not
7204 mentioned in itanium[12].md. Predicate fp_register_operand also
7205 needs to be defined. Bottom line: better disable for now. */
7206 return SPEC_MODE_INVALID;
7207 default: return SPEC_MODE_INVALID;
7208 }
7209 }
7210
7211 /* Provide information about speculation capabilities. */
7212 static void
7213 ia64_set_sched_flags (spec_info_t spec_info)
7214 {
7215 unsigned int *flags = &(current_sched_info->flags);
7216
7217 if (*flags & SCHED_RGN
7218 || *flags & SCHED_EBB
7219 || *flags & SEL_SCHED)
7220 {
7221 int mask = 0;
7222
7223 if ((mflag_sched_br_data_spec && !reload_completed && optimize > 0)
7224 || (mflag_sched_ar_data_spec && reload_completed))
7225 {
7226 mask |= BEGIN_DATA;
7227
7228 if (!sel_sched_p ()
7229 && ((mflag_sched_br_in_data_spec && !reload_completed)
7230 || (mflag_sched_ar_in_data_spec && reload_completed)))
7231 mask |= BE_IN_DATA;
7232 }
7233
7234 if (mflag_sched_control_spec
7235 && (!sel_sched_p ()
7236 || reload_completed))
7237 {
7238 mask |= BEGIN_CONTROL;
7239
7240 if (!sel_sched_p () && mflag_sched_in_control_spec)
7241 mask |= BE_IN_CONTROL;
7242 }
7243
7244 spec_info->mask = mask;
7245
7246 if (mask)
7247 {
7248 *flags |= USE_DEPS_LIST | DO_SPECULATION;
7249
7250 if (mask & BE_IN_SPEC)
7251 *flags |= NEW_BBS;
7252
7253 spec_info->flags = 0;
7254
7255 if ((mask & DATA_SPEC) && mflag_sched_prefer_non_data_spec_insns)
7256 spec_info->flags |= PREFER_NON_DATA_SPEC;
7257
7258 if (mask & CONTROL_SPEC)
7259 {
7260 if (mflag_sched_prefer_non_control_spec_insns)
7261 spec_info->flags |= PREFER_NON_CONTROL_SPEC;
7262
7263 if (sel_sched_p () && mflag_sel_sched_dont_check_control_spec)
7264 spec_info->flags |= SEL_SCHED_SPEC_DONT_CHECK_CONTROL;
7265 }
7266
7267 if (sched_verbose >= 1)
7268 spec_info->dump = sched_dump;
7269 else
7270 spec_info->dump = 0;
7271
7272 if (mflag_sched_count_spec_in_critical_path)
7273 spec_info->flags |= COUNT_SPEC_IN_CRITICAL_PATH;
7274 }
7275 }
7276 }
7277
7278 /* If INSN is an appropriate load, return its mode number (see
7279 ia64_mode_to_int). Return -1 otherwise. */
7280 static int
7281 get_mode_no_for_insn (rtx insn)
7282 {
7283 rtx reg, mem, mode_rtx;
7284 int mode_no;
7285 bool extend_p;
7286
7287 extract_insn_cached (insn);
7288
7289 /* We use WHICH_ALTERNATIVE only after reload. This will
7290 guarantee that reload won't touch a speculative insn. */
7291
7292 if (recog_data.n_operands != 2)
7293 return -1;
7294
7295 reg = recog_data.operand[0];
7296 mem = recog_data.operand[1];
7297
7298 /* We should use MEM's mode since REG's mode in the presence of
7299 ZERO_EXTEND will always be DImode. */
7300 if (get_attr_speculable1 (insn) == SPECULABLE1_YES)
7301 /* Process non-speculative ld. */
7302 {
7303 if (!reload_completed)
7304 {
7305 /* Do not speculate into regs like ar.lc. */
7306 if (!REG_P (reg) || AR_REGNO_P (REGNO (reg)))
7307 return -1;
7308
7309 if (!MEM_P (mem))
7310 return -1;
7311
7312 {
7313 rtx mem_reg = XEXP (mem, 0);
7314
7315 if (!REG_P (mem_reg))
7316 return -1;
7317 }
7318
7319 mode_rtx = mem;
7320 }
7321 else if (get_attr_speculable2 (insn) == SPECULABLE2_YES)
7322 {
7323 gcc_assert (REG_P (reg) && MEM_P (mem));
7324 mode_rtx = mem;
7325 }
7326 else
7327 return -1;
7328 }
7329 else if (get_attr_data_speculative (insn) == DATA_SPECULATIVE_YES
7330 || get_attr_control_speculative (insn) == CONTROL_SPECULATIVE_YES
7331 || get_attr_check_load (insn) == CHECK_LOAD_YES)
7332 /* Process speculative ld or ld.c. */
7333 {
7334 gcc_assert (REG_P (reg) && MEM_P (mem));
7335 mode_rtx = mem;
7336 }
7337 else
7338 {
7339 enum attr_itanium_class attr_class = get_attr_itanium_class (insn);
7340
7341 if (attr_class == ITANIUM_CLASS_CHK_A
7342 || attr_class == ITANIUM_CLASS_CHK_S_I
7343 || attr_class == ITANIUM_CLASS_CHK_S_F)
7344 /* Process chk. */
7345 mode_rtx = reg;
7346 else
7347 return -1;
7348 }
7349
7350 mode_no = ia64_mode_to_int (GET_MODE (mode_rtx));
7351
7352 if (mode_no == SPEC_MODE_INVALID)
7353 return -1;
7354
7355 extend_p = (GET_MODE (reg) != GET_MODE (mode_rtx));
7356
7357 if (extend_p)
7358 {
7359 if (!(SPEC_MODE_FOR_EXTEND_FIRST <= mode_no
7360 && mode_no <= SPEC_MODE_FOR_EXTEND_LAST))
7361 return -1;
7362
7363 mode_no += SPEC_GEN_EXTEND_OFFSET;
7364 }
7365
7366 return mode_no;
7367 }
7368
7369 /* If X is an unspec part of a speculative load, return its code.
7370 Return -1 otherwise. */
7371 static int
7372 get_spec_unspec_code (const_rtx x)
7373 {
7374 if (GET_CODE (x) != UNSPEC)
7375 return -1;
7376
7377 {
7378 int code;
7379
7380 code = XINT (x, 1);
7381
7382 switch (code)
7383 {
7384 case UNSPEC_LDA:
7385 case UNSPEC_LDS:
7386 case UNSPEC_LDS_A:
7387 case UNSPEC_LDSA:
7388 return code;
7389
7390 default:
7391 return -1;
7392 }
7393 }
7394 }
7395
7396 /* Implement skip_rtx_p hook. */
7397 static bool
7398 ia64_skip_rtx_p (const_rtx x)
7399 {
7400 return get_spec_unspec_code (x) != -1;
7401 }
7402
7403 /* If INSN is a speculative load, return its UNSPEC code.
7404 Return -1 otherwise. */
7405 static int
7406 get_insn_spec_code (const_rtx insn)
7407 {
7408 rtx pat, reg, mem;
7409
7410 pat = PATTERN (insn);
7411
7412 if (GET_CODE (pat) == COND_EXEC)
7413 pat = COND_EXEC_CODE (pat);
7414
7415 if (GET_CODE (pat) != SET)
7416 return -1;
7417
7418 reg = SET_DEST (pat);
7419 if (!REG_P (reg))
7420 return -1;
7421
7422 mem = SET_SRC (pat);
7423 if (GET_CODE (mem) == ZERO_EXTEND)
7424 mem = XEXP (mem, 0);
7425
7426 return get_spec_unspec_code (mem);
7427 }
7428
7429 /* If INSN is a speculative load, return a ds with the speculation types.
7430 Otherwise [if INSN is a normal instruction] return 0. */
7431 static ds_t
7432 ia64_get_insn_spec_ds (rtx insn)
7433 {
7434 int code = get_insn_spec_code (insn);
7435
7436 switch (code)
7437 {
7438 case UNSPEC_LDA:
7439 return BEGIN_DATA;
7440
7441 case UNSPEC_LDS:
7442 case UNSPEC_LDS_A:
7443 return BEGIN_CONTROL;
7444
7445 case UNSPEC_LDSA:
7446 return BEGIN_DATA | BEGIN_CONTROL;
7447
7448 default:
7449 return 0;
7450 }
7451 }
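
/* An illustrative example (hypothetical registers, not compiler output):
a DImode load speculated both ways and its later check could look like

ld8.sa r14 = [r15] (UNSPEC_LDSA -> BEGIN_DATA | BEGIN_CONTROL)
...
chk.a.clr r14, .L_recover (verifies both kinds of speculation)

using the usual IA-64 mnemonics: ld.a for UNSPEC_LDA (data speculation
only), ld.s for UNSPEC_LDS (control speculation only) and ld.sa for
UNSPEC_LDSA (both). */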
7452
7453 /* If INSN is a speculative load return a ds with the speculation types that
7454 will be checked.
7455 Otherwise [if INSN is a normal instruction] return 0. */
7456 static ds_t
7457 ia64_get_insn_checked_ds (rtx insn)
7458 {
7459 int code = get_insn_spec_code (insn);
7460
7461 switch (code)
7462 {
7463 case UNSPEC_LDA:
7464 return BEGIN_DATA | BEGIN_CONTROL;
7465
7466 case UNSPEC_LDS:
7467 return BEGIN_CONTROL;
7468
7469 case UNSPEC_LDS_A:
7470 case UNSPEC_LDSA:
7471 return BEGIN_DATA | BEGIN_CONTROL;
7472
7473 default:
7474 return 0;
7475 }
7476 }
7477
7478 /* Return a speculative pattern for INSN with speculation type TS and
7479 mode index MODE_NO. The new pattern is built from INSN's operands
7480 by the matching generator function and is wrapped in INSN's
7481 COND_EXEC condition, if any. */
7482 static rtx
7483 ia64_gen_spec_load (rtx insn, ds_t ts, int mode_no)
7484 {
7485 rtx pat, new_pat;
7486 gen_func_t gen_load;
7487
7488 gen_load = get_spec_load_gen_function (ts, mode_no);
7489
7490 new_pat = gen_load (copy_rtx (recog_data.operand[0]),
7491 copy_rtx (recog_data.operand[1]));
7492
7493 pat = PATTERN (insn);
7494 if (GET_CODE (pat) == COND_EXEC)
7495 new_pat = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (COND_EXEC_TEST (pat)),
7496 new_pat);
7497
7498 return new_pat;
7499 }
7500
7501 static bool
7502 insn_can_be_in_speculative_p (rtx insn ATTRIBUTE_UNUSED,
7503 ds_t ds ATTRIBUTE_UNUSED)
7504 {
7505 return false;
7506 }
7507
7508 /* Implement the targetm.sched.speculate_insn hook.
7509 Check whether INSN can be speculated with speculation type TS.
7510 If it cannot, return -1.
7511 If it can, generate the speculative pattern in *NEW_PAT and return 1.
7512 If the current pattern of INSN already provides TS speculation,
7513 return 0. */
7514 static int
7515 ia64_speculate_insn (rtx insn, ds_t ts, rtx *new_pat)
7516 {
7517 int mode_no;
7518 int res;
7519
7520 gcc_assert (!(ts & ~SPECULATIVE));
7521
7522 if (ia64_spec_check_p (insn))
7523 return -1;
7524
7525 if ((ts & BE_IN_SPEC)
7526 && !insn_can_be_in_speculative_p (insn, ts))
7527 return -1;
7528
7529 mode_no = get_mode_no_for_insn (insn);
7530
7531 if (mode_no != SPEC_MODE_INVALID)
7532 {
7533 if (ia64_get_insn_spec_ds (insn) == ds_get_speculation_types (ts))
7534 res = 0;
7535 else
7536 {
7537 res = 1;
7538 *new_pat = ia64_gen_spec_load (insn, ts, mode_no);
7539 }
7540 }
7541 else
7542 res = -1;
7543
7544 return res;
7545 }
7546
7547 /* Return a function that will generate a check for speculation TS with mode
7548 MODE_NO.
7549 If simple check is needed, pass true for SIMPLE_CHECK_P.
7550 If clearing check is needed, pass true for CLEARING_CHECK_P. */
7551 static gen_func_t
7552 get_spec_check_gen_function (ds_t ts, int mode_no,
7553 bool simple_check_p, bool clearing_check_p)
7554 {
7555 static gen_func_t gen_ld_c_clr[] = {
7556 gen_movbi_clr,
7557 gen_movqi_clr,
7558 gen_movhi_clr,
7559 gen_movsi_clr,
7560 gen_movdi_clr,
7561 gen_movsf_clr,
7562 gen_movdf_clr,
7563 gen_movxf_clr,
7564 gen_movti_clr,
7565 gen_zero_extendqidi2_clr,
7566 gen_zero_extendhidi2_clr,
7567 gen_zero_extendsidi2_clr,
7568 };
7569 static gen_func_t gen_ld_c_nc[] = {
7570 gen_movbi_nc,
7571 gen_movqi_nc,
7572 gen_movhi_nc,
7573 gen_movsi_nc,
7574 gen_movdi_nc,
7575 gen_movsf_nc,
7576 gen_movdf_nc,
7577 gen_movxf_nc,
7578 gen_movti_nc,
7579 gen_zero_extendqidi2_nc,
7580 gen_zero_extendhidi2_nc,
7581 gen_zero_extendsidi2_nc,
7582 };
7583 static gen_func_t gen_chk_a_clr[] = {
7584 gen_advanced_load_check_clr_bi,
7585 gen_advanced_load_check_clr_qi,
7586 gen_advanced_load_check_clr_hi,
7587 gen_advanced_load_check_clr_si,
7588 gen_advanced_load_check_clr_di,
7589 gen_advanced_load_check_clr_sf,
7590 gen_advanced_load_check_clr_df,
7591 gen_advanced_load_check_clr_xf,
7592 gen_advanced_load_check_clr_ti,
7593 gen_advanced_load_check_clr_di,
7594 gen_advanced_load_check_clr_di,
7595 gen_advanced_load_check_clr_di,
7596 };
7597 static gen_func_t gen_chk_a_nc[] = {
7598 gen_advanced_load_check_nc_bi,
7599 gen_advanced_load_check_nc_qi,
7600 gen_advanced_load_check_nc_hi,
7601 gen_advanced_load_check_nc_si,
7602 gen_advanced_load_check_nc_di,
7603 gen_advanced_load_check_nc_sf,
7604 gen_advanced_load_check_nc_df,
7605 gen_advanced_load_check_nc_xf,
7606 gen_advanced_load_check_nc_ti,
7607 gen_advanced_load_check_nc_di,
7608 gen_advanced_load_check_nc_di,
7609 gen_advanced_load_check_nc_di,
7610 };
7611 static gen_func_t gen_chk_s[] = {
7612 gen_speculation_check_bi,
7613 gen_speculation_check_qi,
7614 gen_speculation_check_hi,
7615 gen_speculation_check_si,
7616 gen_speculation_check_di,
7617 gen_speculation_check_sf,
7618 gen_speculation_check_df,
7619 gen_speculation_check_xf,
7620 gen_speculation_check_ti,
7621 gen_speculation_check_di,
7622 gen_speculation_check_di,
7623 gen_speculation_check_di,
7624 };
7625
7626 gen_func_t *gen_check;
7627
7628 if (ts & BEGIN_DATA)
7629 {
7630 /* We don't need recovery because even if this is ld.sa,
7631 an ALAT entry will be allocated only if the NAT bit is zero.
7632 So it is enough to use ld.c here. */
7633
7634 if (simple_check_p)
7635 {
7636 gcc_assert (mflag_sched_spec_ldc);
7637
7638 if (clearing_check_p)
7639 gen_check = gen_ld_c_clr;
7640 else
7641 gen_check = gen_ld_c_nc;
7642 }
7643 else
7644 {
7645 if (clearing_check_p)
7646 gen_check = gen_chk_a_clr;
7647 else
7648 gen_check = gen_chk_a_nc;
7649 }
7650 }
7651 else if (ts & BEGIN_CONTROL)
7652 {
7653 if (simple_check_p)
7654 /* We might want to use ld.sa -> ld.c instead of
7655 ld.s -> chk.s. */
7656 {
7657 gcc_assert (!ia64_needs_block_p (ts));
7658
7659 if (clearing_check_p)
7660 gen_check = gen_ld_c_clr;
7661 else
7662 gen_check = gen_ld_c_nc;
7663 }
7664 else
7665 {
7666 gen_check = gen_chk_s;
7667 }
7668 }
7669 else
7670 gcc_unreachable ();
7671
7672 gcc_assert (mode_no >= 0);
7673 return gen_check[mode_no];
7674 }
7675
7676 /* Return nonzero if a check for speculation type TS needs a branchy recovery check rather than a simple ld.c. */
7677 static bool
7678 ia64_needs_block_p (ds_t ts)
7679 {
7680 if (ts & BEGIN_DATA)
7681 return !mflag_sched_spec_ldc;
7682
7683 gcc_assert ((ts & BEGIN_CONTROL) != 0);
7684
7685 return !(mflag_sched_spec_control_ldc && mflag_sched_spec_ldc);
7686 }
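
/* An illustrative sketch (hypothetical registers and label) of the two
check styles distinguished above: when mflag_sched_spec_ldc is set, a
data-speculative load can be verified by a simple check,

ld8.a r14 = [r15] (advanced load hoisted above a possibly aliasing store)
...
ld8.c.clr r14 = [r15] (simple check: re-executes the load if the
ALAT entry was invalidated)

whereas a branchy recovery check uses chk.a or chk.s with the label of a
recovery block that redoes the load, as generated by ia64_gen_spec_check
below when LABEL is nonnull. */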
7687
7688 /* Generate a recovery check for INSN with speculation type DS.
7689 If LABEL is nonnull, generate a branchy recovery check that
7690 branches to LABEL. Otherwise, generate a simple check. */
7691 static rtx
7692 ia64_gen_spec_check (rtx insn, rtx label, ds_t ds)
7693 {
7694 rtx op1, pat, check_pat;
7695 gen_func_t gen_check;
7696 int mode_no;
7697
7698 mode_no = get_mode_no_for_insn (insn);
7699 gcc_assert (mode_no >= 0);
7700
7701 if (label)
7702 op1 = label;
7703 else
7704 {
7705 gcc_assert (!ia64_needs_block_p (ds));
7706 op1 = copy_rtx (recog_data.operand[1]);
7707 }
7708
7709 gen_check = get_spec_check_gen_function (ds, mode_no, label == NULL_RTX,
7710 true);
7711
7712 check_pat = gen_check (copy_rtx (recog_data.operand[0]), op1);
7713
7714 pat = PATTERN (insn);
7715 if (GET_CODE (pat) == COND_EXEC)
7716 check_pat = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (COND_EXEC_TEST (pat)),
7717 check_pat);
7718
7719 return check_pat;
7720 }
7721
7722 /* Return nonzero if X is a speculation check (either a simple ld.c or a branchy chk). */
7723 static int
7724 ia64_spec_check_p (rtx x)
7725 {
7726 x = PATTERN (x);
7727 if (GET_CODE (x) == COND_EXEC)
7728 x = COND_EXEC_CODE (x);
7729 if (GET_CODE (x) == SET)
7730 return ia64_spec_check_src_p (SET_SRC (x));
7731 return 0;
7732 }
7733
7734 /* Return nonzero if SRC is the source pattern of a speculation check. */
7735 static int
7736 ia64_spec_check_src_p (rtx src)
7737 {
7738 if (GET_CODE (src) == IF_THEN_ELSE)
7739 {
7740 rtx t;
7741
7742 t = XEXP (src, 0);
7743 if (GET_CODE (t) == NE)
7744 {
7745 t = XEXP (t, 0);
7746
7747 if (GET_CODE (t) == UNSPEC)
7748 {
7749 int code;
7750
7751 code = XINT (t, 1);
7752
7753 if (code == UNSPEC_LDCCLR
7754 || code == UNSPEC_LDCNC
7755 || code == UNSPEC_CHKACLR
7756 || code == UNSPEC_CHKANC
7757 || code == UNSPEC_CHKS)
7758 {
7759 gcc_assert (code != 0);
7760 return code;
7761 }
7762 }
7763 }
7764 }
7765 return 0;
7766 }
7767 \f
7768
7769 /* The following page contains the abstract data type `bundle states', which is
7770 used for bundling insns (inserting nops and generating templates). */
7771
7772 /* The following describes state of insn bundling. */
7773
7774 struct bundle_state
7775 {
7776 /* Unique bundle state number to identify them in the debugging
7777 output */
7778 int unique_num;
7779 rtx insn; /* corresponding insn, NULL for the 1st and the last state */
7780 /* number of nops before and after the insn */
7781 short before_nops_num, after_nops_num;
7782 int insn_num; /* insn number (0 for the initial state, 1 for the 1st
7783 insn) */
7784 int cost; /* cost of the state in cycles */
7785 int accumulated_insns_num; /* number of all previous insns including
7786 nops; an L-type insn counts as 2 insns */
7787 int branch_deviation; /* deviation of previous branches from 3rd slots */
7788 int middle_bundle_stops; /* number of stop bits in the middle of bundles */
7789 struct bundle_state *next; /* next state with the same insn_num */
7790 struct bundle_state *originator; /* originator (previous insn state) */
7791 /* All bundle states are in the following chain. */
7792 struct bundle_state *allocated_states_chain;
7793 /* The DFA State after issuing the insn and the nops. */
7794 state_t dfa_state;
7795 };
7796
7797 /* The following array maps an insn number to the corresponding bundle state. */
7798
7799 static struct bundle_state **index_to_bundle_states;
7800
7801 /* The unique number of next bundle state. */
7802
7803 static int bundle_states_num;
7804
7805 /* All allocated bundle states are in the following chain. */
7806
7807 static struct bundle_state *allocated_bundle_states_chain;
7808
7809 /* All allocated but not used bundle states are in the following
7810 chain. */
7811
7812 static struct bundle_state *free_bundle_state_chain;
7813
7814
7815 /* The following function returns a free bundle state. */
7816
7817 static struct bundle_state *
7818 get_free_bundle_state (void)
7819 {
7820 struct bundle_state *result;
7821
7822 if (free_bundle_state_chain != NULL)
7823 {
7824 result = free_bundle_state_chain;
7825 free_bundle_state_chain = result->next;
7826 }
7827 else
7828 {
7829 result = XNEW (struct bundle_state);
7830 result->dfa_state = xmalloc (dfa_state_size);
7831 result->allocated_states_chain = allocated_bundle_states_chain;
7832 allocated_bundle_states_chain = result;
7833 }
7834 result->unique_num = bundle_states_num++;
7835 return result;
7836
7837 }
7838
7839 /* The following function frees given bundle state. */
7840
7841 static void
7842 free_bundle_state (struct bundle_state *state)
7843 {
7844 state->next = free_bundle_state_chain;
7845 free_bundle_state_chain = state;
7846 }
7847
7848 /* Start work with abstract data `bundle states'. */
7849
7850 static void
7851 initiate_bundle_states (void)
7852 {
7853 bundle_states_num = 0;
7854 free_bundle_state_chain = NULL;
7855 allocated_bundle_states_chain = NULL;
7856 }
7857
7858 /* Finish work with abstract data `bundle states'. */
7859
7860 static void
7861 finish_bundle_states (void)
7862 {
7863 struct bundle_state *curr_state, *next_state;
7864
7865 for (curr_state = allocated_bundle_states_chain;
7866 curr_state != NULL;
7867 curr_state = next_state)
7868 {
7869 next_state = curr_state->allocated_states_chain;
7870 free (curr_state->dfa_state);
7871 free (curr_state);
7872 }
7873 }
7874
7875 /* Hash table of the bundle states. The key is dfa_state and insn_num
7876 of the bundle states. */
7877
7878 static htab_t bundle_state_table;
7879
7880 /* The function returns hash of BUNDLE_STATE. */
7881
7882 static unsigned
7883 bundle_state_hash (const void *bundle_state)
7884 {
7885 const struct bundle_state *const state
7886 = (const struct bundle_state *) bundle_state;
7887 unsigned result, i;
7888
7889 for (result = i = 0; i < dfa_state_size; i++)
7890 result += (((unsigned char *) state->dfa_state) [i]
7891 << ((i % CHAR_BIT) * 3 + CHAR_BIT));
7892 return result + state->insn_num;
7893 }
7894
7895 /* The function returns nonzero if the bundle state keys are equal. */
7896
7897 static int
7898 bundle_state_eq_p (const void *bundle_state_1, const void *bundle_state_2)
7899 {
7900 const struct bundle_state *const state1
7901 = (const struct bundle_state *) bundle_state_1;
7902 const struct bundle_state *const state2
7903 = (const struct bundle_state *) bundle_state_2;
7904
7905 return (state1->insn_num == state2->insn_num
7906 && memcmp (state1->dfa_state, state2->dfa_state,
7907 dfa_state_size) == 0);
7908 }
7909
7910 /* The function inserts BUNDLE_STATE into the hash table and returns
7911 nonzero if it has been inserted. For a given key the table keeps only the
7912 best state: minimal cost, then insns, branch deviation and mid-bundle stops. */
7913
7914 static int
7915 insert_bundle_state (struct bundle_state *bundle_state)
7916 {
7917 void **entry_ptr;
7918
7919 entry_ptr = htab_find_slot (bundle_state_table, bundle_state, 1);
7920 if (*entry_ptr == NULL)
7921 {
7922 bundle_state->next = index_to_bundle_states [bundle_state->insn_num];
7923 index_to_bundle_states [bundle_state->insn_num] = bundle_state;
7924 *entry_ptr = (void *) bundle_state;
7925 return TRUE;
7926 }
7927 else if (bundle_state->cost < ((struct bundle_state *) *entry_ptr)->cost
7928 || (bundle_state->cost == ((struct bundle_state *) *entry_ptr)->cost
7929 && (((struct bundle_state *)*entry_ptr)->accumulated_insns_num
7930 > bundle_state->accumulated_insns_num
7931 || (((struct bundle_state *)
7932 *entry_ptr)->accumulated_insns_num
7933 == bundle_state->accumulated_insns_num
7934 && (((struct bundle_state *)
7935 *entry_ptr)->branch_deviation
7936 > bundle_state->branch_deviation
7937 || (((struct bundle_state *)
7938 *entry_ptr)->branch_deviation
7939 == bundle_state->branch_deviation
7940 && ((struct bundle_state *)
7941 *entry_ptr)->middle_bundle_stops
7942 > bundle_state->middle_bundle_stops))))))
7943
7944 {
7945 struct bundle_state temp;
7946
7947 temp = *(struct bundle_state *) *entry_ptr;
7948 *(struct bundle_state *) *entry_ptr = *bundle_state;
7949 ((struct bundle_state *) *entry_ptr)->next = temp.next;
7950 *bundle_state = temp;
7951 }
7952 return FALSE;
7953 }
7954
7955 /* Start work with the hash table. */
7956
7957 static void
7958 initiate_bundle_state_table (void)
7959 {
7960 bundle_state_table = htab_create (50, bundle_state_hash, bundle_state_eq_p,
7961 (htab_del) 0);
7962 }
7963
7964 /* Finish work with the hash table. */
7965
7966 static void
7967 finish_bundle_state_table (void)
7968 {
7969 htab_delete (bundle_state_table);
7970 }
7971
7972 \f
7973
7974 /* The following variable is a `nop' insn used to check bundle states
7975 with different numbers of inserted nops. */
7976
7977 static rtx ia64_nop;
7978
7979 /* The following function tries to issue NOPS_NUM nops for the current
7980 state without advancing the processor cycle. If it fails, the
7981 function returns FALSE and frees the current state. */
7982
7983 static int
7984 try_issue_nops (struct bundle_state *curr_state, int nops_num)
7985 {
7986 int i;
7987
7988 for (i = 0; i < nops_num; i++)
7989 if (state_transition (curr_state->dfa_state, ia64_nop) >= 0)
7990 {
7991 free_bundle_state (curr_state);
7992 return FALSE;
7993 }
7994 return TRUE;
7995 }
7996
7997 /* The following function tries to issue INSN for the current
7998 state without advancing the processor cycle. If it fails, the
7999 function returns FALSE and frees the current state. */
8000
8001 static int
8002 try_issue_insn (struct bundle_state *curr_state, rtx insn)
8003 {
8004 if (insn && state_transition (curr_state->dfa_state, insn) >= 0)
8005 {
8006 free_bundle_state (curr_state);
8007 return FALSE;
8008 }
8009 return TRUE;
8010 }
8011
8012 /* The following function tries to issue BEFORE_NOPS_NUM nops and INSN,
8013 starting from ORIGINATOR, without advancing the processor cycle. If
8014 TRY_BUNDLE_END_P is TRUE, the function also (or only, if
8015 ONLY_BUNDLE_END_P is TRUE) tries to issue nops to fill the whole bundle.
8016 If it is successful, the function creates a new bundle state and
8017 inserts it into the hash table and into `index_to_bundle_states'. */
8018
8019 static void
8020 issue_nops_and_insn (struct bundle_state *originator, int before_nops_num,
8021 rtx insn, int try_bundle_end_p, int only_bundle_end_p)
8022 {
8023 struct bundle_state *curr_state;
8024
8025 curr_state = get_free_bundle_state ();
8026 memcpy (curr_state->dfa_state, originator->dfa_state, dfa_state_size);
8027 curr_state->insn = insn;
8028 curr_state->insn_num = originator->insn_num + 1;
8029 curr_state->cost = originator->cost;
8030 curr_state->originator = originator;
8031 curr_state->before_nops_num = before_nops_num;
8032 curr_state->after_nops_num = 0;
8033 curr_state->accumulated_insns_num
8034 = originator->accumulated_insns_num + before_nops_num;
8035 curr_state->branch_deviation = originator->branch_deviation;
8036 curr_state->middle_bundle_stops = originator->middle_bundle_stops;
8037 gcc_assert (insn);
8038 if (INSN_CODE (insn) == CODE_FOR_insn_group_barrier)
8039 {
8040 gcc_assert (GET_MODE (insn) != TImode);
8041 if (!try_issue_nops (curr_state, before_nops_num))
8042 return;
8043 if (!try_issue_insn (curr_state, insn))
8044 return;
8045 memcpy (temp_dfa_state, curr_state->dfa_state, dfa_state_size);
8046 if (curr_state->accumulated_insns_num % 3 != 0)
8047 curr_state->middle_bundle_stops++;
8048 if (state_transition (temp_dfa_state, dfa_pre_cycle_insn) >= 0
8049 && curr_state->accumulated_insns_num % 3 != 0)
8050 {
8051 free_bundle_state (curr_state);
8052 return;
8053 }
8054 }
8055 else if (GET_MODE (insn) != TImode)
8056 {
8057 if (!try_issue_nops (curr_state, before_nops_num))
8058 return;
8059 if (!try_issue_insn (curr_state, insn))
8060 return;
8061 curr_state->accumulated_insns_num++;
8062 gcc_assert (GET_CODE (PATTERN (insn)) != ASM_INPUT
8063 && asm_noperands (PATTERN (insn)) < 0);
8064
8065 if (ia64_safe_type (insn) == TYPE_L)
8066 curr_state->accumulated_insns_num++;
8067 }
8068 else
8069 {
8070 /* If this is an insn that must be first in a group, then don't allow
8071 nops to be emitted before it. Currently, alloc is the only such
8072 supported instruction. */
8073 /* ??? The bundling automatons should handle this for us, but they do
8074 not yet have support for the first_insn attribute. */
8075 if (before_nops_num > 0 && get_attr_first_insn (insn) == FIRST_INSN_YES)
8076 {
8077 free_bundle_state (curr_state);
8078 return;
8079 }
8080
8081 state_transition (curr_state->dfa_state, dfa_pre_cycle_insn);
8082 state_transition (curr_state->dfa_state, NULL);
8083 curr_state->cost++;
8084 if (!try_issue_nops (curr_state, before_nops_num))
8085 return;
8086 if (!try_issue_insn (curr_state, insn))
8087 return;
8088 curr_state->accumulated_insns_num++;
8089 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
8090 || asm_noperands (PATTERN (insn)) >= 0)
8091 {
8092 /* Finish bundle containing asm insn. */
8093 curr_state->after_nops_num
8094 = 3 - curr_state->accumulated_insns_num % 3;
8095 curr_state->accumulated_insns_num
8096 += 3 - curr_state->accumulated_insns_num % 3;
8097 }
8098 else if (ia64_safe_type (insn) == TYPE_L)
8099 curr_state->accumulated_insns_num++;
8100 }
8101 if (ia64_safe_type (insn) == TYPE_B)
8102 curr_state->branch_deviation
8103 += 2 - (curr_state->accumulated_insns_num - 1) % 3;
8104 if (try_bundle_end_p && curr_state->accumulated_insns_num % 3 != 0)
8105 {
8106 if (!only_bundle_end_p && insert_bundle_state (curr_state))
8107 {
8108 state_t dfa_state;
8109 struct bundle_state *curr_state1;
8110 struct bundle_state *allocated_states_chain;
8111
8112 curr_state1 = get_free_bundle_state ();
8113 dfa_state = curr_state1->dfa_state;
8114 allocated_states_chain = curr_state1->allocated_states_chain;
8115 *curr_state1 = *curr_state;
8116 curr_state1->dfa_state = dfa_state;
8117 curr_state1->allocated_states_chain = allocated_states_chain;
8118 memcpy (curr_state1->dfa_state, curr_state->dfa_state,
8119 dfa_state_size);
8120 curr_state = curr_state1;
8121 }
8122 if (!try_issue_nops (curr_state,
8123 3 - curr_state->accumulated_insns_num % 3))
8124 return;
8125 curr_state->after_nops_num
8126 = 3 - curr_state->accumulated_insns_num % 3;
8127 curr_state->accumulated_insns_num
8128 += 3 - curr_state->accumulated_insns_num % 3;
8129 }
8130 if (!insert_bundle_state (curr_state))
8131 free_bundle_state (curr_state);
8132 return;
8133 }
8134
8135 /* The following function returns the position in the two-bundle window
8136 for the given STATE. */
8137
8138 static int
8139 get_max_pos (state_t state)
8140 {
8141 if (cpu_unit_reservation_p (state, pos_6))
8142 return 6;
8143 else if (cpu_unit_reservation_p (state, pos_5))
8144 return 5;
8145 else if (cpu_unit_reservation_p (state, pos_4))
8146 return 4;
8147 else if (cpu_unit_reservation_p (state, pos_3))
8148 return 3;
8149 else if (cpu_unit_reservation_p (state, pos_2))
8150 return 2;
8151 else if (cpu_unit_reservation_p (state, pos_1))
8152 return 1;
8153 else
8154 return 0;
8155 }
8156
8157 /* The function returns the code of a possible template for the given
8158 position and state. The function should be called only with the two
8159 position values 3 or 6. We avoid generating F NOPs by putting
8160 templates containing F insns at the end of the template search,
8161 because of an undocumented anomaly in McKinley-derived cores which can
8162 cause stalls if an F-unit insn (including a NOP) is issued within a
8163 six-cycle window after reading certain application registers (such
8164 as ar.bsp). Furthermore, power considerations also argue against
8165 the use of F-unit instructions unless they're really needed. */
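
/* For reference, the values returned below encode the bundle templates
as follows (derived from the reservations being queried):

0 -> .mii 1 -> .mmi 2 -> .mfi 3 -> .mmf 4 -> .bbb
5 -> .mbb 6 -> .mib 7 -> .mmb 8 -> .mfb 9 -> .mlx

The same numbering is used by gen_bundle_selector and
ia64_add_bundle_selector_before. */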
8166
8167 static int
8168 get_template (state_t state, int pos)
8169 {
8170 switch (pos)
8171 {
8172 case 3:
8173 if (cpu_unit_reservation_p (state, _0mmi_))
8174 return 1;
8175 else if (cpu_unit_reservation_p (state, _0mii_))
8176 return 0;
8177 else if (cpu_unit_reservation_p (state, _0mmb_))
8178 return 7;
8179 else if (cpu_unit_reservation_p (state, _0mib_))
8180 return 6;
8181 else if (cpu_unit_reservation_p (state, _0mbb_))
8182 return 5;
8183 else if (cpu_unit_reservation_p (state, _0bbb_))
8184 return 4;
8185 else if (cpu_unit_reservation_p (state, _0mmf_))
8186 return 3;
8187 else if (cpu_unit_reservation_p (state, _0mfi_))
8188 return 2;
8189 else if (cpu_unit_reservation_p (state, _0mfb_))
8190 return 8;
8191 else if (cpu_unit_reservation_p (state, _0mlx_))
8192 return 9;
8193 else
8194 gcc_unreachable ();
8195 case 6:
8196 if (cpu_unit_reservation_p (state, _1mmi_))
8197 return 1;
8198 else if (cpu_unit_reservation_p (state, _1mii_))
8199 return 0;
8200 else if (cpu_unit_reservation_p (state, _1mmb_))
8201 return 7;
8202 else if (cpu_unit_reservation_p (state, _1mib_))
8203 return 6;
8204 else if (cpu_unit_reservation_p (state, _1mbb_))
8205 return 5;
8206 else if (cpu_unit_reservation_p (state, _1bbb_))
8207 return 4;
8208 else if (_1mmf_ >= 0 && cpu_unit_reservation_p (state, _1mmf_))
8209 return 3;
8210 else if (cpu_unit_reservation_p (state, _1mfi_))
8211 return 2;
8212 else if (cpu_unit_reservation_p (state, _1mfb_))
8213 return 8;
8214 else if (cpu_unit_reservation_p (state, _1mlx_))
8215 return 9;
8216 else
8217 gcc_unreachable ();
8218 default:
8219 gcc_unreachable ();
8220 }
8221 }
8222
8223 /* True when INSN is important for bundling. */
8224 static bool
8225 important_for_bundling_p (rtx insn)
8226 {
8227 return (INSN_P (insn)
8228 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
8229 && GET_CODE (PATTERN (insn)) != USE
8230 && GET_CODE (PATTERN (insn)) != CLOBBER);
8231 }
8232
8233 /* The following function returns the first insn important for insn
8234 bundling, starting at INSN and running up to (but not including) TAIL. */
8235
8236 static rtx
8237 get_next_important_insn (rtx insn, rtx tail)
8238 {
8239 for (; insn && insn != tail; insn = NEXT_INSN (insn))
8240 if (important_for_bundling_p (insn))
8241 return insn;
8242 return NULL_RTX;
8243 }
8244
8245 /* Add a bundle selector TEMPLATE0 before INSN. */
8246
8247 static void
8248 ia64_add_bundle_selector_before (int template0, rtx insn)
8249 {
8250 rtx b = gen_bundle_selector (GEN_INT (template0));
8251
8252 ia64_emit_insn_before (b, insn);
8253 #if NR_BUNDLES == 10
8254 if ((template0 == 4 || template0 == 5)
8255 && (flag_unwind_tables || (flag_exceptions && !USING_SJLJ_EXCEPTIONS)))
8256 {
8257 int i;
8258 rtx note = NULL_RTX;
8259
8260 /* In .mbb and .bbb bundles, check whether a CALL_INSN is in the
8261 first or second slot. If it is and has a REG_EH_REGION note, copy
8262 the note to the following nops, as br.call sets rp to the address of
8263 the following bundle and therefore an EH region end must be on a
8264 bundle boundary. */
8265 insn = PREV_INSN (insn);
8266 for (i = 0; i < 3; i++)
8267 {
8268 do
8269 insn = next_active_insn (insn);
8270 while (GET_CODE (insn) == INSN
8271 && get_attr_empty (insn) == EMPTY_YES);
8272 if (GET_CODE (insn) == CALL_INSN)
8273 note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
8274 else if (note)
8275 {
8276 int code;
8277
8278 gcc_assert ((code = recog_memoized (insn)) == CODE_FOR_nop
8279 || code == CODE_FOR_nop_b);
8280 if (find_reg_note (insn, REG_EH_REGION, NULL_RTX))
8281 note = NULL_RTX;
8282 else
8283 REG_NOTES (insn)
8284 = gen_rtx_EXPR_LIST (REG_EH_REGION, XEXP (note, 0),
8285 REG_NOTES (insn));
8286 }
8287 }
8288 }
8289 #endif
8290 }
8291
8292 /* The following function does insn bundling. Bundling means
8293 inserting templates and nop insns to fit insn groups into permitted
8294 templates. Instruction scheduling uses an NDFA (non-deterministic
8295 finite automaton) encoding information about the templates and the
8296 inserted nops. The nondeterminism of the automaton permits following
8297 all possible insn sequences very quickly.
8298
8299 Unfortunately it is not possible to get information about the inserted
8300 nop insns and the used templates from the automaton states. The
8301 automaton only says that we can issue an insn, possibly inserting
8302 some nops before it and using some template. Therefore insn
8303 bundling in this function is implemented by using a DFA
8304 (deterministic finite automaton). We follow all possible insn
8305 sequences by inserting 0-2 nops (that is what the NDFA describes for
8306 insn scheduling) before/after each insn being bundled. We know the
8307 start of a simulated processor cycle from insn scheduling (an insn
8308 starting a new cycle has TImode).
8309
8310 A simple implementation of insn bundling would create an enormous
8311 number of possible insn sequences satisfying the information about
8312 new cycle ticks taken from the insn scheduling. To make the
8313 algorithm practical we use dynamic programming. Each decision (about
8314 inserting nops and implicitly about previous decisions) is described
8315 by the structure bundle_state (see above). If we generate the same
8316 bundle state (the key is the automaton state after issuing the insns
8317 and nops for it), we reuse the already generated one. As a
8318 consequence we reject some decisions which cannot improve the
8319 solution and reduce the memory needed by the algorithm.
8320
8321 When we reach the end of the EBB (extended basic block), we choose
8322 the best sequence and then, moving back through the EBB, insert
8323 templates for the best alternative. The templates are obtained by
8324 querying the automaton state for each insn in the chosen bundle states.
8325
8326 So the algorithm makes two (forward and backward) passes through the
8327 EBB. There is an additional forward pass through the EBB for the
8328 Itanium1 processor. This pass inserts more nops to make the dependency
8329 between a producer insn and MMMUL/MMSHF at least 4 cycles long. */
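
/* An illustrative example (hypothetical insns, not real compiler output):
suppose one cycle contains an M-type, an I-type and a B-type insn. Among
the candidate bundle states built by the forward pass are

.mib M-insn I-insn B-insn (no nops)

versus

.mii M-insn I-insn nop.i
.mbb nop.m B-insn nop.b (three nops, larger accumulated_insns_num)

If two candidates reach the same automaton state with the same insn
number, insert_bundle_state keeps only the better one (smaller cost, then
fewer accumulated insns, smaller branch deviation, fewer mid-bundle
stops), which is what keeps the dynamic programming tractable. */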
8330
8331 static void
8332 bundling (FILE *dump, int verbose, rtx prev_head_insn, rtx tail)
8333 {
8334 struct bundle_state *curr_state, *next_state, *best_state;
8335 rtx insn, next_insn;
8336 int insn_num;
8337 int i, bundle_end_p, only_bundle_end_p, asm_p;
8338 int pos = 0, max_pos, template0, template1;
8339 rtx b;
8340 rtx nop;
8341 enum attr_type type;
8342
8343 insn_num = 0;
8344 /* Count insns in the EBB. */
8345 for (insn = NEXT_INSN (prev_head_insn);
8346 insn && insn != tail;
8347 insn = NEXT_INSN (insn))
8348 if (INSN_P (insn))
8349 insn_num++;
8350 if (insn_num == 0)
8351 return;
8352 bundling_p = 1;
8353 dfa_clean_insn_cache ();
8354 initiate_bundle_state_table ();
8355 index_to_bundle_states = XNEWVEC (struct bundle_state *, insn_num + 2);
8356 /* First (forward) pass -- generation of bundle states. */
8357 curr_state = get_free_bundle_state ();
8358 curr_state->insn = NULL;
8359 curr_state->before_nops_num = 0;
8360 curr_state->after_nops_num = 0;
8361 curr_state->insn_num = 0;
8362 curr_state->cost = 0;
8363 curr_state->accumulated_insns_num = 0;
8364 curr_state->branch_deviation = 0;
8365 curr_state->middle_bundle_stops = 0;
8366 curr_state->next = NULL;
8367 curr_state->originator = NULL;
8368 state_reset (curr_state->dfa_state);
8369 index_to_bundle_states [0] = curr_state;
8370 insn_num = 0;
8371 /* Shift the cycle mark if it is put on an insn which could be ignored. */
8372 for (insn = NEXT_INSN (prev_head_insn);
8373 insn != tail;
8374 insn = NEXT_INSN (insn))
8375 if (INSN_P (insn)
8376 && (ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
8377 || GET_CODE (PATTERN (insn)) == USE
8378 || GET_CODE (PATTERN (insn)) == CLOBBER)
8379 && GET_MODE (insn) == TImode)
8380 {
8381 PUT_MODE (insn, VOIDmode);
8382 for (next_insn = NEXT_INSN (insn);
8383 next_insn != tail;
8384 next_insn = NEXT_INSN (next_insn))
8385 if (INSN_P (next_insn)
8386 && ia64_safe_itanium_class (next_insn) != ITANIUM_CLASS_IGNORE
8387 && GET_CODE (PATTERN (next_insn)) != USE
8388 && GET_CODE (PATTERN (next_insn)) != CLOBBER
8389 && INSN_CODE (next_insn) != CODE_FOR_insn_group_barrier)
8390 {
8391 PUT_MODE (next_insn, TImode);
8392 break;
8393 }
8394 }
8395 /* Forward pass: generation of bundle states. */
8396 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
8397 insn != NULL_RTX;
8398 insn = next_insn)
8399 {
8400 gcc_assert (INSN_P (insn)
8401 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
8402 && GET_CODE (PATTERN (insn)) != USE
8403 && GET_CODE (PATTERN (insn)) != CLOBBER);
8404 type = ia64_safe_type (insn);
8405 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
8406 insn_num++;
8407 index_to_bundle_states [insn_num] = NULL;
8408 for (curr_state = index_to_bundle_states [insn_num - 1];
8409 curr_state != NULL;
8410 curr_state = next_state)
8411 {
8412 pos = curr_state->accumulated_insns_num % 3;
8413 next_state = curr_state->next;
8414 /* We must fill up the current bundle in order to start a
8415 subsequent asm insn in a new bundle. An asm insn is always
8416 placed in a separate bundle. */
8417 only_bundle_end_p
8418 = (next_insn != NULL_RTX
8419 && INSN_CODE (insn) == CODE_FOR_insn_group_barrier
8420 && ia64_safe_type (next_insn) == TYPE_UNKNOWN);
8421 /* We may fill up the current bundle if it is the cycle end
8422 without a group barrier. */
8423 bundle_end_p
8424 = (only_bundle_end_p || next_insn == NULL_RTX
8425 || (GET_MODE (next_insn) == TImode
8426 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier));
8427 if (type == TYPE_F || type == TYPE_B || type == TYPE_L
8428 || type == TYPE_S
8429 /* We need to insert 2 nops for cases like M_MII. To
8430 guarantee issuing all insns on the same cycle for
8431 Itanium 1, we need to issue 2 nops after the first M
8432 insn (MnnMII where n is a nop insn). */
8433 || ((type == TYPE_M || type == TYPE_A)
8434 && ia64_tune == PROCESSOR_ITANIUM
8435 && !bundle_end_p && pos == 1))
8436 issue_nops_and_insn (curr_state, 2, insn, bundle_end_p,
8437 only_bundle_end_p);
8438 issue_nops_and_insn (curr_state, 1, insn, bundle_end_p,
8439 only_bundle_end_p);
8440 issue_nops_and_insn (curr_state, 0, insn, bundle_end_p,
8441 only_bundle_end_p);
8442 }
8443 gcc_assert (index_to_bundle_states [insn_num]);
8444 for (curr_state = index_to_bundle_states [insn_num];
8445 curr_state != NULL;
8446 curr_state = curr_state->next)
8447 if (verbose >= 2 && dump)
8448 {
8449 /* This structure is taken from the generated code of the
8450 pipeline hazard recognizer (see file insn-attrtab.c).
8451 Please don't forget to change the structure if a new
8452 automaton is added to the .md file. */
8453 struct DFA_chip
8454 {
8455 unsigned short one_automaton_state;
8456 unsigned short oneb_automaton_state;
8457 unsigned short two_automaton_state;
8458 unsigned short twob_automaton_state;
8459 };
8460
8461 fprintf
8462 (dump,
8463 "// Bundle state %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, mid.stops %d state %d) for %d\n",
8464 curr_state->unique_num,
8465 (curr_state->originator == NULL
8466 ? -1 : curr_state->originator->unique_num),
8467 curr_state->cost,
8468 curr_state->before_nops_num, curr_state->after_nops_num,
8469 curr_state->accumulated_insns_num, curr_state->branch_deviation,
8470 curr_state->middle_bundle_stops,
8471 (ia64_tune == PROCESSOR_ITANIUM
8472 ? ((struct DFA_chip *) curr_state->dfa_state)->oneb_automaton_state
8473 : ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state),
8474 INSN_UID (insn));
8475 }
8476 }
8477
8478 /* We should find a solution because the 2nd insn scheduling has
8479 found one. */
8480 gcc_assert (index_to_bundle_states [insn_num]);
8481 /* Find a state corresponding to the best insn sequence. */
8482 best_state = NULL;
8483 for (curr_state = index_to_bundle_states [insn_num];
8484 curr_state != NULL;
8485 curr_state = curr_state->next)
8486 /* We are only looking at the states whose last bundle is fully
8487 filled up. First we prefer insn sequences with minimal cost,
8488 then those with the fewest inserted nops, and finally those with
8489 branch insns placed in the 3rd slots. */
8490 if (curr_state->accumulated_insns_num % 3 == 0
8491 && (best_state == NULL || best_state->cost > curr_state->cost
8492 || (best_state->cost == curr_state->cost
8493 && (curr_state->accumulated_insns_num
8494 < best_state->accumulated_insns_num
8495 || (curr_state->accumulated_insns_num
8496 == best_state->accumulated_insns_num
8497 && (curr_state->branch_deviation
8498 < best_state->branch_deviation
8499 || (curr_state->branch_deviation
8500 == best_state->branch_deviation
8501 && curr_state->middle_bundle_stops
8502 < best_state->middle_bundle_stops)))))))
8503 best_state = curr_state;
8504 /* Second (backward) pass: adding nops and templates. */
8505 gcc_assert (best_state);
8506 insn_num = best_state->before_nops_num;
8507 template0 = template1 = -1;
8508 for (curr_state = best_state;
8509 curr_state->originator != NULL;
8510 curr_state = curr_state->originator)
8511 {
8512 insn = curr_state->insn;
8513 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
8514 || asm_noperands (PATTERN (insn)) >= 0);
8515 insn_num++;
8516 if (verbose >= 2 && dump)
8517 {
8518 struct DFA_chip
8519 {
8520 unsigned short one_automaton_state;
8521 unsigned short oneb_automaton_state;
8522 unsigned short two_automaton_state;
8523 unsigned short twob_automaton_state;
8524 };
8525
8526 fprintf
8527 (dump,
8528 "// Best %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, mid.stops %d, state %d) for %d\n",
8529 curr_state->unique_num,
8530 (curr_state->originator == NULL
8531 ? -1 : curr_state->originator->unique_num),
8532 curr_state->cost,
8533 curr_state->before_nops_num, curr_state->after_nops_num,
8534 curr_state->accumulated_insns_num, curr_state->branch_deviation,
8535 curr_state->middle_bundle_stops,
8536 (ia64_tune == PROCESSOR_ITANIUM
8537 ? ((struct DFA_chip *) curr_state->dfa_state)->oneb_automaton_state
8538 : ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state),
8539 INSN_UID (insn));
8540 }
8541 /* Find the position in the current bundle window. The window can
8542 contain at most two bundles. A two-bundle window means that
8543 the processor will make two bundle rotations. */
8544 max_pos = get_max_pos (curr_state->dfa_state);
8545 if (max_pos == 6
8546 /* The following (negative template number) means that the
8547 processor did one bundle rotation. */
8548 || (max_pos == 3 && template0 < 0))
8549 {
8550 /* We are at the end of the window -- find template(s) for
8551 its bundle(s). */
8552 pos = max_pos;
8553 if (max_pos == 3)
8554 template0 = get_template (curr_state->dfa_state, 3);
8555 else
8556 {
8557 template1 = get_template (curr_state->dfa_state, 3);
8558 template0 = get_template (curr_state->dfa_state, 6);
8559 }
8560 }
8561 if (max_pos > 3 && template1 < 0)
8562 /* This may happen when we have a stop inside a bundle. */
8563 {
8564 gcc_assert (pos <= 3);
8565 template1 = get_template (curr_state->dfa_state, 3);
8566 pos += 3;
8567 }
8568 if (!asm_p)
8569 /* Emit nops after the current insn. */
8570 for (i = 0; i < curr_state->after_nops_num; i++)
8571 {
8572 nop = gen_nop ();
8573 emit_insn_after (nop, insn);
8574 pos--;
8575 gcc_assert (pos >= 0);
8576 if (pos % 3 == 0)
8577 {
8578 /* We are at the start of a bundle: emit the template
8579 (it should be defined). */
8580 gcc_assert (template0 >= 0);
8581 ia64_add_bundle_selector_before (template0, nop);
8582 /* If we have two bundle window, we make one bundle
8583 rotation. Otherwise template0 will be undefined
8584 (negative value). */
8585 template0 = template1;
8586 template1 = -1;
8587 }
8588 }
8589 /* Move the position backward in the window. A group barrier has
8590 no slot. An asm insn takes a whole bundle. */
8591 if (INSN_CODE (insn) != CODE_FOR_insn_group_barrier
8592 && GET_CODE (PATTERN (insn)) != ASM_INPUT
8593 && asm_noperands (PATTERN (insn)) < 0)
8594 pos--;
8595 /* Long insn takes 2 slots. */
8596 if (ia64_safe_type (insn) == TYPE_L)
8597 pos--;
8598 gcc_assert (pos >= 0);
8599 if (pos % 3 == 0
8600 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier
8601 && GET_CODE (PATTERN (insn)) != ASM_INPUT
8602 && asm_noperands (PATTERN (insn)) < 0)
8603 {
8604 /* The current insn is at the bundle start: emit the
8605 template. */
8606 gcc_assert (template0 >= 0);
8607 ia64_add_bundle_selector_before (template0, insn);
8608 b = PREV_INSN (insn);
8609 insn = b;
8610 /* See comment above in analogous place for emitting nops
8611 after the insn. */
8612 template0 = template1;
8613 template1 = -1;
8614 }
8615 /* Emit nops before the current insn. */
8616 for (i = 0; i < curr_state->before_nops_num; i++)
8617 {
8618 nop = gen_nop ();
8619 ia64_emit_insn_before (nop, insn);
8620 nop = PREV_INSN (insn);
8621 insn = nop;
8622 pos--;
8623 gcc_assert (pos >= 0);
8624 if (pos % 3 == 0)
8625 {
8626 /* See comment above in analogous place for emitting nops
8627 after the insn. */
8628 gcc_assert (template0 >= 0);
8629 ia64_add_bundle_selector_before (template0, insn);
8630 b = PREV_INSN (insn);
8631 insn = b;
8632 template0 = template1;
8633 template1 = -1;
8634 }
8635 }
8636 }
8637 if (ia64_tune == PROCESSOR_ITANIUM)
8638 /* Insert additional cycles for MM-insns (MMMUL and MMSHF).
8639 Itanium1 has a strange design: if the distance between an insn
8640 and a dependent MM-insn is less than 4 cycles, there is an
8641 additional 6-cycle stall. So we make the distance equal to 4
8642 cycles if it is less. */
8643 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
8644 insn != NULL_RTX;
8645 insn = next_insn)
8646 {
8647 gcc_assert (INSN_P (insn)
8648 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
8649 && GET_CODE (PATTERN (insn)) != USE
8650 && GET_CODE (PATTERN (insn)) != CLOBBER);
8651 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
8652 if (INSN_UID (insn) < clocks_length && add_cycles [INSN_UID (insn)])
8653 /* We found a MM-insn which needs additional cycles. */
8654 {
8655 rtx last;
8656 int i, j, n;
8657 int pred_stop_p;
8658
8659 /* Now we search for the template of the bundle in
8660 which the MM-insn is placed and for the position of the
8661 insn in the bundle (0, 1, 2). We also check whether
8662 there is a stop before the insn. */
8663 last = prev_active_insn (insn);
8664 pred_stop_p = recog_memoized (last) == CODE_FOR_insn_group_barrier;
8665 if (pred_stop_p)
8666 last = prev_active_insn (last);
8667 n = 0;
8668 for (;; last = prev_active_insn (last))
8669 if (recog_memoized (last) == CODE_FOR_bundle_selector)
8670 {
8671 template0 = XINT (XVECEXP (PATTERN (last), 0, 0), 0);
8672 if (template0 == 9)
8673 /* The insn is in an MLX bundle. Change the template
8674 to MFI because we will add nops before the
8675 insn. It simplifies the subsequent code a lot. */
8676 PATTERN (last)
8677 = gen_bundle_selector (const2_rtx); /* -> MFI */
8678 break;
8679 }
8680 else if (recog_memoized (last) != CODE_FOR_insn_group_barrier
8681 && (ia64_safe_itanium_class (last)
8682 != ITANIUM_CLASS_IGNORE))
8683 n++;
8684 /* Sanity checks: the stop is not at the bundle
8685 start, there are no more than 3 insns in the bundle,
8686 and the MM-insn is not at the start of a bundle with
8687 template MLX. */
8688 gcc_assert ((!pred_stop_p || n)
8689 && n <= 2
8690 && (template0 != 9 || !n));
8691 /* Fill the remaining slots of the original bundle with nops; they are emitted before the MM-insn. */
8692 for (j = 3 - n; j > 0; j --)
8693 ia64_emit_insn_before (gen_nop (), insn);
8694 /* This takes into account that we will add N more nops
8695 before the insn later -- please see the code below. */
8696 add_cycles [INSN_UID (insn)]--;
8697 if (!pred_stop_p || add_cycles [INSN_UID (insn)])
8698 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
8699 insn);
8700 if (pred_stop_p)
8701 add_cycles [INSN_UID (insn)]--;
8702 for (i = add_cycles [INSN_UID (insn)]; i > 0; i--)
8703 {
8704 /* Insert "MII;" template. */
8705 ia64_emit_insn_before (gen_bundle_selector (const0_rtx),
8706 insn);
8707 ia64_emit_insn_before (gen_nop (), insn);
8708 ia64_emit_insn_before (gen_nop (), insn);
8709 if (i > 1)
8710 {
8711 /* To decrease code size, we use "MI;I;"
8712 template. */
8713 ia64_emit_insn_before
8714 (gen_insn_group_barrier (GEN_INT (3)), insn);
8715 i--;
8716 }
8717 ia64_emit_insn_before (gen_nop (), insn);
8718 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
8719 insn);
8720 }
8721 /* Put the MM-insn in the same slot of a bundle with the
8722 same template as the original one. */
8723 ia64_add_bundle_selector_before (template0, insn);
8724 /* To put the insn in the same slot, add necessary number
8725 of nops. */
8726 for (j = n; j > 0; j --)
8727 ia64_emit_insn_before (gen_nop (), insn);
8728 /* Put the stop if the original bundle had it. */
8729 if (pred_stop_p)
8730 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
8731 insn);
8732 }
8733 }
8734
8735 #ifdef ENABLE_CHECKING
8736 {
8737 /* Assert right calculation of middle_bundle_stops. */
8738 int num = best_state->middle_bundle_stops;
8739 bool start_bundle = true, end_bundle = false;
8740
8741 for (insn = NEXT_INSN (prev_head_insn);
8742 insn && insn != tail;
8743 insn = NEXT_INSN (insn))
8744 {
8745 if (!INSN_P (insn))
8746 continue;
8747 if (recog_memoized (insn) == CODE_FOR_bundle_selector)
8748 start_bundle = true;
8749 else
8750 {
8751 rtx next_insn;
8752
8753 for (next_insn = NEXT_INSN (insn);
8754 next_insn && next_insn != tail;
8755 next_insn = NEXT_INSN (next_insn))
8756 if (INSN_P (next_insn)
8757 && (ia64_safe_itanium_class (next_insn)
8758 != ITANIUM_CLASS_IGNORE
8759 || recog_memoized (next_insn)
8760 == CODE_FOR_bundle_selector)
8761 && GET_CODE (PATTERN (next_insn)) != USE
8762 && GET_CODE (PATTERN (next_insn)) != CLOBBER)
8763 break;
8764
8765 end_bundle = next_insn == NULL_RTX
8766 || next_insn == tail
8767 || (INSN_P (next_insn)
8768 && recog_memoized (next_insn)
8769 == CODE_FOR_bundle_selector);
8770 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier
8771 && !start_bundle && !end_bundle
8772 && next_insn
8773 && GET_CODE (PATTERN (next_insn)) != ASM_INPUT
8774 && asm_noperands (PATTERN (next_insn)) < 0)
8775 num--;
8776
8777 start_bundle = false;
8778 }
8779 }
8780
8781 gcc_assert (num == 0);
8782 }
8783 #endif
8784
8785 free (index_to_bundle_states);
8786 finish_bundle_state_table ();
8787 bundling_p = 0;
8788 dfa_clean_insn_cache ();
8789 }
8790
8791 /* The following function is called at the end of scheduling BB or
8792 EBB. After reload, it inserts stop bits and does insn bundling. */
8793
8794 static void
8795 ia64_sched_finish (FILE *dump, int sched_verbose)
8796 {
8797 if (sched_verbose)
8798 fprintf (dump, "// Finishing schedule.\n");
8799 if (!reload_completed)
8800 return;
8801 if (reload_completed)
8802 {
8803 final_emit_insn_group_barriers (dump);
8804 bundling (dump, sched_verbose, current_sched_info->prev_head,
8805 current_sched_info->next_tail);
8806 if (sched_verbose && dump)
8807 fprintf (dump, "// finishing %d-%d\n",
8808 INSN_UID (NEXT_INSN (current_sched_info->prev_head)),
8809 INSN_UID (PREV_INSN (current_sched_info->next_tail)));
8810
8811 return;
8812 }
8813 }
8814
8815 /* The following function inserts stop bits in scheduled BB or EBB. */
8816
8817 static void
8818 final_emit_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
8819 {
8820 rtx insn;
8821 int need_barrier_p = 0;
8822 int seen_good_insn = 0;
8823 rtx prev_insn = NULL_RTX;
8824
8825 init_insn_group_barriers ();
8826
8827 for (insn = NEXT_INSN (current_sched_info->prev_head);
8828 insn != current_sched_info->next_tail;
8829 insn = NEXT_INSN (insn))
8830 {
8831 if (GET_CODE (insn) == BARRIER)
8832 {
8833 rtx last = prev_active_insn (insn);
8834
8835 if (! last)
8836 continue;
8837 if (GET_CODE (last) == JUMP_INSN
8838 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
8839 last = prev_active_insn (last);
8840 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
8841 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
8842
8843 init_insn_group_barriers ();
8844 seen_good_insn = 0;
8845 need_barrier_p = 0;
8846 prev_insn = NULL_RTX;
8847 }
8848 else if (INSN_P (insn))
8849 {
8850 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
8851 {
8852 init_insn_group_barriers ();
8853 seen_good_insn = 0;
8854 need_barrier_p = 0;
8855 prev_insn = NULL_RTX;
8856 }
8857 else if (need_barrier_p || group_barrier_needed (insn)
8858 || (mflag_sched_stop_bits_after_every_cycle
8859 && GET_MODE (insn) == TImode
8860 && seen_good_insn))
8861 {
8862 if (TARGET_EARLY_STOP_BITS)
8863 {
8864 rtx last;
8865
8866 for (last = insn;
8867 last != current_sched_info->prev_head;
8868 last = PREV_INSN (last))
8869 if (INSN_P (last) && GET_MODE (last) == TImode
8870 && stops_p [INSN_UID (last)])
8871 break;
8872 if (last == current_sched_info->prev_head)
8873 last = insn;
8874 last = prev_active_insn (last);
8875 if (last
8876 && recog_memoized (last) != CODE_FOR_insn_group_barrier)
8877 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)),
8878 last);
8879 init_insn_group_barriers ();
8880 for (last = NEXT_INSN (last);
8881 last != insn;
8882 last = NEXT_INSN (last))
8883 if (INSN_P (last))
8884 {
8885 group_barrier_needed (last);
8886 if (recog_memoized (last) >= 0
8887 && important_for_bundling_p (last))
8888 seen_good_insn = 1;
8889 }
8890 }
8891 else
8892 {
8893 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
8894 insn);
8895 init_insn_group_barriers ();
8896 seen_good_insn = 0;
8897 }
8898 group_barrier_needed (insn);
8899 if (recog_memoized (insn) >= 0
8900 && important_for_bundling_p (insn))
8901 seen_good_insn = 1;
8902 prev_insn = NULL_RTX;
8903 }
8904 else if (recog_memoized (insn) >= 0
8905 && important_for_bundling_p (insn))
8906 {
8907 prev_insn = insn;
8908 seen_good_insn = 1;
8909 }
8910 need_barrier_p = (GET_CODE (insn) == CALL_INSN
8911 || GET_CODE (PATTERN (insn)) == ASM_INPUT
8912 || asm_noperands (PATTERN (insn)) >= 0);
8913 }
8914 }
8915 }
8916
8917 \f
8918
8919 /* The following function returns the depth of the first-cycle multipass
8920 lookahead used by the DFA insn scheduler. */
8921
8922 static int
8923 ia64_first_cycle_multipass_dfa_lookahead (void)
8924 {
8925 return (reload_completed ? 6 : 4);
8926 }
8927
8928 /* The following function initializes the variables `dfa_pre_cycle_insn' and `dfa_stop_insn', and the DFA state buffers. */
8929
8930 static void
8931 ia64_init_dfa_pre_cycle_insn (void)
8932 {
8933 if (temp_dfa_state == NULL)
8934 {
8935 dfa_state_size = state_size ();
8936 temp_dfa_state = xmalloc (dfa_state_size);
8937 prev_cycle_state = xmalloc (dfa_state_size);
8938 }
8939 dfa_pre_cycle_insn = make_insn_raw (gen_pre_cycle ());
8940 PREV_INSN (dfa_pre_cycle_insn) = NEXT_INSN (dfa_pre_cycle_insn) = NULL_RTX;
8941 recog_memoized (dfa_pre_cycle_insn);
8942 dfa_stop_insn = make_insn_raw (gen_insn_group_barrier (GEN_INT (3)));
8943 PREV_INSN (dfa_stop_insn) = NEXT_INSN (dfa_stop_insn) = NULL_RTX;
8944 recog_memoized (dfa_stop_insn);
8945 }
8946
8947 /* The following function returns the pseudo insn DFA_PRE_CYCLE_INSN
8948 used by the DFA insn scheduler. */
8949
8950 static rtx
8951 ia64_dfa_pre_cycle_insn (void)
8952 {
8953 return dfa_pre_cycle_insn;
8954 }
8955
8956 /* The following function returns TRUE if PRODUCER (of type ilog or
8957 ld) produces the address for CONSUMER (of type st or stf). */
8958
8959 int
8960 ia64_st_address_bypass_p (rtx producer, rtx consumer)
8961 {
8962 rtx dest, reg, mem;
8963
8964 gcc_assert (producer && consumer);
8965 dest = ia64_single_set (producer);
8966 gcc_assert (dest);
8967 reg = SET_DEST (dest);
8968 gcc_assert (reg);
8969 if (GET_CODE (reg) == SUBREG)
8970 reg = SUBREG_REG (reg);
8971 gcc_assert (GET_CODE (reg) == REG);
8972
8973 dest = ia64_single_set (consumer);
8974 gcc_assert (dest);
8975 mem = SET_DEST (dest);
8976 gcc_assert (mem && GET_CODE (mem) == MEM);
8977 return reg_mentioned_p (reg, mem);
8978 }
8979
8980 /* The following function returns TRUE if PRODUCER (of type ilog or
8981 ld) produces the address for CONSUMER (of type ld or fld). */
8982
8983 int
8984 ia64_ld_address_bypass_p (rtx producer, rtx consumer)
8985 {
8986 rtx dest, src, reg, mem;
8987
8988 gcc_assert (producer && consumer);
8989 dest = ia64_single_set (producer);
8990 gcc_assert (dest);
8991 reg = SET_DEST (dest);
8992 gcc_assert (reg);
8993 if (GET_CODE (reg) == SUBREG)
8994 reg = SUBREG_REG (reg);
8995 gcc_assert (GET_CODE (reg) == REG);
8996
8997 src = ia64_single_set (consumer);
8998 gcc_assert (src);
8999 mem = SET_SRC (src);
9000 gcc_assert (mem);
9001
9002 if (GET_CODE (mem) == UNSPEC && XVECLEN (mem, 0) > 0)
9003 mem = XVECEXP (mem, 0, 0);
9004 else if (GET_CODE (mem) == IF_THEN_ELSE)
9005 /* ??? Is this bypass necessary for ld.c? */
9006 {
9007 gcc_assert (XINT (XEXP (XEXP (mem, 0), 0), 1) == UNSPEC_LDCCLR);
9008 mem = XEXP (mem, 1);
9009 }
9010
9011 while (GET_CODE (mem) == SUBREG || GET_CODE (mem) == ZERO_EXTEND)
9012 mem = XEXP (mem, 0);
9013
9014 if (GET_CODE (mem) == UNSPEC)
9015 {
9016 int c = XINT (mem, 1);
9017
9018 gcc_assert (c == UNSPEC_LDA || c == UNSPEC_LDS || c == UNSPEC_LDS_A
9019 || c == UNSPEC_LDSA);
9020 mem = XVECEXP (mem, 0, 0);
9021 }
9022
9023 /* Note that LO_SUM is used for GOT loads. */
9024 gcc_assert (GET_CODE (mem) == LO_SUM || GET_CODE (mem) == MEM);
9025
9026 return reg_mentioned_p (reg, mem);
9027 }
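
/* An illustrative example (hypothetical registers): in

add r14 = r15, r16 ("ilog" producer)
;;
ld8 r17 = [r14] (consumer uses r14 as its address)

the producer feeds the consumer's address, so the bypass applies. It
would not apply to "st8 [r20] = r14", where r14 is the stored data
rather than the address; ia64_st_address_bypass_p above checks the
address of the destination MEM for the same reason. */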
9028
9029 /* The following function returns TRUE if INSN produces an address for a
9030 load/store insn. We will place such insns into an M slot because that
9031 decreases their latency. */
9032
9033 int
9034 ia64_produce_address_p (rtx insn)
9035 {
9036 return insn->call;
9037 }
9038
9039 \f
9040 /* Emit pseudo-ops for the assembler to describe predicate relations.
9041 At present this assumes that we only consider predicate pairs to
9042 be mutex, and that the assembler can deduce proper values from
9043 straight-line code. */
9044
9045 static void
9046 emit_predicate_relation_info (void)
9047 {
9048 basic_block bb;
9049
9050 FOR_EACH_BB_REVERSE (bb)
9051 {
9052 int r;
9053 rtx head = BB_HEAD (bb);
9054
9055 /* We only need such notes at code labels. */
9056 if (GET_CODE (head) != CODE_LABEL)
9057 continue;
9058 if (NOTE_INSN_BASIC_BLOCK_P (NEXT_INSN (head)))
9059 head = NEXT_INSN (head);
9060
9061 /* Skip p0, which may be thought to be live due to (reg:DI p0)
9062 grabbing the entire block of predicate registers. */
9063 for (r = PR_REG (2); r < PR_REG (64); r += 2)
9064 if (REGNO_REG_SET_P (df_get_live_in (bb), r))
9065 {
9066 rtx p = gen_rtx_REG (BImode, r);
9067 rtx n = emit_insn_after (gen_pred_rel_mutex (p), head);
9068 if (head == BB_END (bb))
9069 BB_END (bb) = n;
9070 head = n;
9071 }
9072 }
9073
9074 /* Look for conditional calls that do not return, and protect predicate
9075 relations around them. Otherwise the assembler will assume the call
9076 returns, and complain about uses of call-clobbered predicates after
9077 the call. */
9078 FOR_EACH_BB_REVERSE (bb)
9079 {
9080 rtx insn = BB_HEAD (bb);
9081
9082 while (1)
9083 {
9084 if (GET_CODE (insn) == CALL_INSN
9085 && GET_CODE (PATTERN (insn)) == COND_EXEC
9086 && find_reg_note (insn, REG_NORETURN, NULL_RTX))
9087 {
9088 rtx b = emit_insn_before (gen_safe_across_calls_all (), insn);
9089 rtx a = emit_insn_after (gen_safe_across_calls_normal (), insn);
9090 if (BB_HEAD (bb) == insn)
9091 BB_HEAD (bb) = b;
9092 if (BB_END (bb) == insn)
9093 BB_END (bb) = a;
9094 }
9095
9096 if (insn == BB_END (bb))
9097 break;
9098 insn = NEXT_INSN (insn);
9099 }
9100 }
9101 }
9102
9103 /* Perform machine dependent operations on the rtl chain INSNS. */
9104
9105 static void
9106 ia64_reorg (void)
9107 {
9108 /* We are freeing block_for_insn in the toplev to keep compatibility
9109 with old MDEP_REORGS that are not CFG based. Recompute it now. */
9110 compute_bb_for_insn ();
9111
9112 /* If optimizing, we'll have split before scheduling. */
9113 if (optimize == 0)
9114 split_all_insns ();
9115
9116 if (optimize && ia64_flag_schedule_insns2
9117 && dbg_cnt (ia64_sched2))
9118 {
9119 timevar_push (TV_SCHED2);
9120 ia64_final_schedule = 1;
9121
9122 initiate_bundle_states ();
9123 ia64_nop = make_insn_raw (gen_nop ());
9124 PREV_INSN (ia64_nop) = NEXT_INSN (ia64_nop) = NULL_RTX;
9125 recog_memoized (ia64_nop);
9126 clocks_length = get_max_uid () + 1;
9127 stops_p = XCNEWVEC (char, clocks_length);
9128 if (ia64_tune == PROCESSOR_ITANIUM)
9129 {
9130 clocks = XCNEWVEC (int, clocks_length);
9131 add_cycles = XCNEWVEC (int, clocks_length);
9132 }
9133 if (ia64_tune == PROCESSOR_ITANIUM2)
9134 {
9135 pos_1 = get_cpu_unit_code ("2_1");
9136 pos_2 = get_cpu_unit_code ("2_2");
9137 pos_3 = get_cpu_unit_code ("2_3");
9138 pos_4 = get_cpu_unit_code ("2_4");
9139 pos_5 = get_cpu_unit_code ("2_5");
9140 pos_6 = get_cpu_unit_code ("2_6");
9141 _0mii_ = get_cpu_unit_code ("2b_0mii.");
9142 _0mmi_ = get_cpu_unit_code ("2b_0mmi.");
9143 _0mfi_ = get_cpu_unit_code ("2b_0mfi.");
9144 _0mmf_ = get_cpu_unit_code ("2b_0mmf.");
9145 _0bbb_ = get_cpu_unit_code ("2b_0bbb.");
9146 _0mbb_ = get_cpu_unit_code ("2b_0mbb.");
9147 _0mib_ = get_cpu_unit_code ("2b_0mib.");
9148 _0mmb_ = get_cpu_unit_code ("2b_0mmb.");
9149 _0mfb_ = get_cpu_unit_code ("2b_0mfb.");
9150 _0mlx_ = get_cpu_unit_code ("2b_0mlx.");
9151 _1mii_ = get_cpu_unit_code ("2b_1mii.");
9152 _1mmi_ = get_cpu_unit_code ("2b_1mmi.");
9153 _1mfi_ = get_cpu_unit_code ("2b_1mfi.");
9154 _1mmf_ = get_cpu_unit_code ("2b_1mmf.");
9155 _1bbb_ = get_cpu_unit_code ("2b_1bbb.");
9156 _1mbb_ = get_cpu_unit_code ("2b_1mbb.");
9157 _1mib_ = get_cpu_unit_code ("2b_1mib.");
9158 _1mmb_ = get_cpu_unit_code ("2b_1mmb.");
9159 _1mfb_ = get_cpu_unit_code ("2b_1mfb.");
9160 _1mlx_ = get_cpu_unit_code ("2b_1mlx.");
9161 }
9162 else
9163 {
9164 pos_1 = get_cpu_unit_code ("1_1");
9165 pos_2 = get_cpu_unit_code ("1_2");
9166 pos_3 = get_cpu_unit_code ("1_3");
9167 pos_4 = get_cpu_unit_code ("1_4");
9168 pos_5 = get_cpu_unit_code ("1_5");
9169 pos_6 = get_cpu_unit_code ("1_6");
9170 _0mii_ = get_cpu_unit_code ("1b_0mii.");
9171 _0mmi_ = get_cpu_unit_code ("1b_0mmi.");
9172 _0mfi_ = get_cpu_unit_code ("1b_0mfi.");
9173 _0mmf_ = get_cpu_unit_code ("1b_0mmf.");
9174 _0bbb_ = get_cpu_unit_code ("1b_0bbb.");
9175 _0mbb_ = get_cpu_unit_code ("1b_0mbb.");
9176 _0mib_ = get_cpu_unit_code ("1b_0mib.");
9177 _0mmb_ = get_cpu_unit_code ("1b_0mmb.");
9178 _0mfb_ = get_cpu_unit_code ("1b_0mfb.");
9179 _0mlx_ = get_cpu_unit_code ("1b_0mlx.");
9180 _1mii_ = get_cpu_unit_code ("1b_1mii.");
9181 _1mmi_ = get_cpu_unit_code ("1b_1mmi.");
9182 _1mfi_ = get_cpu_unit_code ("1b_1mfi.");
9183 _1mmf_ = get_cpu_unit_code ("1b_1mmf.");
9184 _1bbb_ = get_cpu_unit_code ("1b_1bbb.");
9185 _1mbb_ = get_cpu_unit_code ("1b_1mbb.");
9186 _1mib_ = get_cpu_unit_code ("1b_1mib.");
9187 _1mmb_ = get_cpu_unit_code ("1b_1mmb.");
9188 _1mfb_ = get_cpu_unit_code ("1b_1mfb.");
9189 _1mlx_ = get_cpu_unit_code ("1b_1mlx.");
9190 }
9191
9192 if (flag_selective_scheduling2
9193 && !maybe_skip_selective_scheduling ())
9194 run_selective_scheduling ();
9195 else
9196 schedule_ebbs ();
9197
9198 /* Redo alignment computation, as it might have gone wrong.  */
9199 compute_alignments ();
9200
9201 /* We cannot reuse this one because it has been corrupted by the
9202 evil glat. */
9203 finish_bundle_states ();
9204 if (ia64_tune == PROCESSOR_ITANIUM)
9205 {
9206 free (add_cycles);
9207 free (clocks);
9208 }
9209 free (stops_p);
9210 stops_p = NULL;
9211 emit_insn_group_barriers (dump_file);
9212
9213 ia64_final_schedule = 0;
9214 timevar_pop (TV_SCHED2);
9215 }
9216 else
9217 emit_all_insn_group_barriers (dump_file);
9218
9219 df_analyze ();
9220
9221 /* A call must not be the last instruction in a function, so that the
9222 return address is still within the function and unwinding works
9223 properly.  Note that IA-64 differs from dwarf2 on this point.  */
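/* Concretely, if the last active insn is a call we append a stop bit, a
   break insn and another stop bit, so the function ends with something like
   ";; break.f 0 ;;" after the final br.call (illustrative; the exact text
   comes from the break_f and insn_group_barrier patterns).  */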
9224 if (flag_unwind_tables || (flag_exceptions && !USING_SJLJ_EXCEPTIONS))
9225 {
9226 rtx insn;
9227 int saw_stop = 0;
9228
9229 insn = get_last_insn ();
9230 if (! INSN_P (insn))
9231 insn = prev_active_insn (insn);
9232 /* Skip over insns that expand to nothing. */
9233 while (GET_CODE (insn) == INSN && get_attr_empty (insn) == EMPTY_YES)
9234 {
9235 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
9236 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
9237 saw_stop = 1;
9238 insn = prev_active_insn (insn);
9239 }
9240 if (GET_CODE (insn) == CALL_INSN)
9241 {
9242 if (! saw_stop)
9243 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
9244 emit_insn (gen_break_f ());
9245 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
9246 }
9247 }
9248
9249 emit_predicate_relation_info ();
9250
9251 if (ia64_flag_var_tracking)
9252 {
9253 timevar_push (TV_VAR_TRACKING);
9254 variable_tracking_main ();
9255 timevar_pop (TV_VAR_TRACKING);
9256 }
9257 df_finish_pass (false);
9258 }
9259 \f
9260 /* Return true if REGNO is used by the epilogue. */
9261
9262 int
9263 ia64_epilogue_uses (int regno)
9264 {
9265 switch (regno)
9266 {
9267 case R_GR (1):
9268 /* With a call to a function in another module, we will write a new
9269 value to "gp". After returning from such a call, we need to make
9270 sure the function restores the original gp-value, even if the
9271 function itself does not use the gp anymore. */
9272 return !(TARGET_AUTO_PIC || TARGET_NO_PIC);
9273
9274 case IN_REG (0): case IN_REG (1): case IN_REG (2): case IN_REG (3):
9275 case IN_REG (4): case IN_REG (5): case IN_REG (6): case IN_REG (7):
9276 /* For functions defined with the syscall_linkage attribute, all
9277 input registers are marked as live at all function exits. This
9278 prevents the register allocator from using the input registers,
9279 which in turn makes it possible to restart a system call after
9280 an interrupt without having to save/restore the input registers.
9281 This also prevents kernel data from leaking to application code. */
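/* A hypothetical use of the attribute looks like
       extern long sys_foo (long) __attribute__ ((syscall_linkage));
   (sys_foo is only illustrative); in0-in7 are then treated as live at
   every exit of such a function.  */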
9282 return lookup_attribute ("syscall_linkage",
9283 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))) != NULL;
9284
9285 case R_BR (0):
9286 /* Conditional return patterns can't represent the use of `b0' as
9287 the return address, so we force the value live this way. */
9288 return 1;
9289
9290 case AR_PFS_REGNUM:
9291 /* Likewise for ar.pfs, which is used by br.ret. */
9292 return 1;
9293
9294 default:
9295 return 0;
9296 }
9297 }
9298
9299 /* Return true if REGNO is used by the frame unwinder. */
9300
9301 int
9302 ia64_eh_uses (int regno)
9303 {
9304 enum ia64_frame_regs r;
9305
9306 if (! reload_completed)
9307 return 0;
9308
9309 if (regno == 0)
9310 return 0;
9311
9312 for (r = reg_save_b0; r <= reg_save_ar_lc; r++)
9313 if (regno == current_frame_info.r[r]
9314 || regno == emitted_frame_related_regs[r])
9315 return 1;
9316
9317 return 0;
9318 }
9319 \f
9320 /* Return true if this goes in small data/bss. */
9321
9322 /* ??? We could also support our own long data here, generating movl/add/ld8
9323 instead of addl,ld8/ld8.  This makes the code bigger, but should make the
9324 code faster because there is one less load.  This also includes incomplete
9325 types which can't go in sdata/sbss.  */
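/* A sketch of the access sequences involved (illustrative only; register
   choices are arbitrary):
       addl r2 = @gprel(var), gp ;;  ld8 r3 = [r2]                   // sdata
       addl r2 = @ltoff(var), gp ;;  ld8 r2 = [r2] ;;  ld8 r3 = [r2] // via GOT
   The movl/add/ld8 alternative mentioned above would avoid the extra GOT
   load for large gp-relative data.  */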
9326
9327 static bool
9328 ia64_in_small_data_p (const_tree exp)
9329 {
9330 if (TARGET_NO_SDATA)
9331 return false;
9332
9333 /* We want to merge strings, so we never consider them small data. */
9334 if (TREE_CODE (exp) == STRING_CST)
9335 return false;
9336
9337 /* Functions are never small data. */
9338 if (TREE_CODE (exp) == FUNCTION_DECL)
9339 return false;
9340
9341 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
9342 {
9343 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
9344
9345 if (strcmp (section, ".sdata") == 0
9346 || strncmp (section, ".sdata.", 7) == 0
9347 || strncmp (section, ".gnu.linkonce.s.", 16) == 0
9348 || strcmp (section, ".sbss") == 0
9349 || strncmp (section, ".sbss.", 6) == 0
9350 || strncmp (section, ".gnu.linkonce.sb.", 17) == 0)
9351 return true;
9352 }
9353 else
9354 {
9355 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
9356
9357 /* If this is an incomplete type with size 0, then we can't put it
9358 in sdata because it might be too big when completed. */
9359 if (size > 0 && size <= ia64_section_threshold)
9360 return true;
9361 }
9362
9363 return false;
9364 }
9365 \f
9366 /* Output assembly directives for prologue regions. */
9367
9368 /* The current basic block number. */
9369
9370 static bool last_block;
9371
9372 /* True if we need a copy_state command at the start of the next block. */
9373
9374 static bool need_copy_state;
9375
9376 #ifndef MAX_ARTIFICIAL_LABEL_BYTES
9377 # define MAX_ARTIFICIAL_LABEL_BYTES 30
9378 #endif
9379
9380 /* Emit a debugging label after a call-frame-related insn. We'd
9381 rather output the label right away, but we'd have to output it
9382 after, not before, the instruction, and the instruction has not
9383 been output yet. So we emit the label after the insn, delete it to
9384 avoid introducing basic blocks, and mark it as preserved so that
9385 it is still output, since it is referenced in debug info.  */
9386
9387 static const char *
9388 ia64_emit_deleted_label_after_insn (rtx insn)
9389 {
9390 char label[MAX_ARTIFICIAL_LABEL_BYTES];
9391 rtx lb = gen_label_rtx ();
9392 rtx label_insn = emit_label_after (lb, insn);
9393
9394 LABEL_PRESERVE_P (lb) = 1;
9395
9396 delete_insn (label_insn);
9397
9398 ASM_GENERATE_INTERNAL_LABEL (label, "L", CODE_LABEL_NUMBER (label_insn));
9399
9400 return xstrdup (label);
9401 }
9402
9403 /* Define the CFA after INSN with the steady-state definition. */
9404
9405 static void
9406 ia64_dwarf2out_def_steady_cfa (rtx insn)
9407 {
9408 rtx fp = frame_pointer_needed
9409 ? hard_frame_pointer_rtx
9410 : stack_pointer_rtx;
9411
9412 dwarf2out_def_cfa
9413 (ia64_emit_deleted_label_after_insn (insn),
9414 REGNO (fp),
9415 ia64_initial_elimination_offset
9416 (REGNO (arg_pointer_rtx), REGNO (fp))
9417 + ARG_POINTER_CFA_OFFSET (current_function_decl));
9418 }
9419
9420 /* The generic dwarf2 frame debug info generator does not define a
9421 separate region for the very end of the epilogue, so refrain from
9422 doing so in the IA64-specific code as well. */
9423
9424 #define IA64_CHANGE_CFA_IN_EPILOGUE 0
9425
9426 /* The function emits unwind directives for the start of an epilogue. */
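/* An illustrative emitted sequence when the epilogue is not in the last
   block of the function:
       .label_state 1
       .restore sp
   and later, at the start of the following block (see
   process_for_unwind_directive):
       .body
       .copy_state 1
   where the state number 1 is only an example.  */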
9427
9428 static void
9429 process_epilogue (FILE *asm_out_file, rtx insn, bool unwind, bool frame)
9430 {
9431 /* If this isn't the last block of the function, then we need to label the
9432 current state, and copy it back in at the start of the next block. */
9433
9434 if (!last_block)
9435 {
9436 if (unwind)
9437 fprintf (asm_out_file, "\t.label_state %d\n",
9438 ++cfun->machine->state_num);
9439 need_copy_state = true;
9440 }
9441
9442 if (unwind)
9443 fprintf (asm_out_file, "\t.restore sp\n");
9444 if (IA64_CHANGE_CFA_IN_EPILOGUE && frame)
9445 dwarf2out_def_cfa (ia64_emit_deleted_label_after_insn (insn),
9446 STACK_POINTER_REGNUM, INCOMING_FRAME_SP_OFFSET);
9447 }
9448
9449 /* This function processes a SET pattern looking for specific patterns
9450 which result in emitting an assembly directive required for unwinding. */
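/* For example, a prologue stack adjustment such as
       (set (reg sp) (plus (reg sp) (const_int -64)))
   results in a ".fframe 64" directive, and copying b0 into its save
   register results in ".save rp, rNN" (see the cases below).  */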
9451
9452 static int
9453 process_set (FILE *asm_out_file, rtx pat, rtx insn, bool unwind, bool frame)
9454 {
9455 rtx src = SET_SRC (pat);
9456 rtx dest = SET_DEST (pat);
9457 int src_regno, dest_regno;
9458
9459 /* Look for the ALLOC insn. */
9460 if (GET_CODE (src) == UNSPEC_VOLATILE
9461 && XINT (src, 1) == UNSPECV_ALLOC
9462 && GET_CODE (dest) == REG)
9463 {
9464 dest_regno = REGNO (dest);
9465
9466 /* If this is the final destination for ar.pfs, then this must
9467 be the alloc in the prologue. */
9468 if (dest_regno == current_frame_info.r[reg_save_ar_pfs])
9469 {
9470 if (unwind)
9471 fprintf (asm_out_file, "\t.save ar.pfs, r%d\n",
9472 ia64_dbx_register_number (dest_regno));
9473 }
9474 else
9475 {
9476 /* This must be an alloc before a sibcall. We must drop the
9477 old frame info. The easiest way to drop the old frame
9478 info is to ensure we had a ".restore sp" directive
9479 followed by a new prologue. If the procedure doesn't
9480 have a memory-stack frame, we'll issue a dummy ".restore
9481 sp" now. */
9482 if (current_frame_info.total_size == 0 && !frame_pointer_needed)
9483 /* If we haven't done process_epilogue () yet, do it now.  */
9484 process_epilogue (asm_out_file, insn, unwind, frame);
9485 if (unwind)
9486 fprintf (asm_out_file, "\t.prologue\n");
9487 }
9488 return 1;
9489 }
9490
9491 /* Look for SP = .... */
9492 if (GET_CODE (dest) == REG && REGNO (dest) == STACK_POINTER_REGNUM)
9493 {
9494 if (GET_CODE (src) == PLUS)
9495 {
9496 rtx op0 = XEXP (src, 0);
9497 rtx op1 = XEXP (src, 1);
9498
9499 gcc_assert (op0 == dest && GET_CODE (op1) == CONST_INT);
9500
9501 if (INTVAL (op1) < 0)
9502 {
9503 gcc_assert (!frame_pointer_needed);
9504 if (unwind)
9505 fprintf (asm_out_file, "\t.fframe "HOST_WIDE_INT_PRINT_DEC"\n",
9506 -INTVAL (op1));
9507 if (frame)
9508 ia64_dwarf2out_def_steady_cfa (insn);
9509 }
9510 else
9511 process_epilogue (asm_out_file, insn, unwind, frame);
9512 }
9513 else
9514 {
9515 gcc_assert (GET_CODE (src) == REG
9516 && REGNO (src) == HARD_FRAME_POINTER_REGNUM);
9517 process_epilogue (asm_out_file, insn, unwind, frame);
9518 }
9519
9520 return 1;
9521 }
9522
9523 /* Register move we need to look at. */
9524 if (GET_CODE (dest) == REG && GET_CODE (src) == REG)
9525 {
9526 src_regno = REGNO (src);
9527 dest_regno = REGNO (dest);
9528
9529 switch (src_regno)
9530 {
9531 case BR_REG (0):
9532 /* Saving return address pointer. */
9533 gcc_assert (dest_regno == current_frame_info.r[reg_save_b0]);
9534 if (unwind)
9535 fprintf (asm_out_file, "\t.save rp, r%d\n",
9536 ia64_dbx_register_number (dest_regno));
9537 return 1;
9538
9539 case PR_REG (0):
9540 gcc_assert (dest_regno == current_frame_info.r[reg_save_pr]);
9541 if (unwind)
9542 fprintf (asm_out_file, "\t.save pr, r%d\n",
9543 ia64_dbx_register_number (dest_regno));
9544 return 1;
9545
9546 case AR_UNAT_REGNUM:
9547 gcc_assert (dest_regno == current_frame_info.r[reg_save_ar_unat]);
9548 if (unwind)
9549 fprintf (asm_out_file, "\t.save ar.unat, r%d\n",
9550 ia64_dbx_register_number (dest_regno));
9551 return 1;
9552
9553 case AR_LC_REGNUM:
9554 gcc_assert (dest_regno == current_frame_info.r[reg_save_ar_lc]);
9555 if (unwind)
9556 fprintf (asm_out_file, "\t.save ar.lc, r%d\n",
9557 ia64_dbx_register_number (dest_regno));
9558 return 1;
9559
9560 case STACK_POINTER_REGNUM:
9561 gcc_assert (dest_regno == HARD_FRAME_POINTER_REGNUM
9562 && frame_pointer_needed);
9563 if (unwind)
9564 fprintf (asm_out_file, "\t.vframe r%d\n",
9565 ia64_dbx_register_number (dest_regno));
9566 if (frame)
9567 ia64_dwarf2out_def_steady_cfa (insn);
9568 return 1;
9569
9570 default:
9571 /* Everything else should indicate being stored to memory. */
9572 gcc_unreachable ();
9573 }
9574 }
9575
9576 /* Memory store we need to look at. */
9577 if (GET_CODE (dest) == MEM && GET_CODE (src) == REG)
9578 {
9579 long off;
9580 rtx base;
9581 const char *saveop;
9582
9583 if (GET_CODE (XEXP (dest, 0)) == REG)
9584 {
9585 base = XEXP (dest, 0);
9586 off = 0;
9587 }
9588 else
9589 {
9590 gcc_assert (GET_CODE (XEXP (dest, 0)) == PLUS
9591 && GET_CODE (XEXP (XEXP (dest, 0), 1)) == CONST_INT);
9592 base = XEXP (XEXP (dest, 0), 0);
9593 off = INTVAL (XEXP (XEXP (dest, 0), 1));
9594 }
9595
9596 if (base == hard_frame_pointer_rtx)
9597 {
9598 saveop = ".savepsp";
9599 off = - off;
9600 }
9601 else
9602 {
9603 gcc_assert (base == stack_pointer_rtx);
9604 saveop = ".savesp";
9605 }
9606
9607 src_regno = REGNO (src);
9608 switch (src_regno)
9609 {
9610 case BR_REG (0):
9611 gcc_assert (!current_frame_info.r[reg_save_b0]);
9612 if (unwind)
9613 fprintf (asm_out_file, "\t%s rp, %ld\n", saveop, off);
9614 return 1;
9615
9616 case PR_REG (0):
9617 gcc_assert (!current_frame_info.r[reg_save_pr]);
9618 if (unwind)
9619 fprintf (asm_out_file, "\t%s pr, %ld\n", saveop, off);
9620 return 1;
9621
9622 case AR_LC_REGNUM:
9623 gcc_assert (!current_frame_info.r[reg_save_ar_lc]);
9624 if (unwind)
9625 fprintf (asm_out_file, "\t%s ar.lc, %ld\n", saveop, off);
9626 return 1;
9627
9628 case AR_PFS_REGNUM:
9629 gcc_assert (!current_frame_info.r[reg_save_ar_pfs]);
9630 if (unwind)
9631 fprintf (asm_out_file, "\t%s ar.pfs, %ld\n", saveop, off);
9632 return 1;
9633
9634 case AR_UNAT_REGNUM:
9635 gcc_assert (!current_frame_info.r[reg_save_ar_unat]);
9636 if (unwind)
9637 fprintf (asm_out_file, "\t%s ar.unat, %ld\n", saveop, off);
9638 return 1;
9639
9640 case GR_REG (4):
9641 case GR_REG (5):
9642 case GR_REG (6):
9643 case GR_REG (7):
9644 if (unwind)
9645 fprintf (asm_out_file, "\t.save.g 0x%x\n",
9646 1 << (src_regno - GR_REG (4)));
9647 return 1;
9648
9649 case BR_REG (1):
9650 case BR_REG (2):
9651 case BR_REG (3):
9652 case BR_REG (4):
9653 case BR_REG (5):
9654 if (unwind)
9655 fprintf (asm_out_file, "\t.save.b 0x%x\n",
9656 1 << (src_regno - BR_REG (1)));
9657 return 1;
9658
9659 case FR_REG (2):
9660 case FR_REG (3):
9661 case FR_REG (4):
9662 case FR_REG (5):
9663 if (unwind)
9664 fprintf (asm_out_file, "\t.save.f 0x%x\n",
9665 1 << (src_regno - FR_REG (2)));
9666 return 1;
9667
9668 case FR_REG (16): case FR_REG (17): case FR_REG (18): case FR_REG (19):
9669 case FR_REG (20): case FR_REG (21): case FR_REG (22): case FR_REG (23):
9670 case FR_REG (24): case FR_REG (25): case FR_REG (26): case FR_REG (27):
9671 case FR_REG (28): case FR_REG (29): case FR_REG (30): case FR_REG (31):
9672 if (unwind)
9673 fprintf (asm_out_file, "\t.save.gf 0x0, 0x%x\n",
9674 1 << (src_regno - FR_REG (12)));
9675 return 1;
9676
9677 default:
9678 return 0;
9679 }
9680 }
9681
9682 return 0;
9683 }
9684
9685
9686 /* This function looks at a single insn and emits any directives
9687 required to unwind this insn. */
9688 void
9689 process_for_unwind_directive (FILE *asm_out_file, rtx insn)
9690 {
9691 bool unwind = (flag_unwind_tables
9692 || (flag_exceptions && !USING_SJLJ_EXCEPTIONS));
9693 bool frame = dwarf2out_do_frame ();
9694
9695 if (unwind || frame)
9696 {
9697 rtx pat;
9698
9699 if (NOTE_INSN_BASIC_BLOCK_P (insn))
9700 {
9701 last_block = NOTE_BASIC_BLOCK (insn)->next_bb == EXIT_BLOCK_PTR;
9702
9703 /* Restore unwind state from immediately before the epilogue. */
9704 if (need_copy_state)
9705 {
9706 if (unwind)
9707 {
9708 fprintf (asm_out_file, "\t.body\n");
9709 fprintf (asm_out_file, "\t.copy_state %d\n",
9710 cfun->machine->state_num);
9711 }
9712 if (IA64_CHANGE_CFA_IN_EPILOGUE && frame)
9713 ia64_dwarf2out_def_steady_cfa (insn);
9714 need_copy_state = false;
9715 }
9716 }
9717
9718 if (GET_CODE (insn) == NOTE || ! RTX_FRAME_RELATED_P (insn))
9719 return;
9720
9721 pat = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
9722 if (pat)
9723 pat = XEXP (pat, 0);
9724 else
9725 pat = PATTERN (insn);
9726
9727 switch (GET_CODE (pat))
9728 {
9729 case SET:
9730 process_set (asm_out_file, pat, insn, unwind, frame);
9731 break;
9732
9733 case PARALLEL:
9734 {
9735 int par_index;
9736 int limit = XVECLEN (pat, 0);
9737 for (par_index = 0; par_index < limit; par_index++)
9738 {
9739 rtx x = XVECEXP (pat, 0, par_index);
9740 if (GET_CODE (x) == SET)
9741 process_set (asm_out_file, x, insn, unwind, frame);
9742 }
9743 break;
9744 }
9745
9746 default:
9747 gcc_unreachable ();
9748 }
9749 }
9750 }
9751
9752 \f
9753 enum ia64_builtins
9754 {
9755 IA64_BUILTIN_BSP,
9756 IA64_BUILTIN_COPYSIGNQ,
9757 IA64_BUILTIN_FABSQ,
9758 IA64_BUILTIN_FLUSHRS,
9759 IA64_BUILTIN_INFQ
9760 };
9761
9762 void
9763 ia64_init_builtins (void)
9764 {
9765 tree fpreg_type;
9766 tree float80_type;
9767
9768 /* The __fpreg type. */
9769 fpreg_type = make_node (REAL_TYPE);
9770 TYPE_PRECISION (fpreg_type) = 82;
9771 layout_type (fpreg_type);
9772 (*lang_hooks.types.register_builtin_type) (fpreg_type, "__fpreg");
9773
9774 /* The __float80 type. */
9775 float80_type = make_node (REAL_TYPE);
9776 TYPE_PRECISION (float80_type) = 80;
9777 layout_type (float80_type);
9778 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
9779
9780 /* The __float128 type. */
9781 if (!TARGET_HPUX)
9782 {
9783 tree ftype, decl;
9784 tree float128_type = make_node (REAL_TYPE);
9785
9786 TYPE_PRECISION (float128_type) = 128;
9787 layout_type (float128_type);
9788 (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
9789
9790 /* TFmode support builtins. */
9791 ftype = build_function_type (float128_type, void_list_node);
9792 add_builtin_function ("__builtin_infq", ftype,
9793 IA64_BUILTIN_INFQ, BUILT_IN_MD,
9794 NULL, NULL_TREE);
9795
9796 ftype = build_function_type_list (float128_type,
9797 float128_type,
9798 NULL_TREE);
9799 decl = add_builtin_function ("__builtin_fabsq", ftype,
9800 IA64_BUILTIN_FABSQ, BUILT_IN_MD,
9801 "__fabstf2", NULL_TREE);
9802 TREE_READONLY (decl) = 1;
9803
9804 ftype = build_function_type_list (float128_type,
9805 float128_type,
9806 float128_type,
9807 NULL_TREE);
9808 decl = add_builtin_function ("__builtin_copysignq", ftype,
9809 IA64_BUILTIN_COPYSIGNQ, BUILT_IN_MD,
9810 "__copysigntf3", NULL_TREE);
9811 TREE_READONLY (decl) = 1;
9812 }
9813 else
9814 /* Under HPUX, this is a synonym for "long double". */
9815 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
9816 "__float128");
9817
9818 #define def_builtin(name, type, code) \
9819 add_builtin_function ((name), (type), (code), BUILT_IN_MD, \
9820 NULL, NULL_TREE)
9821
9822 def_builtin ("__builtin_ia64_bsp",
9823 build_function_type (ptr_type_node, void_list_node),
9824 IA64_BUILTIN_BSP);
9825
9826 def_builtin ("__builtin_ia64_flushrs",
9827 build_function_type (void_type_node, void_list_node),
9828 IA64_BUILTIN_FLUSHRS);
9829
9830 #undef def_builtin
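/* Illustrative user-level uses of the builtins defined above:
       void *bsp = __builtin_ia64_bsp ();   // current RSE backing store pointer
       __builtin_ia64_flushrs ();           // flush the register stack to memory
   and, when __float128 is a distinct type, __builtin_infq (),
   __builtin_fabsq (x) and __builtin_copysignq (x, y).  */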
9831
9832 if (TARGET_HPUX)
9833 {
9834 if (built_in_decls [BUILT_IN_FINITE])
9835 set_user_assembler_name (built_in_decls [BUILT_IN_FINITE],
9836 "_Isfinite");
9837 if (built_in_decls [BUILT_IN_FINITEF])
9838 set_user_assembler_name (built_in_decls [BUILT_IN_FINITEF],
9839 "_Isfinitef");
9840 if (built_in_decls [BUILT_IN_FINITEL])
9841 set_user_assembler_name (built_in_decls [BUILT_IN_FINITEL],
9842 "_Isfinitef128");
9843 }
9844 }
9845
9846 rtx
9847 ia64_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
9848 enum machine_mode mode ATTRIBUTE_UNUSED,
9849 int ignore ATTRIBUTE_UNUSED)
9850 {
9851 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
9852 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
9853
9854 switch (fcode)
9855 {
9856 case IA64_BUILTIN_BSP:
9857 if (! target || ! register_operand (target, DImode))
9858 target = gen_reg_rtx (DImode);
9859 emit_insn (gen_bsp_value (target));
9860 #ifdef POINTERS_EXTEND_UNSIGNED
9861 target = convert_memory_address (ptr_mode, target);
9862 #endif
9863 return target;
9864
9865 case IA64_BUILTIN_FLUSHRS:
9866 emit_insn (gen_flushrs ());
9867 return const0_rtx;
9868
9869 case IA64_BUILTIN_INFQ:
9870 {
9871 REAL_VALUE_TYPE inf;
9872 rtx tmp;
9873
9874 real_inf (&inf);
9875 tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, mode);
9876
9877 tmp = validize_mem (force_const_mem (mode, tmp));
9878
9879 if (target == 0)
9880 target = gen_reg_rtx (mode);
9881
9882 emit_move_insn (target, tmp);
9883 return target;
9884 }
9885
9886 case IA64_BUILTIN_FABSQ:
9887 case IA64_BUILTIN_COPYSIGNQ:
9888 return expand_call (exp, target, ignore);
9889
9890 default:
9891 gcc_unreachable ();
9892 }
9893
9894 return NULL_RTX;
9895 }
9896
9897 /* On HP-UX IA64, aggregate parameters are passed in the most
9898 significant bits of the stack slot.  */
9899
9900 enum direction
9901 ia64_hpux_function_arg_padding (enum machine_mode mode, const_tree type)
9902 {
9903 /* Exception to normal case for structures/unions/etc. */
9904
9905 if (type && AGGREGATE_TYPE_P (type)
9906 && int_size_in_bytes (type) < UNITS_PER_WORD)
9907 return upward;
9908
9909 /* Fall back to the default. */
9910 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
9911 }
9912
9913 /* Emit text to declare externally defined variables and functions, because
9914 the Intel assembler does not support undefined externals. */
9915
9916 void
9917 ia64_asm_output_external (FILE *file, tree decl, const char *name)
9918 {
9919 /* We output the name if and only if TREE_SYMBOL_REFERENCED is
9920 set in order to avoid putting out names that are never really
9921 used. */
9922 if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
9923 {
9924 /* maybe_assemble_visibility will return 1 if the assembler
9925 visibility directive is output. */
9926 int need_visibility = ((*targetm.binds_local_p) (decl)
9927 && maybe_assemble_visibility (decl));
9928
9929 /* GNU as does not need anything here, but the HP linker does
9930 need something for external functions. */
9931 if ((TARGET_HPUX_LD || !TARGET_GNU_AS)
9932 && TREE_CODE (decl) == FUNCTION_DECL)
9933 (*targetm.asm_out.globalize_decl_name) (file, decl);
9934 else if (need_visibility && !TARGET_GNU_AS)
9935 (*targetm.asm_out.globalize_label) (file, name);
9936 }
9937 }
9938
9939 /* Set SImode div/mod functions; init_integral_libfuncs only initializes
9940 modes of word_mode and larger.  Rename the TFmode libfuncs using the
9941 HPUX conventions.  __divtf3 is used for XFmode; we need to keep it for
9942 backward compatibility.  */
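/* Under these conventions a __float128 (TFmode) addition such as "a + b"
   is expanded as a library call to _U_Qfadd rather than to the default
   __addtf3 (illustrative; see the set_optab_libfunc calls below).  */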
9943
9944 static void
9945 ia64_init_libfuncs (void)
9946 {
9947 set_optab_libfunc (sdiv_optab, SImode, "__divsi3");
9948 set_optab_libfunc (udiv_optab, SImode, "__udivsi3");
9949 set_optab_libfunc (smod_optab, SImode, "__modsi3");
9950 set_optab_libfunc (umod_optab, SImode, "__umodsi3");
9951
9952 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
9953 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
9954 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
9955 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
9956 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
9957
9958 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
9959 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
9960 set_conv_libfunc (sext_optab, TFmode, XFmode, "_U_Qfcnvff_f80_to_quad");
9961 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
9962 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
9963 set_conv_libfunc (trunc_optab, XFmode, TFmode, "_U_Qfcnvff_quad_to_f80");
9964
9965 set_conv_libfunc (sfix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_sgl");
9966 set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
9967 set_conv_libfunc (sfix_optab, TImode, TFmode, "_U_Qfcnvfxt_quad_to_quad");
9968 set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxut_quad_to_sgl");
9969 set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxut_quad_to_dbl");
9970
9971 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
9972 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
9973 set_conv_libfunc (sfloat_optab, TFmode, TImode, "_U_Qfcnvxf_quad_to_quad");
9974 /* HP-UX 11.23 libc does not have a function for unsigned
9975 SImode-to-TFmode conversion. */
9976 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_U_Qfcnvxuf_dbl_to_quad");
9977 }
9978
9979 /* Rename all the TFmode libfuncs using the HPUX conventions. */
9980
9981 static void
9982 ia64_hpux_init_libfuncs (void)
9983 {
9984 ia64_init_libfuncs ();
9985
9986 /* The HP SI millicode division and mod functions expect DI arguments.
9987 By turning them off completely we avoid using both libgcc and the
9988 non-standard millicode routines and use the HP DI millicode routines
9989 instead. */
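/* Illustratively, a 32-bit signed division is then widened and compiled
   as a call to __milli_divI rather than to __divsi3 (see the DImode
   libfunc settings below).  */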
9990
9991 set_optab_libfunc (sdiv_optab, SImode, 0);
9992 set_optab_libfunc (udiv_optab, SImode, 0);
9993 set_optab_libfunc (smod_optab, SImode, 0);
9994 set_optab_libfunc (umod_optab, SImode, 0);
9995
9996 set_optab_libfunc (sdiv_optab, DImode, "__milli_divI");
9997 set_optab_libfunc (udiv_optab, DImode, "__milli_divU");
9998 set_optab_libfunc (smod_optab, DImode, "__milli_remI");
9999 set_optab_libfunc (umod_optab, DImode, "__milli_remU");
10000
10001 /* HP-UX libc has TF min/max/abs routines in it. */
10002 set_optab_libfunc (smin_optab, TFmode, "_U_Qfmin");
10003 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
10004 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
10005
10006 /* ia64_expand_compare uses this. */
10007 cmptf_libfunc = init_one_libfunc ("_U_Qfcmp");
10008
10009 /* These should never be used. */
10010 set_optab_libfunc (eq_optab, TFmode, 0);
10011 set_optab_libfunc (ne_optab, TFmode, 0);
10012 set_optab_libfunc (gt_optab, TFmode, 0);
10013 set_optab_libfunc (ge_optab, TFmode, 0);
10014 set_optab_libfunc (lt_optab, TFmode, 0);
10015 set_optab_libfunc (le_optab, TFmode, 0);
10016 }
10017
10018 /* Rename the division and modulus functions in VMS. */
10019
10020 static void
10021 ia64_vms_init_libfuncs (void)
10022 {
10023 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
10024 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
10025 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
10026 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
10027 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
10028 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
10029 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
10030 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
10031 }
10032
10033 /* Rename the TFmode libfuncs available from soft-fp in glibc using
10034 the HPUX conventions. */
10035
10036 static void
10037 ia64_sysv4_init_libfuncs (void)
10038 {
10039 ia64_init_libfuncs ();
10040
10041 /* These functions are not part of the HPUX TFmode interface. We
10042 use them instead of _U_Qfcmp, which doesn't work the way we
10043 expect. */
10044 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
10045 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
10046 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
10047 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
10048 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
10049 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
10050
10051 /* We leave out _U_Qfmin, _U_Qfmax and _U_Qfabs since soft-fp in
10052 glibc doesn't have them. */
10053 }
10054
10055 /* Use soft-fp. */
10056
10057 static void
10058 ia64_soft_fp_init_libfuncs (void)
10059 {
10060 }
10061 \f
10062 /* For HPUX, it is illegal to have relocations in shared segments. */
10063
10064 static int
10065 ia64_hpux_reloc_rw_mask (void)
10066 {
10067 return 3;
10068 }
10069
10070 /* For others, relax this so that relocations to local data go in
10071 read-only segments, but we still cannot allow global relocations
10072 in read-only segments.  */
10073
10074 static int
10075 ia64_reloc_rw_mask (void)
10076 {
10077 return flag_pic ? 3 : 2;
10078 }
10079
10080 /* Return the section to use for X. The only special thing we do here
10081 is to honor small data. */
10082
10083 static section *
10084 ia64_select_rtx_section (enum machine_mode mode, rtx x,
10085 unsigned HOST_WIDE_INT align)
10086 {
10087 if (GET_MODE_SIZE (mode) > 0
10088 && GET_MODE_SIZE (mode) <= ia64_section_threshold
10089 && !TARGET_NO_SDATA)
10090 return sdata_section;
10091 else
10092 return default_elf_select_rtx_section (mode, x, align);
10093 }
10094
10095 static unsigned int
10096 ia64_section_type_flags (tree decl, const char *name, int reloc)
10097 {
10098 unsigned int flags = 0;
10099
10100 if (strcmp (name, ".sdata") == 0
10101 || strncmp (name, ".sdata.", 7) == 0
10102 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
10103 || strncmp (name, ".sdata2.", 8) == 0
10104 || strncmp (name, ".gnu.linkonce.s2.", 17) == 0
10105 || strcmp (name, ".sbss") == 0
10106 || strncmp (name, ".sbss.", 6) == 0
10107 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
10108 flags = SECTION_SMALL;
10109
10110 flags |= default_section_type_flags (decl, name, reloc);
10111 return flags;
10112 }
10113
10114 /* Returns true if FNTYPE (a FUNCTION_TYPE or a METHOD_TYPE) returns a
10115 structure type and that the address of that type should be passed
10116 in out0, rather than in r8. */
10117
10118 static bool
10119 ia64_struct_retval_addr_is_first_parm_p (tree fntype)
10120 {
10121 tree ret_type = TREE_TYPE (fntype);
10122
10123 /* The Itanium C++ ABI requires that out0, rather than r8, be used
10124 as the structure return address parameter, if the return value
10125 type has a non-trivial copy constructor or destructor. It is not
10126 clear if this same convention should be used for other
10127 programming languages. Until G++ 3.4, we incorrectly used r8 for
10128 these return values. */
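/* For example (illustrative), a C++ function returning a class with a
   user-defined copy constructor gets the return-slot address in out0,
   whereas a function returning a plain POD struct still uses r8 (see
   ia64_struct_value_rtx below).  */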
10129 return (abi_version_at_least (2)
10130 && ret_type
10131 && TYPE_MODE (ret_type) == BLKmode
10132 && TREE_ADDRESSABLE (ret_type)
10133 && strcmp (lang_hooks.name, "GNU C++") == 0);
10134 }
10135
10136 /* Output the assembler code for a thunk function. THUNK_DECL is the
10137 declaration for the thunk function itself, FUNCTION is the decl for
10138 the target function. DELTA is an immediate constant offset to be
10139 added to THIS. If VCALL_OFFSET is nonzero, the word at
10140 *(*this + vcall_offset) should be added to THIS. */
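/* In effect the emitted thunk performs (pseudo-code sketch):
       this += DELTA;
       if (VCALL_OFFSET)
         this += *(*this + VCALL_OFFSET);
       tail-call FUNCTION (this, ...);
   matching the description above.  */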
10141
10142 static void
10143 ia64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
10144 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
10145 tree function)
10146 {
10147 rtx this_rtx, insn, funexp;
10148 unsigned int this_parmno;
10149 unsigned int this_regno;
10150 rtx delta_rtx;
10151
10152 reload_completed = 1;
10153 epilogue_completed = 1;
10154
10155 /* Set things up as ia64_expand_prologue might. */
10156 last_scratch_gr_reg = 15;
10157
10158 memset (&current_frame_info, 0, sizeof (current_frame_info));
10159 current_frame_info.spill_cfa_off = -16;
10160 current_frame_info.n_input_regs = 1;
10161 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
10162
10163 /* Mark the end of the (empty) prologue. */
10164 emit_note (NOTE_INSN_PROLOGUE_END);
10165
10166 /* Figure out whether "this" will be the first parameter (the
10167 typical case) or the second parameter (as happens when the
10168 virtual function returns certain class objects). */
10169 this_parmno
10170 = (ia64_struct_retval_addr_is_first_parm_p (TREE_TYPE (thunk))
10171 ? 1 : 0);
10172 this_regno = IN_REG (this_parmno);
10173 if (!TARGET_REG_NAMES)
10174 reg_names[this_regno] = ia64_reg_numbers[this_parmno];
10175
10176 this_rtx = gen_rtx_REG (Pmode, this_regno);
10177
10178 /* Apply the constant offset, if required. */
10179 delta_rtx = GEN_INT (delta);
10180 if (TARGET_ILP32)
10181 {
10182 rtx tmp = gen_rtx_REG (ptr_mode, this_regno);
10183 REG_POINTER (tmp) = 1;
10184 if (delta && satisfies_constraint_I (delta_rtx))
10185 {
10186 emit_insn (gen_ptr_extend_plus_imm (this_rtx, tmp, delta_rtx));
10187 delta = 0;
10188 }
10189 else
10190 emit_insn (gen_ptr_extend (this_rtx, tmp));
10191 }
10192 if (delta)
10193 {
10194 if (!satisfies_constraint_I (delta_rtx))
10195 {
10196 rtx tmp = gen_rtx_REG (Pmode, 2);
10197 emit_move_insn (tmp, delta_rtx);
10198 delta_rtx = tmp;
10199 }
10200 emit_insn (gen_adddi3 (this_rtx, this_rtx, delta_rtx));
10201 }
10202
10203 /* Apply the offset from the vtable, if required. */
10204 if (vcall_offset)
10205 {
10206 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
10207 rtx tmp = gen_rtx_REG (Pmode, 2);
10208
10209 if (TARGET_ILP32)
10210 {
10211 rtx t = gen_rtx_REG (ptr_mode, 2);
10212 REG_POINTER (t) = 1;
10213 emit_move_insn (t, gen_rtx_MEM (ptr_mode, this_rtx));
10214 if (satisfies_constraint_I (vcall_offset_rtx))
10215 {
10216 emit_insn (gen_ptr_extend_plus_imm (tmp, t, vcall_offset_rtx));
10217 vcall_offset = 0;
10218 }
10219 else
10220 emit_insn (gen_ptr_extend (tmp, t));
10221 }
10222 else
10223 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
10224
10225 if (vcall_offset)
10226 {
10227 if (!satisfies_constraint_J (vcall_offset_rtx))
10228 {
10229 rtx tmp2 = gen_rtx_REG (Pmode, next_scratch_gr_reg ());
10230 emit_move_insn (tmp2, vcall_offset_rtx);
10231 vcall_offset_rtx = tmp2;
10232 }
10233 emit_insn (gen_adddi3 (tmp, tmp, vcall_offset_rtx));
10234 }
10235
10236 if (TARGET_ILP32)
10237 emit_insn (gen_zero_extendsidi2 (tmp, gen_rtx_MEM (ptr_mode, tmp)));
10238 else
10239 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
10240
10241 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
10242 }
10243
10244 /* Generate a tail call to the target function. */
10245 if (! TREE_USED (function))
10246 {
10247 assemble_external (function);
10248 TREE_USED (function) = 1;
10249 }
10250 funexp = XEXP (DECL_RTL (function), 0);
10251 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
10252 ia64_expand_call (NULL_RTX, funexp, NULL_RTX, 1);
10253 insn = get_last_insn ();
10254 SIBLING_CALL_P (insn) = 1;
10255
10256 /* Code generation for calls relies on splitting. */
10257 reload_completed = 1;
10258 epilogue_completed = 1;
10259 try_split (PATTERN (insn), insn, 0);
10260
10261 emit_barrier ();
10262
10263 /* Run just enough of rest_of_compilation to get the insns emitted.
10264 There's not really enough bulk here to make other passes such as
10265 instruction scheduling worth while. Note that use_thunk calls
10266 assemble_start_function and assemble_end_function. */
10267
10268 insn_locators_alloc ();
10269 emit_all_insn_group_barriers (NULL);
10270 insn = get_insns ();
10271 shorten_branches (insn);
10272 final_start_function (insn, file, 1);
10273 final (insn, file, 1);
10274 final_end_function ();
10275 free_after_compilation (cfun);
10276
10277 reload_completed = 0;
10278 epilogue_completed = 0;
10279 }
10280
10281 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
10282
10283 static rtx
10284 ia64_struct_value_rtx (tree fntype,
10285 int incoming ATTRIBUTE_UNUSED)
10286 {
10287 if (fntype && ia64_struct_retval_addr_is_first_parm_p (fntype))
10288 return NULL_RTX;
10289 return gen_rtx_REG (Pmode, GR_REG (8));
10290 }
10291
10292 static bool
10293 ia64_scalar_mode_supported_p (enum machine_mode mode)
10294 {
10295 switch (mode)
10296 {
10297 case QImode:
10298 case HImode:
10299 case SImode:
10300 case DImode:
10301 case TImode:
10302 return true;
10303
10304 case SFmode:
10305 case DFmode:
10306 case XFmode:
10307 case RFmode:
10308 return true;
10309
10310 case TFmode:
10311 return true;
10312
10313 default:
10314 return false;
10315 }
10316 }
10317
10318 static bool
10319 ia64_vector_mode_supported_p (enum machine_mode mode)
10320 {
10321 switch (mode)
10322 {
10323 case V8QImode:
10324 case V4HImode:
10325 case V2SImode:
10326 return true;
10327
10328 case V2SFmode:
10329 return true;
10330
10331 default:
10332 return false;
10333 }
10334 }
10335
10336 /* Implement the FUNCTION_PROFILER macro. */
10337
10338 void
10339 ia64_output_function_profiler (FILE *file, int labelno)
10340 {
10341 bool indirect_call;
10342
10343 /* If the function needs a static chain and the static chain
10344 register is r15, we use an indirect call so as to bypass
10345 the PLT stub in case the executable is dynamically linked,
10346 because the stub clobbers r15 as per 5.3.6 of the psABI.
10347 We don't need to do that in non-canonical PIC mode.  */
10348
10349 if (cfun->static_chain_decl && !TARGET_NO_PIC && !TARGET_AUTO_PIC)
10350 {
10351 gcc_assert (STATIC_CHAIN_REGNUM == 15);
10352 indirect_call = true;
10353 }
10354 else
10355 indirect_call = false;
10356
10357 if (TARGET_GNU_AS)
10358 fputs ("\t.prologue 4, r40\n", file);
10359 else
10360 fputs ("\t.prologue\n\t.save ar.pfs, r40\n", file);
10361 fputs ("\talloc out0 = ar.pfs, 8, 0, 4, 0\n", file);
10362
10363 if (NO_PROFILE_COUNTERS)
10364 fputs ("\tmov out3 = r0\n", file);
10365 else
10366 {
10367 char buf[20];
10368 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
10369
10370 if (TARGET_AUTO_PIC)
10371 fputs ("\tmovl out3 = @gprel(", file);
10372 else
10373 fputs ("\taddl out3 = @ltoff(", file);
10374 assemble_name (file, buf);
10375 if (TARGET_AUTO_PIC)
10376 fputs (")\n", file);
10377 else
10378 fputs ("), r1\n", file);
10379 }
10380
10381 if (indirect_call)
10382 fputs ("\taddl r14 = @ltoff(@fptr(_mcount)), r1\n", file);
10383 fputs ("\t;;\n", file);
10384
10385 fputs ("\t.save rp, r42\n", file);
10386 fputs ("\tmov out2 = b0\n", file);
10387 if (indirect_call)
10388 fputs ("\tld8 r14 = [r14]\n\t;;\n", file);
10389 fputs ("\t.body\n", file);
10390 fputs ("\tmov out1 = r1\n", file);
10391 if (indirect_call)
10392 {
10393 fputs ("\tld8 r16 = [r14], 8\n\t;;\n", file);
10394 fputs ("\tmov b6 = r16\n", file);
10395 fputs ("\tld8 r1 = [r14]\n", file);
10396 fputs ("\tbr.call.sptk.many b0 = b6\n\t;;\n", file);
10397 }
10398 else
10399 fputs ("\tbr.call.sptk.many b0 = _mcount\n\t;;\n", file);
10400 }
10401
10402 static GTY(()) rtx mcount_func_rtx;
10403 static rtx
10404 gen_mcount_func_rtx (void)
10405 {
10406 if (!mcount_func_rtx)
10407 mcount_func_rtx = init_one_libfunc ("_mcount");
10408 return mcount_func_rtx;
10409 }
10410
10411 void
10412 ia64_profile_hook (int labelno)
10413 {
10414 rtx label, ip;
10415
10416 if (NO_PROFILE_COUNTERS)
10417 label = const0_rtx;
10418 else
10419 {
10420 char buf[30];
10421 const char *label_name;
10422 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
10423 label_name = (*targetm.strip_name_encoding) (ggc_strdup (buf));
10424 label = gen_rtx_SYMBOL_REF (Pmode, label_name);
10425 SYMBOL_REF_FLAGS (label) = SYMBOL_FLAG_LOCAL;
10426 }
10427 ip = gen_reg_rtx (Pmode);
10428 emit_insn (gen_ip_value (ip));
10429 emit_library_call (gen_mcount_func_rtx (), LCT_NORMAL,
10430 VOIDmode, 3,
10431 gen_rtx_REG (Pmode, BR_REG (0)), Pmode,
10432 ip, Pmode,
10433 label, Pmode);
10434 }
10435
10436 /* Return the mangling of TYPE if it is an extended fundamental type. */
10437
10438 static const char *
10439 ia64_mangle_type (const_tree type)
10440 {
10441 type = TYPE_MAIN_VARIANT (type);
10442
10443 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
10444 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
10445 return NULL;
10446
10447 /* On HP-UX, "long double" is mangled as "e" so __float128 is
10448 mangled as "e". */
10449 if (!TARGET_HPUX && TYPE_MODE (type) == TFmode)
10450 return "g";
10451 /* On HP-UX, "e" is not available as a mangling of __float80 so use
10452 an extended mangling. Elsewhere, "e" is available since long
10453 double is 80 bits. */
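/* Illustrative manglings: a parameter of type __fpreg mangles as
   "u7__fpreg", so a function f (__fpreg) becomes _Z1fu7__fpreg; on HP-UX
   __float80 mangles as "u9__float80", elsewhere as "e".  */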
10454 if (TYPE_MODE (type) == XFmode)
10455 return TARGET_HPUX ? "u9__float80" : "e";
10456 if (TYPE_MODE (type) == RFmode)
10457 return "u7__fpreg";
10458 return NULL;
10459 }
10460
10461 /* Return the diagnostic message string if conversion from FROMTYPE to
10462 TOTYPE is not allowed, NULL otherwise. */
10463 static const char *
10464 ia64_invalid_conversion (const_tree fromtype, const_tree totype)
10465 {
10466 /* Reject nontrivial conversion to or from __fpreg. */
10467 if (TYPE_MODE (fromtype) == RFmode
10468 && TYPE_MODE (totype) != RFmode
10469 && TYPE_MODE (totype) != VOIDmode)
10470 return N_("invalid conversion from %<__fpreg%>");
10471 if (TYPE_MODE (totype) == RFmode
10472 && TYPE_MODE (fromtype) != RFmode)
10473 return N_("invalid conversion to %<__fpreg%>");
10474 return NULL;
10475 }
10476
10477 /* Return the diagnostic message string if the unary operation OP is
10478 not permitted on TYPE, NULL otherwise. */
10479 static const char *
10480 ia64_invalid_unary_op (int op, const_tree type)
10481 {
10482 /* Reject operations on __fpreg other than unary + or &. */
10483 if (TYPE_MODE (type) == RFmode
10484 && op != CONVERT_EXPR
10485 && op != ADDR_EXPR)
10486 return N_("invalid operation on %<__fpreg%>");
10487 return NULL;
10488 }
10489
10490 /* Return the diagnostic message string if the binary operation OP is
10491 not permitted on TYPE1 and TYPE2, NULL otherwise. */
10492 static const char *
10493 ia64_invalid_binary_op (int op ATTRIBUTE_UNUSED, const_tree type1, const_tree type2)
10494 {
10495 /* Reject operations on __fpreg. */
10496 if (TYPE_MODE (type1) == RFmode || TYPE_MODE (type2) == RFmode)
10497 return N_("invalid operation on %<__fpreg%>");
10498 return NULL;
10499 }
10500
10501 /* Implement overriding of the optimization options. */
10502 void
10503 ia64_optimization_options (int level ATTRIBUTE_UNUSED,
10504 int size ATTRIBUTE_UNUSED)
10505 {
10506 /* Let the scheduler form additional regions. */
10507 set_param_value ("max-sched-extend-regions-iters", 2);
10508
10509 /* Set the default values for cache-related parameters. */
10510 set_param_value ("simultaneous-prefetches", 6);
10511 set_param_value ("l1-cache-line-size", 32);
10512
10513 set_param_value ("sched-mem-true-dep-cost", 4);
10514 }
10515
10516 /* HP-UX version_id attribute.
10517 For object foo, if the version_id is set to 1234, put out an alias
10518 of '.alias foo "foo{1234}"'.  We can't use "foo{1234}" in anything
10519 other than an alias statement because it is an illegal symbol name.  */
10520
10521 static tree
10522 ia64_handle_version_id_attribute (tree *node ATTRIBUTE_UNUSED,
10523 tree name ATTRIBUTE_UNUSED,
10524 tree args,
10525 int flags ATTRIBUTE_UNUSED,
10526 bool *no_add_attrs)
10527 {
10528 tree arg = TREE_VALUE (args);
10529
10530 if (TREE_CODE (arg) != STRING_CST)
10531 {
10532 error ("version attribute is not a string");
10533 *no_add_attrs = true;
10534 return NULL_TREE;
10535 }
10536 return NULL_TREE;
10537 }
10538
10539 /* Target hook for c_mode_for_suffix. */
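/* With this hook, a constant written as 1.0q (illustrative) gets TFmode,
   i.e. __float128, and 1.0w gets XFmode, i.e. __float80.  */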
10540
10541 static enum machine_mode
10542 ia64_c_mode_for_suffix (char suffix)
10543 {
10544 if (suffix == 'q')
10545 return TFmode;
10546 if (suffix == 'w')
10547 return XFmode;
10548
10549 return VOIDmode;
10550 }
10551
10552 #include "gt-ia64.h"