1 /* Definitions of target machine for GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008,
3 2009, 2010, 2011
4 Free Software Foundation, Inc.
5 Contributed by James E. Wilson <wilson@cygnus.com> and
6 David Mosberger <davidm@hpl.hp.com>.
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3, or (at your option)
13 any later version.
14
15 GCC is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "output.h"
35 #include "insn-attr.h"
36 #include "flags.h"
37 #include "recog.h"
38 #include "expr.h"
39 #include "optabs.h"
40 #include "except.h"
41 #include "function.h"
42 #include "ggc.h"
43 #include "basic-block.h"
44 #include "libfuncs.h"
45 #include "diagnostic-core.h"
46 #include "sched-int.h"
47 #include "timevar.h"
48 #include "target.h"
49 #include "target-def.h"
50 #include "tm_p.h"
51 #include "hashtab.h"
52 #include "langhooks.h"
53 #include "cfglayout.h"
54 #include "gimple.h"
55 #include "intl.h"
56 #include "df.h"
57 #include "debug.h"
58 #include "params.h"
59 #include "dbgcnt.h"
60 #include "tm-constrs.h"
61 #include "sel-sched.h"
62 #include "reload.h"
63 #include "dwarf2out.h"
64 #include "opts.h"
65
66 /* This is used for communication between ASM_OUTPUT_LABEL and
67 ASM_OUTPUT_LABELREF. */
68 int ia64_asm_output_label = 0;
69
70 /* Register names for ia64_expand_prologue. */
71 static const char * const ia64_reg_numbers[96] =
72 { "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
73 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
74 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
75 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
76 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
77 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
78 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
79 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
80 "r96", "r97", "r98", "r99", "r100","r101","r102","r103",
81 "r104","r105","r106","r107","r108","r109","r110","r111",
82 "r112","r113","r114","r115","r116","r117","r118","r119",
83 "r120","r121","r122","r123","r124","r125","r126","r127"};
84
85 /* ??? These strings could be shared with REGISTER_NAMES. */
86 static const char * const ia64_input_reg_names[8] =
87 { "in0", "in1", "in2", "in3", "in4", "in5", "in6", "in7" };
88
89 /* ??? These strings could be shared with REGISTER_NAMES. */
90 static const char * const ia64_local_reg_names[80] =
91 { "loc0", "loc1", "loc2", "loc3", "loc4", "loc5", "loc6", "loc7",
92 "loc8", "loc9", "loc10","loc11","loc12","loc13","loc14","loc15",
93 "loc16","loc17","loc18","loc19","loc20","loc21","loc22","loc23",
94 "loc24","loc25","loc26","loc27","loc28","loc29","loc30","loc31",
95 "loc32","loc33","loc34","loc35","loc36","loc37","loc38","loc39",
96 "loc40","loc41","loc42","loc43","loc44","loc45","loc46","loc47",
97 "loc48","loc49","loc50","loc51","loc52","loc53","loc54","loc55",
98 "loc56","loc57","loc58","loc59","loc60","loc61","loc62","loc63",
99 "loc64","loc65","loc66","loc67","loc68","loc69","loc70","loc71",
100 "loc72","loc73","loc74","loc75","loc76","loc77","loc78","loc79" };
101
102 /* ??? These strings could be shared with REGISTER_NAMES. */
103 static const char * const ia64_output_reg_names[8] =
104 { "out0", "out1", "out2", "out3", "out4", "out5", "out6", "out7" };
105
106 /* Determines whether we run our final scheduling pass or not. We always
107 avoid the normal second scheduling pass. */
108 static int ia64_flag_schedule_insns2;
109
110 /* Determines whether we run variable tracking in machine dependent
111 reorganization. */
112 static int ia64_flag_var_tracking;
113
114 /* Variables which are this size or smaller are put in the sdata/sbss
115 sections. */
116
117 unsigned int ia64_section_threshold;
118
119 /* The following variable is used by the DFA insn scheduler. The value is
120 TRUE if we do insn bundling instead of insn scheduling. */
121 int bundling_p = 0;
122
123 enum ia64_frame_regs
124 {
125 reg_fp,
126 reg_save_b0,
127 reg_save_pr,
128 reg_save_ar_pfs,
129 reg_save_ar_unat,
130 reg_save_ar_lc,
131 reg_save_gp,
132 number_of_ia64_frame_regs
133 };
134
135 /* Structure to be filled in by ia64_compute_frame_size with register
136 save masks and offsets for the current function. */
137
138 struct ia64_frame_info
139 {
140 HOST_WIDE_INT total_size; /* size of the stack frame, not including
141 the caller's scratch area. */
142 HOST_WIDE_INT spill_cfa_off; /* top of the reg spill area from the cfa. */
143 HOST_WIDE_INT spill_size; /* size of the gr/br/fr spill area. */
144 HOST_WIDE_INT extra_spill_size; /* size of spill area for others. */
145 HARD_REG_SET mask; /* mask of saved registers. */
146 unsigned int gr_used_mask; /* mask of registers in use as gr spill
147 registers or long-term scratches. */
148 int n_spilled; /* number of spilled registers. */
149 int r[number_of_ia64_frame_regs]; /* Frame related registers. */
150 int n_input_regs; /* number of input registers used. */
151 int n_local_regs; /* number of local registers used. */
152 int n_output_regs; /* number of output registers used. */
153 int n_rotate_regs; /* number of rotating registers used. */
154
155 char need_regstk; /* true if a .regstk directive needed. */
156 char initialized; /* true if the data is finalized. */
157 };
158
159 /* Current frame information calculated by ia64_compute_frame_size. */
160 static struct ia64_frame_info current_frame_info;
161 /* The actual registers that are emitted. */
162 static int emitted_frame_related_regs[number_of_ia64_frame_regs];
163 \f
164 static int ia64_first_cycle_multipass_dfa_lookahead (void);
165 static void ia64_dependencies_evaluation_hook (rtx, rtx);
166 static void ia64_init_dfa_pre_cycle_insn (void);
167 static rtx ia64_dfa_pre_cycle_insn (void);
168 static int ia64_first_cycle_multipass_dfa_lookahead_guard (rtx);
169 static bool ia64_first_cycle_multipass_dfa_lookahead_guard_spec (const_rtx);
170 static int ia64_dfa_new_cycle (FILE *, int, rtx, int, int, int *);
171 static void ia64_h_i_d_extended (void);
172 static void * ia64_alloc_sched_context (void);
173 static void ia64_init_sched_context (void *, bool);
174 static void ia64_set_sched_context (void *);
175 static void ia64_clear_sched_context (void *);
176 static void ia64_free_sched_context (void *);
177 static int ia64_mode_to_int (enum machine_mode);
178 static void ia64_set_sched_flags (spec_info_t);
179 static ds_t ia64_get_insn_spec_ds (rtx);
180 static ds_t ia64_get_insn_checked_ds (rtx);
181 static bool ia64_skip_rtx_p (const_rtx);
182 static int ia64_speculate_insn (rtx, ds_t, rtx *);
183 static bool ia64_needs_block_p (int);
184 static rtx ia64_gen_spec_check (rtx, rtx, ds_t);
185 static int ia64_spec_check_p (rtx);
186 static int ia64_spec_check_src_p (rtx);
187 static rtx gen_tls_get_addr (void);
188 static rtx gen_thread_pointer (void);
189 static int find_gr_spill (enum ia64_frame_regs, int);
190 static int next_scratch_gr_reg (void);
191 static void mark_reg_gr_used_mask (rtx, void *);
192 static void ia64_compute_frame_size (HOST_WIDE_INT);
193 static void setup_spill_pointers (int, rtx, HOST_WIDE_INT);
194 static void finish_spill_pointers (void);
195 static rtx spill_restore_mem (rtx, HOST_WIDE_INT);
196 static void do_spill (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT, rtx);
197 static void do_restore (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT);
198 static rtx gen_movdi_x (rtx, rtx, rtx);
199 static rtx gen_fr_spill_x (rtx, rtx, rtx);
200 static rtx gen_fr_restore_x (rtx, rtx, rtx);
201
202 static void ia64_option_override (void);
203 static void ia64_option_default_params (void);
204 static bool ia64_can_eliminate (const int, const int);
205 static enum machine_mode hfa_element_mode (const_tree, bool);
206 static void ia64_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
207 tree, int *, int);
208 static int ia64_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
209 tree, bool);
210 static rtx ia64_function_arg_1 (const CUMULATIVE_ARGS *, enum machine_mode,
211 const_tree, bool, bool);
212 static rtx ia64_function_arg (CUMULATIVE_ARGS *, enum machine_mode,
213 const_tree, bool);
214 static rtx ia64_function_incoming_arg (CUMULATIVE_ARGS *,
215 enum machine_mode, const_tree, bool);
216 static void ia64_function_arg_advance (CUMULATIVE_ARGS *, enum machine_mode,
217 const_tree, bool);
218 static unsigned int ia64_function_arg_boundary (enum machine_mode,
219 const_tree);
220 static bool ia64_function_ok_for_sibcall (tree, tree);
221 static bool ia64_return_in_memory (const_tree, const_tree);
222 static rtx ia64_function_value (const_tree, const_tree, bool);
223 static rtx ia64_libcall_value (enum machine_mode, const_rtx);
224 static bool ia64_function_value_regno_p (const unsigned int);
225 static int ia64_register_move_cost (enum machine_mode, reg_class_t,
226 reg_class_t);
227 static int ia64_memory_move_cost (enum machine_mode mode, reg_class_t,
228 bool);
229 static bool ia64_rtx_costs (rtx, int, int, int *, bool);
230 static int ia64_unspec_may_trap_p (const_rtx, unsigned);
231 static void fix_range (const char *);
232 static bool ia64_handle_option (struct gcc_options *, struct gcc_options *,
233 const struct cl_decoded_option *, location_t);
234 static struct machine_function * ia64_init_machine_status (void);
235 static void emit_insn_group_barriers (FILE *);
236 static void emit_all_insn_group_barriers (FILE *);
237 static void final_emit_insn_group_barriers (FILE *);
238 static void emit_predicate_relation_info (void);
239 static void ia64_reorg (void);
240 static bool ia64_in_small_data_p (const_tree);
241 static void process_epilogue (FILE *, rtx, bool, bool);
242
243 static bool ia64_assemble_integer (rtx, unsigned int, int);
244 static void ia64_output_function_prologue (FILE *, HOST_WIDE_INT);
245 static void ia64_output_function_epilogue (FILE *, HOST_WIDE_INT);
246 static void ia64_output_function_end_prologue (FILE *);
247
248 static int ia64_issue_rate (void);
249 static int ia64_adjust_cost_2 (rtx, int, rtx, int, dw_t);
250 static void ia64_sched_init (FILE *, int, int);
251 static void ia64_sched_init_global (FILE *, int, int);
252 static void ia64_sched_finish_global (FILE *, int);
253 static void ia64_sched_finish (FILE *, int);
254 static int ia64_dfa_sched_reorder (FILE *, int, rtx *, int *, int, int);
255 static int ia64_sched_reorder (FILE *, int, rtx *, int *, int);
256 static int ia64_sched_reorder2 (FILE *, int, rtx *, int *, int);
257 static int ia64_variable_issue (FILE *, int, rtx, int);
258
259 static void ia64_asm_unwind_emit (FILE *, rtx);
260 static void ia64_asm_emit_except_personality (rtx);
261 static void ia64_asm_init_sections (void);
262
263 static enum unwind_info_type ia64_debug_unwind_info (void);
264 static enum unwind_info_type ia64_except_unwind_info (struct gcc_options *);
265
266 static struct bundle_state *get_free_bundle_state (void);
267 static void free_bundle_state (struct bundle_state *);
268 static void initiate_bundle_states (void);
269 static void finish_bundle_states (void);
270 static unsigned bundle_state_hash (const void *);
271 static int bundle_state_eq_p (const void *, const void *);
272 static int insert_bundle_state (struct bundle_state *);
273 static void initiate_bundle_state_table (void);
274 static void finish_bundle_state_table (void);
275 static int try_issue_nops (struct bundle_state *, int);
276 static int try_issue_insn (struct bundle_state *, rtx);
277 static void issue_nops_and_insn (struct bundle_state *, int, rtx, int, int);
278 static int get_max_pos (state_t);
279 static int get_template (state_t, int);
280
281 static rtx get_next_important_insn (rtx, rtx);
282 static bool important_for_bundling_p (rtx);
283 static void bundling (FILE *, int, rtx, rtx);
284
285 static void ia64_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
286 HOST_WIDE_INT, tree);
287 static void ia64_file_start (void);
288 static void ia64_globalize_decl_name (FILE *, tree);
289
290 static int ia64_hpux_reloc_rw_mask (void) ATTRIBUTE_UNUSED;
291 static int ia64_reloc_rw_mask (void) ATTRIBUTE_UNUSED;
292 static section *ia64_select_rtx_section (enum machine_mode, rtx,
293 unsigned HOST_WIDE_INT);
294 static void ia64_output_dwarf_dtprel (FILE *, int, rtx)
295 ATTRIBUTE_UNUSED;
296 static unsigned int ia64_section_type_flags (tree, const char *, int);
297 static void ia64_init_libfuncs (void)
298 ATTRIBUTE_UNUSED;
299 static void ia64_hpux_init_libfuncs (void)
300 ATTRIBUTE_UNUSED;
301 static void ia64_sysv4_init_libfuncs (void)
302 ATTRIBUTE_UNUSED;
303 static void ia64_vms_init_libfuncs (void)
304 ATTRIBUTE_UNUSED;
305 static void ia64_soft_fp_init_libfuncs (void)
306 ATTRIBUTE_UNUSED;
307 static bool ia64_vms_valid_pointer_mode (enum machine_mode mode)
308 ATTRIBUTE_UNUSED;
309 static tree ia64_vms_common_object_attribute (tree *, tree, tree, int, bool *)
310 ATTRIBUTE_UNUSED;
311
312 static tree ia64_handle_model_attribute (tree *, tree, tree, int, bool *);
313 static tree ia64_handle_version_id_attribute (tree *, tree, tree, int, bool *);
314 static void ia64_encode_section_info (tree, rtx, int);
315 static rtx ia64_struct_value_rtx (tree, int);
316 static tree ia64_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
317 static bool ia64_scalar_mode_supported_p (enum machine_mode mode);
318 static bool ia64_vector_mode_supported_p (enum machine_mode mode);
319 static bool ia64_cannot_force_const_mem (rtx);
320 static const char *ia64_mangle_type (const_tree);
321 static const char *ia64_invalid_conversion (const_tree, const_tree);
322 static const char *ia64_invalid_unary_op (int, const_tree);
323 static const char *ia64_invalid_binary_op (int, const_tree, const_tree);
324 static enum machine_mode ia64_c_mode_for_suffix (char);
325 static enum machine_mode ia64_promote_function_mode (const_tree,
326 enum machine_mode,
327 int *,
328 const_tree,
329 int);
330 static void ia64_trampoline_init (rtx, tree, rtx);
331 static void ia64_override_options_after_change (void);
332
333 static void ia64_dwarf_handle_frame_unspec (const char *, rtx, int);
334 static tree ia64_builtin_decl (unsigned, bool);
335
336 static reg_class_t ia64_preferred_reload_class (rtx, reg_class_t);
337 static enum machine_mode ia64_get_reg_raw_mode (int regno);
338 static section * ia64_hpux_function_section (tree, enum node_frequency,
339 bool, bool);
340 \f
341 /* Table of valid machine attributes. */
342 static const struct attribute_spec ia64_attribute_table[] =
343 {
344 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
345 affects_type_identity } */
346 { "syscall_linkage", 0, 0, false, true, true, NULL, false },
347 { "model", 1, 1, true, false, false, ia64_handle_model_attribute,
348 false },
349 #if TARGET_ABI_OPEN_VMS
350 { "common_object", 1, 1, true, false, false,
351 ia64_vms_common_object_attribute, false },
352 #endif
353 { "version_id", 1, 1, true, false, false,
354 ia64_handle_version_id_attribute, false },
355 { NULL, 0, 0, false, false, false, NULL, false }
356 };
357
358 /* Implement overriding of the optimization options. */
359 static const struct default_options ia64_option_optimization_table[] =
360 {
361 { OPT_LEVELS_1_PLUS, OPT_fomit_frame_pointer, NULL, 1 },
362 #ifdef SUBTARGET_OPTIMIZATION_OPTIONS
363 SUBTARGET_OPTIMIZATION_OPTIONS,
364 #endif
365 { OPT_LEVELS_NONE, 0, NULL, 0 }
366 };
367
368 /* Initialize the GCC target structure. */
369 #undef TARGET_ATTRIBUTE_TABLE
370 #define TARGET_ATTRIBUTE_TABLE ia64_attribute_table
371
372 #undef TARGET_INIT_BUILTINS
373 #define TARGET_INIT_BUILTINS ia64_init_builtins
374
375 #undef TARGET_EXPAND_BUILTIN
376 #define TARGET_EXPAND_BUILTIN ia64_expand_builtin
377
378 #undef TARGET_BUILTIN_DECL
379 #define TARGET_BUILTIN_DECL ia64_builtin_decl
380
381 #undef TARGET_ASM_BYTE_OP
382 #define TARGET_ASM_BYTE_OP "\tdata1\t"
383 #undef TARGET_ASM_ALIGNED_HI_OP
384 #define TARGET_ASM_ALIGNED_HI_OP "\tdata2\t"
385 #undef TARGET_ASM_ALIGNED_SI_OP
386 #define TARGET_ASM_ALIGNED_SI_OP "\tdata4\t"
387 #undef TARGET_ASM_ALIGNED_DI_OP
388 #define TARGET_ASM_ALIGNED_DI_OP "\tdata8\t"
389 #undef TARGET_ASM_UNALIGNED_HI_OP
390 #define TARGET_ASM_UNALIGNED_HI_OP "\tdata2.ua\t"
391 #undef TARGET_ASM_UNALIGNED_SI_OP
392 #define TARGET_ASM_UNALIGNED_SI_OP "\tdata4.ua\t"
393 #undef TARGET_ASM_UNALIGNED_DI_OP
394 #define TARGET_ASM_UNALIGNED_DI_OP "\tdata8.ua\t"
395 #undef TARGET_ASM_INTEGER
396 #define TARGET_ASM_INTEGER ia64_assemble_integer
397
398 #undef TARGET_OPTION_OVERRIDE
399 #define TARGET_OPTION_OVERRIDE ia64_option_override
400 #undef TARGET_OPTION_OPTIMIZATION_TABLE
401 #define TARGET_OPTION_OPTIMIZATION_TABLE ia64_option_optimization_table
402 #undef TARGET_OPTION_DEFAULT_PARAMS
403 #define TARGET_OPTION_DEFAULT_PARAMS ia64_option_default_params
404
405 #undef TARGET_ASM_FUNCTION_PROLOGUE
406 #define TARGET_ASM_FUNCTION_PROLOGUE ia64_output_function_prologue
407 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
408 #define TARGET_ASM_FUNCTION_END_PROLOGUE ia64_output_function_end_prologue
409 #undef TARGET_ASM_FUNCTION_EPILOGUE
410 #define TARGET_ASM_FUNCTION_EPILOGUE ia64_output_function_epilogue
411
412 #undef TARGET_IN_SMALL_DATA_P
413 #define TARGET_IN_SMALL_DATA_P ia64_in_small_data_p
414
415 #undef TARGET_SCHED_ADJUST_COST_2
416 #define TARGET_SCHED_ADJUST_COST_2 ia64_adjust_cost_2
417 #undef TARGET_SCHED_ISSUE_RATE
418 #define TARGET_SCHED_ISSUE_RATE ia64_issue_rate
419 #undef TARGET_SCHED_VARIABLE_ISSUE
420 #define TARGET_SCHED_VARIABLE_ISSUE ia64_variable_issue
421 #undef TARGET_SCHED_INIT
422 #define TARGET_SCHED_INIT ia64_sched_init
423 #undef TARGET_SCHED_FINISH
424 #define TARGET_SCHED_FINISH ia64_sched_finish
425 #undef TARGET_SCHED_INIT_GLOBAL
426 #define TARGET_SCHED_INIT_GLOBAL ia64_sched_init_global
427 #undef TARGET_SCHED_FINISH_GLOBAL
428 #define TARGET_SCHED_FINISH_GLOBAL ia64_sched_finish_global
429 #undef TARGET_SCHED_REORDER
430 #define TARGET_SCHED_REORDER ia64_sched_reorder
431 #undef TARGET_SCHED_REORDER2
432 #define TARGET_SCHED_REORDER2 ia64_sched_reorder2
433
434 #undef TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK
435 #define TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK ia64_dependencies_evaluation_hook
436
437 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
438 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD ia64_first_cycle_multipass_dfa_lookahead
439
440 #undef TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN
441 #define TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN ia64_init_dfa_pre_cycle_insn
442 #undef TARGET_SCHED_DFA_PRE_CYCLE_INSN
443 #define TARGET_SCHED_DFA_PRE_CYCLE_INSN ia64_dfa_pre_cycle_insn
444
445 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
446 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD\
447 ia64_first_cycle_multipass_dfa_lookahead_guard
448
449 #undef TARGET_SCHED_DFA_NEW_CYCLE
450 #define TARGET_SCHED_DFA_NEW_CYCLE ia64_dfa_new_cycle
451
452 #undef TARGET_SCHED_H_I_D_EXTENDED
453 #define TARGET_SCHED_H_I_D_EXTENDED ia64_h_i_d_extended
454
455 #undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
456 #define TARGET_SCHED_ALLOC_SCHED_CONTEXT ia64_alloc_sched_context
457
458 #undef TARGET_SCHED_INIT_SCHED_CONTEXT
459 #define TARGET_SCHED_INIT_SCHED_CONTEXT ia64_init_sched_context
460
461 #undef TARGET_SCHED_SET_SCHED_CONTEXT
462 #define TARGET_SCHED_SET_SCHED_CONTEXT ia64_set_sched_context
463
464 #undef TARGET_SCHED_CLEAR_SCHED_CONTEXT
465 #define TARGET_SCHED_CLEAR_SCHED_CONTEXT ia64_clear_sched_context
466
467 #undef TARGET_SCHED_FREE_SCHED_CONTEXT
468 #define TARGET_SCHED_FREE_SCHED_CONTEXT ia64_free_sched_context
469
470 #undef TARGET_SCHED_SET_SCHED_FLAGS
471 #define TARGET_SCHED_SET_SCHED_FLAGS ia64_set_sched_flags
472
473 #undef TARGET_SCHED_GET_INSN_SPEC_DS
474 #define TARGET_SCHED_GET_INSN_SPEC_DS ia64_get_insn_spec_ds
475
476 #undef TARGET_SCHED_GET_INSN_CHECKED_DS
477 #define TARGET_SCHED_GET_INSN_CHECKED_DS ia64_get_insn_checked_ds
478
479 #undef TARGET_SCHED_SPECULATE_INSN
480 #define TARGET_SCHED_SPECULATE_INSN ia64_speculate_insn
481
482 #undef TARGET_SCHED_NEEDS_BLOCK_P
483 #define TARGET_SCHED_NEEDS_BLOCK_P ia64_needs_block_p
484
485 #undef TARGET_SCHED_GEN_SPEC_CHECK
486 #define TARGET_SCHED_GEN_SPEC_CHECK ia64_gen_spec_check
487
488 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD_SPEC
489 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD_SPEC\
490 ia64_first_cycle_multipass_dfa_lookahead_guard_spec
491
492 #undef TARGET_SCHED_SKIP_RTX_P
493 #define TARGET_SCHED_SKIP_RTX_P ia64_skip_rtx_p
494
495 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
496 #define TARGET_FUNCTION_OK_FOR_SIBCALL ia64_function_ok_for_sibcall
497 #undef TARGET_ARG_PARTIAL_BYTES
498 #define TARGET_ARG_PARTIAL_BYTES ia64_arg_partial_bytes
499 #undef TARGET_FUNCTION_ARG
500 #define TARGET_FUNCTION_ARG ia64_function_arg
501 #undef TARGET_FUNCTION_INCOMING_ARG
502 #define TARGET_FUNCTION_INCOMING_ARG ia64_function_incoming_arg
503 #undef TARGET_FUNCTION_ARG_ADVANCE
504 #define TARGET_FUNCTION_ARG_ADVANCE ia64_function_arg_advance
505 #undef TARGET_FUNCTION_ARG_BOUNDARY
506 #define TARGET_FUNCTION_ARG_BOUNDARY ia64_function_arg_boundary
507
508 #undef TARGET_ASM_OUTPUT_MI_THUNK
509 #define TARGET_ASM_OUTPUT_MI_THUNK ia64_output_mi_thunk
510 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
511 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
512
513 #undef TARGET_ASM_FILE_START
514 #define TARGET_ASM_FILE_START ia64_file_start
515
516 #undef TARGET_ASM_GLOBALIZE_DECL_NAME
517 #define TARGET_ASM_GLOBALIZE_DECL_NAME ia64_globalize_decl_name
518
519 #undef TARGET_REGISTER_MOVE_COST
520 #define TARGET_REGISTER_MOVE_COST ia64_register_move_cost
521 #undef TARGET_MEMORY_MOVE_COST
522 #define TARGET_MEMORY_MOVE_COST ia64_memory_move_cost
523 #undef TARGET_RTX_COSTS
524 #define TARGET_RTX_COSTS ia64_rtx_costs
525 #undef TARGET_ADDRESS_COST
526 #define TARGET_ADDRESS_COST hook_int_rtx_bool_0
527
528 #undef TARGET_UNSPEC_MAY_TRAP_P
529 #define TARGET_UNSPEC_MAY_TRAP_P ia64_unspec_may_trap_p
530
531 #undef TARGET_MACHINE_DEPENDENT_REORG
532 #define TARGET_MACHINE_DEPENDENT_REORG ia64_reorg
533
534 #undef TARGET_ENCODE_SECTION_INFO
535 #define TARGET_ENCODE_SECTION_INFO ia64_encode_section_info
536
537 #undef TARGET_SECTION_TYPE_FLAGS
538 #define TARGET_SECTION_TYPE_FLAGS ia64_section_type_flags
539
540 #ifdef HAVE_AS_TLS
541 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
542 #define TARGET_ASM_OUTPUT_DWARF_DTPREL ia64_output_dwarf_dtprel
543 #endif
544
545 #undef TARGET_PROMOTE_FUNCTION_MODE
546 #define TARGET_PROMOTE_FUNCTION_MODE ia64_promote_function_mode
547
548 /* ??? Investigate. */
549 #if 0
550 #undef TARGET_PROMOTE_PROTOTYPES
551 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
552 #endif
553
554 #undef TARGET_FUNCTION_VALUE
555 #define TARGET_FUNCTION_VALUE ia64_function_value
556 #undef TARGET_LIBCALL_VALUE
557 #define TARGET_LIBCALL_VALUE ia64_libcall_value
558 #undef TARGET_FUNCTION_VALUE_REGNO_P
559 #define TARGET_FUNCTION_VALUE_REGNO_P ia64_function_value_regno_p
560
561 #undef TARGET_STRUCT_VALUE_RTX
562 #define TARGET_STRUCT_VALUE_RTX ia64_struct_value_rtx
563 #undef TARGET_RETURN_IN_MEMORY
564 #define TARGET_RETURN_IN_MEMORY ia64_return_in_memory
565 #undef TARGET_SETUP_INCOMING_VARARGS
566 #define TARGET_SETUP_INCOMING_VARARGS ia64_setup_incoming_varargs
567 #undef TARGET_STRICT_ARGUMENT_NAMING
568 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
569 #undef TARGET_MUST_PASS_IN_STACK
570 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
571 #undef TARGET_GET_RAW_RESULT_MODE
572 #define TARGET_GET_RAW_RESULT_MODE ia64_get_reg_raw_mode
573 #undef TARGET_GET_RAW_ARG_MODE
574 #define TARGET_GET_RAW_ARG_MODE ia64_get_reg_raw_mode
575
576 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
577 #define TARGET_GIMPLIFY_VA_ARG_EXPR ia64_gimplify_va_arg
578
579 #undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
580 #define TARGET_DWARF_HANDLE_FRAME_UNSPEC ia64_dwarf_handle_frame_unspec
581 #undef TARGET_ASM_UNWIND_EMIT
582 #define TARGET_ASM_UNWIND_EMIT ia64_asm_unwind_emit
583 #undef TARGET_ASM_EMIT_EXCEPT_PERSONALITY
584 #define TARGET_ASM_EMIT_EXCEPT_PERSONALITY ia64_asm_emit_except_personality
585 #undef TARGET_ASM_INIT_SECTIONS
586 #define TARGET_ASM_INIT_SECTIONS ia64_asm_init_sections
587
588 #undef TARGET_DEBUG_UNWIND_INFO
589 #define TARGET_DEBUG_UNWIND_INFO ia64_debug_unwind_info
590 #undef TARGET_EXCEPT_UNWIND_INFO
591 #define TARGET_EXCEPT_UNWIND_INFO ia64_except_unwind_info
592
593 #undef TARGET_SCALAR_MODE_SUPPORTED_P
594 #define TARGET_SCALAR_MODE_SUPPORTED_P ia64_scalar_mode_supported_p
595 #undef TARGET_VECTOR_MODE_SUPPORTED_P
596 #define TARGET_VECTOR_MODE_SUPPORTED_P ia64_vector_mode_supported_p
597
598 /* ia64 architecture manual 4.4.7: ... reads, writes, and flushes may occur
599 in an order different from the specified program order. */
600 #undef TARGET_RELAXED_ORDERING
601 #define TARGET_RELAXED_ORDERING true
602
603 #undef TARGET_DEFAULT_TARGET_FLAGS
604 #define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | TARGET_CPU_DEFAULT)
605 #undef TARGET_HANDLE_OPTION
606 #define TARGET_HANDLE_OPTION ia64_handle_option
607
608 #undef TARGET_CANNOT_FORCE_CONST_MEM
609 #define TARGET_CANNOT_FORCE_CONST_MEM ia64_cannot_force_const_mem
610
611 #undef TARGET_MANGLE_TYPE
612 #define TARGET_MANGLE_TYPE ia64_mangle_type
613
614 #undef TARGET_INVALID_CONVERSION
615 #define TARGET_INVALID_CONVERSION ia64_invalid_conversion
616 #undef TARGET_INVALID_UNARY_OP
617 #define TARGET_INVALID_UNARY_OP ia64_invalid_unary_op
618 #undef TARGET_INVALID_BINARY_OP
619 #define TARGET_INVALID_BINARY_OP ia64_invalid_binary_op
620
621 #undef TARGET_C_MODE_FOR_SUFFIX
622 #define TARGET_C_MODE_FOR_SUFFIX ia64_c_mode_for_suffix
623
624 #undef TARGET_CAN_ELIMINATE
625 #define TARGET_CAN_ELIMINATE ia64_can_eliminate
626
627 #undef TARGET_TRAMPOLINE_INIT
628 #define TARGET_TRAMPOLINE_INIT ia64_trampoline_init
629
630 #undef TARGET_INVALID_WITHIN_DOLOOP
631 #define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_null
632
633 #undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
634 #define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE ia64_override_options_after_change
635
636 #undef TARGET_PREFERRED_RELOAD_CLASS
637 #define TARGET_PREFERRED_RELOAD_CLASS ia64_preferred_reload_class
638
639 struct gcc_target targetm = TARGET_INITIALIZER;
640 \f
641 typedef enum
642 {
643 ADDR_AREA_NORMAL, /* normal address area */
644 ADDR_AREA_SMALL /* addressable by "addl" (-2MB < addr < 2MB) */
645 }
646 ia64_addr_area;
647
648 static GTY(()) tree small_ident1;
649 static GTY(()) tree small_ident2;
650
651 static void
652 init_idents (void)
653 {
654 if (small_ident1 == 0)
655 {
656 small_ident1 = get_identifier ("small");
657 small_ident2 = get_identifier ("__small__");
658 }
659 }
660
661 /* Retrieve the address area that has been chosen for the given decl. */
662
663 static ia64_addr_area
664 ia64_get_addr_area (tree decl)
665 {
666 tree model_attr;
667
668 model_attr = lookup_attribute ("model", DECL_ATTRIBUTES (decl));
669 if (model_attr)
670 {
671 tree id;
672
673 init_idents ();
674 id = TREE_VALUE (TREE_VALUE (model_attr));
675 if (id == small_ident1 || id == small_ident2)
676 return ADDR_AREA_SMALL;
677 }
678 return ADDR_AREA_NORMAL;
679 }
680
681 static tree
682 ia64_handle_model_attribute (tree *node, tree name, tree args,
683 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
684 {
685 ia64_addr_area addr_area = ADDR_AREA_NORMAL;
686 ia64_addr_area area;
687 tree arg, decl = *node;
688
689 init_idents ();
690 arg = TREE_VALUE (args);
691 if (arg == small_ident1 || arg == small_ident2)
692 {
693 addr_area = ADDR_AREA_SMALL;
694 }
695 else
696 {
697 warning (OPT_Wattributes, "invalid argument of %qE attribute",
698 name);
699 *no_add_attrs = true;
700 }
701
702 switch (TREE_CODE (decl))
703 {
704 case VAR_DECL:
705 if ((DECL_CONTEXT (decl) && TREE_CODE (DECL_CONTEXT (decl))
706 == FUNCTION_DECL)
707 && !TREE_STATIC (decl))
708 {
709 error_at (DECL_SOURCE_LOCATION (decl),
710 "an address area attribute cannot be specified for "
711 "local variables");
712 *no_add_attrs = true;
713 }
714 area = ia64_get_addr_area (decl);
715 if (area != ADDR_AREA_NORMAL && addr_area != area)
716 {
717 error ("address area of %q+D conflicts with previous "
718 "declaration", decl);
719 *no_add_attrs = true;
720 }
721 break;
722
723 case FUNCTION_DECL:
724 error_at (DECL_SOURCE_LOCATION (decl),
725 "address area attribute cannot be specified for "
726 "functions");
727 *no_add_attrs = true;
728 break;
729
730 default:
731 warning (OPT_Wattributes, "%qE attribute ignored",
732 name);
733 *no_add_attrs = true;
734 break;
735 }
736
737 return NULL_TREE;
738 }
739
740 /* The section must have global and overlaid attributes. */
741 #define SECTION_VMS_OVERLAY SECTION_MACH_DEP
742
743 /* Part of the low level implementation of DEC Ada pragma Common_Object which
744 enables the shared use of variables stored in overlaid linker areas
745 corresponding to the use of Fortran COMMON. */
746
747 static tree
748 ia64_vms_common_object_attribute (tree *node, tree name, tree args,
749 int flags ATTRIBUTE_UNUSED,
750 bool *no_add_attrs)
751 {
752 tree decl = *node;
753 tree id, val;
754 if (! DECL_P (decl))
755 abort ();
756
757 DECL_COMMON (decl) = 1;
758 id = TREE_VALUE (args);
759 if (TREE_CODE (id) == IDENTIFIER_NODE)
760 val = build_string (IDENTIFIER_LENGTH (id), IDENTIFIER_POINTER (id));
761 else if (TREE_CODE (id) == STRING_CST)
762 val = id;
763 else
764 {
765 warning (OPT_Wattributes,
766 "%qE attribute requires a string constant argument", name);
767 *no_add_attrs = true;
768 return NULL_TREE;
769 }
770 DECL_SECTION_NAME (decl) = val;
771 return NULL_TREE;
772 }
773
774 /* Part of the low level implementation of DEC Ada pragma Common_Object. */
775
776 void
777 ia64_vms_output_aligned_decl_common (FILE *file, tree decl, const char *name,
778 unsigned HOST_WIDE_INT size,
779 unsigned int align)
780 {
781 tree attr = DECL_ATTRIBUTES (decl);
782
 783 /* As the common_object attribute sets DECL_SECTION_NAME, check it before
 784 looking up the attribute. */
785 if (DECL_SECTION_NAME (decl) && attr)
786 attr = lookup_attribute ("common_object", attr);
787 else
788 attr = NULL_TREE;
789
790 if (!attr)
791 {
792 /* Code from elfos.h. */
793 fprintf (file, "%s", COMMON_ASM_OP);
794 assemble_name (file, name);
795 fprintf (file, ","HOST_WIDE_INT_PRINT_UNSIGNED",%u\n",
796 size, align / BITS_PER_UNIT);
797 }
798 else
799 {
800 ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
801 ASM_OUTPUT_LABEL (file, name);
802 ASM_OUTPUT_SKIP (file, size ? size : 1);
803 }
804 }
805
806 /* Definition of TARGET_ASM_NAMED_SECTION for VMS. */
807
808 void
809 ia64_vms_elf_asm_named_section (const char *name, unsigned int flags,
810 tree decl)
811 {
812 if (!(flags & SECTION_VMS_OVERLAY))
813 {
814 default_elf_asm_named_section (name, flags, decl);
815 return;
816 }
817 if (flags != (SECTION_VMS_OVERLAY | SECTION_WRITE))
818 abort ();
819
820 if (flags & SECTION_DECLARED)
821 {
822 fprintf (asm_out_file, "\t.section\t%s\n", name);
823 return;
824 }
825
826 fprintf (asm_out_file, "\t.section\t%s,\"awgO\"\n", name);
827 }
828
829 static void
830 ia64_encode_addr_area (tree decl, rtx symbol)
831 {
832 int flags;
833
834 flags = SYMBOL_REF_FLAGS (symbol);
835 switch (ia64_get_addr_area (decl))
836 {
837 case ADDR_AREA_NORMAL: break;
838 case ADDR_AREA_SMALL: flags |= SYMBOL_FLAG_SMALL_ADDR; break;
839 default: gcc_unreachable ();
840 }
841 SYMBOL_REF_FLAGS (symbol) = flags;
842 }
843
844 static void
845 ia64_encode_section_info (tree decl, rtx rtl, int first)
846 {
847 default_encode_section_info (decl, rtl, first);
848
849 /* Careful not to prod global register variables. */
850 if (TREE_CODE (decl) == VAR_DECL
851 && GET_CODE (DECL_RTL (decl)) == MEM
852 && GET_CODE (XEXP (DECL_RTL (decl), 0)) == SYMBOL_REF
853 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl)))
854 ia64_encode_addr_area (decl, XEXP (rtl, 0));
855 }
856 \f
857 /* Return 1 if the operands of a move are ok. */
858
859 int
860 ia64_move_ok (rtx dst, rtx src)
861 {
862 /* If we're under init_recog_no_volatile, we'll not be able to use
863 memory_operand. So check the code directly and don't worry about
864 the validity of the underlying address, which should have been
865 checked elsewhere anyway. */
866 if (GET_CODE (dst) != MEM)
867 return 1;
868 if (GET_CODE (src) == MEM)
869 return 0;
870 if (register_operand (src, VOIDmode))
871 return 1;
872
 873 /* Otherwise, this must be a constant, and it must be either 0, 0.0, or 1.0. */
874 if (INTEGRAL_MODE_P (GET_MODE (dst)))
875 return src == const0_rtx;
876 else
877 return satisfies_constraint_G (src);
878 }
879
880 /* Return 1 if the operands are ok for a floating point load pair. */
881
882 int
883 ia64_load_pair_ok (rtx dst, rtx src)
884 {
885 if (GET_CODE (dst) != REG || !FP_REGNO_P (REGNO (dst)))
886 return 0;
887 if (GET_CODE (src) != MEM || MEM_VOLATILE_P (src))
888 return 0;
889 switch (GET_CODE (XEXP (src, 0)))
890 {
891 case REG:
892 case POST_INC:
893 break;
894 case POST_DEC:
895 return 0;
896 case POST_MODIFY:
897 {
898 rtx adjust = XEXP (XEXP (XEXP (src, 0), 1), 1);
899
900 if (GET_CODE (adjust) != CONST_INT
901 || INTVAL (adjust) != GET_MODE_SIZE (GET_MODE (src)))
902 return 0;
903 }
904 break;
905 default:
906 abort ();
907 }
908 return 1;
909 }
910
911 int
912 addp4_optimize_ok (rtx op1, rtx op2)
913 {
914 return (basereg_operand (op1, GET_MODE(op1)) !=
915 basereg_operand (op2, GET_MODE(op2)));
916 }
917
918 /* Check if OP is a mask suitable for use with SHIFT in a dep.z instruction.
919 Return the length of the field, or <= 0 on failure. */
920
921 int
922 ia64_depz_field_mask (rtx rop, rtx rshift)
923 {
924 unsigned HOST_WIDE_INT op = INTVAL (rop);
925 unsigned HOST_WIDE_INT shift = INTVAL (rshift);
926
927 /* Get rid of the zero bits we're shifting in. */
928 op >>= shift;
929
930 /* We must now have a solid block of 1's at bit 0. */
931 return exact_log2 (op + 1);
932 }
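/* A worked example of the check above (values chosen for illustration):
   with rop = 0xF0 and rshift = 4, op >>= shift leaves 0xF and
   exact_log2 (0xF + 1) == 4, so the deposit field is 4 bits wide.  With
   rop = 0xE0 and rshift = 4 the shifted value is 0xE; 0xE + 1 is not a
   power of two, so exact_log2 returns -1 and the mask is rejected.  */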
933
934 /* Return the TLS model to use for ADDR. */
935
936 static enum tls_model
937 tls_symbolic_operand_type (rtx addr)
938 {
939 enum tls_model tls_kind = TLS_MODEL_NONE;
940
941 if (GET_CODE (addr) == CONST)
942 {
943 if (GET_CODE (XEXP (addr, 0)) == PLUS
944 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF)
945 tls_kind = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (addr, 0), 0));
946 }
947 else if (GET_CODE (addr) == SYMBOL_REF)
948 tls_kind = SYMBOL_REF_TLS_MODEL (addr);
949
950 return tls_kind;
951 }
952
953 /* Return true if X is a constant that is valid for some immediate
954 field in an instruction. */
955
956 bool
957 ia64_legitimate_constant_p (rtx x)
958 {
959 switch (GET_CODE (x))
960 {
961 case CONST_INT:
962 case LABEL_REF:
963 return true;
964
965 case CONST_DOUBLE:
966 if (GET_MODE (x) == VOIDmode || GET_MODE (x) == SFmode
967 || GET_MODE (x) == DFmode)
968 return true;
969 return satisfies_constraint_G (x);
970
971 case CONST:
972 case SYMBOL_REF:
973 /* ??? Short term workaround for PR 28490. We must make the code here
974 match the code in ia64_expand_move and move_operand, even though they
975 are both technically wrong. */
976 if (tls_symbolic_operand_type (x) == 0)
977 {
978 HOST_WIDE_INT addend = 0;
979 rtx op = x;
980
981 if (GET_CODE (op) == CONST
982 && GET_CODE (XEXP (op, 0)) == PLUS
983 && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT)
984 {
985 addend = INTVAL (XEXP (XEXP (op, 0), 1));
986 op = XEXP (XEXP (op, 0), 0);
987 }
988
989 if (any_offset_symbol_operand (op, GET_MODE (op))
990 || function_operand (op, GET_MODE (op)))
991 return true;
992 if (aligned_offset_symbol_operand (op, GET_MODE (op)))
993 return (addend & 0x3fff) == 0;
994 return false;
995 }
996 return false;
997
998 case CONST_VECTOR:
999 {
1000 enum machine_mode mode = GET_MODE (x);
1001
1002 if (mode == V2SFmode)
1003 return satisfies_constraint_Y (x);
1004
1005 return (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
1006 && GET_MODE_SIZE (mode) <= 8);
1007 }
1008
1009 default:
1010 return false;
1011 }
1012 }
1013
1014 /* Don't allow TLS addresses to get spilled to memory. */
1015
1016 static bool
1017 ia64_cannot_force_const_mem (rtx x)
1018 {
1019 if (GET_MODE (x) == RFmode)
1020 return true;
1021 return tls_symbolic_operand_type (x) != 0;
1022 }
1023
1024 /* Expand a symbolic constant load. */
1025
1026 bool
1027 ia64_expand_load_address (rtx dest, rtx src)
1028 {
1029 gcc_assert (GET_CODE (dest) == REG);
1030
1031 /* ILP32 mode still loads 64-bits of data from the GOT. This avoids
1032 having to pointer-extend the value afterward. Other forms of address
1033 computation below are also more natural to compute as 64-bit quantities.
1034 If we've been given an SImode destination register, change it. */
1035 if (GET_MODE (dest) != Pmode)
1036 dest = gen_rtx_REG_offset (dest, Pmode, REGNO (dest),
1037 byte_lowpart_offset (Pmode, GET_MODE (dest)));
1038
1039 if (TARGET_NO_PIC)
1040 return false;
1041 if (small_addr_symbolic_operand (src, VOIDmode))
1042 return false;
1043
1044 if (TARGET_AUTO_PIC)
1045 emit_insn (gen_load_gprel64 (dest, src));
1046 else if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (src))
1047 emit_insn (gen_load_fptr (dest, src));
1048 else if (sdata_symbolic_operand (src, VOIDmode))
1049 emit_insn (gen_load_gprel (dest, src));
1050 else
1051 {
1052 HOST_WIDE_INT addend = 0;
1053 rtx tmp;
1054
1055 /* We did split constant offsets in ia64_expand_move, and we did try
1056 to keep them split in move_operand, but we also allowed reload to
1057 rematerialize arbitrary constants rather than spill the value to
1058 the stack and reload it. So we have to be prepared here to split
1059 them apart again. */
1060 if (GET_CODE (src) == CONST)
1061 {
1062 HOST_WIDE_INT hi, lo;
1063
1064 hi = INTVAL (XEXP (XEXP (src, 0), 1));
1065 lo = ((hi & 0x3fff) ^ 0x2000) - 0x2000;
1066 hi = hi - lo;
1067
1068 if (lo != 0)
1069 {
1070 addend = lo;
1071 src = plus_constant (XEXP (XEXP (src, 0), 0), hi);
1072 }
1073 }
1074
1075 tmp = gen_rtx_HIGH (Pmode, src);
1076 tmp = gen_rtx_PLUS (Pmode, tmp, pic_offset_table_rtx);
1077 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
1078
1079 tmp = gen_rtx_LO_SUM (Pmode, dest, src);
1080 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
1081
1082 if (addend)
1083 {
1084 tmp = gen_rtx_PLUS (Pmode, dest, GEN_INT (addend));
1085 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
1086 }
1087 }
1088
1089 return true;
1090 }
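/* A worked example of the offset split above (illustrative value):
   for hi = 0x12345, lo = ((0x12345 & 0x3fff) ^ 0x2000) - 0x2000 = -0x1cbb,
   i.e. the low 14 bits sign-extended, and hi becomes
   0x12345 - (-0x1cbb) = 0x14000, a multiple of 0x4000.  This matches the
   (addend & 0x3fff) == 0 test used for aligned offsets in
   ia64_legitimate_constant_p above.  */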
1091
1092 static GTY(()) rtx gen_tls_tga;
1093 static rtx
1094 gen_tls_get_addr (void)
1095 {
1096 if (!gen_tls_tga)
1097 gen_tls_tga = init_one_libfunc ("__tls_get_addr");
1098 return gen_tls_tga;
1099 }
1100
1101 static GTY(()) rtx thread_pointer_rtx;
1102 static rtx
1103 gen_thread_pointer (void)
1104 {
1105 if (!thread_pointer_rtx)
1106 thread_pointer_rtx = gen_rtx_REG (Pmode, 13);
1107 return thread_pointer_rtx;
1108 }
1109
1110 static rtx
1111 ia64_expand_tls_address (enum tls_model tls_kind, rtx op0, rtx op1,
1112 rtx orig_op1, HOST_WIDE_INT addend)
1113 {
1114 rtx tga_op1, tga_op2, tga_ret, tga_eqv, tmp, insns;
1115 rtx orig_op0 = op0;
1116 HOST_WIDE_INT addend_lo, addend_hi;
1117
1118 switch (tls_kind)
1119 {
1120 case TLS_MODEL_GLOBAL_DYNAMIC:
1121 start_sequence ();
1122
1123 tga_op1 = gen_reg_rtx (Pmode);
1124 emit_insn (gen_load_dtpmod (tga_op1, op1));
1125
1126 tga_op2 = gen_reg_rtx (Pmode);
1127 emit_insn (gen_load_dtprel (tga_op2, op1));
1128
1129 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
1130 LCT_CONST, Pmode, 2, tga_op1,
1131 Pmode, tga_op2, Pmode);
1132
1133 insns = get_insns ();
1134 end_sequence ();
1135
1136 if (GET_MODE (op0) != Pmode)
1137 op0 = tga_ret;
1138 emit_libcall_block (insns, op0, tga_ret, op1);
1139 break;
1140
1141 case TLS_MODEL_LOCAL_DYNAMIC:
 1142 /* ??? This isn't the completely proper way to do local-dynamic.
 1143 If the call to __tls_get_addr is used only by a single symbol,
1144 then we should (somehow) move the dtprel to the second arg
1145 to avoid the extra add. */
1146 start_sequence ();
1147
1148 tga_op1 = gen_reg_rtx (Pmode);
1149 emit_insn (gen_load_dtpmod (tga_op1, op1));
1150
1151 tga_op2 = const0_rtx;
1152
1153 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
1154 LCT_CONST, Pmode, 2, tga_op1,
1155 Pmode, tga_op2, Pmode);
1156
1157 insns = get_insns ();
1158 end_sequence ();
1159
1160 tga_eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
1161 UNSPEC_LD_BASE);
1162 tmp = gen_reg_rtx (Pmode);
1163 emit_libcall_block (insns, tmp, tga_ret, tga_eqv);
1164
1165 if (!register_operand (op0, Pmode))
1166 op0 = gen_reg_rtx (Pmode);
1167 if (TARGET_TLS64)
1168 {
1169 emit_insn (gen_load_dtprel (op0, op1));
1170 emit_insn (gen_adddi3 (op0, tmp, op0));
1171 }
1172 else
1173 emit_insn (gen_add_dtprel (op0, op1, tmp));
1174 break;
1175
1176 case TLS_MODEL_INITIAL_EXEC:
1177 addend_lo = ((addend & 0x3fff) ^ 0x2000) - 0x2000;
1178 addend_hi = addend - addend_lo;
1179
1180 op1 = plus_constant (op1, addend_hi);
1181 addend = addend_lo;
1182
1183 tmp = gen_reg_rtx (Pmode);
1184 emit_insn (gen_load_tprel (tmp, op1));
1185
1186 if (!register_operand (op0, Pmode))
1187 op0 = gen_reg_rtx (Pmode);
1188 emit_insn (gen_adddi3 (op0, tmp, gen_thread_pointer ()));
1189 break;
1190
1191 case TLS_MODEL_LOCAL_EXEC:
1192 if (!register_operand (op0, Pmode))
1193 op0 = gen_reg_rtx (Pmode);
1194
1195 op1 = orig_op1;
1196 addend = 0;
1197 if (TARGET_TLS64)
1198 {
1199 emit_insn (gen_load_tprel (op0, op1));
1200 emit_insn (gen_adddi3 (op0, op0, gen_thread_pointer ()));
1201 }
1202 else
1203 emit_insn (gen_add_tprel (op0, op1, gen_thread_pointer ()));
1204 break;
1205
1206 default:
1207 gcc_unreachable ();
1208 }
1209
1210 if (addend)
1211 op0 = expand_simple_binop (Pmode, PLUS, op0, GEN_INT (addend),
1212 orig_op0, 1, OPTAB_DIRECT);
1213 if (orig_op0 == op0)
1214 return NULL_RTX;
1215 if (GET_MODE (orig_op0) == Pmode)
1216 return op0;
1217 return gen_lowpart (GET_MODE (orig_op0), op0);
1218 }
1219
1220 rtx
1221 ia64_expand_move (rtx op0, rtx op1)
1222 {
1223 enum machine_mode mode = GET_MODE (op0);
1224
1225 if (!reload_in_progress && !reload_completed && !ia64_move_ok (op0, op1))
1226 op1 = force_reg (mode, op1);
1227
1228 if ((mode == Pmode || mode == ptr_mode) && symbolic_operand (op1, VOIDmode))
1229 {
1230 HOST_WIDE_INT addend = 0;
1231 enum tls_model tls_kind;
1232 rtx sym = op1;
1233
1234 if (GET_CODE (op1) == CONST
1235 && GET_CODE (XEXP (op1, 0)) == PLUS
1236 && GET_CODE (XEXP (XEXP (op1, 0), 1)) == CONST_INT)
1237 {
1238 addend = INTVAL (XEXP (XEXP (op1, 0), 1));
1239 sym = XEXP (XEXP (op1, 0), 0);
1240 }
1241
1242 tls_kind = tls_symbolic_operand_type (sym);
1243 if (tls_kind)
1244 return ia64_expand_tls_address (tls_kind, op0, sym, op1, addend);
1245
1246 if (any_offset_symbol_operand (sym, mode))
1247 addend = 0;
1248 else if (aligned_offset_symbol_operand (sym, mode))
1249 {
1250 HOST_WIDE_INT addend_lo, addend_hi;
1251
1252 addend_lo = ((addend & 0x3fff) ^ 0x2000) - 0x2000;
1253 addend_hi = addend - addend_lo;
1254
1255 if (addend_lo != 0)
1256 {
1257 op1 = plus_constant (sym, addend_hi);
1258 addend = addend_lo;
1259 }
1260 else
1261 addend = 0;
1262 }
1263 else
1264 op1 = sym;
1265
1266 if (reload_completed)
1267 {
1268 /* We really should have taken care of this offset earlier. */
1269 gcc_assert (addend == 0);
1270 if (ia64_expand_load_address (op0, op1))
1271 return NULL_RTX;
1272 }
1273
1274 if (addend)
1275 {
1276 rtx subtarget = !can_create_pseudo_p () ? op0 : gen_reg_rtx (mode);
1277
1278 emit_insn (gen_rtx_SET (VOIDmode, subtarget, op1));
1279
1280 op1 = expand_simple_binop (mode, PLUS, subtarget,
1281 GEN_INT (addend), op0, 1, OPTAB_DIRECT);
1282 if (op0 == op1)
1283 return NULL_RTX;
1284 }
1285 }
1286
1287 return op1;
1288 }
1289
1290 /* Split a move from OP1 to OP0 conditional on COND. */
1291
1292 void
1293 ia64_emit_cond_move (rtx op0, rtx op1, rtx cond)
1294 {
1295 rtx insn, first = get_last_insn ();
1296
1297 emit_move_insn (op0, op1);
1298
1299 for (insn = get_last_insn (); insn != first; insn = PREV_INSN (insn))
1300 if (INSN_P (insn))
1301 PATTERN (insn) = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond),
1302 PATTERN (insn));
1303 }
1304
1305 /* Split a post-reload TImode or TFmode reference into two DImode
1306 components. This is made extra difficult by the fact that we do
1307 not get any scratch registers to work with, because reload cannot
1308 be prevented from giving us a scratch that overlaps the register
1309 pair involved. So instead, when addressing memory, we tweak the
1310 pointer register up and back down with POST_INCs. Or up and not
1311 back down when we can get away with it.
1312
1313 REVERSED is true when the loads must be done in reversed order
1314 (high word first) for correctness. DEAD is true when the pointer
1315 dies with the second insn we generate and therefore the second
1316 address must not carry a postmodify.
1317
1318 May return an insn which is to be emitted after the moves. */
1319
1320 static rtx
1321 ia64_split_tmode (rtx out[2], rtx in, bool reversed, bool dead)
1322 {
1323 rtx fixup = 0;
1324
1325 switch (GET_CODE (in))
1326 {
1327 case REG:
1328 out[reversed] = gen_rtx_REG (DImode, REGNO (in));
1329 out[!reversed] = gen_rtx_REG (DImode, REGNO (in) + 1);
1330 break;
1331
1332 case CONST_INT:
1333 case CONST_DOUBLE:
1334 /* Cannot occur reversed. */
1335 gcc_assert (!reversed);
1336
1337 if (GET_MODE (in) != TFmode)
1338 split_double (in, &out[0], &out[1]);
1339 else
1340 /* split_double does not understand how to split a TFmode
1341 quantity into a pair of DImode constants. */
1342 {
1343 REAL_VALUE_TYPE r;
1344 unsigned HOST_WIDE_INT p[2];
1345 long l[4]; /* TFmode is 128 bits */
1346
1347 REAL_VALUE_FROM_CONST_DOUBLE (r, in);
1348 real_to_target (l, &r, TFmode);
1349
1350 if (FLOAT_WORDS_BIG_ENDIAN)
1351 {
1352 p[0] = (((unsigned HOST_WIDE_INT) l[0]) << 32) + l[1];
1353 p[1] = (((unsigned HOST_WIDE_INT) l[2]) << 32) + l[3];
1354 }
1355 else
1356 {
1357 p[0] = (((unsigned HOST_WIDE_INT) l[1]) << 32) + l[0];
1358 p[1] = (((unsigned HOST_WIDE_INT) l[3]) << 32) + l[2];
1359 }
1360 out[0] = GEN_INT (p[0]);
1361 out[1] = GEN_INT (p[1]);
1362 }
1363 break;
1364
1365 case MEM:
1366 {
1367 rtx base = XEXP (in, 0);
1368 rtx offset;
1369
1370 switch (GET_CODE (base))
1371 {
1372 case REG:
1373 if (!reversed)
1374 {
1375 out[0] = adjust_automodify_address
1376 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1377 out[1] = adjust_automodify_address
1378 (in, DImode, dead ? 0 : gen_rtx_POST_DEC (Pmode, base), 8);
1379 }
1380 else
1381 {
1382 /* Reversal requires a pre-increment, which can only
1383 be done as a separate insn. */
1384 emit_insn (gen_adddi3 (base, base, GEN_INT (8)));
1385 out[0] = adjust_automodify_address
1386 (in, DImode, gen_rtx_POST_DEC (Pmode, base), 8);
1387 out[1] = adjust_address (in, DImode, 0);
1388 }
1389 break;
1390
1391 case POST_INC:
1392 gcc_assert (!reversed && !dead);
1393
1394 /* Just do the increment in two steps. */
1395 out[0] = adjust_automodify_address (in, DImode, 0, 0);
1396 out[1] = adjust_automodify_address (in, DImode, 0, 8);
1397 break;
1398
1399 case POST_DEC:
1400 gcc_assert (!reversed && !dead);
1401
1402 /* Add 8, subtract 24. */
1403 base = XEXP (base, 0);
1404 out[0] = adjust_automodify_address
1405 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1406 out[1] = adjust_automodify_address
1407 (in, DImode,
1408 gen_rtx_POST_MODIFY (Pmode, base, plus_constant (base, -24)),
1409 8);
1410 break;
1411
1412 case POST_MODIFY:
1413 gcc_assert (!reversed && !dead);
1414
1415 /* Extract and adjust the modification. This case is
1416 trickier than the others, because we might have an
1417 index register, or we might have a combined offset that
1418 doesn't fit a signed 9-bit displacement field. We can
1419 assume the incoming expression is already legitimate. */
1420 offset = XEXP (base, 1);
1421 base = XEXP (base, 0);
1422
1423 out[0] = adjust_automodify_address
1424 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1425
1426 if (GET_CODE (XEXP (offset, 1)) == REG)
1427 {
1428 /* Can't adjust the postmodify to match. Emit the
1429 original, then a separate addition insn. */
1430 out[1] = adjust_automodify_address (in, DImode, 0, 8);
1431 fixup = gen_adddi3 (base, base, GEN_INT (-8));
1432 }
1433 else
1434 {
1435 gcc_assert (GET_CODE (XEXP (offset, 1)) == CONST_INT);
1436 if (INTVAL (XEXP (offset, 1)) < -256 + 8)
1437 {
1438 /* Again the postmodify cannot be made to match,
1439 but in this case it's more efficient to get rid
1440 of the postmodify entirely and fix up with an
1441 add insn. */
1442 out[1] = adjust_automodify_address (in, DImode, base, 8);
1443 fixup = gen_adddi3
1444 (base, base, GEN_INT (INTVAL (XEXP (offset, 1)) - 8));
1445 }
1446 else
1447 {
1448 /* Combined offset still fits in the displacement field.
1449 (We cannot overflow it at the high end.) */
1450 out[1] = adjust_automodify_address
1451 (in, DImode, gen_rtx_POST_MODIFY
1452 (Pmode, base, gen_rtx_PLUS
1453 (Pmode, base,
1454 GEN_INT (INTVAL (XEXP (offset, 1)) - 8))),
1455 8);
1456 }
1457 }
1458 break;
1459
1460 default:
1461 gcc_unreachable ();
1462 }
1463 break;
1464 }
1465
1466 default:
1467 gcc_unreachable ();
1468 }
1469
1470 return fixup;
1471 }
1472
1473 /* Split a TImode or TFmode move instruction after reload.
1474 This is used by *movtf_internal and *movti_internal. */
1475 void
1476 ia64_split_tmode_move (rtx operands[])
1477 {
1478 rtx in[2], out[2], insn;
1479 rtx fixup[2];
1480 bool dead = false;
1481 bool reversed = false;
1482
1483 /* It is possible for reload to decide to overwrite a pointer with
1484 the value it points to. In that case we have to do the loads in
1485 the appropriate order so that the pointer is not destroyed too
1486 early. Also we must not generate a postmodify for that second
1487 load, or rws_access_regno will die. */
1488 if (GET_CODE (operands[1]) == MEM
1489 && reg_overlap_mentioned_p (operands[0], operands[1]))
1490 {
1491 rtx base = XEXP (operands[1], 0);
1492 while (GET_CODE (base) != REG)
1493 base = XEXP (base, 0);
1494
1495 if (REGNO (base) == REGNO (operands[0]))
1496 reversed = true;
1497 dead = true;
1498 }
1499 /* Another reason to do the moves in reversed order is if the first
1500 element of the target register pair is also the second element of
1501 the source register pair. */
1502 if (GET_CODE (operands[0]) == REG && GET_CODE (operands[1]) == REG
1503 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
1504 reversed = true;
1505
1506 fixup[0] = ia64_split_tmode (in, operands[1], reversed, dead);
1507 fixup[1] = ia64_split_tmode (out, operands[0], reversed, dead);
1508
1509 #define MAYBE_ADD_REG_INC_NOTE(INSN, EXP) \
1510 if (GET_CODE (EXP) == MEM \
1511 && (GET_CODE (XEXP (EXP, 0)) == POST_MODIFY \
1512 || GET_CODE (XEXP (EXP, 0)) == POST_INC \
1513 || GET_CODE (XEXP (EXP, 0)) == POST_DEC)) \
1514 add_reg_note (insn, REG_INC, XEXP (XEXP (EXP, 0), 0))
1515
1516 insn = emit_insn (gen_rtx_SET (VOIDmode, out[0], in[0]));
1517 MAYBE_ADD_REG_INC_NOTE (insn, in[0]);
1518 MAYBE_ADD_REG_INC_NOTE (insn, out[0]);
1519
1520 insn = emit_insn (gen_rtx_SET (VOIDmode, out[1], in[1]));
1521 MAYBE_ADD_REG_INC_NOTE (insn, in[1]);
1522 MAYBE_ADD_REG_INC_NOTE (insn, out[1]);
1523
1524 if (fixup[0])
1525 emit_insn (fixup[0]);
1526 if (fixup[1])
1527 emit_insn (fixup[1]);
1528
1529 #undef MAYBE_ADD_REG_INC_NOTE
1530 }
1531
1532 /* ??? Fixing GR->FR XFmode moves during reload is hard. You need to go
1533 through memory plus an extra GR scratch register. Except that you can
1534 either get the first from SECONDARY_MEMORY_NEEDED or the second from
1535 SECONDARY_RELOAD_CLASS, but not both.
1536
1537 We got into problems in the first place by allowing a construct like
1538 (subreg:XF (reg:TI)), which we got from a union containing a long double.
1539 This solution attempts to prevent this situation from occurring. When
1540 we see something like the above, we spill the inner register to memory. */
1541
1542 static rtx
1543 spill_xfmode_rfmode_operand (rtx in, int force, enum machine_mode mode)
1544 {
1545 if (GET_CODE (in) == SUBREG
1546 && GET_MODE (SUBREG_REG (in)) == TImode
1547 && GET_CODE (SUBREG_REG (in)) == REG)
1548 {
1549 rtx memt = assign_stack_temp (TImode, 16, 0);
1550 emit_move_insn (memt, SUBREG_REG (in));
1551 return adjust_address (memt, mode, 0);
1552 }
1553 else if (force && GET_CODE (in) == REG)
1554 {
1555 rtx memx = assign_stack_temp (mode, 16, 0);
1556 emit_move_insn (memx, in);
1557 return memx;
1558 }
1559 else
1560 return in;
1561 }
1562
1563 /* Expand the movxf or movrf pattern (MODE says which) with the given
1564 OPERANDS, returning true if the pattern should then invoke
1565 DONE. */
1566
1567 bool
1568 ia64_expand_movxf_movrf (enum machine_mode mode, rtx operands[])
1569 {
1570 rtx op0 = operands[0];
1571
1572 if (GET_CODE (op0) == SUBREG)
1573 op0 = SUBREG_REG (op0);
1574
1575 /* We must support XFmode loads into general registers for stdarg/vararg,
1576 unprototyped calls, and a rare case where a long double is passed as
1577 an argument after a float HFA fills the FP registers. We split them into
1578 DImode loads for convenience. We also need to support XFmode stores
1579 for the last case. This case does not happen for stdarg/vararg routines,
1580 because we do a block store to memory of unnamed arguments. */
1581
1582 if (GET_CODE (op0) == REG && GR_REGNO_P (REGNO (op0)))
1583 {
1584 rtx out[2];
1585
1586 /* We're hoping to transform everything that deals with XFmode
1587 quantities and GR registers early in the compiler. */
1588 gcc_assert (can_create_pseudo_p ());
1589
1590 /* Struct to register can just use TImode instead. */
1591 if ((GET_CODE (operands[1]) == SUBREG
1592 && GET_MODE (SUBREG_REG (operands[1])) == TImode)
1593 || (GET_CODE (operands[1]) == REG
1594 && GR_REGNO_P (REGNO (operands[1]))))
1595 {
1596 rtx op1 = operands[1];
1597
1598 if (GET_CODE (op1) == SUBREG)
1599 op1 = SUBREG_REG (op1);
1600 else
1601 op1 = gen_rtx_REG (TImode, REGNO (op1));
1602
1603 emit_move_insn (gen_rtx_REG (TImode, REGNO (op0)), op1);
1604 return true;
1605 }
1606
1607 if (GET_CODE (operands[1]) == CONST_DOUBLE)
1608 {
1609 /* Don't word-swap when reading in the constant. */
1610 emit_move_insn (gen_rtx_REG (DImode, REGNO (op0)),
1611 operand_subword (operands[1], WORDS_BIG_ENDIAN,
1612 0, mode));
1613 emit_move_insn (gen_rtx_REG (DImode, REGNO (op0) + 1),
1614 operand_subword (operands[1], !WORDS_BIG_ENDIAN,
1615 0, mode));
1616 return true;
1617 }
1618
1619 /* If the quantity is in a register not known to be GR, spill it. */
1620 if (register_operand (operands[1], mode))
1621 operands[1] = spill_xfmode_rfmode_operand (operands[1], 1, mode);
1622
1623 gcc_assert (GET_CODE (operands[1]) == MEM);
1624
1625 /* Don't word-swap when reading in the value. */
1626 out[0] = gen_rtx_REG (DImode, REGNO (op0));
1627 out[1] = gen_rtx_REG (DImode, REGNO (op0) + 1);
1628
1629 emit_move_insn (out[0], adjust_address (operands[1], DImode, 0));
1630 emit_move_insn (out[1], adjust_address (operands[1], DImode, 8));
1631 return true;
1632 }
1633
1634 if (GET_CODE (operands[1]) == REG && GR_REGNO_P (REGNO (operands[1])))
1635 {
1636 /* We're hoping to transform everything that deals with XFmode
1637 quantities and GR registers early in the compiler. */
1638 gcc_assert (can_create_pseudo_p ());
1639
1640 /* Op0 can't be a GR_REG here, as that case is handled above.
1641 If op0 is a register, then we spill op1, so that we now have a
1642 MEM operand. This requires creating an XFmode subreg of a TImode reg
1643 to force the spill. */
1644 if (register_operand (operands[0], mode))
1645 {
1646 rtx op1 = gen_rtx_REG (TImode, REGNO (operands[1]));
1647 op1 = gen_rtx_SUBREG (mode, op1, 0);
1648 operands[1] = spill_xfmode_rfmode_operand (op1, 0, mode);
1649 }
1650
1651 else
1652 {
1653 rtx in[2];
1654
1655 gcc_assert (GET_CODE (operands[0]) == MEM);
1656
1657 /* Don't word-swap when writing out the value. */
1658 in[0] = gen_rtx_REG (DImode, REGNO (operands[1]));
1659 in[1] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
1660
1661 emit_move_insn (adjust_address (operands[0], DImode, 0), in[0]);
1662 emit_move_insn (adjust_address (operands[0], DImode, 8), in[1]);
1663 return true;
1664 }
1665 }
1666
1667 if (!reload_in_progress && !reload_completed)
1668 {
1669 operands[1] = spill_xfmode_rfmode_operand (operands[1], 0, mode);
1670
1671 if (GET_MODE (op0) == TImode && GET_CODE (op0) == REG)
1672 {
1673 rtx memt, memx, in = operands[1];
1674 if (CONSTANT_P (in))
1675 in = validize_mem (force_const_mem (mode, in));
1676 if (GET_CODE (in) == MEM)
1677 memt = adjust_address (in, TImode, 0);
1678 else
1679 {
1680 memt = assign_stack_temp (TImode, 16, 0);
1681 memx = adjust_address (memt, mode, 0);
1682 emit_move_insn (memx, in);
1683 }
1684 emit_move_insn (op0, memt);
1685 return true;
1686 }
1687
1688 if (!ia64_move_ok (operands[0], operands[1]))
1689 operands[1] = force_reg (mode, operands[1]);
1690 }
1691
1692 return false;
1693 }
1694
1695 /* Emit a comparison instruction if necessary, replacing *EXPR, *OP0, *OP1
1696 with the expression that holds the compare result (in VOIDmode). */
1697
1698 static GTY(()) rtx cmptf_libfunc;
1699
1700 void
1701 ia64_expand_compare (rtx *expr, rtx *op0, rtx *op1)
1702 {
1703 enum rtx_code code = GET_CODE (*expr);
1704 rtx cmp;
1705
1706 /* If we have a BImode input, then we already have a compare result, and
1707 do not need to emit another comparison. */
1708 if (GET_MODE (*op0) == BImode)
1709 {
1710 gcc_assert ((code == NE || code == EQ) && *op1 == const0_rtx);
1711 cmp = *op0;
1712 }
1713 /* HPUX TFmode compare requires a library call to _U_Qfcmp, which takes a
1714 magic number as its third argument that indicates what to do.
1715 The return value is an integer to be compared against zero. */
1716 else if (TARGET_HPUX && GET_MODE (*op0) == TFmode)
1717 {
1718 enum qfcmp_magic {
1719 QCMP_INV = 1, /* Raise FP_INVALID on SNaN as a side effect. */
1720 QCMP_UNORD = 2,
1721 QCMP_EQ = 4,
1722 QCMP_LT = 8,
1723 QCMP_GT = 16
1724 };
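/* Illustrative sketch only: for a LT comparison the code below ends up
   emitting the equivalent of

	ret = _U_Qfcmp (*op0, *op1, QCMP_LT | QCMP_INV);
	cmp = (ret != 0);

   and *EXPR is rewritten to test CMP against zero.  */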
1725 int magic;
1726 enum rtx_code ncode;
1727 rtx ret, insns;
1728
1729 gcc_assert (cmptf_libfunc && GET_MODE (*op1) == TFmode);
1730 switch (code)
1731 {
1732 /* 1 = equal, 0 = not equal. Equality operators do
1733 not raise FP_INVALID when given an SNaN operand. */
1734 case EQ: magic = QCMP_EQ; ncode = NE; break;
1735 case NE: magic = QCMP_EQ; ncode = EQ; break;
1736 /* isunordered() from C99. */
1737 case UNORDERED: magic = QCMP_UNORD; ncode = NE; break;
1738 case ORDERED: magic = QCMP_UNORD; ncode = EQ; break;
1739 /* Relational operators raise FP_INVALID when given
1740 an SNaN operand. */
1741 case LT: magic = QCMP_LT |QCMP_INV; ncode = NE; break;
1742 case LE: magic = QCMP_LT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1743 case GT: magic = QCMP_GT |QCMP_INV; ncode = NE; break;
1744 case GE: magic = QCMP_GT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1745 /* FUTURE: Implement UNEQ, UNLT, UNLE, UNGT, UNGE, LTGT.
1746 Expanders for buneq etc. would have to be added to ia64.md
1747 for this to be useful. */
1748 default: gcc_unreachable ();
1749 }
1750
1751 start_sequence ();
1752
1753 ret = emit_library_call_value (cmptf_libfunc, 0, LCT_CONST, DImode, 3,
1754 *op0, TFmode, *op1, TFmode,
1755 GEN_INT (magic), DImode);
1756 cmp = gen_reg_rtx (BImode);
1757 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1758 gen_rtx_fmt_ee (ncode, BImode,
1759 ret, const0_rtx)));
1760
1761 insns = get_insns ();
1762 end_sequence ();
1763
1764 emit_libcall_block (insns, cmp, cmp,
1765 gen_rtx_fmt_ee (code, BImode, *op0, *op1));
1766 code = NE;
1767 }
1768 else
1769 {
1770 cmp = gen_reg_rtx (BImode);
1771 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1772 gen_rtx_fmt_ee (code, BImode, *op0, *op1)));
1773 code = NE;
1774 }
1775
1776 *expr = gen_rtx_fmt_ee (code, VOIDmode, cmp, const0_rtx);
1777 *op0 = cmp;
1778 *op1 = const0_rtx;
1779 }
1780
1781 /* Generate an integral vector comparison. Return true if the condition has
1782 been reversed, and so the sense of the comparison should be inverted. */
1783
1784 static bool
1785 ia64_expand_vecint_compare (enum rtx_code code, enum machine_mode mode,
1786 rtx dest, rtx op0, rtx op1)
1787 {
1788 bool negate = false;
1789 rtx x;
1790
1791 /* Canonicalize the comparison to EQ, GT, GTU. */
1792 switch (code)
1793 {
1794 case EQ:
1795 case GT:
1796 case GTU:
1797 break;
1798
1799 case NE:
1800 case LE:
1801 case LEU:
1802 code = reverse_condition (code);
1803 negate = true;
1804 break;
1805
1806 case GE:
1807 case GEU:
1808 code = reverse_condition (code);
1809 negate = true;
1810 /* FALLTHRU */
1811
1812 case LT:
1813 case LTU:
1814 code = swap_condition (code);
1815 x = op0, op0 = op1, op1 = x;
1816 break;
1817
1818 default:
1819 gcc_unreachable ();
1820 }
1821
1822 /* Unsigned parallel compare is not supported by the hardware. Play some
1823 tricks to turn this into a signed comparison against 0. */
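/* A sketch of the two tricks, for unsigned elements a and b:

	a >u b   <==>   (a - bias) >s (b - bias),  bias = 0x80000000

   (subtracting the bias just flips each element's sign bit), used for
   V2SImode, and

	a >u b   <==>   (a -us b) != 0

   where -us is saturating subtraction; the V8QImode/V4HImode cases
   compute the == 0 form and flip NEGATE instead.  */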
1824 if (code == GTU)
1825 {
1826 switch (mode)
1827 {
1828 case V2SImode:
1829 {
1830 rtx t1, t2, mask;
1831
1832 /* Subtract (-(INT MAX) - 1) from both operands to make
1833 them signed. */
1834 mask = GEN_INT (0x80000000);
1835 mask = gen_rtx_CONST_VECTOR (V2SImode, gen_rtvec (2, mask, mask));
1836 mask = force_reg (mode, mask);
1837 t1 = gen_reg_rtx (mode);
1838 emit_insn (gen_subv2si3 (t1, op0, mask));
1839 t2 = gen_reg_rtx (mode);
1840 emit_insn (gen_subv2si3 (t2, op1, mask));
1841 op0 = t1;
1842 op1 = t2;
1843 code = GT;
1844 }
1845 break;
1846
1847 case V8QImode:
1848 case V4HImode:
1849 /* Perform a parallel unsigned saturating subtraction. */
1850 x = gen_reg_rtx (mode);
1851 emit_insn (gen_rtx_SET (VOIDmode, x,
1852 gen_rtx_US_MINUS (mode, op0, op1)));
1853
1854 code = EQ;
1855 op0 = x;
1856 op1 = CONST0_RTX (mode);
1857 negate = !negate;
1858 break;
1859
1860 default:
1861 gcc_unreachable ();
1862 }
1863 }
1864
1865 x = gen_rtx_fmt_ee (code, mode, op0, op1);
1866 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
1867
1868 return negate;
1869 }
1870
1871 /* Emit an integral vector conditional move. */
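/* The vector comparison yields an all-ones/all-zeros mask per element,
   so the selection below is open-coded (sketch) as

	dest = (cmp & op_true) | (~cmp & op_false)

   with the cases where one arm is the zero vector short-circuited.  */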
1872
1873 void
1874 ia64_expand_vecint_cmov (rtx operands[])
1875 {
1876 enum machine_mode mode = GET_MODE (operands[0]);
1877 enum rtx_code code = GET_CODE (operands[3]);
1878 bool negate;
1879 rtx cmp, x, ot, of;
1880
1881 cmp = gen_reg_rtx (mode);
1882 negate = ia64_expand_vecint_compare (code, mode, cmp,
1883 operands[4], operands[5]);
1884
1885 ot = operands[1+negate];
1886 of = operands[2-negate];
1887
1888 if (ot == CONST0_RTX (mode))
1889 {
1890 if (of == CONST0_RTX (mode))
1891 {
1892 emit_move_insn (operands[0], ot);
1893 return;
1894 }
1895
1896 x = gen_rtx_NOT (mode, cmp);
1897 x = gen_rtx_AND (mode, x, of);
1898 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1899 }
1900 else if (of == CONST0_RTX (mode))
1901 {
1902 x = gen_rtx_AND (mode, cmp, ot);
1903 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1904 }
1905 else
1906 {
1907 rtx t, f;
1908
1909 t = gen_reg_rtx (mode);
1910 x = gen_rtx_AND (mode, cmp, operands[1+negate]);
1911 emit_insn (gen_rtx_SET (VOIDmode, t, x));
1912
1913 f = gen_reg_rtx (mode);
1914 x = gen_rtx_NOT (mode, cmp);
1915 x = gen_rtx_AND (mode, x, operands[2-negate]);
1916 emit_insn (gen_rtx_SET (VOIDmode, f, x));
1917
1918 x = gen_rtx_IOR (mode, t, f);
1919 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1920 }
1921 }
1922
1923 /* Emit an integral vector min or max operation. Return true if all done. */
1924
1925 bool
1926 ia64_expand_vecint_minmax (enum rtx_code code, enum machine_mode mode,
1927 rtx operands[])
1928 {
1929 rtx xops[6];
1930
1931 /* These four combinations are supported directly. */
1932 if (mode == V8QImode && (code == UMIN || code == UMAX))
1933 return false;
1934 if (mode == V4HImode && (code == SMIN || code == SMAX))
1935 return false;
1936
1937 /* This combination can be implemented with only saturating subtraction. */
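/* The identity used here: umax (a, b) == (a -us b) + b, since the
   saturating difference is a - b when a >u b and 0 otherwise.  E.g.
   a = 3, b = 7 gives 0 + 7 = 7, and a = 7, b = 3 gives 4 + 3 = 7.  */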
1938 if (mode == V4HImode && code == UMAX)
1939 {
1940 rtx x, tmp = gen_reg_rtx (mode);
1941
1942 x = gen_rtx_US_MINUS (mode, operands[1], operands[2]);
1943 emit_insn (gen_rtx_SET (VOIDmode, tmp, x));
1944
1945 emit_insn (gen_addv4hi3 (operands[0], tmp, operands[2]));
1946 return true;
1947 }
1948
1949 /* Everything else implemented via vector comparisons. */
1950 xops[0] = operands[0];
1951 xops[4] = xops[1] = operands[1];
1952 xops[5] = xops[2] = operands[2];
1953
1954 switch (code)
1955 {
1956 case UMIN:
1957 code = LTU;
1958 break;
1959 case UMAX:
1960 code = GTU;
1961 break;
1962 case SMIN:
1963 code = LT;
1964 break;
1965 case SMAX:
1966 code = GT;
1967 break;
1968 default:
1969 gcc_unreachable ();
1970 }
1971 xops[3] = gen_rtx_fmt_ee (code, VOIDmode, operands[1], operands[2]);
1972
1973 ia64_expand_vecint_cmov (xops);
1974 return true;
1975 }
1976
1977 /* The vectors LO and HI each contain N halves of a double-wide vector.
1978 Reassemble either the first N/2 or the second N/2 elements. */
1979
1980 void
1981 ia64_unpack_assemble (rtx out, rtx lo, rtx hi, bool highp)
1982 {
1983 enum machine_mode mode = GET_MODE (lo);
1984 rtx (*gen) (rtx, rtx, rtx);
1985 rtx x;
1986
1987 switch (mode)
1988 {
1989 case V8QImode:
1990 gen = highp ? gen_vec_interleave_highv8qi : gen_vec_interleave_lowv8qi;
1991 break;
1992 case V4HImode:
1993 gen = highp ? gen_vec_interleave_highv4hi : gen_vec_interleave_lowv4hi;
1994 break;
1995 default:
1996 gcc_unreachable ();
1997 }
1998
1999 x = gen_lowpart (mode, out);
2000 if (TARGET_BIG_ENDIAN)
2001 x = gen (x, hi, lo);
2002 else
2003 x = gen (x, lo, hi);
2004 emit_insn (x);
2005 }
2006
2007 /* Return a vector of the sign-extension of VEC (zero if UNSIGNEDP). */
2008
2009 static rtx
2010 ia64_unpack_sign (rtx vec, bool unsignedp)
2011 {
2012 enum machine_mode mode = GET_MODE (vec);
2013 rtx zero = CONST0_RTX (mode);
2014
2015 if (unsignedp)
2016 return zero;
2017 else
2018 {
2019 rtx sign = gen_reg_rtx (mode);
2020 bool neg;
2021
2022 neg = ia64_expand_vecint_compare (LT, mode, sign, vec, zero);
2023 gcc_assert (!neg);
2024
2025 return sign;
2026 }
2027 }
2028
2029 /* Emit an integral vector unpack operation. */
2030
2031 void
2032 ia64_expand_unpack (rtx operands[3], bool unsignedp, bool highp)
2033 {
2034 rtx sign = ia64_unpack_sign (operands[1], unsignedp);
2035 ia64_unpack_assemble (operands[0], operands[1], sign, highp);
2036 }
2037
2038 /* Emit an integral vector widening sum operation. */
2039
2040 void
2041 ia64_expand_widen_sum (rtx operands[3], bool unsignedp)
2042 {
2043 enum machine_mode wmode;
2044 rtx l, h, t, sign;
2045
2046 sign = ia64_unpack_sign (operands[1], unsignedp);
2047
2048 wmode = GET_MODE (operands[0]);
2049 l = gen_reg_rtx (wmode);
2050 h = gen_reg_rtx (wmode);
2051
2052 ia64_unpack_assemble (l, operands[1], sign, false);
2053 ia64_unpack_assemble (h, operands[1], sign, true);
2054
2055 t = expand_binop (wmode, add_optab, l, operands[2], NULL, 0, OPTAB_DIRECT);
2056 t = expand_binop (wmode, add_optab, h, t, operands[0], 0, OPTAB_DIRECT);
2057 if (t != operands[0])
2058 emit_move_insn (operands[0], t);
2059 }
2060
2061 /* Emit a signed or unsigned V8QI dot product operation. */
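/* A sketch of the decomposition used below: both V8QI operands are
   widened into low/high V4HI halves (using the sign vectors from
   ia64_unpack_sign), the corresponding halves are multiplied with the
   even/odd pmpy2 patterns into four V2SI partial products, and those
   partial products plus the V2SI accumulator operand are summed into
   the result.  */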
2062
2063 void
2064 ia64_expand_dot_prod_v8qi (rtx operands[4], bool unsignedp)
2065 {
2066 rtx op1, op2, sn1, sn2, l1, l2, h1, h2;
2067 rtx p1, p2, p3, p4, s1, s2, s3;
2068
2069 op1 = operands[1];
2070 op2 = operands[2];
2071 sn1 = ia64_unpack_sign (op1, unsignedp);
2072 sn2 = ia64_unpack_sign (op2, unsignedp);
2073
2074 l1 = gen_reg_rtx (V4HImode);
2075 l2 = gen_reg_rtx (V4HImode);
2076 h1 = gen_reg_rtx (V4HImode);
2077 h2 = gen_reg_rtx (V4HImode);
2078 ia64_unpack_assemble (l1, op1, sn1, false);
2079 ia64_unpack_assemble (l2, op2, sn2, false);
2080 ia64_unpack_assemble (h1, op1, sn1, true);
2081 ia64_unpack_assemble (h2, op2, sn2, true);
2082
2083 p1 = gen_reg_rtx (V2SImode);
2084 p2 = gen_reg_rtx (V2SImode);
2085 p3 = gen_reg_rtx (V2SImode);
2086 p4 = gen_reg_rtx (V2SImode);
2087 emit_insn (gen_pmpy2_even (p1, l1, l2));
2088 emit_insn (gen_pmpy2_even (p2, h1, h2));
2089 emit_insn (gen_pmpy2_odd (p3, l1, l2));
2090 emit_insn (gen_pmpy2_odd (p4, h1, h2));
2091
2092 s1 = gen_reg_rtx (V2SImode);
2093 s2 = gen_reg_rtx (V2SImode);
2094 s3 = gen_reg_rtx (V2SImode);
2095 emit_insn (gen_addv2si3 (s1, p1, p2));
2096 emit_insn (gen_addv2si3 (s2, p3, p4));
2097 emit_insn (gen_addv2si3 (s3, s1, operands[3]));
2098 emit_insn (gen_addv2si3 (operands[0], s2, s3));
2099 }
2100
2101 /* Emit the appropriate sequence for a call. */
2102
2103 void
2104 ia64_expand_call (rtx retval, rtx addr, rtx nextarg ATTRIBUTE_UNUSED,
2105 int sibcall_p)
2106 {
2107 rtx insn, b0;
2108
2109 addr = XEXP (addr, 0);
2110 addr = convert_memory_address (DImode, addr);
2111 b0 = gen_rtx_REG (DImode, R_BR (0));
2112
2113 /* ??? Should do this for functions known to bind locally too. */
2114 if (TARGET_NO_PIC || TARGET_AUTO_PIC)
2115 {
2116 if (sibcall_p)
2117 insn = gen_sibcall_nogp (addr);
2118 else if (! retval)
2119 insn = gen_call_nogp (addr, b0);
2120 else
2121 insn = gen_call_value_nogp (retval, addr, b0);
2122 insn = emit_call_insn (insn);
2123 }
2124 else
2125 {
2126 if (sibcall_p)
2127 insn = gen_sibcall_gp (addr);
2128 else if (! retval)
2129 insn = gen_call_gp (addr, b0);
2130 else
2131 insn = gen_call_value_gp (retval, addr, b0);
2132 insn = emit_call_insn (insn);
2133
2134 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
2135 }
2136
2137 if (sibcall_p)
2138 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), b0);
2139
2140 if (TARGET_ABI_OPEN_VMS)
2141 use_reg (&CALL_INSN_FUNCTION_USAGE (insn),
2142 gen_rtx_REG (DImode, GR_REG (25)));
2143 }
2144
2145 static void
2146 reg_emitted (enum ia64_frame_regs r)
2147 {
2148 if (emitted_frame_related_regs[r] == 0)
2149 emitted_frame_related_regs[r] = current_frame_info.r[r];
2150 else
2151 gcc_assert (emitted_frame_related_regs[r] == current_frame_info.r[r]);
2152 }
2153
2154 static int
2155 get_reg (enum ia64_frame_regs r)
2156 {
2157 reg_emitted (r);
2158 return current_frame_info.r[r];
2159 }
2160
2161 static bool
2162 is_emitted (int regno)
2163 {
2164 unsigned int r;
2165
2166 for (r = reg_fp; r < number_of_ia64_frame_regs; r++)
2167 if (emitted_frame_related_regs[r] == regno)
2168 return true;
2169 return false;
2170 }
2171
2172 void
2173 ia64_reload_gp (void)
2174 {
2175 rtx tmp;
2176
2177 if (current_frame_info.r[reg_save_gp])
2178 {
2179 tmp = gen_rtx_REG (DImode, get_reg (reg_save_gp));
2180 }
2181 else
2182 {
2183 HOST_WIDE_INT offset;
2184 rtx offset_r;
2185
2186 offset = (current_frame_info.spill_cfa_off
2187 + current_frame_info.spill_size);
2188 if (frame_pointer_needed)
2189 {
2190 tmp = hard_frame_pointer_rtx;
2191 offset = -offset;
2192 }
2193 else
2194 {
2195 tmp = stack_pointer_rtx;
2196 offset = current_frame_info.total_size - offset;
2197 }
2198
2199 offset_r = GEN_INT (offset);
2200 if (satisfies_constraint_I (offset_r))
2201 emit_insn (gen_adddi3 (pic_offset_table_rtx, tmp, offset_r));
2202 else
2203 {
2204 emit_move_insn (pic_offset_table_rtx, offset_r);
2205 emit_insn (gen_adddi3 (pic_offset_table_rtx,
2206 pic_offset_table_rtx, tmp));
2207 }
2208
2209 tmp = gen_rtx_MEM (DImode, pic_offset_table_rtx);
2210 }
2211
2212 emit_move_insn (pic_offset_table_rtx, tmp);
2213 }
2214
2215 void
2216 ia64_split_call (rtx retval, rtx addr, rtx retaddr, rtx scratch_r,
2217 rtx scratch_b, int noreturn_p, int sibcall_p)
2218 {
2219 rtx insn;
2220 bool is_desc = false;
2221
2222 /* If we find we're calling through a register, then we're actually
2223 calling through a descriptor, so load up the values. */
2224 if (REG_P (addr) && GR_REGNO_P (REGNO (addr)))
2225 {
2226 rtx tmp;
2227 bool addr_dead_p;
2228
2229 /* ??? We are currently constrained to *not* use peep2, because
2230 we can legitimately change the global lifetime of the GP
2231 (in the form of killing where previously live). This is
2232 because a call through a descriptor doesn't use the previous
2233 value of the GP, while a direct call does, and we do not
2234 commit to either form until the split here.
2235
2236 That said, this means that we lack precise life info for
2237 whether ADDR is dead after this call. This is not terribly
2238 important, since we can fix things up essentially for free
2239 with the POST_DEC below, but it's nice to not use it when we
2240 can immediately tell it's not necessary. */
2241 addr_dead_p = ((noreturn_p || sibcall_p
2242 || TEST_HARD_REG_BIT (regs_invalidated_by_call,
2243 REGNO (addr)))
2244 && !FUNCTION_ARG_REGNO_P (REGNO (addr)));
2245
2246 /* Load the code address into scratch_b. */
2247 tmp = gen_rtx_POST_INC (Pmode, addr);
2248 tmp = gen_rtx_MEM (Pmode, tmp);
2249 emit_move_insn (scratch_r, tmp);
2250 emit_move_insn (scratch_b, scratch_r);
2251
2252 /* Load the GP address. If ADDR is not dead here, then we must
2253 revert the change made above via the POST_INCREMENT. */
2254 if (!addr_dead_p)
2255 tmp = gen_rtx_POST_DEC (Pmode, addr);
2256 else
2257 tmp = addr;
2258 tmp = gen_rtx_MEM (Pmode, tmp);
2259 emit_move_insn (pic_offset_table_rtx, tmp);
2260
2261 is_desc = true;
2262 addr = scratch_b;
2263 }
2264
2265 if (sibcall_p)
2266 insn = gen_sibcall_nogp (addr);
2267 else if (retval)
2268 insn = gen_call_value_nogp (retval, addr, retaddr);
2269 else
2270 insn = gen_call_nogp (addr, retaddr);
2271 emit_call_insn (insn);
2272
2273 if ((!TARGET_CONST_GP || is_desc) && !noreturn_p && !sibcall_p)
2274 ia64_reload_gp ();
2275 }
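
/* A rough sketch of the descriptor path above, with illustrative names:
   given ADDR pointing at a function descriptor { code address, gp },
   the split emits the equivalent of

	scratch_r = *ADDR++;       (code address, via the POST_INC)
	scratch_b = scratch_r;     (branch register used for the call)
	gp        = *ADDR;         (callee's gp; a POST_DEC restores ADDR
				    when it must stay live)
	call through scratch_b;

   and ia64_reload_gp recovers our own gp afterwards when needed.  */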
2276
2277 /* Expand an atomic operation. We want to perform MEM <CODE>= VAL atomically.
2278
2279 This differs from the generic code in that we know about the zero-extending
2280 properties of cmpxchg, and the zero-extending requirements of ar.ccv. We
2281 also know that ld.acq+cmpxchg.rel equals a full barrier.
2282
2283 The loop we want to generate looks like
2284
2285 cmp_reg = mem;
2286 label:
2287 old_reg = cmp_reg;
2288 new_reg = cmp_reg op val;
2289 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
2290 if (cmp_reg != old_reg)
2291 goto label;
2292
2293 Note that we only do the plain load from memory once. Subsequent
2294 iterations use the value loaded by the compare-and-swap pattern. */
2295
2296 void
2297 ia64_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
2298 rtx old_dst, rtx new_dst)
2299 {
2300 enum machine_mode mode = GET_MODE (mem);
2301 rtx old_reg, new_reg, cmp_reg, ar_ccv, label;
2302 enum insn_code icode;
2303
2304 /* Special case for using fetchadd. */
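/* For example, an atomic "mem += 8" in DImode becomes a memory barrier
   followed by a single fetchadd8.acq, with the old value left in
   OLD_DST.  fetchadd only accepts the increments -16, -8, -4, -1, 1,
   4, 8 and 16, which is what fetchadd_operand is assumed to check.  */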
2305 if ((mode == SImode || mode == DImode)
2306 && (code == PLUS || code == MINUS)
2307 && fetchadd_operand (val, mode))
2308 {
2309 if (code == MINUS)
2310 val = GEN_INT (-INTVAL (val));
2311
2312 if (!old_dst)
2313 old_dst = gen_reg_rtx (mode);
2314
2315 emit_insn (gen_memory_barrier ());
2316
2317 if (mode == SImode)
2318 icode = CODE_FOR_fetchadd_acq_si;
2319 else
2320 icode = CODE_FOR_fetchadd_acq_di;
2321 emit_insn (GEN_FCN (icode) (old_dst, mem, val));
2322
2323 if (new_dst)
2324 {
2325 new_reg = expand_simple_binop (mode, PLUS, old_dst, val, new_dst,
2326 true, OPTAB_WIDEN);
2327 if (new_reg != new_dst)
2328 emit_move_insn (new_dst, new_reg);
2329 }
2330 return;
2331 }
2332
2333 /* Because of the volatile mem read, we get an ld.acq, which is the
2334 front half of the full barrier. The end half is the cmpxchg.rel. */
2335 gcc_assert (MEM_VOLATILE_P (mem));
2336
2337 old_reg = gen_reg_rtx (DImode);
2338 cmp_reg = gen_reg_rtx (DImode);
2339 label = gen_label_rtx ();
2340
2341 if (mode != DImode)
2342 {
2343 val = simplify_gen_subreg (DImode, val, mode, 0);
2344 emit_insn (gen_extend_insn (cmp_reg, mem, DImode, mode, 1));
2345 }
2346 else
2347 emit_move_insn (cmp_reg, mem);
2348
2349 emit_label (label);
2350
2351 ar_ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
2352 emit_move_insn (old_reg, cmp_reg);
2353 emit_move_insn (ar_ccv, cmp_reg);
2354
2355 if (old_dst)
2356 emit_move_insn (old_dst, gen_lowpart (mode, cmp_reg));
2357
2358 new_reg = cmp_reg;
2359 if (code == NOT)
2360 {
2361 new_reg = expand_simple_binop (DImode, AND, new_reg, val, NULL_RTX,
2362 true, OPTAB_DIRECT);
2363 new_reg = expand_simple_unop (DImode, code, new_reg, NULL_RTX, true);
2364 }
2365 else
2366 new_reg = expand_simple_binop (DImode, code, new_reg, val, NULL_RTX,
2367 true, OPTAB_DIRECT);
2368
2369 if (mode != DImode)
2370 new_reg = gen_lowpart (mode, new_reg);
2371 if (new_dst)
2372 emit_move_insn (new_dst, new_reg);
2373
2374 switch (mode)
2375 {
2376 case QImode: icode = CODE_FOR_cmpxchg_rel_qi; break;
2377 case HImode: icode = CODE_FOR_cmpxchg_rel_hi; break;
2378 case SImode: icode = CODE_FOR_cmpxchg_rel_si; break;
2379 case DImode: icode = CODE_FOR_cmpxchg_rel_di; break;
2380 default:
2381 gcc_unreachable ();
2382 }
2383
2384 emit_insn (GEN_FCN (icode) (cmp_reg, mem, ar_ccv, new_reg));
2385
2386 emit_cmp_and_jump_insns (cmp_reg, old_reg, NE, NULL, DImode, true, label);
2387 }
2388 \f
2389 /* Begin the assembly file. */
2390
2391 static void
2392 ia64_file_start (void)
2393 {
2394 /* Variable tracking should be run after all optimizations which change order
2395 of insns. It also needs a valid CFG. This can't be done in
2396 ia64_option_override, because flag_var_tracking is finalized after
2397 that. */
2398 ia64_flag_var_tracking = flag_var_tracking;
2399 flag_var_tracking = 0;
2400
2401 default_file_start ();
2402 emit_safe_across_calls ();
2403 }
2404
2405 void
2406 emit_safe_across_calls (void)
2407 {
2408 unsigned int rs, re;
2409 int out_state;
2410
2411 rs = 1;
2412 out_state = 0;
2413 while (1)
2414 {
2415 while (rs < 64 && call_used_regs[PR_REG (rs)])
2416 rs++;
2417 if (rs >= 64)
2418 break;
2419 for (re = rs + 1; re < 64 && ! call_used_regs[PR_REG (re)]; re++)
2420 continue;
2421 if (out_state == 0)
2422 {
2423 fputs ("\t.pred.safe_across_calls ", asm_out_file);
2424 out_state = 1;
2425 }
2426 else
2427 fputc (',', asm_out_file);
2428 if (re == rs + 1)
2429 fprintf (asm_out_file, "p%u", rs);
2430 else
2431 fprintf (asm_out_file, "p%u-p%u", rs, re - 1);
2432 rs = re + 1;
2433 }
2434 if (out_state)
2435 fputc ('\n', asm_out_file);
2436 }
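
/* With the usual call_used_regs settings this typically boils down to a
   single line of the form

	.pred.safe_across_calls p1-p5,p16-p63

   naming the predicate registers preserved across calls; the exact
   ranges depend on the target's call_used_regs.  */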
2437
2438 /* Globalize a declaration. */
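/* For instance (hypothetical declaration), a function declared with

	extern int foo (void) __attribute__((version_id ("12345")));

   gets an extra

	.alias foo#, "foo{12345}"

   directive emitted before its symbol is globalized.  */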
2439
2440 static void
2441 ia64_globalize_decl_name (FILE * stream, tree decl)
2442 {
2443 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
2444 tree version_attr = lookup_attribute ("version_id", DECL_ATTRIBUTES (decl));
2445 if (version_attr)
2446 {
2447 tree v = TREE_VALUE (TREE_VALUE (version_attr));
2448 const char *p = TREE_STRING_POINTER (v);
2449 fprintf (stream, "\t.alias %s#, \"%s{%s}\"\n", name, name, p);
2450 }
2451 targetm.asm_out.globalize_label (stream, name);
2452 if (TREE_CODE (decl) == FUNCTION_DECL)
2453 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "function");
2454 }
2455
2456 /* Helper function for ia64_compute_frame_size: find an appropriate general
2457 register to spill some special register to. Bits already allocated by this
2458 routine for GR0 to GR31 are tracked in current_frame_info.gr_used_mask.
2459 TRY_LOCALS is true if we should attempt to locate a local regnum. */
2460
2461 static int
2462 find_gr_spill (enum ia64_frame_regs r, int try_locals)
2463 {
2464 int regno;
2465
2466 if (emitted_frame_related_regs[r] != 0)
2467 {
2468 regno = emitted_frame_related_regs[r];
2469 if (regno >= LOC_REG (0) && regno < LOC_REG (80 - frame_pointer_needed)
2470 && current_frame_info.n_local_regs < regno - LOC_REG (0) + 1)
2471 current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;
2472 else if (current_function_is_leaf
2473 && regno >= GR_REG (1) && regno <= GR_REG (31))
2474 current_frame_info.gr_used_mask |= 1 << regno;
2475
2476 return regno;
2477 }
2478
2479 /* If this is a leaf function, first try an otherwise unused
2480 call-clobbered register. */
2481 if (current_function_is_leaf)
2482 {
2483 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
2484 if (! df_regs_ever_live_p (regno)
2485 && call_used_regs[regno]
2486 && ! fixed_regs[regno]
2487 && ! global_regs[regno]
2488 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0
2489 && ! is_emitted (regno))
2490 {
2491 current_frame_info.gr_used_mask |= 1 << regno;
2492 return regno;
2493 }
2494 }
2495
2496 if (try_locals)
2497 {
2498 regno = current_frame_info.n_local_regs;
2499 /* If there is a frame pointer, then we can't use loc79, because
2500 that is HARD_FRAME_POINTER_REGNUM. In particular, see the
2501 reg_name switching code in ia64_expand_prologue. */
2502 while (regno < (80 - frame_pointer_needed))
2503 if (! is_emitted (LOC_REG (regno++)))
2504 {
2505 current_frame_info.n_local_regs = regno;
2506 return LOC_REG (regno - 1);
2507 }
2508 }
2509
2510 /* Failed to find a general register to spill to. Must use stack. */
2511 return 0;
2512 }
2513
2514 /* In order to make for nice schedules, we try to allocate every temporary
2515 to a different register. We must of course stay away from call-saved,
2516 fixed, and global registers. We must also stay away from registers
2517 allocated in current_frame_info.gr_used_mask, since those include regs
2518 used all through the prologue.
2519
2520 Any register allocated here must be used immediately. The idea is to
2521 aid scheduling, not to solve data flow problems. */
2522
2523 static int last_scratch_gr_reg;
2524
2525 static int
2526 next_scratch_gr_reg (void)
2527 {
2528 int i, regno;
2529
2530 for (i = 0; i < 32; ++i)
2531 {
2532 regno = (last_scratch_gr_reg + i + 1) & 31;
2533 if (call_used_regs[regno]
2534 && ! fixed_regs[regno]
2535 && ! global_regs[regno]
2536 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
2537 {
2538 last_scratch_gr_reg = regno;
2539 return regno;
2540 }
2541 }
2542
2543 /* There must be _something_ available. */
2544 gcc_unreachable ();
2545 }
2546
2547 /* Helper function for ia64_compute_frame_size, called through
2548 diddle_return_value. Mark REG in current_frame_info.gr_used_mask. */
2549
2550 static void
2551 mark_reg_gr_used_mask (rtx reg, void *data ATTRIBUTE_UNUSED)
2552 {
2553 unsigned int regno = REGNO (reg);
2554 if (regno < 32)
2555 {
2556 unsigned int i, n = hard_regno_nregs[regno][GET_MODE (reg)];
2557 for (i = 0; i < n; ++i)
2558 current_frame_info.gr_used_mask |= 1 << (regno + i);
2559 }
2560 }
2561
2562
2563 /* Returns the number of bytes offset between the frame pointer and the stack
2564 pointer for the current function. SIZE is the number of bytes of space
2565 needed for local variables. */
2566
2567 static void
2568 ia64_compute_frame_size (HOST_WIDE_INT size)
2569 {
2570 HOST_WIDE_INT total_size;
2571 HOST_WIDE_INT spill_size = 0;
2572 HOST_WIDE_INT extra_spill_size = 0;
2573 HOST_WIDE_INT pretend_args_size;
2574 HARD_REG_SET mask;
2575 int n_spilled = 0;
2576 int spilled_gr_p = 0;
2577 int spilled_fr_p = 0;
2578 unsigned int regno;
2579 int min_regno;
2580 int max_regno;
2581 int i;
2582
2583 if (current_frame_info.initialized)
2584 return;
2585
2586 memset (&current_frame_info, 0, sizeof current_frame_info);
2587 CLEAR_HARD_REG_SET (mask);
2588
2589 /* Don't allocate scratches to the return register. */
2590 diddle_return_value (mark_reg_gr_used_mask, NULL);
2591
2592 /* Don't allocate scratches to the EH scratch registers. */
2593 if (cfun->machine->ia64_eh_epilogue_sp)
2594 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_sp, NULL);
2595 if (cfun->machine->ia64_eh_epilogue_bsp)
2596 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_bsp, NULL);
2597
2598 /* Find the size of the register stack frame. Of the 96 stacked
2599 registers (r32-r127), 8 are reserved for inputs and 8 for outputs,
2600 leaving only 80 available as locals. */
2601
2602 /* Skip HARD_FRAME_POINTER_REGNUM (loc79) when frame_pointer_needed,
2603 since we'll be adjusting that down later. */
2604 regno = LOC_REG (78) + ! frame_pointer_needed;
2605 for (; regno >= LOC_REG (0); regno--)
2606 if (df_regs_ever_live_p (regno) && !is_emitted (regno))
2607 break;
2608 current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;
2609
2610 /* For functions marked with the syscall_linkage attribute, we must mark
2611 all eight input registers as in use, so that locals aren't visible to
2612 the caller. */
2613
2614 if (cfun->machine->n_varargs > 0
2615 || lookup_attribute ("syscall_linkage",
2616 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
2617 current_frame_info.n_input_regs = 8;
2618 else
2619 {
2620 for (regno = IN_REG (7); regno >= IN_REG (0); regno--)
2621 if (df_regs_ever_live_p (regno))
2622 break;
2623 current_frame_info.n_input_regs = regno - IN_REG (0) + 1;
2624 }
2625
2626 for (regno = OUT_REG (7); regno >= OUT_REG (0); regno--)
2627 if (df_regs_ever_live_p (regno))
2628 break;
2629 i = regno - OUT_REG (0) + 1;
2630
2631 #ifndef PROFILE_HOOK
2632 /* When -p profiling, we need one output register for the mcount argument.
2633 Likewise for -a profiling for the bb_init_func argument. For -ax
2634 profiling, we need two output registers for the two bb_init_trace_func
2635 arguments. */
2636 if (crtl->profile)
2637 i = MAX (i, 1);
2638 #endif
2639 current_frame_info.n_output_regs = i;
2640
2641 /* ??? No rotating register support yet. */
2642 current_frame_info.n_rotate_regs = 0;
2643
2644 /* Discover which registers need spilling, and how much room that
2645 will take. Begin with floating point and general registers,
2646 which will always wind up on the stack. */
2647
2648 for (regno = FR_REG (2); regno <= FR_REG (127); regno++)
2649 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2650 {
2651 SET_HARD_REG_BIT (mask, regno);
2652 spill_size += 16;
2653 n_spilled += 1;
2654 spilled_fr_p = 1;
2655 }
2656
2657 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
2658 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2659 {
2660 SET_HARD_REG_BIT (mask, regno);
2661 spill_size += 8;
2662 n_spilled += 1;
2663 spilled_gr_p = 1;
2664 }
2665
2666 for (regno = BR_REG (1); regno <= BR_REG (7); regno++)
2667 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2668 {
2669 SET_HARD_REG_BIT (mask, regno);
2670 spill_size += 8;
2671 n_spilled += 1;
2672 }
2673
2674 /* Now come all special registers that might get saved in other
2675 general registers. */
2676
2677 if (frame_pointer_needed)
2678 {
2679 current_frame_info.r[reg_fp] = find_gr_spill (reg_fp, 1);
2680 /* If we did not get a register, then we take LOC79. This is guaranteed
2681 to be free, even if regs_ever_live is already set, because this is
2682 HARD_FRAME_POINTER_REGNUM. This requires incrementing n_local_regs,
2683 as we don't count loc79 above. */
2684 if (current_frame_info.r[reg_fp] == 0)
2685 {
2686 current_frame_info.r[reg_fp] = LOC_REG (79);
2687 current_frame_info.n_local_regs = LOC_REG (79) - LOC_REG (0) + 1;
2688 }
2689 }
2690
2691 if (! current_function_is_leaf)
2692 {
2693 /* Emit a save of BR0 if we call other functions. Do this even
2694 if this function doesn't return, as EH depends on this to be
2695 able to unwind the stack. */
2696 SET_HARD_REG_BIT (mask, BR_REG (0));
2697
2698 current_frame_info.r[reg_save_b0] = find_gr_spill (reg_save_b0, 1);
2699 if (current_frame_info.r[reg_save_b0] == 0)
2700 {
2701 extra_spill_size += 8;
2702 n_spilled += 1;
2703 }
2704
2705 /* Similarly for ar.pfs. */
2706 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
2707 current_frame_info.r[reg_save_ar_pfs] = find_gr_spill (reg_save_ar_pfs, 1);
2708 if (current_frame_info.r[reg_save_ar_pfs] == 0)
2709 {
2710 extra_spill_size += 8;
2711 n_spilled += 1;
2712 }
2713
2714 /* Similarly for gp. Note that if we're calling setjmp, the stacked
2715 registers are clobbered, so we fall back to the stack. */
2716 current_frame_info.r[reg_save_gp]
2717 = (cfun->calls_setjmp ? 0 : find_gr_spill (reg_save_gp, 1));
2718 if (current_frame_info.r[reg_save_gp] == 0)
2719 {
2720 SET_HARD_REG_BIT (mask, GR_REG (1));
2721 spill_size += 8;
2722 n_spilled += 1;
2723 }
2724 }
2725 else
2726 {
2727 if (df_regs_ever_live_p (BR_REG (0)) && ! call_used_regs[BR_REG (0)])
2728 {
2729 SET_HARD_REG_BIT (mask, BR_REG (0));
2730 extra_spill_size += 8;
2731 n_spilled += 1;
2732 }
2733
2734 if (df_regs_ever_live_p (AR_PFS_REGNUM))
2735 {
2736 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
2737 current_frame_info.r[reg_save_ar_pfs]
2738 = find_gr_spill (reg_save_ar_pfs, 1);
2739 if (current_frame_info.r[reg_save_ar_pfs] == 0)
2740 {
2741 extra_spill_size += 8;
2742 n_spilled += 1;
2743 }
2744 }
2745 }
2746
2747 /* Unwind descriptor hackery: things are most efficient if we allocate
2748 consecutive GR save registers for RP, PFS, FP in that order. However,
2749 it is absolutely critical that FP get the only hard register that's
2750 guaranteed to be free, so we allocate it first. If all three did
2751 happen to be allocated hard regs, and are consecutive, rearrange them
2752 into the preferred order now.
2753
2754 If we have already emitted code for any of those registers,
2755 then it's already too late to change. */
2756 min_regno = MIN (current_frame_info.r[reg_fp],
2757 MIN (current_frame_info.r[reg_save_b0],
2758 current_frame_info.r[reg_save_ar_pfs]));
2759 max_regno = MAX (current_frame_info.r[reg_fp],
2760 MAX (current_frame_info.r[reg_save_b0],
2761 current_frame_info.r[reg_save_ar_pfs]));
2762 if (min_regno > 0
2763 && min_regno + 2 == max_regno
2764 && (current_frame_info.r[reg_fp] == min_regno + 1
2765 || current_frame_info.r[reg_save_b0] == min_regno + 1
2766 || current_frame_info.r[reg_save_ar_pfs] == min_regno + 1)
2767 && (emitted_frame_related_regs[reg_save_b0] == 0
2768 || emitted_frame_related_regs[reg_save_b0] == min_regno)
2769 && (emitted_frame_related_regs[reg_save_ar_pfs] == 0
2770 || emitted_frame_related_regs[reg_save_ar_pfs] == min_regno + 1)
2771 && (emitted_frame_related_regs[reg_fp] == 0
2772 || emitted_frame_related_regs[reg_fp] == min_regno + 2))
2773 {
2774 current_frame_info.r[reg_save_b0] = min_regno;
2775 current_frame_info.r[reg_save_ar_pfs] = min_regno + 1;
2776 current_frame_info.r[reg_fp] = min_regno + 2;
2777 }
2778
2779 /* See if we need to store the predicate register block. */
2780 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2781 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2782 break;
2783 if (regno <= PR_REG (63))
2784 {
2785 SET_HARD_REG_BIT (mask, PR_REG (0));
2786 current_frame_info.r[reg_save_pr] = find_gr_spill (reg_save_pr, 1);
2787 if (current_frame_info.r[reg_save_pr] == 0)
2788 {
2789 extra_spill_size += 8;
2790 n_spilled += 1;
2791 }
2792
2793 /* ??? Mark them all as used so that register renaming and such
2794 are free to use them. */
2795 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2796 df_set_regs_ever_live (regno, true);
2797 }
2798
2799 /* If we're forced to use st8.spill, we're forced to save and restore
2800 ar.unat as well. The check for existing liveness allows inline asm
2801 to touch ar.unat. */
2802 if (spilled_gr_p || cfun->machine->n_varargs
2803 || df_regs_ever_live_p (AR_UNAT_REGNUM))
2804 {
2805 df_set_regs_ever_live (AR_UNAT_REGNUM, true);
2806 SET_HARD_REG_BIT (mask, AR_UNAT_REGNUM);
2807 current_frame_info.r[reg_save_ar_unat]
2808 = find_gr_spill (reg_save_ar_unat, spill_size == 0);
2809 if (current_frame_info.r[reg_save_ar_unat] == 0)
2810 {
2811 extra_spill_size += 8;
2812 n_spilled += 1;
2813 }
2814 }
2815
2816 if (df_regs_ever_live_p (AR_LC_REGNUM))
2817 {
2818 SET_HARD_REG_BIT (mask, AR_LC_REGNUM);
2819 current_frame_info.r[reg_save_ar_lc]
2820 = find_gr_spill (reg_save_ar_lc, spill_size == 0);
2821 if (current_frame_info.r[reg_save_ar_lc] == 0)
2822 {
2823 extra_spill_size += 8;
2824 n_spilled += 1;
2825 }
2826 }
2827
2828 /* If we have an odd number of words of pretend arguments written to
2829 the stack, then the FR save area will be unaligned. We round the
2830 size of this area up to keep things 16 byte aligned. */
2831 if (spilled_fr_p)
2832 pretend_args_size = IA64_STACK_ALIGN (crtl->args.pretend_args_size);
2833 else
2834 pretend_args_size = crtl->args.pretend_args_size;
2835
2836 total_size = (spill_size + extra_spill_size + size + pretend_args_size
2837 + crtl->outgoing_args_size);
2838 total_size = IA64_STACK_ALIGN (total_size);
2839
2840 /* We always use the 16-byte scratch area provided by the caller, but
2841 if we are a leaf function, there's no one to which we need to provide
2842 a scratch area. */
2843 if (current_function_is_leaf)
2844 total_size = MAX (0, total_size - 16);
2845
2846 current_frame_info.total_size = total_size;
2847 current_frame_info.spill_cfa_off = pretend_args_size - 16;
2848 current_frame_info.spill_size = spill_size;
2849 current_frame_info.extra_spill_size = extra_spill_size;
2850 COPY_HARD_REG_SET (current_frame_info.mask, mask);
2851 current_frame_info.n_spilled = n_spilled;
2852 current_frame_info.initialized = reload_completed;
2853 }
2854
2855 /* Worker function for TARGET_CAN_ELIMINATE. */
2856
2857 bool
2858 ia64_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
2859 {
2860 return (to == BR_REG (0) ? current_function_is_leaf : true);
2861 }
2862
2863 /* Compute the initial difference between the specified pair of registers. */
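/* A worked example with hypothetical numbers: for a non-leaf function
   with total_size = 128, outgoing_args_size = 32 and no pretend args,
   the code below yields

	soft FP -> hard FP : -(128 - 32 - 16) = -80
	soft FP -> SP      : 16 + 32          =  48
	AP      -> hard FP : 16 - 0           =  16
	AP      -> SP      : 128 + 16 - 0     = 144  */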
2864
2865 HOST_WIDE_INT
2866 ia64_initial_elimination_offset (int from, int to)
2867 {
2868 HOST_WIDE_INT offset;
2869
2870 ia64_compute_frame_size (get_frame_size ());
2871 switch (from)
2872 {
2873 case FRAME_POINTER_REGNUM:
2874 switch (to)
2875 {
2876 case HARD_FRAME_POINTER_REGNUM:
2877 if (current_function_is_leaf)
2878 offset = -current_frame_info.total_size;
2879 else
2880 offset = -(current_frame_info.total_size
2881 - crtl->outgoing_args_size - 16);
2882 break;
2883
2884 case STACK_POINTER_REGNUM:
2885 if (current_function_is_leaf)
2886 offset = 0;
2887 else
2888 offset = 16 + crtl->outgoing_args_size;
2889 break;
2890
2891 default:
2892 gcc_unreachable ();
2893 }
2894 break;
2895
2896 case ARG_POINTER_REGNUM:
2897 /* Arguments start above the 16 byte save area, unless stdarg,
2898 in which case we store through the 16 byte save area. */
2899 switch (to)
2900 {
2901 case HARD_FRAME_POINTER_REGNUM:
2902 offset = 16 - crtl->args.pretend_args_size;
2903 break;
2904
2905 case STACK_POINTER_REGNUM:
2906 offset = (current_frame_info.total_size
2907 + 16 - crtl->args.pretend_args_size);
2908 break;
2909
2910 default:
2911 gcc_unreachable ();
2912 }
2913 break;
2914
2915 default:
2916 gcc_unreachable ();
2917 }
2918
2919 return offset;
2920 }
2921
2922 /* If there are more than a trivial number of register spills, we use
2923 two interleaved iterators so that we can get two memory references
2924 per insn group.
2925
2926 In order to simplify things in the prologue and epilogue expanders,
2927 we use helper functions to fix up the memory references after the
2928 fact with the appropriate offsets to a POST_MODIFY memory mode.
2929 The following data structure tracks the state of the two iterators
2930 while insns are being emitted. */
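
/* A sketch of the effect (assembly shown loosely): two successive spills
   routed through the same iterator come out roughly as

	st8 [iter] = reg1, step    (the POST_MODIFY with STEP is patched
				    onto this store once the next spill's
				    offset is known)
	st8 [iter] = reg2

   so the second store needs no separate address arithmetic.  */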
2931
2932 struct spill_fill_data
2933 {
2934 rtx init_after; /* point at which to emit initializations */
2935 rtx init_reg[2]; /* initial base register */
2936 rtx iter_reg[2]; /* the iterator registers */
2937 rtx *prev_addr[2]; /* address of last memory use */
2938 rtx prev_insn[2]; /* the insn corresponding to prev_addr */
2939 HOST_WIDE_INT prev_off[2]; /* last offset */
2940 int n_iter; /* number of iterators in use */
2941 int next_iter; /* next iterator to use */
2942 unsigned int save_gr_used_mask;
2943 };
2944
2945 static struct spill_fill_data spill_fill_data;
2946
2947 static void
2948 setup_spill_pointers (int n_spills, rtx init_reg, HOST_WIDE_INT cfa_off)
2949 {
2950 int i;
2951
2952 spill_fill_data.init_after = get_last_insn ();
2953 spill_fill_data.init_reg[0] = init_reg;
2954 spill_fill_data.init_reg[1] = init_reg;
2955 spill_fill_data.prev_addr[0] = NULL;
2956 spill_fill_data.prev_addr[1] = NULL;
2957 spill_fill_data.prev_insn[0] = NULL;
2958 spill_fill_data.prev_insn[1] = NULL;
2959 spill_fill_data.prev_off[0] = cfa_off;
2960 spill_fill_data.prev_off[1] = cfa_off;
2961 spill_fill_data.next_iter = 0;
2962 spill_fill_data.save_gr_used_mask = current_frame_info.gr_used_mask;
2963
2964 spill_fill_data.n_iter = 1 + (n_spills > 2);
2965 for (i = 0; i < spill_fill_data.n_iter; ++i)
2966 {
2967 int regno = next_scratch_gr_reg ();
2968 spill_fill_data.iter_reg[i] = gen_rtx_REG (DImode, regno);
2969 current_frame_info.gr_used_mask |= 1 << regno;
2970 }
2971 }
2972
2973 static void
2974 finish_spill_pointers (void)
2975 {
2976 current_frame_info.gr_used_mask = spill_fill_data.save_gr_used_mask;
2977 }
2978
2979 static rtx
2980 spill_restore_mem (rtx reg, HOST_WIDE_INT cfa_off)
2981 {
2982 int iter = spill_fill_data.next_iter;
2983 HOST_WIDE_INT disp = spill_fill_data.prev_off[iter] - cfa_off;
2984 rtx disp_rtx = GEN_INT (disp);
2985 rtx mem;
2986
2987 if (spill_fill_data.prev_addr[iter])
2988 {
2989 if (satisfies_constraint_N (disp_rtx))
2990 {
2991 *spill_fill_data.prev_addr[iter]
2992 = gen_rtx_POST_MODIFY (DImode, spill_fill_data.iter_reg[iter],
2993 gen_rtx_PLUS (DImode,
2994 spill_fill_data.iter_reg[iter],
2995 disp_rtx));
2996 add_reg_note (spill_fill_data.prev_insn[iter],
2997 REG_INC, spill_fill_data.iter_reg[iter]);
2998 }
2999 else
3000 {
3001 /* ??? Could use register post_modify for loads. */
3002 if (!satisfies_constraint_I (disp_rtx))
3003 {
3004 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
3005 emit_move_insn (tmp, disp_rtx);
3006 disp_rtx = tmp;
3007 }
3008 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
3009 spill_fill_data.iter_reg[iter], disp_rtx));
3010 }
3011 }
3012 /* Micro-optimization: if we've created a frame pointer, it's at
3013 CFA 0, which may allow the real iterator to be initialized lower,
3014 slightly increasing parallelism. Also, if there are few saves
3015 it may eliminate the iterator entirely. */
3016 else if (disp == 0
3017 && spill_fill_data.init_reg[iter] == stack_pointer_rtx
3018 && frame_pointer_needed)
3019 {
3020 mem = gen_rtx_MEM (GET_MODE (reg), hard_frame_pointer_rtx);
3021 set_mem_alias_set (mem, get_varargs_alias_set ());
3022 return mem;
3023 }
3024 else
3025 {
3026 rtx seq, insn;
3027
3028 if (disp == 0)
3029 seq = gen_movdi (spill_fill_data.iter_reg[iter],
3030 spill_fill_data.init_reg[iter]);
3031 else
3032 {
3033 start_sequence ();
3034
3035 if (!satisfies_constraint_I (disp_rtx))
3036 {
3037 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
3038 emit_move_insn (tmp, disp_rtx);
3039 disp_rtx = tmp;
3040 }
3041
3042 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
3043 spill_fill_data.init_reg[iter],
3044 disp_rtx));
3045
3046 seq = get_insns ();
3047 end_sequence ();
3048 }
3049
3050 /* Be careful in case this is the first insn in a sequence. */
3051 if (spill_fill_data.init_after)
3052 insn = emit_insn_after (seq, spill_fill_data.init_after);
3053 else
3054 {
3055 rtx first = get_insns ();
3056 if (first)
3057 insn = emit_insn_before (seq, first);
3058 else
3059 insn = emit_insn (seq);
3060 }
3061 spill_fill_data.init_after = insn;
3062 }
3063
3064 mem = gen_rtx_MEM (GET_MODE (reg), spill_fill_data.iter_reg[iter]);
3065
3066 /* ??? Not all of the spills are for varargs, but some of them are.
3067 The rest of the spills belong in an alias set of their own. But
3068 it doesn't actually hurt to include them here. */
3069 set_mem_alias_set (mem, get_varargs_alias_set ());
3070
3071 spill_fill_data.prev_addr[iter] = &XEXP (mem, 0);
3072 spill_fill_data.prev_off[iter] = cfa_off;
3073
3074 if (++iter >= spill_fill_data.n_iter)
3075 iter = 0;
3076 spill_fill_data.next_iter = iter;
3077
3078 return mem;
3079 }
3080
3081 static void
3082 do_spill (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off,
3083 rtx frame_reg)
3084 {
3085 int iter = spill_fill_data.next_iter;
3086 rtx mem, insn;
3087
3088 mem = spill_restore_mem (reg, cfa_off);
3089 insn = emit_insn ((*move_fn) (mem, reg, GEN_INT (cfa_off)));
3090 spill_fill_data.prev_insn[iter] = insn;
3091
3092 if (frame_reg)
3093 {
3094 rtx base;
3095 HOST_WIDE_INT off;
3096
3097 RTX_FRAME_RELATED_P (insn) = 1;
3098
3099 /* Don't even pretend that the unwind code can intuit its way
3100 through a pair of interleaved post_modify iterators. Just
3101 provide the correct answer. */
3102
3103 if (frame_pointer_needed)
3104 {
3105 base = hard_frame_pointer_rtx;
3106 off = - cfa_off;
3107 }
3108 else
3109 {
3110 base = stack_pointer_rtx;
3111 off = current_frame_info.total_size - cfa_off;
3112 }
3113
3114 add_reg_note (insn, REG_CFA_OFFSET,
3115 gen_rtx_SET (VOIDmode,
3116 gen_rtx_MEM (GET_MODE (reg),
3117 plus_constant (base, off)),
3118 frame_reg));
3119 }
3120 }
3121
3122 static void
3123 do_restore (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off)
3124 {
3125 int iter = spill_fill_data.next_iter;
3126 rtx insn;
3127
3128 insn = emit_insn ((*move_fn) (reg, spill_restore_mem (reg, cfa_off),
3129 GEN_INT (cfa_off)));
3130 spill_fill_data.prev_insn[iter] = insn;
3131 }
3132
3133 /* Wrapper functions that discard the CONST_INT spill offset. These
3134 exist so that we can give gr_spill/gr_fill the offset they need and
3135 use a consistent function interface. */
3136
3137 static rtx
3138 gen_movdi_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
3139 {
3140 return gen_movdi (dest, src);
3141 }
3142
3143 static rtx
3144 gen_fr_spill_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
3145 {
3146 return gen_fr_spill (dest, src);
3147 }
3148
3149 static rtx
3150 gen_fr_restore_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
3151 {
3152 return gen_fr_restore (dest, src);
3153 }
3154
3155 /* Called after register allocation to add any instructions needed for the
3156 prologue. Using a prologue insn is favored compared to putting all of the
3157 instructions in output_function_prologue(), since it allows the scheduler
3158 to intermix instructions with the saves of the call-saved registers. In
3159 some cases, it might be necessary to emit a barrier instruction as the last
3160 insn to prevent such scheduling.
3161
3162 Also any insns generated here should have RTX_FRAME_RELATED_P(insn) = 1
3163 so that the debug info generation code can handle them properly.
3164
3165 The register save area is laid out like so:
3166 cfa+16
3167 [ varargs spill area ]
3168 [ fr register spill area ]
3169 [ br register spill area ]
3170 [ ar register spill area ]
3171 [ pr register spill area ]
3172 [ gr register spill area ] */
3173
3174 /* ??? Get inefficient code when the frame size is larger than can fit in an
3175 adds instruction. */
3176
3177 void
3178 ia64_expand_prologue (void)
3179 {
3180 rtx insn, ar_pfs_save_reg, ar_unat_save_reg;
3181 int i, epilogue_p, regno, alt_regno, cfa_off, n_varargs;
3182 rtx reg, alt_reg;
3183
3184 ia64_compute_frame_size (get_frame_size ());
3185 last_scratch_gr_reg = 15;
3186
3187 if (flag_stack_usage)
3188 current_function_static_stack_size = current_frame_info.total_size;
3189
3190 if (dump_file)
3191 {
3192 fprintf (dump_file, "ia64 frame related registers "
3193 "recorded in current_frame_info.r[]:\n");
3194 #define PRINTREG(a) if (current_frame_info.r[a]) \
3195 fprintf(dump_file, "%s = %d\n", #a, current_frame_info.r[a])
3196 PRINTREG(reg_fp);
3197 PRINTREG(reg_save_b0);
3198 PRINTREG(reg_save_pr);
3199 PRINTREG(reg_save_ar_pfs);
3200 PRINTREG(reg_save_ar_unat);
3201 PRINTREG(reg_save_ar_lc);
3202 PRINTREG(reg_save_gp);
3203 #undef PRINTREG
3204 }
3205
3206 /* If there is no epilogue, then we don't need some prologue insns.
3207 We need to avoid emitting the dead prologue insns, because flow
3208 will complain about them. */
3209 if (optimize)
3210 {
3211 edge e;
3212 edge_iterator ei;
3213
3214 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
3215 if ((e->flags & EDGE_FAKE) == 0
3216 && (e->flags & EDGE_FALLTHRU) != 0)
3217 break;
3218 epilogue_p = (e != NULL);
3219 }
3220 else
3221 epilogue_p = 1;
3222
3223 /* Set the local, input, and output register names. We need to do this
3224 for GNU libc, which creates crti.S/crtn.S by splitting initfini.c in
3225 half. If we use in/loc/out register names, then we get assembler errors
3226 in crtn.S because there is no alloc insn or regstk directive in there. */
3227 if (! TARGET_REG_NAMES)
3228 {
3229 int inputs = current_frame_info.n_input_regs;
3230 int locals = current_frame_info.n_local_regs;
3231 int outputs = current_frame_info.n_output_regs;
3232
3233 for (i = 0; i < inputs; i++)
3234 reg_names[IN_REG (i)] = ia64_reg_numbers[i];
3235 for (i = 0; i < locals; i++)
3236 reg_names[LOC_REG (i)] = ia64_reg_numbers[inputs + i];
3237 for (i = 0; i < outputs; i++)
3238 reg_names[OUT_REG (i)] = ia64_reg_numbers[inputs + locals + i];
3239 }
3240
3241 /* Set the frame pointer register name. The regnum is logically loc79,
3242 but of course we'll not have allocated that many locals. Rather than
3243 worrying about renumbering the existing rtxs, we adjust the name. */
3244 /* ??? This code means that we can never use one local register when
3245 there is a frame pointer. loc79 gets wasted in this case, as it is
3246 renamed to a register that will never be used. See also the try_locals
3247 code in find_gr_spill. */
3248 if (current_frame_info.r[reg_fp])
3249 {
3250 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
3251 reg_names[HARD_FRAME_POINTER_REGNUM]
3252 = reg_names[current_frame_info.r[reg_fp]];
3253 reg_names[current_frame_info.r[reg_fp]] = tmp;
3254 }
3255
3256 /* We don't need an alloc instruction if we've used no outputs or locals. */
3257 if (current_frame_info.n_local_regs == 0
3258 && current_frame_info.n_output_regs == 0
3259 && current_frame_info.n_input_regs <= crtl->args.info.int_regs
3260 && !TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
3261 {
3262 /* If there is no alloc, but there are input registers used, then we
3263 need a .regstk directive. */
3264 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
3265 ar_pfs_save_reg = NULL_RTX;
3266 }
3267 else
3268 {
3269 current_frame_info.need_regstk = 0;
3270
3271 if (current_frame_info.r[reg_save_ar_pfs])
3272 {
3273 regno = current_frame_info.r[reg_save_ar_pfs];
3274 reg_emitted (reg_save_ar_pfs);
3275 }
3276 else
3277 regno = next_scratch_gr_reg ();
3278 ar_pfs_save_reg = gen_rtx_REG (DImode, regno);
3279
3280 insn = emit_insn (gen_alloc (ar_pfs_save_reg,
3281 GEN_INT (current_frame_info.n_input_regs),
3282 GEN_INT (current_frame_info.n_local_regs),
3283 GEN_INT (current_frame_info.n_output_regs),
3284 GEN_INT (current_frame_info.n_rotate_regs)));
3285 RTX_FRAME_RELATED_P (insn) = (current_frame_info.r[reg_save_ar_pfs] != 0);
3286 }
3287
3288 /* Set up frame pointer, stack pointer, and spill iterators. */
3289
3290 n_varargs = cfun->machine->n_varargs;
3291 setup_spill_pointers (current_frame_info.n_spilled + n_varargs,
3292 stack_pointer_rtx, 0);
3293
3294 if (frame_pointer_needed)
3295 {
3296 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
3297 RTX_FRAME_RELATED_P (insn) = 1;
3298
3299 /* Force the unwind info to recognize this as defining a new CFA,
3300 rather than some temp register setup. */
3301 add_reg_note (insn, REG_CFA_ADJUST_CFA, NULL_RTX);
3302 }
3303
3304 if (current_frame_info.total_size != 0)
3305 {
3306 rtx frame_size_rtx = GEN_INT (- current_frame_info.total_size);
3307 rtx offset;
3308
3309 if (satisfies_constraint_I (frame_size_rtx))
3310 offset = frame_size_rtx;
3311 else
3312 {
3313 regno = next_scratch_gr_reg ();
3314 offset = gen_rtx_REG (DImode, regno);
3315 emit_move_insn (offset, frame_size_rtx);
3316 }
3317
3318 insn = emit_insn (gen_adddi3 (stack_pointer_rtx,
3319 stack_pointer_rtx, offset));
3320
3321 if (! frame_pointer_needed)
3322 {
3323 RTX_FRAME_RELATED_P (insn) = 1;
3324 add_reg_note (insn, REG_CFA_ADJUST_CFA,
3325 gen_rtx_SET (VOIDmode,
3326 stack_pointer_rtx,
3327 gen_rtx_PLUS (DImode,
3328 stack_pointer_rtx,
3329 frame_size_rtx)));
3330 }
3331
3332 /* ??? At this point we must generate a magic insn that appears to
3333 modify the stack pointer, the frame pointer, and all spill
3334 iterators. This would allow the most scheduling freedom. For
3335 now, just hard stop. */
3336 emit_insn (gen_blockage ());
3337 }
3338
3339 /* Must copy out ar.unat before doing any integer spills. */
3340 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3341 {
3342 if (current_frame_info.r[reg_save_ar_unat])
3343 {
3344 ar_unat_save_reg
3345 = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_unat]);
3346 reg_emitted (reg_save_ar_unat);
3347 }
3348 else
3349 {
3350 alt_regno = next_scratch_gr_reg ();
3351 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
3352 current_frame_info.gr_used_mask |= 1 << alt_regno;
3353 }
3354
3355 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3356 insn = emit_move_insn (ar_unat_save_reg, reg);
3357 if (current_frame_info.r[reg_save_ar_unat])
3358 {
3359 RTX_FRAME_RELATED_P (insn) = 1;
3360 add_reg_note (insn, REG_CFA_REGISTER, NULL_RTX);
3361 }
3362
3363 /* Even if we're not going to generate an epilogue, we still
3364 need to save the register so that EH works. */
3365 if (! epilogue_p && current_frame_info.r[reg_save_ar_unat])
3366 emit_insn (gen_prologue_use (ar_unat_save_reg));
3367 }
3368 else
3369 ar_unat_save_reg = NULL_RTX;
3370
3371 /* Spill all varargs registers. Do this before spilling any GR registers,
3372 since we want the UNAT bits for the GR registers to override the UNAT
3373 bits from varargs, which we don't care about. */
3374
3375 cfa_off = -16;
3376 for (regno = GR_ARG_FIRST + 7; n_varargs > 0; --n_varargs, --regno)
3377 {
3378 reg = gen_rtx_REG (DImode, regno);
3379 do_spill (gen_gr_spill, reg, cfa_off += 8, NULL_RTX);
3380 }
3381
3382 /* Locate the bottom of the register save area. */
3383 cfa_off = (current_frame_info.spill_cfa_off
3384 + current_frame_info.spill_size
3385 + current_frame_info.extra_spill_size);
3386
3387 /* Save the predicate register block either in a register or in memory. */
3388 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
3389 {
3390 reg = gen_rtx_REG (DImode, PR_REG (0));
3391 if (current_frame_info.r[reg_save_pr] != 0)
3392 {
3393 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_pr]);
3394 reg_emitted (reg_save_pr);
3395 insn = emit_move_insn (alt_reg, reg);
3396
3397 /* ??? Denote pr spill/fill by a DImode move that modifies all
3398 64 hard registers. */
3399 RTX_FRAME_RELATED_P (insn) = 1;
3400 add_reg_note (insn, REG_CFA_REGISTER, NULL_RTX);
3401
3402 /* Even if we're not going to generate an epilogue, we still
3403 need to save the register so that EH works. */
3404 if (! epilogue_p)
3405 emit_insn (gen_prologue_use (alt_reg));
3406 }
3407 else
3408 {
3409 alt_regno = next_scratch_gr_reg ();
3410 alt_reg = gen_rtx_REG (DImode, alt_regno);
3411 insn = emit_move_insn (alt_reg, reg);
3412 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3413 cfa_off -= 8;
3414 }
3415 }
3416
3417 /* Handle AR regs in numerical order. All of them get special handling. */
3418 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM)
3419 && current_frame_info.r[reg_save_ar_unat] == 0)
3420 {
3421 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3422 do_spill (gen_movdi_x, ar_unat_save_reg, cfa_off, reg);
3423 cfa_off -= 8;
3424 }
3425
3426 /* The alloc insn already copied ar.pfs into a general register. The
3427 only thing we have to do now is copy that register to a stack slot
3428 if we'd not allocated a local register for the job. */
3429 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM)
3430 && current_frame_info.r[reg_save_ar_pfs] == 0)
3431 {
3432 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3433 do_spill (gen_movdi_x, ar_pfs_save_reg, cfa_off, reg);
3434 cfa_off -= 8;
3435 }
3436
3437 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
3438 {
3439 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
3440 if (current_frame_info.r[reg_save_ar_lc] != 0)
3441 {
3442 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_lc]);
3443 reg_emitted (reg_save_ar_lc);
3444 insn = emit_move_insn (alt_reg, reg);
3445 RTX_FRAME_RELATED_P (insn) = 1;
3446 add_reg_note (insn, REG_CFA_REGISTER, NULL_RTX);
3447
3448 /* Even if we're not going to generate an epilogue, we still
3449 need to save the register so that EH works. */
3450 if (! epilogue_p)
3451 emit_insn (gen_prologue_use (alt_reg));
3452 }
3453 else
3454 {
3455 alt_regno = next_scratch_gr_reg ();
3456 alt_reg = gen_rtx_REG (DImode, alt_regno);
3457 emit_move_insn (alt_reg, reg);
3458 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3459 cfa_off -= 8;
3460 }
3461 }
3462
3463 /* Save the return pointer. */
3464 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3465 {
3466 reg = gen_rtx_REG (DImode, BR_REG (0));
3467 if (current_frame_info.r[reg_save_b0] != 0)
3468 {
3469 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
3470 reg_emitted (reg_save_b0);
3471 insn = emit_move_insn (alt_reg, reg);
3472 RTX_FRAME_RELATED_P (insn) = 1;
3473 add_reg_note (insn, REG_CFA_REGISTER, NULL_RTX);
3474
3475 /* Even if we're not going to generate an epilogue, we still
3476 need to save the register so that EH works. */
3477 if (! epilogue_p)
3478 emit_insn (gen_prologue_use (alt_reg));
3479 }
3480 else
3481 {
3482 alt_regno = next_scratch_gr_reg ();
3483 alt_reg = gen_rtx_REG (DImode, alt_regno);
3484 emit_move_insn (alt_reg, reg);
3485 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3486 cfa_off -= 8;
3487 }
3488 }
3489
3490 if (current_frame_info.r[reg_save_gp])
3491 {
3492 reg_emitted (reg_save_gp);
3493 insn = emit_move_insn (gen_rtx_REG (DImode,
3494 current_frame_info.r[reg_save_gp]),
3495 pic_offset_table_rtx);
3496 }
3497
3498 /* We should now be at the base of the gr/br/fr spill area. */
3499 gcc_assert (cfa_off == (current_frame_info.spill_cfa_off
3500 + current_frame_info.spill_size));
3501
3502 /* Spill all general registers. */
3503 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
3504 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3505 {
3506 reg = gen_rtx_REG (DImode, regno);
3507 do_spill (gen_gr_spill, reg, cfa_off, reg);
3508 cfa_off -= 8;
3509 }
3510
3511 /* Spill the rest of the BR registers. */
3512 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
3513 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3514 {
3515 alt_regno = next_scratch_gr_reg ();
3516 alt_reg = gen_rtx_REG (DImode, alt_regno);
3517 reg = gen_rtx_REG (DImode, regno);
3518 emit_move_insn (alt_reg, reg);
3519 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3520 cfa_off -= 8;
3521 }
3522
3523 /* Align the frame and spill all FR registers. */
3524 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
3525 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3526 {
3527 gcc_assert (!(cfa_off & 15));
3528 reg = gen_rtx_REG (XFmode, regno);
3529 do_spill (gen_fr_spill_x, reg, cfa_off, reg);
3530 cfa_off -= 16;
3531 }
3532
3533 gcc_assert (cfa_off == current_frame_info.spill_cfa_off);
3534
3535 finish_spill_pointers ();
3536 }
3537
3538 /* Output the textual info surrounding the prologue. */
3539
3540 void
3541 ia64_start_function (FILE *file, const char *fnname,
3542 tree decl ATTRIBUTE_UNUSED)
3543 {
3544 #if VMS_DEBUGGING_INFO
3545 if (vms_debug_main
3546 && strncmp (vms_debug_main, fnname, strlen (vms_debug_main)) == 0)
3547 {
3548 targetm.asm_out.globalize_label (asm_out_file, VMS_DEBUG_MAIN_POINTER);
3549 ASM_OUTPUT_DEF (asm_out_file, VMS_DEBUG_MAIN_POINTER, fnname);
3550 dwarf2out_vms_debug_main_pointer ();
3551 vms_debug_main = 0;
3552 }
3553 #endif
3554
3555 fputs ("\t.proc ", file);
3556 assemble_name (file, fnname);
3557 fputc ('\n', file);
3558 ASM_OUTPUT_LABEL (file, fnname);
3559 }
3560
3561 /* Called after register allocation to add any instructions needed for the
3562 epilogue. Using an epilogue insn is favored over emitting all of the
3563 instructions in output_function_epilogue(), since it allows the scheduler
3564 to intermix instructions with the restores of the caller-saved registers.
3565 In some cases, it might be necessary to emit a barrier instruction as the
3566 last insn to prevent such scheduling. */
3567
3568 void
3569 ia64_expand_epilogue (int sibcall_p)
3570 {
3571 rtx insn, reg, alt_reg, ar_unat_save_reg;
3572 int regno, alt_regno, cfa_off;
3573
3574 ia64_compute_frame_size (get_frame_size ());
3575
3576 /* If there is a frame pointer, then we use it instead of the stack
3577 pointer, so that the stack pointer does not need to be valid when
3578 the epilogue starts. See EXIT_IGNORE_STACK. */
3579 if (frame_pointer_needed)
3580 setup_spill_pointers (current_frame_info.n_spilled,
3581 hard_frame_pointer_rtx, 0);
3582 else
3583 setup_spill_pointers (current_frame_info.n_spilled, stack_pointer_rtx,
3584 current_frame_info.total_size);
3585
3586 if (current_frame_info.total_size != 0)
3587 {
3588 /* ??? At this point we must generate a magic insn that appears to
3589 modify the spill iterators and the frame pointer. This would
3590 allow the most scheduling freedom. For now, just hard stop. */
3591 emit_insn (gen_blockage ());
3592 }
3593
3594 /* Locate the bottom of the register save area. */
3595 cfa_off = (current_frame_info.spill_cfa_off
3596 + current_frame_info.spill_size
3597 + current_frame_info.extra_spill_size);
3598
3599 /* Restore the predicate registers. */
3600 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
3601 {
3602 if (current_frame_info.r[reg_save_pr] != 0)
3603 {
3604 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_pr]);
3605 reg_emitted (reg_save_pr);
3606 }
3607 else
3608 {
3609 alt_regno = next_scratch_gr_reg ();
3610 alt_reg = gen_rtx_REG (DImode, alt_regno);
3611 do_restore (gen_movdi_x, alt_reg, cfa_off);
3612 cfa_off -= 8;
3613 }
3614 reg = gen_rtx_REG (DImode, PR_REG (0));
3615 emit_move_insn (reg, alt_reg);
3616 }
3617
3618 /* Restore the application registers. */
3619
3620 /* Load the saved unat from the stack, but do not restore it until
3621 after the GRs have been restored. */
3622 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3623 {
3624 if (current_frame_info.r[reg_save_ar_unat] != 0)
3625 {
3626 ar_unat_save_reg
3627 = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_unat]);
3628 reg_emitted (reg_save_ar_unat);
3629 }
3630 else
3631 {
3632 alt_regno = next_scratch_gr_reg ();
3633 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
3634 current_frame_info.gr_used_mask |= 1 << alt_regno;
3635 do_restore (gen_movdi_x, ar_unat_save_reg, cfa_off);
3636 cfa_off -= 8;
3637 }
3638 }
3639 else
3640 ar_unat_save_reg = NULL_RTX;
3641
3642 if (current_frame_info.r[reg_save_ar_pfs] != 0)
3643 {
3644 reg_emitted (reg_save_ar_pfs);
3645 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_pfs]);
3646 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3647 emit_move_insn (reg, alt_reg);
3648 }
3649 else if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
3650 {
3651 alt_regno = next_scratch_gr_reg ();
3652 alt_reg = gen_rtx_REG (DImode, alt_regno);
3653 do_restore (gen_movdi_x, alt_reg, cfa_off);
3654 cfa_off -= 8;
3655 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3656 emit_move_insn (reg, alt_reg);
3657 }
3658
3659 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
3660 {
3661 if (current_frame_info.r[reg_save_ar_lc] != 0)
3662 {
3663 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_lc]);
3664 reg_emitted (reg_save_ar_lc);
3665 }
3666 else
3667 {
3668 alt_regno = next_scratch_gr_reg ();
3669 alt_reg = gen_rtx_REG (DImode, alt_regno);
3670 do_restore (gen_movdi_x, alt_reg, cfa_off);
3671 cfa_off -= 8;
3672 }
3673 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
3674 emit_move_insn (reg, alt_reg);
3675 }
3676
3677 /* Restore the return pointer. */
3678 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3679 {
3680 if (current_frame_info.r[reg_save_b0] != 0)
3681 {
3682 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
3683 reg_emitted (reg_save_b0);
3684 }
3685 else
3686 {
3687 alt_regno = next_scratch_gr_reg ();
3688 alt_reg = gen_rtx_REG (DImode, alt_regno);
3689 do_restore (gen_movdi_x, alt_reg, cfa_off);
3690 cfa_off -= 8;
3691 }
3692 reg = gen_rtx_REG (DImode, BR_REG (0));
3693 emit_move_insn (reg, alt_reg);
3694 }
3695
3696 /* We should now be at the base of the gr/br/fr spill area. */
3697 gcc_assert (cfa_off == (current_frame_info.spill_cfa_off
3698 + current_frame_info.spill_size));
3699
3700 /* The GP may be stored on the stack in the prologue, but it's
3701 never restored in the epilogue. Skip the stack slot. */
3702 if (TEST_HARD_REG_BIT (current_frame_info.mask, GR_REG (1)))
3703 cfa_off -= 8;
3704
3705 /* Restore all general registers. */
3706 for (regno = GR_REG (2); regno <= GR_REG (31); ++regno)
3707 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3708 {
3709 reg = gen_rtx_REG (DImode, regno);
3710 do_restore (gen_gr_restore, reg, cfa_off);
3711 cfa_off -= 8;
3712 }
3713
3714 /* Restore the branch registers. */
3715 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
3716 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3717 {
3718 alt_regno = next_scratch_gr_reg ();
3719 alt_reg = gen_rtx_REG (DImode, alt_regno);
3720 do_restore (gen_movdi_x, alt_reg, cfa_off);
3721 cfa_off -= 8;
3722 reg = gen_rtx_REG (DImode, regno);
3723 emit_move_insn (reg, alt_reg);
3724 }
3725
3726 /* Restore floating point registers. */
3727 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
3728 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3729 {
3730 gcc_assert (!(cfa_off & 15));
3731 reg = gen_rtx_REG (XFmode, regno);
3732 do_restore (gen_fr_restore_x, reg, cfa_off);
3733 cfa_off -= 16;
3734 }
3735
3736 /* Restore ar.unat for real. */
3737 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3738 {
3739 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3740 emit_move_insn (reg, ar_unat_save_reg);
3741 }
3742
3743 gcc_assert (cfa_off == current_frame_info.spill_cfa_off);
3744
3745 finish_spill_pointers ();
3746
3747 if (current_frame_info.total_size
3748 || cfun->machine->ia64_eh_epilogue_sp
3749 || frame_pointer_needed)
3750 {
3751 /* ??? At this point we must generate a magic insn that appears to
3752 modify the spill iterators, the stack pointer, and the frame
3753 pointer. This would allow the most scheduling freedom. For now,
3754 just hard stop. */
3755 emit_insn (gen_blockage ());
3756 }
3757
3758 if (cfun->machine->ia64_eh_epilogue_sp)
3759 emit_move_insn (stack_pointer_rtx, cfun->machine->ia64_eh_epilogue_sp);
3760 else if (frame_pointer_needed)
3761 {
3762 insn = emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
3763 RTX_FRAME_RELATED_P (insn) = 1;
3764 add_reg_note (insn, REG_CFA_ADJUST_CFA, NULL);
3765 }
3766 else if (current_frame_info.total_size)
3767 {
3768 rtx offset, frame_size_rtx;
3769
3770 frame_size_rtx = GEN_INT (current_frame_info.total_size);
3771 if (satisfies_constraint_I (frame_size_rtx))
3772 offset = frame_size_rtx;
3773 else
3774 {
3775 regno = next_scratch_gr_reg ();
3776 offset = gen_rtx_REG (DImode, regno);
3777 emit_move_insn (offset, frame_size_rtx);
3778 }
3779
3780 insn = emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
3781 offset));
3782
3783 RTX_FRAME_RELATED_P (insn) = 1;
3784 add_reg_note (insn, REG_CFA_ADJUST_CFA,
3785 gen_rtx_SET (VOIDmode,
3786 stack_pointer_rtx,
3787 gen_rtx_PLUS (DImode,
3788 stack_pointer_rtx,
3789 frame_size_rtx)));
3790 }
3791
3792 if (cfun->machine->ia64_eh_epilogue_bsp)
3793 emit_insn (gen_set_bsp (cfun->machine->ia64_eh_epilogue_bsp));
3794
3795 if (! sibcall_p)
3796 emit_jump_insn (gen_return_internal (gen_rtx_REG (DImode, BR_REG (0))));
3797 else
3798 {
3799 int fp = GR_REG (2);
3800 /* We need a throwaway register here; r0 and r1 are reserved,
3801 so r2 is the first available call-clobbered register. If
3802 there is a frame pointer register, we may have swapped the
3803 names of r2 and HARD_FRAME_POINTER_REGNUM, so we have to make
3804 sure we're using the string "r2" when emitting the register
3805 name for the assembler. */
3806 if (current_frame_info.r[reg_fp]
3807 && current_frame_info.r[reg_fp] == GR_REG (2))
3808 fp = HARD_FRAME_POINTER_REGNUM;
3809
3810 /* We must emit an alloc to force the input registers to become output
3811 registers. Otherwise, if the callee tries to pass its parameters
3812 through to another call without an intervening alloc, then these
3813 values get lost. */
3814 /* ??? We don't need to preserve all input registers. We only need to
3815 preserve those input registers used as arguments to the sibling call.
3816 It is unclear how to compute that number here. */
3817 if (current_frame_info.n_input_regs != 0)
3818 {
3819 rtx n_inputs = GEN_INT (current_frame_info.n_input_regs);
3820 insn = emit_insn (gen_alloc (gen_rtx_REG (DImode, fp),
3821 const0_rtx, const0_rtx,
3822 n_inputs, const0_rtx));
3823 RTX_FRAME_RELATED_P (insn) = 1;
3824 }
3825 }
3826 }
3827
3828 /* Return 1 if br.ret can do all the work required to return from a
3829 function. */
3830
3831 int
3832 ia64_direct_return (void)
3833 {
3834 if (reload_completed && ! frame_pointer_needed)
3835 {
3836 ia64_compute_frame_size (get_frame_size ());
3837
3838 return (current_frame_info.total_size == 0
3839 && current_frame_info.n_spilled == 0
3840 && current_frame_info.r[reg_save_b0] == 0
3841 && current_frame_info.r[reg_save_pr] == 0
3842 && current_frame_info.r[reg_save_ar_pfs] == 0
3843 && current_frame_info.r[reg_save_ar_unat] == 0
3844 && current_frame_info.r[reg_save_ar_lc] == 0);
3845 }
3846 return 0;
3847 }
3848
3849 /* Return the magic cookie that we use to hold the return address
3850 during early compilation. */
3851
3852 rtx
3853 ia64_return_addr_rtx (HOST_WIDE_INT count, rtx frame ATTRIBUTE_UNUSED)
3854 {
3855 if (count != 0)
3856 return NULL;
3857 return gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_RET_ADDR);
3858 }
3859
3860 /* Split this value after reload, now that we know where the return
3861 address is saved. */
3862
3863 void
3864 ia64_split_return_addr_rtx (rtx dest)
3865 {
3866 rtx src;
3867
3868 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3869 {
3870 if (current_frame_info.r[reg_save_b0] != 0)
3871 {
3872 src = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
3873 reg_emitted (reg_save_b0);
3874 }
3875 else
3876 {
3877 HOST_WIDE_INT off;
3878 unsigned int regno;
3879 rtx off_r;
3880
3881 /* Compute offset from CFA for BR0. */
3882 /* ??? Must be kept in sync with ia64_expand_prologue. */
3883 off = (current_frame_info.spill_cfa_off
3884 + current_frame_info.spill_size);
3885 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
3886 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3887 off -= 8;
3888
3889 /* Convert CFA offset to a register based offset. */
3890 if (frame_pointer_needed)
3891 src = hard_frame_pointer_rtx;
3892 else
3893 {
3894 src = stack_pointer_rtx;
3895 off += current_frame_info.total_size;
3896 }
3897
3898 /* Load address into scratch register. */
3899 off_r = GEN_INT (off);
3900 if (satisfies_constraint_I (off_r))
3901 emit_insn (gen_adddi3 (dest, src, off_r));
3902 else
3903 {
3904 emit_move_insn (dest, off_r);
3905 emit_insn (gen_adddi3 (dest, src, dest));
3906 }
3907
3908 src = gen_rtx_MEM (Pmode, dest);
3909 }
3910 }
3911 else
3912 src = gen_rtx_REG (DImode, BR_REG (0));
3913
3914 emit_move_insn (dest, src);
3915 }
3916
3917 int
3918 ia64_hard_regno_rename_ok (int from, int to)
3919 {
3920 /* Don't clobber any of the registers we reserved for the prologue. */
3921 unsigned int r;
3922
3923 for (r = reg_fp; r <= reg_save_ar_lc; r++)
3924 if (to == current_frame_info.r[r]
3925 || from == current_frame_info.r[r]
3926 || to == emitted_frame_related_regs[r]
3927 || from == emitted_frame_related_regs[r])
3928 return 0;
3929
3930 /* Don't use output registers outside the register frame. */
3931 if (OUT_REGNO_P (to) && to >= OUT_REG (current_frame_info.n_output_regs))
3932 return 0;
3933
3934 /* Retain even/oddness on predicate register pairs. */
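/* Presumably this is because compare instructions write a complementary
   predicate pair, so a rename must keep each predicate on the same side
   of its pair (an editorial note; the rationale is not stated in the
   original source). */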
3935 if (PR_REGNO_P (from) && PR_REGNO_P (to))
3936 return (from & 1) == (to & 1);
3937
3938 return 1;
3939 }
3940
3941 /* Target hook for assembling integer objects. Handle word-sized
3942 aligned objects and detect the cases when @fptr is needed. */
3943
3944 static bool
3945 ia64_assemble_integer (rtx x, unsigned int size, int aligned_p)
3946 {
3947 if (size == POINTER_SIZE / BITS_PER_UNIT
3948 && !(TARGET_NO_PIC || TARGET_AUTO_PIC)
3949 && GET_CODE (x) == SYMBOL_REF
3950 && SYMBOL_REF_FUNCTION_P (x))
3951 {
3952 static const char * const directive[2][2] = {
3953 /* 64-bit pointer */ /* 32-bit pointer */
3954 { "\tdata8.ua\t@fptr(", "\tdata4.ua\t@fptr("}, /* unaligned */
3955 { "\tdata8\t@fptr(", "\tdata4\t@fptr("} /* aligned */
3956 };
3957 fputs (directive[(aligned_p != 0)][POINTER_SIZE == 32], asm_out_file);
3958 output_addr_const (asm_out_file, x);
3959 fputs (")\n", asm_out_file);
3960 return true;
3961 }
3962 return default_assemble_integer (x, size, aligned_p);
3963 }
3964
3965 /* Emit the function prologue. */
3966
3967 static void
3968 ia64_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3969 {
3970 int mask, grsave, grsave_prev;
3971
3972 if (current_frame_info.need_regstk)
3973 fprintf (file, "\t.regstk %d, %d, %d, %d\n",
3974 current_frame_info.n_input_regs,
3975 current_frame_info.n_local_regs,
3976 current_frame_info.n_output_regs,
3977 current_frame_info.n_rotate_regs);
3978
3979 if (ia64_except_unwind_info (&global_options) != UI_TARGET)
3980 return;
3981
3982 /* Emit the .prologue directive. */
3983
3984 mask = 0;
3985 grsave = grsave_prev = 0;
3986 if (current_frame_info.r[reg_save_b0] != 0)
3987 {
3988 mask |= 8;
3989 grsave = grsave_prev = current_frame_info.r[reg_save_b0];
3990 }
3991 if (current_frame_info.r[reg_save_ar_pfs] != 0
3992 && (grsave_prev == 0
3993 || current_frame_info.r[reg_save_ar_pfs] == grsave_prev + 1))
3994 {
3995 mask |= 4;
3996 if (grsave_prev == 0)
3997 grsave = current_frame_info.r[reg_save_ar_pfs];
3998 grsave_prev = current_frame_info.r[reg_save_ar_pfs];
3999 }
4000 if (current_frame_info.r[reg_fp] != 0
4001 && (grsave_prev == 0
4002 || current_frame_info.r[reg_fp] == grsave_prev + 1))
4003 {
4004 mask |= 2;
4005 if (grsave_prev == 0)
4006 grsave = HARD_FRAME_POINTER_REGNUM;
4007 grsave_prev = current_frame_info.r[reg_fp];
4008 }
4009 if (current_frame_info.r[reg_save_pr] != 0
4010 && (grsave_prev == 0
4011 || current_frame_info.r[reg_save_pr] == grsave_prev + 1))
4012 {
4013 mask |= 1;
4014 if (grsave_prev == 0)
4015 grsave = current_frame_info.r[reg_save_pr];
4016 }
4017
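/* Bits 8, 4, 2 and 1 of MASK, as built above, record that b0, ar.pfs, the
   frame pointer and the predicates, respectively, have been saved in
   consecutive general registers starting at GRSAVE; the .prologue directive
   emitted below passes that summary to the unwind machinery. */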
4018 if (mask && TARGET_GNU_AS)
4019 fprintf (file, "\t.prologue %d, %d\n", mask,
4020 ia64_dbx_register_number (grsave));
4021 else
4022 fputs ("\t.prologue\n", file);
4023
4024 /* Emit a .spill directive, if necessary, to relocate the base of
4025 the register spill area. */
4026 if (current_frame_info.spill_cfa_off != -16)
4027 fprintf (file, "\t.spill %ld\n",
4028 (long) (current_frame_info.spill_cfa_off
4029 + current_frame_info.spill_size));
4030 }
4031
4032 /* Emit the .body directive at the scheduled end of the prologue. */
4033
4034 static void
4035 ia64_output_function_end_prologue (FILE *file)
4036 {
4037 if (ia64_except_unwind_info (&global_options) != UI_TARGET)
4038 return;
4039
4040 fputs ("\t.body\n", file);
4041 }
4042
4043 /* Emit the function epilogue. */
4044
4045 static void
4046 ia64_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
4047 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4048 {
4049 int i;
4050
4051 if (current_frame_info.r[reg_fp])
4052 {
4053 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
4054 reg_names[HARD_FRAME_POINTER_REGNUM]
4055 = reg_names[current_frame_info.r[reg_fp]];
4056 reg_names[current_frame_info.r[reg_fp]] = tmp;
4057 reg_emitted (reg_fp);
4058 }
4059 if (! TARGET_REG_NAMES)
4060 {
4061 for (i = 0; i < current_frame_info.n_input_regs; i++)
4062 reg_names[IN_REG (i)] = ia64_input_reg_names[i];
4063 for (i = 0; i < current_frame_info.n_local_regs; i++)
4064 reg_names[LOC_REG (i)] = ia64_local_reg_names[i];
4065 for (i = 0; i < current_frame_info.n_output_regs; i++)
4066 reg_names[OUT_REG (i)] = ia64_output_reg_names[i];
4067 }
4068
4069 current_frame_info.initialized = 0;
4070 }
4071
4072 int
4073 ia64_dbx_register_number (int regno)
4074 {
4075 /* In ia64_expand_prologue we quite literally renamed the frame pointer
4076 from its home at loc79 to something inside the register frame. We
4077 must perform the same renumbering here for the debug info. */
4078 if (current_frame_info.r[reg_fp])
4079 {
4080 if (regno == HARD_FRAME_POINTER_REGNUM)
4081 regno = current_frame_info.r[reg_fp];
4082 else if (regno == current_frame_info.r[reg_fp])
4083 regno = HARD_FRAME_POINTER_REGNUM;
4084 }
4085
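/* For example, with 2 input and 3 local registers: IN_REG (1) maps to debug
   register 33, LOC_REG (0) to 34, and OUT_REG (0) to 37, per the packing
   below. */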
4086 if (IN_REGNO_P (regno))
4087 return 32 + regno - IN_REG (0);
4088 else if (LOC_REGNO_P (regno))
4089 return 32 + current_frame_info.n_input_regs + regno - LOC_REG (0);
4090 else if (OUT_REGNO_P (regno))
4091 return (32 + current_frame_info.n_input_regs
4092 + current_frame_info.n_local_regs + regno - OUT_REG (0));
4093 else
4094 return regno;
4095 }
4096
4097 /* Implement TARGET_TRAMPOLINE_INIT.
4098
4099 The trampoline should set the static chain pointer to value placed
4100 into the trampoline and should branch to the specified routine.
4101 To make the normal indirect-subroutine calling convention work,
4102 the trampoline must look like a function descriptor; the first
4103 word being the target address and the second being the target's
4104 global pointer.
4105
4106 We abuse the concept of a global pointer by arranging for it
4107 to point to the data we need to load. The complete trampoline
4108 has the following form:
4109
4110 +-------------------+ \
4111 TRAMP: | __ia64_trampoline | |
4112 +-------------------+ > fake function descriptor
4113 | TRAMP+16 | |
4114 +-------------------+ /
4115 | target descriptor |
4116 +-------------------+
4117 | static link |
4118 +-------------------+
4119 */
4120
4121 static void
4122 ia64_trampoline_init (rtx m_tramp, tree fndecl, rtx static_chain)
4123 {
4124 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
4125 rtx addr, addr_reg, tramp, eight = GEN_INT (8);
4126
4127 /* The Intel assembler requires that the global __ia64_trampoline symbol
4128 be declared explicitly. */
4129 if (!TARGET_GNU_AS)
4130 {
4131 static bool declared_ia64_trampoline = false;
4132
4133 if (!declared_ia64_trampoline)
4134 {
4135 declared_ia64_trampoline = true;
4136 (*targetm.asm_out.globalize_label) (asm_out_file,
4137 "__ia64_trampoline");
4138 }
4139 }
4140
4141 /* Make sure addresses are Pmode even if we are in ILP32 mode. */
4142 addr = convert_memory_address (Pmode, XEXP (m_tramp, 0));
4143 fnaddr = convert_memory_address (Pmode, fnaddr);
4144 static_chain = convert_memory_address (Pmode, static_chain);
4145
4146 /* Load up our iterator. */
4147 addr_reg = copy_to_reg (addr);
4148 m_tramp = adjust_automodify_address (m_tramp, Pmode, addr_reg, 0);
4149
4150 /* The first two words are the fake descriptor:
4151 __ia64_trampoline, ADDR+16. */
4152 tramp = gen_rtx_SYMBOL_REF (Pmode, "__ia64_trampoline");
4153 if (TARGET_ABI_OPEN_VMS)
4154 {
4155 /* HP decided to break the ELF ABI on VMS (to deal with an ambiguity
4156 in the Macro-32 compiler) and changed the semantics of the LTOFF22
4157 relocation against function symbols to make it identical to the
4158 LTOFF_FPTR22 relocation. Emit the latter directly to stay within
4159 strict ELF and dereference to get the bare code address. */
4160 rtx reg = gen_reg_rtx (Pmode);
4161 SYMBOL_REF_FLAGS (tramp) |= SYMBOL_FLAG_FUNCTION;
4162 emit_move_insn (reg, tramp);
4163 emit_move_insn (reg, gen_rtx_MEM (Pmode, reg));
4164 tramp = reg;
4165 }
4166 emit_move_insn (m_tramp, tramp);
4167 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
4168 m_tramp = adjust_automodify_address (m_tramp, VOIDmode, NULL, 8);
4169
4170 emit_move_insn (m_tramp, force_reg (Pmode, plus_constant (addr, 16)));
4171 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
4172 m_tramp = adjust_automodify_address (m_tramp, VOIDmode, NULL, 8);
4173
4174 /* The third word is the target descriptor. */
4175 emit_move_insn (m_tramp, force_reg (Pmode, fnaddr));
4176 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
4177 m_tramp = adjust_automodify_address (m_tramp, VOIDmode, NULL, 8);
4178
4179 /* The fourth word is the static chain. */
4180 emit_move_insn (m_tramp, static_chain);
4181 }
4182 \f
4183 /* Do any needed setup for a variadic function. CUM has not been updated
4184 for the last named argument, which has type TYPE and mode MODE.
4185
4186 We generate the actual spill instructions during prologue generation. */
4187
4188 static void
4189 ia64_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4190 tree type, int * pretend_size,
4191 int second_time ATTRIBUTE_UNUSED)
4192 {
4193 CUMULATIVE_ARGS next_cum = *cum;
4194
4195 /* Skip the current argument. */
4196 ia64_function_arg_advance (&next_cum, mode, type, 1);
4197
4198 if (next_cum.words < MAX_ARGUMENT_SLOTS)
4199 {
4200 int n = MAX_ARGUMENT_SLOTS - next_cum.words;
4201 *pretend_size = n * UNITS_PER_WORD;
4202 cfun->machine->n_varargs = n;
4203 }
4204 }
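/* For example, if the named arguments of a varargs function occupy 3 of the
   argument slots, the code above sets n_varargs to 5 and *pretend_size to
   5 * UNITS_PER_WORD == 40 bytes (assuming MAX_ARGUMENT_SLOTS == 8 and
   UNITS_PER_WORD == 8). */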
4205
4206 /* Check whether TYPE is a homogeneous floating point aggregate. If
4207 it is, return the mode of the floating point type that appears
4208 in all leafs. If it is not, return VOIDmode.
4209
4210 An aggregate is a homogeneous floating point aggregate is if all
4211 fields/elements in it have the same floating point type (e.g,
4212 SFmode). 128-bit quad-precision floats are excluded.
4213
4214 Variable sized aggregates should never arrive here, since we should
4215 have already decided to pass them by reference. Top-level zero-sized
4216 aggregates are excluded because our parallels crash the middle-end. */
4217
4218 static enum machine_mode
4219 hfa_element_mode (const_tree type, bool nested)
4220 {
4221 enum machine_mode element_mode = VOIDmode;
4222 enum machine_mode mode;
4223 enum tree_code code = TREE_CODE (type);
4224 int know_element_mode = 0;
4225 tree t;
4226
4227 if (!nested && (!TYPE_SIZE (type) || integer_zerop (TYPE_SIZE (type))))
4228 return VOIDmode;
4229
4230 switch (code)
4231 {
4232 case VOID_TYPE: case INTEGER_TYPE: case ENUMERAL_TYPE:
4233 case BOOLEAN_TYPE: case POINTER_TYPE:
4234 case OFFSET_TYPE: case REFERENCE_TYPE: case METHOD_TYPE:
4235 case LANG_TYPE: case FUNCTION_TYPE:
4236 return VOIDmode;
4237
4238 /* Fortran complex types are supposed to be HFAs, so we need to handle
4239 gcc's COMPLEX_TYPEs as HFAs. We need to exclude the integral complex
4240 types though. */
4241 case COMPLEX_TYPE:
4242 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_COMPLEX_FLOAT
4243 && TYPE_MODE (type) != TCmode)
4244 return GET_MODE_INNER (TYPE_MODE (type));
4245 else
4246 return VOIDmode;
4247
4248 case REAL_TYPE:
4249 /* We want to return VOIDmode for raw REAL_TYPEs, but the actual
4250 mode if this is contained within an aggregate. */
4251 if (nested && TYPE_MODE (type) != TFmode)
4252 return TYPE_MODE (type);
4253 else
4254 return VOIDmode;
4255
4256 case ARRAY_TYPE:
4257 return hfa_element_mode (TREE_TYPE (type), 1);
4258
4259 case RECORD_TYPE:
4260 case UNION_TYPE:
4261 case QUAL_UNION_TYPE:
4262 for (t = TYPE_FIELDS (type); t; t = DECL_CHAIN (t))
4263 {
4264 if (TREE_CODE (t) != FIELD_DECL)
4265 continue;
4266
4267 mode = hfa_element_mode (TREE_TYPE (t), 1);
4268 if (know_element_mode)
4269 {
4270 if (mode != element_mode)
4271 return VOIDmode;
4272 }
4273 else if (GET_MODE_CLASS (mode) != MODE_FLOAT)
4274 return VOIDmode;
4275 else
4276 {
4277 know_element_mode = 1;
4278 element_mode = mode;
4279 }
4280 }
4281 return element_mode;
4282
4283 default:
4284 /* If we reach here, we probably have some front-end specific type
4285 that the backend doesn't know about. This can happen via the
4286 aggregate_value_p call in init_function_start. All we can do is
4287 ignore unknown tree types. */
4288 return VOIDmode;
4289 }
4290
4291 return VOIDmode;
4292 }
4293
4294 /* Return the number of words required to hold a quantity of TYPE and MODE
4295 when passed as an argument. */
4296 static int
4297 ia64_function_arg_words (const_tree type, enum machine_mode mode)
4298 {
4299 int words;
4300
4301 if (mode == BLKmode)
4302 words = int_size_in_bytes (type);
4303 else
4304 words = GET_MODE_SIZE (mode);
4305
4306 return (words + UNITS_PER_WORD - 1) / UNITS_PER_WORD; /* round up */
4307 }
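/* E.g., with UNITS_PER_WORD == 8, a 12-byte BLKmode aggregate needs
   (12 + 7) / 8 == 2 argument words, while an 8-byte scalar needs 1. */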
4308
4309 /* Return the number of registers that should be skipped so the current
4310 argument (described by TYPE and WORDS) will be properly aligned.
4311
4312 Integer and float arguments larger than 8 bytes start at the next
4313 even boundary. Aggregates larger than 8 bytes start at the next
4314 even boundary if the aggregate has 16 byte alignment. Note that
4315 in the 32-bit ABI, TImode and TFmode have only 8-byte alignment
4316 but are still to be aligned in registers.
4317
4318 ??? The ABI does not specify how to handle aggregates with
4319 alignment from 9 to 15 bytes, or greater than 16. We handle them
4320 all as if they had 16 byte alignment. Such aggregates can occur
4321 only if gcc extensions are used. */
4322 static int
4323 ia64_function_arg_offset (const CUMULATIVE_ARGS *cum,
4324 const_tree type, int words)
4325 {
4326 /* No registers are skipped on VMS. */
4327 if (TARGET_ABI_OPEN_VMS || (cum->words & 1) == 0)
4328 return 0;
4329
4330 if (type
4331 && TREE_CODE (type) != INTEGER_TYPE
4332 && TREE_CODE (type) != REAL_TYPE)
4333 return TYPE_ALIGN (type) > 8 * BITS_PER_UNIT;
4334 else
4335 return words > 1;
4336 }
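/* For example, if cum->words is odd (say 3) and the argument is an aggregate
   with 16-byte alignment, the function above returns 1, so the argument is
   placed starting at the next even slot (slot 4). */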
4337
4338 /* Return rtx for register where argument is passed, or zero if it is passed
4339 on the stack. */
4340 /* ??? 128-bit quad-precision floats are always passed in general
4341 registers. */
4342
4343 static rtx
4344 ia64_function_arg_1 (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
4345 const_tree type, bool named, bool incoming)
4346 {
4347 int basereg = (incoming ? GR_ARG_FIRST : AR_ARG_FIRST);
4348 int words = ia64_function_arg_words (type, mode);
4349 int offset = ia64_function_arg_offset (cum, type, words);
4350 enum machine_mode hfa_mode = VOIDmode;
4351
4352 /* For OpenVMS, emit the instruction setting up the argument register here,
4353 so that it is emitted together with the other argument-setup insns. This
4354 is not conceptually the best place to do this, but it is the easiest, as
4355 we have convenient access to the cumulative args info. */
4356
4357 if (TARGET_ABI_OPEN_VMS && mode == VOIDmode && type == void_type_node
4358 && named == 1)
4359 {
4360 unsigned HOST_WIDE_INT regval = cum->words;
4361 int i;
4362
4363 for (i = 0; i < 8; i++)
4364 regval |= ((int) cum->atypes[i]) << (i * 3 + 8);
4365
4366 emit_move_insn (gen_rtx_REG (DImode, GR_REG (25)),
4367 GEN_INT (regval));
4368 }
4369
4370 /* If all argument slots are used, then it must go on the stack. */
4371 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
4372 return 0;
4373
4374 /* Check for and handle homogeneous FP aggregates. */
4375 if (type)
4376 hfa_mode = hfa_element_mode (type, 0);
4377
4378 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
4379 and unprototyped hfas are passed specially. */
4380 if (hfa_mode != VOIDmode && (! cum->prototype || named))
4381 {
4382 rtx loc[16];
4383 int i = 0;
4384 int fp_regs = cum->fp_regs;
4385 int int_regs = cum->words + offset;
4386 int hfa_size = GET_MODE_SIZE (hfa_mode);
4387 int byte_size;
4388 int args_byte_size;
4389
4390 /* If prototyped, pass it in FR regs then GR regs.
4391 If not prototyped, pass it in both FR and GR regs.
4392
4393 If this is an SFmode aggregate, then it is possible to run out of
4394 FR regs while GR regs are still left. In that case, we pass the
4395 remaining part in the GR regs. */
4396
4397 /* Fill the FP regs. We do this always. We stop if we reach the end
4398 of the argument, the last FP register, or the last argument slot. */
4399
4400 byte_size = ((mode == BLKmode)
4401 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4402 args_byte_size = int_regs * UNITS_PER_WORD;
4403 offset = 0;
4404 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
4405 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD)); i++)
4406 {
4407 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4408 gen_rtx_REG (hfa_mode, (FR_ARG_FIRST
4409 + fp_regs)),
4410 GEN_INT (offset));
4411 offset += hfa_size;
4412 args_byte_size += hfa_size;
4413 fp_regs++;
4414 }
4415
4416 /* If no prototype, then the whole thing must go in GR regs. */
4417 if (! cum->prototype)
4418 offset = 0;
4419 /* If this is an SFmode aggregate, then we might have some left over
4420 that needs to go in GR regs. */
4421 else if (byte_size != offset)
4422 int_regs += offset / UNITS_PER_WORD;
4423
4424 /* Fill in the GR regs. We must use DImode here, not the hfa mode. */
4425
4426 for (; offset < byte_size && int_regs < MAX_ARGUMENT_SLOTS; i++)
4427 {
4428 enum machine_mode gr_mode = DImode;
4429 unsigned int gr_size;
4430
4431 /* If we have an odd 4 byte hunk because we ran out of FR regs,
4432 then this goes in a GR reg left adjusted/little endian, right
4433 adjusted/big endian. */
4434 /* ??? Currently this is handled wrong, because 4-byte hunks are
4435 always right adjusted/little endian. */
4436 if (offset & 0x4)
4437 gr_mode = SImode;
4438 /* If we have an even 4 byte hunk because the aggregate is a
4439 multiple of 4 bytes in size, then this goes in a GR reg right
4440 adjusted/little endian. */
4441 else if (byte_size - offset == 4)
4442 gr_mode = SImode;
4443
4444 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4445 gen_rtx_REG (gr_mode, (basereg
4446 + int_regs)),
4447 GEN_INT (offset));
4448
4449 gr_size = GET_MODE_SIZE (gr_mode);
4450 offset += gr_size;
4451 if (gr_size == UNITS_PER_WORD
4452 || (gr_size < UNITS_PER_WORD && offset % UNITS_PER_WORD == 0))
4453 int_regs++;
4454 else if (gr_size > UNITS_PER_WORD)
4455 int_regs += gr_size / UNITS_PER_WORD;
4456 }
4457 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4458 }
4459
4460 /* On OpenVMS a variable argument is passed either in Rn or Fn. */
4461 else if (TARGET_ABI_OPEN_VMS && named == 0)
4462 {
4463 if (FLOAT_MODE_P (mode))
4464 return gen_rtx_REG (mode, FR_ARG_FIRST + cum->words);
4465 else
4466 return gen_rtx_REG (mode, basereg + cum->words);
4467 }
4468
4469 /* Integral values and aggregates go in general registers. If we have run
4470 out of FR registers, then FP values must also go in general registers.
4471 This can happen when we have an SFmode HFA. */
4472 else if (mode == TFmode || mode == TCmode
4473 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
4474 {
4475 int byte_size = ((mode == BLKmode)
4476 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4477 if (BYTES_BIG_ENDIAN
4478 && (mode == BLKmode || (type && AGGREGATE_TYPE_P (type)))
4479 && byte_size < UNITS_PER_WORD
4480 && byte_size > 0)
4481 {
4482 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
4483 gen_rtx_REG (DImode,
4484 (basereg + cum->words
4485 + offset)),
4486 const0_rtx);
4487 return gen_rtx_PARALLEL (mode, gen_rtvec (1, gr_reg));
4488 }
4489 else
4490 return gen_rtx_REG (mode, basereg + cum->words + offset);
4491
4492 }
4493
4494 /* If there is a prototype, then FP values go in a FR register when
4495 named, and in a GR register when unnamed. */
4496 else if (cum->prototype)
4497 {
4498 if (named)
4499 return gen_rtx_REG (mode, FR_ARG_FIRST + cum->fp_regs);
4500 /* In big-endian mode, an anonymous SFmode value must be represented
4501 as (parallel:SF [(expr_list (reg:DI n) (const_int 0))]) to force
4502 the value into the high half of the general register. */
4503 else if (BYTES_BIG_ENDIAN && mode == SFmode)
4504 return gen_rtx_PARALLEL (mode,
4505 gen_rtvec (1,
4506 gen_rtx_EXPR_LIST (VOIDmode,
4507 gen_rtx_REG (DImode, basereg + cum->words + offset),
4508 const0_rtx)));
4509 else
4510 return gen_rtx_REG (mode, basereg + cum->words + offset);
4511 }
4512 /* If there is no prototype, then FP values go in both FR and GR
4513 registers. */
4514 else
4515 {
4516 /* See comment above. */
4517 enum machine_mode inner_mode =
4518 (BYTES_BIG_ENDIAN && mode == SFmode) ? DImode : mode;
4519
4520 rtx fp_reg = gen_rtx_EXPR_LIST (VOIDmode,
4521 gen_rtx_REG (mode, (FR_ARG_FIRST
4522 + cum->fp_regs)),
4523 const0_rtx);
4524 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
4525 gen_rtx_REG (inner_mode,
4526 (basereg + cum->words
4527 + offset)),
4528 const0_rtx);
4529
4530 return gen_rtx_PARALLEL (mode, gen_rtvec (2, fp_reg, gr_reg));
4531 }
4532 }
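/* As an illustration of the HFA case above: a named, prototyped struct of
   three doubles is described by a PARALLEL of three DFmode FR argument
   registers starting at FR_ARG_FIRST + cum->fp_regs, each EXPR_LIST
   carrying its byte offset (0, 8, 16). */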
4533
4534 /* Implement TARGET_FUNCTION_ARG target hook. */
4535
4536 static rtx
4537 ia64_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4538 const_tree type, bool named)
4539 {
4540 return ia64_function_arg_1 (cum, mode, type, named, false);
4541 }
4542
4543 /* Implement TARGET_FUNCTION_INCOMING_ARG target hook. */
4544
4545 static rtx
4546 ia64_function_incoming_arg (CUMULATIVE_ARGS *cum,
4547 enum machine_mode mode,
4548 const_tree type, bool named)
4549 {
4550 return ia64_function_arg_1 (cum, mode, type, named, true);
4551 }
4552
4553 /* Return number of bytes, at the beginning of the argument, that must be
4554 put in registers. 0 if the argument is entirely in registers or entirely
4555 in memory. */
4556
4557 static int
4558 ia64_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4559 tree type, bool named ATTRIBUTE_UNUSED)
4560 {
4561 int words = ia64_function_arg_words (type, mode);
4562 int offset = ia64_function_arg_offset (cum, type, words);
4563
4564 /* If all argument slots are used, then it must go on the stack. */
4565 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
4566 return 0;
4567
4568 /* It doesn't matter whether the argument goes in FR or GR regs. If
4569 it fits within the 8 argument slots, then it goes entirely in
4570 registers. If it extends past the last argument slot, then the rest
4571 goes on the stack. */
4572
4573 if (words + cum->words + offset <= MAX_ARGUMENT_SLOTS)
4574 return 0;
4575
4576 return (MAX_ARGUMENT_SLOTS - cum->words - offset) * UNITS_PER_WORD;
4577 }
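/* For example, with cum->words == 6 and no alignment skip, a 4-word argument
   gets (8 - 6) * UNITS_PER_WORD == 16 bytes in registers and the remaining
   16 bytes on the stack (assuming MAX_ARGUMENT_SLOTS == 8). */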
4578
4579 /* Return ivms_arg_type based on machine_mode. */
4580
4581 static enum ivms_arg_type
4582 ia64_arg_type (enum machine_mode mode)
4583 {
4584 switch (mode)
4585 {
4586 case SFmode:
4587 return FS;
4588 case DFmode:
4589 return FT;
4590 default:
4591 return I64;
4592 }
4593 }
4594
4595 /* Update CUM to point after this argument. This is patterned after
4596 ia64_function_arg. */
4597
4598 static void
4599 ia64_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4600 const_tree type, bool named)
4601 {
4602 int words = ia64_function_arg_words (type, mode);
4603 int offset = ia64_function_arg_offset (cum, type, words);
4604 enum machine_mode hfa_mode = VOIDmode;
4605
4606 /* If all arg slots are already full, then there is nothing to do. */
4607 if (cum->words >= MAX_ARGUMENT_SLOTS)
4608 {
4609 cum->words += words + offset;
4610 return;
4611 }
4612
4613 cum->atypes[cum->words] = ia64_arg_type (mode);
4614 cum->words += words + offset;
4615
4616 /* Check for and handle homogeneous FP aggregates. */
4617 if (type)
4618 hfa_mode = hfa_element_mode (type, 0);
4619
4620 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
4621 and unprototyped hfas are passed specially. */
4622 if (hfa_mode != VOIDmode && (! cum->prototype || named))
4623 {
4624 int fp_regs = cum->fp_regs;
4625 /* This is the original value of cum->words + offset. */
4626 int int_regs = cum->words - words;
4627 int hfa_size = GET_MODE_SIZE (hfa_mode);
4628 int byte_size;
4629 int args_byte_size;
4630
4631 /* If prototyped, pass it in FR regs then GR regs.
4632 If not prototyped, pass it in both FR and GR regs.
4633
4634 If this is an SFmode aggregate, then it is possible to run out of
4635 FR regs while GR regs are still left. In that case, we pass the
4636 remaining part in the GR regs. */
4637
4638 /* Fill the FP regs. We do this always. We stop if we reach the end
4639 of the argument, the last FP register, or the last argument slot. */
4640
4641 byte_size = ((mode == BLKmode)
4642 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4643 args_byte_size = int_regs * UNITS_PER_WORD;
4644 offset = 0;
4645 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
4646 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD));)
4647 {
4648 offset += hfa_size;
4649 args_byte_size += hfa_size;
4650 fp_regs++;
4651 }
4652
4653 cum->fp_regs = fp_regs;
4654 }
4655
4656 /* On OpenVMS a variable argument is passed either in Rn or Fn. */
4657 else if (TARGET_ABI_OPEN_VMS && named == 0)
4658 {
4659 cum->int_regs = cum->words;
4660 cum->fp_regs = cum->words;
4661 }
4662
4663 /* Integral values and aggregates go in general registers. So do TFmode FP
4664 values. If we have run out of FR registers, then other FP values must
4665 also go in general registers. This can happen when we have an SFmode HFA. */
4666 else if (mode == TFmode || mode == TCmode
4667 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
4668 cum->int_regs = cum->words;
4669
4670 /* If there is a prototype, then FP values go in a FR register when
4671 named, and in a GR register when unnamed. */
4672 else if (cum->prototype)
4673 {
4674 if (! named)
4675 cum->int_regs = cum->words;
4676 else
4677 /* ??? Complex types should not reach here. */
4678 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
4679 }
4680 /* If there is no prototype, then FP values go in both FR and GR
4681 registers. */
4682 else
4683 {
4684 /* ??? Complex types should not reach here. */
4685 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
4686 cum->int_regs = cum->words;
4687 }
4688 }
4689
4690 /* Arguments with alignment larger than 8 bytes start at the next even
4691 boundary. On ILP32 HPUX, TFmode arguments start on the next even boundary
4692 even though their normal alignment is 8 bytes. See ia64_function_arg. */
4693
4694 static unsigned int
4695 ia64_function_arg_boundary (enum machine_mode mode, const_tree type)
4696 {
4697 if (mode == TFmode && TARGET_HPUX && TARGET_ILP32)
4698 return PARM_BOUNDARY * 2;
4699
4700 if (type)
4701 {
4702 if (TYPE_ALIGN (type) > PARM_BOUNDARY)
4703 return PARM_BOUNDARY * 2;
4704 else
4705 return PARM_BOUNDARY;
4706 }
4707
4708 if (GET_MODE_BITSIZE (mode) > PARM_BOUNDARY)
4709 return PARM_BOUNDARY * 2;
4710 else
4711 return PARM_BOUNDARY;
4712 }
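/* E.g., assuming PARM_BOUNDARY is 64 on this target, a 16-byte-aligned
   aggregate or a 128-bit scalar gets a 128-bit boundary, while an ordinary
   8-byte argument keeps the default 64-bit boundary. */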
4713
4714 /* True if it is OK to do sibling call optimization for the specified
4715 call expression EXP. DECL will be the called function, or NULL if
4716 this is an indirect call. */
4717 static bool
4718 ia64_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
4719 {
4720 /* We can't perform a sibcall if the current function has the syscall_linkage
4721 attribute. */
4722 if (lookup_attribute ("syscall_linkage",
4723 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
4724 return false;
4725
4726 /* We must always return with our current GP. This means we can
4727 only sibcall to functions defined in the current module unless
4728 TARGET_CONST_GP is set to true. */
4729 return (decl && (*targetm.binds_local_p) (decl)) || TARGET_CONST_GP;
4730 }
4731 \f
4732
4733 /* Implement va_arg. */
4734
4735 static tree
4736 ia64_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
4737 gimple_seq *post_p)
4738 {
4739 /* Variable sized types are passed by reference. */
4740 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
4741 {
4742 tree ptrtype = build_pointer_type (type);
4743 tree addr = std_gimplify_va_arg_expr (valist, ptrtype, pre_p, post_p);
4744 return build_va_arg_indirect_ref (addr);
4745 }
4746
4747 /* Aggregate arguments with alignment larger than 8 bytes start at
4748 the next even boundary. Integer and floating point arguments
4749 do so if they are larger than 8 bytes, whether or not they are
4750 also aligned larger than 8 bytes. */
4751 if ((TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == INTEGER_TYPE)
4752 ? int_size_in_bytes (type) > 8 : TYPE_ALIGN (type) > 8 * BITS_PER_UNIT)
4753 {
4754 tree t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (valist), valist,
4755 size_int (2 * UNITS_PER_WORD - 1));
4756 t = fold_convert (sizetype, t);
4757 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
4758 size_int (-2 * UNITS_PER_WORD));
4759 t = fold_convert (TREE_TYPE (valist), t);
4760 gimplify_assign (unshare_expr (valist), t, pre_p);
4761 }
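/* With UNITS_PER_WORD == 8 the statements above amount to
   valist = (valist + 15) & -16,
   i.e. rounding the next-argument pointer up to a 16-byte boundary. */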
4762
4763 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
4764 }
4765 \f
4766 /* Return 1 if the function return value is returned in memory. Return 0
4767 if it is in a register. */
4768
4769 static bool
4770 ia64_return_in_memory (const_tree valtype, const_tree fntype ATTRIBUTE_UNUSED)
4771 {
4772 enum machine_mode mode;
4773 enum machine_mode hfa_mode;
4774 HOST_WIDE_INT byte_size;
4775
4776 mode = TYPE_MODE (valtype);
4777 byte_size = GET_MODE_SIZE (mode);
4778 if (mode == BLKmode)
4779 {
4780 byte_size = int_size_in_bytes (valtype);
4781 if (byte_size < 0)
4782 return true;
4783 }
4784
4785 /* HFAs with up to 8 elements are returned in the FP argument registers. */
4786
4787 hfa_mode = hfa_element_mode (valtype, 0);
4788 if (hfa_mode != VOIDmode)
4789 {
4790 int hfa_size = GET_MODE_SIZE (hfa_mode);
4791
4792 if (byte_size / hfa_size > MAX_ARGUMENT_SLOTS)
4793 return true;
4794 else
4795 return false;
4796 }
4797 else if (byte_size > UNITS_PER_WORD * MAX_INT_RETURN_SLOTS)
4798 return true;
4799 else
4800 return false;
4801 }
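/* For instance, an HFA of ten floats has byte_size / hfa_size == 10, which
   exceeds MAX_ARGUMENT_SLOTS (8), so it is returned in memory; an HFA of
   four doubles (4 slots) is returned in FP registers. */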
4802
4803 /* Return rtx for register that holds the function return value. */
4804
4805 static rtx
4806 ia64_function_value (const_tree valtype,
4807 const_tree fn_decl_or_type,
4808 bool outgoing ATTRIBUTE_UNUSED)
4809 {
4810 enum machine_mode mode;
4811 enum machine_mode hfa_mode;
4812 int unsignedp;
4813 const_tree func = fn_decl_or_type;
4814
4815 if (fn_decl_or_type
4816 && !DECL_P (fn_decl_or_type))
4817 func = NULL;
4818
4819 mode = TYPE_MODE (valtype);
4820 hfa_mode = hfa_element_mode (valtype, 0);
4821
4822 if (hfa_mode != VOIDmode)
4823 {
4824 rtx loc[8];
4825 int i;
4826 int hfa_size;
4827 int byte_size;
4828 int offset;
4829
4830 hfa_size = GET_MODE_SIZE (hfa_mode);
4831 byte_size = ((mode == BLKmode)
4832 ? int_size_in_bytes (valtype) : GET_MODE_SIZE (mode));
4833 offset = 0;
4834 for (i = 0; offset < byte_size; i++)
4835 {
4836 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4837 gen_rtx_REG (hfa_mode, FR_ARG_FIRST + i),
4838 GEN_INT (offset));
4839 offset += hfa_size;
4840 }
4841 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4842 }
4843 else if (FLOAT_TYPE_P (valtype) && mode != TFmode && mode != TCmode)
4844 return gen_rtx_REG (mode, FR_ARG_FIRST);
4845 else
4846 {
4847 bool need_parallel = false;
4848
4849 /* In big-endian mode, we need to manage the layout of aggregates
4850 in the registers so that we get the bits properly aligned in
4851 the highpart of the registers. */
4852 if (BYTES_BIG_ENDIAN
4853 && (mode == BLKmode || (valtype && AGGREGATE_TYPE_P (valtype))))
4854 need_parallel = true;
4855
4856 /* Something like struct S { long double x; char a[0] } is not an
4857 HFA structure, and therefore doesn't go in fp registers. But
4858 the middle-end will give it XFmode anyway, and XFmode values
4859 don't normally fit in integer registers. So we need to smuggle
4860 the value inside a parallel. */
4861 else if (mode == XFmode || mode == XCmode || mode == RFmode)
4862 need_parallel = true;
4863
4864 if (need_parallel)
4865 {
4866 rtx loc[8];
4867 int offset;
4868 int bytesize;
4869 int i;
4870
4871 offset = 0;
4872 bytesize = int_size_in_bytes (valtype);
4873 /* An empty PARALLEL is invalid here, but the return value
4874 doesn't matter for empty structs. */
4875 if (bytesize == 0)
4876 return gen_rtx_REG (mode, GR_RET_FIRST);
4877 for (i = 0; offset < bytesize; i++)
4878 {
4879 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4880 gen_rtx_REG (DImode,
4881 GR_RET_FIRST + i),
4882 GEN_INT (offset));
4883 offset += UNITS_PER_WORD;
4884 }
4885 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4886 }
4887
4888 mode = ia64_promote_function_mode (valtype, mode, &unsignedp,
4889 func ? TREE_TYPE (func) : NULL_TREE,
4890 true);
4891
4892 return gen_rtx_REG (mode, GR_RET_FIRST);
4893 }
4894 }
4895
4896 /* Worker function for TARGET_LIBCALL_VALUE. */
4897
4898 static rtx
4899 ia64_libcall_value (enum machine_mode mode,
4900 const_rtx fun ATTRIBUTE_UNUSED)
4901 {
4902 return gen_rtx_REG (mode,
4903 (((GET_MODE_CLASS (mode) == MODE_FLOAT
4904 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
4905 && (mode) != TFmode)
4906 ? FR_RET_FIRST : GR_RET_FIRST));
4907 }
4908
4909 /* Worker function for FUNCTION_VALUE_REGNO_P. */
4910
4911 static bool
4912 ia64_function_value_regno_p (const unsigned int regno)
4913 {
4914 return ((regno >= GR_RET_FIRST && regno <= GR_RET_LAST)
4915 || (regno >= FR_RET_FIRST && regno <= FR_RET_LAST));
4916 }
4917
4918 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
4919 We need to emit DTP-relative relocations. */
4920
4921 static void
4922 ia64_output_dwarf_dtprel (FILE *file, int size, rtx x)
4923 {
4924 gcc_assert (size == 4 || size == 8);
4925 if (size == 4)
4926 fputs ("\tdata4.ua\t@dtprel(", file);
4927 else
4928 fputs ("\tdata8.ua\t@dtprel(", file);
4929 output_addr_const (file, x);
4930 fputs (")", file);
4931 }
4932
4933 /* Print a memory address as an operand to reference that memory location. */
4934
4935 /* ??? Do we need this? It gets used only for 'a' operands. We could perhaps
4936 also call this from ia64_print_operand for memory addresses. */
4937
4938 void
4939 ia64_print_operand_address (FILE * stream ATTRIBUTE_UNUSED,
4940 rtx address ATTRIBUTE_UNUSED)
4941 {
4942 }
4943
4944 /* Print an operand to an assembler instruction.
4945 C Swap and print a comparison operator.
4946 D Print an FP comparison operator.
4947 E Print 32 - constant, for SImode shifts as extract.
4948 e Print 64 - constant, for DImode rotates.
4949 F A floating point constant 0.0 emitted as f0, or 1.0 emitted as f1, or
4950 a floating point register emitted normally.
4951 G A floating point constant.
4952 I Invert a predicate register by adding 1.
4953 J Select the proper predicate register for a condition.
4954 j Select the inverse predicate register for a condition.
4955 O Append .acq for volatile load.
4956 P Postincrement of a MEM.
4957 Q Append .rel for volatile store.
4958 R Print .s .d or nothing for a single, double or no truncation.
4959 S Shift amount for shladd instruction.
4960 T Print an 8-bit sign extended number (K) as a 32-bit unsigned number
4961 for Intel assembler.
4962 U Print an 8-bit sign extended number (K) as a 64-bit unsigned number
4963 for Intel assembler.
4964 X A pair of floating point registers.
4965 r Print register name, or constant 0 as r0. HP compatibility for
4966 Linux kernel.
4967 v Print vector constant value as an 8-byte integer value. */
4968
4969 void
4970 ia64_print_operand (FILE * file, rtx x, int code)
4971 {
4972 const char *str;
4973
4974 switch (code)
4975 {
4976 case 0:
4977 /* Handled below. */
4978 break;
4979
4980 case 'C':
4981 {
4982 enum rtx_code c = swap_condition (GET_CODE (x));
4983 fputs (GET_RTX_NAME (c), file);
4984 return;
4985 }
4986
4987 case 'D':
4988 switch (GET_CODE (x))
4989 {
4990 case NE:
4991 str = "neq";
4992 break;
4993 case UNORDERED:
4994 str = "unord";
4995 break;
4996 case ORDERED:
4997 str = "ord";
4998 break;
4999 case UNLT:
5000 str = "nge";
5001 break;
5002 case UNLE:
5003 str = "ngt";
5004 break;
5005 case UNGT:
5006 str = "nle";
5007 break;
5008 case UNGE:
5009 str = "nlt";
5010 break;
5011 default:
5012 str = GET_RTX_NAME (GET_CODE (x));
5013 break;
5014 }
5015 fputs (str, file);
5016 return;
5017
5018 case 'E':
5019 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - INTVAL (x));
5020 return;
5021
5022 case 'e':
5023 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - INTVAL (x));
5024 return;
5025
5026 case 'F':
5027 if (x == CONST0_RTX (GET_MODE (x)))
5028 str = reg_names [FR_REG (0)];
5029 else if (x == CONST1_RTX (GET_MODE (x)))
5030 str = reg_names [FR_REG (1)];
5031 else
5032 {
5033 gcc_assert (GET_CODE (x) == REG);
5034 str = reg_names [REGNO (x)];
5035 }
5036 fputs (str, file);
5037 return;
5038
5039 case 'G':
5040 {
5041 long val[4];
5042 REAL_VALUE_TYPE rv;
5043 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
5044 real_to_target (val, &rv, GET_MODE (x));
5045 if (GET_MODE (x) == SFmode)
5046 fprintf (file, "0x%08lx", val[0] & 0xffffffff);
5047 else if (GET_MODE (x) == DFmode)
5048 fprintf (file, "0x%08lx%08lx", (WORDS_BIG_ENDIAN ? val[0] : val[1])
5049 & 0xffffffff,
5050 (WORDS_BIG_ENDIAN ? val[1] : val[0])
5051 & 0xffffffff);
5052 else
5053 output_operand_lossage ("invalid %%G mode");
5054 }
5055 return;
5056
5057 case 'I':
5058 fputs (reg_names [REGNO (x) + 1], file);
5059 return;
5060
5061 case 'J':
5062 case 'j':
5063 {
5064 unsigned int regno = REGNO (XEXP (x, 0));
5065 if (GET_CODE (x) == EQ)
5066 regno += 1;
5067 if (code == 'j')
5068 regno ^= 1;
5069 fputs (reg_names [regno], file);
5070 }
5071 return;
5072
5073 case 'O':
5074 if (MEM_VOLATILE_P (x))
5075 fputs(".acq", file);
5076 return;
5077
5078 case 'P':
5079 {
5080 HOST_WIDE_INT value;
5081
5082 switch (GET_CODE (XEXP (x, 0)))
5083 {
5084 default:
5085 return;
5086
5087 case POST_MODIFY:
5088 x = XEXP (XEXP (XEXP (x, 0), 1), 1);
5089 if (GET_CODE (x) == CONST_INT)
5090 value = INTVAL (x);
5091 else
5092 {
5093 gcc_assert (GET_CODE (x) == REG);
5094 fprintf (file, ", %s", reg_names[REGNO (x)]);
5095 return;
5096 }
5097 break;
5098
5099 case POST_INC:
5100 value = GET_MODE_SIZE (GET_MODE (x));
5101 break;
5102
5103 case POST_DEC:
5104 value = - (HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (x));
5105 break;
5106 }
5107
5108 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC, value);
5109 return;
5110 }
5111
5112 case 'Q':
5113 if (MEM_VOLATILE_P (x))
5114 fputs(".rel", file);
5115 return;
5116
5117 case 'R':
5118 if (x == CONST0_RTX (GET_MODE (x)))
5119 fputs(".s", file);
5120 else if (x == CONST1_RTX (GET_MODE (x)))
5121 fputs(".d", file);
5122 else if (x == CONST2_RTX (GET_MODE (x)))
5123 ;
5124 else
5125 output_operand_lossage ("invalid %%R value");
5126 return;
5127
5128 case 'S':
5129 fprintf (file, "%d", exact_log2 (INTVAL (x)));
5130 return;
5131
5132 case 'T':
5133 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
5134 {
5135 fprintf (file, "0x%x", (int) INTVAL (x) & 0xffffffff);
5136 return;
5137 }
5138 break;
5139
5140 case 'U':
5141 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
5142 {
5143 const char *prefix = "0x";
5144 if (INTVAL (x) & 0x80000000)
5145 {
5146 fprintf (file, "0xffffffff");
5147 prefix = "";
5148 }
5149 fprintf (file, "%s%x", prefix, (int) INTVAL (x) & 0xffffffff);
5150 return;
5151 }
5152 break;
5153
5154 case 'X':
5155 {
5156 unsigned int regno = REGNO (x);
5157 fprintf (file, "%s, %s", reg_names [regno], reg_names [regno + 1]);
5158 }
5159 return;
5160
5161 case 'r':
5162 /* If this operand is the constant zero, write it as register zero.
5163 Any register, zero, or CONST_INT value is OK here. */
5164 if (GET_CODE (x) == REG)
5165 fputs (reg_names[REGNO (x)], file);
5166 else if (x == CONST0_RTX (GET_MODE (x)))
5167 fputs ("r0", file);
5168 else if (GET_CODE (x) == CONST_INT)
5169 output_addr_const (file, x);
5170 else
5171 output_operand_lossage ("invalid %%r value");
5172 return;
5173
5174 case 'v':
5175 gcc_assert (GET_CODE (x) == CONST_VECTOR);
5176 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
5177 break;
5178
5179 case '+':
5180 {
5181 const char *which;
5182
5183 /* For conditional branches, returns or calls, substitute
5184 sptk, dptk, dpnt, or spnt for %s. */
5185 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
5186 if (x)
5187 {
5188 int pred_val = INTVAL (XEXP (x, 0));
5189
5190 /* Guess top and bottom 10% statically predicted. */
5191 if (pred_val < REG_BR_PROB_BASE / 50
5192 && br_prob_note_reliable_p (x))
5193 which = ".spnt";
5194 else if (pred_val < REG_BR_PROB_BASE / 2)
5195 which = ".dpnt";
5196 else if (pred_val < REG_BR_PROB_BASE / 100 * 98
5197 || !br_prob_note_reliable_p (x))
5198 which = ".dptk";
5199 else
5200 which = ".sptk";
5201 }
5202 else if (GET_CODE (current_output_insn) == CALL_INSN)
5203 which = ".sptk";
5204 else
5205 which = ".dptk";
5206
5207 fputs (which, file);
5208 return;
5209 }
5210
5211 case ',':
5212 x = current_insn_predicate;
5213 if (x)
5214 {
5215 unsigned int regno = REGNO (XEXP (x, 0));
5216 if (GET_CODE (x) == EQ)
5217 regno += 1;
5218 fprintf (file, "(%s) ", reg_names [regno]);
5219 }
5220 return;
5221
5222 default:
5223 output_operand_lossage ("ia64_print_operand: unknown code");
5224 return;
5225 }
5226
5227 switch (GET_CODE (x))
5228 {
5229 /* This happens for the spill/restore instructions. */
5230 case POST_INC:
5231 case POST_DEC:
5232 case POST_MODIFY:
5233 x = XEXP (x, 0);
5234 /* ... fall through ... */
5235
5236 case REG:
5237 fputs (reg_names [REGNO (x)], file);
5238 break;
5239
5240 case MEM:
5241 {
5242 rtx addr = XEXP (x, 0);
5243 if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC)
5244 addr = XEXP (addr, 0);
5245 fprintf (file, "[%s]", reg_names [REGNO (addr)]);
5246 break;
5247 }
5248
5249 default:
5250 output_addr_const (file, x);
5251 break;
5252 }
5253
5254 return;
5255 }
5256 \f
5257 /* Compute a (partial) cost for rtx X. Return true if the complete
5258 cost has been computed, and false if subexpressions should be
5259 scanned. In either case, *TOTAL contains the cost result. */
5260 /* ??? This is incomplete. */
5261
5262 static bool
5263 ia64_rtx_costs (rtx x, int code, int outer_code, int *total,
5264 bool speed ATTRIBUTE_UNUSED)
5265 {
5266 switch (code)
5267 {
5268 case CONST_INT:
5269 switch (outer_code)
5270 {
5271 case SET:
5272 *total = satisfies_constraint_J (x) ? 0 : COSTS_N_INSNS (1);
5273 return true;
5274 case PLUS:
5275 if (satisfies_constraint_I (x))
5276 *total = 0;
5277 else if (satisfies_constraint_J (x))
5278 *total = 1;
5279 else
5280 *total = COSTS_N_INSNS (1);
5281 return true;
5282 default:
5283 if (satisfies_constraint_K (x) || satisfies_constraint_L (x))
5284 *total = 0;
5285 else
5286 *total = COSTS_N_INSNS (1);
5287 return true;
5288 }
5289
5290 case CONST_DOUBLE:
5291 *total = COSTS_N_INSNS (1);
5292 return true;
5293
5294 case CONST:
5295 case SYMBOL_REF:
5296 case LABEL_REF:
5297 *total = COSTS_N_INSNS (3);
5298 return true;
5299
5300 case FMA:
5301 *total = COSTS_N_INSNS (4);
5302 return true;
5303
5304 case MULT:
5305 /* For multiplies wider than HImode, we have to go to the FPU,
5306 which normally involves copies. Plus there's the latency
5307 of the multiply itself, and the latency of the instructions to
5308 transfer integer regs to FP regs. */
5309 if (FLOAT_MODE_P (GET_MODE (x)))
5310 *total = COSTS_N_INSNS (4);
5311 else if (GET_MODE_SIZE (GET_MODE (x)) > 2)
5312 *total = COSTS_N_INSNS (10);
5313 else
5314 *total = COSTS_N_INSNS (2);
5315 return true;
5316
5317 case PLUS:
5318 case MINUS:
5319 if (FLOAT_MODE_P (GET_MODE (x)))
5320 {
5321 *total = COSTS_N_INSNS (4);
5322 return true;
5323 }
5324 /* FALLTHRU */
5325
5326 case ASHIFT:
5327 case ASHIFTRT:
5328 case LSHIFTRT:
5329 *total = COSTS_N_INSNS (1);
5330 return true;
5331
5332 case DIV:
5333 case UDIV:
5334 case MOD:
5335 case UMOD:
5336 /* We make divide expensive, so that divide-by-constant will be
5337 optimized to a multiply. */
5338 *total = COSTS_N_INSNS (60);
5339 return true;
5340
5341 default:
5342 return false;
5343 }
5344 }
5345
5346 /* Calculate the cost of moving data from a register in class FROM to
5347 one in class TO, using MODE. */
5348
5349 static int
5350 ia64_register_move_cost (enum machine_mode mode, reg_class_t from_i,
5351 reg_class_t to_i)
5352 {
5353 enum reg_class from = (enum reg_class) from_i;
5354 enum reg_class to = (enum reg_class) to_i;
5355
5356 /* ADDL_REGS is the same as GR_REGS for movement purposes. */
5357 if (to == ADDL_REGS)
5358 to = GR_REGS;
5359 if (from == ADDL_REGS)
5360 from = GR_REGS;
5361
5362 /* All costs are symmetric, so reduce cases by putting the
5363 lower number class as the destination. */
5364 if (from < to)
5365 {
5366 enum reg_class tmp = to;
5367 to = from, from = tmp;
5368 }
5369
5370 /* Moving between FR and GR in XFmode must be more expensive than 2,
5371 so that we get secondary memory reloads. Between FR_REGS,
5372 we have to make this at least as expensive as memory_move_cost
5373 to avoid spectacularly poor register class preferencing. */
5374 if (mode == XFmode || mode == RFmode)
5375 {
5376 if (to != GR_REGS || from != GR_REGS)
5377 return memory_move_cost (mode, to, false);
5378 else
5379 return 3;
5380 }
5381
5382 switch (to)
5383 {
5384 case PR_REGS:
5385 /* Moving between PR registers takes two insns. */
5386 if (from == PR_REGS)
5387 return 3;
5388 /* Moving between PR and anything but GR is impossible. */
5389 if (from != GR_REGS)
5390 return memory_move_cost (mode, to, false);
5391 break;
5392
5393 case BR_REGS:
5394 /* Moving between BR and anything but GR is impossible. */
5395 if (from != GR_REGS && from != GR_AND_BR_REGS)
5396 return memory_move_cost (mode, to, false);
5397 break;
5398
5399 case AR_I_REGS:
5400 case AR_M_REGS:
5401 /* Moving between AR and anything but GR is impossible. */
5402 if (from != GR_REGS)
5403 return memory_move_cost (mode, to, false);
5404 break;
5405
5406 case GR_REGS:
5407 case FR_REGS:
5408 case FP_REGS:
5409 case GR_AND_FR_REGS:
5410 case GR_AND_BR_REGS:
5411 case ALL_REGS:
5412 break;
5413
5414 default:
5415 gcc_unreachable ();
5416 }
5417
5418 return 2;
5419 }
5420
5421 /* Calculate the cost of moving data of MODE from a register to or from
5422 memory. */
5423
5424 static int
5425 ia64_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
5426 reg_class_t rclass,
5427 bool in ATTRIBUTE_UNUSED)
5428 {
5429 if (rclass == GENERAL_REGS
5430 || rclass == FR_REGS
5431 || rclass == FP_REGS
5432 || rclass == GR_AND_FR_REGS)
5433 return 4;
5434 else
5435 return 10;
5436 }
5437
5438 /* Implement TARGET_PREFERRED_RELOAD_CLASS. Place additional restrictions
5439 on RCLASS to use when copying X into that class. */
5440
5441 static reg_class_t
5442 ia64_preferred_reload_class (rtx x, reg_class_t rclass)
5443 {
5444 switch (rclass)
5445 {
5446 case FR_REGS:
5447 case FP_REGS:
5448 /* Don't allow volatile mem reloads into floating point registers.
5449 This is defined to force reload to choose the r/m case instead
5450 of the f/f case when reloading (set (reg fX) (mem/v)). */
5451 if (MEM_P (x) && MEM_VOLATILE_P (x))
5452 return NO_REGS;
5453
5454 /* Force all unrecognized constants into the constant pool. */
5455 if (CONSTANT_P (x))
5456 return NO_REGS;
5457 break;
5458
5459 case AR_M_REGS:
5460 case AR_I_REGS:
5461 if (!OBJECT_P (x))
5462 return NO_REGS;
5463 break;
5464
5465 default:
5466 break;
5467 }
5468
5469 return rclass;
5470 }
5471
5472 /* This function returns the register class required for a secondary
5473 register when copying between one of the registers in RCLASS, and X,
5474 using MODE. A return value of NO_REGS means that no secondary register
5475 is required. */
5476
5477 enum reg_class
5478 ia64_secondary_reload_class (enum reg_class rclass,
5479 enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
5480 {
5481 int regno = -1;
5482
5483 if (GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
5484 regno = true_regnum (x);
5485
5486 switch (rclass)
5487 {
5488 case BR_REGS:
5489 case AR_M_REGS:
5490 case AR_I_REGS:
5491 /* ??? BR<->BR register copies can happen due to a bad gcse/cse/global
5492 interaction. We end up with two pseudos with overlapping lifetimes
5493 both of which are equiv to the same constant, and both of which need
5494 to be in BR_REGS. This seems to be a cse bug. cse_basic_block_end
5495 changes depending on the path length, which means the qty_first_reg
5496 check in make_regs_eqv can give different answers at different times.
5497 At some point I'll probably need a reload_indi pattern to handle
5498 this.
5499
5500 We can also get GR_AND_FR_REGS to BR_REGS/AR_REGS copies, where we
5501 wound up with an FP register from GR_AND_FR_REGS. Extend that to all
5502 non-general registers for good measure. */
5503 if (regno >= 0 && ! GENERAL_REGNO_P (regno))
5504 return GR_REGS;
5505
5506 /* This is needed if a pseudo used as a call_operand gets spilled to a
5507 stack slot. */
5508 if (GET_CODE (x) == MEM)
5509 return GR_REGS;
5510 break;
5511
5512 case FR_REGS:
5513 case FP_REGS:
5514 /* Need to go through general registers to get to other class regs. */
5515 if (regno >= 0 && ! (FR_REGNO_P (regno) || GENERAL_REGNO_P (regno)))
5516 return GR_REGS;
5517
5518 /* This can happen when a paradoxical subreg is an operand to the
5519 muldi3 pattern. */
5520 /* ??? This shouldn't be necessary after instruction scheduling is
5521 enabled, because paradoxical subregs are not accepted by
5522 register_operand when INSN_SCHEDULING is defined. Or alternatively,
5523 stop the paradoxical subreg stupidity in the *_operand functions
5524 in recog.c. */
5525 if (GET_CODE (x) == MEM
5526 && (GET_MODE (x) == SImode || GET_MODE (x) == HImode
5527 || GET_MODE (x) == QImode))
5528 return GR_REGS;
5529
5530 /* This can happen because of the ior/and/etc patterns that accept FP
5531 registers as operands. If the third operand is a constant, then it
5532 needs to be reloaded into an FP register. */
5533 if (GET_CODE (x) == CONST_INT)
5534 return GR_REGS;
5535
5536 /* This can happen because of register elimination in a muldi3 insn.
5537 E.g. `26107 * (unsigned long)&u'. */
5538 if (GET_CODE (x) == PLUS)
5539 return GR_REGS;
5540 break;
5541
5542 case PR_REGS:
5543 /* ??? This happens if we cse/gcse a BImode value across a call,
5544 and the function has a nonlocal goto. This is because global
5545 does not allocate call crossing pseudos to hard registers when
5546 crtl->has_nonlocal_goto is true. This is relatively
5547 common for C++ programs that use exceptions. To reproduce,
5548 return NO_REGS and compile libstdc++. */
5549 if (GET_CODE (x) == MEM)
5550 return GR_REGS;
5551
5552 /* This can happen when we take a BImode subreg of a DImode value,
5553 and that DImode value winds up in some non-GR register. */
5554 if (regno >= 0 && ! GENERAL_REGNO_P (regno) && ! PR_REGNO_P (regno))
5555 return GR_REGS;
5556 break;
5557
5558 default:
5559 break;
5560 }
5561
5562 return NO_REGS;
5563 }
5564
5565 \f
5566 /* Implement targetm.unspec_may_trap_p hook. */
5567 static int
5568 ia64_unspec_may_trap_p (const_rtx x, unsigned flags)
5569 {
5570 if (GET_CODE (x) == UNSPEC)
5571 {
5572 switch (XINT (x, 1))
5573 {
5574 case UNSPEC_LDA:
5575 case UNSPEC_LDS:
5576 case UNSPEC_LDSA:
5577 case UNSPEC_LDCCLR:
5578 case UNSPEC_CHKACLR:
5579 case UNSPEC_CHKS:
5580 /* These unspecs are just wrappers. */
5581 return may_trap_p_1 (XVECEXP (x, 0, 0), flags);
5582 }
5583 }
5584
5585 return default_unspec_may_trap_p (x, flags);
5586 }
5587
5588 \f
5589 /* Parse the -mfixed-range= option string. */
5590
5591 static void
5592 fix_range (const char *const_str)
5593 {
5594 int i, first, last;
5595 char *str, *dash, *comma;
5596
5597 /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
5598 REG2 are either register names or register numbers. The effect
5599 of this option is to mark the registers in the range from REG1 to
5600 REG2 as ``fixed'' so they won't be used by the compiler. This is
5601 used, e.g., to ensure that kernel mode code doesn't use f32-f127. */
5602
5603 i = strlen (const_str);
5604 str = (char *) alloca (i + 1);
5605 memcpy (str, const_str, i + 1);
5606
5607 while (1)
5608 {
5609 dash = strchr (str, '-');
5610 if (!dash)
5611 {
5612 warning (0, "value of -mfixed-range must have form REG1-REG2");
5613 return;
5614 }
5615 *dash = '\0';
5616
5617 comma = strchr (dash + 1, ',');
5618 if (comma)
5619 *comma = '\0';
5620
5621 first = decode_reg_name (str);
5622 if (first < 0)
5623 {
5624 warning (0, "unknown register name: %s", str);
5625 return;
5626 }
5627
5628 last = decode_reg_name (dash + 1);
5629 if (last < 0)
5630 {
5631 warning (0, "unknown register name: %s", dash + 1);
5632 return;
5633 }
5634
5635 *dash = '-';
5636
5637 if (first > last)
5638 {
5639 warning (0, "%s-%s is an empty range", str, dash + 1);
5640 return;
5641 }
5642
5643 for (i = first; i <= last; ++i)
5644 fixed_regs[i] = call_used_regs[i] = 1;
5645
5646 if (!comma)
5647 break;
5648
5649 *comma = ',';
5650 str = comma + 1;
5651 }
5652 }
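
/* Illustrative, self-contained sketch -- not part of the target code and kept
   disabled.  It shows the dash/comma parsing scheme used by fix_range above
   on a sample -mfixed-range= string.  The toy_decode helper and the sample
   string are hypothetical; GCC itself uses decode_reg_name and the real
   fixed_regs/call_used_regs arrays.  */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Map "fN" to N; return -1 for anything else.  */
static int
toy_decode (const char *name)
{
  return name[0] == 'f' ? atoi (name + 1) : -1;
}

int
main (void)
{
  char buf[] = "f32-f127,f2-f5";
  char *str = buf;

  while (1)
    {
      char *dash = strchr (str, '-');
      char *comma;
      int first, last;

      if (!dash)
	break;
      *dash = '\0';
      comma = strchr (dash + 1, ',');
      if (comma)
	*comma = '\0';

      first = toy_decode (str);
      last = toy_decode (dash + 1);
      /* Prints "would fix f32..f127" then "would fix f2..f5".  */
      printf ("would fix f%d..f%d\n", first, last);

      if (!comma)
	break;
      str = comma + 1;
    }
  return 0;
}
#endif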
5653
5654 /* Implement TARGET_HANDLE_OPTION. */
5655
5656 static bool
5657 ia64_handle_option (struct gcc_options *opts ATTRIBUTE_UNUSED,
5658 struct gcc_options *opts_set ATTRIBUTE_UNUSED,
5659 const struct cl_decoded_option *decoded,
5660 location_t loc)
5661 {
5662 size_t code = decoded->opt_index;
5663 const char *arg = decoded->arg;
5664 int value = decoded->value;
5665
5666 switch (code)
5667 {
5668 case OPT_mtls_size_:
5669 if (value != 14 && value != 22 && value != 64)
5670 error_at (loc, "bad value %<%s%> for -mtls-size= switch", arg);
5671 return true;
5672
5673 default:
5674 return true;
5675 }
5676 }
5677
5678 /* Implement TARGET_OPTION_OVERRIDE. */
5679
5680 static void
5681 ia64_option_override (void)
5682 {
5683 unsigned int i;
5684 cl_deferred_option *opt;
5685 VEC(cl_deferred_option,heap) *vec
5686 = (VEC(cl_deferred_option,heap) *) ia64_deferred_options;
5687
5688 FOR_EACH_VEC_ELT (cl_deferred_option, vec, i, opt)
5689 {
5690 switch (opt->opt_index)
5691 {
5692 case OPT_mfixed_range_:
5693 fix_range (opt->arg);
5694 break;
5695
5696 default:
5697 gcc_unreachable ();
5698 }
5699 }
5700
5701 if (TARGET_AUTO_PIC)
5702 target_flags |= MASK_CONST_GP;
5703
5704 /* Numerous experiments show that IRA-based loop pressure
5705 calculation works better for RTL loop invariant motion on targets
5706 with enough (>= 32) registers. It is an expensive optimization,
5707 so it is enabled only when optimizing for peak performance. */
5708 if (optimize >= 3)
5709 flag_ira_loop_pressure = 1;
5710
5711
5712 ia64_section_threshold = (global_options_set.x_g_switch_value
5713 ? g_switch_value
5714 : IA64_DEFAULT_GVALUE);
5715
5716 init_machine_status = ia64_init_machine_status;
5717
5718 if (align_functions <= 0)
5719 align_functions = 64;
5720 if (align_loops <= 0)
5721 align_loops = 32;
5722 if (TARGET_ABI_OPEN_VMS)
5723 flag_no_common = 1;
5724
5725 ia64_override_options_after_change ();
5726 }
5727
5728 /* Implement targetm.override_options_after_change. */
5729
5730 static void
5731 ia64_override_options_after_change (void)
5732 {
5733 ia64_flag_schedule_insns2 = flag_schedule_insns_after_reload;
5734 flag_schedule_insns_after_reload = 0;
5735
5736 if (optimize >= 3
5737 && !global_options_set.x_flag_selective_scheduling
5738 && !global_options_set.x_flag_selective_scheduling2)
5739 {
5740 flag_selective_scheduling2 = 1;
5741 flag_sel_sched_pipelining = 1;
5742 }
5743 if (mflag_sched_control_spec == 2)
5744 {
5745 /* Control speculation is on by default for the selective scheduler,
5746 but not for the Haifa scheduler. */
5747 mflag_sched_control_spec = flag_selective_scheduling2 ? 1 : 0;
5748 }
5749 if (flag_sel_sched_pipelining && flag_auto_inc_dec)
5750 {
5751 /* FIXME: remove this when we implement breaking autoinsns as
5752 a transformation. */
5753 flag_auto_inc_dec = 0;
5754 }
5755 }
5756
5757 /* Initialize the record of emitted frame related registers. */
5758
5759 void ia64_init_expanders (void)
5760 {
5761 memset (&emitted_frame_related_regs, 0, sizeof (emitted_frame_related_regs));
5762 }
5763
5764 static struct machine_function *
5765 ia64_init_machine_status (void)
5766 {
5767 return ggc_alloc_cleared_machine_function ();
5768 }
5769 \f
5770 static enum attr_itanium_class ia64_safe_itanium_class (rtx);
5771 static enum attr_type ia64_safe_type (rtx);
5772
5773 static enum attr_itanium_class
5774 ia64_safe_itanium_class (rtx insn)
5775 {
5776 if (recog_memoized (insn) >= 0)
5777 return get_attr_itanium_class (insn);
5778 else if (DEBUG_INSN_P (insn))
5779 return ITANIUM_CLASS_IGNORE;
5780 else
5781 return ITANIUM_CLASS_UNKNOWN;
5782 }
5783
5784 static enum attr_type
5785 ia64_safe_type (rtx insn)
5786 {
5787 if (recog_memoized (insn) >= 0)
5788 return get_attr_type (insn);
5789 else
5790 return TYPE_UNKNOWN;
5791 }
5792 \f
5793 /* The following collection of routines emits instruction group stop bits as
5794 necessary to avoid dependencies. */
5795
5796 /* Need to track some additional registers as far as serialization is
5797 concerned so we can properly handle br.call and br.ret. We could
5798 make these registers visible to gcc, but since these registers are
5799 never explicitly used in gcc generated code, it seems wasteful to
5800 do so (plus it would make the call and return patterns needlessly
5801 complex). */
5802 #define REG_RP (BR_REG (0))
5803 #define REG_AR_CFM (FIRST_PSEUDO_REGISTER + 1)
5804 /* This is used for volatile asms which may require a stop bit immediately
5805 before and after them. */
5806 #define REG_VOLATILE (FIRST_PSEUDO_REGISTER + 2)
5807 #define AR_UNAT_BIT_0 (FIRST_PSEUDO_REGISTER + 3)
5808 #define NUM_REGS (AR_UNAT_BIT_0 + 64)
5809
5810 /* For each register, we keep track of how it has been written in the
5811 current instruction group.
5812
5813 If a register is written unconditionally (no qualifying predicate),
5814 WRITE_COUNT is set to 2 and FIRST_PRED is ignored.
5815
5816 If a register is written if its qualifying predicate P is true, we
5817 set WRITE_COUNT to 1 and FIRST_PRED to P. Later on, the same register
5818 may be written again by the complement of P (P^1) and when this happens,
5819 WRITE_COUNT gets set to 2.
5820
5821 The result of this is that whenever an insn attempts to write a register
5822 whose WRITE_COUNT is two, we need to issue an insn group barrier first.
5823
5824 If a predicate register is written by a floating-point insn, we set
5825 WRITTEN_BY_FP to true.
5826
5827 If a predicate register is written by an AND.ORCM we set WRITTEN_BY_AND
5828 to true; if it was written by an OR.ANDCM we set WRITTEN_BY_OR to true. */
5829
5830 #if GCC_VERSION >= 4000
5831 #define RWS_FIELD_TYPE __extension__ unsigned short
5832 #else
5833 #define RWS_FIELD_TYPE unsigned int
5834 #endif
5835 struct reg_write_state
5836 {
5837 RWS_FIELD_TYPE write_count : 2;
5838 RWS_FIELD_TYPE first_pred : 10;
5839 RWS_FIELD_TYPE written_by_fp : 1;
5840 RWS_FIELD_TYPE written_by_and : 1;
5841 RWS_FIELD_TYPE written_by_or : 1;
5842 };
5843
5844 /* Cumulative info for the current instruction group. */
5845 struct reg_write_state rws_sum[NUM_REGS];
5846 #ifdef ENABLE_CHECKING
5847 /* Bitmap whether a register has been written in the current insn. */
5848 HARD_REG_ELT_TYPE rws_insn[(NUM_REGS + HOST_BITS_PER_WIDEST_FAST_INT - 1)
5849 / HOST_BITS_PER_WIDEST_FAST_INT];
5850
5851 static inline void
5852 rws_insn_set (int regno)
5853 {
5854 gcc_assert (!TEST_HARD_REG_BIT (rws_insn, regno));
5855 SET_HARD_REG_BIT (rws_insn, regno);
5856 }
5857
5858 static inline int
5859 rws_insn_test (int regno)
5860 {
5861 return TEST_HARD_REG_BIT (rws_insn, regno);
5862 }
5863 #else
5864 /* When not checking, track just REG_AR_CFM and REG_VOLATILE. */
5865 unsigned char rws_insn[2];
5866
5867 static inline void
5868 rws_insn_set (int regno)
5869 {
5870 if (regno == REG_AR_CFM)
5871 rws_insn[0] = 1;
5872 else if (regno == REG_VOLATILE)
5873 rws_insn[1] = 1;
5874 }
5875
5876 static inline int
5877 rws_insn_test (int regno)
5878 {
5879 if (regno == REG_AR_CFM)
5880 return rws_insn[0];
5881 if (regno == REG_VOLATILE)
5882 return rws_insn[1];
5883 return 0;
5884 }
5885 #endif
5886
5887 /* Indicates whether this is the first instruction after a stop bit,
5888 in which case we don't need another stop bit. Without this,
5889 ia64_variable_issue will die when scheduling an alloc. */
5890 static int first_instruction;
5891
5892 /* Misc flags needed to compute RAW/WAW dependencies while we are traversing
5893 RTL for one instruction. */
5894 struct reg_flags
5895 {
5896 unsigned int is_write : 1; /* Is register being written? */
5897 unsigned int is_fp : 1; /* Is register used as part of an fp op? */
5898 unsigned int is_branch : 1; /* Is register used as part of a branch? */
5899 unsigned int is_and : 1; /* Is register used as part of and.orcm? */
5900 unsigned int is_or : 1; /* Is register used as part of or.andcm? */
5901 unsigned int is_sibcall : 1; /* Is this a sibling rather than a normal call? */
5902 };
5903
5904 static void rws_update (int, struct reg_flags, int);
5905 static int rws_access_regno (int, struct reg_flags, int);
5906 static int rws_access_reg (rtx, struct reg_flags, int);
5907 static void update_set_flags (rtx, struct reg_flags *);
5908 static int set_src_needs_barrier (rtx, struct reg_flags, int);
5909 static int rtx_needs_barrier (rtx, struct reg_flags, int);
5910 static void init_insn_group_barriers (void);
5911 static int group_barrier_needed (rtx);
5912 static int safe_group_barrier_needed (rtx);
5913 static int in_safe_group_barrier;
5914
5915 /* Update the rws_sum entry for REGNO, which is being written by the current
5916 instruction, with predicate PRED, and associated register flags in FLAGS. */
5917
5918 static void
5919 rws_update (int regno, struct reg_flags flags, int pred)
5920 {
5921 if (pred)
5922 rws_sum[regno].write_count++;
5923 else
5924 rws_sum[regno].write_count = 2;
5925 rws_sum[regno].written_by_fp |= flags.is_fp;
5926 /* ??? Not tracking and/or across differing predicates. */
5927 rws_sum[regno].written_by_and = flags.is_and;
5928 rws_sum[regno].written_by_or = flags.is_or;
5929 rws_sum[regno].first_pred = pred;
5930 }
5931
5932 /* Handle an access to register REGNO of type FLAGS using predicate register
5933 PRED. Update rws_sum array. Return 1 if this access creates
5934 a dependency with an earlier instruction in the same group. */
5935
5936 static int
5937 rws_access_regno (int regno, struct reg_flags flags, int pred)
5938 {
5939 int need_barrier = 0;
5940
5941 gcc_assert (regno < NUM_REGS);
5942
5943 if (! PR_REGNO_P (regno))
5944 flags.is_and = flags.is_or = 0;
5945
5946 if (flags.is_write)
5947 {
5948 int write_count;
5949
5950 rws_insn_set (regno);
5951 write_count = rws_sum[regno].write_count;
5952
5953 switch (write_count)
5954 {
5955 case 0:
5956 /* The register has not been written yet. */
5957 if (!in_safe_group_barrier)
5958 rws_update (regno, flags, pred);
5959 break;
5960
5961 case 1:
5962 /* The register has been written via a predicate. Treat
5963 it like an unconditional write and do not try to check
5964 for a complementary pred reg in an earlier write. */
5965 if (flags.is_and && rws_sum[regno].written_by_and)
5966 ;
5967 else if (flags.is_or && rws_sum[regno].written_by_or)
5968 ;
5969 else
5970 need_barrier = 1;
5971 if (!in_safe_group_barrier)
5972 rws_update (regno, flags, pred);
5973 break;
5974
5975 case 2:
5976 /* The register has been unconditionally written already. We
5977 need a barrier. */
5978 if (flags.is_and && rws_sum[regno].written_by_and)
5979 ;
5980 else if (flags.is_or && rws_sum[regno].written_by_or)
5981 ;
5982 else
5983 need_barrier = 1;
5984 if (!in_safe_group_barrier)
5985 {
5986 rws_sum[regno].written_by_and = flags.is_and;
5987 rws_sum[regno].written_by_or = flags.is_or;
5988 }
5989 break;
5990
5991 default:
5992 gcc_unreachable ();
5993 }
5994 }
5995 else
5996 {
5997 if (flags.is_branch)
5998 {
5999 /* Branches have several RAW exceptions that allow us to avoid
6000 barriers. */
6001
6002 if (REGNO_REG_CLASS (regno) == BR_REGS || regno == AR_PFS_REGNUM)
6003 /* RAW dependencies on branch regs are permissible as long
6004 as the writer is a non-branch instruction. Since we
6005 never generate code that uses a branch register written
6006 by a branch instruction, handling this case is
6007 easy. */
6008 return 0;
6009
6010 if (REGNO_REG_CLASS (regno) == PR_REGS
6011 && ! rws_sum[regno].written_by_fp)
6012 /* The predicates of a branch are available within the
6013 same insn group as long as the predicate was written by
6014 something other than a floating-point instruction. */
6015 return 0;
6016 }
6017
6018 if (flags.is_and && rws_sum[regno].written_by_and)
6019 return 0;
6020 if (flags.is_or && rws_sum[regno].written_by_or)
6021 return 0;
6022
6023 switch (rws_sum[regno].write_count)
6024 {
6025 case 0:
6026 /* The register has not been written yet. */
6027 break;
6028
6029 case 1:
6030 /* The register has been written via a predicate; assume we
6031 need a barrier (don't check for complementary regs). */
6032 need_barrier = 1;
6033 break;
6034
6035 case 2:
6036 /* The register has been unconditionally written already. We
6037 need a barrier. */
6038 need_barrier = 1;
6039 break;
6040
6041 default:
6042 gcc_unreachable ();
6043 }
6044 }
6045
6046 return need_barrier;
6047 }
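
/* Illustrative, self-contained sketch -- not part of the target code and kept
   disabled.  It models the WRITE_COUNT bookkeeping performed by rws_update and
   the write path of rws_access_regno above for a single register, ignoring the
   and.orcm/or.andcm special cases: a predicated write raises the count by one,
   an unqualified write forces it to 2, and any write to a register already
   written in the current group asks for a stop bit.  All names here
   (toy_rws, toy_write) are hypothetical.  */
#if 0
#include <assert.h>

struct toy_rws { int write_count; int first_pred; };

/* Record a write under predicate PRED (0 = unqualified); return 1 if a
   stop bit would be needed before it.  */
static int
toy_write (struct toy_rws *s, int pred)
{
  int need_barrier = (s->write_count != 0);

  if (pred && s->write_count < 2)
    {
      s->write_count++;
      s->first_pred = pred;
    }
  else
    s->write_count = 2;

  return need_barrier;
}

int
main (void)
{
  struct toy_rws r14 = { 0, 0 };
  assert (toy_write (&r14, 6) == 0);  /* (p6) mov r14 = ...            */
  assert (toy_write (&r14, 7) == 1);  /* (p7) mov r14 = ...  -> stop   */
  assert (toy_write (&r14, 0) == 1);  /* mov r14 = ...       -> stop   */
  return 0;
}
#endif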
6048
6049 static int
6050 rws_access_reg (rtx reg, struct reg_flags flags, int pred)
6051 {
6052 int regno = REGNO (reg);
6053 int n = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg));
6054
6055 if (n == 1)
6056 return rws_access_regno (regno, flags, pred);
6057 else
6058 {
6059 int need_barrier = 0;
6060 while (--n >= 0)
6061 need_barrier |= rws_access_regno (regno + n, flags, pred);
6062 return need_barrier;
6063 }
6064 }
6065
6066 /* Examine X, which is a SET rtx, and update the flags stored in *PFLAGS
6067 to reflect the properties of its source. */
6068
6069 static void
6070 update_set_flags (rtx x, struct reg_flags *pflags)
6071 {
6072 rtx src = SET_SRC (x);
6073
6074 switch (GET_CODE (src))
6075 {
6076 case CALL:
6077 return;
6078
6079 case IF_THEN_ELSE:
6080 /* There are four cases here:
6081 (1) The destination is (pc), in which case this is a branch,
6082 nothing here applies.
6083 (2) The destination is ar.lc, in which case this is a
6084 doloop_end_internal,
6085 (3) The destination is an fp register, in which case this is
6086 an fselect instruction.
6087 (4) The condition has (unspec [(reg)] UNSPEC_LDC), in which case
6088 this is a check load.
6089 In all cases, nothing we do in this function applies. */
6090 return;
6091
6092 default:
6093 if (COMPARISON_P (src)
6094 && SCALAR_FLOAT_MODE_P (GET_MODE (XEXP (src, 0))))
6095 /* Set pflags->is_fp to 1 so that we know we're dealing
6096 with a floating point comparison when processing the
6097 destination of the SET. */
6098 pflags->is_fp = 1;
6099
6100 /* Discover if this is a parallel comparison. We only handle
6101 and.orcm and or.andcm at present, since we must retain a
6102 strict inverse on the predicate pair. */
6103 else if (GET_CODE (src) == AND)
6104 pflags->is_and = 1;
6105 else if (GET_CODE (src) == IOR)
6106 pflags->is_or = 1;
6107
6108 break;
6109 }
6110 }
6111
6112 /* Subroutine of rtx_needs_barrier; this function determines whether the
6113 source of a given SET rtx found in X needs a barrier. FLAGS and PRED
6114 are as in rtx_needs_barrier: they describe the access and the
6115 qualifying predicate for this insn. */
6116
6117 static int
6118 set_src_needs_barrier (rtx x, struct reg_flags flags, int pred)
6119 {
6120 int need_barrier = 0;
6121 rtx dst;
6122 rtx src = SET_SRC (x);
6123
6124 if (GET_CODE (src) == CALL)
6125 /* We don't need to worry about the result registers that
6126 get written by a subroutine call. */
6127 return rtx_needs_barrier (src, flags, pred);
6128 else if (SET_DEST (x) == pc_rtx)
6129 {
6130 /* X is a conditional branch. */
6131 /* ??? This seems redundant, as the caller sets this bit for
6132 all JUMP_INSNs. */
6133 if (!ia64_spec_check_src_p (src))
6134 flags.is_branch = 1;
6135 return rtx_needs_barrier (src, flags, pred);
6136 }
6137
6138 if (ia64_spec_check_src_p (src))
6139 /* Avoid checking one register twice (in condition
6140 and in 'then' section) for ldc pattern. */
6141 {
6142 gcc_assert (REG_P (XEXP (src, 2)));
6143 need_barrier = rtx_needs_barrier (XEXP (src, 2), flags, pred);
6144
6145 /* We process MEM below. */
6146 src = XEXP (src, 1);
6147 }
6148
6149 need_barrier |= rtx_needs_barrier (src, flags, pred);
6150
6151 dst = SET_DEST (x);
6152 if (GET_CODE (dst) == ZERO_EXTRACT)
6153 {
6154 need_barrier |= rtx_needs_barrier (XEXP (dst, 1), flags, pred);
6155 need_barrier |= rtx_needs_barrier (XEXP (dst, 2), flags, pred);
6156 }
6157 return need_barrier;
6158 }
6159
6160 /* Handle an access to rtx X of type FLAGS using predicate register
6161 PRED. Return 1 if this access creates a dependency with an earlier
6162 instruction in the same group. */
6163
6164 static int
6165 rtx_needs_barrier (rtx x, struct reg_flags flags, int pred)
6166 {
6167 int i, j;
6168 int is_complemented = 0;
6169 int need_barrier = 0;
6170 const char *format_ptr;
6171 struct reg_flags new_flags;
6172 rtx cond;
6173
6174 if (! x)
6175 return 0;
6176
6177 new_flags = flags;
6178
6179 switch (GET_CODE (x))
6180 {
6181 case SET:
6182 update_set_flags (x, &new_flags);
6183 need_barrier = set_src_needs_barrier (x, new_flags, pred);
6184 if (GET_CODE (SET_SRC (x)) != CALL)
6185 {
6186 new_flags.is_write = 1;
6187 need_barrier |= rtx_needs_barrier (SET_DEST (x), new_flags, pred);
6188 }
6189 break;
6190
6191 case CALL:
6192 new_flags.is_write = 0;
6193 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
6194
6195 /* Avoid multiple register writes, in case this is a pattern with
6196 multiple CALL rtx. This avoids a failure in rws_access_reg. */
6197 if (! flags.is_sibcall && ! rws_insn_test (REG_AR_CFM))
6198 {
6199 new_flags.is_write = 1;
6200 need_barrier |= rws_access_regno (REG_RP, new_flags, pred);
6201 need_barrier |= rws_access_regno (AR_PFS_REGNUM, new_flags, pred);
6202 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
6203 }
6204 break;
6205
6206 case COND_EXEC:
6207 /* X is a predicated instruction. */
6208
6209 cond = COND_EXEC_TEST (x);
6210 gcc_assert (!pred);
6211 need_barrier = rtx_needs_barrier (cond, flags, 0);
6212
6213 if (GET_CODE (cond) == EQ)
6214 is_complemented = 1;
6215 cond = XEXP (cond, 0);
6216 gcc_assert (GET_CODE (cond) == REG
6217 && REGNO_REG_CLASS (REGNO (cond)) == PR_REGS);
6218 pred = REGNO (cond);
6219 if (is_complemented)
6220 ++pred;
6221
6222 need_barrier |= rtx_needs_barrier (COND_EXEC_CODE (x), flags, pred);
6223 return need_barrier;
6224
6225 case CLOBBER:
6226 case USE:
6227 /* Clobber & use are for earlier compiler phases only. */
6228 break;
6229
6230 case ASM_OPERANDS:
6231 case ASM_INPUT:
6232 /* We always emit stop bits for traditional asms. We emit stop bits
6233 for volatile extended asms if TARGET_VOL_ASM_STOP is true. */
6234 if (GET_CODE (x) != ASM_OPERANDS
6235 || (MEM_VOLATILE_P (x) && TARGET_VOL_ASM_STOP))
6236 {
6237 /* Avoid writing the register multiple times if we have multiple
6238 asm outputs. This avoids a failure in rws_access_reg. */
6239 if (! rws_insn_test (REG_VOLATILE))
6240 {
6241 new_flags.is_write = 1;
6242 rws_access_regno (REG_VOLATILE, new_flags, pred);
6243 }
6244 return 1;
6245 }
6246
6247 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
6248 We cannot just fall through here since then we would be confused
6249 by the ASM_INPUT rtx inside ASM_OPERANDS, which, unlike its normal
6250 usage, does not indicate a traditional asm. */
6251
6252 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; --i)
6253 if (rtx_needs_barrier (ASM_OPERANDS_INPUT (x, i), flags, pred))
6254 need_barrier = 1;
6255 break;
6256
6257 case PARALLEL:
6258 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
6259 {
6260 rtx pat = XVECEXP (x, 0, i);
6261 switch (GET_CODE (pat))
6262 {
6263 case SET:
6264 update_set_flags (pat, &new_flags);
6265 need_barrier |= set_src_needs_barrier (pat, new_flags, pred);
6266 break;
6267
6268 case USE:
6269 case CALL:
6270 case ASM_OPERANDS:
6271 need_barrier |= rtx_needs_barrier (pat, flags, pred);
6272 break;
6273
6274 case CLOBBER:
6275 if (REG_P (XEXP (pat, 0))
6276 && extract_asm_operands (x) != NULL_RTX
6277 && REGNO (XEXP (pat, 0)) != AR_UNAT_REGNUM)
6278 {
6279 new_flags.is_write = 1;
6280 need_barrier |= rtx_needs_barrier (XEXP (pat, 0),
6281 new_flags, pred);
6282 new_flags = flags;
6283 }
6284 break;
6285
6286 case RETURN:
6287 break;
6288
6289 default:
6290 gcc_unreachable ();
6291 }
6292 }
6293 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
6294 {
6295 rtx pat = XVECEXP (x, 0, i);
6296 if (GET_CODE (pat) == SET)
6297 {
6298 if (GET_CODE (SET_SRC (pat)) != CALL)
6299 {
6300 new_flags.is_write = 1;
6301 need_barrier |= rtx_needs_barrier (SET_DEST (pat), new_flags,
6302 pred);
6303 }
6304 }
6305 else if (GET_CODE (pat) == CLOBBER || GET_CODE (pat) == RETURN)
6306 need_barrier |= rtx_needs_barrier (pat, flags, pred);
6307 }
6308 break;
6309
6310 case SUBREG:
6311 need_barrier |= rtx_needs_barrier (SUBREG_REG (x), flags, pred);
6312 break;
6313 case REG:
6314 if (REGNO (x) == AR_UNAT_REGNUM)
6315 {
6316 for (i = 0; i < 64; ++i)
6317 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + i, flags, pred);
6318 }
6319 else
6320 need_barrier = rws_access_reg (x, flags, pred);
6321 break;
6322
6323 case MEM:
6324 /* Find the regs used in memory address computation. */
6325 new_flags.is_write = 0;
6326 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
6327 break;
6328
6329 case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR:
6330 case SYMBOL_REF: case LABEL_REF: case CONST:
6331 break;
6332
6333 /* Operators with side-effects. */
6334 case POST_INC: case POST_DEC:
6335 gcc_assert (GET_CODE (XEXP (x, 0)) == REG);
6336
6337 new_flags.is_write = 0;
6338 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
6339 new_flags.is_write = 1;
6340 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
6341 break;
6342
6343 case POST_MODIFY:
6344 gcc_assert (GET_CODE (XEXP (x, 0)) == REG);
6345
6346 new_flags.is_write = 0;
6347 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
6348 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
6349 new_flags.is_write = 1;
6350 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
6351 break;
6352
6353 /* Handle common unary and binary ops for efficiency. */
6354 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
6355 case MOD: case UDIV: case UMOD: case AND: case IOR:
6356 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
6357 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
6358 case NE: case EQ: case GE: case GT: case LE:
6359 case LT: case GEU: case GTU: case LEU: case LTU:
6360 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
6361 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
6362 break;
6363
6364 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
6365 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
6366 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
6367 case SQRT: case FFS: case POPCOUNT:
6368 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
6369 break;
6370
6371 case VEC_SELECT:
6372 /* VEC_SELECT's second argument is a PARALLEL with integers that
6373 describe the elements selected. On ia64, those integers are
6374 always constants. Avoid walking the PARALLEL so that we don't
6375 get confused with "normal" parallels and then die. */
6376 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
6377 break;
6378
6379 case UNSPEC:
6380 switch (XINT (x, 1))
6381 {
6382 case UNSPEC_LTOFF_DTPMOD:
6383 case UNSPEC_LTOFF_DTPREL:
6384 case UNSPEC_DTPREL:
6385 case UNSPEC_LTOFF_TPREL:
6386 case UNSPEC_TPREL:
6387 case UNSPEC_PRED_REL_MUTEX:
6388 case UNSPEC_PIC_CALL:
6389 case UNSPEC_MF:
6390 case UNSPEC_FETCHADD_ACQ:
6391 case UNSPEC_BSP_VALUE:
6392 case UNSPEC_FLUSHRS:
6393 case UNSPEC_BUNDLE_SELECTOR:
6394 break;
6395
6396 case UNSPEC_GR_SPILL:
6397 case UNSPEC_GR_RESTORE:
6398 {
6399 HOST_WIDE_INT offset = INTVAL (XVECEXP (x, 0, 1));
6400 HOST_WIDE_INT bit = (offset >> 3) & 63;
6401
6402 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
6403 new_flags.is_write = (XINT (x, 1) == UNSPEC_GR_SPILL);
6404 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + bit,
6405 new_flags, pred);
6406 break;
6407 }
6408
6409 case UNSPEC_FR_SPILL:
6410 case UNSPEC_FR_RESTORE:
6411 case UNSPEC_GETF_EXP:
6412 case UNSPEC_SETF_EXP:
6413 case UNSPEC_ADDP4:
6414 case UNSPEC_FR_SQRT_RECIP_APPROX:
6415 case UNSPEC_FR_SQRT_RECIP_APPROX_RES:
6416 case UNSPEC_LDA:
6417 case UNSPEC_LDS:
6418 case UNSPEC_LDS_A:
6419 case UNSPEC_LDSA:
6420 case UNSPEC_CHKACLR:
6421 case UNSPEC_CHKS:
6422 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
6423 break;
6424
6425 case UNSPEC_FR_RECIP_APPROX:
6426 case UNSPEC_SHRP:
6427 case UNSPEC_COPYSIGN:
6428 case UNSPEC_FR_RECIP_APPROX_RES:
6429 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
6430 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
6431 break;
6432
6433 case UNSPEC_CMPXCHG_ACQ:
6434 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
6435 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 2), flags, pred);
6436 break;
6437
6438 default:
6439 gcc_unreachable ();
6440 }
6441 break;
6442
6443 case UNSPEC_VOLATILE:
6444 switch (XINT (x, 1))
6445 {
6446 case UNSPECV_ALLOC:
6447 /* Alloc must always be the first instruction of a group.
6448 We force this by always returning true. */
6449 /* ??? We might get better scheduling if we explicitly check for
6450 input/local/output register dependencies, and modify the
6451 scheduler so that alloc is always reordered to the start of
6452 the current group. We could then eliminate all of the
6453 first_instruction code. */
6454 rws_access_regno (AR_PFS_REGNUM, flags, pred);
6455
6456 new_flags.is_write = 1;
6457 rws_access_regno (REG_AR_CFM, new_flags, pred);
6458 return 1;
6459
6460 case UNSPECV_SET_BSP:
6461 need_barrier = 1;
6462 break;
6463
6464 case UNSPECV_BLOCKAGE:
6465 case UNSPECV_INSN_GROUP_BARRIER:
6466 case UNSPECV_BREAK:
6467 case UNSPECV_PSAC_ALL:
6468 case UNSPECV_PSAC_NORMAL:
6469 return 0;
6470
6471 default:
6472 gcc_unreachable ();
6473 }
6474 break;
6475
6476 case RETURN:
6477 new_flags.is_write = 0;
6478 need_barrier = rws_access_regno (REG_RP, flags, pred);
6479 need_barrier |= rws_access_regno (AR_PFS_REGNUM, flags, pred);
6480
6481 new_flags.is_write = 1;
6482 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
6483 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
6484 break;
6485
6486 default:
6487 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
6488 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6489 switch (format_ptr[i])
6490 {
6491 case '0': /* unused field */
6492 case 'i': /* integer */
6493 case 'n': /* note */
6494 case 'w': /* wide integer */
6495 case 's': /* pointer to string */
6496 case 'S': /* optional pointer to string */
6497 break;
6498
6499 case 'e':
6500 if (rtx_needs_barrier (XEXP (x, i), flags, pred))
6501 need_barrier = 1;
6502 break;
6503
6504 case 'E':
6505 for (j = XVECLEN (x, i) - 1; j >= 0; --j)
6506 if (rtx_needs_barrier (XVECEXP (x, i, j), flags, pred))
6507 need_barrier = 1;
6508 break;
6509
6510 default:
6511 gcc_unreachable ();
6512 }
6513 break;
6514 }
6515 return need_barrier;
6516 }
6517
6518 /* Clear out the state for group_barrier_needed at the start of a
6519 sequence of insns. */
6520
6521 static void
6522 init_insn_group_barriers (void)
6523 {
6524 memset (rws_sum, 0, sizeof (rws_sum));
6525 first_instruction = 1;
6526 }
6527
6528 /* Given the current state, determine whether a group barrier (a stop bit) is
6529 necessary before INSN. Return nonzero if so. This modifies the state to
6530 include the effects of INSN as a side-effect. */
6531
6532 static int
6533 group_barrier_needed (rtx insn)
6534 {
6535 rtx pat;
6536 int need_barrier = 0;
6537 struct reg_flags flags;
6538
6539 memset (&flags, 0, sizeof (flags));
6540 switch (GET_CODE (insn))
6541 {
6542 case NOTE:
6543 case DEBUG_INSN:
6544 break;
6545
6546 case BARRIER:
6547 /* A barrier doesn't imply an instruction group boundary. */
6548 break;
6549
6550 case CODE_LABEL:
6551 memset (rws_insn, 0, sizeof (rws_insn));
6552 return 1;
6553
6554 case CALL_INSN:
6555 flags.is_branch = 1;
6556 flags.is_sibcall = SIBLING_CALL_P (insn);
6557 memset (rws_insn, 0, sizeof (rws_insn));
6558
6559 /* Don't bundle a call following another call. */
6560 if ((pat = prev_active_insn (insn))
6561 && GET_CODE (pat) == CALL_INSN)
6562 {
6563 need_barrier = 1;
6564 break;
6565 }
6566
6567 need_barrier = rtx_needs_barrier (PATTERN (insn), flags, 0);
6568 break;
6569
6570 case JUMP_INSN:
6571 if (!ia64_spec_check_p (insn))
6572 flags.is_branch = 1;
6573
6574 /* Don't bundle a jump following a call. */
6575 if ((pat = prev_active_insn (insn))
6576 && GET_CODE (pat) == CALL_INSN)
6577 {
6578 need_barrier = 1;
6579 break;
6580 }
6581 /* FALLTHRU */
6582
6583 case INSN:
6584 if (GET_CODE (PATTERN (insn)) == USE
6585 || GET_CODE (PATTERN (insn)) == CLOBBER)
6586 /* Don't care about USE and CLOBBER "insns"---those are used to
6587 indicate to the optimizer that it shouldn't get rid of
6588 certain operations. */
6589 break;
6590
6591 pat = PATTERN (insn);
6592
6593 /* Ug. Hack hacks hacked elsewhere. */
6594 switch (recog_memoized (insn))
6595 {
6596 /* We play dependency tricks with the epilogue in order
6597 to get proper schedules. Undo this for dv analysis. */
6598 case CODE_FOR_epilogue_deallocate_stack:
6599 case CODE_FOR_prologue_allocate_stack:
6600 pat = XVECEXP (pat, 0, 0);
6601 break;
6602
6603 /* The pattern we use for br.cloop confuses the code above.
6604 The second element of the vector is representative. */
6605 case CODE_FOR_doloop_end_internal:
6606 pat = XVECEXP (pat, 0, 1);
6607 break;
6608
6609 /* Doesn't generate code. */
6610 case CODE_FOR_pred_rel_mutex:
6611 case CODE_FOR_prologue_use:
6612 return 0;
6613
6614 default:
6615 break;
6616 }
6617
6618 memset (rws_insn, 0, sizeof (rws_insn));
6619 need_barrier = rtx_needs_barrier (pat, flags, 0);
6620
6621 /* Check to see if the previous instruction was a volatile
6622 asm. */
6623 if (! need_barrier)
6624 need_barrier = rws_access_regno (REG_VOLATILE, flags, 0);
6625
6626 break;
6627
6628 default:
6629 gcc_unreachable ();
6630 }
6631
6632 if (first_instruction && INSN_P (insn)
6633 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
6634 && GET_CODE (PATTERN (insn)) != USE
6635 && GET_CODE (PATTERN (insn)) != CLOBBER)
6636 {
6637 need_barrier = 0;
6638 first_instruction = 0;
6639 }
6640
6641 return need_barrier;
6642 }
6643
6644 /* Like group_barrier_needed, but do not clobber the current state. */
6645
6646 static int
6647 safe_group_barrier_needed (rtx insn)
6648 {
6649 int saved_first_instruction;
6650 int t;
6651
6652 saved_first_instruction = first_instruction;
6653 in_safe_group_barrier = 1;
6654
6655 t = group_barrier_needed (insn);
6656
6657 first_instruction = saved_first_instruction;
6658 in_safe_group_barrier = 0;
6659
6660 return t;
6661 }
6662
6663 /* Scan the current function and insert stop bits as necessary to
6664 eliminate dependencies. This function assumes that a final
6665 instruction scheduling pass has been run which has already
6666 inserted most of the necessary stop bits. This function only
6667 inserts new ones at basic block boundaries, since these are
6668 invisible to the scheduler. */
6669
6670 static void
6671 emit_insn_group_barriers (FILE *dump)
6672 {
6673 rtx insn;
6674 rtx last_label = 0;
6675 int insns_since_last_label = 0;
6676
6677 init_insn_group_barriers ();
6678
6679 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6680 {
6681 if (GET_CODE (insn) == CODE_LABEL)
6682 {
6683 if (insns_since_last_label)
6684 last_label = insn;
6685 insns_since_last_label = 0;
6686 }
6687 else if (GET_CODE (insn) == NOTE
6688 && NOTE_KIND (insn) == NOTE_INSN_BASIC_BLOCK)
6689 {
6690 if (insns_since_last_label)
6691 last_label = insn;
6692 insns_since_last_label = 0;
6693 }
6694 else if (GET_CODE (insn) == INSN
6695 && GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
6696 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
6697 {
6698 init_insn_group_barriers ();
6699 last_label = 0;
6700 }
6701 else if (NONDEBUG_INSN_P (insn))
6702 {
6703 insns_since_last_label = 1;
6704
6705 if (group_barrier_needed (insn))
6706 {
6707 if (last_label)
6708 {
6709 if (dump)
6710 fprintf (dump, "Emitting stop before label %d\n",
6711 INSN_UID (last_label));
6712 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), last_label);
6713 insn = last_label;
6714
6715 init_insn_group_barriers ();
6716 last_label = 0;
6717 }
6718 }
6719 }
6720 }
6721 }
6722
6723 /* Like emit_insn_group_barriers, but run if no final scheduling pass was run.
6724 This function has to emit all necessary group barriers. */
6725
6726 static void
6727 emit_all_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
6728 {
6729 rtx insn;
6730
6731 init_insn_group_barriers ();
6732
6733 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6734 {
6735 if (GET_CODE (insn) == BARRIER)
6736 {
6737 rtx last = prev_active_insn (insn);
6738
6739 if (! last)
6740 continue;
6741 if (GET_CODE (last) == JUMP_INSN
6742 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
6743 last = prev_active_insn (last);
6744 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
6745 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
6746
6747 init_insn_group_barriers ();
6748 }
6749 else if (NONDEBUG_INSN_P (insn))
6750 {
6751 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
6752 init_insn_group_barriers ();
6753 else if (group_barrier_needed (insn))
6754 {
6755 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
6756 init_insn_group_barriers ();
6757 group_barrier_needed (insn);
6758 }
6759 }
6760 }
6761 }
6762
6763 \f
6764
6765 /* Instruction scheduling support. */
6766
6767 #define NR_BUNDLES 10
6768
6769 /* A list of names of all available bundles. */
6770
6771 static const char *bundle_name [NR_BUNDLES] =
6772 {
6773 ".mii",
6774 ".mmi",
6775 ".mfi",
6776 ".mmf",
6777 #if NR_BUNDLES == 10
6778 ".bbb",
6779 ".mbb",
6780 #endif
6781 ".mib",
6782 ".mmb",
6783 ".mfb",
6784 ".mlx"
6785 };
6786
6787 /* Nonzero if we should insert stop bits into the schedule. */
6788
6789 int ia64_final_schedule = 0;
6790
6791 /* Codes of the corresponding queried units: */
6792
6793 static int _0mii_, _0mmi_, _0mfi_, _0mmf_;
6794 static int _0bbb_, _0mbb_, _0mib_, _0mmb_, _0mfb_, _0mlx_;
6795
6796 static int _1mii_, _1mmi_, _1mfi_, _1mmf_;
6797 static int _1bbb_, _1mbb_, _1mib_, _1mmb_, _1mfb_, _1mlx_;
6798
6799 static int pos_1, pos_2, pos_3, pos_4, pos_5, pos_6;
6800
6801 /* The following variable value is an insn group barrier. */
6802
6803 static rtx dfa_stop_insn;
6804
6805 /* The following variable value is the last issued insn. */
6806
6807 static rtx last_scheduled_insn;
6808
6809 /* The following variable value is pointer to a DFA state used as
6810 temporary variable. */
6811
6812 static state_t temp_dfa_state = NULL;
6813
6814 /* The following variable value is DFA state after issuing the last
6815 insn. */
6816
6817 static state_t prev_cycle_state = NULL;
6818
6819 /* The following array element values are TRUE if the corresponding
6820 insn requires a stop bit to be added before it. */
6821
6822 static char *stops_p = NULL;
6823
6824 /* The following variable is used to set up the above-mentioned array. */
6825
6826 static int stop_before_p = 0;
6827
6828 /* The following variable value is the length of the arrays `clocks' and
6829 `add_cycles'. */
6830
6831 static int clocks_length;
6832
6833 /* The following variable value is number of data speculations in progress. */
6834 static int pending_data_specs = 0;
6835
6836 /* Number of memory references on current and three future processor cycles. */
6837 static char mem_ops_in_group[4];
6838
6839 /* Number of the current processor cycle (from the scheduler's point of view). */
6840 static int current_cycle;
6841
6842 static rtx ia64_single_set (rtx);
6843 static void ia64_emit_insn_before (rtx, rtx);
6844
6845 /* Map a bundle number to its pseudo-op. */
6846
6847 const char *
6848 get_bundle_name (int b)
6849 {
6850 return bundle_name[b];
6851 }
6852
6853
6854 /* Return the maximum number of instructions a cpu can issue. */
6855
6856 static int
6857 ia64_issue_rate (void)
6858 {
6859 return 6;
6860 }
6861
6862 /* Helper function - like single_set, but look inside COND_EXEC. */
6863
6864 static rtx
6865 ia64_single_set (rtx insn)
6866 {
6867 rtx x = PATTERN (insn), ret;
6868 if (GET_CODE (x) == COND_EXEC)
6869 x = COND_EXEC_CODE (x);
6870 if (GET_CODE (x) == SET)
6871 return x;
6872
6873 /* Special case here prologue_allocate_stack and epilogue_deallocate_stack.
6874 Although they are not classical single sets, the second set is there just
6875 to protect it from moving past FP-relative stack accesses. */
6876 switch (recog_memoized (insn))
6877 {
6878 case CODE_FOR_prologue_allocate_stack:
6879 case CODE_FOR_epilogue_deallocate_stack:
6880 ret = XVECEXP (x, 0, 0);
6881 break;
6882
6883 default:
6884 ret = single_set_2 (insn, x);
6885 break;
6886 }
6887
6888 return ret;
6889 }
6890
6891 /* Adjust the cost of a scheduling dependency.
6892 Return the new cost of a dependency of type DEP_TYPE of INSN on DEP_INSN.
6893 COST is the current cost, DW is dependency weakness. */
6894 static int
6895 ia64_adjust_cost_2 (rtx insn, int dep_type1, rtx dep_insn, int cost, dw_t dw)
6896 {
6897 enum reg_note dep_type = (enum reg_note) dep_type1;
6898 enum attr_itanium_class dep_class;
6899 enum attr_itanium_class insn_class;
6900
6901 insn_class = ia64_safe_itanium_class (insn);
6902 dep_class = ia64_safe_itanium_class (dep_insn);
6903
6904 /* Treat true memory dependencies separately. Ignore apparent true
6905 dependence between store and call (call has a MEM inside a SYMBOL_REF). */
6906 if (dep_type == REG_DEP_TRUE
6907 && (dep_class == ITANIUM_CLASS_ST || dep_class == ITANIUM_CLASS_STF)
6908 && (insn_class == ITANIUM_CLASS_BR || insn_class == ITANIUM_CLASS_SCALL))
6909 return 0;
6910
6911 if (dw == MIN_DEP_WEAK)
6912 /* Store and load are likely to alias; use a higher cost to avoid a stall. */
6913 return PARAM_VALUE (PARAM_SCHED_MEM_TRUE_DEP_COST);
6914 else if (dw > MIN_DEP_WEAK)
6915 {
6916 /* Store and load are less likely to alias. */
6917 if (mflag_sched_fp_mem_deps_zero_cost && dep_class == ITANIUM_CLASS_STF)
6918 /* Assume there will be no cache conflict for floating-point data.
6919 For integer data, L1 conflict penalty is huge (17 cycles), so we
6920 never assume it will not cause a conflict. */
6921 return 0;
6922 else
6923 return cost;
6924 }
6925
6926 if (dep_type != REG_DEP_OUTPUT)
6927 return cost;
6928
6929 if (dep_class == ITANIUM_CLASS_ST || dep_class == ITANIUM_CLASS_STF
6930 || insn_class == ITANIUM_CLASS_ST || insn_class == ITANIUM_CLASS_STF)
6931 return 0;
6932
6933 return cost;
6934 }
6935
6936 /* Like emit_insn_before, but skip cycle_display notes.
6937 ??? When cycle display notes are implemented, update this. */
6938
6939 static void
6940 ia64_emit_insn_before (rtx insn, rtx before)
6941 {
6942 emit_insn_before (insn, before);
6943 }
6944
6945 /* The following function marks insns that produce addresses for load
6946 and store insns. Such insns will be placed into M slots because that
6947 decreases latency for Itanium 1 (see function
6948 `ia64_produce_address_p' and the DFA descriptions). */
6949
6950 static void
6951 ia64_dependencies_evaluation_hook (rtx head, rtx tail)
6952 {
6953 rtx insn, next, next_tail;
6954
6955 /* Before reload, which_alternative is not set, which means that
6956 ia64_safe_itanium_class will produce wrong results for (at least)
6957 move instructions. */
6958 if (!reload_completed)
6959 return;
6960
6961 next_tail = NEXT_INSN (tail);
6962 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
6963 if (INSN_P (insn))
6964 insn->call = 0;
6965 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
6966 if (INSN_P (insn)
6967 && ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IALU)
6968 {
6969 sd_iterator_def sd_it;
6970 dep_t dep;
6971 bool has_mem_op_consumer_p = false;
6972
6973 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
6974 {
6975 enum attr_itanium_class c;
6976
6977 if (DEP_TYPE (dep) != REG_DEP_TRUE)
6978 continue;
6979
6980 next = DEP_CON (dep);
6981 c = ia64_safe_itanium_class (next);
6982 if ((c == ITANIUM_CLASS_ST
6983 || c == ITANIUM_CLASS_STF)
6984 && ia64_st_address_bypass_p (insn, next))
6985 {
6986 has_mem_op_consumer_p = true;
6987 break;
6988 }
6989 else if ((c == ITANIUM_CLASS_LD
6990 || c == ITANIUM_CLASS_FLD
6991 || c == ITANIUM_CLASS_FLDP)
6992 && ia64_ld_address_bypass_p (insn, next))
6993 {
6994 has_mem_op_consumer_p = true;
6995 break;
6996 }
6997 }
6998
6999 insn->call = has_mem_op_consumer_p;
7000 }
7001 }
7002
7003 /* We're beginning a new block. Initialize data structures as necessary. */
7004
7005 static void
7006 ia64_sched_init (FILE *dump ATTRIBUTE_UNUSED,
7007 int sched_verbose ATTRIBUTE_UNUSED,
7008 int max_ready ATTRIBUTE_UNUSED)
7009 {
7010 #ifdef ENABLE_CHECKING
7011 rtx insn;
7012
7013 if (!sel_sched_p () && reload_completed)
7014 for (insn = NEXT_INSN (current_sched_info->prev_head);
7015 insn != current_sched_info->next_tail;
7016 insn = NEXT_INSN (insn))
7017 gcc_assert (!SCHED_GROUP_P (insn));
7018 #endif
7019 last_scheduled_insn = NULL_RTX;
7020 init_insn_group_barriers ();
7021
7022 current_cycle = 0;
7023 memset (mem_ops_in_group, 0, sizeof (mem_ops_in_group));
7024 }
7025
7026 /* We're beginning a scheduling pass. Check assertion. */
7027
7028 static void
7029 ia64_sched_init_global (FILE *dump ATTRIBUTE_UNUSED,
7030 int sched_verbose ATTRIBUTE_UNUSED,
7031 int max_ready ATTRIBUTE_UNUSED)
7032 {
7033 gcc_assert (pending_data_specs == 0);
7034 }
7035
7036 /* Scheduling pass is now finished. Free/reset static variable. */
7037 static void
7038 ia64_sched_finish_global (FILE *dump ATTRIBUTE_UNUSED,
7039 int sched_verbose ATTRIBUTE_UNUSED)
7040 {
7041 gcc_assert (pending_data_specs == 0);
7042 }
7043
7044 /* Return TRUE if INSN is a load (either normal or speculative, but not a
7045 speculation check), FALSE otherwise. */
7046 static bool
7047 is_load_p (rtx insn)
7048 {
7049 enum attr_itanium_class insn_class = ia64_safe_itanium_class (insn);
7050
7051 return
7052 ((insn_class == ITANIUM_CLASS_LD || insn_class == ITANIUM_CLASS_FLD)
7053 && get_attr_check_load (insn) == CHECK_LOAD_NO);
7054 }
7055
7056 /* If INSN is a memory reference, memoize it in the MEM_OPS_IN_GROUP global
7057 array (taking into account the 3-cycle cache reference postponing for
7058 stores; see the Intel Itanium 2 Reference Manual for Software Development
7059 and Optimization, section 6.7.3.1). */
7060 static void
7061 record_memory_reference (rtx insn)
7062 {
7063 enum attr_itanium_class insn_class = ia64_safe_itanium_class (insn);
7064
7065 switch (insn_class) {
7066 case ITANIUM_CLASS_FLD:
7067 case ITANIUM_CLASS_LD:
7068 mem_ops_in_group[current_cycle % 4]++;
7069 break;
7070 case ITANIUM_CLASS_STF:
7071 case ITANIUM_CLASS_ST:
7072 mem_ops_in_group[(current_cycle + 3) % 4]++;
7073 break;
7074 default:;
7075 }
7076 }
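
/* Illustrative, self-contained sketch -- not part of the target code and kept
   disabled.  It demonstrates the cycle bucketing used by
   record_memory_reference above: loads are charged to the current cycle's slot
   of a 4-entry ring, while stores are charged three cycles later to model the
   delayed cache reference.  The variable names here are hypothetical.  */
#if 0
#include <stdio.h>

int
main (void)
{
  char ops[4] = { 0, 0, 0, 0 };
  int cycle = 5;

  ops[cycle % 4]++;           /* a load issued on cycle 5 counts on cycle 5 */
  ops[(cycle + 3) % 4]++;     /* a store issued on cycle 5 counts on cycle 8 */

  /* Prints: slot 1 (cycle 5): 1, slot 0 (cycle 8): 1  */
  printf ("slot %d (cycle 5): %d, slot %d (cycle 8): %d\n",
          cycle % 4, ops[cycle % 4], (cycle + 3) % 4, ops[(cycle + 3) % 4]);
  return 0;
}
#endif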
7077
7078 /* We are about to begin issuing insns for this clock cycle.
7079 Override the default sort algorithm to better slot instructions. */
7080
7081 static int
7082 ia64_dfa_sched_reorder (FILE *dump, int sched_verbose, rtx *ready,
7083 int *pn_ready, int clock_var,
7084 int reorder_type)
7085 {
7086 int n_asms;
7087 int n_ready = *pn_ready;
7088 rtx *e_ready = ready + n_ready;
7089 rtx *insnp;
7090
7091 if (sched_verbose)
7092 fprintf (dump, "// ia64_dfa_sched_reorder (type %d):\n", reorder_type);
7093
7094 if (reorder_type == 0)
7095 {
7096 /* First, move all USEs, CLOBBERs and other crud out of the way. */
7097 n_asms = 0;
7098 for (insnp = ready; insnp < e_ready; insnp++)
7099 if (insnp < e_ready)
7100 {
7101 rtx insn = *insnp;
7102 enum attr_type t = ia64_safe_type (insn);
7103 if (t == TYPE_UNKNOWN)
7104 {
7105 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
7106 || asm_noperands (PATTERN (insn)) >= 0)
7107 {
7108 rtx lowest = ready[n_asms];
7109 ready[n_asms] = insn;
7110 *insnp = lowest;
7111 n_asms++;
7112 }
7113 else
7114 {
7115 rtx highest = ready[n_ready - 1];
7116 ready[n_ready - 1] = insn;
7117 *insnp = highest;
7118 return 1;
7119 }
7120 }
7121 }
7122
7123 if (n_asms < n_ready)
7124 {
7125 /* Some normal insns to process. Skip the asms. */
7126 ready += n_asms;
7127 n_ready -= n_asms;
7128 }
7129 else if (n_ready > 0)
7130 return 1;
7131 }
7132
7133 if (ia64_final_schedule)
7134 {
7135 int deleted = 0;
7136 int nr_need_stop = 0;
7137
7138 for (insnp = ready; insnp < e_ready; insnp++)
7139 if (safe_group_barrier_needed (*insnp))
7140 nr_need_stop++;
7141
7142 if (reorder_type == 1 && n_ready == nr_need_stop)
7143 return 0;
7144 if (reorder_type == 0)
7145 return 1;
7146 insnp = e_ready;
7147 /* Move down everything that needs a stop bit, preserving
7148 relative order. */
7149 while (insnp-- > ready + deleted)
7150 while (insnp >= ready + deleted)
7151 {
7152 rtx insn = *insnp;
7153 if (! safe_group_barrier_needed (insn))
7154 break;
7155 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
7156 *ready = insn;
7157 deleted++;
7158 }
7159 n_ready -= deleted;
7160 ready += deleted;
7161 }
7162
7163 current_cycle = clock_var;
7164 if (reload_completed && mem_ops_in_group[clock_var % 4] >= ia64_max_memory_insns)
7165 {
7166 int moved = 0;
7167
7168 insnp = e_ready;
7169 /* Move down loads/stores, preserving relative order. */
7170 while (insnp-- > ready + moved)
7171 while (insnp >= ready + moved)
7172 {
7173 rtx insn = *insnp;
7174 if (! is_load_p (insn))
7175 break;
7176 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
7177 *ready = insn;
7178 moved++;
7179 }
7180 n_ready -= moved;
7181 ready += moved;
7182 }
7183
7184 return 1;
7185 }
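
/* Illustrative, self-contained sketch -- not part of the target code and kept
   disabled.  It reproduces, on plain ints, the memmove-based "move to the
   front of the ready array, preserving relative order" loop that
   ia64_dfa_sched_reorder above uses both for insns needing a stop bit and for
   loads.  The predicate needs_stop and the sample data are hypothetical.  */
#if 0
#include <stdio.h>
#include <string.h>

static int
needs_stop (int x)
{
  return x % 2 == 0;  /* pretend even numbers "need a stop bit" */
}

int
main (void)
{
  int ready[6] = { 1, 2, 3, 4, 5, 6 };
  int n_ready = 6, deleted = 0;
  int *insnp;
  int i;

  insnp = ready + n_ready;
  while (insnp-- > ready + deleted)
    while (insnp >= ready + deleted)
      {
	int insn = *insnp;
	if (! needs_stop (insn))
	  break;
	memmove (ready + 1, ready, (insnp - ready) * sizeof (int));
	*ready = insn;
	deleted++;
      }

  /* Prints: 2 4 6 1 3 5 -- a stable partition with the "stop" elements
     moved to the low-priority end of the array.  */
  for (i = 0; i < n_ready; i++)
    printf ("%d ", ready[i]);
  printf ("\n");
  return 0;
}
#endif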
7186
7187 /* We are about to begin issuing insns for this clock cycle. Override
7188 the default sort algorithm to better slot instructions. */
7189
7190 static int
7191 ia64_sched_reorder (FILE *dump, int sched_verbose, rtx *ready, int *pn_ready,
7192 int clock_var)
7193 {
7194 return ia64_dfa_sched_reorder (dump, sched_verbose, ready,
7195 pn_ready, clock_var, 0);
7196 }
7197
7198 /* Like ia64_sched_reorder, but called after issuing each insn.
7199 Override the default sort algorithm to better slot instructions. */
7200
7201 static int
7202 ia64_sched_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
7203 int sched_verbose ATTRIBUTE_UNUSED, rtx *ready,
7204 int *pn_ready, int clock_var)
7205 {
7206 return ia64_dfa_sched_reorder (dump, sched_verbose, ready, pn_ready,
7207 clock_var, 1);
7208 }
7209
7210 /* We are about to issue INSN. Return the number of insns left on the
7211 ready queue that can be issued this cycle. */
7212
7213 static int
7214 ia64_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
7215 int sched_verbose ATTRIBUTE_UNUSED,
7216 rtx insn ATTRIBUTE_UNUSED,
7217 int can_issue_more ATTRIBUTE_UNUSED)
7218 {
7219 if (sched_deps_info->generate_spec_deps && !sel_sched_p ())
7220 /* Modulo scheduling does not extend h_i_d when emitting
7221 new instructions. Don't use h_i_d if we don't have to. */
7222 {
7223 if (DONE_SPEC (insn) & BEGIN_DATA)
7224 pending_data_specs++;
7225 if (CHECK_SPEC (insn) & BEGIN_DATA)
7226 pending_data_specs--;
7227 }
7228
7229 if (DEBUG_INSN_P (insn))
7230 return 1;
7231
7232 last_scheduled_insn = insn;
7233 memcpy (prev_cycle_state, curr_state, dfa_state_size);
7234 if (reload_completed)
7235 {
7236 int needed = group_barrier_needed (insn);
7237
7238 gcc_assert (!needed);
7239 if (GET_CODE (insn) == CALL_INSN)
7240 init_insn_group_barriers ();
7241 stops_p [INSN_UID (insn)] = stop_before_p;
7242 stop_before_p = 0;
7243
7244 record_memory_reference (insn);
7245 }
7246 return 1;
7247 }
7248
7249 /* We are choosing insn from the ready queue. Return nonzero if INSN
7250 can be chosen. */
7251
7252 static int
7253 ia64_first_cycle_multipass_dfa_lookahead_guard (rtx insn)
7254 {
7255 gcc_assert (insn && INSN_P (insn));
7256 return ((!reload_completed
7257 || !safe_group_barrier_needed (insn))
7258 && ia64_first_cycle_multipass_dfa_lookahead_guard_spec (insn)
7259 && (!mflag_sched_mem_insns_hard_limit
7260 || !is_load_p (insn)
7261 || mem_ops_in_group[current_cycle % 4] < ia64_max_memory_insns));
7262 }
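/* The guard above rejects an insn in three situations: after reload when
   issuing it now would require a stop bit, when it would start data
   speculation while the ALAT budget is already used up (see the _spec
   variant below), and, if the hard limit is enabled, when it is a load
   and the current cycle already contains ia64_max_memory_insns memory
   operations.  */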
7263
7264 /* We are choosing insn from the ready queue. Return nonzero if INSN
7265 can be chosen. */
7266
7267 static bool
7268 ia64_first_cycle_multipass_dfa_lookahead_guard_spec (const_rtx insn)
7269 {
7270 gcc_assert (insn && INSN_P (insn));
7271 /* The ALAT has 32 entries. Since we perform conservative data speculation,
7272 we keep the ALAT half-empty. */
7273 return (pending_data_specs < 16
7274 || !(TODO_SPEC (insn) & BEGIN_DATA));
7275 }
7276
7277 /* The following variable value is a pseudo-insn used by the DFA insn
7278 scheduler to change the DFA state when the simulated clock is
7279 increased. */
7280
7281 static rtx dfa_pre_cycle_insn;
7282
7283 /* Returns 1 when a meaningful insn was scheduled between the last group
7284 barrier and LAST. */
7285 static int
7286 scheduled_good_insn (rtx last)
7287 {
7288 if (last && recog_memoized (last) >= 0)
7289 return 1;
7290
7291 for ( ;
7292 last != NULL && !NOTE_INSN_BASIC_BLOCK_P (last)
7293 && !stops_p[INSN_UID (last)];
7294 last = PREV_INSN (last))
7295 /* We could hit a NOTE_INSN_DELETED here which is actually outside
7296 the ebb we're scheduling. */
7297 if (INSN_P (last) && recog_memoized (last) >= 0)
7298 return 1;
7299
7300 return 0;
7301 }
7302
7303 /* We are about to begin issuing INSN. Return nonzero if we cannot
7304 issue it on given cycle CLOCK and return zero if we should not sort
7305 the ready queue on the next clock start. */
7306
7307 static int
7308 ia64_dfa_new_cycle (FILE *dump, int verbose, rtx insn, int last_clock,
7309 int clock, int *sort_p)
7310 {
7311 gcc_assert (insn && INSN_P (insn));
7312
7313 if (DEBUG_INSN_P (insn))
7314 return 0;
7315
7316 /* When a group barrier is needed for insn, last_scheduled_insn
7317 should be set. */
7318 gcc_assert (!(reload_completed && safe_group_barrier_needed (insn))
7319 || last_scheduled_insn);
7320
7321 if ((reload_completed
7322 && (safe_group_barrier_needed (insn)
7323 || (mflag_sched_stop_bits_after_every_cycle
7324 && last_clock != clock
7325 && last_scheduled_insn
7326 && scheduled_good_insn (last_scheduled_insn))))
7327 || (last_scheduled_insn
7328 && (GET_CODE (last_scheduled_insn) == CALL_INSN
7329 || GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
7330 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)))
7331 {
7332 init_insn_group_barriers ();
7333
7334 if (verbose && dump)
7335 fprintf (dump, "// Stop should be before %d%s\n", INSN_UID (insn),
7336 last_clock == clock ? " + cycle advance" : "");
7337
7338 stop_before_p = 1;
7339 current_cycle = clock;
7340 mem_ops_in_group[current_cycle % 4] = 0;
7341
7342 if (last_clock == clock)
7343 {
7344 state_transition (curr_state, dfa_stop_insn);
7345 if (TARGET_EARLY_STOP_BITS)
7346 *sort_p = (last_scheduled_insn == NULL_RTX
7347 || GET_CODE (last_scheduled_insn) != CALL_INSN);
7348 else
7349 *sort_p = 0;
7350 return 1;
7351 }
7352
7353 if (last_scheduled_insn)
7354 {
7355 if (GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
7356 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)
7357 state_reset (curr_state);
7358 else
7359 {
7360 memcpy (curr_state, prev_cycle_state, dfa_state_size);
7361 state_transition (curr_state, dfa_stop_insn);
7362 state_transition (curr_state, dfa_pre_cycle_insn);
7363 state_transition (curr_state, NULL);
7364 }
7365 }
7366 }
7367 return 0;
7368 }
7369
7370 /* Implement targetm.sched.h_i_d_extended hook.
7371 Extend internal data structures. */
7372 static void
7373 ia64_h_i_d_extended (void)
7374 {
7375 if (stops_p != NULL)
7376 {
7377 int new_clocks_length = get_max_uid () * 3 / 2;
7378 stops_p = (char *) xrecalloc (stops_p, new_clocks_length, clocks_length, 1);
7379 clocks_length = new_clocks_length;
7380 }
7381 }
7382 \f
7383
7384 /* This structure describes the data used by the backend to guide scheduling.
7385 When the current scheduling point is switched, this data should be saved
7386 and restored later, if the scheduler returns to this point. */
7387 struct _ia64_sched_context
7388 {
7389 state_t prev_cycle_state;
7390 rtx last_scheduled_insn;
7391 struct reg_write_state rws_sum[NUM_REGS];
7392 struct reg_write_state rws_insn[NUM_REGS];
7393 int first_instruction;
7394 int pending_data_specs;
7395 int current_cycle;
7396 char mem_ops_in_group[4];
7397 };
7398 typedef struct _ia64_sched_context *ia64_sched_context_t;
7399
7400 /* Allocates a scheduling context. */
7401 static void *
7402 ia64_alloc_sched_context (void)
7403 {
7404 return xmalloc (sizeof (struct _ia64_sched_context));
7405 }
7406
7407 /* Initializes the _SC context with clean data, if CLEAN_P, and from
7408 the global context otherwise. */
7409 static void
7410 ia64_init_sched_context (void *_sc, bool clean_p)
7411 {
7412 ia64_sched_context_t sc = (ia64_sched_context_t) _sc;
7413
7414 sc->prev_cycle_state = xmalloc (dfa_state_size);
7415 if (clean_p)
7416 {
7417 state_reset (sc->prev_cycle_state);
7418 sc->last_scheduled_insn = NULL_RTX;
7419 memset (sc->rws_sum, 0, sizeof (rws_sum));
7420 memset (sc->rws_insn, 0, sizeof (rws_insn));
7421 sc->first_instruction = 1;
7422 sc->pending_data_specs = 0;
7423 sc->current_cycle = 0;
7424 memset (sc->mem_ops_in_group, 0, sizeof (mem_ops_in_group));
7425 }
7426 else
7427 {
7428 memcpy (sc->prev_cycle_state, prev_cycle_state, dfa_state_size);
7429 sc->last_scheduled_insn = last_scheduled_insn;
7430 memcpy (sc->rws_sum, rws_sum, sizeof (rws_sum));
7431 memcpy (sc->rws_insn, rws_insn, sizeof (rws_insn));
7432 sc->first_instruction = first_instruction;
7433 sc->pending_data_specs = pending_data_specs;
7434 sc->current_cycle = current_cycle;
7435 memcpy (sc->mem_ops_in_group, mem_ops_in_group, sizeof (mem_ops_in_group));
7436 }
7437 }
7438
7439 /* Sets the global scheduling context to the one pointed to by _SC. */
7440 static void
7441 ia64_set_sched_context (void *_sc)
7442 {
7443 ia64_sched_context_t sc = (ia64_sched_context_t) _sc;
7444
7445 gcc_assert (sc != NULL);
7446
7447 memcpy (prev_cycle_state, sc->prev_cycle_state, dfa_state_size);
7448 last_scheduled_insn = sc->last_scheduled_insn;
7449 memcpy (rws_sum, sc->rws_sum, sizeof (rws_sum));
7450 memcpy (rws_insn, sc->rws_insn, sizeof (rws_insn));
7451 first_instruction = sc->first_instruction;
7452 pending_data_specs = sc->pending_data_specs;
7453 current_cycle = sc->current_cycle;
7454 memcpy (mem_ops_in_group, sc->mem_ops_in_group, sizeof (mem_ops_in_group));
7455 }
7456
7457 /* Clears the data in the _SC scheduling context. */
7458 static void
7459 ia64_clear_sched_context (void *_sc)
7460 {
7461 ia64_sched_context_t sc = (ia64_sched_context_t) _sc;
7462
7463 free (sc->prev_cycle_state);
7464 sc->prev_cycle_state = NULL;
7465 }
7466
7467 /* Frees the _SC scheduling context. */
7468 static void
7469 ia64_free_sched_context (void *_sc)
7470 {
7471 gcc_assert (_sc != NULL);
7472
7473 free (_sc);
7474 }
7475
7476 typedef rtx (* gen_func_t) (rtx, rtx);
7477
7478 /* Return a function that will generate a load of mode MODE_NO
7479 with speculation types TS. */
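/* The five tables below are indexed by the mode number returned by
   ia64_mode_to_int (the zero-extending variants come last).  Roughly:
   plain moves, advanced loads (the ld.a forms), control-speculative
   loads (ld.s), combined data and control speculative loads (ld.sa),
   and the speculative_a forms, which are chosen when a
   control-speculative load is going to be checked with a simple ld.c
   rather than chk.s.  */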
7480 static gen_func_t
7481 get_spec_load_gen_function (ds_t ts, int mode_no)
7482 {
7483 static gen_func_t gen_ld_[] = {
7484 gen_movbi,
7485 gen_movqi_internal,
7486 gen_movhi_internal,
7487 gen_movsi_internal,
7488 gen_movdi_internal,
7489 gen_movsf_internal,
7490 gen_movdf_internal,
7491 gen_movxf_internal,
7492 gen_movti_internal,
7493 gen_zero_extendqidi2,
7494 gen_zero_extendhidi2,
7495 gen_zero_extendsidi2,
7496 };
7497
7498 static gen_func_t gen_ld_a[] = {
7499 gen_movbi_advanced,
7500 gen_movqi_advanced,
7501 gen_movhi_advanced,
7502 gen_movsi_advanced,
7503 gen_movdi_advanced,
7504 gen_movsf_advanced,
7505 gen_movdf_advanced,
7506 gen_movxf_advanced,
7507 gen_movti_advanced,
7508 gen_zero_extendqidi2_advanced,
7509 gen_zero_extendhidi2_advanced,
7510 gen_zero_extendsidi2_advanced,
7511 };
7512 static gen_func_t gen_ld_s[] = {
7513 gen_movbi_speculative,
7514 gen_movqi_speculative,
7515 gen_movhi_speculative,
7516 gen_movsi_speculative,
7517 gen_movdi_speculative,
7518 gen_movsf_speculative,
7519 gen_movdf_speculative,
7520 gen_movxf_speculative,
7521 gen_movti_speculative,
7522 gen_zero_extendqidi2_speculative,
7523 gen_zero_extendhidi2_speculative,
7524 gen_zero_extendsidi2_speculative,
7525 };
7526 static gen_func_t gen_ld_sa[] = {
7527 gen_movbi_speculative_advanced,
7528 gen_movqi_speculative_advanced,
7529 gen_movhi_speculative_advanced,
7530 gen_movsi_speculative_advanced,
7531 gen_movdi_speculative_advanced,
7532 gen_movsf_speculative_advanced,
7533 gen_movdf_speculative_advanced,
7534 gen_movxf_speculative_advanced,
7535 gen_movti_speculative_advanced,
7536 gen_zero_extendqidi2_speculative_advanced,
7537 gen_zero_extendhidi2_speculative_advanced,
7538 gen_zero_extendsidi2_speculative_advanced,
7539 };
7540 static gen_func_t gen_ld_s_a[] = {
7541 gen_movbi_speculative_a,
7542 gen_movqi_speculative_a,
7543 gen_movhi_speculative_a,
7544 gen_movsi_speculative_a,
7545 gen_movdi_speculative_a,
7546 gen_movsf_speculative_a,
7547 gen_movdf_speculative_a,
7548 gen_movxf_speculative_a,
7549 gen_movti_speculative_a,
7550 gen_zero_extendqidi2_speculative_a,
7551 gen_zero_extendhidi2_speculative_a,
7552 gen_zero_extendsidi2_speculative_a,
7553 };
7554
7555 gen_func_t *gen_ld;
7556
7557 if (ts & BEGIN_DATA)
7558 {
7559 if (ts & BEGIN_CONTROL)
7560 gen_ld = gen_ld_sa;
7561 else
7562 gen_ld = gen_ld_a;
7563 }
7564 else if (ts & BEGIN_CONTROL)
7565 {
7566 if ((spec_info->flags & SEL_SCHED_SPEC_DONT_CHECK_CONTROL)
7567 || ia64_needs_block_p (ts))
7568 gen_ld = gen_ld_s;
7569 else
7570 gen_ld = gen_ld_s_a;
7571 }
7572 else if (ts == 0)
7573 gen_ld = gen_ld_;
7574 else
7575 gcc_unreachable ();
7576
7577 return gen_ld[mode_no];
7578 }
7579
7580 /* Constants that help mapping 'enum machine_mode' to int. */
7581 enum SPEC_MODES
7582 {
7583 SPEC_MODE_INVALID = -1,
7584 SPEC_MODE_FIRST = 0,
7585 SPEC_MODE_FOR_EXTEND_FIRST = 1,
7586 SPEC_MODE_FOR_EXTEND_LAST = 3,
7587 SPEC_MODE_LAST = 8
7588 };
7589
7590 enum
7591 {
7592 /* Offset to reach ZERO_EXTEND patterns. */
7593 SPEC_GEN_EXTEND_OFFSET = SPEC_MODE_LAST - SPEC_MODE_FOR_EXTEND_FIRST + 1
7594 };
7595
7596 /* Return index of the MODE. */
7597 static int
7598 ia64_mode_to_int (enum machine_mode mode)
7599 {
7600 switch (mode)
7601 {
7602 case BImode: return 0; /* SPEC_MODE_FIRST */
7603 case QImode: return 1; /* SPEC_MODE_FOR_EXTEND_FIRST */
7604 case HImode: return 2;
7605 case SImode: return 3; /* SPEC_MODE_FOR_EXTEND_LAST */
7606 case DImode: return 4;
7607 case SFmode: return 5;
7608 case DFmode: return 6;
7609 case XFmode: return 7;
7610 case TImode:
7611 /* ??? This mode needs testing. Bypasses for ldfp8 instruction are not
7612 mentioned in itanium[12].md. Predicate fp_register_operand also
7613 needs to be defined. Bottom line: better disable for now. */
7614 return SPEC_MODE_INVALID;
7615 default: return SPEC_MODE_INVALID;
7616 }
7617 }
7618
7619 /* Provide information about speculation capabilities. */
7620 static void
7621 ia64_set_sched_flags (spec_info_t spec_info)
7622 {
7623 unsigned int *flags = &(current_sched_info->flags);
7624
7625 if (*flags & SCHED_RGN
7626 || *flags & SCHED_EBB
7627 || *flags & SEL_SCHED)
7628 {
7629 int mask = 0;
7630
7631 if ((mflag_sched_br_data_spec && !reload_completed && optimize > 0)
7632 || (mflag_sched_ar_data_spec && reload_completed))
7633 {
7634 mask |= BEGIN_DATA;
7635
7636 if (!sel_sched_p ()
7637 && ((mflag_sched_br_in_data_spec && !reload_completed)
7638 || (mflag_sched_ar_in_data_spec && reload_completed)))
7639 mask |= BE_IN_DATA;
7640 }
7641
7642 if (mflag_sched_control_spec
7643 && (!sel_sched_p ()
7644 || reload_completed))
7645 {
7646 mask |= BEGIN_CONTROL;
7647
7648 if (!sel_sched_p () && mflag_sched_in_control_spec)
7649 mask |= BE_IN_CONTROL;
7650 }
7651
7652 spec_info->mask = mask;
7653
7654 if (mask)
7655 {
7656 *flags |= USE_DEPS_LIST | DO_SPECULATION;
7657
7658 if (mask & BE_IN_SPEC)
7659 *flags |= NEW_BBS;
7660
7661 spec_info->flags = 0;
7662
7663 if ((mask & DATA_SPEC) && mflag_sched_prefer_non_data_spec_insns)
7664 spec_info->flags |= PREFER_NON_DATA_SPEC;
7665
7666 if (mask & CONTROL_SPEC)
7667 {
7668 if (mflag_sched_prefer_non_control_spec_insns)
7669 spec_info->flags |= PREFER_NON_CONTROL_SPEC;
7670
7671 if (sel_sched_p () && mflag_sel_sched_dont_check_control_spec)
7672 spec_info->flags |= SEL_SCHED_SPEC_DONT_CHECK_CONTROL;
7673 }
7674
7675 if (sched_verbose >= 1)
7676 spec_info->dump = sched_dump;
7677 else
7678 spec_info->dump = 0;
7679
7680 if (mflag_sched_count_spec_in_critical_path)
7681 spec_info->flags |= COUNT_SPEC_IN_CRITICAL_PATH;
7682 }
7683 }
7684 else
7685 spec_info->mask = 0;
7686 }
7687
7688 /* If INSN is an appropriate load return its mode.
7689 Return -1 otherwise. */
7690 static int
7691 get_mode_no_for_insn (rtx insn)
7692 {
7693 rtx reg, mem, mode_rtx;
7694 int mode_no;
7695 bool extend_p;
7696
7697 extract_insn_cached (insn);
7698
7699 /* We use WHICH_ALTERNATIVE only after reload. This will
7700 guarantee that reload won't touch a speculative insn. */
7701
7702 if (recog_data.n_operands != 2)
7703 return -1;
7704
7705 reg = recog_data.operand[0];
7706 mem = recog_data.operand[1];
7707
7708 /* We should use MEM's mode since REG's mode in the presence of
7709 ZERO_EXTEND will always be DImode. */
7710 if (get_attr_speculable1 (insn) == SPECULABLE1_YES)
7711 /* Process non-speculative ld. */
7712 {
7713 if (!reload_completed)
7714 {
7715 /* Do not speculate into regs like ar.lc. */
7716 if (!REG_P (reg) || AR_REGNO_P (REGNO (reg)))
7717 return -1;
7718
7719 if (!MEM_P (mem))
7720 return -1;
7721
7722 {
7723 rtx mem_reg = XEXP (mem, 0);
7724
7725 if (!REG_P (mem_reg))
7726 return -1;
7727 }
7728
7729 mode_rtx = mem;
7730 }
7731 else if (get_attr_speculable2 (insn) == SPECULABLE2_YES)
7732 {
7733 gcc_assert (REG_P (reg) && MEM_P (mem));
7734 mode_rtx = mem;
7735 }
7736 else
7737 return -1;
7738 }
7739 else if (get_attr_data_speculative (insn) == DATA_SPECULATIVE_YES
7740 || get_attr_control_speculative (insn) == CONTROL_SPECULATIVE_YES
7741 || get_attr_check_load (insn) == CHECK_LOAD_YES)
7742 /* Process speculative ld or ld.c. */
7743 {
7744 gcc_assert (REG_P (reg) && MEM_P (mem));
7745 mode_rtx = mem;
7746 }
7747 else
7748 {
7749 enum attr_itanium_class attr_class = get_attr_itanium_class (insn);
7750
7751 if (attr_class == ITANIUM_CLASS_CHK_A
7752 || attr_class == ITANIUM_CLASS_CHK_S_I
7753 || attr_class == ITANIUM_CLASS_CHK_S_F)
7754 /* Process chk. */
7755 mode_rtx = reg;
7756 else
7757 return -1;
7758 }
7759
7760 mode_no = ia64_mode_to_int (GET_MODE (mode_rtx));
7761
7762 if (mode_no == SPEC_MODE_INVALID)
7763 return -1;
7764
7765 extend_p = (GET_MODE (reg) != GET_MODE (mode_rtx));
7766
7767 if (extend_p)
7768 {
7769 if (!(SPEC_MODE_FOR_EXTEND_FIRST <= mode_no
7770 && mode_no <= SPEC_MODE_FOR_EXTEND_LAST))
7771 return -1;
7772
7773 mode_no += SPEC_GEN_EXTEND_OFFSET;
7774 }
7775
7776 return mode_no;
7777 }
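/* A worked example of the mapping above: a DImode load gets index 4 and
   selects the gen_movdi_* entries of the tables in
   get_spec_load_gen_function, while a QImode load feeding a ZERO_EXTEND
   gets 1 + SPEC_GEN_EXTEND_OFFSET = 9 and selects the
   gen_zero_extendqidi2_* entries.  */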
7778
7779 /* If X is an unspec part of a speculative load, return its code.
7780 Return -1 otherwise. */
7781 static int
7782 get_spec_unspec_code (const_rtx x)
7783 {
7784 if (GET_CODE (x) != UNSPEC)
7785 return -1;
7786
7787 {
7788 int code;
7789
7790 code = XINT (x, 1);
7791
7792 switch (code)
7793 {
7794 case UNSPEC_LDA:
7795 case UNSPEC_LDS:
7796 case UNSPEC_LDS_A:
7797 case UNSPEC_LDSA:
7798 return code;
7799
7800 default:
7801 return -1;
7802 }
7803 }
7804 }
7805
7806 /* Implement skip_rtx_p hook. */
7807 static bool
7808 ia64_skip_rtx_p (const_rtx x)
7809 {
7810 return get_spec_unspec_code (x) != -1;
7811 }
7812
7813 /* If INSN is a speculative load, return its UNSPEC code.
7814 Return -1 otherwise. */
7815 static int
7816 get_insn_spec_code (const_rtx insn)
7817 {
7818 rtx pat, reg, mem;
7819
7820 pat = PATTERN (insn);
7821
7822 if (GET_CODE (pat) == COND_EXEC)
7823 pat = COND_EXEC_CODE (pat);
7824
7825 if (GET_CODE (pat) != SET)
7826 return -1;
7827
7828 reg = SET_DEST (pat);
7829 if (!REG_P (reg))
7830 return -1;
7831
7832 mem = SET_SRC (pat);
7833 if (GET_CODE (mem) == ZERO_EXTEND)
7834 mem = XEXP (mem, 0);
7835
7836 return get_spec_unspec_code (mem);
7837 }
7838
7839 /* If INSN is a speculative load, return a ds with the speculation types.
7840 Otherwise [if INSN is a normal instruction] return 0. */
7841 static ds_t
7842 ia64_get_insn_spec_ds (rtx insn)
7843 {
7844 int code = get_insn_spec_code (insn);
7845
7846 switch (code)
7847 {
7848 case UNSPEC_LDA:
7849 return BEGIN_DATA;
7850
7851 case UNSPEC_LDS:
7852 case UNSPEC_LDS_A:
7853 return BEGIN_CONTROL;
7854
7855 case UNSPEC_LDSA:
7856 return BEGIN_DATA | BEGIN_CONTROL;
7857
7858 default:
7859 return 0;
7860 }
7861 }
7862
7863 /* If INSN is a speculative load return a ds with the speculation types that
7864 will be checked.
7865 Otherwise [if INSN is a normal instruction] return 0. */
7866 static ds_t
7867 ia64_get_insn_checked_ds (rtx insn)
7868 {
7869 int code = get_insn_spec_code (insn);
7870
7871 switch (code)
7872 {
7873 case UNSPEC_LDA:
7874 return BEGIN_DATA | BEGIN_CONTROL;
7875
7876 case UNSPEC_LDS:
7877 return BEGIN_CONTROL;
7878
7879 case UNSPEC_LDS_A:
7880 case UNSPEC_LDSA:
7881 return BEGIN_DATA | BEGIN_CONTROL;
7882
7883 default:
7884 return 0;
7885 }
7886 }
7887
7888 /* Return a speculative load pattern for INSN with speculation types TS
7889 and machine mode index MODE_NO. The new pattern is built from the
7890 operands of INSN's current pattern and keeps INSN's COND_EXEC
7891 condition, if any. */
7892 static rtx
7893 ia64_gen_spec_load (rtx insn, ds_t ts, int mode_no)
7894 {
7895 rtx pat, new_pat;
7896 gen_func_t gen_load;
7897
7898 gen_load = get_spec_load_gen_function (ts, mode_no);
7899
7900 new_pat = gen_load (copy_rtx (recog_data.operand[0]),
7901 copy_rtx (recog_data.operand[1]));
7902
7903 pat = PATTERN (insn);
7904 if (GET_CODE (pat) == COND_EXEC)
7905 new_pat = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (COND_EXEC_TEST (pat)),
7906 new_pat);
7907
7908 return new_pat;
7909 }
7910
7911 static bool
7912 insn_can_be_in_speculative_p (rtx insn ATTRIBUTE_UNUSED,
7913 ds_t ds ATTRIBUTE_UNUSED)
7914 {
7915 return false;
7916 }
7917
7918 /* Implement targetm.sched.speculate_insn hook.
7919 Check if the INSN can be TS speculative.
7920 If 'no' - return -1.
7921 If 'yes' - generate speculative pattern in the NEW_PAT and return 1.
7922 If current pattern of the INSN already provides TS speculation,
7923 return 0. */
7924 static int
7925 ia64_speculate_insn (rtx insn, ds_t ts, rtx *new_pat)
7926 {
7927 int mode_no;
7928 int res;
7929
7930 gcc_assert (!(ts & ~SPECULATIVE));
7931
7932 if (ia64_spec_check_p (insn))
7933 return -1;
7934
7935 if ((ts & BE_IN_SPEC)
7936 && !insn_can_be_in_speculative_p (insn, ts))
7937 return -1;
7938
7939 mode_no = get_mode_no_for_insn (insn);
7940
7941 if (mode_no != SPEC_MODE_INVALID)
7942 {
7943 if (ia64_get_insn_spec_ds (insn) == ds_get_speculation_types (ts))
7944 res = 0;
7945 else
7946 {
7947 res = 1;
7948 *new_pat = ia64_gen_spec_load (insn, ts, mode_no);
7949 }
7950 }
7951 else
7952 res = -1;
7953
7954 return res;
7955 }
7956
7957 /* Return a function that will generate a check for speculation TS with mode
7958 MODE_NO.
7959 If simple check is needed, pass true for SIMPLE_CHECK_P.
7960 If clearing check is needed, pass true for CLEARING_CHECK_P. */
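/* Roughly: a simple check becomes an ld.c (clearing or non-clearing
   variant), a non-simple check of data speculation becomes a chk.a, and
   a non-simple check of control speculation becomes a chk.s; the tables
   below provide one generator per mode for each of these forms.  */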
7961 static gen_func_t
7962 get_spec_check_gen_function (ds_t ts, int mode_no,
7963 bool simple_check_p, bool clearing_check_p)
7964 {
7965 static gen_func_t gen_ld_c_clr[] = {
7966 gen_movbi_clr,
7967 gen_movqi_clr,
7968 gen_movhi_clr,
7969 gen_movsi_clr,
7970 gen_movdi_clr,
7971 gen_movsf_clr,
7972 gen_movdf_clr,
7973 gen_movxf_clr,
7974 gen_movti_clr,
7975 gen_zero_extendqidi2_clr,
7976 gen_zero_extendhidi2_clr,
7977 gen_zero_extendsidi2_clr,
7978 };
7979 static gen_func_t gen_ld_c_nc[] = {
7980 gen_movbi_nc,
7981 gen_movqi_nc,
7982 gen_movhi_nc,
7983 gen_movsi_nc,
7984 gen_movdi_nc,
7985 gen_movsf_nc,
7986 gen_movdf_nc,
7987 gen_movxf_nc,
7988 gen_movti_nc,
7989 gen_zero_extendqidi2_nc,
7990 gen_zero_extendhidi2_nc,
7991 gen_zero_extendsidi2_nc,
7992 };
7993 static gen_func_t gen_chk_a_clr[] = {
7994 gen_advanced_load_check_clr_bi,
7995 gen_advanced_load_check_clr_qi,
7996 gen_advanced_load_check_clr_hi,
7997 gen_advanced_load_check_clr_si,
7998 gen_advanced_load_check_clr_di,
7999 gen_advanced_load_check_clr_sf,
8000 gen_advanced_load_check_clr_df,
8001 gen_advanced_load_check_clr_xf,
8002 gen_advanced_load_check_clr_ti,
8003 gen_advanced_load_check_clr_di,
8004 gen_advanced_load_check_clr_di,
8005 gen_advanced_load_check_clr_di,
8006 };
8007 static gen_func_t gen_chk_a_nc[] = {
8008 gen_advanced_load_check_nc_bi,
8009 gen_advanced_load_check_nc_qi,
8010 gen_advanced_load_check_nc_hi,
8011 gen_advanced_load_check_nc_si,
8012 gen_advanced_load_check_nc_di,
8013 gen_advanced_load_check_nc_sf,
8014 gen_advanced_load_check_nc_df,
8015 gen_advanced_load_check_nc_xf,
8016 gen_advanced_load_check_nc_ti,
8017 gen_advanced_load_check_nc_di,
8018 gen_advanced_load_check_nc_di,
8019 gen_advanced_load_check_nc_di,
8020 };
8021 static gen_func_t gen_chk_s[] = {
8022 gen_speculation_check_bi,
8023 gen_speculation_check_qi,
8024 gen_speculation_check_hi,
8025 gen_speculation_check_si,
8026 gen_speculation_check_di,
8027 gen_speculation_check_sf,
8028 gen_speculation_check_df,
8029 gen_speculation_check_xf,
8030 gen_speculation_check_ti,
8031 gen_speculation_check_di,
8032 gen_speculation_check_di,
8033 gen_speculation_check_di,
8034 };
8035
8036 gen_func_t *gen_check;
8037
8038 if (ts & BEGIN_DATA)
8039 {
8040 /* We don't need recovery because even if this is ld.sa, an
8041 ALAT entry will be allocated only if the NAT bit is set to zero.
8042 So it is enough to use ld.c here. */
8043
8044 if (simple_check_p)
8045 {
8046 gcc_assert (mflag_sched_spec_ldc);
8047
8048 if (clearing_check_p)
8049 gen_check = gen_ld_c_clr;
8050 else
8051 gen_check = gen_ld_c_nc;
8052 }
8053 else
8054 {
8055 if (clearing_check_p)
8056 gen_check = gen_chk_a_clr;
8057 else
8058 gen_check = gen_chk_a_nc;
8059 }
8060 }
8061 else if (ts & BEGIN_CONTROL)
8062 {
8063 if (simple_check_p)
8064 /* We might want to use ld.sa -> ld.c instead of
8065 ld.s -> chk.s. */
8066 {
8067 gcc_assert (!ia64_needs_block_p (ts));
8068
8069 if (clearing_check_p)
8070 gen_check = gen_ld_c_clr;
8071 else
8072 gen_check = gen_ld_c_nc;
8073 }
8074 else
8075 {
8076 gen_check = gen_chk_s;
8077 }
8078 }
8079 else
8080 gcc_unreachable ();
8081
8082 gcc_assert (mode_no >= 0);
8083 return gen_check[mode_no];
8084 }
8085
8086 /* Return true if speculation types TS require a branchy recovery check. */
8087 static bool
8088 ia64_needs_block_p (ds_t ts)
8089 {
8090 if (ts & BEGIN_DATA)
8091 return !mflag_sched_spec_ldc;
8092
8093 gcc_assert ((ts & BEGIN_CONTROL) != 0);
8094
8095 return !(mflag_sched_spec_control_ldc && mflag_sched_spec_ldc);
8096 }
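/* When this returns true, a simple ld.c check cannot be used, so the
   caller has to pass a recovery label to ia64_gen_spec_check and a
   branchy chk.a/chk.s form is generated instead.  */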
8097
8098 /* Generate a recovery check pattern for INSN with speculation types DS.
8099 If LABEL is nonzero, generate a branchy recovery check that uses it.
8100 Otherwise, generate a simple check. */
8101 static rtx
8102 ia64_gen_spec_check (rtx insn, rtx label, ds_t ds)
8103 {
8104 rtx op1, pat, check_pat;
8105 gen_func_t gen_check;
8106 int mode_no;
8107
8108 mode_no = get_mode_no_for_insn (insn);
8109 gcc_assert (mode_no >= 0);
8110
8111 if (label)
8112 op1 = label;
8113 else
8114 {
8115 gcc_assert (!ia64_needs_block_p (ds));
8116 op1 = copy_rtx (recog_data.operand[1]);
8117 }
8118
8119 gen_check = get_spec_check_gen_function (ds, mode_no, label == NULL_RTX,
8120 true);
8121
8122 check_pat = gen_check (copy_rtx (recog_data.operand[0]), op1);
8123
8124 pat = PATTERN (insn);
8125 if (GET_CODE (pat) == COND_EXEC)
8126 check_pat = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (COND_EXEC_TEST (pat)),
8127 check_pat);
8128
8129 return check_pat;
8130 }
8131
8132 /* Return nonzero if X is a speculation check insn. */
8133 static int
8134 ia64_spec_check_p (rtx x)
8135 {
8136 x = PATTERN (x);
8137 if (GET_CODE (x) == COND_EXEC)
8138 x = COND_EXEC_CODE (x);
8139 if (GET_CODE (x) == SET)
8140 return ia64_spec_check_src_p (SET_SRC (x));
8141 return 0;
8142 }
8143
8144 /* Return nonzero if SRC belongs to a speculation recovery check. */
8145 static int
8146 ia64_spec_check_src_p (rtx src)
8147 {
8148 if (GET_CODE (src) == IF_THEN_ELSE)
8149 {
8150 rtx t;
8151
8152 t = XEXP (src, 0);
8153 if (GET_CODE (t) == NE)
8154 {
8155 t = XEXP (t, 0);
8156
8157 if (GET_CODE (t) == UNSPEC)
8158 {
8159 int code;
8160
8161 code = XINT (t, 1);
8162
8163 if (code == UNSPEC_LDCCLR
8164 || code == UNSPEC_LDCNC
8165 || code == UNSPEC_CHKACLR
8166 || code == UNSPEC_CHKANC
8167 || code == UNSPEC_CHKS)
8168 {
8169 gcc_assert (code != 0);
8170 return code;
8171 }
8172 }
8173 }
8174 }
8175 return 0;
8176 }
8177 \f
8178
8179 /* The following page contains abstract data `bundle states' which are
8180 used for bundling insns (inserting nops and template generation). */
8181
8182 /* The following describes state of insn bundling. */
8183
8184 struct bundle_state
8185 {
8186 /* Unique bundle state number to identify them in the debugging
8187 output */
8188 int unique_num;
8189 rtx insn; /* corresponding insn, NULL for the 1st and the last state */
8190 /* number of nops before and after the insn */
8191 short before_nops_num, after_nops_num;
8192 int insn_num; /* insn number (0 for the initial state, 1 for the 1st
8193 insn) */
8194 int cost; /* cost of the state in cycles */
8195 int accumulated_insns_num; /* number of all previous insns including
8196 nops. L is considered as 2 insns */
8197 int branch_deviation; /* deviation of previous branches from 3rd slots */
8198 int middle_bundle_stops; /* number of stop bits in the middle of bundles */
8199 struct bundle_state *next; /* next state with the same insn_num */
8200 struct bundle_state *originator; /* originator (previous insn state) */
8201 /* All bundle states are in the following chain. */
8202 struct bundle_state *allocated_states_chain;
8203 /* The DFA State after issuing the insn and the nops. */
8204 state_t dfa_state;
8205 };
8206
8207 /* The following maps an insn number to the corresponding bundle state. */
8208
8209 static struct bundle_state **index_to_bundle_states;
8210
8211 /* The unique number of next bundle state. */
8212
8213 static int bundle_states_num;
8214
8215 /* All allocated bundle states are in the following chain. */
8216
8217 static struct bundle_state *allocated_bundle_states_chain;
8218
8219 /* All allocated but not used bundle states are in the following
8220 chain. */
8221
8222 static struct bundle_state *free_bundle_state_chain;
8223
8224
8225 /* The following function returns a free bundle state. */
8226
8227 static struct bundle_state *
8228 get_free_bundle_state (void)
8229 {
8230 struct bundle_state *result;
8231
8232 if (free_bundle_state_chain != NULL)
8233 {
8234 result = free_bundle_state_chain;
8235 free_bundle_state_chain = result->next;
8236 }
8237 else
8238 {
8239 result = XNEW (struct bundle_state);
8240 result->dfa_state = xmalloc (dfa_state_size);
8241 result->allocated_states_chain = allocated_bundle_states_chain;
8242 allocated_bundle_states_chain = result;
8243 }
8244 result->unique_num = bundle_states_num++;
8245 return result;
8246
8247 }
8248
8249 /* The following function frees given bundle state. */
8250
8251 static void
8252 free_bundle_state (struct bundle_state *state)
8253 {
8254 state->next = free_bundle_state_chain;
8255 free_bundle_state_chain = state;
8256 }
8257
8258 /* Start work with abstract data `bundle states'. */
8259
8260 static void
8261 initiate_bundle_states (void)
8262 {
8263 bundle_states_num = 0;
8264 free_bundle_state_chain = NULL;
8265 allocated_bundle_states_chain = NULL;
8266 }
8267
8268 /* Finish work with abstract data `bundle states'. */
8269
8270 static void
8271 finish_bundle_states (void)
8272 {
8273 struct bundle_state *curr_state, *next_state;
8274
8275 for (curr_state = allocated_bundle_states_chain;
8276 curr_state != NULL;
8277 curr_state = next_state)
8278 {
8279 next_state = curr_state->allocated_states_chain;
8280 free (curr_state->dfa_state);
8281 free (curr_state);
8282 }
8283 }
8284
8285 /* Hash table of the bundle states. The key is dfa_state and insn_num
8286 of the bundle states. */
8287
8288 static htab_t bundle_state_table;
8289
8290 /* The function returns hash of BUNDLE_STATE. */
8291
8292 static unsigned
8293 bundle_state_hash (const void *bundle_state)
8294 {
8295 const struct bundle_state *const state
8296 = (const struct bundle_state *) bundle_state;
8297 unsigned result, i;
8298
8299 for (result = i = 0; i < dfa_state_size; i++)
8300 result += (((unsigned char *) state->dfa_state) [i]
8301 << ((i % CHAR_BIT) * 3 + CHAR_BIT));
8302 return result + state->insn_num;
8303 }
8304
8305 /* The function returns nonzero if the bundle state keys are equal. */
8306
8307 static int
8308 bundle_state_eq_p (const void *bundle_state_1, const void *bundle_state_2)
8309 {
8310 const struct bundle_state *const state1
8311 = (const struct bundle_state *) bundle_state_1;
8312 const struct bundle_state *const state2
8313 = (const struct bundle_state *) bundle_state_2;
8314
8315 return (state1->insn_num == state2->insn_num
8316 && memcmp (state1->dfa_state, state2->dfa_state,
8317 dfa_state_size) == 0);
8318 }
8319
8320 /* The function inserts the BUNDLE_STATE into the hash table. The
8321 function returns nonzero if the bundle has been inserted into the
8322 table. The table contains the best bundle state with the given key. */
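/* "Best" is decided lexicographically: lower cost first, then fewer
   accumulated insns (i.e. fewer inserted nops), then smaller branch
   deviation, and finally fewer stop bits in the middle of bundles.  */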
8323
8324 static int
8325 insert_bundle_state (struct bundle_state *bundle_state)
8326 {
8327 void **entry_ptr;
8328
8329 entry_ptr = htab_find_slot (bundle_state_table, bundle_state, INSERT);
8330 if (*entry_ptr == NULL)
8331 {
8332 bundle_state->next = index_to_bundle_states [bundle_state->insn_num];
8333 index_to_bundle_states [bundle_state->insn_num] = bundle_state;
8334 *entry_ptr = (void *) bundle_state;
8335 return TRUE;
8336 }
8337 else if (bundle_state->cost < ((struct bundle_state *) *entry_ptr)->cost
8338 || (bundle_state->cost == ((struct bundle_state *) *entry_ptr)->cost
8339 && (((struct bundle_state *)*entry_ptr)->accumulated_insns_num
8340 > bundle_state->accumulated_insns_num
8341 || (((struct bundle_state *)
8342 *entry_ptr)->accumulated_insns_num
8343 == bundle_state->accumulated_insns_num
8344 && (((struct bundle_state *)
8345 *entry_ptr)->branch_deviation
8346 > bundle_state->branch_deviation
8347 || (((struct bundle_state *)
8348 *entry_ptr)->branch_deviation
8349 == bundle_state->branch_deviation
8350 && ((struct bundle_state *)
8351 *entry_ptr)->middle_bundle_stops
8352 > bundle_state->middle_bundle_stops))))))
8353
8354 {
8355 struct bundle_state temp;
8356
8357 temp = *(struct bundle_state *) *entry_ptr;
8358 *(struct bundle_state *) *entry_ptr = *bundle_state;
8359 ((struct bundle_state *) *entry_ptr)->next = temp.next;
8360 *bundle_state = temp;
8361 }
8362 return FALSE;
8363 }
8364
8365 /* Start work with the hash table. */
8366
8367 static void
8368 initiate_bundle_state_table (void)
8369 {
8370 bundle_state_table = htab_create (50, bundle_state_hash, bundle_state_eq_p,
8371 (htab_del) 0);
8372 }
8373
8374 /* Finish work with the hash table. */
8375
8376 static void
8377 finish_bundle_state_table (void)
8378 {
8379 htab_delete (bundle_state_table);
8380 }
8381
8382 \f
8383
8384 /* The following variable is an insn `nop' used to check bundle states
8385 with different numbers of inserted nops. */
8386
8387 static rtx ia64_nop;
8388
8389 /* The following function tries to issue NOPS_NUM nops for the current
8390 state without advancing the processor cycle. If it fails, the
8391 function returns FALSE and frees the current state. */
8392
8393 static int
8394 try_issue_nops (struct bundle_state *curr_state, int nops_num)
8395 {
8396 int i;
8397
8398 for (i = 0; i < nops_num; i++)
8399 if (state_transition (curr_state->dfa_state, ia64_nop) >= 0)
8400 {
8401 free_bundle_state (curr_state);
8402 return FALSE;
8403 }
8404 return TRUE;
8405 }
8406
8407 /* The following function tries to issue INSN for the current
8408 state without advancing the processor cycle. If it fails, the
8409 function returns FALSE and frees the current state. */
8410
8411 static int
8412 try_issue_insn (struct bundle_state *curr_state, rtx insn)
8413 {
8414 if (insn && state_transition (curr_state->dfa_state, insn) >= 0)
8415 {
8416 free_bundle_state (curr_state);
8417 return FALSE;
8418 }
8419 return TRUE;
8420 }
8421
8422 /* The following function tries to issue BEFORE_NOPS_NUM nops and INSN
8423 starting with ORIGINATOR without advancing the processor cycle. If
8424 TRY_BUNDLE_END_P is TRUE, the function also (or only, if
8425 ONLY_BUNDLE_END_P is TRUE) tries to issue nops to fill up the whole
8426 bundle. If it was successful, the function creates a new bundle state
8427 and inserts it into the hash table and into `index_to_bundle_states'. */
8428
8429 static void
8430 issue_nops_and_insn (struct bundle_state *originator, int before_nops_num,
8431 rtx insn, int try_bundle_end_p, int only_bundle_end_p)
8432 {
8433 struct bundle_state *curr_state;
8434
8435 curr_state = get_free_bundle_state ();
8436 memcpy (curr_state->dfa_state, originator->dfa_state, dfa_state_size);
8437 curr_state->insn = insn;
8438 curr_state->insn_num = originator->insn_num + 1;
8439 curr_state->cost = originator->cost;
8440 curr_state->originator = originator;
8441 curr_state->before_nops_num = before_nops_num;
8442 curr_state->after_nops_num = 0;
8443 curr_state->accumulated_insns_num
8444 = originator->accumulated_insns_num + before_nops_num;
8445 curr_state->branch_deviation = originator->branch_deviation;
8446 curr_state->middle_bundle_stops = originator->middle_bundle_stops;
8447 gcc_assert (insn);
8448 if (INSN_CODE (insn) == CODE_FOR_insn_group_barrier)
8449 {
8450 gcc_assert (GET_MODE (insn) != TImode);
8451 if (!try_issue_nops (curr_state, before_nops_num))
8452 return;
8453 if (!try_issue_insn (curr_state, insn))
8454 return;
8455 memcpy (temp_dfa_state, curr_state->dfa_state, dfa_state_size);
8456 if (curr_state->accumulated_insns_num % 3 != 0)
8457 curr_state->middle_bundle_stops++;
8458 if (state_transition (temp_dfa_state, dfa_pre_cycle_insn) >= 0
8459 && curr_state->accumulated_insns_num % 3 != 0)
8460 {
8461 free_bundle_state (curr_state);
8462 return;
8463 }
8464 }
8465 else if (GET_MODE (insn) != TImode)
8466 {
8467 if (!try_issue_nops (curr_state, before_nops_num))
8468 return;
8469 if (!try_issue_insn (curr_state, insn))
8470 return;
8471 curr_state->accumulated_insns_num++;
8472 gcc_assert (GET_CODE (PATTERN (insn)) != ASM_INPUT
8473 && asm_noperands (PATTERN (insn)) < 0);
8474
8475 if (ia64_safe_type (insn) == TYPE_L)
8476 curr_state->accumulated_insns_num++;
8477 }
8478 else
8479 {
8480 /* If this is an insn that must be first in a group, then don't allow
8481 nops to be emitted before it. Currently, alloc is the only such
8482 supported instruction. */
8483 /* ??? The bundling automatons should handle this for us, but they do
8484 not yet have support for the first_insn attribute. */
8485 if (before_nops_num > 0 && get_attr_first_insn (insn) == FIRST_INSN_YES)
8486 {
8487 free_bundle_state (curr_state);
8488 return;
8489 }
8490
8491 state_transition (curr_state->dfa_state, dfa_pre_cycle_insn);
8492 state_transition (curr_state->dfa_state, NULL);
8493 curr_state->cost++;
8494 if (!try_issue_nops (curr_state, before_nops_num))
8495 return;
8496 if (!try_issue_insn (curr_state, insn))
8497 return;
8498 curr_state->accumulated_insns_num++;
8499 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
8500 || asm_noperands (PATTERN (insn)) >= 0)
8501 {
8502 /* Finish bundle containing asm insn. */
8503 curr_state->after_nops_num
8504 = 3 - curr_state->accumulated_insns_num % 3;
8505 curr_state->accumulated_insns_num
8506 += 3 - curr_state->accumulated_insns_num % 3;
8507 }
8508 else if (ia64_safe_type (insn) == TYPE_L)
8509 curr_state->accumulated_insns_num++;
8510 }
8511 if (ia64_safe_type (insn) == TYPE_B)
8512 curr_state->branch_deviation
8513 += 2 - (curr_state->accumulated_insns_num - 1) % 3;
8514 if (try_bundle_end_p && curr_state->accumulated_insns_num % 3 != 0)
8515 {
8516 if (!only_bundle_end_p && insert_bundle_state (curr_state))
8517 {
8518 state_t dfa_state;
8519 struct bundle_state *curr_state1;
8520 struct bundle_state *allocated_states_chain;
8521
8522 curr_state1 = get_free_bundle_state ();
8523 dfa_state = curr_state1->dfa_state;
8524 allocated_states_chain = curr_state1->allocated_states_chain;
8525 *curr_state1 = *curr_state;
8526 curr_state1->dfa_state = dfa_state;
8527 curr_state1->allocated_states_chain = allocated_states_chain;
8528 memcpy (curr_state1->dfa_state, curr_state->dfa_state,
8529 dfa_state_size);
8530 curr_state = curr_state1;
8531 }
8532 if (!try_issue_nops (curr_state,
8533 3 - curr_state->accumulated_insns_num % 3))
8534 return;
8535 curr_state->after_nops_num
8536 = 3 - curr_state->accumulated_insns_num % 3;
8537 curr_state->accumulated_insns_num
8538 += 3 - curr_state->accumulated_insns_num % 3;
8539 }
8540 if (!insert_bundle_state (curr_state))
8541 free_bundle_state (curr_state);
8542 return;
8543 }
8544
8545 /* The following function returns the position in the two-bundle window
8546 for the given STATE. */
8547
8548 static int
8549 get_max_pos (state_t state)
8550 {
8551 if (cpu_unit_reservation_p (state, pos_6))
8552 return 6;
8553 else if (cpu_unit_reservation_p (state, pos_5))
8554 return 5;
8555 else if (cpu_unit_reservation_p (state, pos_4))
8556 return 4;
8557 else if (cpu_unit_reservation_p (state, pos_3))
8558 return 3;
8559 else if (cpu_unit_reservation_p (state, pos_2))
8560 return 2;
8561 else if (cpu_unit_reservation_p (state, pos_1))
8562 return 1;
8563 else
8564 return 0;
8565 }
8566
8567 /* The function returns the code of a possible template for the given
8568 position and state. The function should be called only with the two
8569 position values 3 or 6. We avoid generating F NOPs by putting
8570 templates containing F insns at the end of the template search
8571 because of an undocumented anomaly in McKinley-derived cores which can
8572 cause stalls if an F-unit insn (including a NOP) is issued within a
8573 six-cycle window after reading certain application registers (such
8574 as ar.bsp). Furthermore, power considerations also argue against
8575 the use of F-unit instructions unless they're really needed. */
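/* The codes returned below name the bundle templates, as the cpu unit
   names tested in the function show: 0 .mii, 1 .mmi, 2 .mfi, 3 .mmf,
   4 .bbb, 5 .mbb, 6 .mib, 7 .mmb, 8 .mfb and 9 .mlx.  */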
8576
8577 static int
8578 get_template (state_t state, int pos)
8579 {
8580 switch (pos)
8581 {
8582 case 3:
8583 if (cpu_unit_reservation_p (state, _0mmi_))
8584 return 1;
8585 else if (cpu_unit_reservation_p (state, _0mii_))
8586 return 0;
8587 else if (cpu_unit_reservation_p (state, _0mmb_))
8588 return 7;
8589 else if (cpu_unit_reservation_p (state, _0mib_))
8590 return 6;
8591 else if (cpu_unit_reservation_p (state, _0mbb_))
8592 return 5;
8593 else if (cpu_unit_reservation_p (state, _0bbb_))
8594 return 4;
8595 else if (cpu_unit_reservation_p (state, _0mmf_))
8596 return 3;
8597 else if (cpu_unit_reservation_p (state, _0mfi_))
8598 return 2;
8599 else if (cpu_unit_reservation_p (state, _0mfb_))
8600 return 8;
8601 else if (cpu_unit_reservation_p (state, _0mlx_))
8602 return 9;
8603 else
8604 gcc_unreachable ();
8605 case 6:
8606 if (cpu_unit_reservation_p (state, _1mmi_))
8607 return 1;
8608 else if (cpu_unit_reservation_p (state, _1mii_))
8609 return 0;
8610 else if (cpu_unit_reservation_p (state, _1mmb_))
8611 return 7;
8612 else if (cpu_unit_reservation_p (state, _1mib_))
8613 return 6;
8614 else if (cpu_unit_reservation_p (state, _1mbb_))
8615 return 5;
8616 else if (cpu_unit_reservation_p (state, _1bbb_))
8617 return 4;
8618 else if (_1mmf_ >= 0 && cpu_unit_reservation_p (state, _1mmf_))
8619 return 3;
8620 else if (cpu_unit_reservation_p (state, _1mfi_))
8621 return 2;
8622 else if (cpu_unit_reservation_p (state, _1mfb_))
8623 return 8;
8624 else if (cpu_unit_reservation_p (state, _1mlx_))
8625 return 9;
8626 else
8627 gcc_unreachable ();
8628 default:
8629 gcc_unreachable ();
8630 }
8631 }
8632
8633 /* True when INSN is important for bundling. */
8634 static bool
8635 important_for_bundling_p (rtx insn)
8636 {
8637 return (INSN_P (insn)
8638 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
8639 && GET_CODE (PATTERN (insn)) != USE
8640 && GET_CODE (PATTERN (insn)) != CLOBBER);
8641 }
8642
8643 /* The following function returns the first insn important for insn
8644 bundling, starting at INSN and before TAIL. */
8645
8646 static rtx
8647 get_next_important_insn (rtx insn, rtx tail)
8648 {
8649 for (; insn && insn != tail; insn = NEXT_INSN (insn))
8650 if (important_for_bundling_p (insn))
8651 return insn;
8652 return NULL_RTX;
8653 }
8654
8655 /* Add a bundle selector TEMPLATE0 before INSN. */
8656
8657 static void
8658 ia64_add_bundle_selector_before (int template0, rtx insn)
8659 {
8660 rtx b = gen_bundle_selector (GEN_INT (template0));
8661
8662 ia64_emit_insn_before (b, insn);
8663 #if NR_BUNDLES == 10
8664 if ((template0 == 4 || template0 == 5)
8665 && ia64_except_unwind_info (&global_options) == UI_TARGET)
8666 {
8667 int i;
8668 rtx note = NULL_RTX;
8669
8670 /* In .mbb and .bbb bundles, check if CALL_INSN isn't in the
8671 first or second slot. If it is and has a REG_EH_REGION note, copy it
8672 to following nops, as br.call sets rp to the address of following
8673 bundle and therefore an EH region end must be on a bundle
8674 boundary. */
8675 insn = PREV_INSN (insn);
8676 for (i = 0; i < 3; i++)
8677 {
8678 do
8679 insn = next_active_insn (insn);
8680 while (GET_CODE (insn) == INSN
8681 && get_attr_empty (insn) == EMPTY_YES);
8682 if (GET_CODE (insn) == CALL_INSN)
8683 note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
8684 else if (note)
8685 {
8686 int code;
8687
8688 gcc_assert ((code = recog_memoized (insn)) == CODE_FOR_nop
8689 || code == CODE_FOR_nop_b);
8690 if (find_reg_note (insn, REG_EH_REGION, NULL_RTX))
8691 note = NULL_RTX;
8692 else
8693 add_reg_note (insn, REG_EH_REGION, XEXP (note, 0));
8694 }
8695 }
8696 }
8697 #endif
8698 }
8699
8700 /* The following function does insn bundling. Bundling means
8701 inserting templates and nop insns to fit insn groups into permitted
8702 templates. Instruction scheduling uses NDFA (non-deterministic
8703 finite automata) encoding informations about the templates and the
8704 inserted nops. Nondeterminism of the automata permits follows
8705 all possible insn sequences very fast.
8706
8707 Unfortunately it is not possible to get information about inserting
8708 nop insns and used templates from the automata states. The
8709 automaton only says that we can issue an insn, possibly inserting
8710 some nops before it and using some template. Therefore insn
8711 bundling in this function is implemented by using a DFA
8712 (deterministic finite automaton). We follow all possible insn
8713 sequences by inserting 0-2 nops (that is what the NDFA describes for
8714 insn scheduling) before/after each insn being bundled. We know the
8715 start of simulated processor cycle from insn scheduling (insn
8716 starting a new cycle has TImode).
8717
8718 Simple implementation of insn bundling would create enormous
8719 number of possible insn sequences satisfying information about new
8720 cycle ticks taken from the insn scheduling. To make the algorithm
8721 practical we use dynamic programming. Each decision (about
8722 inserting nops and implicitly about previous decisions) is described
8723 by structure bundle_state (see above). If we generate the same
8724 bundle state (key is automaton state after issuing the insns and
8725 nops for it), we reuse the already generated one. As a consequence we
8726 reject some decisions which cannot improve the solution and
8727 reduce the memory used by the algorithm.
8728
8729 When we reach the end of EBB (extended basic block), we choose the
8730 best sequence and then, moving back in EBB, insert templates for
8731 the best alternative. The templates are taken from querying
8732 automaton state for each insn in chosen bundle states.
8733
8734 So the algorithm makes two (forward and backward) passes through
8735 EBB. */
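/* A sketch of the forward pass in terms of the helpers above: for every
   important insn we take each surviving state with insn_num - 1 issued
   insns and call issue_nops_and_insn with 0, 1 and (for some insn types)
   2 nops placed before the insn, possibly also filling up the bundle
   end; insert_bundle_state then keeps only the best state for each
   (DFA state, insn_num) key.  */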
8736
8737 static void
8738 bundling (FILE *dump, int verbose, rtx prev_head_insn, rtx tail)
8739 {
8740 struct bundle_state *curr_state, *next_state, *best_state;
8741 rtx insn, next_insn;
8742 int insn_num;
8743 int i, bundle_end_p, only_bundle_end_p, asm_p;
8744 int pos = 0, max_pos, template0, template1;
8745 rtx b;
8746 rtx nop;
8747 enum attr_type type;
8748
8749 insn_num = 0;
8750 /* Count insns in the EBB. */
8751 for (insn = NEXT_INSN (prev_head_insn);
8752 insn && insn != tail;
8753 insn = NEXT_INSN (insn))
8754 if (INSN_P (insn))
8755 insn_num++;
8756 if (insn_num == 0)
8757 return;
8758 bundling_p = 1;
8759 dfa_clean_insn_cache ();
8760 initiate_bundle_state_table ();
8761 index_to_bundle_states = XNEWVEC (struct bundle_state *, insn_num + 2);
8762 /* First (forward) pass -- generation of bundle states. */
8763 curr_state = get_free_bundle_state ();
8764 curr_state->insn = NULL;
8765 curr_state->before_nops_num = 0;
8766 curr_state->after_nops_num = 0;
8767 curr_state->insn_num = 0;
8768 curr_state->cost = 0;
8769 curr_state->accumulated_insns_num = 0;
8770 curr_state->branch_deviation = 0;
8771 curr_state->middle_bundle_stops = 0;
8772 curr_state->next = NULL;
8773 curr_state->originator = NULL;
8774 state_reset (curr_state->dfa_state);
8775 index_to_bundle_states [0] = curr_state;
8776 insn_num = 0;
8777 /* Shift the cycle mark if it is put on an insn which could be ignored. */
8778 for (insn = NEXT_INSN (prev_head_insn);
8779 insn != tail;
8780 insn = NEXT_INSN (insn))
8781 if (INSN_P (insn)
8782 && (ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
8783 || GET_CODE (PATTERN (insn)) == USE
8784 || GET_CODE (PATTERN (insn)) == CLOBBER)
8785 && GET_MODE (insn) == TImode)
8786 {
8787 PUT_MODE (insn, VOIDmode);
8788 for (next_insn = NEXT_INSN (insn);
8789 next_insn != tail;
8790 next_insn = NEXT_INSN (next_insn))
8791 if (INSN_P (next_insn)
8792 && ia64_safe_itanium_class (next_insn) != ITANIUM_CLASS_IGNORE
8793 && GET_CODE (PATTERN (next_insn)) != USE
8794 && GET_CODE (PATTERN (next_insn)) != CLOBBER
8795 && INSN_CODE (next_insn) != CODE_FOR_insn_group_barrier)
8796 {
8797 PUT_MODE (next_insn, TImode);
8798 break;
8799 }
8800 }
8801 /* Forward pass: generation of bundle states. */
8802 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
8803 insn != NULL_RTX;
8804 insn = next_insn)
8805 {
8806 gcc_assert (INSN_P (insn)
8807 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
8808 && GET_CODE (PATTERN (insn)) != USE
8809 && GET_CODE (PATTERN (insn)) != CLOBBER);
8810 type = ia64_safe_type (insn);
8811 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
8812 insn_num++;
8813 index_to_bundle_states [insn_num] = NULL;
8814 for (curr_state = index_to_bundle_states [insn_num - 1];
8815 curr_state != NULL;
8816 curr_state = next_state)
8817 {
8818 pos = curr_state->accumulated_insns_num % 3;
8819 next_state = curr_state->next;
8820 /* We must fill up the current bundle in order to start a
8821 subsequent asm insn in a new bundle. An asm insn is always
8822 placed in a separate bundle. */
8823 only_bundle_end_p
8824 = (next_insn != NULL_RTX
8825 && INSN_CODE (insn) == CODE_FOR_insn_group_barrier
8826 && ia64_safe_type (next_insn) == TYPE_UNKNOWN);
8827 /* We may fill up the current bundle if it is the cycle end
8828 without a group barrier. */
8829 bundle_end_p
8830 = (only_bundle_end_p || next_insn == NULL_RTX
8831 || (GET_MODE (next_insn) == TImode
8832 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier));
8833 if (type == TYPE_F || type == TYPE_B || type == TYPE_L
8834 || type == TYPE_S)
8835 issue_nops_and_insn (curr_state, 2, insn, bundle_end_p,
8836 only_bundle_end_p);
8837 issue_nops_and_insn (curr_state, 1, insn, bundle_end_p,
8838 only_bundle_end_p);
8839 issue_nops_and_insn (curr_state, 0, insn, bundle_end_p,
8840 only_bundle_end_p);
8841 }
8842 gcc_assert (index_to_bundle_states [insn_num]);
8843 for (curr_state = index_to_bundle_states [insn_num];
8844 curr_state != NULL;
8845 curr_state = curr_state->next)
8846 if (verbose >= 2 && dump)
8847 {
8848 /* This structure is taken from generated code of the
8849 pipeline hazard recognizer (see file insn-attrtab.c).
8850 Please don't forget to change the structure if a new
8851 automaton is added to the .md file. */
8852 struct DFA_chip
8853 {
8854 unsigned short one_automaton_state;
8855 unsigned short oneb_automaton_state;
8856 unsigned short two_automaton_state;
8857 unsigned short twob_automaton_state;
8858 };
8859
8860 fprintf
8861 (dump,
8862 "// Bundle state %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, mid.stops %d state %d) for %d\n",
8863 curr_state->unique_num,
8864 (curr_state->originator == NULL
8865 ? -1 : curr_state->originator->unique_num),
8866 curr_state->cost,
8867 curr_state->before_nops_num, curr_state->after_nops_num,
8868 curr_state->accumulated_insns_num, curr_state->branch_deviation,
8869 curr_state->middle_bundle_stops,
8870 ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state,
8871 INSN_UID (insn));
8872 }
8873 }
8874
8875 /* We should find a solution because the 2nd insn scheduling has
8876 found one. */
8877 gcc_assert (index_to_bundle_states [insn_num]);
8878 /* Find a state corresponding to the best insn sequence. */
8879 best_state = NULL;
8880 for (curr_state = index_to_bundle_states [insn_num];
8881 curr_state != NULL;
8882 curr_state = curr_state->next)
8883 /* We are only looking at states whose last bundle is fully filled
8884 up. First we prefer insn sequences with minimal cost, then
8885 those with minimal inserted nops and finally those with branch insns
8886 placed in the 3rd slots. */
8887 if (curr_state->accumulated_insns_num % 3 == 0
8888 && (best_state == NULL || best_state->cost > curr_state->cost
8889 || (best_state->cost == curr_state->cost
8890 && (curr_state->accumulated_insns_num
8891 < best_state->accumulated_insns_num
8892 || (curr_state->accumulated_insns_num
8893 == best_state->accumulated_insns_num
8894 && (curr_state->branch_deviation
8895 < best_state->branch_deviation
8896 || (curr_state->branch_deviation
8897 == best_state->branch_deviation
8898 && curr_state->middle_bundle_stops
8899 < best_state->middle_bundle_stops)))))))
8900 best_state = curr_state;
8901 /* Second (backward) pass: adding nops and templates. */
8902 gcc_assert (best_state);
8903 insn_num = best_state->before_nops_num;
8904 template0 = template1 = -1;
8905 for (curr_state = best_state;
8906 curr_state->originator != NULL;
8907 curr_state = curr_state->originator)
8908 {
8909 insn = curr_state->insn;
8910 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
8911 || asm_noperands (PATTERN (insn)) >= 0);
8912 insn_num++;
8913 if (verbose >= 2 && dump)
8914 {
8915 struct DFA_chip
8916 {
8917 unsigned short one_automaton_state;
8918 unsigned short oneb_automaton_state;
8919 unsigned short two_automaton_state;
8920 unsigned short twob_automaton_state;
8921 };
8922
8923 fprintf
8924 (dump,
8925 "// Best %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, mid.stops %d, state %d) for %d\n",
8926 curr_state->unique_num,
8927 (curr_state->originator == NULL
8928 ? -1 : curr_state->originator->unique_num),
8929 curr_state->cost,
8930 curr_state->before_nops_num, curr_state->after_nops_num,
8931 curr_state->accumulated_insns_num, curr_state->branch_deviation,
8932 curr_state->middle_bundle_stops,
8933 ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state,
8934 INSN_UID (insn));
8935 }
8936 /* Find the position in the current bundle window. The window can
8937 contain at most two bundles. A two-bundle window means that
8938 the processor will make two bundle rotations. */
8939 max_pos = get_max_pos (curr_state->dfa_state);
8940 if (max_pos == 6
8941 /* The following (negative template number) means that the
8942 processor did one bundle rotation. */
8943 || (max_pos == 3 && template0 < 0))
8944 {
8945 /* We are at the end of the window -- find template(s) for
8946 its bundle(s). */
8947 pos = max_pos;
8948 if (max_pos == 3)
8949 template0 = get_template (curr_state->dfa_state, 3);
8950 else
8951 {
8952 template1 = get_template (curr_state->dfa_state, 3);
8953 template0 = get_template (curr_state->dfa_state, 6);
8954 }
8955 }
8956 if (max_pos > 3 && template1 < 0)
8957 /* It may happen when we have the stop inside a bundle. */
8958 {
8959 gcc_assert (pos <= 3);
8960 template1 = get_template (curr_state->dfa_state, 3);
8961 pos += 3;
8962 }
8963 if (!asm_p)
8964 /* Emit nops after the current insn. */
8965 for (i = 0; i < curr_state->after_nops_num; i++)
8966 {
8967 nop = gen_nop ();
8968 emit_insn_after (nop, insn);
8969 pos--;
8970 gcc_assert (pos >= 0);
8971 if (pos % 3 == 0)
8972 {
8973 /* We are at the start of a bundle: emit the template
8974 (it should be defined). */
8975 gcc_assert (template0 >= 0);
8976 ia64_add_bundle_selector_before (template0, nop);
8977 /* If we have two bundle window, we make one bundle
8978 rotation. Otherwise template0 will be undefined
8979 (negative value). */
8980 template0 = template1;
8981 template1 = -1;
8982 }
8983 }
8984 /* Move the position backward in the window. A group barrier has
8985 no slot. An asm insn takes up the whole bundle. */
8986 if (INSN_CODE (insn) != CODE_FOR_insn_group_barrier
8987 && GET_CODE (PATTERN (insn)) != ASM_INPUT
8988 && asm_noperands (PATTERN (insn)) < 0)
8989 pos--;
8990 /* Long insn takes 2 slots. */
8991 if (ia64_safe_type (insn) == TYPE_L)
8992 pos--;
8993 gcc_assert (pos >= 0);
8994 if (pos % 3 == 0
8995 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier
8996 && GET_CODE (PATTERN (insn)) != ASM_INPUT
8997 && asm_noperands (PATTERN (insn)) < 0)
8998 {
8999 /* The current insn is at the bundle start: emit the
9000 template. */
9001 gcc_assert (template0 >= 0);
9002 ia64_add_bundle_selector_before (template0, insn);
9003 b = PREV_INSN (insn);
9004 insn = b;
9005 /* See comment above in analogous place for emitting nops
9006 after the insn. */
9007 template0 = template1;
9008 template1 = -1;
9009 }
9010 /* Emit nops before the current insn. */
9011 for (i = 0; i < curr_state->before_nops_num; i++)
9012 {
9013 nop = gen_nop ();
9014 ia64_emit_insn_before (nop, insn);
9015 nop = PREV_INSN (insn);
9016 insn = nop;
9017 pos--;
9018 gcc_assert (pos >= 0);
9019 if (pos % 3 == 0)
9020 {
9021 /* See comment above in analogous place for emitting nops
9022 after the insn. */
9023 gcc_assert (template0 >= 0);
9024 ia64_add_bundle_selector_before (template0, insn);
9025 b = PREV_INSN (insn);
9026 insn = b;
9027 template0 = template1;
9028 template1 = -1;
9029 }
9030 }
9031 }
9032
9033 #ifdef ENABLE_CHECKING
9034 {
9035 /* Assert that middle_bundle_stops was calculated correctly. */
9036 int num = best_state->middle_bundle_stops;
9037 bool start_bundle = true, end_bundle = false;
9038
9039 for (insn = NEXT_INSN (prev_head_insn);
9040 insn && insn != tail;
9041 insn = NEXT_INSN (insn))
9042 {
9043 if (!INSN_P (insn))
9044 continue;
9045 if (recog_memoized (insn) == CODE_FOR_bundle_selector)
9046 start_bundle = true;
9047 else
9048 {
9049 rtx next_insn;
9050
9051 for (next_insn = NEXT_INSN (insn);
9052 next_insn && next_insn != tail;
9053 next_insn = NEXT_INSN (next_insn))
9054 if (INSN_P (next_insn)
9055 && (ia64_safe_itanium_class (next_insn)
9056 != ITANIUM_CLASS_IGNORE
9057 || recog_memoized (next_insn)
9058 == CODE_FOR_bundle_selector)
9059 && GET_CODE (PATTERN (next_insn)) != USE
9060 && GET_CODE (PATTERN (next_insn)) != CLOBBER)
9061 break;
9062
9063 end_bundle = next_insn == NULL_RTX
9064 || next_insn == tail
9065 || (INSN_P (next_insn)
9066 && recog_memoized (next_insn)
9067 == CODE_FOR_bundle_selector);
9068 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier
9069 && !start_bundle && !end_bundle
9070 && next_insn
9071 && GET_CODE (PATTERN (next_insn)) != ASM_INPUT
9072 && asm_noperands (PATTERN (next_insn)) < 0)
9073 num--;
9074
9075 start_bundle = false;
9076 }
9077 }
9078
9079 gcc_assert (num == 0);
9080 }
9081 #endif
9082
9083 free (index_to_bundle_states);
9084 finish_bundle_state_table ();
9085 bundling_p = 0;
9086 dfa_clean_insn_cache ();
9087 }
9088
9089 /* The following function is called at the end of scheduling BB or
9090 EBB. After reload, it inserts stop bits and does insn bundling. */
9091
9092 static void
9093 ia64_sched_finish (FILE *dump, int sched_verbose)
9094 {
9095 if (sched_verbose)
9096 fprintf (dump, "// Finishing schedule.\n");
9097 if (!reload_completed)
9098 return;
9099 if (reload_completed)
9100 {
9101 final_emit_insn_group_barriers (dump);
9102 bundling (dump, sched_verbose, current_sched_info->prev_head,
9103 current_sched_info->next_tail);
9104 if (sched_verbose && dump)
9105 fprintf (dump, "// finishing %d-%d\n",
9106 INSN_UID (NEXT_INSN (current_sched_info->prev_head)),
9107 INSN_UID (PREV_INSN (current_sched_info->next_tail)));
9108
9109 return;
9110 }
9111 }
9112
9113 /* The following function inserts stop bits in a scheduled BB or EBB. */
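/* A "stop bit" ends an instruction group; it is represented here by
   insn_group_barrier insns, which the backend prints as the ";;"
   group separator in the assembly output.  */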
9114
9115 static void
9116 final_emit_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
9117 {
9118 rtx insn;
9119 int need_barrier_p = 0;
9120 int seen_good_insn = 0;
9121
9122 init_insn_group_barriers ();
9123
9124 for (insn = NEXT_INSN (current_sched_info->prev_head);
9125 insn != current_sched_info->next_tail;
9126 insn = NEXT_INSN (insn))
9127 {
9128 if (GET_CODE (insn) == BARRIER)
9129 {
9130 rtx last = prev_active_insn (insn);
9131
9132 if (! last)
9133 continue;
9134 if (GET_CODE (last) == JUMP_INSN
9135 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
9136 last = prev_active_insn (last);
9137 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
9138 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
9139
9140 init_insn_group_barriers ();
9141 seen_good_insn = 0;
9142 need_barrier_p = 0;
9143 }
9144 else if (NONDEBUG_INSN_P (insn))
9145 {
9146 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
9147 {
9148 init_insn_group_barriers ();
9149 seen_good_insn = 0;
9150 need_barrier_p = 0;
9151 }
9152 else if (need_barrier_p || group_barrier_needed (insn)
9153 || (mflag_sched_stop_bits_after_every_cycle
9154 && GET_MODE (insn) == TImode
9155 && seen_good_insn))
9156 {
9157 if (TARGET_EARLY_STOP_BITS)
9158 {
9159 rtx last;
9160
9161 for (last = insn;
9162 last != current_sched_info->prev_head;
9163 last = PREV_INSN (last))
9164 if (INSN_P (last) && GET_MODE (last) == TImode
9165 && stops_p [INSN_UID (last)])
9166 break;
9167 if (last == current_sched_info->prev_head)
9168 last = insn;
9169 last = prev_active_insn (last);
9170 if (last
9171 && recog_memoized (last) != CODE_FOR_insn_group_barrier)
9172 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)),
9173 last);
9174 init_insn_group_barriers ();
9175 for (last = NEXT_INSN (last);
9176 last != insn;
9177 last = NEXT_INSN (last))
9178 if (INSN_P (last))
9179 {
9180 group_barrier_needed (last);
9181 if (recog_memoized (last) >= 0
9182 && important_for_bundling_p (last))
9183 seen_good_insn = 1;
9184 }
9185 }
9186 else
9187 {
9188 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
9189 insn);
9190 init_insn_group_barriers ();
9191 seen_good_insn = 0;
9192 }
9193 group_barrier_needed (insn);
9194 if (recog_memoized (insn) >= 0
9195 && important_for_bundling_p (insn))
9196 seen_good_insn = 1;
9197 }
9198 else if (recog_memoized (insn) >= 0
9199 && important_for_bundling_p (insn))
9200 seen_good_insn = 1;
9201 need_barrier_p = (GET_CODE (insn) == CALL_INSN
9202 || GET_CODE (PATTERN (insn)) == ASM_INPUT
9203 || asm_noperands (PATTERN (insn)) >= 0);
9204 }
9205 }
9206 }
9207
9208 \f
9209
9210 /* The following function returns the lookahead depth used by the first
9211 cycle multipass scheduling of the DFA insn scheduler (nonzero enables it). */
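/* The values below are a guess at the useful depth: presumably 6
   matches the two-bundle (six-slot) issue window that matters once
   bundling is done after reload, while 4 suffices for the first
   scheduling pass.  */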
9212
9213 static int
9214 ia64_first_cycle_multipass_dfa_lookahead (void)
9215 {
9216 return (reload_completed ? 6 : 4);
9217 }
9218
9219 /* The following function initializes the variable `dfa_pre_cycle_insn'. */
9220
9221 static void
9222 ia64_init_dfa_pre_cycle_insn (void)
9223 {
9224 if (temp_dfa_state == NULL)
9225 {
9226 dfa_state_size = state_size ();
9227 temp_dfa_state = xmalloc (dfa_state_size);
9228 prev_cycle_state = xmalloc (dfa_state_size);
9229 }
9230 dfa_pre_cycle_insn = make_insn_raw (gen_pre_cycle ());
9231 PREV_INSN (dfa_pre_cycle_insn) = NEXT_INSN (dfa_pre_cycle_insn) = NULL_RTX;
9232 recog_memoized (dfa_pre_cycle_insn);
9233 dfa_stop_insn = make_insn_raw (gen_insn_group_barrier (GEN_INT (3)));
9234 PREV_INSN (dfa_stop_insn) = NEXT_INSN (dfa_stop_insn) = NULL_RTX;
9235 recog_memoized (dfa_stop_insn);
9236 }
9237
9238 /* The following function returns the pseudo insn DFA_PRE_CYCLE_INSN
9239 used by the DFA insn scheduler. */
9240
9241 static rtx
9242 ia64_dfa_pre_cycle_insn (void)
9243 {
9244 return dfa_pre_cycle_insn;
9245 }
9246
9247 /* The following function returns TRUE if PRODUCER (of type ilog or
9248 ld) produces the address used by CONSUMER (of type st or stf). */
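/* For example, with PRODUCER "add r14 = r15, r16" and CONSUMER
   "st8 [r14] = r17", the destination register r14 of the producer is
   mentioned in the consumer's memory address, so we return TRUE.  */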
9249
9250 int
9251 ia64_st_address_bypass_p (rtx producer, rtx consumer)
9252 {
9253 rtx dest, reg, mem;
9254
9255 gcc_assert (producer && consumer);
9256 dest = ia64_single_set (producer);
9257 gcc_assert (dest);
9258 reg = SET_DEST (dest);
9259 gcc_assert (reg);
9260 if (GET_CODE (reg) == SUBREG)
9261 reg = SUBREG_REG (reg);
9262 gcc_assert (GET_CODE (reg) == REG);
9263
9264 dest = ia64_single_set (consumer);
9265 gcc_assert (dest);
9266 mem = SET_DEST (dest);
9267 gcc_assert (mem && GET_CODE (mem) == MEM);
9268 return reg_mentioned_p (reg, mem);
9269 }
9270
9271 /* The following function returns TRUE if PRODUCER (of type ilog or
9272 ld) produces the address used by CONSUMER (of type ld or fld). */
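/* This is like ia64_st_address_bypass_p, except that the consumer's
   SET_SRC may be wrapped in speculative load unspecs (presumably the
   ld.a/ld.s/ld.sa forms) or in an IF_THEN_ELSE for ld.c; the code
   below peels those off before looking for the MEM (or the LO_SUM
   used for GOT loads).  */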
9273
9274 int
9275 ia64_ld_address_bypass_p (rtx producer, rtx consumer)
9276 {
9277 rtx dest, src, reg, mem;
9278
9279 gcc_assert (producer && consumer);
9280 dest = ia64_single_set (producer);
9281 gcc_assert (dest);
9282 reg = SET_DEST (dest);
9283 gcc_assert (reg);
9284 if (GET_CODE (reg) == SUBREG)
9285 reg = SUBREG_REG (reg);
9286 gcc_assert (GET_CODE (reg) == REG);
9287
9288 src = ia64_single_set (consumer);
9289 gcc_assert (src);
9290 mem = SET_SRC (src);
9291 gcc_assert (mem);
9292
9293 if (GET_CODE (mem) == UNSPEC && XVECLEN (mem, 0) > 0)
9294 mem = XVECEXP (mem, 0, 0);
9295 else if (GET_CODE (mem) == IF_THEN_ELSE)
9296 /* ??? Is this bypass necessary for ld.c? */
9297 {
9298 gcc_assert (XINT (XEXP (XEXP (mem, 0), 0), 1) == UNSPEC_LDCCLR);
9299 mem = XEXP (mem, 1);
9300 }
9301
9302 while (GET_CODE (mem) == SUBREG || GET_CODE (mem) == ZERO_EXTEND)
9303 mem = XEXP (mem, 0);
9304
9305 if (GET_CODE (mem) == UNSPEC)
9306 {
9307 int c = XINT (mem, 1);
9308
9309 gcc_assert (c == UNSPEC_LDA || c == UNSPEC_LDS || c == UNSPEC_LDS_A
9310 || c == UNSPEC_LDSA);
9311 mem = XVECEXP (mem, 0, 0);
9312 }
9313
9314 /* Note that LO_SUM is used for GOT loads. */
9315 gcc_assert (GET_CODE (mem) == LO_SUM || GET_CODE (mem) == MEM);
9316
9317 return reg_mentioned_p (reg, mem);
9318 }
9319
9320 /* The following function returns TRUE if INSN produces an address for a
9321 load/store insn.  We will place such insns into an M slot because that
9322 decreases their latency. */
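/* The CALL bit of the insn rtx is reused as a flag marking insns whose
   result feeds a memory address; it is apparently set earlier, during
   the scheduler's dependence evaluation in this file.  */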
9323
9324 int
9325 ia64_produce_address_p (rtx insn)
9326 {
9327 return insn->call;
9328 }
9329
9330 \f
9331 /* Emit pseudo-ops for the assembler to describe predicate relations.
9332 At present this assumes that we only consider predicate pairs to
9333 be mutex, and that the assembler can deduce proper values from
9334 straight-line code. */
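/* The pred_rel_mutex pattern expands to a ".pred.rel.mutex"-style
   assembler annotation, presumably declaring the two predicates of a
   pair (pN, pN+1) mutually exclusive; hence the stride of 2 in the
   loop over predicate registers below.  */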
9335
9336 static void
9337 emit_predicate_relation_info (void)
9338 {
9339 basic_block bb;
9340
9341 FOR_EACH_BB_REVERSE (bb)
9342 {
9343 int r;
9344 rtx head = BB_HEAD (bb);
9345
9346 /* We only need such notes at code labels. */
9347 if (GET_CODE (head) != CODE_LABEL)
9348 continue;
9349 if (NOTE_INSN_BASIC_BLOCK_P (NEXT_INSN (head)))
9350 head = NEXT_INSN (head);
9351
9352 /* Skip p0, which may be thought to be live due to (reg:DI p0)
9353 grabbing the entire block of predicate registers. */
9354 for (r = PR_REG (2); r < PR_REG (64); r += 2)
9355 if (REGNO_REG_SET_P (df_get_live_in (bb), r))
9356 {
9357 rtx p = gen_rtx_REG (BImode, r);
9358 rtx n = emit_insn_after (gen_pred_rel_mutex (p), head);
9359 if (head == BB_END (bb))
9360 BB_END (bb) = n;
9361 head = n;
9362 }
9363 }
9364
9365 /* Look for conditional calls that do not return, and protect predicate
9366 relations around them. Otherwise the assembler will assume the call
9367 returns, and complain about uses of call-clobbered predicates after
9368 the call. */
9369 FOR_EACH_BB_REVERSE (bb)
9370 {
9371 rtx insn = BB_HEAD (bb);
9372
9373 while (1)
9374 {
9375 if (GET_CODE (insn) == CALL_INSN
9376 && GET_CODE (PATTERN (insn)) == COND_EXEC
9377 && find_reg_note (insn, REG_NORETURN, NULL_RTX))
9378 {
9379 rtx b = emit_insn_before (gen_safe_across_calls_all (), insn);
9380 rtx a = emit_insn_after (gen_safe_across_calls_normal (), insn);
9381 if (BB_HEAD (bb) == insn)
9382 BB_HEAD (bb) = b;
9383 if (BB_END (bb) == insn)
9384 BB_END (bb) = a;
9385 }
9386
9387 if (insn == BB_END (bb))
9388 break;
9389 insn = NEXT_INSN (insn);
9390 }
9391 }
9392 }
9393
9394 /* Perform machine dependent operations on the rtl chain INSNS. */
9395
9396 static void
9397 ia64_reorg (void)
9398 {
9399 /* We are freeing block_for_insn in the toplev to keep compatibility
9400 with old MDEP_REORGS that are not CFG based. Recompute it now. */
9401 compute_bb_for_insn ();
9402
9403 /* If optimizing, we will already have split insns before scheduling. */
9404 if (optimize == 0)
9405 split_all_insns ();
9406
9407 if (optimize && ia64_flag_schedule_insns2
9408 && dbg_cnt (ia64_sched2))
9409 {
9410 timevar_push (TV_SCHED2);
9411 ia64_final_schedule = 1;
9412
9413 initiate_bundle_states ();
9414 ia64_nop = make_insn_raw (gen_nop ());
9415 PREV_INSN (ia64_nop) = NEXT_INSN (ia64_nop) = NULL_RTX;
9416 recog_memoized (ia64_nop);
9417 clocks_length = get_max_uid () + 1;
9418 stops_p = XCNEWVEC (char, clocks_length);
9419
9420 if (ia64_tune == PROCESSOR_ITANIUM2)
9421 {
9422 pos_1 = get_cpu_unit_code ("2_1");
9423 pos_2 = get_cpu_unit_code ("2_2");
9424 pos_3 = get_cpu_unit_code ("2_3");
9425 pos_4 = get_cpu_unit_code ("2_4");
9426 pos_5 = get_cpu_unit_code ("2_5");
9427 pos_6 = get_cpu_unit_code ("2_6");
9428 _0mii_ = get_cpu_unit_code ("2b_0mii.");
9429 _0mmi_ = get_cpu_unit_code ("2b_0mmi.");
9430 _0mfi_ = get_cpu_unit_code ("2b_0mfi.");
9431 _0mmf_ = get_cpu_unit_code ("2b_0mmf.");
9432 _0bbb_ = get_cpu_unit_code ("2b_0bbb.");
9433 _0mbb_ = get_cpu_unit_code ("2b_0mbb.");
9434 _0mib_ = get_cpu_unit_code ("2b_0mib.");
9435 _0mmb_ = get_cpu_unit_code ("2b_0mmb.");
9436 _0mfb_ = get_cpu_unit_code ("2b_0mfb.");
9437 _0mlx_ = get_cpu_unit_code ("2b_0mlx.");
9438 _1mii_ = get_cpu_unit_code ("2b_1mii.");
9439 _1mmi_ = get_cpu_unit_code ("2b_1mmi.");
9440 _1mfi_ = get_cpu_unit_code ("2b_1mfi.");
9441 _1mmf_ = get_cpu_unit_code ("2b_1mmf.");
9442 _1bbb_ = get_cpu_unit_code ("2b_1bbb.");
9443 _1mbb_ = get_cpu_unit_code ("2b_1mbb.");
9444 _1mib_ = get_cpu_unit_code ("2b_1mib.");
9445 _1mmb_ = get_cpu_unit_code ("2b_1mmb.");
9446 _1mfb_ = get_cpu_unit_code ("2b_1mfb.");
9447 _1mlx_ = get_cpu_unit_code ("2b_1mlx.");
9448 }
9449 else
9450 {
9451 pos_1 = get_cpu_unit_code ("1_1");
9452 pos_2 = get_cpu_unit_code ("1_2");
9453 pos_3 = get_cpu_unit_code ("1_3");
9454 pos_4 = get_cpu_unit_code ("1_4");
9455 pos_5 = get_cpu_unit_code ("1_5");
9456 pos_6 = get_cpu_unit_code ("1_6");
9457 _0mii_ = get_cpu_unit_code ("1b_0mii.");
9458 _0mmi_ = get_cpu_unit_code ("1b_0mmi.");
9459 _0mfi_ = get_cpu_unit_code ("1b_0mfi.");
9460 _0mmf_ = get_cpu_unit_code ("1b_0mmf.");
9461 _0bbb_ = get_cpu_unit_code ("1b_0bbb.");
9462 _0mbb_ = get_cpu_unit_code ("1b_0mbb.");
9463 _0mib_ = get_cpu_unit_code ("1b_0mib.");
9464 _0mmb_ = get_cpu_unit_code ("1b_0mmb.");
9465 _0mfb_ = get_cpu_unit_code ("1b_0mfb.");
9466 _0mlx_ = get_cpu_unit_code ("1b_0mlx.");
9467 _1mii_ = get_cpu_unit_code ("1b_1mii.");
9468 _1mmi_ = get_cpu_unit_code ("1b_1mmi.");
9469 _1mfi_ = get_cpu_unit_code ("1b_1mfi.");
9470 _1mmf_ = get_cpu_unit_code ("1b_1mmf.");
9471 _1bbb_ = get_cpu_unit_code ("1b_1bbb.");
9472 _1mbb_ = get_cpu_unit_code ("1b_1mbb.");
9473 _1mib_ = get_cpu_unit_code ("1b_1mib.");
9474 _1mmb_ = get_cpu_unit_code ("1b_1mmb.");
9475 _1mfb_ = get_cpu_unit_code ("1b_1mfb.");
9476 _1mlx_ = get_cpu_unit_code ("1b_1mlx.");
9477 }
9478
9479 if (flag_selective_scheduling2
9480 && !maybe_skip_selective_scheduling ())
9481 run_selective_scheduling ();
9482 else
9483 schedule_ebbs ();
9484
9485 /* Redo alignment computation, as it might have gone wrong. */
9486 compute_alignments ();
9487
9488 /* We cannot reuse this one because it has been corrupted by the
9489 evil glat. */
9490 finish_bundle_states ();
9491 free (stops_p);
9492 stops_p = NULL;
9493 emit_insn_group_barriers (dump_file);
9494
9495 ia64_final_schedule = 0;
9496 timevar_pop (TV_SCHED2);
9497 }
9498 else
9499 emit_all_insn_group_barriers (dump_file);
9500
9501 df_analyze ();
9502
9503 /* A call must not be the last instruction in a function, so that the
9504 return address is still within the function and unwinding works
9505 properly.  Note that IA-64 differs from dwarf2 on this point. */
9506 if (ia64_except_unwind_info (&global_options) == UI_TARGET)
9507 {
9508 rtx insn;
9509 int saw_stop = 0;
9510
9511 insn = get_last_insn ();
9512 if (! INSN_P (insn))
9513 insn = prev_active_insn (insn);
9514 if (insn)
9515 {
9516 /* Skip over insns that expand to nothing. */
9517 while (GET_CODE (insn) == INSN
9518 && get_attr_empty (insn) == EMPTY_YES)
9519 {
9520 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
9521 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
9522 saw_stop = 1;
9523 insn = prev_active_insn (insn);
9524 }
9525 if (GET_CODE (insn) == CALL_INSN)
9526 {
9527 if (! saw_stop)
9528 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
9529 emit_insn (gen_break_f ());
9530 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
9531 }
9532 }
9533 }
9534
9535 emit_predicate_relation_info ();
9536
9537 if (ia64_flag_var_tracking)
9538 {
9539 timevar_push (TV_VAR_TRACKING);
9540 variable_tracking_main ();
9541 timevar_pop (TV_VAR_TRACKING);
9542 }
9543 df_finish_pass (false);
9544 }
9545 \f
9546 /* Return true if REGNO is used by the epilogue. */
9547
9548 int
9549 ia64_epilogue_uses (int regno)
9550 {
9551 switch (regno)
9552 {
9553 case R_GR (1):
9554 /* With a call to a function in another module, we will write a new
9555 value to "gp". After returning from such a call, we need to make
9556 sure the function restores the original gp-value, even if the
9557 function itself does not use the gp anymore. */
9558 return !(TARGET_AUTO_PIC || TARGET_NO_PIC);
9559
9560 case IN_REG (0): case IN_REG (1): case IN_REG (2): case IN_REG (3):
9561 case IN_REG (4): case IN_REG (5): case IN_REG (6): case IN_REG (7):
9562 /* For functions defined with the syscall_linkage attribute, all
9563 input registers are marked as live at all function exits. This
9564 prevents the register allocator from using the input registers,
9565 which in turn makes it possible to restart a system call after
9566 an interrupt without having to save/restore the input registers.
9567 This also prevents kernel data from leaking to application code. */
9568 return lookup_attribute ("syscall_linkage",
9569 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))) != NULL;
9570
9571 case R_BR (0):
9572 /* Conditional return patterns can't represent the use of `b0' as
9573 the return address, so we force the value live this way. */
9574 return 1;
9575
9576 case AR_PFS_REGNUM:
9577 /* Likewise for ar.pfs, which is used by br.ret. */
9578 return 1;
9579
9580 default:
9581 return 0;
9582 }
9583 }
9584
9585 /* Return true if REGNO is used by the frame unwinder. */
9586
9587 int
9588 ia64_eh_uses (int regno)
9589 {
9590 unsigned int r;
9591
9592 if (! reload_completed)
9593 return 0;
9594
9595 if (regno == 0)
9596 return 0;
9597
9598 for (r = reg_save_b0; r <= reg_save_ar_lc; r++)
9599 if (regno == current_frame_info.r[r]
9600 || regno == emitted_frame_related_regs[r])
9601 return 1;
9602
9603 return 0;
9604 }
9605 \f
9606 /* Return true if this goes in small data/bss. */
9607
9608 /* ??? We could also support our own long data here, generating movl/add/ld8
9609 instead of addl,ld8/ld8.  This makes the code bigger, but should make the
9610 code faster because there is one less load.  This would also cover incomplete
9611 types, which can't go in sdata/sbss. */
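/* For reference, a small-data access is gp-relative and fits in the
   22-bit addl immediate, roughly:
       addl r2 = @gprel(var), gp ;; ld8 r3 = [r2]
   whereas other data normally goes through the linkage table:
       addl r2 = @ltoff(var), gp ;; ld8 r2 = [r2] ;; ld8 r3 = [r2]
   which is the extra load the comment above refers to.  */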
9612
9613 static bool
9614 ia64_in_small_data_p (const_tree exp)
9615 {
9616 if (TARGET_NO_SDATA)
9617 return false;
9618
9619 /* We want to merge strings, so we never consider them small data. */
9620 if (TREE_CODE (exp) == STRING_CST)
9621 return false;
9622
9623 /* Functions are never small data. */
9624 if (TREE_CODE (exp) == FUNCTION_DECL)
9625 return false;
9626
9627 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
9628 {
9629 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
9630
9631 if (strcmp (section, ".sdata") == 0
9632 || strncmp (section, ".sdata.", 7) == 0
9633 || strncmp (section, ".gnu.linkonce.s.", 16) == 0
9634 || strcmp (section, ".sbss") == 0
9635 || strncmp (section, ".sbss.", 6) == 0
9636 || strncmp (section, ".gnu.linkonce.sb.", 17) == 0)
9637 return true;
9638 }
9639 else
9640 {
9641 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
9642
9643 /* If this is an incomplete type with size 0, then we can't put it
9644 in sdata because it might be too big when completed. */
9645 if (size > 0 && size <= ia64_section_threshold)
9646 return true;
9647 }
9648
9649 return false;
9650 }
9651 \f
9652 /* Output assembly directives for prologue regions. */
9653
9654 /* The current basic block number. */
9655
9656 static bool last_block;
9657
9658 /* True if we need a copy_state command at the start of the next block. */
9659
9660 static bool need_copy_state;
9661
9662 #ifndef MAX_ARTIFICIAL_LABEL_BYTES
9663 # define MAX_ARTIFICIAL_LABEL_BYTES 30
9664 #endif
9665
9666 /* Emit a debugging label after a call-frame-related insn. We'd
9667 rather output the label right away, but we'd have to output it
9668 after, not before, the instruction, and the instruction has not
9669 been output yet. So we emit the label after the insn, delete it to
9670 avoid introducing basic blocks, and mark it as preserved, such that
9671 it is still output, given that it is referenced in debug info. */
9672
9673 static const char *
9674 ia64_emit_deleted_label_after_insn (rtx insn)
9675 {
9676 char label[MAX_ARTIFICIAL_LABEL_BYTES];
9677 rtx lb = gen_label_rtx ();
9678 rtx label_insn = emit_label_after (lb, insn);
9679
9680 LABEL_PRESERVE_P (lb) = 1;
9681
9682 delete_insn (label_insn);
9683
9684 ASM_GENERATE_INTERNAL_LABEL (label, "L", CODE_LABEL_NUMBER (label_insn));
9685
9686 return xstrdup (label);
9687 }
9688
9689 /* Define the CFA after INSN with the steady-state definition. */
9690
9691 static void
9692 ia64_dwarf2out_def_steady_cfa (rtx insn, bool frame)
9693 {
9694 rtx fp = frame_pointer_needed
9695 ? hard_frame_pointer_rtx
9696 : stack_pointer_rtx;
9697 const char *label = ia64_emit_deleted_label_after_insn (insn);
9698
9699 if (!frame)
9700 return;
9701
9702 dwarf2out_def_cfa
9703 (label, REGNO (fp),
9704 ia64_initial_elimination_offset
9705 (REGNO (arg_pointer_rtx), REGNO (fp))
9706 + ARG_POINTER_CFA_OFFSET (current_function_decl));
9707 }
9708
9709 /* All we need to do here is avoid a crash in the generic dwarf2
9710 processing. The real CFA definition is set up above. */
9711
9712 static void
9713 ia64_dwarf_handle_frame_unspec (const char * ARG_UNUSED (label),
9714 rtx ARG_UNUSED (pattern),
9715 int index)
9716 {
9717 gcc_assert (index == UNSPECV_ALLOC);
9718 }
9719
9720 /* The generic dwarf2 frame debug info generator does not define a
9721 separate region for the very end of the epilogue, so refrain from
9722 doing so in the IA64-specific code as well. */
9723
9724 #define IA64_CHANGE_CFA_IN_EPILOGUE 0
9725
9726 /* The function emits unwind directives for the start of an epilogue. */
9727
9728 static void
9729 process_epilogue (FILE *asm_out_file, rtx insn, bool unwind, bool frame)
9730 {
9731 /* If this isn't the last block of the function, then we need to label the
9732 current state, and copy it back in at the start of the next block. */
9733
9734 if (!last_block)
9735 {
9736 if (unwind)
9737 fprintf (asm_out_file, "\t.label_state %d\n",
9738 ++cfun->machine->state_num);
9739 need_copy_state = true;
9740 }
9741
9742 if (unwind)
9743 fprintf (asm_out_file, "\t.restore sp\n");
9744 if (IA64_CHANGE_CFA_IN_EPILOGUE && frame)
9745 dwarf2out_def_cfa (ia64_emit_deleted_label_after_insn (insn),
9746 STACK_POINTER_REGNUM, INCOMING_FRAME_SP_OFFSET);
9747 }
9748
9749 /* This function processes a SET pattern for REG_CFA_ADJUST_CFA. */
9750
9751 static void
9752 process_cfa_adjust_cfa (FILE *asm_out_file, rtx pat, rtx insn,
9753 bool unwind, bool frame)
9754 {
9755 rtx dest = SET_DEST (pat);
9756 rtx src = SET_SRC (pat);
9757
9758 if (dest == stack_pointer_rtx)
9759 {
9760 if (GET_CODE (src) == PLUS)
9761 {
9762 rtx op0 = XEXP (src, 0);
9763 rtx op1 = XEXP (src, 1);
9764
9765 gcc_assert (op0 == dest && GET_CODE (op1) == CONST_INT);
9766
9767 if (INTVAL (op1) < 0)
9768 {
9769 gcc_assert (!frame_pointer_needed);
9770 if (unwind)
9771 fprintf (asm_out_file,
9772 "\t.fframe "HOST_WIDE_INT_PRINT_DEC"\n",
9773 -INTVAL (op1));
9774 ia64_dwarf2out_def_steady_cfa (insn, frame);
9775 }
9776 else
9777 process_epilogue (asm_out_file, insn, unwind, frame);
9778 }
9779 else
9780 {
9781 gcc_assert (src == hard_frame_pointer_rtx);
9782 process_epilogue (asm_out_file, insn, unwind, frame);
9783 }
9784 }
9785 else if (dest == hard_frame_pointer_rtx)
9786 {
9787 gcc_assert (src == stack_pointer_rtx);
9788 gcc_assert (frame_pointer_needed);
9789
9790 if (unwind)
9791 fprintf (asm_out_file, "\t.vframe r%d\n",
9792 ia64_dbx_register_number (REGNO (dest)));
9793 ia64_dwarf2out_def_steady_cfa (insn, frame);
9794 }
9795 else
9796 gcc_unreachable ();
9797 }
9798
9799 /* This function processes a SET pattern for REG_CFA_REGISTER. */
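/* Each case below emits a ".save" unwind directive naming the
   resource being preserved and the general register it lives in,
   e.g. saving b0 into r42 produces "\t.save rp, r42".  */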
9800
9801 static void
9802 process_cfa_register (FILE *asm_out_file, rtx pat, bool unwind)
9803 {
9804 rtx dest = SET_DEST (pat);
9805 rtx src = SET_SRC (pat);
9806
9807 int dest_regno = REGNO (dest);
9808 int src_regno = REGNO (src);
9809
9810 switch (src_regno)
9811 {
9812 case BR_REG (0):
9813 /* Saving return address pointer. */
9814 gcc_assert (dest_regno == current_frame_info.r[reg_save_b0]);
9815 if (unwind)
9816 fprintf (asm_out_file, "\t.save rp, r%d\n",
9817 ia64_dbx_register_number (dest_regno));
9818 break;
9819
9820 case PR_REG (0):
9821 gcc_assert (dest_regno == current_frame_info.r[reg_save_pr]);
9822 if (unwind)
9823 fprintf (asm_out_file, "\t.save pr, r%d\n",
9824 ia64_dbx_register_number (dest_regno));
9825 break;
9826
9827 case AR_UNAT_REGNUM:
9828 gcc_assert (dest_regno == current_frame_info.r[reg_save_ar_unat]);
9829 if (unwind)
9830 fprintf (asm_out_file, "\t.save ar.unat, r%d\n",
9831 ia64_dbx_register_number (dest_regno));
9832 break;
9833
9834 case AR_LC_REGNUM:
9835 gcc_assert (dest_regno == current_frame_info.r[reg_save_ar_lc]);
9836 if (unwind)
9837 fprintf (asm_out_file, "\t.save ar.lc, r%d\n",
9838 ia64_dbx_register_number (dest_regno));
9839 break;
9840
9841 default:
9842 /* Everything else should indicate being stored to memory. */
9843 gcc_unreachable ();
9844 }
9845 }
9846
9847 /* This function processes a SET pattern for REG_CFA_OFFSET. */
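/* Saves to memory use ".savesp" for sp-relative addresses and
   ".savepsp" for frame-pointer-relative ones; the offset is negated
   in the latter case, presumably because .savepsp offsets are
   measured back from the previous stack pointer.  */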
9848
9849 static void
9850 process_cfa_offset (FILE *asm_out_file, rtx pat, bool unwind)
9851 {
9852 rtx dest = SET_DEST (pat);
9853 rtx src = SET_SRC (pat);
9854 int src_regno = REGNO (src);
9855 const char *saveop;
9856 HOST_WIDE_INT off;
9857 rtx base;
9858
9859 gcc_assert (MEM_P (dest));
9860 if (GET_CODE (XEXP (dest, 0)) == REG)
9861 {
9862 base = XEXP (dest, 0);
9863 off = 0;
9864 }
9865 else
9866 {
9867 gcc_assert (GET_CODE (XEXP (dest, 0)) == PLUS
9868 && GET_CODE (XEXP (XEXP (dest, 0), 1)) == CONST_INT);
9869 base = XEXP (XEXP (dest, 0), 0);
9870 off = INTVAL (XEXP (XEXP (dest, 0), 1));
9871 }
9872
9873 if (base == hard_frame_pointer_rtx)
9874 {
9875 saveop = ".savepsp";
9876 off = - off;
9877 }
9878 else
9879 {
9880 gcc_assert (base == stack_pointer_rtx);
9881 saveop = ".savesp";
9882 }
9883
9884 src_regno = REGNO (src);
9885 switch (src_regno)
9886 {
9887 case BR_REG (0):
9888 gcc_assert (!current_frame_info.r[reg_save_b0]);
9889 if (unwind)
9890 fprintf (asm_out_file, "\t%s rp, " HOST_WIDE_INT_PRINT_DEC "\n",
9891 saveop, off);
9892 break;
9893
9894 case PR_REG (0):
9895 gcc_assert (!current_frame_info.r[reg_save_pr]);
9896 if (unwind)
9897 fprintf (asm_out_file, "\t%s pr, " HOST_WIDE_INT_PRINT_DEC "\n",
9898 saveop, off);
9899 break;
9900
9901 case AR_LC_REGNUM:
9902 gcc_assert (!current_frame_info.r[reg_save_ar_lc]);
9903 if (unwind)
9904 fprintf (asm_out_file, "\t%s ar.lc, " HOST_WIDE_INT_PRINT_DEC "\n",
9905 saveop, off);
9906 break;
9907
9908 case AR_PFS_REGNUM:
9909 gcc_assert (!current_frame_info.r[reg_save_ar_pfs]);
9910 if (unwind)
9911 fprintf (asm_out_file, "\t%s ar.pfs, " HOST_WIDE_INT_PRINT_DEC "\n",
9912 saveop, off);
9913 break;
9914
9915 case AR_UNAT_REGNUM:
9916 gcc_assert (!current_frame_info.r[reg_save_ar_unat]);
9917 if (unwind)
9918 fprintf (asm_out_file, "\t%s ar.unat, " HOST_WIDE_INT_PRINT_DEC "\n",
9919 saveop, off);
9920 break;
9921
9922 case GR_REG (4):
9923 case GR_REG (5):
9924 case GR_REG (6):
9925 case GR_REG (7):
9926 if (unwind)
9927 fprintf (asm_out_file, "\t.save.g 0x%x\n",
9928 1 << (src_regno - GR_REG (4)));
9929 break;
9930
9931 case BR_REG (1):
9932 case BR_REG (2):
9933 case BR_REG (3):
9934 case BR_REG (4):
9935 case BR_REG (5):
9936 if (unwind)
9937 fprintf (asm_out_file, "\t.save.b 0x%x\n",
9938 1 << (src_regno - BR_REG (1)));
9939 break;
9940
9941 case FR_REG (2):
9942 case FR_REG (3):
9943 case FR_REG (4):
9944 case FR_REG (5):
9945 if (unwind)
9946 fprintf (asm_out_file, "\t.save.f 0x%x\n",
9947 1 << (src_regno - FR_REG (2)));
9948 break;
9949
9950 case FR_REG (16): case FR_REG (17): case FR_REG (18): case FR_REG (19):
9951 case FR_REG (20): case FR_REG (21): case FR_REG (22): case FR_REG (23):
9952 case FR_REG (24): case FR_REG (25): case FR_REG (26): case FR_REG (27):
9953 case FR_REG (28): case FR_REG (29): case FR_REG (30): case FR_REG (31):
9954 if (unwind)
9955 fprintf (asm_out_file, "\t.save.gf 0x0, 0x%x\n",
9956 1 << (src_regno - FR_REG (12)));
9957 break;
9958
9959 default:
9960 /* ??? For some reason we mark other general registers, even those
9961 we can't represent in the unwind info. Ignore them. */
9962 break;
9963 }
9964 }
9965
9966 /* This function looks at a single insn and emits any directives
9967 required to unwind this insn. */
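/* Three kinds of events are handled: basic-block notes (to restore
   the unwind state after an epilogue), the prologue's alloc insn
   (which saves ar.pfs or starts a new prologue region), and the
   REG_CFA_* notes, which are dispatched to the process_cfa_*
   helpers above.  */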
9968
9969 static void
9970 ia64_asm_unwind_emit (FILE *asm_out_file, rtx insn)
9971 {
9972 bool unwind = ia64_except_unwind_info (&global_options) == UI_TARGET;
9973 bool frame = dwarf2out_do_frame ();
9974 rtx note, pat;
9975 bool handled_one;
9976
9977 if (!unwind && !frame)
9978 return;
9979
9980 if (NOTE_INSN_BASIC_BLOCK_P (insn))
9981 {
9982 last_block = NOTE_BASIC_BLOCK (insn)->next_bb == EXIT_BLOCK_PTR;
9983
9984 /* Restore unwind state from immediately before the epilogue. */
9985 if (need_copy_state)
9986 {
9987 if (unwind)
9988 {
9989 fprintf (asm_out_file, "\t.body\n");
9990 fprintf (asm_out_file, "\t.copy_state %d\n",
9991 cfun->machine->state_num);
9992 }
9993 if (IA64_CHANGE_CFA_IN_EPILOGUE)
9994 ia64_dwarf2out_def_steady_cfa (insn, frame);
9995 need_copy_state = false;
9996 }
9997 }
9998
9999 if (GET_CODE (insn) == NOTE || ! RTX_FRAME_RELATED_P (insn))
10000 return;
10001
10002 /* Look for the ALLOC insn. */
10003 if (INSN_CODE (insn) == CODE_FOR_alloc)
10004 {
10005 rtx dest = SET_DEST (XVECEXP (PATTERN (insn), 0, 0));
10006 int dest_regno = REGNO (dest);
10007
10008 /* If this is the final destination for ar.pfs, then this must
10009 be the alloc in the prologue. */
10010 if (dest_regno == current_frame_info.r[reg_save_ar_pfs])
10011 {
10012 if (unwind)
10013 fprintf (asm_out_file, "\t.save ar.pfs, r%d\n",
10014 ia64_dbx_register_number (dest_regno));
10015 }
10016 else
10017 {
10018 /* This must be an alloc before a sibcall. We must drop the
10019 old frame info. The easiest way to drop the old frame
10020 info is to ensure we had a ".restore sp" directive
10021 followed by a new prologue. If the procedure doesn't
10022 have a memory-stack frame, we'll issue a dummy ".restore
10023 sp" now. */
10024 if (current_frame_info.total_size == 0 && !frame_pointer_needed)
10025 /* If we haven't done process_epilogue () yet, do it now.  */
10026 process_epilogue (asm_out_file, insn, unwind, frame);
10027 if (unwind)
10028 fprintf (asm_out_file, "\t.prologue\n");
10029 }
10030 return;
10031 }
10032
10033 handled_one = false;
10034 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
10035 switch (REG_NOTE_KIND (note))
10036 {
10037 case REG_CFA_ADJUST_CFA:
10038 pat = XEXP (note, 0);
10039 if (pat == NULL)
10040 pat = PATTERN (insn);
10041 process_cfa_adjust_cfa (asm_out_file, pat, insn, unwind, frame);
10042 handled_one = true;
10043 break;
10044
10045 case REG_CFA_OFFSET:
10046 pat = XEXP (note, 0);
10047 if (pat == NULL)
10048 pat = PATTERN (insn);
10049 process_cfa_offset (asm_out_file, pat, unwind);
10050 handled_one = true;
10051 break;
10052
10053 case REG_CFA_REGISTER:
10054 pat = XEXP (note, 0);
10055 if (pat == NULL)
10056 pat = PATTERN (insn);
10057 process_cfa_register (asm_out_file, pat, unwind);
10058 handled_one = true;
10059 break;
10060
10061 case REG_FRAME_RELATED_EXPR:
10062 case REG_CFA_DEF_CFA:
10063 case REG_CFA_EXPRESSION:
10064 case REG_CFA_RESTORE:
10065 case REG_CFA_SET_VDRAP:
10066 /* Not used in the ia64 port. */
10067 gcc_unreachable ();
10068
10069 default:
10070 /* Not a frame-related note. */
10071 break;
10072 }
10073
10074 /* All REG_FRAME_RELATED_P insns, besides ALLOC, are marked with the
10075 explicit action to take. No guessing required. */
10076 gcc_assert (handled_one);
10077 }
10078
10079 /* Implement TARGET_ASM_EMIT_EXCEPT_PERSONALITY. */
10080
10081 static void
10082 ia64_asm_emit_except_personality (rtx personality)
10083 {
10084 fputs ("\t.personality\t", asm_out_file);
10085 output_addr_const (asm_out_file, personality);
10086 fputc ('\n', asm_out_file);
10087 }
10088
10089 /* Implement TARGET_ASM_INITIALIZE_SECTIONS. */
10090
10091 static void
10092 ia64_asm_init_sections (void)
10093 {
10094 exception_section = get_unnamed_section (0, output_section_asm_op,
10095 "\t.handlerdata");
10096 }
10097
10098 /* Implement TARGET_DEBUG_UNWIND_INFO. */
10099
10100 static enum unwind_info_type
10101 ia64_debug_unwind_info (void)
10102 {
10103 return UI_TARGET;
10104 }
10105
10106 /* Implement TARGET_EXCEPT_UNWIND_INFO. */
10107
10108 static enum unwind_info_type
10109 ia64_except_unwind_info (struct gcc_options *opts)
10110 {
10111 /* Honor the --enable-sjlj-exceptions configure switch. */
10112 #ifdef CONFIG_UNWIND_EXCEPTIONS
10113 if (CONFIG_UNWIND_EXCEPTIONS)
10114 return UI_SJLJ;
10115 #endif
10116
10117 /* For simplicity elsewhere in this file, indicate that all unwind
10118 info is disabled if we're not emitting unwind tables. */
10119 if (!opts->x_flag_exceptions && !opts->x_flag_unwind_tables)
10120 return UI_NONE;
10121
10122 return UI_TARGET;
10123 }
10124 \f
10125 enum ia64_builtins
10126 {
10127 IA64_BUILTIN_BSP,
10128 IA64_BUILTIN_COPYSIGNQ,
10129 IA64_BUILTIN_FABSQ,
10130 IA64_BUILTIN_FLUSHRS,
10131 IA64_BUILTIN_INFQ,
10132 IA64_BUILTIN_HUGE_VALQ,
10133 IA64_BUILTIN_max
10134 };
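/* For example (user code, not part of this file):
     void *bsp = __builtin_ia64_bsp ();   -- read the backing-store pointer
     __builtin_ia64_flushrs ();           -- flush the register stack  */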
10135
10136 static GTY(()) tree ia64_builtins[(int) IA64_BUILTIN_max];
10137
10138 void
10139 ia64_init_builtins (void)
10140 {
10141 tree fpreg_type;
10142 tree float80_type;
10143 tree decl;
10144
10145 /* The __fpreg type. */
10146 fpreg_type = make_node (REAL_TYPE);
10147 TYPE_PRECISION (fpreg_type) = 82;
10148 layout_type (fpreg_type);
10149 (*lang_hooks.types.register_builtin_type) (fpreg_type, "__fpreg");
10150
10151 /* The __float80 type. */
10152 float80_type = make_node (REAL_TYPE);
10153 TYPE_PRECISION (float80_type) = 80;
10154 layout_type (float80_type);
10155 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
10156
10157 /* The __float128 type. */
10158 if (!TARGET_HPUX)
10159 {
10160 tree ftype;
10161 tree float128_type = make_node (REAL_TYPE);
10162
10163 TYPE_PRECISION (float128_type) = 128;
10164 layout_type (float128_type);
10165 (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
10166
10167 /* TFmode support builtins. */
10168 ftype = build_function_type (float128_type, void_list_node);
10169 decl = add_builtin_function ("__builtin_infq", ftype,
10170 IA64_BUILTIN_INFQ, BUILT_IN_MD,
10171 NULL, NULL_TREE);
10172 ia64_builtins[IA64_BUILTIN_INFQ] = decl;
10173
10174 decl = add_builtin_function ("__builtin_huge_valq", ftype,
10175 IA64_BUILTIN_HUGE_VALQ, BUILT_IN_MD,
10176 NULL, NULL_TREE);
10177 ia64_builtins[IA64_BUILTIN_HUGE_VALQ] = decl;
10178
10179 ftype = build_function_type_list (float128_type,
10180 float128_type,
10181 NULL_TREE);
10182 decl = add_builtin_function ("__builtin_fabsq", ftype,
10183 IA64_BUILTIN_FABSQ, BUILT_IN_MD,
10184 "__fabstf2", NULL_TREE);
10185 TREE_READONLY (decl) = 1;
10186 ia64_builtins[IA64_BUILTIN_FABSQ] = decl;
10187
10188 ftype = build_function_type_list (float128_type,
10189 float128_type,
10190 float128_type,
10191 NULL_TREE);
10192 decl = add_builtin_function ("__builtin_copysignq", ftype,
10193 IA64_BUILTIN_COPYSIGNQ, BUILT_IN_MD,
10194 "__copysigntf3", NULL_TREE);
10195 TREE_READONLY (decl) = 1;
10196 ia64_builtins[IA64_BUILTIN_COPYSIGNQ] = decl;
10197 }
10198 else
10199 /* Under HPUX, this is a synonym for "long double". */
10200 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
10201 "__float128");
10202
10203 /* Fwrite on VMS is non-standard. */
10204 if (TARGET_ABI_OPEN_VMS)
10205 {
10206 implicit_built_in_decls[(int) BUILT_IN_FWRITE] = NULL_TREE;
10207 implicit_built_in_decls[(int) BUILT_IN_FWRITE_UNLOCKED] = NULL_TREE;
10208 }
10209
10210 #define def_builtin(name, type, code) \
10211 add_builtin_function ((name), (type), (code), BUILT_IN_MD, \
10212 NULL, NULL_TREE)
10213
10214 decl = def_builtin ("__builtin_ia64_bsp",
10215 build_function_type (ptr_type_node, void_list_node),
10216 IA64_BUILTIN_BSP);
10217 ia64_builtins[IA64_BUILTIN_BSP] = decl;
10218
10219 decl = def_builtin ("__builtin_ia64_flushrs",
10220 build_function_type (void_type_node, void_list_node),
10221 IA64_BUILTIN_FLUSHRS);
10222 ia64_builtins[IA64_BUILTIN_FLUSHRS] = decl;
10223
10224 #undef def_builtin
10225
10226 if (TARGET_HPUX)
10227 {
10228 if (built_in_decls [BUILT_IN_FINITE])
10229 set_user_assembler_name (built_in_decls [BUILT_IN_FINITE],
10230 "_Isfinite");
10231 if (built_in_decls [BUILT_IN_FINITEF])
10232 set_user_assembler_name (built_in_decls [BUILT_IN_FINITEF],
10233 "_Isfinitef");
10234 if (built_in_decls [BUILT_IN_FINITEL])
10235 set_user_assembler_name (built_in_decls [BUILT_IN_FINITEL],
10236 "_Isfinitef128");
10237 }
10238 }
10239
10240 rtx
10241 ia64_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
10242 enum machine_mode mode ATTRIBUTE_UNUSED,
10243 int ignore ATTRIBUTE_UNUSED)
10244 {
10245 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10246 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10247
10248 switch (fcode)
10249 {
10250 case IA64_BUILTIN_BSP:
10251 if (! target || ! register_operand (target, DImode))
10252 target = gen_reg_rtx (DImode);
10253 emit_insn (gen_bsp_value (target));
10254 #ifdef POINTERS_EXTEND_UNSIGNED
10255 target = convert_memory_address (ptr_mode, target);
10256 #endif
10257 return target;
10258
10259 case IA64_BUILTIN_FLUSHRS:
10260 emit_insn (gen_flushrs ());
10261 return const0_rtx;
10262
10263 case IA64_BUILTIN_INFQ:
10264 case IA64_BUILTIN_HUGE_VALQ:
10265 {
10266 enum machine_mode target_mode = TYPE_MODE (TREE_TYPE (exp));
10267 REAL_VALUE_TYPE inf;
10268 rtx tmp;
10269
10270 real_inf (&inf);
10271 tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, target_mode);
10272
10273 tmp = validize_mem (force_const_mem (target_mode, tmp));
10274
10275 if (target == 0)
10276 target = gen_reg_rtx (target_mode);
10277
10278 emit_move_insn (target, tmp);
10279 return target;
10280 }
10281
10282 case IA64_BUILTIN_FABSQ:
10283 case IA64_BUILTIN_COPYSIGNQ:
10284 return expand_call (exp, target, ignore);
10285
10286 default:
10287 gcc_unreachable ();
10288 }
10289
10290 return NULL_RTX;
10291 }
10292
10293 /* Return the ia64 builtin for CODE. */
10294
10295 static tree
10296 ia64_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
10297 {
10298 if (code >= IA64_BUILTIN_max)
10299 return error_mark_node;
10300
10301 return ia64_builtins[code];
10302 }
10303
10304 /* On HP-UX IA64, aggregate parameters are passed in the
10305 most significant bits of the stack slot. */
10306
10307 enum direction
10308 ia64_hpux_function_arg_padding (enum machine_mode mode, const_tree type)
10309 {
10310 /* Exception to normal case for structures/unions/etc. */
10311
10312 if (type && AGGREGATE_TYPE_P (type)
10313 && int_size_in_bytes (type) < UNITS_PER_WORD)
10314 return upward;
10315
10316 /* Fall back to the default. */
10317 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
10318 }
10319
10320 /* Emit text to declare externally defined variables and functions, because
10321 the Intel assembler does not support undefined externals. */
10322
10323 void
10324 ia64_asm_output_external (FILE *file, tree decl, const char *name)
10325 {
10326 /* We output the name if and only if TREE_SYMBOL_REFERENCED is
10327 set in order to avoid putting out names that are never really
10328 used. */
10329 if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
10330 {
10331 /* maybe_assemble_visibility will return 1 if the assembler
10332 visibility directive is output. */
10333 int need_visibility = ((*targetm.binds_local_p) (decl)
10334 && maybe_assemble_visibility (decl));
10335
10336 #ifdef DO_CRTL_NAMES
10337 DO_CRTL_NAMES;
10338 #endif
10339
10340 /* GNU as does not need anything here, but the HP linker does
10341 need something for external functions. */
10342 if ((TARGET_HPUX_LD || !TARGET_GNU_AS)
10343 && TREE_CODE (decl) == FUNCTION_DECL)
10344 (*targetm.asm_out.globalize_decl_name) (file, decl);
10345 else if (need_visibility && !TARGET_GNU_AS)
10346 (*targetm.asm_out.globalize_label) (file, name);
10347 }
10348 }
10349
10350 /* Set SImode div/mod functions, since init_integral_libfuncs only
10351 initializes modes of word_mode and larger.  Rename the TFmode libfuncs
10352 using the HPUX conventions.  __divtf3 is used for XFmode.  We need to
10353 keep it for backward compatibility. */
10354
10355 static void
10356 ia64_init_libfuncs (void)
10357 {
10358 set_optab_libfunc (sdiv_optab, SImode, "__divsi3");
10359 set_optab_libfunc (udiv_optab, SImode, "__udivsi3");
10360 set_optab_libfunc (smod_optab, SImode, "__modsi3");
10361 set_optab_libfunc (umod_optab, SImode, "__umodsi3");
10362
10363 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
10364 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
10365 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
10366 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
10367 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
10368
10369 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
10370 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
10371 set_conv_libfunc (sext_optab, TFmode, XFmode, "_U_Qfcnvff_f80_to_quad");
10372 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
10373 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
10374 set_conv_libfunc (trunc_optab, XFmode, TFmode, "_U_Qfcnvff_quad_to_f80");
10375
10376 set_conv_libfunc (sfix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_sgl");
10377 set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
10378 set_conv_libfunc (sfix_optab, TImode, TFmode, "_U_Qfcnvfxt_quad_to_quad");
10379 set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxut_quad_to_sgl");
10380 set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxut_quad_to_dbl");
10381
10382 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
10383 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
10384 set_conv_libfunc (sfloat_optab, TFmode, TImode, "_U_Qfcnvxf_quad_to_quad");
10385 /* HP-UX 11.23 libc does not have a function for unsigned
10386 SImode-to-TFmode conversion. */
10387 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_U_Qfcnvxuf_dbl_to_quad");
10388 }
10389
10390 /* Rename all the TFmode libfuncs using the HPUX conventions. */
10391
10392 static void
10393 ia64_hpux_init_libfuncs (void)
10394 {
10395 ia64_init_libfuncs ();
10396
10397 /* The HP SI millicode division and mod functions expect DI arguments.
10398 By turning them off completely we avoid using both libgcc and the
10399 non-standard millicode routines and use the HP DI millicode routines
10400 instead. */
10401
10402 set_optab_libfunc (sdiv_optab, SImode, 0);
10403 set_optab_libfunc (udiv_optab, SImode, 0);
10404 set_optab_libfunc (smod_optab, SImode, 0);
10405 set_optab_libfunc (umod_optab, SImode, 0);
10406
10407 set_optab_libfunc (sdiv_optab, DImode, "__milli_divI");
10408 set_optab_libfunc (udiv_optab, DImode, "__milli_divU");
10409 set_optab_libfunc (smod_optab, DImode, "__milli_remI");
10410 set_optab_libfunc (umod_optab, DImode, "__milli_remU");
10411
10412 /* HP-UX libc has TF min/max/abs routines in it. */
10413 set_optab_libfunc (smin_optab, TFmode, "_U_Qfmin");
10414 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
10415 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
10416
10417 /* ia64_expand_compare uses this. */
10418 cmptf_libfunc = init_one_libfunc ("_U_Qfcmp");
10419
10420 /* These should never be used. */
10421 set_optab_libfunc (eq_optab, TFmode, 0);
10422 set_optab_libfunc (ne_optab, TFmode, 0);
10423 set_optab_libfunc (gt_optab, TFmode, 0);
10424 set_optab_libfunc (ge_optab, TFmode, 0);
10425 set_optab_libfunc (lt_optab, TFmode, 0);
10426 set_optab_libfunc (le_optab, TFmode, 0);
10427 }
10428
10429 /* Rename the division and modulus functions in VMS. */
10430
10431 static void
10432 ia64_vms_init_libfuncs (void)
10433 {
10434 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
10435 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
10436 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
10437 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
10438 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
10439 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
10440 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
10441 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
10442 abort_libfunc = init_one_libfunc ("decc$abort");
10443 memcmp_libfunc = init_one_libfunc ("decc$memcmp");
10444 #ifdef MEM_LIBFUNCS_INIT
10445 MEM_LIBFUNCS_INIT;
10446 #endif
10447 }
10448
10449 /* Rename the TFmode libfuncs available from soft-fp in glibc using
10450 the HPUX conventions. */
10451
10452 static void
10453 ia64_sysv4_init_libfuncs (void)
10454 {
10455 ia64_init_libfuncs ();
10456
10457 /* These functions are not part of the HPUX TFmode interface. We
10458 use them instead of _U_Qfcmp, which doesn't work the way we
10459 expect. */
10460 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
10461 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
10462 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
10463 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
10464 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
10465 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
10466
10467 /* We leave out _U_Qfmin, _U_Qfmax and _U_Qfabs since soft-fp in
10468 glibc doesn't have them. */
10469 }
10470
10471 /* Use soft-fp. */
10472
10473 static void
10474 ia64_soft_fp_init_libfuncs (void)
10475 {
10476 }
10477
10478 static bool
10479 ia64_vms_valid_pointer_mode (enum machine_mode mode)
10480 {
10481 return (mode == SImode || mode == DImode);
10482 }
10483 \f
10484 /* For HPUX, it is illegal to have relocations in shared segments. */
10485
10486 static int
10487 ia64_hpux_reloc_rw_mask (void)
10488 {
10489 return 3;
10490 }
10491
10492 /* For others, relax this so that relocations to local data go in
10493 read-only segments, but we still cannot allow global relocations
10494 in read-only segments. */
10495
10496 static int
10497 ia64_reloc_rw_mask (void)
10498 {
10499 return flag_pic ? 3 : 2;
10500 }
10501
10502 /* Return the section to use for X. The only special thing we do here
10503 is to honor small data. */
10504
10505 static section *
10506 ia64_select_rtx_section (enum machine_mode mode, rtx x,
10507 unsigned HOST_WIDE_INT align)
10508 {
10509 if (GET_MODE_SIZE (mode) > 0
10510 && GET_MODE_SIZE (mode) <= ia64_section_threshold
10511 && !TARGET_NO_SDATA)
10512 return sdata_section;
10513 else
10514 return default_elf_select_rtx_section (mode, x, align);
10515 }
10516
10517 static unsigned int
10518 ia64_section_type_flags (tree decl, const char *name, int reloc)
10519 {
10520 unsigned int flags = 0;
10521
10522 if (strcmp (name, ".sdata") == 0
10523 || strncmp (name, ".sdata.", 7) == 0
10524 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
10525 || strncmp (name, ".sdata2.", 8) == 0
10526 || strncmp (name, ".gnu.linkonce.s2.", 17) == 0
10527 || strcmp (name, ".sbss") == 0
10528 || strncmp (name, ".sbss.", 6) == 0
10529 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
10530 flags = SECTION_SMALL;
10531
10532 #if TARGET_ABI_OPEN_VMS
10533 if (decl && DECL_ATTRIBUTES (decl)
10534 && lookup_attribute ("common_object", DECL_ATTRIBUTES (decl)))
10535 flags |= SECTION_VMS_OVERLAY;
10536 #endif
10537
10538 flags |= default_section_type_flags (decl, name, reloc);
10539 return flags;
10540 }
10541
10542 /* Returns true if FNTYPE (a FUNCTION_TYPE or a METHOD_TYPE) returns a
10543 structure type and the address of that value should be passed
10544 in out0, rather than in r8. */
10545
10546 static bool
10547 ia64_struct_retval_addr_is_first_parm_p (tree fntype)
10548 {
10549 tree ret_type = TREE_TYPE (fntype);
10550
10551 /* The Itanium C++ ABI requires that out0, rather than r8, be used
10552 as the structure return address parameter, if the return value
10553 type has a non-trivial copy constructor or destructor. It is not
10554 clear if this same convention should be used for other
10555 programming languages. Until G++ 3.4, we incorrectly used r8 for
10556 these return values. */
10557 return (abi_version_at_least (2)
10558 && ret_type
10559 && TYPE_MODE (ret_type) == BLKmode
10560 && TREE_ADDRESSABLE (ret_type)
10561 && strcmp (lang_hooks.name, "GNU C++") == 0);
10562 }
10563
10564 /* Output the assembler code for a thunk function. THUNK_DECL is the
10565 declaration for the thunk function itself, FUNCTION is the decl for
10566 the target function. DELTA is an immediate constant offset to be
10567 added to THIS. If VCALL_OFFSET is nonzero, the word at
10568 *(*this + vcall_offset) should be added to THIS. */
10569
10570 static void
10571 ia64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
10572 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
10573 tree function)
10574 {
10575 rtx this_rtx, insn, funexp;
10576 unsigned int this_parmno;
10577 unsigned int this_regno;
10578 rtx delta_rtx;
10579
10580 reload_completed = 1;
10581 epilogue_completed = 1;
10582
10583 /* Set things up as ia64_expand_prologue might. */
10584 last_scratch_gr_reg = 15;
10585
10586 memset (&current_frame_info, 0, sizeof (current_frame_info));
10587 current_frame_info.spill_cfa_off = -16;
10588 current_frame_info.n_input_regs = 1;
10589 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
10590
10591 /* Mark the end of the (empty) prologue. */
10592 emit_note (NOTE_INSN_PROLOGUE_END);
10593
10594 /* Figure out whether "this" will be the first parameter (the
10595 typical case) or the second parameter (as happens when the
10596 virtual function returns certain class objects). */
10597 this_parmno
10598 = (ia64_struct_retval_addr_is_first_parm_p (TREE_TYPE (thunk))
10599 ? 1 : 0);
10600 this_regno = IN_REG (this_parmno);
10601 if (!TARGET_REG_NAMES)
10602 reg_names[this_regno] = ia64_reg_numbers[this_parmno];
10603
10604 this_rtx = gen_rtx_REG (Pmode, this_regno);
10605
10606 /* Apply the constant offset, if required. */
10607 delta_rtx = GEN_INT (delta);
10608 if (TARGET_ILP32)
10609 {
10610 rtx tmp = gen_rtx_REG (ptr_mode, this_regno);
10611 REG_POINTER (tmp) = 1;
10612 if (delta && satisfies_constraint_I (delta_rtx))
10613 {
10614 emit_insn (gen_ptr_extend_plus_imm (this_rtx, tmp, delta_rtx));
10615 delta = 0;
10616 }
10617 else
10618 emit_insn (gen_ptr_extend (this_rtx, tmp));
10619 }
10620 if (delta)
10621 {
10622 if (!satisfies_constraint_I (delta_rtx))
10623 {
10624 rtx tmp = gen_rtx_REG (Pmode, 2);
10625 emit_move_insn (tmp, delta_rtx);
10626 delta_rtx = tmp;
10627 }
10628 emit_insn (gen_adddi3 (this_rtx, this_rtx, delta_rtx));
10629 }
10630
10631 /* Apply the offset from the vtable, if required. */
10632 if (vcall_offset)
10633 {
10634 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
10635 rtx tmp = gen_rtx_REG (Pmode, 2);
10636
10637 if (TARGET_ILP32)
10638 {
10639 rtx t = gen_rtx_REG (ptr_mode, 2);
10640 REG_POINTER (t) = 1;
10641 emit_move_insn (t, gen_rtx_MEM (ptr_mode, this_rtx));
10642 if (satisfies_constraint_I (vcall_offset_rtx))
10643 {
10644 emit_insn (gen_ptr_extend_plus_imm (tmp, t, vcall_offset_rtx));
10645 vcall_offset = 0;
10646 }
10647 else
10648 emit_insn (gen_ptr_extend (tmp, t));
10649 }
10650 else
10651 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
10652
10653 if (vcall_offset)
10654 {
10655 if (!satisfies_constraint_J (vcall_offset_rtx))
10656 {
10657 rtx tmp2 = gen_rtx_REG (Pmode, next_scratch_gr_reg ());
10658 emit_move_insn (tmp2, vcall_offset_rtx);
10659 vcall_offset_rtx = tmp2;
10660 }
10661 emit_insn (gen_adddi3 (tmp, tmp, vcall_offset_rtx));
10662 }
10663
10664 if (TARGET_ILP32)
10665 emit_insn (gen_zero_extendsidi2 (tmp, gen_rtx_MEM (ptr_mode, tmp)));
10666 else
10667 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
10668
10669 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
10670 }
10671
10672 /* Generate a tail call to the target function. */
10673 if (! TREE_USED (function))
10674 {
10675 assemble_external (function);
10676 TREE_USED (function) = 1;
10677 }
10678 funexp = XEXP (DECL_RTL (function), 0);
10679 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
10680 ia64_expand_call (NULL_RTX, funexp, NULL_RTX, 1);
10681 insn = get_last_insn ();
10682 SIBLING_CALL_P (insn) = 1;
10683
10684 /* Code generation for calls relies on splitting. */
10685 reload_completed = 1;
10686 epilogue_completed = 1;
10687 try_split (PATTERN (insn), insn, 0);
10688
10689 emit_barrier ();
10690
10691 /* Run just enough of rest_of_compilation to get the insns emitted.
10692 There's not really enough bulk here to make other passes such as
10693 instruction scheduling worthwhile.  Note that use_thunk calls
10694 assemble_start_function and assemble_end_function. */
10695
10696 insn_locators_alloc ();
10697 emit_all_insn_group_barriers (NULL);
10698 insn = get_insns ();
10699 shorten_branches (insn);
10700 final_start_function (insn, file, 1);
10701 final (insn, file, 1);
10702 final_end_function ();
10703
10704 reload_completed = 0;
10705 epilogue_completed = 0;
10706 }
10707
10708 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
10709
10710 static rtx
10711 ia64_struct_value_rtx (tree fntype,
10712 int incoming ATTRIBUTE_UNUSED)
10713 {
10714 if (TARGET_ABI_OPEN_VMS ||
10715 (fntype && ia64_struct_retval_addr_is_first_parm_p (fntype)))
10716 return NULL_RTX;
10717 return gen_rtx_REG (Pmode, GR_REG (8));
10718 }
10719
10720 static bool
10721 ia64_scalar_mode_supported_p (enum machine_mode mode)
10722 {
10723 switch (mode)
10724 {
10725 case QImode:
10726 case HImode:
10727 case SImode:
10728 case DImode:
10729 case TImode:
10730 return true;
10731
10732 case SFmode:
10733 case DFmode:
10734 case XFmode:
10735 case RFmode:
10736 return true;
10737
10738 case TFmode:
10739 return true;
10740
10741 default:
10742 return false;
10743 }
10744 }
10745
10746 static bool
10747 ia64_vector_mode_supported_p (enum machine_mode mode)
10748 {
10749 switch (mode)
10750 {
10751 case V8QImode:
10752 case V4HImode:
10753 case V2SImode:
10754 return true;
10755
10756 case V2SFmode:
10757 return true;
10758
10759 default:
10760 return false;
10761 }
10762 }
10763
10764 /* Implement the FUNCTION_PROFILER macro. */
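/* As emitted below, _mcount receives the caller's ar.pfs in out0 (via
   the alloc), the gp in out1, the return address (b0) in out2, and
   the address of the per-call-site counter label in out3 (or r0 when
   NO_PROFILE_COUNTERS).  */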
10765
10766 void
10767 ia64_output_function_profiler (FILE *file, int labelno)
10768 {
10769 bool indirect_call;
10770
10771 /* If the function needs a static chain and the static chain
10772 register is r15, we use an indirect call so as to bypass
10773 the PLT stub in case the executable is dynamically linked,
10774 because the stub clobbers r15 as per 5.3.6 of the psABI.
10775 We don't need to do that in non-canonical PIC mode. */
10776
10777 if (cfun->static_chain_decl && !TARGET_NO_PIC && !TARGET_AUTO_PIC)
10778 {
10779 gcc_assert (STATIC_CHAIN_REGNUM == 15);
10780 indirect_call = true;
10781 }
10782 else
10783 indirect_call = false;
10784
10785 if (TARGET_GNU_AS)
10786 fputs ("\t.prologue 4, r40\n", file);
10787 else
10788 fputs ("\t.prologue\n\t.save ar.pfs, r40\n", file);
10789 fputs ("\talloc out0 = ar.pfs, 8, 0, 4, 0\n", file);
10790
10791 if (NO_PROFILE_COUNTERS)
10792 fputs ("\tmov out3 = r0\n", file);
10793 else
10794 {
10795 char buf[20];
10796 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
10797
10798 if (TARGET_AUTO_PIC)
10799 fputs ("\tmovl out3 = @gprel(", file);
10800 else
10801 fputs ("\taddl out3 = @ltoff(", file);
10802 assemble_name (file, buf);
10803 if (TARGET_AUTO_PIC)
10804 fputs (")\n", file);
10805 else
10806 fputs ("), r1\n", file);
10807 }
10808
10809 if (indirect_call)
10810 fputs ("\taddl r14 = @ltoff(@fptr(_mcount)), r1\n", file);
10811 fputs ("\t;;\n", file);
10812
10813 fputs ("\t.save rp, r42\n", file);
10814 fputs ("\tmov out2 = b0\n", file);
10815 if (indirect_call)
10816 fputs ("\tld8 r14 = [r14]\n\t;;\n", file);
10817 fputs ("\t.body\n", file);
10818 fputs ("\tmov out1 = r1\n", file);
10819 if (indirect_call)
10820 {
10821 fputs ("\tld8 r16 = [r14], 8\n\t;;\n", file);
10822 fputs ("\tmov b6 = r16\n", file);
10823 fputs ("\tld8 r1 = [r14]\n", file);
10824 fputs ("\tbr.call.sptk.many b0 = b6\n\t;;\n", file);
10825 }
10826 else
10827 fputs ("\tbr.call.sptk.many b0 = _mcount\n\t;;\n", file);
10828 }
10829
10830 static GTY(()) rtx mcount_func_rtx;
10831 static rtx
10832 gen_mcount_func_rtx (void)
10833 {
10834 if (!mcount_func_rtx)
10835 mcount_func_rtx = init_one_libfunc ("_mcount");
10836 return mcount_func_rtx;
10837 }
10838
10839 void
10840 ia64_profile_hook (int labelno)
10841 {
10842 rtx label, ip;
10843
10844 if (NO_PROFILE_COUNTERS)
10845 label = const0_rtx;
10846 else
10847 {
10848 char buf[30];
10849 const char *label_name;
10850 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
10851 label_name = (*targetm.strip_name_encoding) (ggc_strdup (buf));
10852 label = gen_rtx_SYMBOL_REF (Pmode, label_name);
10853 SYMBOL_REF_FLAGS (label) = SYMBOL_FLAG_LOCAL;
10854 }
10855 ip = gen_reg_rtx (Pmode);
10856 emit_insn (gen_ip_value (ip));
10857 emit_library_call (gen_mcount_func_rtx (), LCT_NORMAL,
10858 VOIDmode, 3,
10859 gen_rtx_REG (Pmode, BR_REG (0)), Pmode,
10860 ip, Pmode,
10861 label, Pmode);
10862 }
10863
10864 /* Return the mangling of TYPE if it is an extended fundamental type. */
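/* For instance, on Linux __float128 mangles as "g" and __float80 as
   "e"; HP-UX needs "u9__float80" instead, because "e" there already
   denotes its 128-bit long double.  */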
10865
10866 static const char *
10867 ia64_mangle_type (const_tree type)
10868 {
10869 type = TYPE_MAIN_VARIANT (type);
10870
10871 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
10872 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
10873 return NULL;
10874
10875 /* On HP-UX, "long double" is mangled as "e" so __float128 is
10876 mangled as "e". */
10877 if (!TARGET_HPUX && TYPE_MODE (type) == TFmode)
10878 return "g";
10879 /* On HP-UX, "e" is not available as a mangling of __float80 so use
10880 an extended mangling. Elsewhere, "e" is available since long
10881 double is 80 bits. */
10882 if (TYPE_MODE (type) == XFmode)
10883 return TARGET_HPUX ? "u9__float80" : "e";
10884 if (TYPE_MODE (type) == RFmode)
10885 return "u7__fpreg";
10886 return NULL;
10887 }
10888
10889 /* Return the diagnostic message string if conversion from FROMTYPE to
10890 TOTYPE is not allowed, NULL otherwise. */
static const char *
ia64_invalid_conversion (const_tree fromtype, const_tree totype)
{
  /* Reject nontrivial conversion to or from __fpreg.  */
  if (TYPE_MODE (fromtype) == RFmode
      && TYPE_MODE (totype) != RFmode
      && TYPE_MODE (totype) != VOIDmode)
    return N_("invalid conversion from %<__fpreg%>");
  if (TYPE_MODE (totype) == RFmode
      && TYPE_MODE (fromtype) != RFmode)
    return N_("invalid conversion to %<__fpreg%>");
  return NULL;
}

/* Return the diagnostic message string if the unary operation OP is
   not permitted on TYPE, NULL otherwise.  */
static const char *
ia64_invalid_unary_op (int op, const_tree type)
{
  /* Reject operations on __fpreg other than unary + or &.  */
  if (TYPE_MODE (type) == RFmode
      && op != CONVERT_EXPR
      && op != ADDR_EXPR)
    return N_("invalid operation on %<__fpreg%>");
  return NULL;
}

/* Return the diagnostic message string if the binary operation OP is
   not permitted on TYPE1 and TYPE2, NULL otherwise.  */
static const char *
ia64_invalid_binary_op (int op ATTRIBUTE_UNUSED, const_tree type1,
                        const_tree type2)
{
  /* Reject operations on __fpreg.  */
  if (TYPE_MODE (type1) == RFmode || TYPE_MODE (type2) == RFmode)
    return N_("invalid operation on %<__fpreg%>");
  return NULL;
}

/* Implement TARGET_OPTION_DEFAULT_PARAMS.  */
static void
ia64_option_default_params (void)
{
  /* Let the scheduler form additional regions.  */
  set_default_param_value (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS, 2);

  /* Set the default values for cache-related parameters.  */
  set_default_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6);
  set_default_param_value (PARAM_L1_CACHE_LINE_SIZE, 32);

  set_default_param_value (PARAM_SCHED_MEM_TRUE_DEP_COST, 4);
}

/* HP-UX version_id attribute.
   For object foo, if the version_id is set to 1234, put out an alias
   of '.alias foo "foo{1234}"'.  We can't use "foo{1234}" in anything
   other than an alias statement because it is an illegal symbol name.  */
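/* For example, a (hypothetical) declaration such as
     extern int foo (void) __attribute__ ((version_id ("1234")));
   requests the "foo{1234}" alias described above.  */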

static tree
ia64_handle_version_id_attribute (tree *node ATTRIBUTE_UNUSED,
                                  tree name ATTRIBUTE_UNUSED,
                                  tree args,
                                  int flags ATTRIBUTE_UNUSED,
                                  bool *no_add_attrs)
{
  tree arg = TREE_VALUE (args);

  if (TREE_CODE (arg) != STRING_CST)
    {
      error ("version attribute is not a string");
      *no_add_attrs = true;
      return NULL_TREE;
    }
  return NULL_TREE;
}

/* Target hook for c_mode_for_suffix.  */

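/* The 'q' suffix selects TFmode (__float128) constants and the 'w'
   suffix selects XFmode (__float80) constants.  */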
static enum machine_mode
ia64_c_mode_for_suffix (char suffix)
{
  if (suffix == 'q')
    return TFmode;
  if (suffix == 'w')
    return XFmode;

  return VOIDmode;
}

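/* Implement TARGET_PROMOTE_FUNCTION_MODE.  The default promotions are
   used everywhere except OpenVMS, whose calling standard is handled
   explicitly below.  */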
static enum machine_mode
ia64_promote_function_mode (const_tree type,
                            enum machine_mode mode,
                            int *punsignedp,
                            const_tree funtype,
                            int for_return)
{
  /* Special processing required for OpenVMS ...  */

  if (!TARGET_ABI_OPEN_VMS)
    return default_promote_function_mode (type, mode, punsignedp, funtype,
                                          for_return);

  /* HP OpenVMS Calling Standard dated June, 2004, that describes
     HP OpenVMS I64 Version 8.2EFT,
     chapter 4 "OpenVMS I64 Conventions"
     section 4.7 "Procedure Linkage"
     subsection 4.7.5.2, "Normal Register Parameters"

     "Unsigned integral (except unsigned 32-bit), set, and VAX floating-point
     values passed in registers are zero-filled; signed integral values as
     well as unsigned 32-bit integral values are sign-extended to 64 bits.
     For all other types passed in the general registers, unused bits are
     undefined."  */

  if (for_return != 2
      && GET_MODE_CLASS (mode) == MODE_INT
      && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
    {
      if (mode == SImode)
        *punsignedp = 0;
      return DImode;
    }
  else
    return promote_mode (type, mode, punsignedp);
}

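/* A cached CONST_DOUBLE for 0.5 in DFmode, built on first use.  */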
static GTY(()) rtx ia64_dconst_0_5_rtx;

rtx
ia64_dconst_0_5 (void)
{
  if (! ia64_dconst_0_5_rtx)
    {
      REAL_VALUE_TYPE rv;
      real_from_string (&rv, "0.5");
      ia64_dconst_0_5_rtx = const_double_from_real_value (rv, DFmode);
    }
  return ia64_dconst_0_5_rtx;
}

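/* A cached CONST_DOUBLE for 0.375 in DFmode, built on first use.  */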
static GTY(()) rtx ia64_dconst_0_375_rtx;

rtx
ia64_dconst_0_375 (void)
{
  if (! ia64_dconst_0_375_rtx)
    {
      REAL_VALUE_TYPE rv;
      real_from_string (&rv, "0.375");
      ia64_dconst_0_375_rtx = const_double_from_real_value (rv, DFmode);
    }
  return ia64_dconst_0_375_rtx;
}

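/* Return the raw (untyped) mode used for register REGNO: floating-point
   registers are always handled in XFmode, everything else uses the
   default.  */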
static enum machine_mode
ia64_get_reg_raw_mode (int regno)
{
  if (FR_REGNO_P (regno))
    return XFmode;
  return default_get_reg_raw_mode (regno);
}

/* Always default to .text section until HP-UX linker is fixed.  */

ATTRIBUTE_UNUSED static section *
ia64_hpux_function_section (tree decl ATTRIBUTE_UNUSED,
                            enum node_frequency freq ATTRIBUTE_UNUSED,
                            bool startup ATTRIBUTE_UNUSED,
                            bool exit ATTRIBUTE_UNUSED)
{
  return NULL;
}

#include "gt-ia64.h"