ia64.c (ia64_function_arg_1): Move code around.
1 /* Definitions of target machine for GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008,
3 2009, 2010, 2011
4 Free Software Foundation, Inc.
5 Contributed by James E. Wilson <wilson@cygnus.com> and
6 David Mosberger <davidm@hpl.hp.com>.
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3, or (at your option)
13 any later version.
14
15 GCC is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "output.h"
35 #include "insn-attr.h"
36 #include "flags.h"
37 #include "recog.h"
38 #include "expr.h"
39 #include "optabs.h"
40 #include "except.h"
41 #include "function.h"
42 #include "ggc.h"
43 #include "basic-block.h"
44 #include "libfuncs.h"
45 #include "diagnostic-core.h"
46 #include "sched-int.h"
47 #include "timevar.h"
48 #include "target.h"
49 #include "target-def.h"
50 #include "tm_p.h"
51 #include "hashtab.h"
52 #include "langhooks.h"
53 #include "cfglayout.h"
54 #include "gimple.h"
55 #include "intl.h"
56 #include "df.h"
57 #include "debug.h"
58 #include "params.h"
59 #include "dbgcnt.h"
60 #include "tm-constrs.h"
61 #include "sel-sched.h"
62 #include "reload.h"
63 #include "dwarf2out.h"
64 #include "opts.h"
65
66 /* This is used for communication between ASM_OUTPUT_LABEL and
67 ASM_OUTPUT_LABELREF. */
68 int ia64_asm_output_label = 0;
69
70 /* Register names for ia64_expand_prologue. */
71 static const char * const ia64_reg_numbers[96] =
72 { "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
73 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
74 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
75 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
76 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
77 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
78 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
79 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
80 "r96", "r97", "r98", "r99", "r100","r101","r102","r103",
81 "r104","r105","r106","r107","r108","r109","r110","r111",
82 "r112","r113","r114","r115","r116","r117","r118","r119",
83 "r120","r121","r122","r123","r124","r125","r126","r127"};
84
85 /* ??? These strings could be shared with REGISTER_NAMES. */
86 static const char * const ia64_input_reg_names[8] =
87 { "in0", "in1", "in2", "in3", "in4", "in5", "in6", "in7" };
88
89 /* ??? These strings could be shared with REGISTER_NAMES. */
90 static const char * const ia64_local_reg_names[80] =
91 { "loc0", "loc1", "loc2", "loc3", "loc4", "loc5", "loc6", "loc7",
92 "loc8", "loc9", "loc10","loc11","loc12","loc13","loc14","loc15",
93 "loc16","loc17","loc18","loc19","loc20","loc21","loc22","loc23",
94 "loc24","loc25","loc26","loc27","loc28","loc29","loc30","loc31",
95 "loc32","loc33","loc34","loc35","loc36","loc37","loc38","loc39",
96 "loc40","loc41","loc42","loc43","loc44","loc45","loc46","loc47",
97 "loc48","loc49","loc50","loc51","loc52","loc53","loc54","loc55",
98 "loc56","loc57","loc58","loc59","loc60","loc61","loc62","loc63",
99 "loc64","loc65","loc66","loc67","loc68","loc69","loc70","loc71",
100 "loc72","loc73","loc74","loc75","loc76","loc77","loc78","loc79" };
101
102 /* ??? These strings could be shared with REGISTER_NAMES. */
103 static const char * const ia64_output_reg_names[8] =
104 { "out0", "out1", "out2", "out3", "out4", "out5", "out6", "out7" };
105
106 /* Variables which are this size or smaller are put in the sdata/sbss
107 sections. */
108
109 unsigned int ia64_section_threshold;
110
111 /* The following variable is used by the DFA insn scheduler. The value is
112 TRUE if we do insn bundling instead of insn scheduling. */
113 int bundling_p = 0;
114
115 enum ia64_frame_regs
116 {
117 reg_fp,
118 reg_save_b0,
119 reg_save_pr,
120 reg_save_ar_pfs,
121 reg_save_ar_unat,
122 reg_save_ar_lc,
123 reg_save_gp,
124 number_of_ia64_frame_regs
125 };
126
127 /* Structure to be filled in by ia64_compute_frame_size with register
128 save masks and offsets for the current function. */
129
130 struct ia64_frame_info
131 {
132 HOST_WIDE_INT total_size; /* size of the stack frame, not including
133 the caller's scratch area. */
134 HOST_WIDE_INT spill_cfa_off; /* top of the reg spill area from the cfa. */
135 HOST_WIDE_INT spill_size; /* size of the gr/br/fr spill area. */
136 HOST_WIDE_INT extra_spill_size; /* size of spill area for others. */
137 HARD_REG_SET mask; /* mask of saved registers. */
138 unsigned int gr_used_mask; /* mask of registers in use as gr spill
139 registers or long-term scratches. */
140 int n_spilled; /* number of spilled registers. */
141 int r[number_of_ia64_frame_regs]; /* Frame related registers. */
142 int n_input_regs; /* number of input registers used. */
143 int n_local_regs; /* number of local registers used. */
144 int n_output_regs; /* number of output registers used. */
145 int n_rotate_regs; /* number of rotating registers used. */
146
147 char need_regstk; /* true if a .regstk directive is needed. */
148 char initialized; /* true if the data is finalized. */
149 };
150
151 /* Current frame information calculated by ia64_compute_frame_size. */
152 static struct ia64_frame_info current_frame_info;
153 /* The actual registers that are emitted. */
154 static int emitted_frame_related_regs[number_of_ia64_frame_regs];
155 \f
156 static int ia64_first_cycle_multipass_dfa_lookahead (void);
157 static void ia64_dependencies_evaluation_hook (rtx, rtx);
158 static void ia64_init_dfa_pre_cycle_insn (void);
159 static rtx ia64_dfa_pre_cycle_insn (void);
160 static int ia64_first_cycle_multipass_dfa_lookahead_guard (rtx);
161 static bool ia64_first_cycle_multipass_dfa_lookahead_guard_spec (const_rtx);
162 static int ia64_dfa_new_cycle (FILE *, int, rtx, int, int, int *);
163 static void ia64_h_i_d_extended (void);
164 static void * ia64_alloc_sched_context (void);
165 static void ia64_init_sched_context (void *, bool);
166 static void ia64_set_sched_context (void *);
167 static void ia64_clear_sched_context (void *);
168 static void ia64_free_sched_context (void *);
169 static int ia64_mode_to_int (enum machine_mode);
170 static void ia64_set_sched_flags (spec_info_t);
171 static ds_t ia64_get_insn_spec_ds (rtx);
172 static ds_t ia64_get_insn_checked_ds (rtx);
173 static bool ia64_skip_rtx_p (const_rtx);
174 static int ia64_speculate_insn (rtx, ds_t, rtx *);
175 static bool ia64_needs_block_p (int);
176 static rtx ia64_gen_spec_check (rtx, rtx, ds_t);
177 static int ia64_spec_check_p (rtx);
178 static int ia64_spec_check_src_p (rtx);
179 static rtx gen_tls_get_addr (void);
180 static rtx gen_thread_pointer (void);
181 static int find_gr_spill (enum ia64_frame_regs, int);
182 static int next_scratch_gr_reg (void);
183 static void mark_reg_gr_used_mask (rtx, void *);
184 static void ia64_compute_frame_size (HOST_WIDE_INT);
185 static void setup_spill_pointers (int, rtx, HOST_WIDE_INT);
186 static void finish_spill_pointers (void);
187 static rtx spill_restore_mem (rtx, HOST_WIDE_INT);
188 static void do_spill (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT, rtx);
189 static void do_restore (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT);
190 static rtx gen_movdi_x (rtx, rtx, rtx);
191 static rtx gen_fr_spill_x (rtx, rtx, rtx);
192 static rtx gen_fr_restore_x (rtx, rtx, rtx);
193
194 static void ia64_option_override (void);
195 static bool ia64_can_eliminate (const int, const int);
196 static enum machine_mode hfa_element_mode (const_tree, bool);
197 static void ia64_setup_incoming_varargs (cumulative_args_t, enum machine_mode,
198 tree, int *, int);
199 static int ia64_arg_partial_bytes (cumulative_args_t, enum machine_mode,
200 tree, bool);
201 static rtx ia64_function_arg_1 (cumulative_args_t, enum machine_mode,
202 const_tree, bool, bool);
203 static rtx ia64_function_arg (cumulative_args_t, enum machine_mode,
204 const_tree, bool);
205 static rtx ia64_function_incoming_arg (cumulative_args_t,
206 enum machine_mode, const_tree, bool);
207 static void ia64_function_arg_advance (cumulative_args_t, enum machine_mode,
208 const_tree, bool);
209 static unsigned int ia64_function_arg_boundary (enum machine_mode,
210 const_tree);
211 static bool ia64_function_ok_for_sibcall (tree, tree);
212 static bool ia64_return_in_memory (const_tree, const_tree);
213 static rtx ia64_function_value (const_tree, const_tree, bool);
214 static rtx ia64_libcall_value (enum machine_mode, const_rtx);
215 static bool ia64_function_value_regno_p (const unsigned int);
216 static int ia64_register_move_cost (enum machine_mode, reg_class_t,
217 reg_class_t);
218 static int ia64_memory_move_cost (enum machine_mode mode, reg_class_t,
219 bool);
220 static bool ia64_rtx_costs (rtx, int, int, int, int *, bool);
221 static int ia64_unspec_may_trap_p (const_rtx, unsigned);
222 static void fix_range (const char *);
223 static struct machine_function * ia64_init_machine_status (void);
224 static void emit_insn_group_barriers (FILE *);
225 static void emit_all_insn_group_barriers (FILE *);
226 static void final_emit_insn_group_barriers (FILE *);
227 static void emit_predicate_relation_info (void);
228 static void ia64_reorg (void);
229 static bool ia64_in_small_data_p (const_tree);
230 static void process_epilogue (FILE *, rtx, bool, bool);
231
232 static bool ia64_assemble_integer (rtx, unsigned int, int);
233 static void ia64_output_function_prologue (FILE *, HOST_WIDE_INT);
234 static void ia64_output_function_epilogue (FILE *, HOST_WIDE_INT);
235 static void ia64_output_function_end_prologue (FILE *);
236
237 static void ia64_print_operand (FILE *, rtx, int);
238 static void ia64_print_operand_address (FILE *, rtx);
239 static bool ia64_print_operand_punct_valid_p (unsigned char code);
240
241 static int ia64_issue_rate (void);
242 static int ia64_adjust_cost_2 (rtx, int, rtx, int, dw_t);
243 static void ia64_sched_init (FILE *, int, int);
244 static void ia64_sched_init_global (FILE *, int, int);
245 static void ia64_sched_finish_global (FILE *, int);
246 static void ia64_sched_finish (FILE *, int);
247 static int ia64_dfa_sched_reorder (FILE *, int, rtx *, int *, int, int);
248 static int ia64_sched_reorder (FILE *, int, rtx *, int *, int);
249 static int ia64_sched_reorder2 (FILE *, int, rtx *, int *, int);
250 static int ia64_variable_issue (FILE *, int, rtx, int);
251
252 static void ia64_asm_unwind_emit (FILE *, rtx);
253 static void ia64_asm_emit_except_personality (rtx);
254 static void ia64_asm_init_sections (void);
255
256 static enum unwind_info_type ia64_debug_unwind_info (void);
257
258 static struct bundle_state *get_free_bundle_state (void);
259 static void free_bundle_state (struct bundle_state *);
260 static void initiate_bundle_states (void);
261 static void finish_bundle_states (void);
262 static unsigned bundle_state_hash (const void *);
263 static int bundle_state_eq_p (const void *, const void *);
264 static int insert_bundle_state (struct bundle_state *);
265 static void initiate_bundle_state_table (void);
266 static void finish_bundle_state_table (void);
267 static int try_issue_nops (struct bundle_state *, int);
268 static int try_issue_insn (struct bundle_state *, rtx);
269 static void issue_nops_and_insn (struct bundle_state *, int, rtx, int, int);
270 static int get_max_pos (state_t);
271 static int get_template (state_t, int);
272
273 static rtx get_next_important_insn (rtx, rtx);
274 static bool important_for_bundling_p (rtx);
275 static void bundling (FILE *, int, rtx, rtx);
276
277 static void ia64_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
278 HOST_WIDE_INT, tree);
279 static void ia64_file_start (void);
280 static void ia64_globalize_decl_name (FILE *, tree);
281
282 static int ia64_hpux_reloc_rw_mask (void) ATTRIBUTE_UNUSED;
283 static int ia64_reloc_rw_mask (void) ATTRIBUTE_UNUSED;
284 static section *ia64_select_rtx_section (enum machine_mode, rtx,
285 unsigned HOST_WIDE_INT);
286 static void ia64_output_dwarf_dtprel (FILE *, int, rtx)
287 ATTRIBUTE_UNUSED;
288 static unsigned int ia64_section_type_flags (tree, const char *, int);
289 static void ia64_init_libfuncs (void)
290 ATTRIBUTE_UNUSED;
291 static void ia64_hpux_init_libfuncs (void)
292 ATTRIBUTE_UNUSED;
293 static void ia64_sysv4_init_libfuncs (void)
294 ATTRIBUTE_UNUSED;
295 static void ia64_vms_init_libfuncs (void)
296 ATTRIBUTE_UNUSED;
297 static void ia64_soft_fp_init_libfuncs (void)
298 ATTRIBUTE_UNUSED;
299 static bool ia64_vms_valid_pointer_mode (enum machine_mode mode)
300 ATTRIBUTE_UNUSED;
301 static tree ia64_vms_common_object_attribute (tree *, tree, tree, int, bool *)
302 ATTRIBUTE_UNUSED;
303
304 static tree ia64_handle_model_attribute (tree *, tree, tree, int, bool *);
305 static tree ia64_handle_version_id_attribute (tree *, tree, tree, int, bool *);
306 static void ia64_encode_section_info (tree, rtx, int);
307 static rtx ia64_struct_value_rtx (tree, int);
308 static tree ia64_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
309 static bool ia64_scalar_mode_supported_p (enum machine_mode mode);
310 static bool ia64_vector_mode_supported_p (enum machine_mode mode);
311 static bool ia64_legitimate_constant_p (enum machine_mode, rtx);
312 static bool ia64_legitimate_address_p (enum machine_mode, rtx, bool);
313 static bool ia64_cannot_force_const_mem (enum machine_mode, rtx);
314 static const char *ia64_mangle_type (const_tree);
315 static const char *ia64_invalid_conversion (const_tree, const_tree);
316 static const char *ia64_invalid_unary_op (int, const_tree);
317 static const char *ia64_invalid_binary_op (int, const_tree, const_tree);
318 static enum machine_mode ia64_c_mode_for_suffix (char);
319 static void ia64_trampoline_init (rtx, tree, rtx);
320 static void ia64_override_options_after_change (void);
321
322 static tree ia64_builtin_decl (unsigned, bool);
323
324 static reg_class_t ia64_preferred_reload_class (rtx, reg_class_t);
325 static enum machine_mode ia64_get_reg_raw_mode (int regno);
326 static section * ia64_hpux_function_section (tree, enum node_frequency,
327 bool, bool);
328
329 static bool ia64_vectorize_vec_perm_const_ok (enum machine_mode vmode,
330 const unsigned char *sel);
331
332 #define MAX_VECT_LEN 8
333
334 struct expand_vec_perm_d
335 {
336 rtx target, op0, op1;
337 unsigned char perm[MAX_VECT_LEN];
338 enum machine_mode vmode;
339 unsigned char nelt;
340 bool one_operand_p;
341 bool testing_p;
342 };
343
344 static bool ia64_expand_vec_perm_const_1 (struct expand_vec_perm_d *d);
345
346 \f
347 /* Table of valid machine attributes. */
348 static const struct attribute_spec ia64_attribute_table[] =
349 {
350 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
351 affects_type_identity } */
352 { "syscall_linkage", 0, 0, false, true, true, NULL, false },
353 { "model", 1, 1, true, false, false, ia64_handle_model_attribute,
354 false },
355 #if TARGET_ABI_OPEN_VMS
356 { "common_object", 1, 1, true, false, false,
357 ia64_vms_common_object_attribute, false },
358 #endif
359 { "version_id", 1, 1, true, false, false,
360 ia64_handle_version_id_attribute, false },
361 { NULL, 0, 0, false, false, false, NULL, false }
362 };
363
364 /* Initialize the GCC target structure. */
365 #undef TARGET_ATTRIBUTE_TABLE
366 #define TARGET_ATTRIBUTE_TABLE ia64_attribute_table
367
368 #undef TARGET_INIT_BUILTINS
369 #define TARGET_INIT_BUILTINS ia64_init_builtins
370
371 #undef TARGET_EXPAND_BUILTIN
372 #define TARGET_EXPAND_BUILTIN ia64_expand_builtin
373
374 #undef TARGET_BUILTIN_DECL
375 #define TARGET_BUILTIN_DECL ia64_builtin_decl
376
377 #undef TARGET_ASM_BYTE_OP
378 #define TARGET_ASM_BYTE_OP "\tdata1\t"
379 #undef TARGET_ASM_ALIGNED_HI_OP
380 #define TARGET_ASM_ALIGNED_HI_OP "\tdata2\t"
381 #undef TARGET_ASM_ALIGNED_SI_OP
382 #define TARGET_ASM_ALIGNED_SI_OP "\tdata4\t"
383 #undef TARGET_ASM_ALIGNED_DI_OP
384 #define TARGET_ASM_ALIGNED_DI_OP "\tdata8\t"
385 #undef TARGET_ASM_UNALIGNED_HI_OP
386 #define TARGET_ASM_UNALIGNED_HI_OP "\tdata2.ua\t"
387 #undef TARGET_ASM_UNALIGNED_SI_OP
388 #define TARGET_ASM_UNALIGNED_SI_OP "\tdata4.ua\t"
389 #undef TARGET_ASM_UNALIGNED_DI_OP
390 #define TARGET_ASM_UNALIGNED_DI_OP "\tdata8.ua\t"
391 #undef TARGET_ASM_INTEGER
392 #define TARGET_ASM_INTEGER ia64_assemble_integer
393
394 #undef TARGET_OPTION_OVERRIDE
395 #define TARGET_OPTION_OVERRIDE ia64_option_override
396
397 #undef TARGET_ASM_FUNCTION_PROLOGUE
398 #define TARGET_ASM_FUNCTION_PROLOGUE ia64_output_function_prologue
399 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
400 #define TARGET_ASM_FUNCTION_END_PROLOGUE ia64_output_function_end_prologue
401 #undef TARGET_ASM_FUNCTION_EPILOGUE
402 #define TARGET_ASM_FUNCTION_EPILOGUE ia64_output_function_epilogue
403
404 #undef TARGET_PRINT_OPERAND
405 #define TARGET_PRINT_OPERAND ia64_print_operand
406 #undef TARGET_PRINT_OPERAND_ADDRESS
407 #define TARGET_PRINT_OPERAND_ADDRESS ia64_print_operand_address
408 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
409 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P ia64_print_operand_punct_valid_p
410
411 #undef TARGET_IN_SMALL_DATA_P
412 #define TARGET_IN_SMALL_DATA_P ia64_in_small_data_p
413
414 #undef TARGET_SCHED_ADJUST_COST_2
415 #define TARGET_SCHED_ADJUST_COST_2 ia64_adjust_cost_2
416 #undef TARGET_SCHED_ISSUE_RATE
417 #define TARGET_SCHED_ISSUE_RATE ia64_issue_rate
418 #undef TARGET_SCHED_VARIABLE_ISSUE
419 #define TARGET_SCHED_VARIABLE_ISSUE ia64_variable_issue
420 #undef TARGET_SCHED_INIT
421 #define TARGET_SCHED_INIT ia64_sched_init
422 #undef TARGET_SCHED_FINISH
423 #define TARGET_SCHED_FINISH ia64_sched_finish
424 #undef TARGET_SCHED_INIT_GLOBAL
425 #define TARGET_SCHED_INIT_GLOBAL ia64_sched_init_global
426 #undef TARGET_SCHED_FINISH_GLOBAL
427 #define TARGET_SCHED_FINISH_GLOBAL ia64_sched_finish_global
428 #undef TARGET_SCHED_REORDER
429 #define TARGET_SCHED_REORDER ia64_sched_reorder
430 #undef TARGET_SCHED_REORDER2
431 #define TARGET_SCHED_REORDER2 ia64_sched_reorder2
432
433 #undef TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK
434 #define TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK ia64_dependencies_evaluation_hook
435
436 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
437 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD ia64_first_cycle_multipass_dfa_lookahead
438
439 #undef TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN
440 #define TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN ia64_init_dfa_pre_cycle_insn
441 #undef TARGET_SCHED_DFA_PRE_CYCLE_INSN
442 #define TARGET_SCHED_DFA_PRE_CYCLE_INSN ia64_dfa_pre_cycle_insn
443
444 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
445 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD\
446 ia64_first_cycle_multipass_dfa_lookahead_guard
447
448 #undef TARGET_SCHED_DFA_NEW_CYCLE
449 #define TARGET_SCHED_DFA_NEW_CYCLE ia64_dfa_new_cycle
450
451 #undef TARGET_SCHED_H_I_D_EXTENDED
452 #define TARGET_SCHED_H_I_D_EXTENDED ia64_h_i_d_extended
453
454 #undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
455 #define TARGET_SCHED_ALLOC_SCHED_CONTEXT ia64_alloc_sched_context
456
457 #undef TARGET_SCHED_INIT_SCHED_CONTEXT
458 #define TARGET_SCHED_INIT_SCHED_CONTEXT ia64_init_sched_context
459
460 #undef TARGET_SCHED_SET_SCHED_CONTEXT
461 #define TARGET_SCHED_SET_SCHED_CONTEXT ia64_set_sched_context
462
463 #undef TARGET_SCHED_CLEAR_SCHED_CONTEXT
464 #define TARGET_SCHED_CLEAR_SCHED_CONTEXT ia64_clear_sched_context
465
466 #undef TARGET_SCHED_FREE_SCHED_CONTEXT
467 #define TARGET_SCHED_FREE_SCHED_CONTEXT ia64_free_sched_context
468
469 #undef TARGET_SCHED_SET_SCHED_FLAGS
470 #define TARGET_SCHED_SET_SCHED_FLAGS ia64_set_sched_flags
471
472 #undef TARGET_SCHED_GET_INSN_SPEC_DS
473 #define TARGET_SCHED_GET_INSN_SPEC_DS ia64_get_insn_spec_ds
474
475 #undef TARGET_SCHED_GET_INSN_CHECKED_DS
476 #define TARGET_SCHED_GET_INSN_CHECKED_DS ia64_get_insn_checked_ds
477
478 #undef TARGET_SCHED_SPECULATE_INSN
479 #define TARGET_SCHED_SPECULATE_INSN ia64_speculate_insn
480
481 #undef TARGET_SCHED_NEEDS_BLOCK_P
482 #define TARGET_SCHED_NEEDS_BLOCK_P ia64_needs_block_p
483
484 #undef TARGET_SCHED_GEN_SPEC_CHECK
485 #define TARGET_SCHED_GEN_SPEC_CHECK ia64_gen_spec_check
486
487 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD_SPEC
488 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD_SPEC\
489 ia64_first_cycle_multipass_dfa_lookahead_guard_spec
490
491 #undef TARGET_SCHED_SKIP_RTX_P
492 #define TARGET_SCHED_SKIP_RTX_P ia64_skip_rtx_p
493
494 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
495 #define TARGET_FUNCTION_OK_FOR_SIBCALL ia64_function_ok_for_sibcall
496 #undef TARGET_ARG_PARTIAL_BYTES
497 #define TARGET_ARG_PARTIAL_BYTES ia64_arg_partial_bytes
498 #undef TARGET_FUNCTION_ARG
499 #define TARGET_FUNCTION_ARG ia64_function_arg
500 #undef TARGET_FUNCTION_INCOMING_ARG
501 #define TARGET_FUNCTION_INCOMING_ARG ia64_function_incoming_arg
502 #undef TARGET_FUNCTION_ARG_ADVANCE
503 #define TARGET_FUNCTION_ARG_ADVANCE ia64_function_arg_advance
504 #undef TARGET_FUNCTION_ARG_BOUNDARY
505 #define TARGET_FUNCTION_ARG_BOUNDARY ia64_function_arg_boundary
506
507 #undef TARGET_ASM_OUTPUT_MI_THUNK
508 #define TARGET_ASM_OUTPUT_MI_THUNK ia64_output_mi_thunk
509 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
510 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
511
512 #undef TARGET_ASM_FILE_START
513 #define TARGET_ASM_FILE_START ia64_file_start
514
515 #undef TARGET_ASM_GLOBALIZE_DECL_NAME
516 #define TARGET_ASM_GLOBALIZE_DECL_NAME ia64_globalize_decl_name
517
518 #undef TARGET_REGISTER_MOVE_COST
519 #define TARGET_REGISTER_MOVE_COST ia64_register_move_cost
520 #undef TARGET_MEMORY_MOVE_COST
521 #define TARGET_MEMORY_MOVE_COST ia64_memory_move_cost
522 #undef TARGET_RTX_COSTS
523 #define TARGET_RTX_COSTS ia64_rtx_costs
524 #undef TARGET_ADDRESS_COST
525 #define TARGET_ADDRESS_COST hook_int_rtx_bool_0
526
527 #undef TARGET_UNSPEC_MAY_TRAP_P
528 #define TARGET_UNSPEC_MAY_TRAP_P ia64_unspec_may_trap_p
529
530 #undef TARGET_MACHINE_DEPENDENT_REORG
531 #define TARGET_MACHINE_DEPENDENT_REORG ia64_reorg
532
533 #undef TARGET_ENCODE_SECTION_INFO
534 #define TARGET_ENCODE_SECTION_INFO ia64_encode_section_info
535
536 #undef TARGET_SECTION_TYPE_FLAGS
537 #define TARGET_SECTION_TYPE_FLAGS ia64_section_type_flags
538
539 #ifdef HAVE_AS_TLS
540 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
541 #define TARGET_ASM_OUTPUT_DWARF_DTPREL ia64_output_dwarf_dtprel
542 #endif
543
544 /* ??? Investigate. */
545 #if 0
546 #undef TARGET_PROMOTE_PROTOTYPES
547 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
548 #endif
549
550 #undef TARGET_FUNCTION_VALUE
551 #define TARGET_FUNCTION_VALUE ia64_function_value
552 #undef TARGET_LIBCALL_VALUE
553 #define TARGET_LIBCALL_VALUE ia64_libcall_value
554 #undef TARGET_FUNCTION_VALUE_REGNO_P
555 #define TARGET_FUNCTION_VALUE_REGNO_P ia64_function_value_regno_p
556
557 #undef TARGET_STRUCT_VALUE_RTX
558 #define TARGET_STRUCT_VALUE_RTX ia64_struct_value_rtx
559 #undef TARGET_RETURN_IN_MEMORY
560 #define TARGET_RETURN_IN_MEMORY ia64_return_in_memory
561 #undef TARGET_SETUP_INCOMING_VARARGS
562 #define TARGET_SETUP_INCOMING_VARARGS ia64_setup_incoming_varargs
563 #undef TARGET_STRICT_ARGUMENT_NAMING
564 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
565 #undef TARGET_MUST_PASS_IN_STACK
566 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
567 #undef TARGET_GET_RAW_RESULT_MODE
568 #define TARGET_GET_RAW_RESULT_MODE ia64_get_reg_raw_mode
569 #undef TARGET_GET_RAW_ARG_MODE
570 #define TARGET_GET_RAW_ARG_MODE ia64_get_reg_raw_mode
571
572 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
573 #define TARGET_GIMPLIFY_VA_ARG_EXPR ia64_gimplify_va_arg
574
575 #undef TARGET_ASM_UNWIND_EMIT
576 #define TARGET_ASM_UNWIND_EMIT ia64_asm_unwind_emit
577 #undef TARGET_ASM_EMIT_EXCEPT_PERSONALITY
578 #define TARGET_ASM_EMIT_EXCEPT_PERSONALITY ia64_asm_emit_except_personality
579 #undef TARGET_ASM_INIT_SECTIONS
580 #define TARGET_ASM_INIT_SECTIONS ia64_asm_init_sections
581
582 #undef TARGET_DEBUG_UNWIND_INFO
583 #define TARGET_DEBUG_UNWIND_INFO ia64_debug_unwind_info
584
585 #undef TARGET_SCALAR_MODE_SUPPORTED_P
586 #define TARGET_SCALAR_MODE_SUPPORTED_P ia64_scalar_mode_supported_p
587 #undef TARGET_VECTOR_MODE_SUPPORTED_P
588 #define TARGET_VECTOR_MODE_SUPPORTED_P ia64_vector_mode_supported_p
589
590 /* ia64 architecture manual 4.4.7: ... reads, writes, and flushes may occur
591 in an order different from the specified program order. */
592 #undef TARGET_RELAXED_ORDERING
593 #define TARGET_RELAXED_ORDERING true
594
595 #undef TARGET_LEGITIMATE_CONSTANT_P
596 #define TARGET_LEGITIMATE_CONSTANT_P ia64_legitimate_constant_p
597 #undef TARGET_LEGITIMATE_ADDRESS_P
598 #define TARGET_LEGITIMATE_ADDRESS_P ia64_legitimate_address_p
599
600 #undef TARGET_CANNOT_FORCE_CONST_MEM
601 #define TARGET_CANNOT_FORCE_CONST_MEM ia64_cannot_force_const_mem
602
603 #undef TARGET_MANGLE_TYPE
604 #define TARGET_MANGLE_TYPE ia64_mangle_type
605
606 #undef TARGET_INVALID_CONVERSION
607 #define TARGET_INVALID_CONVERSION ia64_invalid_conversion
608 #undef TARGET_INVALID_UNARY_OP
609 #define TARGET_INVALID_UNARY_OP ia64_invalid_unary_op
610 #undef TARGET_INVALID_BINARY_OP
611 #define TARGET_INVALID_BINARY_OP ia64_invalid_binary_op
612
613 #undef TARGET_C_MODE_FOR_SUFFIX
614 #define TARGET_C_MODE_FOR_SUFFIX ia64_c_mode_for_suffix
615
616 #undef TARGET_CAN_ELIMINATE
617 #define TARGET_CAN_ELIMINATE ia64_can_eliminate
618
619 #undef TARGET_TRAMPOLINE_INIT
620 #define TARGET_TRAMPOLINE_INIT ia64_trampoline_init
621
622 #undef TARGET_INVALID_WITHIN_DOLOOP
623 #define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_null
624
625 #undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
626 #define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE ia64_override_options_after_change
627
628 #undef TARGET_PREFERRED_RELOAD_CLASS
629 #define TARGET_PREFERRED_RELOAD_CLASS ia64_preferred_reload_class
630
631 #undef TARGET_DELAY_SCHED2
632 #define TARGET_DELAY_SCHED2 true
633
634 /* Variable tracking should be run after all optimizations which
635 change order of insns. It also needs a valid CFG. */
636 #undef TARGET_DELAY_VARTRACK
637 #define TARGET_DELAY_VARTRACK true
638
639 #undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
640 #define TARGET_VECTORIZE_VEC_PERM_CONST_OK ia64_vectorize_vec_perm_const_ok
641
642 struct gcc_target targetm = TARGET_INITIALIZER;
643 \f
644 typedef enum
645 {
646 ADDR_AREA_NORMAL, /* normal address area */
647 ADDR_AREA_SMALL /* addressable by "addl" (-2MB < addr < 2MB) */
648 }
649 ia64_addr_area;
650
651 static GTY(()) tree small_ident1;
652 static GTY(()) tree small_ident2;
653
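/* Create the identifiers used to match the "model" attribute arguments
   ("small" and "__small__") the first time they are needed.  */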
654 static void
655 init_idents (void)
656 {
657 if (small_ident1 == 0)
658 {
659 small_ident1 = get_identifier ("small");
660 small_ident2 = get_identifier ("__small__");
661 }
662 }
663
664 /* Retrieve the address area that has been chosen for the given decl. */
665
666 static ia64_addr_area
667 ia64_get_addr_area (tree decl)
668 {
669 tree model_attr;
670
671 model_attr = lookup_attribute ("model", DECL_ATTRIBUTES (decl));
672 if (model_attr)
673 {
674 tree id;
675
676 init_idents ();
677 id = TREE_VALUE (TREE_VALUE (model_attr));
678 if (id == small_ident1 || id == small_ident2)
679 return ADDR_AREA_SMALL;
680 }
681 return ADDR_AREA_NORMAL;
682 }
683
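/* Handle a "model" attribute; arguments as in
   struct attribute_spec.handler.  */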
684 static tree
685 ia64_handle_model_attribute (tree *node, tree name, tree args,
686 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
687 {
688 ia64_addr_area addr_area = ADDR_AREA_NORMAL;
689 ia64_addr_area area;
690 tree arg, decl = *node;
691
692 init_idents ();
693 arg = TREE_VALUE (args);
694 if (arg == small_ident1 || arg == small_ident2)
695 {
696 addr_area = ADDR_AREA_SMALL;
697 }
698 else
699 {
700 warning (OPT_Wattributes, "invalid argument of %qE attribute",
701 name);
702 *no_add_attrs = true;
703 }
704
705 switch (TREE_CODE (decl))
706 {
707 case VAR_DECL:
708 if ((DECL_CONTEXT (decl) && TREE_CODE (DECL_CONTEXT (decl))
709 == FUNCTION_DECL)
710 && !TREE_STATIC (decl))
711 {
712 error_at (DECL_SOURCE_LOCATION (decl),
713 "an address area attribute cannot be specified for "
714 "local variables");
715 *no_add_attrs = true;
716 }
717 area = ia64_get_addr_area (decl);
718 if (area != ADDR_AREA_NORMAL && addr_area != area)
719 {
720 error ("address area of %q+D conflicts with previous "
721 "declaration", decl);
722 *no_add_attrs = true;
723 }
724 break;
725
726 case FUNCTION_DECL:
727 error_at (DECL_SOURCE_LOCATION (decl),
728 "address area attribute cannot be specified for "
729 "functions");
730 *no_add_attrs = true;
731 break;
732
733 default:
734 warning (OPT_Wattributes, "%qE attribute ignored",
735 name);
736 *no_add_attrs = true;
737 break;
738 }
739
740 return NULL_TREE;
741 }
742
743 /* The section must have global and overlaid attributes. */
744 #define SECTION_VMS_OVERLAY SECTION_MACH_DEP
745
746 /* Part of the low level implementation of DEC Ada pragma Common_Object which
747 enables the shared use of variables stored in overlaid linker areas
748 corresponding to the use of Fortran COMMON. */
749
750 static tree
751 ia64_vms_common_object_attribute (tree *node, tree name, tree args,
752 int flags ATTRIBUTE_UNUSED,
753 bool *no_add_attrs)
754 {
755 tree decl = *node;
756 tree id, val;
757 if (! DECL_P (decl))
758 abort ();
759
760 DECL_COMMON (decl) = 1;
761 id = TREE_VALUE (args);
762 if (TREE_CODE (id) == IDENTIFIER_NODE)
763 val = build_string (IDENTIFIER_LENGTH (id), IDENTIFIER_POINTER (id));
764 else if (TREE_CODE (id) == STRING_CST)
765 val = id;
766 else
767 {
768 warning (OPT_Wattributes,
769 "%qE attribute requires a string constant argument", name);
770 *no_add_attrs = true;
771 return NULL_TREE;
772 }
773 DECL_SECTION_NAME (decl) = val;
774 return NULL_TREE;
775 }
776
777 /* Part of the low level implementation of DEC Ada pragma Common_Object. */
778
779 void
780 ia64_vms_output_aligned_decl_common (FILE *file, tree decl, const char *name,
781 unsigned HOST_WIDE_INT size,
782 unsigned int align)
783 {
784 tree attr = DECL_ATTRIBUTES (decl);
785
786 /* Since the common_object attribute sets DECL_SECTION_NAME, check that
787 before looking up the attribute. */
788 if (DECL_SECTION_NAME (decl) && attr)
789 attr = lookup_attribute ("common_object", attr);
790 else
791 attr = NULL_TREE;
792
793 if (!attr)
794 {
795 /* Code from elfos.h. */
796 fprintf (file, "%s", COMMON_ASM_OP);
797 assemble_name (file, name);
798 fprintf (file, ","HOST_WIDE_INT_PRINT_UNSIGNED",%u\n",
799 size, align / BITS_PER_UNIT);
800 }
801 else
802 {
803 ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
804 ASM_OUTPUT_LABEL (file, name);
805 ASM_OUTPUT_SKIP (file, size ? size : 1);
806 }
807 }
808
809 /* Definition of TARGET_ASM_NAMED_SECTION for VMS. */
810
811 void
812 ia64_vms_elf_asm_named_section (const char *name, unsigned int flags,
813 tree decl)
814 {
815 if (!(flags & SECTION_VMS_OVERLAY))
816 {
817 default_elf_asm_named_section (name, flags, decl);
818 return;
819 }
820 if (flags != (SECTION_VMS_OVERLAY | SECTION_WRITE))
821 abort ();
822
823 if (flags & SECTION_DECLARED)
824 {
825 fprintf (asm_out_file, "\t.section\t%s\n", name);
826 return;
827 }
828
829 fprintf (asm_out_file, "\t.section\t%s,\"awgO\"\n", name);
830 }
831
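/* Record in SYMBOL's SYMBOL_REF_FLAGS the address area that has been
   chosen for DECL.  */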
832 static void
833 ia64_encode_addr_area (tree decl, rtx symbol)
834 {
835 int flags;
836
837 flags = SYMBOL_REF_FLAGS (symbol);
838 switch (ia64_get_addr_area (decl))
839 {
840 case ADDR_AREA_NORMAL: break;
841 case ADDR_AREA_SMALL: flags |= SYMBOL_FLAG_SMALL_ADDR; break;
842 default: gcc_unreachable ();
843 }
844 SYMBOL_REF_FLAGS (symbol) = flags;
845 }
846
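/* Implement TARGET_ENCODE_SECTION_INFO.  */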
847 static void
848 ia64_encode_section_info (tree decl, rtx rtl, int first)
849 {
850 default_encode_section_info (decl, rtl, first);
851
852 /* Careful not to prod global register variables. */
853 if (TREE_CODE (decl) == VAR_DECL
854 && GET_CODE (DECL_RTL (decl)) == MEM
855 && GET_CODE (XEXP (DECL_RTL (decl), 0)) == SYMBOL_REF
856 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl)))
857 ia64_encode_addr_area (decl, XEXP (rtl, 0));
858 }
859 \f
860 /* Return 1 if the operands of a move are ok. */
861
862 int
863 ia64_move_ok (rtx dst, rtx src)
864 {
865 /* If we're under init_recog_no_volatile, we'll not be able to use
866 memory_operand. So check the code directly and don't worry about
867 the validity of the underlying address, which should have been
868 checked elsewhere anyway. */
869 if (GET_CODE (dst) != MEM)
870 return 1;
871 if (GET_CODE (src) == MEM)
872 return 0;
873 if (register_operand (src, VOIDmode))
874 return 1;
875
876 /* Otherwise, this must be a constant: either the integer 0, or the
floating-point constant 0.0 or 1.0. */
877 if (INTEGRAL_MODE_P (GET_MODE (dst)))
878 return src == const0_rtx;
879 else
880 return satisfies_constraint_G (src);
881 }
882
883 /* Return 1 if the operands are ok for a floating point load pair. */
884
885 int
886 ia64_load_pair_ok (rtx dst, rtx src)
887 {
888 if (GET_CODE (dst) != REG || !FP_REGNO_P (REGNO (dst)))
889 return 0;
890 if (GET_CODE (src) != MEM || MEM_VOLATILE_P (src))
891 return 0;
892 switch (GET_CODE (XEXP (src, 0)))
893 {
894 case REG:
895 case POST_INC:
896 break;
897 case POST_DEC:
898 return 0;
899 case POST_MODIFY:
900 {
901 rtx adjust = XEXP (XEXP (XEXP (src, 0), 1), 1);
902
903 if (GET_CODE (adjust) != CONST_INT
904 || INTVAL (adjust) != GET_MODE_SIZE (GET_MODE (src)))
905 return 0;
906 }
907 break;
908 default:
909 abort ();
910 }
911 return 1;
912 }
913
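/* Return nonzero if it is OK to optimize an addp4 instruction whose
   operands are OP1 and OP2, i.e. when exactly one of them satisfies
   basereg_operand.  */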
914 int
915 addp4_optimize_ok (rtx op1, rtx op2)
916 {
917 return (basereg_operand (op1, GET_MODE(op1)) !=
918 basereg_operand (op2, GET_MODE(op2)));
919 }
920
921 /* Check if OP is a mask suitable for use with SHIFT in a dep.z instruction.
922 Return the length of the field, or <= 0 on failure. */
923
924 int
925 ia64_depz_field_mask (rtx rop, rtx rshift)
926 {
927 unsigned HOST_WIDE_INT op = INTVAL (rop);
928 unsigned HOST_WIDE_INT shift = INTVAL (rshift);
929
930 /* Get rid of the zero bits we're shifting in. */
931 op >>= shift;
932
933 /* We must now have a solid block of 1's at bit 0. */
934 return exact_log2 (op + 1);
935 }
936
937 /* Return the TLS model to use for ADDR. */
938
939 static enum tls_model
940 tls_symbolic_operand_type (rtx addr)
941 {
942 enum tls_model tls_kind = TLS_MODEL_NONE;
943
944 if (GET_CODE (addr) == CONST)
945 {
946 if (GET_CODE (XEXP (addr, 0)) == PLUS
947 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF)
948 tls_kind = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (addr, 0), 0));
949 }
950 else if (GET_CODE (addr) == SYMBOL_REF)
951 tls_kind = SYMBOL_REF_TLS_MODEL (addr);
952
953 return tls_kind;
954 }
955
956 /* Returns true if REG (assumed to be a `reg' RTX) is valid for use
957 as a base register. */
958
959 static inline bool
960 ia64_reg_ok_for_base_p (const_rtx reg, bool strict)
961 {
962 if (strict
963 && REGNO_OK_FOR_BASE_P (REGNO (reg)))
964 return true;
965 else if (!strict
966 && (GENERAL_REGNO_P (REGNO (reg))
967 || !HARD_REGISTER_P (reg)))
968 return true;
969 else
970 return false;
971 }
972
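/* Return true if REG is a register, or a SUBREG of a register, that is
   acceptable as an address base register.  */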
973 static bool
974 ia64_legitimate_address_reg (const_rtx reg, bool strict)
975 {
976 if ((REG_P (reg) && ia64_reg_ok_for_base_p (reg, strict))
977 || (GET_CODE (reg) == SUBREG && REG_P (XEXP (reg, 0))
978 && ia64_reg_ok_for_base_p (XEXP (reg, 0), strict)))
979 return true;
980
981 return false;
982 }
983
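/* Return true if DISP is a valid POST_MODIFY displacement for base
   register REG: a PLUS of REG with either another valid address register
   or a constant in the range -256..255.  */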
984 static bool
985 ia64_legitimate_address_disp (const_rtx reg, const_rtx disp, bool strict)
986 {
987 if (GET_CODE (disp) == PLUS
988 && rtx_equal_p (reg, XEXP (disp, 0))
989 && (ia64_legitimate_address_reg (XEXP (disp, 1), strict)
990 || (CONST_INT_P (XEXP (disp, 1))
991 && IN_RANGE (INTVAL (XEXP (disp, 1)), -256, 255))))
992 return true;
993
994 return false;
995 }
996
997 /* Implement TARGET_LEGITIMATE_ADDRESS_P. */
998
999 static bool
1000 ia64_legitimate_address_p (enum machine_mode mode ATTRIBUTE_UNUSED,
1001 rtx x, bool strict)
1002 {
1003 if (ia64_legitimate_address_reg (x, strict))
1004 return true;
1005 else if ((GET_CODE (x) == POST_INC || GET_CODE (x) == POST_DEC)
1006 && ia64_legitimate_address_reg (XEXP (x, 0), strict)
1007 && XEXP (x, 0) != arg_pointer_rtx)
1008 return true;
1009 else if (GET_CODE (x) == POST_MODIFY
1010 && ia64_legitimate_address_reg (XEXP (x, 0), strict)
1011 && XEXP (x, 0) != arg_pointer_rtx
1012 && ia64_legitimate_address_disp (XEXP (x, 0), XEXP (x, 1), strict))
1013 return true;
1014 else
1015 return false;
1016 }
1017
1018 /* Return true if X is a constant that is valid for some immediate
1019 field in an instruction. */
1020
1021 static bool
1022 ia64_legitimate_constant_p (enum machine_mode mode, rtx x)
1023 {
1024 switch (GET_CODE (x))
1025 {
1026 case CONST_INT:
1027 case LABEL_REF:
1028 return true;
1029
1030 case CONST_DOUBLE:
1031 if (GET_MODE (x) == VOIDmode || mode == SFmode || mode == DFmode)
1032 return true;
1033 return satisfies_constraint_G (x);
1034
1035 case CONST:
1036 case SYMBOL_REF:
1037 /* ??? Short term workaround for PR 28490. We must make the code here
1038 match the code in ia64_expand_move and move_operand, even though they
1039 are both technically wrong. */
1040 if (tls_symbolic_operand_type (x) == 0)
1041 {
1042 HOST_WIDE_INT addend = 0;
1043 rtx op = x;
1044
1045 if (GET_CODE (op) == CONST
1046 && GET_CODE (XEXP (op, 0)) == PLUS
1047 && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT)
1048 {
1049 addend = INTVAL (XEXP (XEXP (op, 0), 1));
1050 op = XEXP (XEXP (op, 0), 0);
1051 }
1052
1053 if (any_offset_symbol_operand (op, mode)
1054 || function_operand (op, mode))
1055 return true;
1056 if (aligned_offset_symbol_operand (op, mode))
1057 return (addend & 0x3fff) == 0;
1058 return false;
1059 }
1060 return false;
1061
1062 case CONST_VECTOR:
1063 if (mode == V2SFmode)
1064 return satisfies_constraint_Y (x);
1065
1066 return (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
1067 && GET_MODE_SIZE (mode) <= 8);
1068
1069 default:
1070 return false;
1071 }
1072 }
1073
1074 /* Don't allow TLS addresses to get spilled to memory. */
1075
1076 static bool
1077 ia64_cannot_force_const_mem (enum machine_mode mode, rtx x)
1078 {
1079 if (mode == RFmode)
1080 return true;
1081 return tls_symbolic_operand_type (x) != 0;
1082 }
1083
1084 /* Expand a symbolic constant load. */
1085
1086 bool
1087 ia64_expand_load_address (rtx dest, rtx src)
1088 {
1089 gcc_assert (GET_CODE (dest) == REG);
1090
1091 /* ILP32 mode still loads 64-bits of data from the GOT. This avoids
1092 having to pointer-extend the value afterward. Other forms of address
1093 computation below are also more natural to compute as 64-bit quantities.
1094 If we've been given an SImode destination register, change it. */
1095 if (GET_MODE (dest) != Pmode)
1096 dest = gen_rtx_REG_offset (dest, Pmode, REGNO (dest),
1097 byte_lowpart_offset (Pmode, GET_MODE (dest)));
1098
1099 if (TARGET_NO_PIC)
1100 return false;
1101 if (small_addr_symbolic_operand (src, VOIDmode))
1102 return false;
1103
1104 if (TARGET_AUTO_PIC)
1105 emit_insn (gen_load_gprel64 (dest, src));
1106 else if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (src))
1107 emit_insn (gen_load_fptr (dest, src));
1108 else if (sdata_symbolic_operand (src, VOIDmode))
1109 emit_insn (gen_load_gprel (dest, src));
1110 else
1111 {
1112 HOST_WIDE_INT addend = 0;
1113 rtx tmp;
1114
1115 /* We did split constant offsets in ia64_expand_move, and we did try
1116 to keep them split in move_operand, but we also allowed reload to
1117 rematerialize arbitrary constants rather than spill the value to
1118 the stack and reload it. So we have to be prepared here to split
1119 them apart again. */
1120 if (GET_CODE (src) == CONST)
1121 {
1122 HOST_WIDE_INT hi, lo;
1123
1124 hi = INTVAL (XEXP (XEXP (src, 0), 1));
1125 lo = ((hi & 0x3fff) ^ 0x2000) - 0x2000;
1126 hi = hi - lo;
1127
1128 if (lo != 0)
1129 {
1130 addend = lo;
1131 src = plus_constant (XEXP (XEXP (src, 0), 0), hi);
1132 }
1133 }
1134
1135 tmp = gen_rtx_HIGH (Pmode, src);
1136 tmp = gen_rtx_PLUS (Pmode, tmp, pic_offset_table_rtx);
1137 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
1138
1139 tmp = gen_rtx_LO_SUM (Pmode, gen_const_mem (Pmode, dest), src);
1140 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
1141
1142 if (addend)
1143 {
1144 tmp = gen_rtx_PLUS (Pmode, dest, GEN_INT (addend));
1145 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
1146 }
1147 }
1148
1149 return true;
1150 }
1151
1152 static GTY(()) rtx gen_tls_tga;
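/* Return the libfunc rtx for __tls_get_addr, creating it on first use.  */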
1153 static rtx
1154 gen_tls_get_addr (void)
1155 {
1156 if (!gen_tls_tga)
1157 gen_tls_tga = init_one_libfunc ("__tls_get_addr");
1158 return gen_tls_tga;
1159 }
1160
1161 static GTY(()) rtx thread_pointer_rtx;
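/* Return the register that holds the thread pointer (r13), creating the
   rtx the first time it is needed.  */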
1162 static rtx
1163 gen_thread_pointer (void)
1164 {
1165 if (!thread_pointer_rtx)
1166 thread_pointer_rtx = gen_rtx_REG (Pmode, 13);
1167 return thread_pointer_rtx;
1168 }
1169
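/* Expand a reference of model TLS_KIND to the TLS symbol OP1 plus ADDEND
   into register OP0.  ORIG_OP1 is the original source operand, including
   any constant offset.  Return the value to use in place of the original
   source, or NULL_RTX if it has already been stored into OP0.  */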
1170 static rtx
1171 ia64_expand_tls_address (enum tls_model tls_kind, rtx op0, rtx op1,
1172 rtx orig_op1, HOST_WIDE_INT addend)
1173 {
1174 rtx tga_op1, tga_op2, tga_ret, tga_eqv, tmp, insns;
1175 rtx orig_op0 = op0;
1176 HOST_WIDE_INT addend_lo, addend_hi;
1177
1178 switch (tls_kind)
1179 {
1180 case TLS_MODEL_GLOBAL_DYNAMIC:
1181 start_sequence ();
1182
1183 tga_op1 = gen_reg_rtx (Pmode);
1184 emit_insn (gen_load_dtpmod (tga_op1, op1));
1185
1186 tga_op2 = gen_reg_rtx (Pmode);
1187 emit_insn (gen_load_dtprel (tga_op2, op1));
1188
1189 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
1190 LCT_CONST, Pmode, 2, tga_op1,
1191 Pmode, tga_op2, Pmode);
1192
1193 insns = get_insns ();
1194 end_sequence ();
1195
1196 if (GET_MODE (op0) != Pmode)
1197 op0 = tga_ret;
1198 emit_libcall_block (insns, op0, tga_ret, op1);
1199 break;
1200
1201 case TLS_MODEL_LOCAL_DYNAMIC:
1202 /* ??? This isn't the completely proper way to do local-dynamic.
1203 If the call to __tls_get_addr is used only by a single symbol,
1204 then we should (somehow) move the dtprel to the second arg
1205 to avoid the extra add. */
1206 start_sequence ();
1207
1208 tga_op1 = gen_reg_rtx (Pmode);
1209 emit_insn (gen_load_dtpmod (tga_op1, op1));
1210
1211 tga_op2 = const0_rtx;
1212
1213 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
1214 LCT_CONST, Pmode, 2, tga_op1,
1215 Pmode, tga_op2, Pmode);
1216
1217 insns = get_insns ();
1218 end_sequence ();
1219
1220 tga_eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
1221 UNSPEC_LD_BASE);
1222 tmp = gen_reg_rtx (Pmode);
1223 emit_libcall_block (insns, tmp, tga_ret, tga_eqv);
1224
1225 if (!register_operand (op0, Pmode))
1226 op0 = gen_reg_rtx (Pmode);
1227 if (TARGET_TLS64)
1228 {
1229 emit_insn (gen_load_dtprel (op0, op1));
1230 emit_insn (gen_adddi3 (op0, tmp, op0));
1231 }
1232 else
1233 emit_insn (gen_add_dtprel (op0, op1, tmp));
1234 break;
1235
1236 case TLS_MODEL_INITIAL_EXEC:
1237 addend_lo = ((addend & 0x3fff) ^ 0x2000) - 0x2000;
1238 addend_hi = addend - addend_lo;
1239
1240 op1 = plus_constant (op1, addend_hi);
1241 addend = addend_lo;
1242
1243 tmp = gen_reg_rtx (Pmode);
1244 emit_insn (gen_load_tprel (tmp, op1));
1245
1246 if (!register_operand (op0, Pmode))
1247 op0 = gen_reg_rtx (Pmode);
1248 emit_insn (gen_adddi3 (op0, tmp, gen_thread_pointer ()));
1249 break;
1250
1251 case TLS_MODEL_LOCAL_EXEC:
1252 if (!register_operand (op0, Pmode))
1253 op0 = gen_reg_rtx (Pmode);
1254
1255 op1 = orig_op1;
1256 addend = 0;
1257 if (TARGET_TLS64)
1258 {
1259 emit_insn (gen_load_tprel (op0, op1));
1260 emit_insn (gen_adddi3 (op0, op0, gen_thread_pointer ()));
1261 }
1262 else
1263 emit_insn (gen_add_tprel (op0, op1, gen_thread_pointer ()));
1264 break;
1265
1266 default:
1267 gcc_unreachable ();
1268 }
1269
1270 if (addend)
1271 op0 = expand_simple_binop (Pmode, PLUS, op0, GEN_INT (addend),
1272 orig_op0, 1, OPTAB_DIRECT);
1273 if (orig_op0 == op0)
1274 return NULL_RTX;
1275 if (GET_MODE (orig_op0) == Pmode)
1276 return op0;
1277 return gen_lowpart (GET_MODE (orig_op0), op0);
1278 }
1279
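/* Expand a move from OP1 into OP0, legitimizing symbolic and TLS source
   operands as needed.  Return the operand to use as the new move source,
   or NULL_RTX if the move has already been emitted.  */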
1280 rtx
1281 ia64_expand_move (rtx op0, rtx op1)
1282 {
1283 enum machine_mode mode = GET_MODE (op0);
1284
1285 if (!reload_in_progress && !reload_completed && !ia64_move_ok (op0, op1))
1286 op1 = force_reg (mode, op1);
1287
1288 if ((mode == Pmode || mode == ptr_mode) && symbolic_operand (op1, VOIDmode))
1289 {
1290 HOST_WIDE_INT addend = 0;
1291 enum tls_model tls_kind;
1292 rtx sym = op1;
1293
1294 if (GET_CODE (op1) == CONST
1295 && GET_CODE (XEXP (op1, 0)) == PLUS
1296 && GET_CODE (XEXP (XEXP (op1, 0), 1)) == CONST_INT)
1297 {
1298 addend = INTVAL (XEXP (XEXP (op1, 0), 1));
1299 sym = XEXP (XEXP (op1, 0), 0);
1300 }
1301
1302 tls_kind = tls_symbolic_operand_type (sym);
1303 if (tls_kind)
1304 return ia64_expand_tls_address (tls_kind, op0, sym, op1, addend);
1305
1306 if (any_offset_symbol_operand (sym, mode))
1307 addend = 0;
1308 else if (aligned_offset_symbol_operand (sym, mode))
1309 {
1310 HOST_WIDE_INT addend_lo, addend_hi;
1311
1312 addend_lo = ((addend & 0x3fff) ^ 0x2000) - 0x2000;
1313 addend_hi = addend - addend_lo;
1314
1315 if (addend_lo != 0)
1316 {
1317 op1 = plus_constant (sym, addend_hi);
1318 addend = addend_lo;
1319 }
1320 else
1321 addend = 0;
1322 }
1323 else
1324 op1 = sym;
1325
1326 if (reload_completed)
1327 {
1328 /* We really should have taken care of this offset earlier. */
1329 gcc_assert (addend == 0);
1330 if (ia64_expand_load_address (op0, op1))
1331 return NULL_RTX;
1332 }
1333
1334 if (addend)
1335 {
1336 rtx subtarget = !can_create_pseudo_p () ? op0 : gen_reg_rtx (mode);
1337
1338 emit_insn (gen_rtx_SET (VOIDmode, subtarget, op1));
1339
1340 op1 = expand_simple_binop (mode, PLUS, subtarget,
1341 GEN_INT (addend), op0, 1, OPTAB_DIRECT);
1342 if (op0 == op1)
1343 return NULL_RTX;
1344 }
1345 }
1346
1347 return op1;
1348 }
1349
1350 /* Split a move from OP1 to OP0 conditional on COND. */
1351
1352 void
1353 ia64_emit_cond_move (rtx op0, rtx op1, rtx cond)
1354 {
1355 rtx insn, first = get_last_insn ();
1356
1357 emit_move_insn (op0, op1);
1358
1359 for (insn = get_last_insn (); insn != first; insn = PREV_INSN (insn))
1360 if (INSN_P (insn))
1361 PATTERN (insn) = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond),
1362 PATTERN (insn));
1363 }
1364
1365 /* Split a post-reload TImode or TFmode reference into two DImode
1366 components. This is made extra difficult by the fact that we do
1367 not get any scratch registers to work with, because reload cannot
1368 be prevented from giving us a scratch that overlaps the register
1369 pair involved. So instead, when addressing memory, we tweak the
1370 pointer register up and back down with POST_INCs. Or up and not
1371 back down when we can get away with it.
1372
1373 REVERSED is true when the loads must be done in reversed order
1374 (high word first) for correctness. DEAD is true when the pointer
1375 dies with the second insn we generate and therefore the second
1376 address must not carry a postmodify.
1377
1378 May return an insn which is to be emitted after the moves. */
1379
1380 static rtx
1381 ia64_split_tmode (rtx out[2], rtx in, bool reversed, bool dead)
1382 {
1383 rtx fixup = 0;
1384
1385 switch (GET_CODE (in))
1386 {
1387 case REG:
1388 out[reversed] = gen_rtx_REG (DImode, REGNO (in));
1389 out[!reversed] = gen_rtx_REG (DImode, REGNO (in) + 1);
1390 break;
1391
1392 case CONST_INT:
1393 case CONST_DOUBLE:
1394 /* Cannot occur reversed. */
1395 gcc_assert (!reversed);
1396
1397 if (GET_MODE (in) != TFmode)
1398 split_double (in, &out[0], &out[1]);
1399 else
1400 /* split_double does not understand how to split a TFmode
1401 quantity into a pair of DImode constants. */
1402 {
1403 REAL_VALUE_TYPE r;
1404 unsigned HOST_WIDE_INT p[2];
1405 long l[4]; /* TFmode is 128 bits */
1406
1407 REAL_VALUE_FROM_CONST_DOUBLE (r, in);
1408 real_to_target (l, &r, TFmode);
1409
1410 if (FLOAT_WORDS_BIG_ENDIAN)
1411 {
1412 p[0] = (((unsigned HOST_WIDE_INT) l[0]) << 32) + l[1];
1413 p[1] = (((unsigned HOST_WIDE_INT) l[2]) << 32) + l[3];
1414 }
1415 else
1416 {
1417 p[0] = (((unsigned HOST_WIDE_INT) l[1]) << 32) + l[0];
1418 p[1] = (((unsigned HOST_WIDE_INT) l[3]) << 32) + l[2];
1419 }
1420 out[0] = GEN_INT (p[0]);
1421 out[1] = GEN_INT (p[1]);
1422 }
1423 break;
1424
1425 case MEM:
1426 {
1427 rtx base = XEXP (in, 0);
1428 rtx offset;
1429
1430 switch (GET_CODE (base))
1431 {
1432 case REG:
1433 if (!reversed)
1434 {
1435 out[0] = adjust_automodify_address
1436 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1437 out[1] = adjust_automodify_address
1438 (in, DImode, dead ? 0 : gen_rtx_POST_DEC (Pmode, base), 8);
1439 }
1440 else
1441 {
1442 /* Reversal requires a pre-increment, which can only
1443 be done as a separate insn. */
1444 emit_insn (gen_adddi3 (base, base, GEN_INT (8)));
1445 out[0] = adjust_automodify_address
1446 (in, DImode, gen_rtx_POST_DEC (Pmode, base), 8);
1447 out[1] = adjust_address (in, DImode, 0);
1448 }
1449 break;
1450
1451 case POST_INC:
1452 gcc_assert (!reversed && !dead);
1453
1454 /* Just do the increment in two steps. */
1455 out[0] = adjust_automodify_address (in, DImode, 0, 0);
1456 out[1] = adjust_automodify_address (in, DImode, 0, 8);
1457 break;
1458
1459 case POST_DEC:
1460 gcc_assert (!reversed && !dead);
1461
1462 /* Add 8, subtract 24. */
1463 base = XEXP (base, 0);
1464 out[0] = adjust_automodify_address
1465 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1466 out[1] = adjust_automodify_address
1467 (in, DImode,
1468 gen_rtx_POST_MODIFY (Pmode, base, plus_constant (base, -24)),
1469 8);
1470 break;
1471
1472 case POST_MODIFY:
1473 gcc_assert (!reversed && !dead);
1474
1475 /* Extract and adjust the modification. This case is
1476 trickier than the others, because we might have an
1477 index register, or we might have a combined offset that
1478 doesn't fit a signed 9-bit displacement field. We can
1479 assume the incoming expression is already legitimate. */
1480 offset = XEXP (base, 1);
1481 base = XEXP (base, 0);
1482
1483 out[0] = adjust_automodify_address
1484 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1485
1486 if (GET_CODE (XEXP (offset, 1)) == REG)
1487 {
1488 /* Can't adjust the postmodify to match. Emit the
1489 original, then a separate addition insn. */
1490 out[1] = adjust_automodify_address (in, DImode, 0, 8);
1491 fixup = gen_adddi3 (base, base, GEN_INT (-8));
1492 }
1493 else
1494 {
1495 gcc_assert (GET_CODE (XEXP (offset, 1)) == CONST_INT);
1496 if (INTVAL (XEXP (offset, 1)) < -256 + 8)
1497 {
1498 /* Again the postmodify cannot be made to match,
1499 but in this case it's more efficient to get rid
1500 of the postmodify entirely and fix up with an
1501 add insn. */
1502 out[1] = adjust_automodify_address (in, DImode, base, 8);
1503 fixup = gen_adddi3
1504 (base, base, GEN_INT (INTVAL (XEXP (offset, 1)) - 8));
1505 }
1506 else
1507 {
1508 /* Combined offset still fits in the displacement field.
1509 (We cannot overflow it at the high end.) */
1510 out[1] = adjust_automodify_address
1511 (in, DImode, gen_rtx_POST_MODIFY
1512 (Pmode, base, gen_rtx_PLUS
1513 (Pmode, base,
1514 GEN_INT (INTVAL (XEXP (offset, 1)) - 8))),
1515 8);
1516 }
1517 }
1518 break;
1519
1520 default:
1521 gcc_unreachable ();
1522 }
1523 break;
1524 }
1525
1526 default:
1527 gcc_unreachable ();
1528 }
1529
1530 return fixup;
1531 }
1532
1533 /* Split a TImode or TFmode move instruction after reload.
1534 This is used by *movtf_internal and *movti_internal. */
1535 void
1536 ia64_split_tmode_move (rtx operands[])
1537 {
1538 rtx in[2], out[2], insn;
1539 rtx fixup[2];
1540 bool dead = false;
1541 bool reversed = false;
1542
1543 /* It is possible for reload to decide to overwrite a pointer with
1544 the value it points to. In that case we have to do the loads in
1545 the appropriate order so that the pointer is not destroyed too
1546 early. Also we must not generate a postmodify for that second
1547 load, or rws_access_regno will die. */
1548 if (GET_CODE (operands[1]) == MEM
1549 && reg_overlap_mentioned_p (operands[0], operands[1]))
1550 {
1551 rtx base = XEXP (operands[1], 0);
1552 while (GET_CODE (base) != REG)
1553 base = XEXP (base, 0);
1554
1555 if (REGNO (base) == REGNO (operands[0]))
1556 reversed = true;
1557 dead = true;
1558 }
1559 /* Another reason to do the moves in reversed order is if the first
1560 element of the target register pair is also the second element of
1561 the source register pair. */
1562 if (GET_CODE (operands[0]) == REG && GET_CODE (operands[1]) == REG
1563 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
1564 reversed = true;
1565
1566 fixup[0] = ia64_split_tmode (in, operands[1], reversed, dead);
1567 fixup[1] = ia64_split_tmode (out, operands[0], reversed, dead);
1568
1569 #define MAYBE_ADD_REG_INC_NOTE(INSN, EXP) \
1570 if (GET_CODE (EXP) == MEM \
1571 && (GET_CODE (XEXP (EXP, 0)) == POST_MODIFY \
1572 || GET_CODE (XEXP (EXP, 0)) == POST_INC \
1573 || GET_CODE (XEXP (EXP, 0)) == POST_DEC)) \
1574 add_reg_note (insn, REG_INC, XEXP (XEXP (EXP, 0), 0))
1575
1576 insn = emit_insn (gen_rtx_SET (VOIDmode, out[0], in[0]));
1577 MAYBE_ADD_REG_INC_NOTE (insn, in[0]);
1578 MAYBE_ADD_REG_INC_NOTE (insn, out[0]);
1579
1580 insn = emit_insn (gen_rtx_SET (VOIDmode, out[1], in[1]));
1581 MAYBE_ADD_REG_INC_NOTE (insn, in[1]);
1582 MAYBE_ADD_REG_INC_NOTE (insn, out[1]);
1583
1584 if (fixup[0])
1585 emit_insn (fixup[0]);
1586 if (fixup[1])
1587 emit_insn (fixup[1]);
1588
1589 #undef MAYBE_ADD_REG_INC_NOTE
1590 }
1591
1592 /* ??? Fixing GR->FR XFmode moves during reload is hard. You need to go
1593 through memory plus an extra GR scratch register. Except that you can
1594 either get the first from SECONDARY_MEMORY_NEEDED or the second from
1595 SECONDARY_RELOAD_CLASS, but not both.
1596
1597 We got into problems in the first place by allowing a construct like
1598 (subreg:XF (reg:TI)), which we got from a union containing a long double.
1599 This solution attempts to prevent this situation from occurring. When
1600 we see something like the above, we spill the inner register to memory. */
1601
1602 static rtx
1603 spill_xfmode_rfmode_operand (rtx in, int force, enum machine_mode mode)
1604 {
1605 if (GET_CODE (in) == SUBREG
1606 && GET_MODE (SUBREG_REG (in)) == TImode
1607 && GET_CODE (SUBREG_REG (in)) == REG)
1608 {
1609 rtx memt = assign_stack_temp (TImode, 16, 0);
1610 emit_move_insn (memt, SUBREG_REG (in));
1611 return adjust_address (memt, mode, 0);
1612 }
1613 else if (force && GET_CODE (in) == REG)
1614 {
1615 rtx memx = assign_stack_temp (mode, 16, 0);
1616 emit_move_insn (memx, in);
1617 return memx;
1618 }
1619 else
1620 return in;
1621 }
1622
1623 /* Expand the movxf or movrf pattern (MODE says which) with the given
1624 OPERANDS, returning true if the pattern should then invoke
1625 DONE. */
1626
1627 bool
1628 ia64_expand_movxf_movrf (enum machine_mode mode, rtx operands[])
1629 {
1630 rtx op0 = operands[0];
1631
1632 if (GET_CODE (op0) == SUBREG)
1633 op0 = SUBREG_REG (op0);
1634
1635 /* We must support XFmode loads into general registers for stdarg/vararg,
1636 unprototyped calls, and a rare case where a long double is passed as
1637 an argument after a float HFA fills the FP registers. We split them into
1638 DImode loads for convenience. We also need to support XFmode stores
1639 for the last case. This case does not happen for stdarg/vararg routines,
1640 because we do a block store to memory of unnamed arguments. */
1641
1642 if (GET_CODE (op0) == REG && GR_REGNO_P (REGNO (op0)))
1643 {
1644 rtx out[2];
1645
1646 /* We're hoping to transform everything that deals with XFmode
1647 quantities and GR registers early in the compiler. */
1648 gcc_assert (can_create_pseudo_p ());
1649
1650 /* Struct to register can just use TImode instead. */
1651 if ((GET_CODE (operands[1]) == SUBREG
1652 && GET_MODE (SUBREG_REG (operands[1])) == TImode)
1653 || (GET_CODE (operands[1]) == REG
1654 && GR_REGNO_P (REGNO (operands[1]))))
1655 {
1656 rtx op1 = operands[1];
1657
1658 if (GET_CODE (op1) == SUBREG)
1659 op1 = SUBREG_REG (op1);
1660 else
1661 op1 = gen_rtx_REG (TImode, REGNO (op1));
1662
1663 emit_move_insn (gen_rtx_REG (TImode, REGNO (op0)), op1);
1664 return true;
1665 }
1666
1667 if (GET_CODE (operands[1]) == CONST_DOUBLE)
1668 {
1669 /* Don't word-swap when reading in the constant. */
1670 emit_move_insn (gen_rtx_REG (DImode, REGNO (op0)),
1671 operand_subword (operands[1], WORDS_BIG_ENDIAN,
1672 0, mode));
1673 emit_move_insn (gen_rtx_REG (DImode, REGNO (op0) + 1),
1674 operand_subword (operands[1], !WORDS_BIG_ENDIAN,
1675 0, mode));
1676 return true;
1677 }
1678
1679 /* If the quantity is in a register not known to be GR, spill it. */
1680 if (register_operand (operands[1], mode))
1681 operands[1] = spill_xfmode_rfmode_operand (operands[1], 1, mode);
1682
1683 gcc_assert (GET_CODE (operands[1]) == MEM);
1684
1685 /* Don't word-swap when reading in the value. */
1686 out[0] = gen_rtx_REG (DImode, REGNO (op0));
1687 out[1] = gen_rtx_REG (DImode, REGNO (op0) + 1);
1688
1689 emit_move_insn (out[0], adjust_address (operands[1], DImode, 0));
1690 emit_move_insn (out[1], adjust_address (operands[1], DImode, 8));
1691 return true;
1692 }
1693
1694 if (GET_CODE (operands[1]) == REG && GR_REGNO_P (REGNO (operands[1])))
1695 {
1696 /* We're hoping to transform everything that deals with XFmode
1697 quantities and GR registers early in the compiler. */
1698 gcc_assert (can_create_pseudo_p ());
1699
1700 /* Op0 can't be a GR_REG here, as that case is handled above.
1701 If op0 is a register, then we spill op1, so that we now have a
1702 MEM operand. This requires creating an XFmode subreg of a TImode reg
1703 to force the spill. */
1704 if (register_operand (operands[0], mode))
1705 {
1706 rtx op1 = gen_rtx_REG (TImode, REGNO (operands[1]));
1707 op1 = gen_rtx_SUBREG (mode, op1, 0);
1708 operands[1] = spill_xfmode_rfmode_operand (op1, 0, mode);
1709 }
1710
1711 else
1712 {
1713 rtx in[2];
1714
1715 gcc_assert (GET_CODE (operands[0]) == MEM);
1716
1717 /* Don't word-swap when writing out the value. */
1718 in[0] = gen_rtx_REG (DImode, REGNO (operands[1]));
1719 in[1] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
1720
1721 emit_move_insn (adjust_address (operands[0], DImode, 0), in[0]);
1722 emit_move_insn (adjust_address (operands[0], DImode, 8), in[1]);
1723 return true;
1724 }
1725 }
1726
1727 if (!reload_in_progress && !reload_completed)
1728 {
1729 operands[1] = spill_xfmode_rfmode_operand (operands[1], 0, mode);
1730
1731 if (GET_MODE (op0) == TImode && GET_CODE (op0) == REG)
1732 {
1733 rtx memt, memx, in = operands[1];
1734 if (CONSTANT_P (in))
1735 in = validize_mem (force_const_mem (mode, in));
1736 if (GET_CODE (in) == MEM)
1737 memt = adjust_address (in, TImode, 0);
1738 else
1739 {
1740 memt = assign_stack_temp (TImode, 16, 0);
1741 memx = adjust_address (memt, mode, 0);
1742 emit_move_insn (memx, in);
1743 }
1744 emit_move_insn (op0, memt);
1745 return true;
1746 }
1747
1748 if (!ia64_move_ok (operands[0], operands[1]))
1749 operands[1] = force_reg (mode, operands[1]);
1750 }
1751
1752 return false;
1753 }
1754
1755 /* Emit comparison instruction if necessary, replacing *EXPR, *OP0, *OP1
1756 with the expression that holds the compare result (in VOIDmode). */
1757
1758 static GTY(()) rtx cmptf_libfunc;
1759
1760 void
1761 ia64_expand_compare (rtx *expr, rtx *op0, rtx *op1)
1762 {
1763 enum rtx_code code = GET_CODE (*expr);
1764 rtx cmp;
1765
1766 /* If we have a BImode input, then we already have a compare result, and
1767 do not need to emit another comparison. */
1768 if (GET_MODE (*op0) == BImode)
1769 {
1770 gcc_assert ((code == NE || code == EQ) && *op1 == const0_rtx);
1771 cmp = *op0;
1772 }
1773 /* HPUX TFmode compare requires a library call to _U_Qfcmp, which takes a
1774 magic number as its third argument, which indicates what to do.
1775 The return value is an integer to be compared against zero. */
1776 else if (TARGET_HPUX && GET_MODE (*op0) == TFmode)
1777 {
1778 enum qfcmp_magic {
1779 QCMP_INV = 1, /* Raise FP_INVALID on SNaN as a side effect. */
1780 QCMP_UNORD = 2,
1781 QCMP_EQ = 4,
1782 QCMP_LT = 8,
1783 QCMP_GT = 16
1784 };
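/* For example, with the mapping chosen below a TFmode "a < b" expands to
   roughly

	_U_Qfcmp (a, b, QCMP_LT | QCMP_INV) != 0

   and "a != b" to "_U_Qfcmp (a, b, QCMP_EQ) == 0": the magic number
   selects the predicate computed by the library call, and NCODE says how
   its integer result is then tested against zero.  */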
1785 int magic;
1786 enum rtx_code ncode;
1787 rtx ret, insns;
1788
1789 gcc_assert (cmptf_libfunc && GET_MODE (*op1) == TFmode);
1790 switch (code)
1791 {
1792 /* 1 = equal, 0 = not equal. Equality operators do
1793 not raise FP_INVALID when given an SNaN operand. */
1794 case EQ: magic = QCMP_EQ; ncode = NE; break;
1795 case NE: magic = QCMP_EQ; ncode = EQ; break;
1796 /* isunordered() from C99. */
1797 case UNORDERED: magic = QCMP_UNORD; ncode = NE; break;
1798 case ORDERED: magic = QCMP_UNORD; ncode = EQ; break;
1799 /* Relational operators raise FP_INVALID when given
1800 an SNaN operand. */
1801 case LT: magic = QCMP_LT |QCMP_INV; ncode = NE; break;
1802 case LE: magic = QCMP_LT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1803 case GT: magic = QCMP_GT |QCMP_INV; ncode = NE; break;
1804 case GE: magic = QCMP_GT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1805 /* FUTURE: Implement UNEQ, UNLT, UNLE, UNGT, UNGE, LTGT.
1806 Expanders for buneq etc. would have to be added to ia64.md
1807 for this to be useful. */
1808 default: gcc_unreachable ();
1809 }
1810
1811 start_sequence ();
1812
1813 ret = emit_library_call_value (cmptf_libfunc, 0, LCT_CONST, DImode, 3,
1814 *op0, TFmode, *op1, TFmode,
1815 GEN_INT (magic), DImode);
1816 cmp = gen_reg_rtx (BImode);
1817 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1818 gen_rtx_fmt_ee (ncode, BImode,
1819 ret, const0_rtx)));
1820
1821 insns = get_insns ();
1822 end_sequence ();
1823
1824 emit_libcall_block (insns, cmp, cmp,
1825 gen_rtx_fmt_ee (code, BImode, *op0, *op1));
1826 code = NE;
1827 }
1828 else
1829 {
1830 cmp = gen_reg_rtx (BImode);
1831 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1832 gen_rtx_fmt_ee (code, BImode, *op0, *op1)));
1833 code = NE;
1834 }
1835
1836 *expr = gen_rtx_fmt_ee (code, VOIDmode, cmp, const0_rtx);
1837 *op0 = cmp;
1838 *op1 = const0_rtx;
1839 }
1840
1841 /* Generate an integral vector comparison. Return true if the condition has
1842 been reversed, and so the sense of the comparison should be inverted. */
1843
1844 static bool
1845 ia64_expand_vecint_compare (enum rtx_code code, enum machine_mode mode,
1846 rtx dest, rtx op0, rtx op1)
1847 {
1848 bool negate = false;
1849 rtx x;
1850
1851 /* Canonicalize the comparison to EQ, GT, GTU. */
1852 switch (code)
1853 {
1854 case EQ:
1855 case GT:
1856 case GTU:
1857 break;
1858
1859 case NE:
1860 case LE:
1861 case LEU:
1862 code = reverse_condition (code);
1863 negate = true;
1864 break;
1865
1866 case GE:
1867 case GEU:
1868 code = reverse_condition (code);
1869 negate = true;
1870 /* FALLTHRU */
1871
1872 case LT:
1873 case LTU:
1874 code = swap_condition (code);
1875 x = op0, op0 = op1, op1 = x;
1876 break;
1877
1878 default:
1879 gcc_unreachable ();
1880 }
1881
1882 /* Unsigned parallel compare is not supported by the hardware. Play some
1883 tricks to turn this into a signed comparison against 0. */
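/* A scalar sketch of both tricks used below: for 32-bit elements,
   "a >u b" holds exactly when
	(int) (a - 0x80000000) > (int) (b - 0x80000000)
   since subtracting 0x80000000 flips the sign bit of both operands;
   e.g. a = 0xffffffff, b = 1 gives 0x7fffffff > (int) 0x80000001.
   For 8- and 16-bit elements, "a >u b" holds exactly when the unsigned
   saturating difference "a -us b" is nonzero, so we compare that
   difference for equality with zero and flip NEGATE.  */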
1884 if (code == GTU)
1885 {
1886 switch (mode)
1887 {
1888 case V2SImode:
1889 {
1890 rtx t1, t2, mask;
1891
1892 /* Subtract (-(INT MAX) - 1) from both operands to make
1893 them signed. */
1894 mask = GEN_INT (0x80000000);
1895 mask = gen_rtx_CONST_VECTOR (V2SImode, gen_rtvec (2, mask, mask));
1896 mask = force_reg (mode, mask);
1897 t1 = gen_reg_rtx (mode);
1898 emit_insn (gen_subv2si3 (t1, op0, mask));
1899 t2 = gen_reg_rtx (mode);
1900 emit_insn (gen_subv2si3 (t2, op1, mask));
1901 op0 = t1;
1902 op1 = t2;
1903 code = GT;
1904 }
1905 break;
1906
1907 case V8QImode:
1908 case V4HImode:
1909 /* Perform a parallel unsigned saturating subtraction. */
1910 x = gen_reg_rtx (mode);
1911 emit_insn (gen_rtx_SET (VOIDmode, x,
1912 gen_rtx_US_MINUS (mode, op0, op1)));
1913
1914 code = EQ;
1915 op0 = x;
1916 op1 = CONST0_RTX (mode);
1917 negate = !negate;
1918 break;
1919
1920 default:
1921 gcc_unreachable ();
1922 }
1923 }
1924
1925 x = gen_rtx_fmt_ee (code, mode, op0, op1);
1926 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
1927
1928 return negate;
1929 }
1930
1931 /* Emit an integral vector conditional move. */
1932
1933 void
1934 ia64_expand_vecint_cmov (rtx operands[])
1935 {
1936 enum machine_mode mode = GET_MODE (operands[0]);
1937 enum rtx_code code = GET_CODE (operands[3]);
1938 bool negate;
1939 rtx cmp, x, ot, of;
1940
1941 cmp = gen_reg_rtx (mode);
1942 negate = ia64_expand_vecint_compare (code, mode, cmp,
1943 operands[4], operands[5]);
1944
1945 ot = operands[1+negate];
1946 of = operands[2-negate];
1947
1948 if (ot == CONST0_RTX (mode))
1949 {
1950 if (of == CONST0_RTX (mode))
1951 {
1952 emit_move_insn (operands[0], ot);
1953 return;
1954 }
1955
1956 x = gen_rtx_NOT (mode, cmp);
1957 x = gen_rtx_AND (mode, x, of);
1958 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1959 }
1960 else if (of == CONST0_RTX (mode))
1961 {
1962 x = gen_rtx_AND (mode, cmp, ot);
1963 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1964 }
1965 else
1966 {
1967 rtx t, f;
1968
1969 t = gen_reg_rtx (mode);
1970 x = gen_rtx_AND (mode, cmp, operands[1+negate]);
1971 emit_insn (gen_rtx_SET (VOIDmode, t, x));
1972
1973 f = gen_reg_rtx (mode);
1974 x = gen_rtx_NOT (mode, cmp);
1975 x = gen_rtx_AND (mode, x, operands[2-negate]);
1976 emit_insn (gen_rtx_SET (VOIDmode, f, x));
1977
1978 x = gen_rtx_IOR (mode, t, f);
1979 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1980 }
1981 }
1982
1983 /* Emit an integral vector min or max operation. Return true if all done. */
1984
1985 bool
1986 ia64_expand_vecint_minmax (enum rtx_code code, enum machine_mode mode,
1987 rtx operands[])
1988 {
1989 rtx xops[6];
1990
1991 /* These four combinations are supported directly. */
1992 if (mode == V8QImode && (code == UMIN || code == UMAX))
1993 return false;
1994 if (mode == V4HImode && (code == SMIN || code == SMAX))
1995 return false;
1996
1997 /* This combination can be implemented with only saturating subtraction. */
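/* The identity used here: for unsigned values,
	umax (a, b) == (a -us b) + b
   where "-us" is saturating subtraction.  If a >= b the sum is
   (a - b) + b == a, otherwise it is 0 + b == b, so the plain add that
   follows can never wrap.  */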
1998 if (mode == V4HImode && code == UMAX)
1999 {
2000 rtx x, tmp = gen_reg_rtx (mode);
2001
2002 x = gen_rtx_US_MINUS (mode, operands[1], operands[2]);
2003 emit_insn (gen_rtx_SET (VOIDmode, tmp, x));
2004
2005 emit_insn (gen_addv4hi3 (operands[0], tmp, operands[2]));
2006 return true;
2007 }
2008
2009 /* Everything else implemented via vector comparisons. */
2010 xops[0] = operands[0];
2011 xops[4] = xops[1] = operands[1];
2012 xops[5] = xops[2] = operands[2];
2013
2014 switch (code)
2015 {
2016 case UMIN:
2017 code = LTU;
2018 break;
2019 case UMAX:
2020 code = GTU;
2021 break;
2022 case SMIN:
2023 code = LT;
2024 break;
2025 case SMAX:
2026 code = GT;
2027 break;
2028 default:
2029 gcc_unreachable ();
2030 }
2031 xops[3] = gen_rtx_fmt_ee (code, VOIDmode, operands[1], operands[2]);
2032
2033 ia64_expand_vecint_cmov (xops);
2034 return true;
2035 }
2036
2037 /* The vectors LO and HI each contain N halves of a double-wide vector.
2038 Reassemble either the first N/2 or the second N/2 elements. */
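/* For instance, with V8QImode (N = 8) the loop below builds
	HIGHP false:  { 0, 8, 1, 9, 2, 10, 3, 11 }
	HIGHP true:   { 4, 12, 5, 13, 6, 14, 7, 15 }
   where indices >= N select from the second operand, i.e. elements
   0..N/2-1 (or N/2..N-1) of LO and HI are interleaved pairwise, with the
   two operands swapped on big-endian targets.  */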
2039
2040 void
2041 ia64_unpack_assemble (rtx out, rtx lo, rtx hi, bool highp)
2042 {
2043 enum machine_mode vmode = GET_MODE (lo);
2044 unsigned int i, high, nelt = GET_MODE_NUNITS (vmode);
2045 struct expand_vec_perm_d d;
2046 bool ok;
2047
2048 d.target = gen_lowpart (vmode, out);
2049 d.op0 = (TARGET_BIG_ENDIAN ? hi : lo);
2050 d.op1 = (TARGET_BIG_ENDIAN ? lo : hi);
2051 d.vmode = vmode;
2052 d.nelt = nelt;
2053 d.one_operand_p = false;
2054 d.testing_p = false;
2055
2056 high = (highp ? nelt / 2 : 0);
2057 for (i = 0; i < nelt / 2; ++i)
2058 {
2059 d.perm[i * 2] = i + high;
2060 d.perm[i * 2 + 1] = i + high + nelt;
2061 }
2062
2063 ok = ia64_expand_vec_perm_const_1 (&d);
2064 gcc_assert (ok);
2065 }
2066
2067 /* Return a vector of the sign-extension of VEC. */
2068
2069 static rtx
2070 ia64_unpack_sign (rtx vec, bool unsignedp)
2071 {
2072 enum machine_mode mode = GET_MODE (vec);
2073 rtx zero = CONST0_RTX (mode);
2074
2075 if (unsignedp)
2076 return zero;
2077 else
2078 {
2079 rtx sign = gen_reg_rtx (mode);
2080 bool neg;
2081
2082 neg = ia64_expand_vecint_compare (LT, mode, sign, vec, zero);
2083 gcc_assert (!neg);
2084
2085 return sign;
2086 }
2087 }
2088
2089 /* Emit an integral vector unpack operation. */
2090
2091 void
2092 ia64_expand_unpack (rtx operands[3], bool unsignedp, bool highp)
2093 {
2094 rtx sign = ia64_unpack_sign (operands[1], unsignedp);
2095 ia64_unpack_assemble (operands[0], operands[1], sign, highp);
2096 }
2097
2098 /* Emit an integral vector widening sum operation. */
2099
2100 void
2101 ia64_expand_widen_sum (rtx operands[3], bool unsignedp)
2102 {
2103 enum machine_mode wmode;
2104 rtx l, h, t, sign;
2105
2106 sign = ia64_unpack_sign (operands[1], unsignedp);
2107
2108 wmode = GET_MODE (operands[0]);
2109 l = gen_reg_rtx (wmode);
2110 h = gen_reg_rtx (wmode);
2111
2112 ia64_unpack_assemble (l, operands[1], sign, false);
2113 ia64_unpack_assemble (h, operands[1], sign, true);
2114
2115 t = expand_binop (wmode, add_optab, l, operands[2], NULL, 0, OPTAB_DIRECT);
2116 t = expand_binop (wmode, add_optab, h, t, operands[0], 0, OPTAB_DIRECT);
2117 if (t != operands[0])
2118 emit_move_insn (operands[0], t);
2119 }
2120
2121 /* Emit a signed or unsigned V8QI dot product operation. */
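/* A sketch of the expansion that follows: each V8QI operand is widened
   into low and high V4HI halves, parallel multiplies of the even and odd
   element pairs form four V2SI partial products p1..p4, and the result is
   accumulated as
	operands[0] = (p1 + p2) + (p3 + p4) + operands[3]
   so every product op1[i] * op2[i] contributes to one of the two SI lanes
   of the destination.  */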
2122
2123 void
2124 ia64_expand_dot_prod_v8qi (rtx operands[4], bool unsignedp)
2125 {
2126 rtx op1, op2, sn1, sn2, l1, l2, h1, h2;
2127 rtx p1, p2, p3, p4, s1, s2, s3;
2128
2129 op1 = operands[1];
2130 op2 = operands[2];
2131 sn1 = ia64_unpack_sign (op1, unsignedp);
2132 sn2 = ia64_unpack_sign (op2, unsignedp);
2133
2134 l1 = gen_reg_rtx (V4HImode);
2135 l2 = gen_reg_rtx (V4HImode);
2136 h1 = gen_reg_rtx (V4HImode);
2137 h2 = gen_reg_rtx (V4HImode);
2138 ia64_unpack_assemble (l1, op1, sn1, false);
2139 ia64_unpack_assemble (l2, op2, sn2, false);
2140 ia64_unpack_assemble (h1, op1, sn1, true);
2141 ia64_unpack_assemble (h2, op2, sn2, true);
2142
2143 p1 = gen_reg_rtx (V2SImode);
2144 p2 = gen_reg_rtx (V2SImode);
2145 p3 = gen_reg_rtx (V2SImode);
2146 p4 = gen_reg_rtx (V2SImode);
2147 emit_insn (gen_pmpy2_even (p1, l1, l2));
2148 emit_insn (gen_pmpy2_even (p2, h1, h2));
2149 emit_insn (gen_pmpy2_odd (p3, l1, l2));
2150 emit_insn (gen_pmpy2_odd (p4, h1, h2));
2151
2152 s1 = gen_reg_rtx (V2SImode);
2153 s2 = gen_reg_rtx (V2SImode);
2154 s3 = gen_reg_rtx (V2SImode);
2155 emit_insn (gen_addv2si3 (s1, p1, p2));
2156 emit_insn (gen_addv2si3 (s2, p3, p4));
2157 emit_insn (gen_addv2si3 (s3, s1, operands[3]));
2158 emit_insn (gen_addv2si3 (operands[0], s2, s3));
2159 }
2160
2161 /* Emit the appropriate sequence for a call. */
2162
2163 void
2164 ia64_expand_call (rtx retval, rtx addr, rtx nextarg ATTRIBUTE_UNUSED,
2165 int sibcall_p)
2166 {
2167 rtx insn, b0;
2168
2169 addr = XEXP (addr, 0);
2170 addr = convert_memory_address (DImode, addr);
2171 b0 = gen_rtx_REG (DImode, R_BR (0));
2172
2173 /* ??? Should do this for functions known to bind local too. */
2174 if (TARGET_NO_PIC || TARGET_AUTO_PIC)
2175 {
2176 if (sibcall_p)
2177 insn = gen_sibcall_nogp (addr);
2178 else if (! retval)
2179 insn = gen_call_nogp (addr, b0);
2180 else
2181 insn = gen_call_value_nogp (retval, addr, b0);
2182 insn = emit_call_insn (insn);
2183 }
2184 else
2185 {
2186 if (sibcall_p)
2187 insn = gen_sibcall_gp (addr);
2188 else if (! retval)
2189 insn = gen_call_gp (addr, b0);
2190 else
2191 insn = gen_call_value_gp (retval, addr, b0);
2192 insn = emit_call_insn (insn);
2193
2194 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
2195 }
2196
2197 if (sibcall_p)
2198 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), b0);
2199
2200 if (TARGET_ABI_OPEN_VMS)
2201 use_reg (&CALL_INSN_FUNCTION_USAGE (insn),
2202 gen_rtx_REG (DImode, GR_REG (25)));
2203 }
2204
2205 static void
2206 reg_emitted (enum ia64_frame_regs r)
2207 {
2208 if (emitted_frame_related_regs[r] == 0)
2209 emitted_frame_related_regs[r] = current_frame_info.r[r];
2210 else
2211 gcc_assert (emitted_frame_related_regs[r] == current_frame_info.r[r]);
2212 }
2213
2214 static int
2215 get_reg (enum ia64_frame_regs r)
2216 {
2217 reg_emitted (r);
2218 return current_frame_info.r[r];
2219 }
2220
2221 static bool
2222 is_emitted (int regno)
2223 {
2224 unsigned int r;
2225
2226 for (r = reg_fp; r < number_of_ia64_frame_regs; r++)
2227 if (emitted_frame_related_regs[r] == regno)
2228 return true;
2229 return false;
2230 }
2231
2232 void
2233 ia64_reload_gp (void)
2234 {
2235 rtx tmp;
2236
2237 if (current_frame_info.r[reg_save_gp])
2238 {
2239 tmp = gen_rtx_REG (DImode, get_reg (reg_save_gp));
2240 }
2241 else
2242 {
2243 HOST_WIDE_INT offset;
2244 rtx offset_r;
2245
2246 offset = (current_frame_info.spill_cfa_off
2247 + current_frame_info.spill_size);
2248 if (frame_pointer_needed)
2249 {
2250 tmp = hard_frame_pointer_rtx;
2251 offset = -offset;
2252 }
2253 else
2254 {
2255 tmp = stack_pointer_rtx;
2256 offset = current_frame_info.total_size - offset;
2257 }
2258
2259 offset_r = GEN_INT (offset);
2260 if (satisfies_constraint_I (offset_r))
2261 emit_insn (gen_adddi3 (pic_offset_table_rtx, tmp, offset_r));
2262 else
2263 {
2264 emit_move_insn (pic_offset_table_rtx, offset_r);
2265 emit_insn (gen_adddi3 (pic_offset_table_rtx,
2266 pic_offset_table_rtx, tmp));
2267 }
2268
2269 tmp = gen_rtx_MEM (DImode, pic_offset_table_rtx);
2270 }
2271
2272 emit_move_insn (pic_offset_table_rtx, tmp);
2273 }
2274
2275 void
2276 ia64_split_call (rtx retval, rtx addr, rtx retaddr, rtx scratch_r,
2277 rtx scratch_b, int noreturn_p, int sibcall_p)
2278 {
2279 rtx insn;
2280 bool is_desc = false;
2281
2282 /* If we find we're calling through a register, then we're actually
2283 calling through a descriptor, so load up the values. */
2284 if (REG_P (addr) && GR_REGNO_P (REGNO (addr)))
2285 {
2286 rtx tmp;
2287 bool addr_dead_p;
2288
2289 /* ??? We are currently constrained to *not* use peep2, because
2290 we can legitimately change the global lifetime of the GP
2291 (in the form of killing where previously live). This is
2292 because a call through a descriptor doesn't use the previous
2293 value of the GP, while a direct call does, and we do not
2294 commit to either form until the split here.
2295
2296 That said, this means that we lack precise life info for
2297 whether ADDR is dead after this call. This is not terribly
2298 important, since we can fix things up essentially for free
2299 with the POST_DEC below, but it's nice to not use it when we
2300 can immediately tell it's not necessary. */
2301 addr_dead_p = ((noreturn_p || sibcall_p
2302 || TEST_HARD_REG_BIT (regs_invalidated_by_call,
2303 REGNO (addr)))
2304 && !FUNCTION_ARG_REGNO_P (REGNO (addr)));
2305
2306 /* Load the code address into scratch_b. */
2307 tmp = gen_rtx_POST_INC (Pmode, addr);
2308 tmp = gen_rtx_MEM (Pmode, tmp);
2309 emit_move_insn (scratch_r, tmp);
2310 emit_move_insn (scratch_b, scratch_r);
2311
2312 /* Load the GP address. If ADDR is not dead here, then we must
2313 revert the change made above via the POST_INCREMENT. */
2314 if (!addr_dead_p)
2315 tmp = gen_rtx_POST_DEC (Pmode, addr);
2316 else
2317 tmp = addr;
2318 tmp = gen_rtx_MEM (Pmode, tmp);
2319 emit_move_insn (pic_offset_table_rtx, tmp);
2320
2321 is_desc = true;
2322 addr = scratch_b;
2323 }
2324
2325 if (sibcall_p)
2326 insn = gen_sibcall_nogp (addr);
2327 else if (retval)
2328 insn = gen_call_value_nogp (retval, addr, retaddr);
2329 else
2330 insn = gen_call_nogp (addr, retaddr);
2331 emit_call_insn (insn);
2332
2333 if ((!TARGET_CONST_GP || is_desc) && !noreturn_p && !sibcall_p)
2334 ia64_reload_gp ();
2335 }
2336
2337 /* Expand an atomic operation. We want to perform MEM <CODE>= VAL atomically.
2338
2339 This differs from the generic code in that we know about the zero-extending
2340 properties of cmpxchg, and the zero-extending requirements of ar.ccv. We
2341 also know that ld.acq+cmpxchg.rel equals a full barrier.
2342
2343 The loop we want to generate looks like
2344
2345 cmp_reg = mem;
2346 label:
2347 old_reg = cmp_reg;
2348 new_reg = cmp_reg op val;
2349 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
2350 if (cmp_reg != old_reg)
2351 goto label;
2352
2353 Note that we only do the plain load from memory once. Subsequent
2354 iterations use the value loaded by the compare-and-swap pattern. */
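/* There is also a fast path below: a SImode or DImode PLUS or MINUS whose
   operand is an increment accepted by fetchadd_operand is emitted as a
   single fetchadd, using the acquire or release form (plus a full barrier
   for ACQ_REL/SEQ_CST) as MODEL requires, so e.g. an atomic add of 8 to a
   DImode counter needs no loop at all.  */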
2355
2356 void
2357 ia64_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
2358 rtx old_dst, rtx new_dst, enum memmodel model)
2359 {
2360 enum machine_mode mode = GET_MODE (mem);
2361 rtx old_reg, new_reg, cmp_reg, ar_ccv, label;
2362 enum insn_code icode;
2363
2364 /* Special case for using fetchadd. */
2365 if ((mode == SImode || mode == DImode)
2366 && (code == PLUS || code == MINUS)
2367 && fetchadd_operand (val, mode))
2368 {
2369 if (code == MINUS)
2370 val = GEN_INT (-INTVAL (val));
2371
2372 if (!old_dst)
2373 old_dst = gen_reg_rtx (mode);
2374
2375 switch (model)
2376 {
2377 case MEMMODEL_ACQ_REL:
2378 case MEMMODEL_SEQ_CST:
2379 emit_insn (gen_memory_barrier ());
2380 /* FALLTHRU */
2381 case MEMMODEL_RELAXED:
2382 case MEMMODEL_ACQUIRE:
2383 case MEMMODEL_CONSUME:
2384 if (mode == SImode)
2385 icode = CODE_FOR_fetchadd_acq_si;
2386 else
2387 icode = CODE_FOR_fetchadd_acq_di;
2388 break;
2389 case MEMMODEL_RELEASE:
2390 if (mode == SImode)
2391 icode = CODE_FOR_fetchadd_rel_si;
2392 else
2393 icode = CODE_FOR_fetchadd_rel_di;
2394 break;
2395
2396 default:
2397 gcc_unreachable ();
2398 }
2399
2400 emit_insn (GEN_FCN (icode) (old_dst, mem, val));
2401
2402 if (new_dst)
2403 {
2404 new_reg = expand_simple_binop (mode, PLUS, old_dst, val, new_dst,
2405 true, OPTAB_WIDEN);
2406 if (new_reg != new_dst)
2407 emit_move_insn (new_dst, new_reg);
2408 }
2409 return;
2410 }
2411
2412 /* Because of the volatile mem read, we get an ld.acq, which is the
2413 front half of the full barrier. The end half is the cmpxchg.rel.
2414 For relaxed and release memory models, we don't need this. But we
2415 also don't bother trying to prevent it either. */
2416 gcc_assert (model == MEMMODEL_RELAXED
2417 || model == MEMMODEL_RELEASE
2418 || MEM_VOLATILE_P (mem));
2419
2420 old_reg = gen_reg_rtx (DImode);
2421 cmp_reg = gen_reg_rtx (DImode);
2422 label = gen_label_rtx ();
2423
2424 if (mode != DImode)
2425 {
2426 val = simplify_gen_subreg (DImode, val, mode, 0);
2427 emit_insn (gen_extend_insn (cmp_reg, mem, DImode, mode, 1));
2428 }
2429 else
2430 emit_move_insn (cmp_reg, mem);
2431
2432 emit_label (label);
2433
2434 ar_ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
2435 emit_move_insn (old_reg, cmp_reg);
2436 emit_move_insn (ar_ccv, cmp_reg);
2437
2438 if (old_dst)
2439 emit_move_insn (old_dst, gen_lowpart (mode, cmp_reg));
2440
2441 new_reg = cmp_reg;
2442 if (code == NOT)
2443 {
2444 new_reg = expand_simple_binop (DImode, AND, new_reg, val, NULL_RTX,
2445 true, OPTAB_DIRECT);
2446 new_reg = expand_simple_unop (DImode, code, new_reg, NULL_RTX, true);
2447 }
2448 else
2449 new_reg = expand_simple_binop (DImode, code, new_reg, val, NULL_RTX,
2450 true, OPTAB_DIRECT);
2451
2452 if (mode != DImode)
2453 new_reg = gen_lowpart (mode, new_reg);
2454 if (new_dst)
2455 emit_move_insn (new_dst, new_reg);
2456
2457 switch (model)
2458 {
2459 case MEMMODEL_RELAXED:
2460 case MEMMODEL_ACQUIRE:
2461 case MEMMODEL_CONSUME:
2462 switch (mode)
2463 {
2464 case QImode: icode = CODE_FOR_cmpxchg_acq_qi; break;
2465 case HImode: icode = CODE_FOR_cmpxchg_acq_hi; break;
2466 case SImode: icode = CODE_FOR_cmpxchg_acq_si; break;
2467 case DImode: icode = CODE_FOR_cmpxchg_acq_di; break;
2468 default:
2469 gcc_unreachable ();
2470 }
2471 break;
2472
2473 case MEMMODEL_RELEASE:
2474 case MEMMODEL_ACQ_REL:
2475 case MEMMODEL_SEQ_CST:
2476 switch (mode)
2477 {
2478 case QImode: icode = CODE_FOR_cmpxchg_rel_qi; break;
2479 case HImode: icode = CODE_FOR_cmpxchg_rel_hi; break;
2480 case SImode: icode = CODE_FOR_cmpxchg_rel_si; break;
2481 case DImode: icode = CODE_FOR_cmpxchg_rel_di; break;
2482 default:
2483 gcc_unreachable ();
2484 }
2485 break;
2486
2487 default:
2488 gcc_unreachable ();
2489 }
2490
2491 emit_insn (GEN_FCN (icode) (cmp_reg, mem, ar_ccv, new_reg));
2492
2493 emit_cmp_and_jump_insns (cmp_reg, old_reg, NE, NULL, DImode, true, label);
2494 }
2495 \f
2496 /* Begin the assembly file. */
2497
2498 static void
2499 ia64_file_start (void)
2500 {
2501 default_file_start ();
2502 emit_safe_across_calls ();
2503 }
2504
2505 void
2506 emit_safe_across_calls (void)
2507 {
2508 unsigned int rs, re;
2509 int out_state;
2510
2511 rs = 1;
2512 out_state = 0;
2513 while (1)
2514 {
2515 while (rs < 64 && call_used_regs[PR_REG (rs)])
2516 rs++;
2517 if (rs >= 64)
2518 break;
2519 for (re = rs + 1; re < 64 && ! call_used_regs[PR_REG (re)]; re++)
2520 continue;
2521 if (out_state == 0)
2522 {
2523 fputs ("\t.pred.safe_across_calls ", asm_out_file);
2524 out_state = 1;
2525 }
2526 else
2527 fputc (',', asm_out_file);
2528 if (re == rs + 1)
2529 fprintf (asm_out_file, "p%u", rs);
2530 else
2531 fprintf (asm_out_file, "p%u-p%u", rs, re - 1);
2532 rs = re + 1;
2533 }
2534 if (out_state)
2535 fputc ('\n', asm_out_file);
2536 }
2537
2538 /* Globalize a declaration. */
2539
2540 static void
2541 ia64_globalize_decl_name (FILE * stream, tree decl)
2542 {
2543 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
2544 tree version_attr = lookup_attribute ("version_id", DECL_ATTRIBUTES (decl));
2545 if (version_attr)
2546 {
2547 tree v = TREE_VALUE (TREE_VALUE (version_attr));
2548 const char *p = TREE_STRING_POINTER (v);
2549 fprintf (stream, "\t.alias %s#, \"%s{%s}\"\n", name, name, p);
2550 }
2551 targetm.asm_out.globalize_label (stream, name);
2552 if (TREE_CODE (decl) == FUNCTION_DECL)
2553 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "function");
2554 }
2555
2556 /* Helper function for ia64_compute_frame_size: find an appropriate general
2557 register to spill some special register to. current_frame_info.gr_used_mask
2558 records which of GR0 to GR31 have already been allocated by this routine.
2559 TRY_LOCALS is true if we should attempt to locate a local regnum. */
2560
2561 static int
2562 find_gr_spill (enum ia64_frame_regs r, int try_locals)
2563 {
2564 int regno;
2565
2566 if (emitted_frame_related_regs[r] != 0)
2567 {
2568 regno = emitted_frame_related_regs[r];
2569 if (regno >= LOC_REG (0) && regno < LOC_REG (80 - frame_pointer_needed)
2570 && current_frame_info.n_local_regs < regno - LOC_REG (0) + 1)
2571 current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;
2572 else if (current_function_is_leaf
2573 && regno >= GR_REG (1) && regno <= GR_REG (31))
2574 current_frame_info.gr_used_mask |= 1 << regno;
2575
2576 return regno;
2577 }
2578
2579 /* If this is a leaf function, first try an otherwise unused
2580 call-clobbered register. */
2581 if (current_function_is_leaf)
2582 {
2583 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
2584 if (! df_regs_ever_live_p (regno)
2585 && call_used_regs[regno]
2586 && ! fixed_regs[regno]
2587 && ! global_regs[regno]
2588 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0
2589 && ! is_emitted (regno))
2590 {
2591 current_frame_info.gr_used_mask |= 1 << regno;
2592 return regno;
2593 }
2594 }
2595
2596 if (try_locals)
2597 {
2598 regno = current_frame_info.n_local_regs;
2599 /* If there is a frame pointer, then we can't use loc79, because
2600 that is HARD_FRAME_POINTER_REGNUM. In particular, see the
2601 reg_name switching code in ia64_expand_prologue. */
2602 while (regno < (80 - frame_pointer_needed))
2603 if (! is_emitted (LOC_REG (regno++)))
2604 {
2605 current_frame_info.n_local_regs = regno;
2606 return LOC_REG (regno - 1);
2607 }
2608 }
2609
2610 /* Failed to find a general register to spill to. Must use stack. */
2611 return 0;
2612 }
2613
2614 /* In order to make for nice schedules, we try to allocate every temporary
2615 to a different register. We must of course stay away from call-saved,
2616 fixed, and global registers. We must also stay away from registers
2617 allocated in current_frame_info.gr_used_mask, since those include regs
2618 used all through the prologue.
2619
2620 Any register allocated here must be used immediately. The idea is to
2621 aid scheduling, not to solve data flow problems. */
2622
2623 static int last_scratch_gr_reg;
2624
2625 static int
2626 next_scratch_gr_reg (void)
2627 {
2628 int i, regno;
2629
2630 for (i = 0; i < 32; ++i)
2631 {
2632 regno = (last_scratch_gr_reg + i + 1) & 31;
2633 if (call_used_regs[regno]
2634 && ! fixed_regs[regno]
2635 && ! global_regs[regno]
2636 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
2637 {
2638 last_scratch_gr_reg = regno;
2639 return regno;
2640 }
2641 }
2642
2643 /* There must be _something_ available. */
2644 gcc_unreachable ();
2645 }
2646
2647 /* Helper function for ia64_compute_frame_size, called through
2648 diddle_return_value. Mark REG in current_frame_info.gr_used_mask. */
2649
2650 static void
2651 mark_reg_gr_used_mask (rtx reg, void *data ATTRIBUTE_UNUSED)
2652 {
2653 unsigned int regno = REGNO (reg);
2654 if (regno < 32)
2655 {
2656 unsigned int i, n = hard_regno_nregs[regno][GET_MODE (reg)];
2657 for (i = 0; i < n; ++i)
2658 current_frame_info.gr_used_mask |= 1 << (regno + i);
2659 }
2660 }
2661
2662
2663 /* Compute the size and layout of the stack frame for the current function,
2664 recording the results in current_frame_info. SIZE is the number of bytes
2665 of space needed for local variables. */
2666
2667 static void
2668 ia64_compute_frame_size (HOST_WIDE_INT size)
2669 {
2670 HOST_WIDE_INT total_size;
2671 HOST_WIDE_INT spill_size = 0;
2672 HOST_WIDE_INT extra_spill_size = 0;
2673 HOST_WIDE_INT pretend_args_size;
2674 HARD_REG_SET mask;
2675 int n_spilled = 0;
2676 int spilled_gr_p = 0;
2677 int spilled_fr_p = 0;
2678 unsigned int regno;
2679 int min_regno;
2680 int max_regno;
2681 int i;
2682
2683 if (current_frame_info.initialized)
2684 return;
2685
2686 memset (&current_frame_info, 0, sizeof current_frame_info);
2687 CLEAR_HARD_REG_SET (mask);
2688
2689 /* Don't allocate scratches to the return register. */
2690 diddle_return_value (mark_reg_gr_used_mask, NULL);
2691
2692 /* Don't allocate scratches to the EH scratch registers. */
2693 if (cfun->machine->ia64_eh_epilogue_sp)
2694 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_sp, NULL);
2695 if (cfun->machine->ia64_eh_epilogue_bsp)
2696 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_bsp, NULL);
2697
2698 /* Find the size of the register stack frame. We have only 80 local
2699 registers, because we reserve 8 for the inputs and 8 for the
2700 outputs. */
2701
2702 /* Skip HARD_FRAME_POINTER_REGNUM (loc79) when frame_pointer_needed,
2703 since we'll be adjusting that down later. */
2704 regno = LOC_REG (78) + ! frame_pointer_needed;
2705 for (; regno >= LOC_REG (0); regno--)
2706 if (df_regs_ever_live_p (regno) && !is_emitted (regno))
2707 break;
2708 current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;
2709
2710 /* For functions marked with the syscall_linkage attribute, we must mark
2711 all eight input registers as in use, so that locals aren't visible to
2712 the caller. */
2713
2714 if (cfun->machine->n_varargs > 0
2715 || lookup_attribute ("syscall_linkage",
2716 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
2717 current_frame_info.n_input_regs = 8;
2718 else
2719 {
2720 for (regno = IN_REG (7); regno >= IN_REG (0); regno--)
2721 if (df_regs_ever_live_p (regno))
2722 break;
2723 current_frame_info.n_input_regs = regno - IN_REG (0) + 1;
2724 }
2725
2726 for (regno = OUT_REG (7); regno >= OUT_REG (0); regno--)
2727 if (df_regs_ever_live_p (regno))
2728 break;
2729 i = regno - OUT_REG (0) + 1;
2730
2731 #ifndef PROFILE_HOOK
2732 /* When -p profiling, we need one output register for the mcount argument.
2733 Likewise for -a profiling for the bb_init_func argument. For -ax
2734 profiling, we need two output registers for the two bb_init_trace_func
2735 arguments. */
2736 if (crtl->profile)
2737 i = MAX (i, 1);
2738 #endif
2739 current_frame_info.n_output_regs = i;
2740
2741 /* ??? No rotating register support yet. */
2742 current_frame_info.n_rotate_regs = 0;
2743
2744 /* Discover which registers need spilling, and how much room that
2745 will take. Begin with floating point and general registers,
2746 which will always wind up on the stack. */
2747
2748 for (regno = FR_REG (2); regno <= FR_REG (127); regno++)
2749 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2750 {
2751 SET_HARD_REG_BIT (mask, regno);
2752 spill_size += 16;
2753 n_spilled += 1;
2754 spilled_fr_p = 1;
2755 }
2756
2757 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
2758 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2759 {
2760 SET_HARD_REG_BIT (mask, regno);
2761 spill_size += 8;
2762 n_spilled += 1;
2763 spilled_gr_p = 1;
2764 }
2765
2766 for (regno = BR_REG (1); regno <= BR_REG (7); regno++)
2767 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2768 {
2769 SET_HARD_REG_BIT (mask, regno);
2770 spill_size += 8;
2771 n_spilled += 1;
2772 }
2773
2774 /* Now come all special registers that might get saved in other
2775 general registers. */
2776
2777 if (frame_pointer_needed)
2778 {
2779 current_frame_info.r[reg_fp] = find_gr_spill (reg_fp, 1);
2780 /* If we did not get a register, then we take LOC79. This is guaranteed
2781 to be free, even if regs_ever_live is already set, because this is
2782 HARD_FRAME_POINTER_REGNUM. This requires incrementing n_local_regs,
2783 as we don't count loc79 above. */
2784 if (current_frame_info.r[reg_fp] == 0)
2785 {
2786 current_frame_info.r[reg_fp] = LOC_REG (79);
2787 current_frame_info.n_local_regs = LOC_REG (79) - LOC_REG (0) + 1;
2788 }
2789 }
2790
2791 if (! current_function_is_leaf)
2792 {
2793 /* Emit a save of BR0 if we call other functions. Do this even
2794 if this function doesn't return, as EH depends on this to be
2795 able to unwind the stack. */
2796 SET_HARD_REG_BIT (mask, BR_REG (0));
2797
2798 current_frame_info.r[reg_save_b0] = find_gr_spill (reg_save_b0, 1);
2799 if (current_frame_info.r[reg_save_b0] == 0)
2800 {
2801 extra_spill_size += 8;
2802 n_spilled += 1;
2803 }
2804
2805 /* Similarly for ar.pfs. */
2806 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
2807 current_frame_info.r[reg_save_ar_pfs] = find_gr_spill (reg_save_ar_pfs, 1);
2808 if (current_frame_info.r[reg_save_ar_pfs] == 0)
2809 {
2810 extra_spill_size += 8;
2811 n_spilled += 1;
2812 }
2813
2814 /* Similarly for gp. Note that if we're calling setjmp, the stacked
2815 registers are clobbered, so we fall back to the stack. */
2816 current_frame_info.r[reg_save_gp]
2817 = (cfun->calls_setjmp ? 0 : find_gr_spill (reg_save_gp, 1));
2818 if (current_frame_info.r[reg_save_gp] == 0)
2819 {
2820 SET_HARD_REG_BIT (mask, GR_REG (1));
2821 spill_size += 8;
2822 n_spilled += 1;
2823 }
2824 }
2825 else
2826 {
2827 if (df_regs_ever_live_p (BR_REG (0)) && ! call_used_regs[BR_REG (0)])
2828 {
2829 SET_HARD_REG_BIT (mask, BR_REG (0));
2830 extra_spill_size += 8;
2831 n_spilled += 1;
2832 }
2833
2834 if (df_regs_ever_live_p (AR_PFS_REGNUM))
2835 {
2836 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
2837 current_frame_info.r[reg_save_ar_pfs]
2838 = find_gr_spill (reg_save_ar_pfs, 1);
2839 if (current_frame_info.r[reg_save_ar_pfs] == 0)
2840 {
2841 extra_spill_size += 8;
2842 n_spilled += 1;
2843 }
2844 }
2845 }
2846
2847 /* Unwind descriptor hackery: things are most efficient if we allocate
2848 consecutive GR save registers for RP, PFS, FP in that order. However,
2849 it is absolutely critical that FP get the only hard register that's
2850 guaranteed to be free, so we allocated it first. If all three did
2851 happen to be allocated hard regs, and are consecutive, rearrange them
2852 into the preferred order now.
2853
2854 If we have already emitted code for any of those registers,
2855 then it's already too late to change. */
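/* Concretely: if the three saves landed in three consecutive registers but
   with FP in the lowest of them, the test below notices that
   min_regno + 2 == max_regno and, provided none of the three has already
   been emitted somewhere that conflicts, reassigns them so that from
   lowest to highest the order is B0, AR.PFS, FP -- the order the unwind
   descriptors encode most cheaply.  */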
2856 min_regno = MIN (current_frame_info.r[reg_fp],
2857 MIN (current_frame_info.r[reg_save_b0],
2858 current_frame_info.r[reg_save_ar_pfs]));
2859 max_regno = MAX (current_frame_info.r[reg_fp],
2860 MAX (current_frame_info.r[reg_save_b0],
2861 current_frame_info.r[reg_save_ar_pfs]));
2862 if (min_regno > 0
2863 && min_regno + 2 == max_regno
2864 && (current_frame_info.r[reg_fp] == min_regno + 1
2865 || current_frame_info.r[reg_save_b0] == min_regno + 1
2866 || current_frame_info.r[reg_save_ar_pfs] == min_regno + 1)
2867 && (emitted_frame_related_regs[reg_save_b0] == 0
2868 || emitted_frame_related_regs[reg_save_b0] == min_regno)
2869 && (emitted_frame_related_regs[reg_save_ar_pfs] == 0
2870 || emitted_frame_related_regs[reg_save_ar_pfs] == min_regno + 1)
2871 && (emitted_frame_related_regs[reg_fp] == 0
2872 || emitted_frame_related_regs[reg_fp] == min_regno + 2))
2873 {
2874 current_frame_info.r[reg_save_b0] = min_regno;
2875 current_frame_info.r[reg_save_ar_pfs] = min_regno + 1;
2876 current_frame_info.r[reg_fp] = min_regno + 2;
2877 }
2878
2879 /* See if we need to store the predicate register block. */
2880 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2881 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2882 break;
2883 if (regno <= PR_REG (63))
2884 {
2885 SET_HARD_REG_BIT (mask, PR_REG (0));
2886 current_frame_info.r[reg_save_pr] = find_gr_spill (reg_save_pr, 1);
2887 if (current_frame_info.r[reg_save_pr] == 0)
2888 {
2889 extra_spill_size += 8;
2890 n_spilled += 1;
2891 }
2892
2893 /* ??? Mark them all as used so that register renaming and such
2894 are free to use them. */
2895 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2896 df_set_regs_ever_live (regno, true);
2897 }
2898
2899 /* If we're forced to use st8.spill, we're forced to save and restore
2900 ar.unat as well. The check for existing liveness allows inline asm
2901 to touch ar.unat. */
2902 if (spilled_gr_p || cfun->machine->n_varargs
2903 || df_regs_ever_live_p (AR_UNAT_REGNUM))
2904 {
2905 df_set_regs_ever_live (AR_UNAT_REGNUM, true);
2906 SET_HARD_REG_BIT (mask, AR_UNAT_REGNUM);
2907 current_frame_info.r[reg_save_ar_unat]
2908 = find_gr_spill (reg_save_ar_unat, spill_size == 0);
2909 if (current_frame_info.r[reg_save_ar_unat] == 0)
2910 {
2911 extra_spill_size += 8;
2912 n_spilled += 1;
2913 }
2914 }
2915
2916 if (df_regs_ever_live_p (AR_LC_REGNUM))
2917 {
2918 SET_HARD_REG_BIT (mask, AR_LC_REGNUM);
2919 current_frame_info.r[reg_save_ar_lc]
2920 = find_gr_spill (reg_save_ar_lc, spill_size == 0);
2921 if (current_frame_info.r[reg_save_ar_lc] == 0)
2922 {
2923 extra_spill_size += 8;
2924 n_spilled += 1;
2925 }
2926 }
2927
2928 /* If we have an odd number of words of pretend arguments written to
2929 the stack, then the FR save area will be unaligned. We round the
2930 size of this area up to keep things 16 byte aligned. */
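/* E.g. a single 8 byte pretend argument leaves the area at 8 bytes;
   IA64_STACK_ALIGN rounds that up to 16 so the FR spills that follow stay
   16 byte aligned.  */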
2931 if (spilled_fr_p)
2932 pretend_args_size = IA64_STACK_ALIGN (crtl->args.pretend_args_size);
2933 else
2934 pretend_args_size = crtl->args.pretend_args_size;
2935
2936 total_size = (spill_size + extra_spill_size + size + pretend_args_size
2937 + crtl->outgoing_args_size);
2938 total_size = IA64_STACK_ALIGN (total_size);
2939
2940 /* We always use the 16-byte scratch area provided by the caller, but
2941 if we are a leaf function, there's no one to which we need to provide
2942 a scratch area. */
2943 if (current_function_is_leaf)
2944 total_size = MAX (0, total_size - 16);
2945
2946 current_frame_info.total_size = total_size;
2947 current_frame_info.spill_cfa_off = pretend_args_size - 16;
2948 current_frame_info.spill_size = spill_size;
2949 current_frame_info.extra_spill_size = extra_spill_size;
2950 COPY_HARD_REG_SET (current_frame_info.mask, mask);
2951 current_frame_info.n_spilled = n_spilled;
2952 current_frame_info.initialized = reload_completed;
2953 }
2954
2955 /* Worker function for TARGET_CAN_ELIMINATE. */
2956
2957 bool
2958 ia64_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
2959 {
2960 return (to == BR_REG (0) ? current_function_is_leaf : true);
2961 }
2962
2963 /* Compute the initial difference between the specified pair of registers. */
2964
2965 HOST_WIDE_INT
2966 ia64_initial_elimination_offset (int from, int to)
2967 {
2968 HOST_WIDE_INT offset;
2969
2970 ia64_compute_frame_size (get_frame_size ());
2971 switch (from)
2972 {
2973 case FRAME_POINTER_REGNUM:
2974 switch (to)
2975 {
2976 case HARD_FRAME_POINTER_REGNUM:
2977 if (current_function_is_leaf)
2978 offset = -current_frame_info.total_size;
2979 else
2980 offset = -(current_frame_info.total_size
2981 - crtl->outgoing_args_size - 16);
2982 break;
2983
2984 case STACK_POINTER_REGNUM:
2985 if (current_function_is_leaf)
2986 offset = 0;
2987 else
2988 offset = 16 + crtl->outgoing_args_size;
2989 break;
2990
2991 default:
2992 gcc_unreachable ();
2993 }
2994 break;
2995
2996 case ARG_POINTER_REGNUM:
2997 /* Arguments start above the 16 byte save area, unless stdarg
2998 in which case we store through the 16 byte save area. */
2999 switch (to)
3000 {
3001 case HARD_FRAME_POINTER_REGNUM:
3002 offset = 16 - crtl->args.pretend_args_size;
3003 break;
3004
3005 case STACK_POINTER_REGNUM:
3006 offset = (current_frame_info.total_size
3007 + 16 - crtl->args.pretend_args_size);
3008 break;
3009
3010 default:
3011 gcc_unreachable ();
3012 }
3013 break;
3014
3015 default:
3016 gcc_unreachable ();
3017 }
3018
3019 return offset;
3020 }
3021
3022 /* If there are more than a trivial number of register spills, we use
3023 two interleaved iterators so that we can get two memory references
3024 per insn group.
3025
3026 In order to simplify things in the prologue and epilogue expanders,
3027 we use helper functions to fix up the memory references after the
3028 fact with the appropriate offsets to a POST_MODIFY memory mode.
3029 The following data structure tracks the state of the two iterators
3030 while insns are being emitted. */
3031
3032 struct spill_fill_data
3033 {
3034 rtx init_after; /* point at which to emit initializations */
3035 rtx init_reg[2]; /* initial base register */
3036 rtx iter_reg[2]; /* the iterator registers */
3037 rtx *prev_addr[2]; /* address of last memory use */
3038 rtx prev_insn[2]; /* the insn corresponding to prev_addr */
3039 HOST_WIDE_INT prev_off[2]; /* last offset */
3040 int n_iter; /* number of iterators in use */
3041 int next_iter; /* next iterator to use */
3042 unsigned int save_gr_used_mask;
3043 };
3044
3045 static struct spill_fill_data spill_fill_data;
3046
3047 static void
3048 setup_spill_pointers (int n_spills, rtx init_reg, HOST_WIDE_INT cfa_off)
3049 {
3050 int i;
3051
3052 spill_fill_data.init_after = get_last_insn ();
3053 spill_fill_data.init_reg[0] = init_reg;
3054 spill_fill_data.init_reg[1] = init_reg;
3055 spill_fill_data.prev_addr[0] = NULL;
3056 spill_fill_data.prev_addr[1] = NULL;
3057 spill_fill_data.prev_insn[0] = NULL;
3058 spill_fill_data.prev_insn[1] = NULL;
3059 spill_fill_data.prev_off[0] = cfa_off;
3060 spill_fill_data.prev_off[1] = cfa_off;
3061 spill_fill_data.next_iter = 0;
3062 spill_fill_data.save_gr_used_mask = current_frame_info.gr_used_mask;
3063
3064 spill_fill_data.n_iter = 1 + (n_spills > 2);
3065 for (i = 0; i < spill_fill_data.n_iter; ++i)
3066 {
3067 int regno = next_scratch_gr_reg ();
3068 spill_fill_data.iter_reg[i] = gen_rtx_REG (DImode, regno);
3069 current_frame_info.gr_used_mask |= 1 << regno;
3070 }
3071 }
3072
3073 static void
3074 finish_spill_pointers (void)
3075 {
3076 current_frame_info.gr_used_mask = spill_fill_data.save_gr_used_mask;
3077 }
3078
3079 static rtx
3080 spill_restore_mem (rtx reg, HOST_WIDE_INT cfa_off)
3081 {
3082 int iter = spill_fill_data.next_iter;
3083 HOST_WIDE_INT disp = spill_fill_data.prev_off[iter] - cfa_off;
3084 rtx disp_rtx = GEN_INT (disp);
3085 rtx mem;
3086
3087 if (spill_fill_data.prev_addr[iter])
3088 {
3089 if (satisfies_constraint_N (disp_rtx))
3090 {
3091 *spill_fill_data.prev_addr[iter]
3092 = gen_rtx_POST_MODIFY (DImode, spill_fill_data.iter_reg[iter],
3093 gen_rtx_PLUS (DImode,
3094 spill_fill_data.iter_reg[iter],
3095 disp_rtx));
3096 add_reg_note (spill_fill_data.prev_insn[iter],
3097 REG_INC, spill_fill_data.iter_reg[iter]);
3098 }
3099 else
3100 {
3101 /* ??? Could use register post_modify for loads. */
3102 if (!satisfies_constraint_I (disp_rtx))
3103 {
3104 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
3105 emit_move_insn (tmp, disp_rtx);
3106 disp_rtx = tmp;
3107 }
3108 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
3109 spill_fill_data.iter_reg[iter], disp_rtx));
3110 }
3111 }
3112 /* Micro-optimization: if we've created a frame pointer, it's at
3113 CFA 0, which may allow the real iterator to be initialized lower,
3114 slightly increasing parallelism. Also, if there are few saves
3115 it may eliminate the iterator entirely. */
3116 else if (disp == 0
3117 && spill_fill_data.init_reg[iter] == stack_pointer_rtx
3118 && frame_pointer_needed)
3119 {
3120 mem = gen_rtx_MEM (GET_MODE (reg), hard_frame_pointer_rtx);
3121 set_mem_alias_set (mem, get_varargs_alias_set ());
3122 return mem;
3123 }
3124 else
3125 {
3126 rtx seq, insn;
3127
3128 if (disp == 0)
3129 seq = gen_movdi (spill_fill_data.iter_reg[iter],
3130 spill_fill_data.init_reg[iter]);
3131 else
3132 {
3133 start_sequence ();
3134
3135 if (!satisfies_constraint_I (disp_rtx))
3136 {
3137 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
3138 emit_move_insn (tmp, disp_rtx);
3139 disp_rtx = tmp;
3140 }
3141
3142 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
3143 spill_fill_data.init_reg[iter],
3144 disp_rtx));
3145
3146 seq = get_insns ();
3147 end_sequence ();
3148 }
3149
3150 /* Be careful in case this is the first insn in the sequence. */
3151 if (spill_fill_data.init_after)
3152 insn = emit_insn_after (seq, spill_fill_data.init_after);
3153 else
3154 {
3155 rtx first = get_insns ();
3156 if (first)
3157 insn = emit_insn_before (seq, first);
3158 else
3159 insn = emit_insn (seq);
3160 }
3161 spill_fill_data.init_after = insn;
3162 }
3163
3164 mem = gen_rtx_MEM (GET_MODE (reg), spill_fill_data.iter_reg[iter]);
3165
3166 /* ??? Not all of the spills are for varargs, but some of them are.
3167 The rest of the spills belong in an alias set of their own. But
3168 it doesn't actually hurt to include them here. */
3169 set_mem_alias_set (mem, get_varargs_alias_set ());
3170
3171 spill_fill_data.prev_addr[iter] = &XEXP (mem, 0);
3172 spill_fill_data.prev_off[iter] = cfa_off;
3173
3174 if (++iter >= spill_fill_data.n_iter)
3175 iter = 0;
3176 spill_fill_data.next_iter = iter;
3177
3178 return mem;
3179 }
3180
3181 static void
3182 do_spill (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off,
3183 rtx frame_reg)
3184 {
3185 int iter = spill_fill_data.next_iter;
3186 rtx mem, insn;
3187
3188 mem = spill_restore_mem (reg, cfa_off);
3189 insn = emit_insn ((*move_fn) (mem, reg, GEN_INT (cfa_off)));
3190 spill_fill_data.prev_insn[iter] = insn;
3191
3192 if (frame_reg)
3193 {
3194 rtx base;
3195 HOST_WIDE_INT off;
3196
3197 RTX_FRAME_RELATED_P (insn) = 1;
3198
3199 /* Don't even pretend that the unwind code can intuit its way
3200 through a pair of interleaved post_modify iterators. Just
3201 provide the correct answer. */
3202
3203 if (frame_pointer_needed)
3204 {
3205 base = hard_frame_pointer_rtx;
3206 off = - cfa_off;
3207 }
3208 else
3209 {
3210 base = stack_pointer_rtx;
3211 off = current_frame_info.total_size - cfa_off;
3212 }
3213
3214 add_reg_note (insn, REG_CFA_OFFSET,
3215 gen_rtx_SET (VOIDmode,
3216 gen_rtx_MEM (GET_MODE (reg),
3217 plus_constant (base, off)),
3218 frame_reg));
3219 }
3220 }
3221
3222 static void
3223 do_restore (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off)
3224 {
3225 int iter = spill_fill_data.next_iter;
3226 rtx insn;
3227
3228 insn = emit_insn ((*move_fn) (reg, spill_restore_mem (reg, cfa_off),
3229 GEN_INT (cfa_off)));
3230 spill_fill_data.prev_insn[iter] = insn;
3231 }
3232
3233 /* Wrapper functions that discard the CONST_INT spill offset. These
3234 exist so that we can give gr_spill/gr_fill the offset they need and
3235 use a consistent function interface. */
3236
3237 static rtx
3238 gen_movdi_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
3239 {
3240 return gen_movdi (dest, src);
3241 }
3242
3243 static rtx
3244 gen_fr_spill_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
3245 {
3246 return gen_fr_spill (dest, src);
3247 }
3248
3249 static rtx
3250 gen_fr_restore_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
3251 {
3252 return gen_fr_restore (dest, src);
3253 }
3254
3255 /* Called after register allocation to add any instructions needed for the
3256 prologue. Using a prologue insn is favored compared to putting all of the
3257 instructions in output_function_prologue(), since it allows the scheduler
3258 to intermix instructions with the saves of the call-saved registers. In
3259 some cases, it might be necessary to emit a barrier instruction as the last
3260 insn to prevent such scheduling.
3261
3262 Also any insns generated here should have RTX_FRAME_RELATED_P(insn) = 1
3263 so that the debug info generation code can handle them properly.
3264
3265 The register save area is laid out like so:
3266 cfa+16
3267 [ varargs spill area ]
3268 [ fr register spill area ]
3269 [ br register spill area ]
3270 [ ar register spill area ]
3271 [ pr register spill area ]
3272 [ gr register spill area ] */
3273
3274 /* ??? Get inefficient code when the frame size is larger than can fit in an
3275 adds instruction. */
3276
3277 void
3278 ia64_expand_prologue (void)
3279 {
3280 rtx insn, ar_pfs_save_reg, ar_unat_save_reg;
3281 int i, epilogue_p, regno, alt_regno, cfa_off, n_varargs;
3282 rtx reg, alt_reg;
3283
3284 ia64_compute_frame_size (get_frame_size ());
3285 last_scratch_gr_reg = 15;
3286
3287 if (flag_stack_usage_info)
3288 current_function_static_stack_size = current_frame_info.total_size;
3289
3290 if (dump_file)
3291 {
3292 fprintf (dump_file, "ia64 frame related registers "
3293 "recorded in current_frame_info.r[]:\n");
3294 #define PRINTREG(a) if (current_frame_info.r[a]) \
3295 fprintf(dump_file, "%s = %d\n", #a, current_frame_info.r[a])
3296 PRINTREG(reg_fp);
3297 PRINTREG(reg_save_b0);
3298 PRINTREG(reg_save_pr);
3299 PRINTREG(reg_save_ar_pfs);
3300 PRINTREG(reg_save_ar_unat);
3301 PRINTREG(reg_save_ar_lc);
3302 PRINTREG(reg_save_gp);
3303 #undef PRINTREG
3304 }
3305
3306 /* If there is no epilogue, then we don't need some prologue insns.
3307 We need to avoid emitting the dead prologue insns, because flow
3308 will complain about them. */
3309 if (optimize)
3310 {
3311 edge e;
3312 edge_iterator ei;
3313
3314 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
3315 if ((e->flags & EDGE_FAKE) == 0
3316 && (e->flags & EDGE_FALLTHRU) != 0)
3317 break;
3318 epilogue_p = (e != NULL);
3319 }
3320 else
3321 epilogue_p = 1;
3322
3323 /* Set the local, input, and output register names. We need to do this
3324 for GNU libc, which creates crti.S/crtn.S by splitting initfini.c in
3325 half. If we use in/loc/out register names, then we get assembler errors
3326 in crtn.S because there is no alloc insn or regstk directive in there. */
3327 if (! TARGET_REG_NAMES)
3328 {
3329 int inputs = current_frame_info.n_input_regs;
3330 int locals = current_frame_info.n_local_regs;
3331 int outputs = current_frame_info.n_output_regs;
3332
3333 for (i = 0; i < inputs; i++)
3334 reg_names[IN_REG (i)] = ia64_reg_numbers[i];
3335 for (i = 0; i < locals; i++)
3336 reg_names[LOC_REG (i)] = ia64_reg_numbers[inputs + i];
3337 for (i = 0; i < outputs; i++)
3338 reg_names[OUT_REG (i)] = ia64_reg_numbers[inputs + locals + i];
3339 }
3340
3341 /* Set the frame pointer register name. The regnum is logically loc79,
3342 but of course we'll not have allocated that many locals. Rather than
3343 worrying about renumbering the existing rtxs, we adjust the name. */
3344 /* ??? This code means that we can never use one local register when
3345 there is a frame pointer. loc79 gets wasted in this case, as it is
3346 renamed to a register that will never be used. See also the try_locals
3347 code in find_gr_spill. */
3348 if (current_frame_info.r[reg_fp])
3349 {
3350 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
3351 reg_names[HARD_FRAME_POINTER_REGNUM]
3352 = reg_names[current_frame_info.r[reg_fp]];
3353 reg_names[current_frame_info.r[reg_fp]] = tmp;
3354 }
3355
3356 /* We don't need an alloc instruction if we've used no outputs or locals. */
3357 if (current_frame_info.n_local_regs == 0
3358 && current_frame_info.n_output_regs == 0
3359 && current_frame_info.n_input_regs <= crtl->args.info.int_regs
3360 && !TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
3361 {
3362 /* If there is no alloc, but there are input registers used, then we
3363 need a .regstk directive. */
3364 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
3365 ar_pfs_save_reg = NULL_RTX;
3366 }
3367 else
3368 {
3369 current_frame_info.need_regstk = 0;
3370
3371 if (current_frame_info.r[reg_save_ar_pfs])
3372 {
3373 regno = current_frame_info.r[reg_save_ar_pfs];
3374 reg_emitted (reg_save_ar_pfs);
3375 }
3376 else
3377 regno = next_scratch_gr_reg ();
3378 ar_pfs_save_reg = gen_rtx_REG (DImode, regno);
3379
3380 insn = emit_insn (gen_alloc (ar_pfs_save_reg,
3381 GEN_INT (current_frame_info.n_input_regs),
3382 GEN_INT (current_frame_info.n_local_regs),
3383 GEN_INT (current_frame_info.n_output_regs),
3384 GEN_INT (current_frame_info.n_rotate_regs)));
3385 if (current_frame_info.r[reg_save_ar_pfs])
3386 {
3387 RTX_FRAME_RELATED_P (insn) = 1;
3388 add_reg_note (insn, REG_CFA_REGISTER,
3389 gen_rtx_SET (VOIDmode,
3390 ar_pfs_save_reg,
3391 gen_rtx_REG (DImode, AR_PFS_REGNUM)));
3392 }
3393 }
3394
3395 /* Set up frame pointer, stack pointer, and spill iterators. */
3396
3397 n_varargs = cfun->machine->n_varargs;
3398 setup_spill_pointers (current_frame_info.n_spilled + n_varargs,
3399 stack_pointer_rtx, 0);
3400
3401 if (frame_pointer_needed)
3402 {
3403 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
3404 RTX_FRAME_RELATED_P (insn) = 1;
3405
3406 /* Force the unwind info to recognize this as defining a new CFA,
3407 rather than some temp register setup. */
3408 add_reg_note (insn, REG_CFA_ADJUST_CFA, NULL_RTX);
3409 }
3410
3411 if (current_frame_info.total_size != 0)
3412 {
3413 rtx frame_size_rtx = GEN_INT (- current_frame_info.total_size);
3414 rtx offset;
3415
3416 if (satisfies_constraint_I (frame_size_rtx))
3417 offset = frame_size_rtx;
3418 else
3419 {
3420 regno = next_scratch_gr_reg ();
3421 offset = gen_rtx_REG (DImode, regno);
3422 emit_move_insn (offset, frame_size_rtx);
3423 }
3424
3425 insn = emit_insn (gen_adddi3 (stack_pointer_rtx,
3426 stack_pointer_rtx, offset));
3427
3428 if (! frame_pointer_needed)
3429 {
3430 RTX_FRAME_RELATED_P (insn) = 1;
3431 add_reg_note (insn, REG_CFA_ADJUST_CFA,
3432 gen_rtx_SET (VOIDmode,
3433 stack_pointer_rtx,
3434 gen_rtx_PLUS (DImode,
3435 stack_pointer_rtx,
3436 frame_size_rtx)));
3437 }
3438
3439 /* ??? At this point we must generate a magic insn that appears to
3440 modify the stack pointer, the frame pointer, and all spill
3441 iterators. This would allow the most scheduling freedom. For
3442 now, just hard stop. */
3443 emit_insn (gen_blockage ());
3444 }
3445
3446 /* Must copy out ar.unat before doing any integer spills. */
3447 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3448 {
3449 if (current_frame_info.r[reg_save_ar_unat])
3450 {
3451 ar_unat_save_reg
3452 = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_unat]);
3453 reg_emitted (reg_save_ar_unat);
3454 }
3455 else
3456 {
3457 alt_regno = next_scratch_gr_reg ();
3458 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
3459 current_frame_info.gr_used_mask |= 1 << alt_regno;
3460 }
3461
3462 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3463 insn = emit_move_insn (ar_unat_save_reg, reg);
3464 if (current_frame_info.r[reg_save_ar_unat])
3465 {
3466 RTX_FRAME_RELATED_P (insn) = 1;
3467 add_reg_note (insn, REG_CFA_REGISTER, NULL_RTX);
3468 }
3469
3470 /* Even if we're not going to generate an epilogue, we still
3471 need to save the register so that EH works. */
3472 if (! epilogue_p && current_frame_info.r[reg_save_ar_unat])
3473 emit_insn (gen_prologue_use (ar_unat_save_reg));
3474 }
3475 else
3476 ar_unat_save_reg = NULL_RTX;
3477
3478 /* Spill all varargs registers. Do this before spilling any GR registers,
3479 since we want the UNAT bits for the GR registers to override the UNAT
3480 bits from varargs, which we don't care about. */
3481
3482 cfa_off = -16;
3483 for (regno = GR_ARG_FIRST + 7; n_varargs > 0; --n_varargs, --regno)
3484 {
3485 reg = gen_rtx_REG (DImode, regno);
3486 do_spill (gen_gr_spill, reg, cfa_off += 8, NULL_RTX);
3487 }
3488
3489 /* Locate the bottom of the register save area. */
3490 cfa_off = (current_frame_info.spill_cfa_off
3491 + current_frame_info.spill_size
3492 + current_frame_info.extra_spill_size);
3493
3494 /* Save the predicate register block either in a register or in memory. */
3495 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
3496 {
3497 reg = gen_rtx_REG (DImode, PR_REG (0));
3498 if (current_frame_info.r[reg_save_pr] != 0)
3499 {
3500 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_pr]);
3501 reg_emitted (reg_save_pr);
3502 insn = emit_move_insn (alt_reg, reg);
3503
3504 /* ??? Denote pr spill/fill by a DImode move that modifies all
3505 64 hard registers. */
3506 RTX_FRAME_RELATED_P (insn) = 1;
3507 add_reg_note (insn, REG_CFA_REGISTER, NULL_RTX);
3508
3509 /* Even if we're not going to generate an epilogue, we still
3510 need to save the register so that EH works. */
3511 if (! epilogue_p)
3512 emit_insn (gen_prologue_use (alt_reg));
3513 }
3514 else
3515 {
3516 alt_regno = next_scratch_gr_reg ();
3517 alt_reg = gen_rtx_REG (DImode, alt_regno);
3518 insn = emit_move_insn (alt_reg, reg);
3519 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3520 cfa_off -= 8;
3521 }
3522 }
3523
3524 /* Handle AR regs in numerical order. All of them get special handling. */
3525 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM)
3526 && current_frame_info.r[reg_save_ar_unat] == 0)
3527 {
3528 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3529 do_spill (gen_movdi_x, ar_unat_save_reg, cfa_off, reg);
3530 cfa_off -= 8;
3531 }
3532
3533 /* The alloc insn already copied ar.pfs into a general register. The
3534 only thing we have to do now is copy that register to a stack slot
3535 if we'd not allocated a local register for the job. */
3536 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM)
3537 && current_frame_info.r[reg_save_ar_pfs] == 0)
3538 {
3539 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3540 do_spill (gen_movdi_x, ar_pfs_save_reg, cfa_off, reg);
3541 cfa_off -= 8;
3542 }
3543
3544 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
3545 {
3546 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
3547 if (current_frame_info.r[reg_save_ar_lc] != 0)
3548 {
3549 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_lc]);
3550 reg_emitted (reg_save_ar_lc);
3551 insn = emit_move_insn (alt_reg, reg);
3552 RTX_FRAME_RELATED_P (insn) = 1;
3553 add_reg_note (insn, REG_CFA_REGISTER, NULL_RTX);
3554
3555 /* Even if we're not going to generate an epilogue, we still
3556 need to save the register so that EH works. */
3557 if (! epilogue_p)
3558 emit_insn (gen_prologue_use (alt_reg));
3559 }
3560 else
3561 {
3562 alt_regno = next_scratch_gr_reg ();
3563 alt_reg = gen_rtx_REG (DImode, alt_regno);
3564 emit_move_insn (alt_reg, reg);
3565 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3566 cfa_off -= 8;
3567 }
3568 }
3569
3570 /* Save the return pointer. */
3571 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3572 {
3573 reg = gen_rtx_REG (DImode, BR_REG (0));
3574 if (current_frame_info.r[reg_save_b0] != 0)
3575 {
3576 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
3577 reg_emitted (reg_save_b0);
3578 insn = emit_move_insn (alt_reg, reg);
3579 RTX_FRAME_RELATED_P (insn) = 1;
3580 add_reg_note (insn, REG_CFA_REGISTER,
3581 gen_rtx_SET (VOIDmode, alt_reg, pc_rtx));
3582
3583 /* Even if we're not going to generate an epilogue, we still
3584 need to save the register so that EH works. */
3585 if (! epilogue_p)
3586 emit_insn (gen_prologue_use (alt_reg));
3587 }
3588 else
3589 {
3590 alt_regno = next_scratch_gr_reg ();
3591 alt_reg = gen_rtx_REG (DImode, alt_regno);
3592 emit_move_insn (alt_reg, reg);
3593 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3594 cfa_off -= 8;
3595 }
3596 }
3597
3598 if (current_frame_info.r[reg_save_gp])
3599 {
3600 reg_emitted (reg_save_gp);
3601 insn = emit_move_insn (gen_rtx_REG (DImode,
3602 current_frame_info.r[reg_save_gp]),
3603 pic_offset_table_rtx);
3604 }
3605
3606 /* We should now be at the base of the gr/br/fr spill area. */
3607 gcc_assert (cfa_off == (current_frame_info.spill_cfa_off
3608 + current_frame_info.spill_size));
3609
3610 /* Spill all general registers. */
3611 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
3612 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3613 {
3614 reg = gen_rtx_REG (DImode, regno);
3615 do_spill (gen_gr_spill, reg, cfa_off, reg);
3616 cfa_off -= 8;
3617 }
3618
3619 /* Spill the rest of the BR registers. */
3620 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
3621 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3622 {
3623 alt_regno = next_scratch_gr_reg ();
3624 alt_reg = gen_rtx_REG (DImode, alt_regno);
3625 reg = gen_rtx_REG (DImode, regno);
3626 emit_move_insn (alt_reg, reg);
3627 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3628 cfa_off -= 8;
3629 }
3630
3631 /* Align the frame and spill all FR registers. */
3632 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
3633 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3634 {
3635 gcc_assert (!(cfa_off & 15));
3636 reg = gen_rtx_REG (XFmode, regno);
3637 do_spill (gen_fr_spill_x, reg, cfa_off, reg);
3638 cfa_off -= 16;
3639 }
3640
3641 gcc_assert (cfa_off == current_frame_info.spill_cfa_off);
3642
3643 finish_spill_pointers ();
3644 }
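
/* To recap the layout built above: cfa_off starts at the top of the
   register save area and walks downward 8 bytes per slot (16 for each
   FR spill).  It visits the predicate block, ar.unat, ar.pfs, ar.lc
   and b0 in that order, each taking a stack slot only when it was not
   kept in a local register, and then the spilled GRs, BRs and finally
   the 16-byte aligned FRs.  */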
3645
3646 /* Output the textual info surrounding the prologue. */
3647
3648 void
3649 ia64_start_function (FILE *file, const char *fnname,
3650 tree decl ATTRIBUTE_UNUSED)
3651 {
3652 #if VMS_DEBUGGING_INFO
3653 if (vms_debug_main
3654 && debug_info_level > DINFO_LEVEL_NONE
3655 && strncmp (vms_debug_main, fnname, strlen (vms_debug_main)) == 0)
3656 {
3657 targetm.asm_out.globalize_label (asm_out_file, VMS_DEBUG_MAIN_POINTER);
3658 ASM_OUTPUT_DEF (asm_out_file, VMS_DEBUG_MAIN_POINTER, fnname);
3659 dwarf2out_vms_debug_main_pointer ();
3660 vms_debug_main = 0;
3661 }
3662 #endif
3663
3664 fputs ("\t.proc ", file);
3665 assemble_name (file, fnname);
3666 fputc ('\n', file);
3667 ASM_OUTPUT_LABEL (file, fnname);
3668 }
3669
3670 /* Called after register allocation to add any instructions needed for the
3671 epilogue. Using an epilogue insn is favored compared to putting all of the
3672 instructions in output_function_prologue(), since it allows the scheduler
3673 to intermix instructions with the saves of the caller saved registers. In
3674 some cases, it might be necessary to emit a barrier instruction as the last
3675 insn to prevent such scheduling. */
3676
3677 void
3678 ia64_expand_epilogue (int sibcall_p)
3679 {
3680 rtx insn, reg, alt_reg, ar_unat_save_reg;
3681 int regno, alt_regno, cfa_off;
3682
3683 ia64_compute_frame_size (get_frame_size ());
3684
3685 /* If there is a frame pointer, then we use it instead of the stack
3686 pointer, so that the stack pointer does not need to be valid when
3687 the epilogue starts. See EXIT_IGNORE_STACK. */
3688 if (frame_pointer_needed)
3689 setup_spill_pointers (current_frame_info.n_spilled,
3690 hard_frame_pointer_rtx, 0);
3691 else
3692 setup_spill_pointers (current_frame_info.n_spilled, stack_pointer_rtx,
3693 current_frame_info.total_size);
3694
3695 if (current_frame_info.total_size != 0)
3696 {
3697 /* ??? At this point we must generate a magic insn that appears to
3698 modify the spill iterators and the frame pointer. This would
3699 allow the most scheduling freedom. For now, just hard stop. */
3700 emit_insn (gen_blockage ());
3701 }
3702
3703 /* Locate the bottom of the register save area. */
3704 cfa_off = (current_frame_info.spill_cfa_off
3705 + current_frame_info.spill_size
3706 + current_frame_info.extra_spill_size);
3707
3708 /* Restore the predicate registers. */
3709 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
3710 {
3711 if (current_frame_info.r[reg_save_pr] != 0)
3712 {
3713 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_pr]);
3714 reg_emitted (reg_save_pr);
3715 }
3716 else
3717 {
3718 alt_regno = next_scratch_gr_reg ();
3719 alt_reg = gen_rtx_REG (DImode, alt_regno);
3720 do_restore (gen_movdi_x, alt_reg, cfa_off);
3721 cfa_off -= 8;
3722 }
3723 reg = gen_rtx_REG (DImode, PR_REG (0));
3724 emit_move_insn (reg, alt_reg);
3725 }
3726
3727 /* Restore the application registers. */
3728
3729 /* Load the saved unat from the stack, but do not restore it until
3730 after the GRs have been restored. */
3731 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3732 {
3733 if (current_frame_info.r[reg_save_ar_unat] != 0)
3734 {
3735 ar_unat_save_reg
3736 = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_unat]);
3737 reg_emitted (reg_save_ar_unat);
3738 }
3739 else
3740 {
3741 alt_regno = next_scratch_gr_reg ();
3742 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
3743 current_frame_info.gr_used_mask |= 1 << alt_regno;
3744 do_restore (gen_movdi_x, ar_unat_save_reg, cfa_off);
3745 cfa_off -= 8;
3746 }
3747 }
3748 else
3749 ar_unat_save_reg = NULL_RTX;
3750
3751 if (current_frame_info.r[reg_save_ar_pfs] != 0)
3752 {
3753 reg_emitted (reg_save_ar_pfs);
3754 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_pfs]);
3755 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3756 emit_move_insn (reg, alt_reg);
3757 }
3758 else if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
3759 {
3760 alt_regno = next_scratch_gr_reg ();
3761 alt_reg = gen_rtx_REG (DImode, alt_regno);
3762 do_restore (gen_movdi_x, alt_reg, cfa_off);
3763 cfa_off -= 8;
3764 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3765 emit_move_insn (reg, alt_reg);
3766 }
3767
3768 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
3769 {
3770 if (current_frame_info.r[reg_save_ar_lc] != 0)
3771 {
3772 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_lc]);
3773 reg_emitted (reg_save_ar_lc);
3774 }
3775 else
3776 {
3777 alt_regno = next_scratch_gr_reg ();
3778 alt_reg = gen_rtx_REG (DImode, alt_regno);
3779 do_restore (gen_movdi_x, alt_reg, cfa_off);
3780 cfa_off -= 8;
3781 }
3782 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
3783 emit_move_insn (reg, alt_reg);
3784 }
3785
3786 /* Restore the return pointer. */
3787 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3788 {
3789 if (current_frame_info.r[reg_save_b0] != 0)
3790 {
3791 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
3792 reg_emitted (reg_save_b0);
3793 }
3794 else
3795 {
3796 alt_regno = next_scratch_gr_reg ();
3797 alt_reg = gen_rtx_REG (DImode, alt_regno);
3798 do_restore (gen_movdi_x, alt_reg, cfa_off);
3799 cfa_off -= 8;
3800 }
3801 reg = gen_rtx_REG (DImode, BR_REG (0));
3802 emit_move_insn (reg, alt_reg);
3803 }
3804
3805 /* We should now be at the base of the gr/br/fr spill area. */
3806 gcc_assert (cfa_off == (current_frame_info.spill_cfa_off
3807 + current_frame_info.spill_size));
3808
3809 /* The GP may be stored on the stack in the prologue, but it's
3810 never restored in the epilogue. Skip the stack slot. */
3811 if (TEST_HARD_REG_BIT (current_frame_info.mask, GR_REG (1)))
3812 cfa_off -= 8;
3813
3814 /* Restore all general registers. */
3815 for (regno = GR_REG (2); regno <= GR_REG (31); ++regno)
3816 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3817 {
3818 reg = gen_rtx_REG (DImode, regno);
3819 do_restore (gen_gr_restore, reg, cfa_off);
3820 cfa_off -= 8;
3821 }
3822
3823 /* Restore the branch registers. */
3824 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
3825 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3826 {
3827 alt_regno = next_scratch_gr_reg ();
3828 alt_reg = gen_rtx_REG (DImode, alt_regno);
3829 do_restore (gen_movdi_x, alt_reg, cfa_off);
3830 cfa_off -= 8;
3831 reg = gen_rtx_REG (DImode, regno);
3832 emit_move_insn (reg, alt_reg);
3833 }
3834
3835 /* Restore floating point registers. */
3836 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
3837 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3838 {
3839 gcc_assert (!(cfa_off & 15));
3840 reg = gen_rtx_REG (XFmode, regno);
3841 do_restore (gen_fr_restore_x, reg, cfa_off);
3842 cfa_off -= 16;
3843 }
3844
3845 /* Restore ar.unat for real. */
3846 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3847 {
3848 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3849 emit_move_insn (reg, ar_unat_save_reg);
3850 }
3851
3852 gcc_assert (cfa_off == current_frame_info.spill_cfa_off);
3853
3854 finish_spill_pointers ();
3855
3856 if (current_frame_info.total_size
3857 || cfun->machine->ia64_eh_epilogue_sp
3858 || frame_pointer_needed)
3859 {
3860 /* ??? At this point we must generate a magic insn that appears to
3861 modify the spill iterators, the stack pointer, and the frame
3862 pointer. This would allow the most scheduling freedom. For now,
3863 just hard stop. */
3864 emit_insn (gen_blockage ());
3865 }
3866
3867 if (cfun->machine->ia64_eh_epilogue_sp)
3868 emit_move_insn (stack_pointer_rtx, cfun->machine->ia64_eh_epilogue_sp);
3869 else if (frame_pointer_needed)
3870 {
3871 insn = emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
3872 RTX_FRAME_RELATED_P (insn) = 1;
3873 add_reg_note (insn, REG_CFA_ADJUST_CFA, NULL);
3874 }
3875 else if (current_frame_info.total_size)
3876 {
3877 rtx offset, frame_size_rtx;
3878
3879 frame_size_rtx = GEN_INT (current_frame_info.total_size);
3880 if (satisfies_constraint_I (frame_size_rtx))
3881 offset = frame_size_rtx;
3882 else
3883 {
3884 regno = next_scratch_gr_reg ();
3885 offset = gen_rtx_REG (DImode, regno);
3886 emit_move_insn (offset, frame_size_rtx);
3887 }
3888
3889 insn = emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
3890 offset));
3891
3892 RTX_FRAME_RELATED_P (insn) = 1;
3893 add_reg_note (insn, REG_CFA_ADJUST_CFA,
3894 gen_rtx_SET (VOIDmode,
3895 stack_pointer_rtx,
3896 gen_rtx_PLUS (DImode,
3897 stack_pointer_rtx,
3898 frame_size_rtx)));
3899 }
3900
3901 if (cfun->machine->ia64_eh_epilogue_bsp)
3902 emit_insn (gen_set_bsp (cfun->machine->ia64_eh_epilogue_bsp));
3903
3904 if (! sibcall_p)
3905 emit_jump_insn (gen_return_internal (gen_rtx_REG (DImode, BR_REG (0))));
3906 else
3907 {
3908 int fp = GR_REG (2);
3909 /* We need a throw-away register here; r0 and r1 are reserved,
3910 so r2 is the first available call-clobbered register. If
3911 there was a frame_pointer register, we may have swapped the
3912 names of r2 and HARD_FRAME_POINTER_REGNUM, so we have to make
3913 sure we're using the string "r2" when emitting the register
3914 name for the assembler. */
3915 if (current_frame_info.r[reg_fp]
3916 && current_frame_info.r[reg_fp] == GR_REG (2))
3917 fp = HARD_FRAME_POINTER_REGNUM;
3918
3919 /* We must emit an alloc to force the input registers to become output
3920 registers. Otherwise, if the callee tries to pass its parameters
3921 through to another call without an intervening alloc, then these
3922 values get lost. */
3923 /* ??? We don't need to preserve all input registers. We only need to
3924 preserve those input registers used as arguments to the sibling call.
3925 It is unclear how to compute that number here. */
3926 if (current_frame_info.n_input_regs != 0)
3927 {
3928 rtx n_inputs = GEN_INT (current_frame_info.n_input_regs);
3929
3930 insn = emit_insn (gen_alloc (gen_rtx_REG (DImode, fp),
3931 const0_rtx, const0_rtx,
3932 n_inputs, const0_rtx));
3933 RTX_FRAME_RELATED_P (insn) = 1;
3934
3935 /* ??? We need to mark the alloc as frame-related so that it gets
3936 passed into ia64_asm_unwind_emit for ia64-specific unwinding.
3937 But there's nothing dwarf2 related to be done wrt the register
3938 windows. If we do nothing, dwarf2out will abort on the UNSPEC;
3939 the empty parallel means dwarf2out will not see anything. */
3940 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3941 gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (0)));
3942 }
3943 }
3944 }
3945
3946 /* Return 1 if br.ret can do all the work required to return from a
3947 function. */
3948
3949 int
3950 ia64_direct_return (void)
3951 {
3952 if (reload_completed && ! frame_pointer_needed)
3953 {
3954 ia64_compute_frame_size (get_frame_size ());
3955
3956 return (current_frame_info.total_size == 0
3957 && current_frame_info.n_spilled == 0
3958 && current_frame_info.r[reg_save_b0] == 0
3959 && current_frame_info.r[reg_save_pr] == 0
3960 && current_frame_info.r[reg_save_ar_pfs] == 0
3961 && current_frame_info.r[reg_save_ar_unat] == 0
3962 && current_frame_info.r[reg_save_ar_lc] == 0);
3963 }
3964 return 0;
3965 }
3966
3967 /* Return the magic cookie that we use to hold the return address
3968 during early compilation. */
3969
3970 rtx
3971 ia64_return_addr_rtx (HOST_WIDE_INT count, rtx frame ATTRIBUTE_UNUSED)
3972 {
3973 if (count != 0)
3974 return NULL;
3975 return gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_RET_ADDR);
3976 }
3977
3978 /* Split this value after reload, now that we know where the return
3979 address is saved. */
3980
3981 void
3982 ia64_split_return_addr_rtx (rtx dest)
3983 {
3984 rtx src;
3985
3986 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3987 {
3988 if (current_frame_info.r[reg_save_b0] != 0)
3989 {
3990 src = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
3991 reg_emitted (reg_save_b0);
3992 }
3993 else
3994 {
3995 HOST_WIDE_INT off;
3996 unsigned int regno;
3997 rtx off_r;
3998
3999 /* Compute offset from CFA for BR0. */
4000 /* ??? Must be kept in sync with ia64_expand_prologue. */
4001 off = (current_frame_info.spill_cfa_off
4002 + current_frame_info.spill_size);
4003 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
4004 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
4005 off -= 8;
4006
4007 /* Convert CFA offset to a register based offset. */
4008 if (frame_pointer_needed)
4009 src = hard_frame_pointer_rtx;
4010 else
4011 {
4012 src = stack_pointer_rtx;
4013 off += current_frame_info.total_size;
4014 }
4015
4016 /* Load address into scratch register. */
4017 off_r = GEN_INT (off);
4018 if (satisfies_constraint_I (off_r))
4019 emit_insn (gen_adddi3 (dest, src, off_r));
4020 else
4021 {
4022 emit_move_insn (dest, off_r);
4023 emit_insn (gen_adddi3 (dest, src, dest));
4024 }
4025
4026 src = gen_rtx_MEM (Pmode, dest);
4027 }
4028 }
4029 else
4030 src = gen_rtx_REG (DImode, BR_REG (0));
4031
4032 emit_move_insn (dest, src);
4033 }
4034
4035 int
4036 ia64_hard_regno_rename_ok (int from, int to)
4037 {
4038 /* Don't clobber any of the registers we reserved for the prologue. */
4039 unsigned int r;
4040
4041 for (r = reg_fp; r <= reg_save_ar_lc; r++)
4042 if (to == current_frame_info.r[r]
4043 || from == current_frame_info.r[r]
4044 || to == emitted_frame_related_regs[r]
4045 || from == emitted_frame_related_regs[r])
4046 return 0;
4047
4048 /* Don't use output registers outside the register frame. */
4049 if (OUT_REGNO_P (to) && to >= OUT_REG (current_frame_info.n_output_regs))
4050 return 0;
4051
4052 /* Retain even/oddness on predicate register pairs. */
4053 if (PR_REGNO_P (from) && PR_REGNO_P (to))
4054 return (from & 1) == (to & 1);
4055
4056 return 1;
4057 }
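
/* Summarizing the tests above: a rename is refused when either register
   was reserved in current_frame_info.r[] or already emitted as frame
   related, when it would use an output register beyond the allocated
   register frame, or when it would swap an even predicate register with
   an odd one, which would break the even/odd pairing preserved here.  */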
4058
4059 /* Target hook for assembling integer objects. Handle word-sized
4060 aligned objects and detect the cases when @fptr is needed. */
4061
4062 static bool
4063 ia64_assemble_integer (rtx x, unsigned int size, int aligned_p)
4064 {
4065 if (size == POINTER_SIZE / BITS_PER_UNIT
4066 && !(TARGET_NO_PIC || TARGET_AUTO_PIC)
4067 && GET_CODE (x) == SYMBOL_REF
4068 && SYMBOL_REF_FUNCTION_P (x))
4069 {
4070 static const char * const directive[2][2] = {
4071 /* 64-bit pointer */ /* 32-bit pointer */
4072 { "\tdata8.ua\t@fptr(", "\tdata4.ua\t@fptr("}, /* unaligned */
4073 { "\tdata8\t@fptr(", "\tdata4\t@fptr("} /* aligned */
4074 };
4075 fputs (directive[(aligned_p != 0)][POINTER_SIZE == 32], asm_out_file);
4076 output_addr_const (asm_out_file, x);
4077 fputs (")\n", asm_out_file);
4078 return true;
4079 }
4080 return default_assemble_integer (x, size, aligned_p);
4081 }
4082
4083 /* Emit the function prologue. */
4084
4085 static void
4086 ia64_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4087 {
4088 int mask, grsave, grsave_prev;
4089
4090 if (current_frame_info.need_regstk)
4091 fprintf (file, "\t.regstk %d, %d, %d, %d\n",
4092 current_frame_info.n_input_regs,
4093 current_frame_info.n_local_regs,
4094 current_frame_info.n_output_regs,
4095 current_frame_info.n_rotate_regs);
4096
4097 if (ia64_except_unwind_info (&global_options) != UI_TARGET)
4098 return;
4099
4100 /* Emit the .prologue directive. */
4101
4102 mask = 0;
4103 grsave = grsave_prev = 0;
4104 if (current_frame_info.r[reg_save_b0] != 0)
4105 {
4106 mask |= 8;
4107 grsave = grsave_prev = current_frame_info.r[reg_save_b0];
4108 }
4109 if (current_frame_info.r[reg_save_ar_pfs] != 0
4110 && (grsave_prev == 0
4111 || current_frame_info.r[reg_save_ar_pfs] == grsave_prev + 1))
4112 {
4113 mask |= 4;
4114 if (grsave_prev == 0)
4115 grsave = current_frame_info.r[reg_save_ar_pfs];
4116 grsave_prev = current_frame_info.r[reg_save_ar_pfs];
4117 }
4118 if (current_frame_info.r[reg_fp] != 0
4119 && (grsave_prev == 0
4120 || current_frame_info.r[reg_fp] == grsave_prev + 1))
4121 {
4122 mask |= 2;
4123 if (grsave_prev == 0)
4124 grsave = HARD_FRAME_POINTER_REGNUM;
4125 grsave_prev = current_frame_info.r[reg_fp];
4126 }
4127 if (current_frame_info.r[reg_save_pr] != 0
4128 && (grsave_prev == 0
4129 || current_frame_info.r[reg_save_pr] == grsave_prev + 1))
4130 {
4131 mask |= 1;
4132 if (grsave_prev == 0)
4133 grsave = current_frame_info.r[reg_save_pr];
4134 }
4135
4136 if (mask && TARGET_GNU_AS)
4137 fprintf (file, "\t.prologue %d, %d\n", mask,
4138 ia64_dbx_register_number (grsave));
4139 else
4140 fputs ("\t.prologue\n", file);
4141
4142 /* Emit a .spill directive, if necessary, to relocate the base of
4143 the register spill area. */
4144 if (current_frame_info.spill_cfa_off != -16)
4145 fprintf (file, "\t.spill %ld\n",
4146 (long) (current_frame_info.spill_cfa_off
4147 + current_frame_info.spill_size));
4148 }
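
/* For example (a hypothetical frame, not taken from real output): if b0
   is kept in a local GR and ar.pfs in the immediately following one, the
   code above accumulates mask = 8 | 4 = 12 and grsave is the register
   holding b0, so GNU as sees ".prologue 12, <renumbered grsave>".  */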
4149
4150 /* Emit the .body directive at the scheduled end of the prologue. */
4151
4152 static void
4153 ia64_output_function_end_prologue (FILE *file)
4154 {
4155 if (ia64_except_unwind_info (&global_options) != UI_TARGET)
4156 return;
4157
4158 fputs ("\t.body\n", file);
4159 }
4160
4161 /* Emit the function epilogue. */
4162
4163 static void
4164 ia64_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
4165 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4166 {
4167 int i;
4168
4169 if (current_frame_info.r[reg_fp])
4170 {
4171 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
4172 reg_names[HARD_FRAME_POINTER_REGNUM]
4173 = reg_names[current_frame_info.r[reg_fp]];
4174 reg_names[current_frame_info.r[reg_fp]] = tmp;
4175 reg_emitted (reg_fp);
4176 }
4177 if (! TARGET_REG_NAMES)
4178 {
4179 for (i = 0; i < current_frame_info.n_input_regs; i++)
4180 reg_names[IN_REG (i)] = ia64_input_reg_names[i];
4181 for (i = 0; i < current_frame_info.n_local_regs; i++)
4182 reg_names[LOC_REG (i)] = ia64_local_reg_names[i];
4183 for (i = 0; i < current_frame_info.n_output_regs; i++)
4184 reg_names[OUT_REG (i)] = ia64_output_reg_names[i];
4185 }
4186
4187 current_frame_info.initialized = 0;
4188 }
4189
4190 int
4191 ia64_dbx_register_number (int regno)
4192 {
4193 /* In ia64_expand_prologue we quite literally renamed the frame pointer
4194 from its home at loc79 to something inside the register frame. We
4195 must perform the same renumbering here for the debug info. */
4196 if (current_frame_info.r[reg_fp])
4197 {
4198 if (regno == HARD_FRAME_POINTER_REGNUM)
4199 regno = current_frame_info.r[reg_fp];
4200 else if (regno == current_frame_info.r[reg_fp])
4201 regno = HARD_FRAME_POINTER_REGNUM;
4202 }
4203
4204 if (IN_REGNO_P (regno))
4205 return 32 + regno - IN_REG (0);
4206 else if (LOC_REGNO_P (regno))
4207 return 32 + current_frame_info.n_input_regs + regno - LOC_REG (0);
4208 else if (OUT_REGNO_P (regno))
4209 return (32 + current_frame_info.n_input_regs
4210 + current_frame_info.n_local_regs + regno - OUT_REG (0));
4211 else
4212 return regno;
4213 }
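
/* As an illustration (hypothetical counts, not from any particular
   function): with 2 input and 3 local registers, in0 and in1 map to
   debug register numbers 32 and 33, loc0..loc2 map to 34..36, and out0
   maps to 37, following the formulas above.  */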
4214
4215 /* Implement TARGET_TRAMPOLINE_INIT.
4216
4217 The trampoline should set the static chain pointer to value placed
4218 into the trampoline and should branch to the specified routine.
4219 To make the normal indirect-subroutine calling convention work,
4220 the trampoline must look like a function descriptor; the first
4221 word being the target address and the second being the target's
4222 global pointer.
4223
4224 We abuse the concept of a global pointer by arranging for it
4225 to point to the data we need to load. The complete trampoline
4226 has the following form:
4227
4228 +-------------------+ \
4229 TRAMP: | __ia64_trampoline | |
4230 +-------------------+ > fake function descriptor
4231 | TRAMP+16 | |
4232 +-------------------+ /
4233 | target descriptor |
4234 +-------------------+
4235 | static link |
4236 +-------------------+
4237 */
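
/* In terms of byte offsets, the code below fills four 8-byte words:
   offset 0 gets the address of __ia64_trampoline, offset 8 gets
   TRAMP+16 (the fake descriptor's gp), offset 16 gets the target
   function descriptor, and offset 24 gets the static chain.  */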
4238
4239 static void
4240 ia64_trampoline_init (rtx m_tramp, tree fndecl, rtx static_chain)
4241 {
4242 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
4243 rtx addr, addr_reg, tramp, eight = GEN_INT (8);
4244
4245 /* The Intel assembler requires that the global __ia64_trampoline symbol
4246 be declared explicitly. */
4247 if (!TARGET_GNU_AS)
4248 {
4249 static bool declared_ia64_trampoline = false;
4250
4251 if (!declared_ia64_trampoline)
4252 {
4253 declared_ia64_trampoline = true;
4254 (*targetm.asm_out.globalize_label) (asm_out_file,
4255 "__ia64_trampoline");
4256 }
4257 }
4258
4259 /* Make sure addresses are Pmode even if we are in ILP32 mode. */
4260 addr = convert_memory_address (Pmode, XEXP (m_tramp, 0));
4261 fnaddr = convert_memory_address (Pmode, fnaddr);
4262 static_chain = convert_memory_address (Pmode, static_chain);
4263
4264 /* Load up our iterator. */
4265 addr_reg = copy_to_reg (addr);
4266 m_tramp = adjust_automodify_address (m_tramp, Pmode, addr_reg, 0);
4267
4268 /* The first two words are the fake descriptor:
4269 __ia64_trampoline, ADDR+16. */
4270 tramp = gen_rtx_SYMBOL_REF (Pmode, "__ia64_trampoline");
4271 if (TARGET_ABI_OPEN_VMS)
4272 {
4273 /* HP decided to break the ELF ABI on VMS (to deal with an ambiguity
4274 in the Macro-32 compiler) and changed the semantics of the LTOFF22
4275 relocation against function symbols to make it identical to the
4276 LTOFF_FPTR22 relocation. Emit the latter directly to stay within
4277 strict ELF and dereference to get the bare code address. */
4278 rtx reg = gen_reg_rtx (Pmode);
4279 SYMBOL_REF_FLAGS (tramp) |= SYMBOL_FLAG_FUNCTION;
4280 emit_move_insn (reg, tramp);
4281 emit_move_insn (reg, gen_rtx_MEM (Pmode, reg));
4282 tramp = reg;
4283 }
4284 emit_move_insn (m_tramp, tramp);
4285 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
4286 m_tramp = adjust_automodify_address (m_tramp, VOIDmode, NULL, 8);
4287
4288 emit_move_insn (m_tramp, force_reg (Pmode, plus_constant (addr, 16)));
4289 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
4290 m_tramp = adjust_automodify_address (m_tramp, VOIDmode, NULL, 8);
4291
4292 /* The third word is the target descriptor. */
4293 emit_move_insn (m_tramp, force_reg (Pmode, fnaddr));
4294 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
4295 m_tramp = adjust_automodify_address (m_tramp, VOIDmode, NULL, 8);
4296
4297 /* The fourth word is the static chain. */
4298 emit_move_insn (m_tramp, static_chain);
4299 }
4300 \f
4301 /* Do any needed setup for a variadic function. CUM has not been updated
4302 for the last named argument which has type TYPE and mode MODE.
4303
4304 We generate the actual spill instructions during prologue generation. */
4305
4306 static void
4307 ia64_setup_incoming_varargs (cumulative_args_t cum, enum machine_mode mode,
4308 tree type, int * pretend_size,
4309 int second_time ATTRIBUTE_UNUSED)
4310 {
4311 CUMULATIVE_ARGS next_cum = *get_cumulative_args (cum);
4312
4313 /* Skip the current argument. */
4314 ia64_function_arg_advance (pack_cumulative_args (&next_cum), mode, type, 1);
4315
4316 if (next_cum.words < MAX_ARGUMENT_SLOTS)
4317 {
4318 int n = MAX_ARGUMENT_SLOTS - next_cum.words;
4319 *pretend_size = n * UNITS_PER_WORD;
4320 cfun->machine->n_varargs = n;
4321 }
4322 }
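
/* For instance (with the usual 8-byte argument words): if the named
   arguments occupy 3 slots, the remaining 5 of the 8 slots are
   reserved, so *pretend_size becomes 40 bytes and n_varargs = 5 drives
   the varargs spill loop in ia64_expand_prologue.  */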
4323
4324 /* Check whether TYPE is a homogeneous floating point aggregate. If
4325 it is, return the mode of the floating point type that appears
4326 in all leaves. If it is not, return VOIDmode.
4327
4328 An aggregate is a homogeneous floating point aggregate if all
4329 fields/elements in it have the same floating point type (e.g.,
4330 SFmode). 128-bit quad-precision floats are excluded.
4331
4332 Variable sized aggregates should never arrive here, since we should
4333 have already decided to pass them by reference. Top-level zero-sized
4334 aggregates are excluded because our parallels crash the middle-end. */
4335
4336 static enum machine_mode
4337 hfa_element_mode (const_tree type, bool nested)
4338 {
4339 enum machine_mode element_mode = VOIDmode;
4340 enum machine_mode mode;
4341 enum tree_code code = TREE_CODE (type);
4342 int know_element_mode = 0;
4343 tree t;
4344
4345 if (!nested && (!TYPE_SIZE (type) || integer_zerop (TYPE_SIZE (type))))
4346 return VOIDmode;
4347
4348 switch (code)
4349 {
4350 case VOID_TYPE: case INTEGER_TYPE: case ENUMERAL_TYPE:
4351 case BOOLEAN_TYPE: case POINTER_TYPE:
4352 case OFFSET_TYPE: case REFERENCE_TYPE: case METHOD_TYPE:
4353 case LANG_TYPE: case FUNCTION_TYPE:
4354 return VOIDmode;
4355
4356 /* Fortran complex types are supposed to be HFAs, so we need to handle
4357 gcc's COMPLEX_TYPEs as HFAs. We need to exclude the integral complex
4358 types though. */
4359 case COMPLEX_TYPE:
4360 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_COMPLEX_FLOAT
4361 && TYPE_MODE (type) != TCmode)
4362 return GET_MODE_INNER (TYPE_MODE (type));
4363 else
4364 return VOIDmode;
4365
4366 case REAL_TYPE:
4367 /* We want to return VOIDmode for raw REAL_TYPEs, but the actual
4368 mode if this is contained within an aggregate. */
4369 if (nested && TYPE_MODE (type) != TFmode)
4370 return TYPE_MODE (type);
4371 else
4372 return VOIDmode;
4373
4374 case ARRAY_TYPE:
4375 return hfa_element_mode (TREE_TYPE (type), 1);
4376
4377 case RECORD_TYPE:
4378 case UNION_TYPE:
4379 case QUAL_UNION_TYPE:
4380 for (t = TYPE_FIELDS (type); t; t = DECL_CHAIN (t))
4381 {
4382 if (TREE_CODE (t) != FIELD_DECL)
4383 continue;
4384
4385 mode = hfa_element_mode (TREE_TYPE (t), 1);
4386 if (know_element_mode)
4387 {
4388 if (mode != element_mode)
4389 return VOIDmode;
4390 }
4391 else if (GET_MODE_CLASS (mode) != MODE_FLOAT)
4392 return VOIDmode;
4393 else
4394 {
4395 know_element_mode = 1;
4396 element_mode = mode;
4397 }
4398 }
4399 return element_mode;
4400
4401 default:
4402 /* If we reach here, we probably have some front-end specific type
4403 that the backend doesn't know about. This can happen via the
4404 aggregate_value_p call in init_function_start. All we can do is
4405 ignore unknown tree types. */
4406 return VOIDmode;
4407 }
4408
4409 return VOIDmode;
4410 }
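
/* For example, struct { float x; float y[4]; } yields SFmode, while
   struct { float x; double y; } mixes element modes and yields
   VOIDmode, as does anything containing a 128-bit quad float.  */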
4411
4412 /* Return the number of words required to hold a quantity of TYPE and MODE
4413 when passed as an argument. */
4414 static int
4415 ia64_function_arg_words (const_tree type, enum machine_mode mode)
4416 {
4417 int words;
4418
4419 if (mode == BLKmode)
4420 words = int_size_in_bytes (type);
4421 else
4422 words = GET_MODE_SIZE (mode);
4423
4424 return (words + UNITS_PER_WORD - 1) / UNITS_PER_WORD; /* round up */
4425 }
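
/* E.g. a 12-byte BLKmode aggregate needs (12 + 8 - 1) / 8 = 2 argument
   words, assuming the usual 8-byte UNITS_PER_WORD, while a DImode
   scalar needs exactly one.  */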
4426
4427 /* Return the number of registers that should be skipped so the current
4428 argument (described by TYPE and WORDS) will be properly aligned.
4429
4430 Integer and float arguments larger than 8 bytes start at the next
4431 even boundary. Aggregates larger than 8 bytes start at the next
4432 even boundary if the aggregate has 16 byte alignment. Note that
4433 in the 32-bit ABI, TImode and TFmode have only 8-byte alignment
4434 but are still to be aligned in registers.
4435
4436 ??? The ABI does not specify how to handle aggregates with
4437 alignment from 9 to 15 bytes, or greater than 16. We handle them
4438 all as if they had 16 byte alignment. Such aggregates can occur
4439 only if gcc extensions are used. */
4440 static int
4441 ia64_function_arg_offset (const CUMULATIVE_ARGS *cum,
4442 const_tree type, int words)
4443 {
4444 /* No registers are skipped on VMS. */
4445 if (TARGET_ABI_OPEN_VMS || (cum->words & 1) == 0)
4446 return 0;
4447
4448 if (type
4449 && TREE_CODE (type) != INTEGER_TYPE
4450 && TREE_CODE (type) != REAL_TYPE)
4451 return TYPE_ALIGN (type) > 8 * BITS_PER_UNIT;
4452 else
4453 return words > 1;
4454 }
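
/* For example, a 16-byte aligned aggregate arriving when cum->words is
   odd gets an offset of 1 so that it starts on an even slot, while a
   single-word scalar in the same position gets no padding.  */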
4455
4456 /* Return rtx for register where argument is passed, or zero if it is passed
4457 on the stack. */
4458 /* ??? 128-bit quad-precision floats are always passed in general
4459 registers. */
4460
4461 static rtx
4462 ia64_function_arg_1 (cumulative_args_t cum_v, enum machine_mode mode,
4463 const_tree type, bool named, bool incoming)
4464 {
4465 const CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
4466
4467 int basereg = (incoming ? GR_ARG_FIRST : AR_ARG_FIRST);
4468 int words = ia64_function_arg_words (type, mode);
4469 int offset = ia64_function_arg_offset (cum, type, words);
4470 enum machine_mode hfa_mode = VOIDmode;
4471
4472 /* For OPEN VMS, emit the instruction setting up the argument register here,
4473 when we know this will be together with the other arguments setup related
4474 insns. This is not the conceptually best place to do this, but this is
4475 the easiest as we have convenient access to cumulative args info. */
4476
4477 if (TARGET_ABI_OPEN_VMS && mode == VOIDmode && type == void_type_node
4478 && named == 1)
4479 {
4480 unsigned HOST_WIDE_INT regval = cum->words;
4481 int i;
4482
4483 for (i = 0; i < 8; i++)
4484 regval |= ((int) cum->atypes[i]) << (i * 3 + 8);
4485
4486 emit_move_insn (gen_rtx_REG (DImode, GR_REG (25)),
4487 GEN_INT (regval));
4488 }
4489
4490 /* If all argument slots are used, then it must go on the stack. */
4491 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
4492 return 0;
4493
4494 /* On OpenVMS argument is either in Rn or Fn. */
4495 if (TARGET_ABI_OPEN_VMS)
4496 {
4497 if (FLOAT_MODE_P (mode))
4498 return gen_rtx_REG (mode, FR_ARG_FIRST + cum->words);
4499 else
4500 return gen_rtx_REG (mode, basereg + cum->words);
4501 }
4502
4503 /* Check for and handle homogeneous FP aggregates. */
4504 if (type)
4505 hfa_mode = hfa_element_mode (type, 0);
4506
4507 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
4508 and unprototyped hfas are passed specially. */
4509 if (hfa_mode != VOIDmode && (! cum->prototype || named))
4510 {
4511 rtx loc[16];
4512 int i = 0;
4513 int fp_regs = cum->fp_regs;
4514 int int_regs = cum->words + offset;
4515 int hfa_size = GET_MODE_SIZE (hfa_mode);
4516 int byte_size;
4517 int args_byte_size;
4518
4519 /* If prototyped, pass it in FR regs then GR regs.
4520 If not prototyped, pass it in both FR and GR regs.
4521
4522 If this is an SFmode aggregate, then it is possible to run out of
4523 FR regs while GR regs are still left. In that case, we pass the
4524 remaining part in the GR regs. */
4525
4526 /* Fill the FP regs. We do this always. We stop if we reach the end
4527 of the argument, the last FP register, or the last argument slot. */
4528
4529 byte_size = ((mode == BLKmode)
4530 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4531 args_byte_size = int_regs * UNITS_PER_WORD;
4532 offset = 0;
4533 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
4534 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD)); i++)
4535 {
4536 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4537 gen_rtx_REG (hfa_mode, (FR_ARG_FIRST
4538 + fp_regs)),
4539 GEN_INT (offset));
4540 offset += hfa_size;
4541 args_byte_size += hfa_size;
4542 fp_regs++;
4543 }
4544
4545 /* If no prototype, then the whole thing must go in GR regs. */
4546 if (! cum->prototype)
4547 offset = 0;
4548 /* If this is an SFmode aggregate, then we might have some left over
4549 that needs to go in GR regs. */
4550 else if (byte_size != offset)
4551 int_regs += offset / UNITS_PER_WORD;
4552
4553 /* Fill in the GR regs. We must use DImode here, not the hfa mode. */
4554
4555 for (; offset < byte_size && int_regs < MAX_ARGUMENT_SLOTS; i++)
4556 {
4557 enum machine_mode gr_mode = DImode;
4558 unsigned int gr_size;
4559
4560 /* If we have an odd 4 byte hunk because we ran out of FR regs,
4561 then this goes in a GR reg left adjusted/little endian, right
4562 adjusted/big endian. */
4563 /* ??? Currently this is handled wrong, because 4-byte hunks are
4564 always right adjusted/little endian. */
4565 if (offset & 0x4)
4566 gr_mode = SImode;
4567 /* If we have an even 4 byte hunk because the aggregate is a
4568 multiple of 4 bytes in size, then this goes in a GR reg right
4569 adjusted/little endian. */
4570 else if (byte_size - offset == 4)
4571 gr_mode = SImode;
4572
4573 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4574 gen_rtx_REG (gr_mode, (basereg
4575 + int_regs)),
4576 GEN_INT (offset));
4577
4578 gr_size = GET_MODE_SIZE (gr_mode);
4579 offset += gr_size;
4580 if (gr_size == UNITS_PER_WORD
4581 || (gr_size < UNITS_PER_WORD && offset % UNITS_PER_WORD == 0))
4582 int_regs++;
4583 else if (gr_size > UNITS_PER_WORD)
4584 int_regs += gr_size / UNITS_PER_WORD;
4585 }
4586 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4587 }
4588
4589 /* Integral and aggregates go in general registers. If we have run out of
4590 FR registers, then FP values must also go in general registers. This can
4591 happen when we have an SFmode HFA. */
4592 else if (mode == TFmode || mode == TCmode
4593 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
4594 {
4595 int byte_size = ((mode == BLKmode)
4596 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4597 if (BYTES_BIG_ENDIAN
4598 && (mode == BLKmode || (type && AGGREGATE_TYPE_P (type)))
4599 && byte_size < UNITS_PER_WORD
4600 && byte_size > 0)
4601 {
4602 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
4603 gen_rtx_REG (DImode,
4604 (basereg + cum->words
4605 + offset)),
4606 const0_rtx);
4607 return gen_rtx_PARALLEL (mode, gen_rtvec (1, gr_reg));
4608 }
4609 else
4610 return gen_rtx_REG (mode, basereg + cum->words + offset);
4611
4612 }
4613
4614 /* If there is a prototype, then FP values go in a FR register when
4615 named, and in a GR register when unnamed. */
4616 else if (cum->prototype)
4617 {
4618 if (named)
4619 return gen_rtx_REG (mode, FR_ARG_FIRST + cum->fp_regs);
4620 /* In big-endian mode, an anonymous SFmode value must be represented
4621 as (parallel:SF [(expr_list (reg:DI n) (const_int 0))]) to force
4622 the value into the high half of the general register. */
4623 else if (BYTES_BIG_ENDIAN && mode == SFmode)
4624 return gen_rtx_PARALLEL (mode,
4625 gen_rtvec (1,
4626 gen_rtx_EXPR_LIST (VOIDmode,
4627 gen_rtx_REG (DImode, basereg + cum->words + offset),
4628 const0_rtx)));
4629 else
4630 return gen_rtx_REG (mode, basereg + cum->words + offset);
4631 }
4632 /* If there is no prototype, then FP values go in both FR and GR
4633 registers. */
4634 else
4635 {
4636 /* See comment above. */
4637 enum machine_mode inner_mode =
4638 (BYTES_BIG_ENDIAN && mode == SFmode) ? DImode : mode;
4639
4640 rtx fp_reg = gen_rtx_EXPR_LIST (VOIDmode,
4641 gen_rtx_REG (mode, (FR_ARG_FIRST
4642 + cum->fp_regs)),
4643 const0_rtx);
4644 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
4645 gen_rtx_REG (inner_mode,
4646 (basereg + cum->words
4647 + offset)),
4648 const0_rtx);
4649
4650 return gen_rtx_PARALLEL (mode, gen_rtvec (2, fp_reg, gr_reg));
4651 }
4652 }
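
/* A sketch of the HFA path above (hypothetical call, with an empty
   CUMULATIVE_ARGS): for a named, prototyped struct of four doubles,
   ia64_function_arg_1 hands back a PARALLEL of four DFmode FR argument
   registers at byte offsets 0, 8, 16 and 24; nothing spills to the GRs
   because the prototyped case leaves offset equal to byte_size.  */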
4653
4654 /* Implement TARGET_FUNCTION_ARG target hook. */
4655
4656 static rtx
4657 ia64_function_arg (cumulative_args_t cum, enum machine_mode mode,
4658 const_tree type, bool named)
4659 {
4660 return ia64_function_arg_1 (cum, mode, type, named, false);
4661 }
4662
4663 /* Implement TARGET_FUNCTION_INCOMING_ARG target hook. */
4664
4665 static rtx
4666 ia64_function_incoming_arg (cumulative_args_t cum,
4667 enum machine_mode mode,
4668 const_tree type, bool named)
4669 {
4670 return ia64_function_arg_1 (cum, mode, type, named, true);
4671 }
4672
4673 /* Return number of bytes, at the beginning of the argument, that must be
4674 put in registers. 0 if the argument is entirely in registers or entirely
4675 in memory. */
4676
4677 static int
4678 ia64_arg_partial_bytes (cumulative_args_t cum_v, enum machine_mode mode,
4679 tree type, bool named ATTRIBUTE_UNUSED)
4680 {
4681 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
4682
4683 int words = ia64_function_arg_words (type, mode);
4684 int offset = ia64_function_arg_offset (cum, type, words);
4685
4686 /* If all argument slots are used, then it must go on the stack. */
4687 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
4688 return 0;
4689
4690 /* It doesn't matter whether the argument goes in FR or GR regs. If
4691 it fits within the 8 argument slots, then it goes entirely in
4692 registers. If it extends past the last argument slot, then the rest
4693 goes on the stack. */
4694
4695 if (words + cum->words + offset <= MAX_ARGUMENT_SLOTS)
4696 return 0;
4697
4698 return (MAX_ARGUMENT_SLOTS - cum->words - offset) * UNITS_PER_WORD;
4699 }
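
/* For instance, with 5 of the 8 argument slots already used and no
   alignment padding, a 10-word aggregate gets its first 3 words
   (24 bytes) in registers and the rest on the stack.  */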
4700
4701 /* Return ivms_arg_type based on machine_mode. */
4702
4703 static enum ivms_arg_type
4704 ia64_arg_type (enum machine_mode mode)
4705 {
4706 switch (mode)
4707 {
4708 case SFmode:
4709 return FS;
4710 case DFmode:
4711 return FT;
4712 default:
4713 return I64;
4714 }
4715 }
4716
4717 /* Update CUM to point after this argument. This is patterned after
4718 ia64_function_arg. */
4719
4720 static void
4721 ia64_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
4722 const_tree type, bool named)
4723 {
4724 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
4725 int words = ia64_function_arg_words (type, mode);
4726 int offset = ia64_function_arg_offset (cum, type, words);
4727 enum machine_mode hfa_mode = VOIDmode;
4728
4729 /* If all arg slots are already full, then there is nothing to do. */
4730 if (cum->words >= MAX_ARGUMENT_SLOTS)
4731 {
4732 cum->words += words + offset;
4733 return;
4734 }
4735
4736 cum->atypes[cum->words] = ia64_arg_type (mode);
4737 cum->words += words + offset;
4738
4739 /* On OpenVMS argument is either in Rn or Fn. */
4740 if (TARGET_ABI_OPEN_VMS)
4741 {
4742 cum->int_regs = cum->words;
4743 cum->fp_regs = cum->words;
4744 return;
4745 }
4746
4747 /* Check for and handle homogeneous FP aggregates. */
4748 if (type)
4749 hfa_mode = hfa_element_mode (type, 0);
4750
4751 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
4752 and unprototyped hfas are passed specially. */
4753 if (hfa_mode != VOIDmode && (! cum->prototype || named))
4754 {
4755 int fp_regs = cum->fp_regs;
4756 /* This is the original value of cum->words + offset. */
4757 int int_regs = cum->words - words;
4758 int hfa_size = GET_MODE_SIZE (hfa_mode);
4759 int byte_size;
4760 int args_byte_size;
4761
4762 /* If prototyped, pass it in FR regs then GR regs.
4763 If not prototyped, pass it in both FR and GR regs.
4764
4765 If this is an SFmode aggregate, then it is possible to run out of
4766 FR regs while GR regs are still left. In that case, we pass the
4767 remaining part in the GR regs. */
4768
4769 /* Fill the FP regs. We do this always. We stop if we reach the end
4770 of the argument, the last FP register, or the last argument slot. */
4771
4772 byte_size = ((mode == BLKmode)
4773 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4774 args_byte_size = int_regs * UNITS_PER_WORD;
4775 offset = 0;
4776 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
4777 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD));)
4778 {
4779 offset += hfa_size;
4780 args_byte_size += hfa_size;
4781 fp_regs++;
4782 }
4783
4784 cum->fp_regs = fp_regs;
4785 }
4786
4787 /* Integral and aggregates go in general registers. So do TFmode FP values.
4788 If we have run out of FR registers, then other FP values must also go in
4789 general registers. This can happen when we have an SFmode HFA. */
4790 else if (mode == TFmode || mode == TCmode
4791 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
4792 cum->int_regs = cum->words;
4793
4794 /* If there is a prototype, then FP values go in a FR register when
4795 named, and in a GR register when unnamed. */
4796 else if (cum->prototype)
4797 {
4798 if (! named)
4799 cum->int_regs = cum->words;
4800 else
4801 /* ??? Complex types should not reach here. */
4802 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
4803 }
4804 /* If there is no prototype, then FP values go in both FR and GR
4805 registers. */
4806 else
4807 {
4808 /* ??? Complex types should not reach here. */
4809 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
4810 cum->int_regs = cum->words;
4811 }
4812 }
4813
4814 /* Arguments with alignment larger than 8 bytes start at the next even
4815 boundary. On ILP32 HPUX, TFmode arguments start on the next even boundary
4816 even though their normal alignment is 8 bytes. See ia64_function_arg. */
4817
4818 static unsigned int
4819 ia64_function_arg_boundary (enum machine_mode mode, const_tree type)
4820 {
4821 if (mode == TFmode && TARGET_HPUX && TARGET_ILP32)
4822 return PARM_BOUNDARY * 2;
4823
4824 if (type)
4825 {
4826 if (TYPE_ALIGN (type) > PARM_BOUNDARY)
4827 return PARM_BOUNDARY * 2;
4828 else
4829 return PARM_BOUNDARY;
4830 }
4831
4832 if (GET_MODE_BITSIZE (mode) > PARM_BOUNDARY)
4833 return PARM_BOUNDARY * 2;
4834 else
4835 return PARM_BOUNDARY;
4836 }
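
/* So a type aligned beyond PARM_BOUNDARY, or a bare mode wider than
   PARM_BOUNDARY when no type is available, is given twice the normal
   parameter boundary, matching the even-slot rule enforced by
   ia64_function_arg_offset.  */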
4837
4838 /* True if it is OK to do sibling call optimization for the specified
4839 call expression EXP. DECL will be the called function, or NULL if
4840 this is an indirect call. */
4841 static bool
4842 ia64_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
4843 {
4844 /* We can't perform a sibcall if the current function has the syscall_linkage
4845 attribute. */
4846 if (lookup_attribute ("syscall_linkage",
4847 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
4848 return false;
4849
4850 /* We must always return with our current GP. This means we can
4851 only sibcall to functions defined in the current module unless
4852 TARGET_CONST_GP is set to true. */
4853 return (decl && (*targetm.binds_local_p) (decl)) || TARGET_CONST_GP;
4854 }
4855 \f
4856
4857 /* Implement va_arg. */
4858
4859 static tree
4860 ia64_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
4861 gimple_seq *post_p)
4862 {
4863 /* Variable sized types are passed by reference. */
4864 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
4865 {
4866 tree ptrtype = build_pointer_type (type);
4867 tree addr = std_gimplify_va_arg_expr (valist, ptrtype, pre_p, post_p);
4868 return build_va_arg_indirect_ref (addr);
4869 }
4870
4871 /* Aggregate arguments with alignment larger than 8 bytes start at
4872 the next even boundary. Integer and floating point arguments
4873 do so if they are larger than 8 bytes, whether or not they are
4874 also aligned larger than 8 bytes. */
4875 if ((TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == INTEGER_TYPE)
4876 ? int_size_in_bytes (type) > 8 : TYPE_ALIGN (type) > 8 * BITS_PER_UNIT)
4877 {
4878 tree t = fold_build_pointer_plus_hwi (valist, 2 * UNITS_PER_WORD - 1);
4879 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
4880 build_int_cst (TREE_TYPE (t), -2 * UNITS_PER_WORD));
4881 gimplify_assign (unshare_expr (valist), t, pre_p);
4882 }
4883
4884 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
4885 }
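
/* The rounding above is the standard align-up idiom: assuming 8-byte
   words, valist is advanced to (valist + 15) & -16, i.e. the next
   16-byte boundary, before the generic va_arg expansion takes over.  */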
4886 \f
4887 /* Return 1 if the function return value is returned in memory. Return 0 if it is
4888 in a register. */
4889
4890 static bool
4891 ia64_return_in_memory (const_tree valtype, const_tree fntype ATTRIBUTE_UNUSED)
4892 {
4893 enum machine_mode mode;
4894 enum machine_mode hfa_mode;
4895 HOST_WIDE_INT byte_size;
4896
4897 mode = TYPE_MODE (valtype);
4898 byte_size = GET_MODE_SIZE (mode);
4899 if (mode == BLKmode)
4900 {
4901 byte_size = int_size_in_bytes (valtype);
4902 if (byte_size < 0)
4903 return true;
4904 }
4905
4906 /* HFAs with up to 8 elements are returned in the FP argument registers. */
4907
4908 hfa_mode = hfa_element_mode (valtype, 0);
4909 if (hfa_mode != VOIDmode)
4910 {
4911 int hfa_size = GET_MODE_SIZE (hfa_mode);
4912
4913 if (byte_size / hfa_size > MAX_ARGUMENT_SLOTS)
4914 return true;
4915 else
4916 return false;
4917 }
4918 else if (byte_size > UNITS_PER_WORD * MAX_INT_RETURN_SLOTS)
4919 return true;
4920 else
4921 return false;
4922 }
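
/* For example, an HFA of ten doubles (80 bytes of 8-byte elements,
   i.e. more than the 8 available slots) is forced into memory, while
   an HFA of eight or fewer doubles comes back in FP registers.  */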
4923
4924 /* Return rtx for register that holds the function return value. */
4925
4926 static rtx
4927 ia64_function_value (const_tree valtype,
4928 const_tree fn_decl_or_type,
4929 bool outgoing ATTRIBUTE_UNUSED)
4930 {
4931 enum machine_mode mode;
4932 enum machine_mode hfa_mode;
4933 int unsignedp;
4934 const_tree func = fn_decl_or_type;
4935
4936 if (fn_decl_or_type
4937 && !DECL_P (fn_decl_or_type))
4938 func = NULL;
4939
4940 mode = TYPE_MODE (valtype);
4941 hfa_mode = hfa_element_mode (valtype, 0);
4942
4943 if (hfa_mode != VOIDmode)
4944 {
4945 rtx loc[8];
4946 int i;
4947 int hfa_size;
4948 int byte_size;
4949 int offset;
4950
4951 hfa_size = GET_MODE_SIZE (hfa_mode);
4952 byte_size = ((mode == BLKmode)
4953 ? int_size_in_bytes (valtype) : GET_MODE_SIZE (mode));
4954 offset = 0;
4955 for (i = 0; offset < byte_size; i++)
4956 {
4957 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4958 gen_rtx_REG (hfa_mode, FR_ARG_FIRST + i),
4959 GEN_INT (offset));
4960 offset += hfa_size;
4961 }
4962 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4963 }
4964 else if (FLOAT_TYPE_P (valtype) && mode != TFmode && mode != TCmode)
4965 return gen_rtx_REG (mode, FR_ARG_FIRST);
4966 else
4967 {
4968 bool need_parallel = false;
4969
4970 /* In big-endian mode, we need to manage the layout of aggregates
4971 in the registers so that we get the bits properly aligned in
4972 the highpart of the registers. */
4973 if (BYTES_BIG_ENDIAN
4974 && (mode == BLKmode || (valtype && AGGREGATE_TYPE_P (valtype))))
4975 need_parallel = true;
4976
4977 /* Something like struct S { long double x; char a[0]; } is not an
4978 HFA structure, and therefore doesn't go in fp registers. But
4979 the middle-end will give it XFmode anyway, and XFmode values
4980 don't normally fit in integer registers. So we need to smuggle
4981 the value inside a parallel. */
4982 else if (mode == XFmode || mode == XCmode || mode == RFmode)
4983 need_parallel = true;
4984
4985 if (need_parallel)
4986 {
4987 rtx loc[8];
4988 int offset;
4989 int bytesize;
4990 int i;
4991
4992 offset = 0;
4993 bytesize = int_size_in_bytes (valtype);
4994 /* An empty PARALLEL is invalid here, but the return value
4995 doesn't matter for empty structs. */
4996 if (bytesize == 0)
4997 return gen_rtx_REG (mode, GR_RET_FIRST);
4998 for (i = 0; offset < bytesize; i++)
4999 {
5000 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
5001 gen_rtx_REG (DImode,
5002 GR_RET_FIRST + i),
5003 GEN_INT (offset));
5004 offset += UNITS_PER_WORD;
5005 }
5006 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
5007 }
5008
5009 mode = promote_function_mode (valtype, mode, &unsignedp,
5010 func ? TREE_TYPE (func) : NULL_TREE,
5011 true);
5012
5013 return gen_rtx_REG (mode, GR_RET_FIRST);
5014 }
5015 }
5016
5017 /* Worker function for TARGET_LIBCALL_VALUE. */
5018
5019 static rtx
5020 ia64_libcall_value (enum machine_mode mode,
5021 const_rtx fun ATTRIBUTE_UNUSED)
5022 {
5023 return gen_rtx_REG (mode,
5024 (((GET_MODE_CLASS (mode) == MODE_FLOAT
5025 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
5026 && (mode) != TFmode)
5027 ? FR_RET_FIRST : GR_RET_FIRST));
5028 }
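
/* So a DFmode or SCmode libcall result lands in the first FP return
   register, while integer modes and TFmode use the first general
   return register.  */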
5029
5030 /* Worker function for FUNCTION_VALUE_REGNO_P. */
5031
5032 static bool
5033 ia64_function_value_regno_p (const unsigned int regno)
5034 {
5035 return ((regno >= GR_RET_FIRST && regno <= GR_RET_LAST)
5036 || (regno >= FR_RET_FIRST && regno <= FR_RET_LAST));
5037 }
5038
5039 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
5040 We need to emit DTP-relative relocations. */
5041
5042 static void
5043 ia64_output_dwarf_dtprel (FILE *file, int size, rtx x)
5044 {
5045 gcc_assert (size == 4 || size == 8);
5046 if (size == 4)
5047 fputs ("\tdata4.ua\t@dtprel(", file);
5048 else
5049 fputs ("\tdata8.ua\t@dtprel(", file);
5050 output_addr_const (file, x);
5051 fputs (")", file);
5052 }
5053
5054 /* Print a memory address as an operand to reference that memory location. */
5055
5056 /* ??? Do we need this? It gets used only for 'a' operands. We could perhaps
5057 also call this from ia64_print_operand for memory addresses. */
5058
5059 static void
5060 ia64_print_operand_address (FILE * stream ATTRIBUTE_UNUSED,
5061 rtx address ATTRIBUTE_UNUSED)
5062 {
5063 }
5064
5065 /* Print an operand to an assembler instruction.
5066 C Swap and print a comparison operator.
5067 D Print an FP comparison operator.
5068 E Print 32 - constant, for SImode shifts as extract.
5069 e Print 64 - constant, for DImode rotates.
5070 F A floating point constant 0.0 emitted as f0, or 1.0 emitted as f1, or
5071 a floating point register emitted normally.
5072 G A floating point constant.
5073 I Invert a predicate register by adding 1.
5074 J Select the proper predicate register for a condition.
5075 j Select the inverse predicate register for a condition.
5076 O Append .acq for volatile load.
5077 P Postincrement of a MEM.
5078 Q Append .rel for volatile store.
5079 R Print .s .d or nothing for a single, double or no truncation.
5080 S Shift amount for shladd instruction.
5081 T Print an 8-bit sign extended number (K) as a 32-bit unsigned number
5082 for Intel assembler.
5083 U Print an 8-bit sign extended number (K) as a 64-bit unsigned number
5084 for Intel assembler.
5085 X A pair of floating point registers.
5086 r Print register name, or constant 0 as r0. HP compatibility for
5087 Linux kernel.
5088 v Print vector constant value as an 8-byte integer value. */
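
/* For instance, given a volatile MEM operand, "%O" appends ".acq" to a
   load and "%Q" appends ".rel" to a store, while "%C" prints the
   swapped form of a comparison (e.g. "lt" becomes "gt").  */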
5089
5090 static void
5091 ia64_print_operand (FILE * file, rtx x, int code)
5092 {
5093 const char *str;
5094
5095 switch (code)
5096 {
5097 case 0:
5098 /* Handled below. */
5099 break;
5100
5101 case 'C':
5102 {
5103 enum rtx_code c = swap_condition (GET_CODE (x));
5104 fputs (GET_RTX_NAME (c), file);
5105 return;
5106 }
5107
5108 case 'D':
5109 switch (GET_CODE (x))
5110 {
5111 case NE:
5112 str = "neq";
5113 break;
5114 case UNORDERED:
5115 str = "unord";
5116 break;
5117 case ORDERED:
5118 str = "ord";
5119 break;
5120 case UNLT:
5121 str = "nge";
5122 break;
5123 case UNLE:
5124 str = "ngt";
5125 break;
5126 case UNGT:
5127 str = "nle";
5128 break;
5129 case UNGE:
5130 str = "nlt";
5131 break;
5132 default:
5133 str = GET_RTX_NAME (GET_CODE (x));
5134 break;
5135 }
5136 fputs (str, file);
5137 return;
5138
5139 case 'E':
5140 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - INTVAL (x));
5141 return;
5142
5143 case 'e':
5144 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - INTVAL (x));
5145 return;
5146
5147 case 'F':
5148 if (x == CONST0_RTX (GET_MODE (x)))
5149 str = reg_names [FR_REG (0)];
5150 else if (x == CONST1_RTX (GET_MODE (x)))
5151 str = reg_names [FR_REG (1)];
5152 else
5153 {
5154 gcc_assert (GET_CODE (x) == REG);
5155 str = reg_names [REGNO (x)];
5156 }
5157 fputs (str, file);
5158 return;
5159
5160 case 'G':
5161 {
5162 long val[4];
5163 REAL_VALUE_TYPE rv;
5164 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
5165 real_to_target (val, &rv, GET_MODE (x));
5166 if (GET_MODE (x) == SFmode)
5167 fprintf (file, "0x%08lx", val[0] & 0xffffffff);
5168 else if (GET_MODE (x) == DFmode)
5169 fprintf (file, "0x%08lx%08lx", (WORDS_BIG_ENDIAN ? val[0] : val[1])
5170 & 0xffffffff,
5171 (WORDS_BIG_ENDIAN ? val[1] : val[0])
5172 & 0xffffffff);
5173 else
5174 output_operand_lossage ("invalid %%G mode");
5175 }
5176 return;
5177
5178 case 'I':
5179 fputs (reg_names [REGNO (x) + 1], file);
5180 return;
5181
5182 case 'J':
5183 case 'j':
5184 {
5185 unsigned int regno = REGNO (XEXP (x, 0));
5186 if (GET_CODE (x) == EQ)
5187 regno += 1;
5188 if (code == 'j')
5189 regno ^= 1;
5190 fputs (reg_names [regno], file);
5191 }
5192 return;
5193
5194 case 'O':
5195 if (MEM_VOLATILE_P (x))
5196 fputs(".acq", file);
5197 return;
5198
5199 case 'P':
5200 {
5201 HOST_WIDE_INT value;
5202
5203 switch (GET_CODE (XEXP (x, 0)))
5204 {
5205 default:
5206 return;
5207
5208 case POST_MODIFY:
5209 x = XEXP (XEXP (XEXP (x, 0), 1), 1);
5210 if (GET_CODE (x) == CONST_INT)
5211 value = INTVAL (x);
5212 else
5213 {
5214 gcc_assert (GET_CODE (x) == REG);
5215 fprintf (file, ", %s", reg_names[REGNO (x)]);
5216 return;
5217 }
5218 break;
5219
5220 case POST_INC:
5221 value = GET_MODE_SIZE (GET_MODE (x));
5222 break;
5223
5224 case POST_DEC:
5225 value = - (HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (x));
5226 break;
5227 }
5228
5229 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC, value);
5230 return;
5231 }
5232
5233 case 'Q':
5234 if (MEM_VOLATILE_P (x))
5235 fputs(".rel", file);
5236 return;
5237
5238 case 'R':
5239 if (x == CONST0_RTX (GET_MODE (x)))
5240 fputs(".s", file);
5241 else if (x == CONST1_RTX (GET_MODE (x)))
5242 fputs(".d", file);
5243 else if (x == CONST2_RTX (GET_MODE (x)))
5244 ;
5245 else
5246 output_operand_lossage ("invalid %%R value");
5247 return;
5248
5249 case 'S':
5250 fprintf (file, "%d", exact_log2 (INTVAL (x)));
5251 return;
5252
5253 case 'T':
5254 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
5255 {
5256 fprintf (file, "0x%x", (int) INTVAL (x) & 0xffffffff);
5257 return;
5258 }
5259 break;
5260
5261 case 'U':
5262 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
5263 {
5264 const char *prefix = "0x";
5265 if (INTVAL (x) & 0x80000000)
5266 {
5267 fprintf (file, "0xffffffff");
5268 prefix = "";
5269 }
5270 fprintf (file, "%s%x", prefix, (int) INTVAL (x) & 0xffffffff);
5271 return;
5272 }
5273 break;
5274
5275 case 'X':
5276 {
5277 unsigned int regno = REGNO (x);
5278 fprintf (file, "%s, %s", reg_names [regno], reg_names [regno + 1]);
5279 }
5280 return;
5281
5282 case 'r':
5283 /* If this operand is the constant zero, write it as register zero.
5284 Any register, zero, or CONST_INT value is OK here. */
5285 if (GET_CODE (x) == REG)
5286 fputs (reg_names[REGNO (x)], file);
5287 else if (x == CONST0_RTX (GET_MODE (x)))
5288 fputs ("r0", file);
5289 else if (GET_CODE (x) == CONST_INT)
5290 output_addr_const (file, x);
5291 else
5292 output_operand_lossage ("invalid %%r value");
5293 return;
5294
5295 case 'v':
5296 gcc_assert (GET_CODE (x) == CONST_VECTOR);
5297 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
5298 break;
5299
5300 case '+':
5301 {
5302 const char *which;
5303
5304 /* For conditional branches, returns or calls, substitute
5305 sptk, dptk, dpnt, or spnt for %+. */
5306 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
5307 if (x)
5308 {
5309 int pred_val = INTVAL (XEXP (x, 0));
5310
5311 /* Guess top and bottom 2% statically predicted. */
5312 if (pred_val < REG_BR_PROB_BASE / 50
5313 && br_prob_note_reliable_p (x))
5314 which = ".spnt";
5315 else if (pred_val < REG_BR_PROB_BASE / 2)
5316 which = ".dpnt";
5317 else if (pred_val < REG_BR_PROB_BASE / 100 * 98
5318 || !br_prob_note_reliable_p (x))
5319 which = ".dptk";
5320 else
5321 which = ".sptk";
5322 }
5323 else if (GET_CODE (current_output_insn) == CALL_INSN)
5324 which = ".sptk";
5325 else
5326 which = ".dptk";
5327
5328 fputs (which, file);
5329 return;
5330 }
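      /* The net effect: %+ expands to one of .sptk/.spnt/.dptk/.dpnt, so a
	 branch the profile says is almost never taken comes out roughly as
	 "(p6) br.cond.spnt .L7" (predicate and label here are illustrative),
	 while an unannotated call defaults to the statically-taken hint.  */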
5331
5332 case ',':
5333 x = current_insn_predicate;
5334 if (x)
5335 {
5336 unsigned int regno = REGNO (XEXP (x, 0));
5337 if (GET_CODE (x) == EQ)
5338 regno += 1;
5339 fprintf (file, "(%s) ", reg_names [regno]);
5340 }
5341 return;
5342
5343 default:
5344 output_operand_lossage ("ia64_print_operand: unknown code");
5345 return;
5346 }
5347
5348 switch (GET_CODE (x))
5349 {
5350 /* This happens for the spill/restore instructions. */
5351 case POST_INC:
5352 case POST_DEC:
5353 case POST_MODIFY:
5354 x = XEXP (x, 0);
5355 /* ... fall through ... */
5356
5357 case REG:
5358 fputs (reg_names [REGNO (x)], file);
5359 break;
5360
5361 case MEM:
5362 {
5363 rtx addr = XEXP (x, 0);
5364 if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC)
5365 addr = XEXP (addr, 0);
5366 fprintf (file, "[%s]", reg_names [REGNO (addr)]);
5367 break;
5368 }
5369
5370 default:
5371 output_addr_const (file, x);
5372 break;
5373 }
5374
5375 return;
5376 }
5377
5378 /* Worker function for TARGET_PRINT_OPERAND_PUNCT_VALID_P. */
5379
5380 static bool
5381 ia64_print_operand_punct_valid_p (unsigned char code)
5382 {
5383 return (code == '+' || code == ',');
5384 }
5385 \f
5386 /* Compute a (partial) cost for rtx X. Return true if the complete
5387 cost has been computed, and false if subexpressions should be
5388 scanned. In either case, *TOTAL contains the cost result. */
5389 /* ??? This is incomplete. */
5390
5391 static bool
5392 ia64_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
5393 int *total, bool speed ATTRIBUTE_UNUSED)
5394 {
5395 switch (code)
5396 {
5397 case CONST_INT:
5398 switch (outer_code)
5399 {
5400 case SET:
5401 *total = satisfies_constraint_J (x) ? 0 : COSTS_N_INSNS (1);
5402 return true;
5403 case PLUS:
5404 if (satisfies_constraint_I (x))
5405 *total = 0;
5406 else if (satisfies_constraint_J (x))
5407 *total = 1;
5408 else
5409 *total = COSTS_N_INSNS (1);
5410 return true;
5411 default:
5412 if (satisfies_constraint_K (x) || satisfies_constraint_L (x))
5413 *total = 0;
5414 else
5415 *total = COSTS_N_INSNS (1);
5416 return true;
5417 }
5418
5419 case CONST_DOUBLE:
5420 *total = COSTS_N_INSNS (1);
5421 return true;
5422
5423 case CONST:
5424 case SYMBOL_REF:
5425 case LABEL_REF:
5426 *total = COSTS_N_INSNS (3);
5427 return true;
5428
5429 case FMA:
5430 *total = COSTS_N_INSNS (4);
5431 return true;
5432
5433 case MULT:
5434 /* For multiplies wider than HImode, we have to go to the FPU,
5435 which normally involves copies. Plus there's the latency
5436 of the multiply itself, and the latency of the instructions to
5437 transfer integer regs to FP regs. */
5438 if (FLOAT_MODE_P (GET_MODE (x)))
5439 *total = COSTS_N_INSNS (4);
5440 else if (GET_MODE_SIZE (GET_MODE (x)) > 2)
5441 *total = COSTS_N_INSNS (10);
5442 else
5443 *total = COSTS_N_INSNS (2);
5444 return true;
5445
5446 case PLUS:
5447 case MINUS:
5448 if (FLOAT_MODE_P (GET_MODE (x)))
5449 {
5450 *total = COSTS_N_INSNS (4);
5451 return true;
5452 }
5453 /* FALLTHRU */
5454
5455 case ASHIFT:
5456 case ASHIFTRT:
5457 case LSHIFTRT:
5458 *total = COSTS_N_INSNS (1);
5459 return true;
5460
5461 case DIV:
5462 case UDIV:
5463 case MOD:
5464 case UMOD:
5465 /* We make divide expensive, so that divide-by-constant will be
5466 optimized to a multiply. */
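      /* (COSTS_N_INSNS (N) expands to N * 4 cost units, so 60 here is 240 --
	 comfortably above the handful-of-insn multiply/shift sequence that
	 expand_divmod emits for division by a constant.)  */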
5467 *total = COSTS_N_INSNS (60);
5468 return true;
5469
5470 default:
5471 return false;
5472 }
5473 }
5474
5475 /* Calculate the cost of moving data from a register in class FROM to
5476 one in class TO, using MODE. */
5477
5478 static int
5479 ia64_register_move_cost (enum machine_mode mode, reg_class_t from,
5480 reg_class_t to)
5481 {
5482 /* ADDL_REGS is the same as GR_REGS for movement purposes. */
5483 if (to == ADDL_REGS)
5484 to = GR_REGS;
5485 if (from == ADDL_REGS)
5486 from = GR_REGS;
5487
5488 /* All costs are symmetric, so reduce cases by putting the
5489 lower number class as the destination. */
5490 if (from < to)
5491 {
5492 reg_class_t tmp = to;
5493 to = from, from = tmp;
5494 }
5495
5496 /* Moving from FR<->GR in XFmode must be more expensive than 2,
5497 so that we get secondary memory reloads. Between FR_REGS,
5498 we have to make this at least as expensive as memory_move_cost
5499 to avoid spectacularly poor register class preferencing. */
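  /* A concrete sketch: a DImode GR<->FR copy falls through to the final
     "return 2" below (roughly one getf/setf), whereas an XFmode GR<->FR
     copy is priced at memory_move_cost -- which ia64_memory_move_cost
     below reports as 4 for these classes -- deliberately above 2, so that
     reload spills such values through memory instead.  */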
5500 if (mode == XFmode || mode == RFmode)
5501 {
5502 if (to != GR_REGS || from != GR_REGS)
5503 return memory_move_cost (mode, to, false);
5504 else
5505 return 3;
5506 }
5507
5508 switch (to)
5509 {
5510 case PR_REGS:
5511 /* Moving between PR registers takes two insns. */
5512 if (from == PR_REGS)
5513 return 3;
5514 /* Moving between PR and anything but GR is impossible. */
5515 if (from != GR_REGS)
5516 return memory_move_cost (mode, to, false);
5517 break;
5518
5519 case BR_REGS:
5520 /* Moving between BR and anything but GR is impossible. */
5521 if (from != GR_REGS && from != GR_AND_BR_REGS)
5522 return memory_move_cost (mode, to, false);
5523 break;
5524
5525 case AR_I_REGS:
5526 case AR_M_REGS:
5527 /* Moving between AR and anything but GR is impossible. */
5528 if (from != GR_REGS)
5529 return memory_move_cost (mode, to, false);
5530 break;
5531
5532 case GR_REGS:
5533 case FR_REGS:
5534 case FP_REGS:
5535 case GR_AND_FR_REGS:
5536 case GR_AND_BR_REGS:
5537 case ALL_REGS:
5538 break;
5539
5540 default:
5541 gcc_unreachable ();
5542 }
5543
5544 return 2;
5545 }
5546
5547 /* Calculate the cost of moving data of mode MODE between a register in
5548 class RCLASS and memory. */
5549
5550 static int
5551 ia64_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
5552 reg_class_t rclass,
5553 bool in ATTRIBUTE_UNUSED)
5554 {
5555 if (rclass == GENERAL_REGS
5556 || rclass == FR_REGS
5557 || rclass == FP_REGS
5558 || rclass == GR_AND_FR_REGS)
5559 return 4;
5560 else
5561 return 10;
5562 }
5563
5564 /* Implement TARGET_PREFERRED_RELOAD_CLASS. Place additional restrictions
5565 on RCLASS to use when copying X into that class. */
5566
5567 static reg_class_t
5568 ia64_preferred_reload_class (rtx x, reg_class_t rclass)
5569 {
5570 switch (rclass)
5571 {
5572 case FR_REGS:
5573 case FP_REGS:
5574 /* Don't allow volatile mem reloads into floating point registers.
5575 This is defined to force reload to choose the r/m case instead
5576 of the f/f case when reloading (set (reg fX) (mem/v)). */
5577 if (MEM_P (x) && MEM_VOLATILE_P (x))
5578 return NO_REGS;
5579
5580 /* Force all unrecognized constants into the constant pool. */
5581 if (CONSTANT_P (x))
5582 return NO_REGS;
5583 break;
5584
5585 case AR_M_REGS:
5586 case AR_I_REGS:
5587 if (!OBJECT_P (x))
5588 return NO_REGS;
5589 break;
5590
5591 default:
5592 break;
5593 }
5594
5595 return rclass;
5596 }
5597
5598 /* This function returns the register class required for a secondary
5599 register when copying between one of the registers in RCLASS, and X,
5600 using MODE. A return value of NO_REGS means that no secondary register
5601 is required. */
5602
5603 enum reg_class
5604 ia64_secondary_reload_class (enum reg_class rclass,
5605 enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
5606 {
5607 int regno = -1;
5608
5609 if (GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
5610 regno = true_regnum (x);
5611
5612 switch (rclass)
5613 {
5614 case BR_REGS:
5615 case AR_M_REGS:
5616 case AR_I_REGS:
5617 /* ??? BR<->BR register copies can happen due to a bad gcse/cse/global
5618 interaction. We end up with two pseudos with overlapping lifetimes
5619 both of which are equiv to the same constant, and both of which need
5620 to be in BR_REGS. This seems to be a cse bug. cse_basic_block_end
5621 changes depending on the path length, which means the qty_first_reg
5622 check in make_regs_eqv can give different answers at different times.
5623 At some point I'll probably need a reload_indi pattern to handle
5624 this.
5625
5626 We can also get GR_AND_FR_REGS to BR_REGS/AR_REGS copies, where we
5627 wound up with a FP register from GR_AND_FR_REGS. Extend that to all
5628 non-general registers for good measure. */
5629 if (regno >= 0 && ! GENERAL_REGNO_P (regno))
5630 return GR_REGS;
5631
5632 /* This is needed if a pseudo used as a call_operand gets spilled to a
5633 stack slot. */
5634 if (GET_CODE (x) == MEM)
5635 return GR_REGS;
5636 break;
5637
5638 case FR_REGS:
5639 case FP_REGS:
5640 /* Need to go through general registers to get to other class regs. */
5641 if (regno >= 0 && ! (FR_REGNO_P (regno) || GENERAL_REGNO_P (regno)))
5642 return GR_REGS;
5643
5644 /* This can happen when a paradoxical subreg is an operand to the
5645 muldi3 pattern. */
5646 /* ??? This shouldn't be necessary after instruction scheduling is
5647 enabled, because paradoxical subregs are not accepted by
5648 register_operand when INSN_SCHEDULING is defined. Or alternatively,
5649 stop the paradoxical subreg stupidity in the *_operand functions
5650 in recog.c. */
5651 if (GET_CODE (x) == MEM
5652 && (GET_MODE (x) == SImode || GET_MODE (x) == HImode
5653 || GET_MODE (x) == QImode))
5654 return GR_REGS;
5655
5656 /* This can happen because of the ior/and/etc patterns that accept FP
5657 registers as operands. If the third operand is a constant, then it
5658 needs to be reloaded into a FP register. */
5659 if (GET_CODE (x) == CONST_INT)
5660 return GR_REGS;
5661
5662 /* This can happen because of register elimination in a muldi3 insn.
5663 E.g. `26107 * (unsigned long)&u'. */
5664 if (GET_CODE (x) == PLUS)
5665 return GR_REGS;
5666 break;
5667
5668 case PR_REGS:
5669 /* ??? This happens if we cse/gcse a BImode value across a call,
5670 and the function has a nonlocal goto. This is because global
5671 does not allocate call crossing pseudos to hard registers when
5672 crtl->has_nonlocal_goto is true. This is relatively
5673 common for C++ programs that use exceptions. To reproduce,
5674 return NO_REGS and compile libstdc++. */
5675 if (GET_CODE (x) == MEM)
5676 return GR_REGS;
5677
5678 /* This can happen when we take a BImode subreg of a DImode value,
5679 and that DImode value winds up in some non-GR register. */
5680 if (regno >= 0 && ! GENERAL_REGNO_P (regno) && ! PR_REGNO_P (regno))
5681 return GR_REGS;
5682 break;
5683
5684 default:
5685 break;
5686 }
5687
5688 return NO_REGS;
5689 }
5690
5691 \f
5692 /* Implement targetm.unspec_may_trap_p hook. */
5693 static int
5694 ia64_unspec_may_trap_p (const_rtx x, unsigned flags)
5695 {
5696 if (GET_CODE (x) == UNSPEC)
5697 {
5698 switch (XINT (x, 1))
5699 {
5700 case UNSPEC_LDA:
5701 case UNSPEC_LDS:
5702 case UNSPEC_LDSA:
5703 case UNSPEC_LDCCLR:
5704 case UNSPEC_CHKACLR:
5705 case UNSPEC_CHKS:
5706 /* These unspecs are just wrappers. */
5707 return may_trap_p_1 (XVECEXP (x, 0, 0), flags);
5708 }
5709 }
5710
5711 return default_unspec_may_trap_p (x, flags);
5712 }
5713
5714 \f
5715 /* Parse the -mfixed-range= option string. */
5716
5717 static void
5718 fix_range (const char *const_str)
5719 {
5720 int i, first, last;
5721 char *str, *dash, *comma;
5722
5723 /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
5724 REG2 are either register names or register numbers. The effect
5725 of this option is to mark the registers in the range from REG1 to
5726 REG2 as ``fixed'' so they won't be used by the compiler. This is
5727 used, e.g., to ensure that kernel mode code doesn't use f32-f127. */
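  /* For example: -mfixed-range=f32-f127 fixes the whole upper FP bank,
     and several ranges can be given: -mfixed-range=f12-f15,f32-f127.  */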
5728
5729 i = strlen (const_str);
5730 str = (char *) alloca (i + 1);
5731 memcpy (str, const_str, i + 1);
5732
5733 while (1)
5734 {
5735 dash = strchr (str, '-');
5736 if (!dash)
5737 {
5738 warning (0, "value of -mfixed-range must have form REG1-REG2");
5739 return;
5740 }
5741 *dash = '\0';
5742
5743 comma = strchr (dash + 1, ',');
5744 if (comma)
5745 *comma = '\0';
5746
5747 first = decode_reg_name (str);
5748 if (first < 0)
5749 {
5750 warning (0, "unknown register name: %s", str);
5751 return;
5752 }
5753
5754 last = decode_reg_name (dash + 1);
5755 if (last < 0)
5756 {
5757 warning (0, "unknown register name: %s", dash + 1);
5758 return;
5759 }
5760
5761 *dash = '-';
5762
5763 if (first > last)
5764 {
5765 warning (0, "%s-%s is an empty range", str, dash + 1);
5766 return;
5767 }
5768
5769 for (i = first; i <= last; ++i)
5770 fixed_regs[i] = call_used_regs[i] = 1;
5771
5772 if (!comma)
5773 break;
5774
5775 *comma = ',';
5776 str = comma + 1;
5777 }
5778 }
5779
5780 /* Implement TARGET_OPTION_OVERRIDE. */
5781
5782 static void
5783 ia64_option_override (void)
5784 {
5785 unsigned int i;
5786 cl_deferred_option *opt;
5787 VEC(cl_deferred_option,heap) *vec
5788 = (VEC(cl_deferred_option,heap) *) ia64_deferred_options;
5789
5790 FOR_EACH_VEC_ELT (cl_deferred_option, vec, i, opt)
5791 {
5792 switch (opt->opt_index)
5793 {
5794 case OPT_mfixed_range_:
5795 fix_range (opt->arg);
5796 break;
5797
5798 default:
5799 gcc_unreachable ();
5800 }
5801 }
5802
5803 if (TARGET_AUTO_PIC)
5804 target_flags |= MASK_CONST_GP;
5805
5806 /* Numerous experiments show that IRA-based loop pressure
5807 calculation works better for RTL loop invariant motion on targets
5808 with enough (>= 32) registers. It is an expensive optimization.
5809 So it is on only for peak performance. */
5810 if (optimize >= 3)
5811 flag_ira_loop_pressure = 1;
5812
5813
5814 ia64_section_threshold = (global_options_set.x_g_switch_value
5815 ? g_switch_value
5816 : IA64_DEFAULT_GVALUE);
5817
5818 init_machine_status = ia64_init_machine_status;
5819
5820 if (align_functions <= 0)
5821 align_functions = 64;
5822 if (align_loops <= 0)
5823 align_loops = 32;
5824 if (TARGET_ABI_OPEN_VMS)
5825 flag_no_common = 1;
5826
5827 ia64_override_options_after_change();
5828 }
5829
5830 /* Implement targetm.override_options_after_change. */
5831
5832 static void
5833 ia64_override_options_after_change (void)
5834 {
5835 if (optimize >= 3
5836 && !global_options_set.x_flag_selective_scheduling
5837 && !global_options_set.x_flag_selective_scheduling2)
5838 {
5839 flag_selective_scheduling2 = 1;
5840 flag_sel_sched_pipelining = 1;
5841 }
5842 if (mflag_sched_control_spec == 2)
5843 {
5844 /* Control speculation is on by default for the selective scheduler,
5845 but not for the Haifa scheduler. */
5846 mflag_sched_control_spec = flag_selective_scheduling2 ? 1 : 0;
5847 }
5848 if (flag_sel_sched_pipelining && flag_auto_inc_dec)
5849 {
5850 /* FIXME: remove this once we implement breaking auto-inc insns as
5851 a transformation. */
5852 flag_auto_inc_dec = 0;
5853 }
5854 }
5855
5856 /* Initialize the record of emitted frame related registers. */
5857
5858 void ia64_init_expanders (void)
5859 {
5860 memset (&emitted_frame_related_regs, 0, sizeof (emitted_frame_related_regs));
5861 }
5862
5863 static struct machine_function *
5864 ia64_init_machine_status (void)
5865 {
5866 return ggc_alloc_cleared_machine_function ();
5867 }
5868 \f
5869 static enum attr_itanium_class ia64_safe_itanium_class (rtx);
5870 static enum attr_type ia64_safe_type (rtx);
5871
5872 static enum attr_itanium_class
5873 ia64_safe_itanium_class (rtx insn)
5874 {
5875 if (recog_memoized (insn) >= 0)
5876 return get_attr_itanium_class (insn);
5877 else if (DEBUG_INSN_P (insn))
5878 return ITANIUM_CLASS_IGNORE;
5879 else
5880 return ITANIUM_CLASS_UNKNOWN;
5881 }
5882
5883 static enum attr_type
5884 ia64_safe_type (rtx insn)
5885 {
5886 if (recog_memoized (insn) >= 0)
5887 return get_attr_type (insn);
5888 else
5889 return TYPE_UNKNOWN;
5890 }
5891 \f
5892 /* The following collection of routines emit instruction group stop bits as
5893 necessary to avoid dependencies. */
5894
5895 /* Need to track some additional registers as far as serialization is
5896 concerned so we can properly handle br.call and br.ret. We could
5897 make these registers visible to gcc, but since these registers are
5898 never explicitly used in gcc generated code, it seems wasteful to
5899 do so (plus it would make the call and return patterns needlessly
5900 complex). */
5901 #define REG_RP (BR_REG (0))
5902 #define REG_AR_CFM (FIRST_PSEUDO_REGISTER + 1)
5903 /* This is used for volatile asms which may require a stop bit immediately
5904 before and after them. */
5905 #define REG_VOLATILE (FIRST_PSEUDO_REGISTER + 2)
5906 #define AR_UNAT_BIT_0 (FIRST_PSEUDO_REGISTER + 3)
5907 #define NUM_REGS (AR_UNAT_BIT_0 + 64)
5908
5909 /* For each register, we keep track of how it has been written in the
5910 current instruction group.
5911
5912 If a register is written unconditionally (no qualifying predicate),
5913 WRITE_COUNT is set to 2 and FIRST_PRED is ignored.
5914
5915 If a register is written if its qualifying predicate P is true, we
5916 set WRITE_COUNT to 1 and FIRST_PRED to P. Later on, the same register
5917 may be written again by the complement of P (P^1) and when this happens,
5918 WRITE_COUNT gets set to 2.
5919
5920 The result of this is that whenever an insn attempts to write a register
5921 whose WRITE_COUNT is two, we need to issue an insn group barrier first.
5922
5923 If a predicate register is written by a floating-point insn, we set
5924 WRITTEN_BY_FP to true.
5925
5926 If a predicate register is written by an AND.ORCM we set WRITTEN_BY_AND
5927 to true; if it was written by an OR.ANDCM we set WRITTEN_BY_OR to true. */
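/* A sketch of the common case: an unpredicated "mov r14 = r15" sets
   rws_sum's entry for r14 to write_count == 2 immediately; if a later insn
   in the same group then reads or writes r14, rws_access_regno returns 1
   and group_barrier_needed asks for a stop bit (";;") before that insn.  */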
5928
5929 #if GCC_VERSION >= 4000
5930 #define RWS_FIELD_TYPE __extension__ unsigned short
5931 #else
5932 #define RWS_FIELD_TYPE unsigned int
5933 #endif
5934 struct reg_write_state
5935 {
5936 RWS_FIELD_TYPE write_count : 2;
5937 RWS_FIELD_TYPE first_pred : 10;
5938 RWS_FIELD_TYPE written_by_fp : 1;
5939 RWS_FIELD_TYPE written_by_and : 1;
5940 RWS_FIELD_TYPE written_by_or : 1;
5941 };
5942
5943 /* Cumulative info for the current instruction group. */
5944 struct reg_write_state rws_sum[NUM_REGS];
5945 #ifdef ENABLE_CHECKING
5946 /* Bitmap whether a register has been written in the current insn. */
5947 HARD_REG_ELT_TYPE rws_insn[(NUM_REGS + HOST_BITS_PER_WIDEST_FAST_INT - 1)
5948 / HOST_BITS_PER_WIDEST_FAST_INT];
5949
5950 static inline void
5951 rws_insn_set (int regno)
5952 {
5953 gcc_assert (!TEST_HARD_REG_BIT (rws_insn, regno));
5954 SET_HARD_REG_BIT (rws_insn, regno);
5955 }
5956
5957 static inline int
5958 rws_insn_test (int regno)
5959 {
5960 return TEST_HARD_REG_BIT (rws_insn, regno);
5961 }
5962 #else
5963 /* When not checking, track just REG_AR_CFM and REG_VOLATILE. */
5964 unsigned char rws_insn[2];
5965
5966 static inline void
5967 rws_insn_set (int regno)
5968 {
5969 if (regno == REG_AR_CFM)
5970 rws_insn[0] = 1;
5971 else if (regno == REG_VOLATILE)
5972 rws_insn[1] = 1;
5973 }
5974
5975 static inline int
5976 rws_insn_test (int regno)
5977 {
5978 if (regno == REG_AR_CFM)
5979 return rws_insn[0];
5980 if (regno == REG_VOLATILE)
5981 return rws_insn[1];
5982 return 0;
5983 }
5984 #endif
5985
5986 /* Indicates whether this is the first instruction after a stop bit,
5987 in which case we don't need another stop bit. Without this,
5988 ia64_variable_issue will die when scheduling an alloc. */
5989 static int first_instruction;
5990
5991 /* Misc flags needed to compute RAW/WAW dependencies while we are traversing
5992 RTL for one instruction. */
5993 struct reg_flags
5994 {
5995 unsigned int is_write : 1; /* Is register being written? */
5996 unsigned int is_fp : 1; /* Is register used as part of an fp op? */
5997 unsigned int is_branch : 1; /* Is register used as part of a branch? */
5998 unsigned int is_and : 1; /* Is register used as part of and.orcm? */
5999 unsigned int is_or : 1; /* Is register used as part of or.andcm? */
6000 unsigned int is_sibcall : 1; /* Is this a sibling call (vs. a normal call)? */
6001 };
6002
6003 static void rws_update (int, struct reg_flags, int);
6004 static int rws_access_regno (int, struct reg_flags, int);
6005 static int rws_access_reg (rtx, struct reg_flags, int);
6006 static void update_set_flags (rtx, struct reg_flags *);
6007 static int set_src_needs_barrier (rtx, struct reg_flags, int);
6008 static int rtx_needs_barrier (rtx, struct reg_flags, int);
6009 static void init_insn_group_barriers (void);
6010 static int group_barrier_needed (rtx);
6011 static int safe_group_barrier_needed (rtx);
6012 static int in_safe_group_barrier;
6013
6014 /* Update *RWS for REGNO, which is being written by the current instruction,
6015 with predicate PRED, and associated register flags in FLAGS. */
6016
6017 static void
6018 rws_update (int regno, struct reg_flags flags, int pred)
6019 {
6020 if (pred)
6021 rws_sum[regno].write_count++;
6022 else
6023 rws_sum[regno].write_count = 2;
6024 rws_sum[regno].written_by_fp |= flags.is_fp;
6025 /* ??? Not tracking and/or across differing predicates. */
6026 rws_sum[regno].written_by_and = flags.is_and;
6027 rws_sum[regno].written_by_or = flags.is_or;
6028 rws_sum[regno].first_pred = pred;
6029 }
6030
6031 /* Handle an access to register REGNO of type FLAGS using predicate register
6032 PRED. Update rws_sum array. Return 1 if this access creates
6033 a dependency with an earlier instruction in the same group. */
6034
6035 static int
6036 rws_access_regno (int regno, struct reg_flags flags, int pred)
6037 {
6038 int need_barrier = 0;
6039
6040 gcc_assert (regno < NUM_REGS);
6041
6042 if (! PR_REGNO_P (regno))
6043 flags.is_and = flags.is_or = 0;
6044
6045 if (flags.is_write)
6046 {
6047 int write_count;
6048
6049 rws_insn_set (regno);
6050 write_count = rws_sum[regno].write_count;
6051
6052 switch (write_count)
6053 {
6054 case 0:
6055 /* The register has not been written yet. */
6056 if (!in_safe_group_barrier)
6057 rws_update (regno, flags, pred);
6058 break;
6059
6060 case 1:
6061 /* The register has been written via a predicate. Treat
6062 it like an unconditional write and do not try to check
6063 for complementary pred reg in earlier write. */
6064 if (flags.is_and && rws_sum[regno].written_by_and)
6065 ;
6066 else if (flags.is_or && rws_sum[regno].written_by_or)
6067 ;
6068 else
6069 need_barrier = 1;
6070 if (!in_safe_group_barrier)
6071 rws_update (regno, flags, pred);
6072 break;
6073
6074 case 2:
6075 /* The register has been unconditionally written already. We
6076 need a barrier. */
6077 if (flags.is_and && rws_sum[regno].written_by_and)
6078 ;
6079 else if (flags.is_or && rws_sum[regno].written_by_or)
6080 ;
6081 else
6082 need_barrier = 1;
6083 if (!in_safe_group_barrier)
6084 {
6085 rws_sum[regno].written_by_and = flags.is_and;
6086 rws_sum[regno].written_by_or = flags.is_or;
6087 }
6088 break;
6089
6090 default:
6091 gcc_unreachable ();
6092 }
6093 }
6094 else
6095 {
6096 if (flags.is_branch)
6097 {
6098 /* Branches have several RAW exceptions that allow us to avoid
6099 barriers. */
6100
6101 if (REGNO_REG_CLASS (regno) == BR_REGS || regno == AR_PFS_REGNUM)
6102 /* RAW dependencies on branch regs are permissible as long
6103 as the writer is a non-branch instruction. Since we
6104 never generate code that uses a branch register written
6105 by a branch instruction, handling this case is
6106 easy. */
6107 return 0;
6108
6109 if (REGNO_REG_CLASS (regno) == PR_REGS
6110 && ! rws_sum[regno].written_by_fp)
6111 /* The predicates of a branch are available within the
6112 same insn group as long as the predicate was written by
6113 something other than a floating-point instruction. */
6114 return 0;
6115 }
6116
6117 if (flags.is_and && rws_sum[regno].written_by_and)
6118 return 0;
6119 if (flags.is_or && rws_sum[regno].written_by_or)
6120 return 0;
6121
6122 switch (rws_sum[regno].write_count)
6123 {
6124 case 0:
6125 /* The register has not been written yet. */
6126 break;
6127
6128 case 1:
6129 /* The register has been written via a predicate, assume we
6130 need a barrier (don't check for complementary regs). */
6131 need_barrier = 1;
6132 break;
6133
6134 case 2:
6135 /* The register has been unconditionally written already. We
6136 need a barrier. */
6137 need_barrier = 1;
6138 break;
6139
6140 default:
6141 gcc_unreachable ();
6142 }
6143 }
6144
6145 return need_barrier;
6146 }
6147
6148 static int
6149 rws_access_reg (rtx reg, struct reg_flags flags, int pred)
6150 {
6151 int regno = REGNO (reg);
6152 int n = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg));
6153
6154 if (n == 1)
6155 return rws_access_regno (regno, flags, pred);
6156 else
6157 {
6158 int need_barrier = 0;
6159 while (--n >= 0)
6160 need_barrier |= rws_access_regno (regno + n, flags, pred);
6161 return need_barrier;
6162 }
6163 }
6164
6165 /* Examine X, which is a SET rtx, and update the flags stored in *PFLAGS
6166 to reflect the kind of operation performed by its source. */
6167
6168 static void
6169 update_set_flags (rtx x, struct reg_flags *pflags)
6170 {
6171 rtx src = SET_SRC (x);
6172
6173 switch (GET_CODE (src))
6174 {
6175 case CALL:
6176 return;
6177
6178 case IF_THEN_ELSE:
6179 /* There are four cases here:
6180 (1) The destination is (pc), in which case this is a branch,
6181 nothing here applies.
6182 (2) The destination is ar.lc, in which case this is a
6183 doloop_end_internal,
6184 (3) The destination is an fp register, in which case this is
6185 an fselect instruction.
6186 (4) The condition has (unspec [(reg)] UNSPEC_LDC), in which case
6187 this is a check load.
6188 In all cases, nothing we do in this function applies. */
6189 return;
6190
6191 default:
6192 if (COMPARISON_P (src)
6193 && SCALAR_FLOAT_MODE_P (GET_MODE (XEXP (src, 0))))
6194 /* Set pflags->is_fp to 1 so that we know we're dealing
6195 with a floating point comparison when processing the
6196 destination of the SET. */
6197 pflags->is_fp = 1;
6198
6199 /* Discover if this is a parallel comparison. We only handle
6200 and.orcm and or.andcm at present, since we must retain a
6201 strict inverse on the predicate pair. */
6202 else if (GET_CODE (src) == AND)
6203 pflags->is_and = 1;
6204 else if (GET_CODE (src) == IOR)
6205 pflags->is_or = 1;
6206
6207 break;
6208 }
6209 }
6210
6211 /* Subroutine of rtx_needs_barrier; this function determines whether the
6212 source of a given SET rtx found in X needs a barrier. FLAGS and PRED
6213 are as in rtx_needs_barrier. */
6215
6216 static int
6217 set_src_needs_barrier (rtx x, struct reg_flags flags, int pred)
6218 {
6219 int need_barrier = 0;
6220 rtx dst;
6221 rtx src = SET_SRC (x);
6222
6223 if (GET_CODE (src) == CALL)
6224 /* We don't need to worry about the result registers that
6225 get written by a subroutine call. */
6226 return rtx_needs_barrier (src, flags, pred);
6227 else if (SET_DEST (x) == pc_rtx)
6228 {
6229 /* X is a conditional branch. */
6230 /* ??? This seems redundant, as the caller sets this bit for
6231 all JUMP_INSNs. */
6232 if (!ia64_spec_check_src_p (src))
6233 flags.is_branch = 1;
6234 return rtx_needs_barrier (src, flags, pred);
6235 }
6236
6237 if (ia64_spec_check_src_p (src))
6238 /* Avoid checking one register twice (in condition
6239 and in 'then' section) for ldc pattern. */
6240 {
6241 gcc_assert (REG_P (XEXP (src, 2)));
6242 need_barrier = rtx_needs_barrier (XEXP (src, 2), flags, pred);
6243
6244 /* We process MEM below. */
6245 src = XEXP (src, 1);
6246 }
6247
6248 need_barrier |= rtx_needs_barrier (src, flags, pred);
6249
6250 dst = SET_DEST (x);
6251 if (GET_CODE (dst) == ZERO_EXTRACT)
6252 {
6253 need_barrier |= rtx_needs_barrier (XEXP (dst, 1), flags, pred);
6254 need_barrier |= rtx_needs_barrier (XEXP (dst, 2), flags, pred);
6255 }
6256 return need_barrier;
6257 }
6258
6259 /* Handle an access to rtx X of type FLAGS using predicate register
6260 PRED. Return 1 if this access creates a dependency with an earlier
6261 instruction in the same group. */
6262
6263 static int
6264 rtx_needs_barrier (rtx x, struct reg_flags flags, int pred)
6265 {
6266 int i, j;
6267 int is_complemented = 0;
6268 int need_barrier = 0;
6269 const char *format_ptr;
6270 struct reg_flags new_flags;
6271 rtx cond;
6272
6273 if (! x)
6274 return 0;
6275
6276 new_flags = flags;
6277
6278 switch (GET_CODE (x))
6279 {
6280 case SET:
6281 update_set_flags (x, &new_flags);
6282 need_barrier = set_src_needs_barrier (x, new_flags, pred);
6283 if (GET_CODE (SET_SRC (x)) != CALL)
6284 {
6285 new_flags.is_write = 1;
6286 need_barrier |= rtx_needs_barrier (SET_DEST (x), new_flags, pred);
6287 }
6288 break;
6289
6290 case CALL:
6291 new_flags.is_write = 0;
6292 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
6293
6294 /* Avoid multiple register writes, in case this is a pattern with
6295 multiple CALL rtx. This avoids a failure in rws_access_reg. */
6296 if (! flags.is_sibcall && ! rws_insn_test (REG_AR_CFM))
6297 {
6298 new_flags.is_write = 1;
6299 need_barrier |= rws_access_regno (REG_RP, new_flags, pred);
6300 need_barrier |= rws_access_regno (AR_PFS_REGNUM, new_flags, pred);
6301 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
6302 }
6303 break;
6304
6305 case COND_EXEC:
6306 /* X is a predicated instruction. */
6307
6308 cond = COND_EXEC_TEST (x);
6309 gcc_assert (!pred);
6310 need_barrier = rtx_needs_barrier (cond, flags, 0);
6311
6312 if (GET_CODE (cond) == EQ)
6313 is_complemented = 1;
6314 cond = XEXP (cond, 0);
6315 gcc_assert (GET_CODE (cond) == REG
6316 && REGNO_REG_CLASS (REGNO (cond)) == PR_REGS);
6317 pred = REGNO (cond);
6318 if (is_complemented)
6319 ++pred;
6320
6321 need_barrier |= rtx_needs_barrier (COND_EXEC_CODE (x), flags, pred);
6322 return need_barrier;
6323
6324 case CLOBBER:
6325 case USE:
6326 /* Clobber & use are for earlier compiler-phases only. */
6327 break;
6328
6329 case ASM_OPERANDS:
6330 case ASM_INPUT:
6331 /* We always emit stop bits for traditional asms. We emit stop bits
6332 for volatile extended asms if TARGET_VOL_ASM_STOP is true. */
6333 if (GET_CODE (x) != ASM_OPERANDS
6334 || (MEM_VOLATILE_P (x) && TARGET_VOL_ASM_STOP))
6335 {
6336 /* Avoid writing the register multiple times if we have multiple
6337 asm outputs. This avoids a failure in rws_access_reg. */
6338 if (! rws_insn_test (REG_VOLATILE))
6339 {
6340 new_flags.is_write = 1;
6341 rws_access_regno (REG_VOLATILE, new_flags, pred);
6342 }
6343 return 1;
6344 }
6345
6346 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
6347 We cannot just fall through here since then we would be confused
6348 by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate a
6349 traditional asm, unlike its normal usage. */
6350
6351 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; --i)
6352 if (rtx_needs_barrier (ASM_OPERANDS_INPUT (x, i), flags, pred))
6353 need_barrier = 1;
6354 break;
6355
6356 case PARALLEL:
6357 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
6358 {
6359 rtx pat = XVECEXP (x, 0, i);
6360 switch (GET_CODE (pat))
6361 {
6362 case SET:
6363 update_set_flags (pat, &new_flags);
6364 need_barrier |= set_src_needs_barrier (pat, new_flags, pred);
6365 break;
6366
6367 case USE:
6368 case CALL:
6369 case ASM_OPERANDS:
6370 need_barrier |= rtx_needs_barrier (pat, flags, pred);
6371 break;
6372
6373 case CLOBBER:
6374 if (REG_P (XEXP (pat, 0))
6375 && extract_asm_operands (x) != NULL_RTX
6376 && REGNO (XEXP (pat, 0)) != AR_UNAT_REGNUM)
6377 {
6378 new_flags.is_write = 1;
6379 need_barrier |= rtx_needs_barrier (XEXP (pat, 0),
6380 new_flags, pred);
6381 new_flags = flags;
6382 }
6383 break;
6384
6385 case RETURN:
6386 break;
6387
6388 default:
6389 gcc_unreachable ();
6390 }
6391 }
6392 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
6393 {
6394 rtx pat = XVECEXP (x, 0, i);
6395 if (GET_CODE (pat) == SET)
6396 {
6397 if (GET_CODE (SET_SRC (pat)) != CALL)
6398 {
6399 new_flags.is_write = 1;
6400 need_barrier |= rtx_needs_barrier (SET_DEST (pat), new_flags,
6401 pred);
6402 }
6403 }
6404 else if (GET_CODE (pat) == CLOBBER || GET_CODE (pat) == RETURN)
6405 need_barrier |= rtx_needs_barrier (pat, flags, pred);
6406 }
6407 break;
6408
6409 case SUBREG:
6410 need_barrier |= rtx_needs_barrier (SUBREG_REG (x), flags, pred);
6411 break;
6412 case REG:
6413 if (REGNO (x) == AR_UNAT_REGNUM)
6414 {
6415 for (i = 0; i < 64; ++i)
6416 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + i, flags, pred);
6417 }
6418 else
6419 need_barrier = rws_access_reg (x, flags, pred);
6420 break;
6421
6422 case MEM:
6423 /* Find the regs used in memory address computation. */
6424 new_flags.is_write = 0;
6425 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
6426 break;
6427
6428 case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR:
6429 case SYMBOL_REF: case LABEL_REF: case CONST:
6430 break;
6431
6432 /* Operators with side-effects. */
6433 case POST_INC: case POST_DEC:
6434 gcc_assert (GET_CODE (XEXP (x, 0)) == REG);
6435
6436 new_flags.is_write = 0;
6437 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
6438 new_flags.is_write = 1;
6439 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
6440 break;
6441
6442 case POST_MODIFY:
6443 gcc_assert (GET_CODE (XEXP (x, 0)) == REG);
6444
6445 new_flags.is_write = 0;
6446 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
6447 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
6448 new_flags.is_write = 1;
6449 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
6450 break;
6451
6452 /* Handle common unary and binary ops for efficiency. */
6453 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
6454 case MOD: case UDIV: case UMOD: case AND: case IOR:
6455 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
6456 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
6457 case NE: case EQ: case GE: case GT: case LE:
6458 case LT: case GEU: case GTU: case LEU: case LTU:
6459 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
6460 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
6461 break;
6462
6463 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
6464 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
6465 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
6466 case SQRT: case FFS: case POPCOUNT:
6467 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
6468 break;
6469
6470 case VEC_SELECT:
6471 /* VEC_SELECT's second argument is a PARALLEL with integers that
6472 describe the elements selected. On ia64, those integers are
6473 always constants. Avoid walking the PARALLEL so that we don't
6474 get confused with "normal" parallels and then die. */
6475 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
6476 break;
6477
6478 case UNSPEC:
6479 switch (XINT (x, 1))
6480 {
6481 case UNSPEC_LTOFF_DTPMOD:
6482 case UNSPEC_LTOFF_DTPREL:
6483 case UNSPEC_DTPREL:
6484 case UNSPEC_LTOFF_TPREL:
6485 case UNSPEC_TPREL:
6486 case UNSPEC_PRED_REL_MUTEX:
6487 case UNSPEC_PIC_CALL:
6488 case UNSPEC_MF:
6489 case UNSPEC_FETCHADD_ACQ:
6490 case UNSPEC_FETCHADD_REL:
6491 case UNSPEC_BSP_VALUE:
6492 case UNSPEC_FLUSHRS:
6493 case UNSPEC_BUNDLE_SELECTOR:
6494 break;
6495
6496 case UNSPEC_GR_SPILL:
6497 case UNSPEC_GR_RESTORE:
6498 {
6499 HOST_WIDE_INT offset = INTVAL (XVECEXP (x, 0, 1));
6500 HOST_WIDE_INT bit = (offset >> 3) & 63;
6501
6502 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
6503 new_flags.is_write = (XINT (x, 1) == UNSPEC_GR_SPILL);
6504 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + bit,
6505 new_flags, pred);
6506 break;
6507 }
6508
6509 case UNSPEC_FR_SPILL:
6510 case UNSPEC_FR_RESTORE:
6511 case UNSPEC_GETF_EXP:
6512 case UNSPEC_SETF_EXP:
6513 case UNSPEC_ADDP4:
6514 case UNSPEC_FR_SQRT_RECIP_APPROX:
6515 case UNSPEC_FR_SQRT_RECIP_APPROX_RES:
6516 case UNSPEC_LDA:
6517 case UNSPEC_LDS:
6518 case UNSPEC_LDS_A:
6519 case UNSPEC_LDSA:
6520 case UNSPEC_CHKACLR:
6521 case UNSPEC_CHKS:
6522 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
6523 break;
6524
6525 case UNSPEC_FR_RECIP_APPROX:
6526 case UNSPEC_SHRP:
6527 case UNSPEC_COPYSIGN:
6528 case UNSPEC_FR_RECIP_APPROX_RES:
6529 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
6530 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
6531 break;
6532
6533 case UNSPEC_CMPXCHG_ACQ:
6534 case UNSPEC_CMPXCHG_REL:
6535 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
6536 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 2), flags, pred);
6537 break;
6538
6539 default:
6540 gcc_unreachable ();
6541 }
6542 break;
6543
6544 case UNSPEC_VOLATILE:
6545 switch (XINT (x, 1))
6546 {
6547 case UNSPECV_ALLOC:
6548 /* Alloc must always be the first instruction of a group.
6549 We force this by always returning true. */
6550 /* ??? We might get better scheduling if we explicitly check for
6551 input/local/output register dependencies, and modify the
6552 scheduler so that alloc is always reordered to the start of
6553 the current group. We could then eliminate all of the
6554 first_instruction code. */
6555 rws_access_regno (AR_PFS_REGNUM, flags, pred);
6556
6557 new_flags.is_write = 1;
6558 rws_access_regno (REG_AR_CFM, new_flags, pred);
6559 return 1;
6560
6561 case UNSPECV_SET_BSP:
6562 need_barrier = 1;
6563 break;
6564
6565 case UNSPECV_BLOCKAGE:
6566 case UNSPECV_INSN_GROUP_BARRIER:
6567 case UNSPECV_BREAK:
6568 case UNSPECV_PSAC_ALL:
6569 case UNSPECV_PSAC_NORMAL:
6570 return 0;
6571
6572 default:
6573 gcc_unreachable ();
6574 }
6575 break;
6576
6577 case RETURN:
6578 new_flags.is_write = 0;
6579 need_barrier = rws_access_regno (REG_RP, flags, pred);
6580 need_barrier |= rws_access_regno (AR_PFS_REGNUM, flags, pred);
6581
6582 new_flags.is_write = 1;
6583 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
6584 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
6585 break;
6586
6587 default:
6588 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
6589 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6590 switch (format_ptr[i])
6591 {
6592 case '0': /* unused field */
6593 case 'i': /* integer */
6594 case 'n': /* note */
6595 case 'w': /* wide integer */
6596 case 's': /* pointer to string */
6597 case 'S': /* optional pointer to string */
6598 break;
6599
6600 case 'e':
6601 if (rtx_needs_barrier (XEXP (x, i), flags, pred))
6602 need_barrier = 1;
6603 break;
6604
6605 case 'E':
6606 for (j = XVECLEN (x, i) - 1; j >= 0; --j)
6607 if (rtx_needs_barrier (XVECEXP (x, i, j), flags, pred))
6608 need_barrier = 1;
6609 break;
6610
6611 default:
6612 gcc_unreachable ();
6613 }
6614 break;
6615 }
6616 return need_barrier;
6617 }
6618
6619 /* Clear out the state for group_barrier_needed at the start of a
6620 sequence of insns. */
6621
6622 static void
6623 init_insn_group_barriers (void)
6624 {
6625 memset (rws_sum, 0, sizeof (rws_sum));
6626 first_instruction = 1;
6627 }
6628
6629 /* Given the current state, determine whether a group barrier (a stop bit) is
6630 necessary before INSN. Return nonzero if so. This modifies the state to
6631 include the effects of INSN as a side-effect. */
6632
6633 static int
6634 group_barrier_needed (rtx insn)
6635 {
6636 rtx pat;
6637 int need_barrier = 0;
6638 struct reg_flags flags;
6639
6640 memset (&flags, 0, sizeof (flags));
6641 switch (GET_CODE (insn))
6642 {
6643 case NOTE:
6644 case DEBUG_INSN:
6645 break;
6646
6647 case BARRIER:
6648 /* A barrier doesn't imply an instruction group boundary. */
6649 break;
6650
6651 case CODE_LABEL:
6652 memset (rws_insn, 0, sizeof (rws_insn));
6653 return 1;
6654
6655 case CALL_INSN:
6656 flags.is_branch = 1;
6657 flags.is_sibcall = SIBLING_CALL_P (insn);
6658 memset (rws_insn, 0, sizeof (rws_insn));
6659
6660 /* Don't bundle a call following another call. */
6661 if ((pat = prev_active_insn (insn))
6662 && GET_CODE (pat) == CALL_INSN)
6663 {
6664 need_barrier = 1;
6665 break;
6666 }
6667
6668 need_barrier = rtx_needs_barrier (PATTERN (insn), flags, 0);
6669 break;
6670
6671 case JUMP_INSN:
6672 if (!ia64_spec_check_p (insn))
6673 flags.is_branch = 1;
6674
6675 /* Don't bundle a jump following a call. */
6676 if ((pat = prev_active_insn (insn))
6677 && GET_CODE (pat) == CALL_INSN)
6678 {
6679 need_barrier = 1;
6680 break;
6681 }
6682 /* FALLTHRU */
6683
6684 case INSN:
6685 if (GET_CODE (PATTERN (insn)) == USE
6686 || GET_CODE (PATTERN (insn)) == CLOBBER)
6687 /* Don't care about USE and CLOBBER "insns"---those are used to
6688 indicate to the optimizer that it shouldn't get rid of
6689 certain operations. */
6690 break;
6691
6692 pat = PATTERN (insn);
6693
6694 /* Ug. Hack hacks hacked elsewhere. */
6695 switch (recog_memoized (insn))
6696 {
6697 /* We play dependency tricks with the epilogue in order
6698 to get proper schedules. Undo this for dv analysis. */
6699 case CODE_FOR_epilogue_deallocate_stack:
6700 case CODE_FOR_prologue_allocate_stack:
6701 pat = XVECEXP (pat, 0, 0);
6702 break;
6703
6704 /* The pattern we use for br.cloop confuses the code above.
6705 The second element of the vector is representative. */
6706 case CODE_FOR_doloop_end_internal:
6707 pat = XVECEXP (pat, 0, 1);
6708 break;
6709
6710 /* Doesn't generate code. */
6711 case CODE_FOR_pred_rel_mutex:
6712 case CODE_FOR_prologue_use:
6713 return 0;
6714
6715 default:
6716 break;
6717 }
6718
6719 memset (rws_insn, 0, sizeof (rws_insn));
6720 need_barrier = rtx_needs_barrier (pat, flags, 0);
6721
6722 /* Check to see if the previous instruction was a volatile
6723 asm. */
6724 if (! need_barrier)
6725 need_barrier = rws_access_regno (REG_VOLATILE, flags, 0);
6726
6727 break;
6728
6729 default:
6730 gcc_unreachable ();
6731 }
6732
6733 if (first_instruction && INSN_P (insn)
6734 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
6735 && GET_CODE (PATTERN (insn)) != USE
6736 && GET_CODE (PATTERN (insn)) != CLOBBER)
6737 {
6738 need_barrier = 0;
6739 first_instruction = 0;
6740 }
6741
6742 return need_barrier;
6743 }
6744
6745 /* Like group_barrier_needed, but do not clobber the current state. */
6746
6747 static int
6748 safe_group_barrier_needed (rtx insn)
6749 {
6750 int saved_first_instruction;
6751 int t;
6752
6753 saved_first_instruction = first_instruction;
6754 in_safe_group_barrier = 1;
6755
6756 t = group_barrier_needed (insn);
6757
6758 first_instruction = saved_first_instruction;
6759 in_safe_group_barrier = 0;
6760
6761 return t;
6762 }
6763
6764 /* Scan the current function and insert stop bits as necessary to
6765 eliminate dependencies. This function assumes that a final
6766 instruction scheduling pass has been run which has already
6767 inserted most of the necessary stop bits. This function only
6768 inserts new ones at basic block boundaries, since these are
6769 invisible to the scheduler. */
6770
6771 static void
6772 emit_insn_group_barriers (FILE *dump)
6773 {
6774 rtx insn;
6775 rtx last_label = 0;
6776 int insns_since_last_label = 0;
6777
6778 init_insn_group_barriers ();
6779
6780 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6781 {
6782 if (GET_CODE (insn) == CODE_LABEL)
6783 {
6784 if (insns_since_last_label)
6785 last_label = insn;
6786 insns_since_last_label = 0;
6787 }
6788 else if (GET_CODE (insn) == NOTE
6789 && NOTE_KIND (insn) == NOTE_INSN_BASIC_BLOCK)
6790 {
6791 if (insns_since_last_label)
6792 last_label = insn;
6793 insns_since_last_label = 0;
6794 }
6795 else if (GET_CODE (insn) == INSN
6796 && GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
6797 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
6798 {
6799 init_insn_group_barriers ();
6800 last_label = 0;
6801 }
6802 else if (NONDEBUG_INSN_P (insn))
6803 {
6804 insns_since_last_label = 1;
6805
6806 if (group_barrier_needed (insn))
6807 {
6808 if (last_label)
6809 {
6810 if (dump)
6811 fprintf (dump, "Emitting stop before label %d\n",
6812 INSN_UID (last_label));
6813 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), last_label);
6814 insn = last_label;
6815
6816 init_insn_group_barriers ();
6817 last_label = 0;
6818 }
6819 }
6820 }
6821 }
6822 }
6823
6824 /* Like emit_insn_group_barriers, but run if no final scheduling pass was run.
6825 This function has to emit all necessary group barriers. */
6826
6827 static void
6828 emit_all_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
6829 {
6830 rtx insn;
6831
6832 init_insn_group_barriers ();
6833
6834 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6835 {
6836 if (GET_CODE (insn) == BARRIER)
6837 {
6838 rtx last = prev_active_insn (insn);
6839
6840 if (! last)
6841 continue;
6842 if (GET_CODE (last) == JUMP_INSN
6843 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
6844 last = prev_active_insn (last);
6845 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
6846 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
6847
6848 init_insn_group_barriers ();
6849 }
6850 else if (NONDEBUG_INSN_P (insn))
6851 {
6852 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
6853 init_insn_group_barriers ();
6854 else if (group_barrier_needed (insn))
6855 {
6856 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
6857 init_insn_group_barriers ();
6858 group_barrier_needed (insn);
6859 }
6860 }
6861 }
6862 }
6863
6864 \f
6865
6866 /* Instruction scheduling support. */
6867
6868 #define NR_BUNDLES 10
6869
6870 /* A list of names of all available bundles. */
6871
6872 static const char *bundle_name [NR_BUNDLES] =
6873 {
6874 ".mii",
6875 ".mmi",
6876 ".mfi",
6877 ".mmf",
6878 #if NR_BUNDLES == 10
6879 ".bbb",
6880 ".mbb",
6881 #endif
6882 ".mib",
6883 ".mmb",
6884 ".mfb",
6885 ".mlx"
6886 };
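/* Each name spells out the template's three syllable types -- e.g. ".mfi"
   is one M-unit (memory), one F-unit (floating point) and one I-unit
   (integer) slot, while ".mlx" pairs an M slot with a single 64-bit L+X
   slot used for movl and long branches.  */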
6887
6888 /* Nonzero if we should insert stop bits into the schedule. */
6889
6890 int ia64_final_schedule = 0;
6891
6892 /* Codes of the corresponding queried units: */
6893
6894 static int _0mii_, _0mmi_, _0mfi_, _0mmf_;
6895 static int _0bbb_, _0mbb_, _0mib_, _0mmb_, _0mfb_, _0mlx_;
6896
6897 static int _1mii_, _1mmi_, _1mfi_, _1mmf_;
6898 static int _1bbb_, _1mbb_, _1mib_, _1mmb_, _1mfb_, _1mlx_;
6899
6900 static int pos_1, pos_2, pos_3, pos_4, pos_5, pos_6;
6901
6902 /* The following variable value is an insn group barrier. */
6903
6904 static rtx dfa_stop_insn;
6905
6906 /* The following variable value is the last issued insn. */
6907
6908 static rtx last_scheduled_insn;
6909
6910 /* The following variable value is pointer to a DFA state used as
6911 temporary variable. */
6912
6913 static state_t temp_dfa_state = NULL;
6914
6915 /* The following variable value is DFA state after issuing the last
6916 insn. */
6917
6918 static state_t prev_cycle_state = NULL;
6919
6920 /* The following array element values are TRUE if the corresponding
6921 insn requires stop bits to be added before it. */
6922
6923 static char *stops_p = NULL;
6924
6925 /* The following variable is used to set up the array mentioned above. */
6926
6927 static int stop_before_p = 0;
6928
6929 /* The following variable value is the length of the arrays `clocks' and
6930 `add_cycles'. */
6931
6932 static int clocks_length;
6933
6934 /* The following variable value is the number of data speculations in progress. */
6935 static int pending_data_specs = 0;
6936
6937 /* Number of memory references on current and three future processor cycles. */
6938 static char mem_ops_in_group[4];
6939
6940 /* Number of current processor cycle (from scheduler's point of view). */
6941 static int current_cycle;
6942
6943 static rtx ia64_single_set (rtx);
6944 static void ia64_emit_insn_before (rtx, rtx);
6945
6946 /* Map a bundle number to its pseudo-op. */
6947
6948 const char *
6949 get_bundle_name (int b)
6950 {
6951 return bundle_name[b];
6952 }
6953
6954
6955 /* Return the maximum number of instructions a cpu can issue. */
6956
6957 static int
6958 ia64_issue_rate (void)
6959 {
6960 return 6;
6961 }
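/* (Six corresponds to the two three-syllable bundles that both Itanium and
   Itanium 2 can disperse per clock.)  */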
6962
6963 /* Helper function - like single_set, but look inside COND_EXEC. */
6964
6965 static rtx
6966 ia64_single_set (rtx insn)
6967 {
6968 rtx x = PATTERN (insn), ret;
6969 if (GET_CODE (x) == COND_EXEC)
6970 x = COND_EXEC_CODE (x);
6971 if (GET_CODE (x) == SET)
6972 return x;
6973
6974 /* Special case here prologue_allocate_stack and epilogue_deallocate_stack.
6975 Although they are not classical single sets, the second set is there just
6976 to protect it from moving past FP-relative stack accesses. */
6977 switch (recog_memoized (insn))
6978 {
6979 case CODE_FOR_prologue_allocate_stack:
6980 case CODE_FOR_epilogue_deallocate_stack:
6981 ret = XVECEXP (x, 0, 0);
6982 break;
6983
6984 default:
6985 ret = single_set_2 (insn, x);
6986 break;
6987 }
6988
6989 return ret;
6990 }
6991
6992 /* Adjust the cost of a scheduling dependency.
6993 Return the new cost of a dependency of type DEP_TYPE or INSN on DEP_INSN.
6994 COST is the current cost, DW is dependency weakness. */
6995 static int
6996 ia64_adjust_cost_2 (rtx insn, int dep_type1, rtx dep_insn, int cost, dw_t dw)
6997 {
6998 enum reg_note dep_type = (enum reg_note) dep_type1;
6999 enum attr_itanium_class dep_class;
7000 enum attr_itanium_class insn_class;
7001
7002 insn_class = ia64_safe_itanium_class (insn);
7003 dep_class = ia64_safe_itanium_class (dep_insn);
7004
7005 /* Treat true memory dependencies separately. Ignore apparent true
7006 dependence between store and call (call has a MEM inside a SYMBOL_REF). */
7007 if (dep_type == REG_DEP_TRUE
7008 && (dep_class == ITANIUM_CLASS_ST || dep_class == ITANIUM_CLASS_STF)
7009 && (insn_class == ITANIUM_CLASS_BR || insn_class == ITANIUM_CLASS_SCALL))
7010 return 0;
7011
7012 if (dw == MIN_DEP_WEAK)
7013 /* Store and load are likely to alias; use a higher cost to avoid a stall. */
7014 return PARAM_VALUE (PARAM_SCHED_MEM_TRUE_DEP_COST);
7015 else if (dw > MIN_DEP_WEAK)
7016 {
7017 /* Store and load are less likely to alias. */
7018 if (mflag_sched_fp_mem_deps_zero_cost && dep_class == ITANIUM_CLASS_STF)
7019 /* Assume there will be no cache conflict for floating-point data.
7020 For integer data, L1 conflict penalty is huge (17 cycles), so we
7021 never assume it will not cause a conflict. */
7022 return 0;
7023 else
7024 return cost;
7025 }
7026
7027 if (dep_type != REG_DEP_OUTPUT)
7028 return cost;
7029
7030 if (dep_class == ITANIUM_CLASS_ST || dep_class == ITANIUM_CLASS_STF
7031 || insn_class == ITANIUM_CLASS_ST || insn_class == ITANIUM_CLASS_STF)
7032 return 0;
7033
7034 return cost;
7035 }
7036
7037 /* Like emit_insn_before, but skip cycle_display notes.
7038 ??? When cycle display notes are implemented, update this. */
7039
7040 static void
7041 ia64_emit_insn_before (rtx insn, rtx before)
7042 {
7043 emit_insn_before (insn, before);
7044 }
7045
7046 /* The following function marks insns that produce addresses for load
7047 and store insns. Such insns will be placed into M slots because this
7048 decreases latency for Itanium1 (see function
7049 `ia64_produce_address_p' and the DFA descriptions). */
7050
7051 static void
7052 ia64_dependencies_evaluation_hook (rtx head, rtx tail)
7053 {
7054 rtx insn, next, next_tail;
7055
7056 /* Before reload, which_alternative is not set, which means that
7057 ia64_safe_itanium_class will produce wrong results for (at least)
7058 move instructions. */
7059 if (!reload_completed)
7060 return;
7061
7062 next_tail = NEXT_INSN (tail);
7063 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
7064 if (INSN_P (insn))
7065 insn->call = 0;
7066 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
7067 if (INSN_P (insn)
7068 && ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IALU)
7069 {
7070 sd_iterator_def sd_it;
7071 dep_t dep;
7072 bool has_mem_op_consumer_p = false;
7073
7074 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
7075 {
7076 enum attr_itanium_class c;
7077
7078 if (DEP_TYPE (dep) != REG_DEP_TRUE)
7079 continue;
7080
7081 next = DEP_CON (dep);
7082 c = ia64_safe_itanium_class (next);
7083 if ((c == ITANIUM_CLASS_ST
7084 || c == ITANIUM_CLASS_STF)
7085 && ia64_st_address_bypass_p (insn, next))
7086 {
7087 has_mem_op_consumer_p = true;
7088 break;
7089 }
7090 else if ((c == ITANIUM_CLASS_LD
7091 || c == ITANIUM_CLASS_FLD
7092 || c == ITANIUM_CLASS_FLDP)
7093 && ia64_ld_address_bypass_p (insn, next))
7094 {
7095 has_mem_op_consumer_p = true;
7096 break;
7097 }
7098 }
7099
7100 insn->call = has_mem_op_consumer_p;
7101 }
7102 }
7103
7104 /* We're beginning a new block. Initialize data structures as necessary. */
7105
7106 static void
7107 ia64_sched_init (FILE *dump ATTRIBUTE_UNUSED,
7108 int sched_verbose ATTRIBUTE_UNUSED,
7109 int max_ready ATTRIBUTE_UNUSED)
7110 {
7111 #ifdef ENABLE_CHECKING
7112 rtx insn;
7113
7114 if (!sel_sched_p () && reload_completed)
7115 for (insn = NEXT_INSN (current_sched_info->prev_head);
7116 insn != current_sched_info->next_tail;
7117 insn = NEXT_INSN (insn))
7118 gcc_assert (!SCHED_GROUP_P (insn));
7119 #endif
7120 last_scheduled_insn = NULL_RTX;
7121 init_insn_group_barriers ();
7122
7123 current_cycle = 0;
7124 memset (mem_ops_in_group, 0, sizeof (mem_ops_in_group));
7125 }
7126
7127 /* We're beginning a scheduling pass. Check assertion. */
7128
7129 static void
7130 ia64_sched_init_global (FILE *dump ATTRIBUTE_UNUSED,
7131 int sched_verbose ATTRIBUTE_UNUSED,
7132 int max_ready ATTRIBUTE_UNUSED)
7133 {
7134 gcc_assert (pending_data_specs == 0);
7135 }
7136
7137 /* Scheduling pass is now finished. Free/reset static variable. */
7138 static void
7139 ia64_sched_finish_global (FILE *dump ATTRIBUTE_UNUSED,
7140 int sched_verbose ATTRIBUTE_UNUSED)
7141 {
7142 gcc_assert (pending_data_specs == 0);
7143 }
7144
7145 /* Return TRUE if INSN is a load (either normal or speculative, but not a
7146 speculation check), FALSE otherwise. */
7147 static bool
7148 is_load_p (rtx insn)
7149 {
7150 enum attr_itanium_class insn_class = ia64_safe_itanium_class (insn);
7151
7152 return
7153 ((insn_class == ITANIUM_CLASS_LD || insn_class == ITANIUM_CLASS_FLD)
7154 && get_attr_check_load (insn) == CHECK_LOAD_NO);
7155 }
7156
7157 /* If INSN is a memory reference, memoize it in MEM_OPS_IN_GROUP global array
7158 (taking into account the 3-cycle cache reference postponing for stores: Intel
7159 Itanium 2 Reference Manual for Software Development and Optimization,
7160 6.7.3.1). */
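/* For instance, a store issued on cycle 5 is charged to bucket
   (5 + 3) % 4 == 0 -- the bucket cycle 8 maps to, when its cache reference
   actually happens -- while loads are charged to the current cycle's
   bucket directly.  */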
7161 static void
7162 record_memory_reference (rtx insn)
7163 {
7164 enum attr_itanium_class insn_class = ia64_safe_itanium_class (insn);
7165
7166 switch (insn_class) {
7167 case ITANIUM_CLASS_FLD:
7168 case ITANIUM_CLASS_LD:
7169 mem_ops_in_group[current_cycle % 4]++;
7170 break;
7171 case ITANIUM_CLASS_STF:
7172 case ITANIUM_CLASS_ST:
7173 mem_ops_in_group[(current_cycle + 3) % 4]++;
7174 break;
7175 default:;
7176 }
7177 }
7178
7179 /* We are about to begin issuing insns for this clock cycle.
7180 Override the default sort algorithm to better slot instructions. */
7181
7182 static int
7183 ia64_dfa_sched_reorder (FILE *dump, int sched_verbose, rtx *ready,
7184 int *pn_ready, int clock_var,
7185 int reorder_type)
7186 {
7187 int n_asms;
7188 int n_ready = *pn_ready;
7189 rtx *e_ready = ready + n_ready;
7190 rtx *insnp;
7191
7192 if (sched_verbose)
7193 fprintf (dump, "// ia64_dfa_sched_reorder (type %d):\n", reorder_type);
7194
7195 if (reorder_type == 0)
7196 {
7197 /* First, move all USEs, CLOBBERs and other crud out of the way. */
7198 n_asms = 0;
7199 for (insnp = ready; insnp < e_ready; insnp++)
7200 if (insnp < e_ready)
7201 {
7202 rtx insn = *insnp;
7203 enum attr_type t = ia64_safe_type (insn);
7204 if (t == TYPE_UNKNOWN)
7205 {
7206 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
7207 || asm_noperands (PATTERN (insn)) >= 0)
7208 {
7209 rtx lowest = ready[n_asms];
7210 ready[n_asms] = insn;
7211 *insnp = lowest;
7212 n_asms++;
7213 }
7214 else
7215 {
7216 rtx highest = ready[n_ready - 1];
7217 ready[n_ready - 1] = insn;
7218 *insnp = highest;
7219 return 1;
7220 }
7221 }
7222 }
7223
7224 if (n_asms < n_ready)
7225 {
7226 /* Some normal insns to process. Skip the asms. */
7227 ready += n_asms;
7228 n_ready -= n_asms;
7229 }
7230 else if (n_ready > 0)
7231 return 1;
7232 }
7233
7234 if (ia64_final_schedule)
7235 {
7236 int deleted = 0;
7237 int nr_need_stop = 0;
7238
7239 for (insnp = ready; insnp < e_ready; insnp++)
7240 if (safe_group_barrier_needed (*insnp))
7241 nr_need_stop++;
7242
7243 if (reorder_type == 1 && n_ready == nr_need_stop)
7244 return 0;
7245 if (reorder_type == 0)
7246 return 1;
7247 insnp = e_ready;
7248 /* Move down everything that needs a stop bit, preserving
7249 relative order. */
7250 while (insnp-- > ready + deleted)
7251 while (insnp >= ready + deleted)
7252 {
7253 rtx insn = *insnp;
7254 if (! safe_group_barrier_needed (insn))
7255 break;
7256 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
7257 *ready = insn;
7258 deleted++;
7259 }
7260 n_ready -= deleted;
7261 ready += deleted;
7262 }
7263
7264 current_cycle = clock_var;
7265 if (reload_completed && mem_ops_in_group[clock_var % 4] >= ia64_max_memory_insns)
7266 {
7267 int moved = 0;
7268
7269 insnp = e_ready;
7270 /* Move down loads/stores, preserving relative order. */
7271 while (insnp-- > ready + moved)
7272 while (insnp >= ready + moved)
7273 {
7274 rtx insn = *insnp;
7275 if (! is_load_p (insn))
7276 break;
7277 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
7278 *ready = insn;
7279 moved++;
7280 }
7281 n_ready -= moved;
7282 ready += moved;
7283 }
7284
7285 return 1;
7286 }
7287
7288 /* We are about to begin issuing insns for this clock cycle. Override
7289 the default sort algorithm to better slot instructions. */
7290
7291 static int
7292 ia64_sched_reorder (FILE *dump, int sched_verbose, rtx *ready, int *pn_ready,
7293 int clock_var)
7294 {
7295 return ia64_dfa_sched_reorder (dump, sched_verbose, ready,
7296 pn_ready, clock_var, 0);
7297 }
7298
7299 /* Like ia64_sched_reorder, but called after issuing each insn.
7300 Override the default sort algorithm to better slot instructions. */
7301
7302 static int
7303 ia64_sched_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
7304 int sched_verbose ATTRIBUTE_UNUSED, rtx *ready,
7305 int *pn_ready, int clock_var)
7306 {
7307 return ia64_dfa_sched_reorder (dump, sched_verbose, ready, pn_ready,
7308 clock_var, 1);
7309 }
7310
7311 /* We are about to issue INSN. Return the number of insns left on the
7312 ready queue that can be issued this cycle. */
7313
7314 static int
7315 ia64_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
7316 int sched_verbose ATTRIBUTE_UNUSED,
7317 rtx insn ATTRIBUTE_UNUSED,
7318 int can_issue_more ATTRIBUTE_UNUSED)
7319 {
7320 if (sched_deps_info->generate_spec_deps && !sel_sched_p ())
7321 /* Modulo scheduling does not extend h_i_d when emitting
7322 new instructions. Don't use h_i_d if we don't have to. */
7323 {
7324 if (DONE_SPEC (insn) & BEGIN_DATA)
7325 pending_data_specs++;
7326 if (CHECK_SPEC (insn) & BEGIN_DATA)
7327 pending_data_specs--;
7328 }
7329
7330 if (DEBUG_INSN_P (insn))
7331 return 1;
7332
7333 last_scheduled_insn = insn;
7334 memcpy (prev_cycle_state, curr_state, dfa_state_size);
7335 if (reload_completed)
7336 {
7337 int needed = group_barrier_needed (insn);
7338
7339 gcc_assert (!needed);
7340 if (GET_CODE (insn) == CALL_INSN)
7341 init_insn_group_barriers ();
7342 stops_p [INSN_UID (insn)] = stop_before_p;
7343 stop_before_p = 0;
7344
7345 record_memory_reference (insn);
7346 }
7347 return 1;
7348 }
7349
7350 /* We are choosing insn from the ready queue. Return nonzero if INSN
7351 can be chosen. */
7352
7353 static int
7354 ia64_first_cycle_multipass_dfa_lookahead_guard (rtx insn)
7355 {
7356 gcc_assert (insn && INSN_P (insn));
7357 return ((!reload_completed
7358 || !safe_group_barrier_needed (insn))
7359 && ia64_first_cycle_multipass_dfa_lookahead_guard_spec (insn)
7360 && (!mflag_sched_mem_insns_hard_limit
7361 || !is_load_p (insn)
7362 || mem_ops_in_group[current_cycle % 4] < ia64_max_memory_insns));
7363 }
7364
7365 /* We are choosing insn from the ready queue. Return nonzero if INSN
7366 can be chosen. */
7367
7368 static bool
7369 ia64_first_cycle_multipass_dfa_lookahead_guard_spec (const_rtx insn)
7370 {
7371 gcc_assert (insn && INSN_P (insn));
7372 /* The ALAT has 32 entries. Since we perform conservative data speculation,
7373 we keep the ALAT half-empty. */
7374 return (pending_data_specs < 16
7375 || !(TODO_SPEC (insn) & BEGIN_DATA));
7376 }
7377
7378 /* The following variable value is pseudo-insn used by the DFA insn
7379 scheduler to change the DFA state when the simulated clock is
7380 increased. */
7381
7382 static rtx dfa_pre_cycle_insn;
7383
7384 /* Returns 1 when a meaningful insn was scheduled between the last group
7385 barrier and LAST. */
7386 static int
7387 scheduled_good_insn (rtx last)
7388 {
7389 if (last && recog_memoized (last) >= 0)
7390 return 1;
7391
7392 for ( ;
7393 last != NULL && !NOTE_INSN_BASIC_BLOCK_P (last)
7394 && !stops_p[INSN_UID (last)];
7395 last = PREV_INSN (last))
7396 /* We could hit a NOTE_INSN_DELETED here which is actually outside
7397 the ebb we're scheduling. */
7398 if (INSN_P (last) && recog_memoized (last) >= 0)
7399 return 1;
7400
7401 return 0;
7402 }
7403
7404 /* We are about to begin issuing INSN. Return nonzero if we cannot
7405 issue it on the given cycle CLOCK; clear *SORT_P if the ready queue
7406 should not be sorted on the next clock start. */
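/* In outline, the code below records a stop bit before INSN when, after
   reload, INSN needs a group barrier (or, with
   mflag_sched_stop_bits_after_every_cycle, the simulated cycle has advanced
   past a meaningful insn), and also whenever the previously scheduled insn
   was a call or an asm.  */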
7407
7408 static int
7409 ia64_dfa_new_cycle (FILE *dump, int verbose, rtx insn, int last_clock,
7410 int clock, int *sort_p)
7411 {
7412 gcc_assert (insn && INSN_P (insn));
7413
7414 if (DEBUG_INSN_P (insn))
7415 return 0;
7416
7417 /* When a group barrier is needed for insn, last_scheduled_insn
7418 should be set. */
7419 gcc_assert (!(reload_completed && safe_group_barrier_needed (insn))
7420 || last_scheduled_insn);
7421
7422 if ((reload_completed
7423 && (safe_group_barrier_needed (insn)
7424 || (mflag_sched_stop_bits_after_every_cycle
7425 && last_clock != clock
7426 && last_scheduled_insn
7427 && scheduled_good_insn (last_scheduled_insn))))
7428 || (last_scheduled_insn
7429 && (GET_CODE (last_scheduled_insn) == CALL_INSN
7430 || GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
7431 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)))
7432 {
7433 init_insn_group_barriers ();
7434
7435 if (verbose && dump)
7436 fprintf (dump, "// Stop should be before %d%s\n", INSN_UID (insn),
7437 last_clock == clock ? " + cycle advance" : "");
7438
7439 stop_before_p = 1;
7440 current_cycle = clock;
7441 mem_ops_in_group[current_cycle % 4] = 0;
7442
7443 if (last_clock == clock)
7444 {
7445 state_transition (curr_state, dfa_stop_insn);
7446 if (TARGET_EARLY_STOP_BITS)
7447 *sort_p = (last_scheduled_insn == NULL_RTX
7448 || GET_CODE (last_scheduled_insn) != CALL_INSN);
7449 else
7450 *sort_p = 0;
7451 return 1;
7452 }
7453
7454 if (last_scheduled_insn)
7455 {
7456 if (GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
7457 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)
7458 state_reset (curr_state);
7459 else
7460 {
7461 memcpy (curr_state, prev_cycle_state, dfa_state_size);
7462 state_transition (curr_state, dfa_stop_insn);
7463 state_transition (curr_state, dfa_pre_cycle_insn);
7464 state_transition (curr_state, NULL);
7465 }
7466 }
7467 }
7468 return 0;
7469 }
7470
7471 /* Implement targetm.sched.h_i_d_extended hook.
7472 Extend internal data structures. */
7473 static void
7474 ia64_h_i_d_extended (void)
7475 {
7476 if (stops_p != NULL)
7477 {
7478 int new_clocks_length = get_max_uid () * 3 / 2;
7479 stops_p = (char *) xrecalloc (stops_p, new_clocks_length, clocks_length, 1);
7480 clocks_length = new_clocks_length;
7481 }
7482 }
7483 \f
7484
7485 /* This structure describes the data used by the backend to guide scheduling.
7486 When the current scheduling point is switched, this data should be saved
7487 and restored later, if the scheduler returns to this point. */
7488 struct _ia64_sched_context
7489 {
7490 state_t prev_cycle_state;
7491 rtx last_scheduled_insn;
7492 struct reg_write_state rws_sum[NUM_REGS];
7493 struct reg_write_state rws_insn[NUM_REGS];
7494 int first_instruction;
7495 int pending_data_specs;
7496 int current_cycle;
7497 char mem_ops_in_group[4];
7498 };
7499 typedef struct _ia64_sched_context *ia64_sched_context_t;
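/* The routines below implement allocation, initialization, save/restore and
   release of this context; they are presumably registered as the target's
   scheduling-context hooks elsewhere in this file, so the selective
   scheduler can save the state above when switching scheduling points and
   restore it later.  */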
7500
7501 /* Allocates a scheduling context. */
7502 static void *
7503 ia64_alloc_sched_context (void)
7504 {
7505 return xmalloc (sizeof (struct _ia64_sched_context));
7506 }
7507
7508 /* Initializes the _SC context with clean data, if CLEAN_P, and from
7509 the global context otherwise. */
7510 static void
7511 ia64_init_sched_context (void *_sc, bool clean_p)
7512 {
7513 ia64_sched_context_t sc = (ia64_sched_context_t) _sc;
7514
7515 sc->prev_cycle_state = xmalloc (dfa_state_size);
7516 if (clean_p)
7517 {
7518 state_reset (sc->prev_cycle_state);
7519 sc->last_scheduled_insn = NULL_RTX;
7520 memset (sc->rws_sum, 0, sizeof (rws_sum));
7521 memset (sc->rws_insn, 0, sizeof (rws_insn));
7522 sc->first_instruction = 1;
7523 sc->pending_data_specs = 0;
7524 sc->current_cycle = 0;
7525 memset (sc->mem_ops_in_group, 0, sizeof (mem_ops_in_group));
7526 }
7527 else
7528 {
7529 memcpy (sc->prev_cycle_state, prev_cycle_state, dfa_state_size);
7530 sc->last_scheduled_insn = last_scheduled_insn;
7531 memcpy (sc->rws_sum, rws_sum, sizeof (rws_sum));
7532 memcpy (sc->rws_insn, rws_insn, sizeof (rws_insn));
7533 sc->first_instruction = first_instruction;
7534 sc->pending_data_specs = pending_data_specs;
7535 sc->current_cycle = current_cycle;
7536 memcpy (sc->mem_ops_in_group, mem_ops_in_group, sizeof (mem_ops_in_group));
7537 }
7538 }
7539
7540 /* Sets the global scheduling context to the one pointed to by _SC. */
7541 static void
7542 ia64_set_sched_context (void *_sc)
7543 {
7544 ia64_sched_context_t sc = (ia64_sched_context_t) _sc;
7545
7546 gcc_assert (sc != NULL);
7547
7548 memcpy (prev_cycle_state, sc->prev_cycle_state, dfa_state_size);
7549 last_scheduled_insn = sc->last_scheduled_insn;
7550 memcpy (rws_sum, sc->rws_sum, sizeof (rws_sum));
7551 memcpy (rws_insn, sc->rws_insn, sizeof (rws_insn));
7552 first_instruction = sc->first_instruction;
7553 pending_data_specs = sc->pending_data_specs;
7554 current_cycle = sc->current_cycle;
7555 memcpy (mem_ops_in_group, sc->mem_ops_in_group, sizeof (mem_ops_in_group));
7556 }
7557
7558 /* Clears the data in the _SC scheduling context. */
7559 static void
7560 ia64_clear_sched_context (void *_sc)
7561 {
7562 ia64_sched_context_t sc = (ia64_sched_context_t) _sc;
7563
7564 free (sc->prev_cycle_state);
7565 sc->prev_cycle_state = NULL;
7566 }
7567
7568 /* Frees the _SC scheduling context. */
7569 static void
7570 ia64_free_sched_context (void *_sc)
7571 {
7572 gcc_assert (_sc != NULL);
7573
7574 free (_sc);
7575 }
7576
7577 typedef rtx (* gen_func_t) (rtx, rtx);
7578
7579 /* Return a function that will generate a load of mode MODE_NO
7580 with speculation types TS. */
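/* Roughly: BEGIN_DATA alone selects the advanced-load (ld.a) patterns,
   BEGIN_CONTROL alone a speculative-load (ld.s) variant, both bits together
   the combined (ld.sa) patterns, and TS == 0 the ordinary move patterns;
   see the table selection at the end of this function.  */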
7581 static gen_func_t
7582 get_spec_load_gen_function (ds_t ts, int mode_no)
7583 {
7584 static gen_func_t gen_ld_[] = {
7585 gen_movbi,
7586 gen_movqi_internal,
7587 gen_movhi_internal,
7588 gen_movsi_internal,
7589 gen_movdi_internal,
7590 gen_movsf_internal,
7591 gen_movdf_internal,
7592 gen_movxf_internal,
7593 gen_movti_internal,
7594 gen_zero_extendqidi2,
7595 gen_zero_extendhidi2,
7596 gen_zero_extendsidi2,
7597 };
7598
7599 static gen_func_t gen_ld_a[] = {
7600 gen_movbi_advanced,
7601 gen_movqi_advanced,
7602 gen_movhi_advanced,
7603 gen_movsi_advanced,
7604 gen_movdi_advanced,
7605 gen_movsf_advanced,
7606 gen_movdf_advanced,
7607 gen_movxf_advanced,
7608 gen_movti_advanced,
7609 gen_zero_extendqidi2_advanced,
7610 gen_zero_extendhidi2_advanced,
7611 gen_zero_extendsidi2_advanced,
7612 };
7613 static gen_func_t gen_ld_s[] = {
7614 gen_movbi_speculative,
7615 gen_movqi_speculative,
7616 gen_movhi_speculative,
7617 gen_movsi_speculative,
7618 gen_movdi_speculative,
7619 gen_movsf_speculative,
7620 gen_movdf_speculative,
7621 gen_movxf_speculative,
7622 gen_movti_speculative,
7623 gen_zero_extendqidi2_speculative,
7624 gen_zero_extendhidi2_speculative,
7625 gen_zero_extendsidi2_speculative,
7626 };
7627 static gen_func_t gen_ld_sa[] = {
7628 gen_movbi_speculative_advanced,
7629 gen_movqi_speculative_advanced,
7630 gen_movhi_speculative_advanced,
7631 gen_movsi_speculative_advanced,
7632 gen_movdi_speculative_advanced,
7633 gen_movsf_speculative_advanced,
7634 gen_movdf_speculative_advanced,
7635 gen_movxf_speculative_advanced,
7636 gen_movti_speculative_advanced,
7637 gen_zero_extendqidi2_speculative_advanced,
7638 gen_zero_extendhidi2_speculative_advanced,
7639 gen_zero_extendsidi2_speculative_advanced,
7640 };
7641 static gen_func_t gen_ld_s_a[] = {
7642 gen_movbi_speculative_a,
7643 gen_movqi_speculative_a,
7644 gen_movhi_speculative_a,
7645 gen_movsi_speculative_a,
7646 gen_movdi_speculative_a,
7647 gen_movsf_speculative_a,
7648 gen_movdf_speculative_a,
7649 gen_movxf_speculative_a,
7650 gen_movti_speculative_a,
7651 gen_zero_extendqidi2_speculative_a,
7652 gen_zero_extendhidi2_speculative_a,
7653 gen_zero_extendsidi2_speculative_a,
7654 };
7655
7656 gen_func_t *gen_ld;
7657
7658 if (ts & BEGIN_DATA)
7659 {
7660 if (ts & BEGIN_CONTROL)
7661 gen_ld = gen_ld_sa;
7662 else
7663 gen_ld = gen_ld_a;
7664 }
7665 else if (ts & BEGIN_CONTROL)
7666 {
7667 if ((spec_info->flags & SEL_SCHED_SPEC_DONT_CHECK_CONTROL)
7668 || ia64_needs_block_p (ts))
7669 gen_ld = gen_ld_s;
7670 else
7671 gen_ld = gen_ld_s_a;
7672 }
7673 else if (ts == 0)
7674 gen_ld = gen_ld_;
7675 else
7676 gcc_unreachable ();
7677
7678 return gen_ld[mode_no];
7679 }
7680
7681 /* Constants that help map 'enum machine_mode' to int. */
7682 enum SPEC_MODES
7683 {
7684 SPEC_MODE_INVALID = -1,
7685 SPEC_MODE_FIRST = 0,
7686 SPEC_MODE_FOR_EXTEND_FIRST = 1,
7687 SPEC_MODE_FOR_EXTEND_LAST = 3,
7688 SPEC_MODE_LAST = 8
7689 };
7690
7691 enum
7692 {
7693 /* Offset to reach ZERO_EXTEND patterns. */
7694 SPEC_GEN_EXTEND_OFFSET = SPEC_MODE_LAST - SPEC_MODE_FOR_EXTEND_FIRST + 1
7695 };
7696
7697 /* Return index of the MODE. */
7698 static int
7699 ia64_mode_to_int (enum machine_mode mode)
7700 {
7701 switch (mode)
7702 {
7703 case BImode: return 0; /* SPEC_MODE_FIRST */
7704 case QImode: return 1; /* SPEC_MODE_FOR_EXTEND_FIRST */
7705 case HImode: return 2;
7706 case SImode: return 3; /* SPEC_MODE_FOR_EXTEND_LAST */
7707 case DImode: return 4;
7708 case SFmode: return 5;
7709 case DFmode: return 6;
7710 case XFmode: return 7;
7711 case TImode:
7712 /* ??? This mode needs testing. Bypasses for ldfp8 instruction are not
7713 mentioned in itanium[12].md. Predicate fp_register_operand also
7714 needs to be defined. Bottom line: better disable for now. */
7715 return SPEC_MODE_INVALID;
7716 default: return SPEC_MODE_INVALID;
7717 }
7718 }
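/* Worked example: a zero-extending QImode load maps to index
   1 (QImode) + SPEC_GEN_EXTEND_OFFSET (== 8 - 1 + 1 == 8) == 9, which selects
   the gen_zero_extendqidi2* entries of the gen_ld_* tables above; the offset
   is applied in get_mode_no_for_insn below when the destination mode differs
   from the memory mode.  */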
7719
7720 /* Provide information about speculation capabilities. */
7721 static void
7722 ia64_set_sched_flags (spec_info_t spec_info)
7723 {
7724 unsigned int *flags = &(current_sched_info->flags);
7725
7726 if (*flags & SCHED_RGN
7727 || *flags & SCHED_EBB
7728 || *flags & SEL_SCHED)
7729 {
7730 int mask = 0;
7731
7732 if ((mflag_sched_br_data_spec && !reload_completed && optimize > 0)
7733 || (mflag_sched_ar_data_spec && reload_completed))
7734 {
7735 mask |= BEGIN_DATA;
7736
7737 if (!sel_sched_p ()
7738 && ((mflag_sched_br_in_data_spec && !reload_completed)
7739 || (mflag_sched_ar_in_data_spec && reload_completed)))
7740 mask |= BE_IN_DATA;
7741 }
7742
7743 if (mflag_sched_control_spec
7744 && (!sel_sched_p ()
7745 || reload_completed))
7746 {
7747 mask |= BEGIN_CONTROL;
7748
7749 if (!sel_sched_p () && mflag_sched_in_control_spec)
7750 mask |= BE_IN_CONTROL;
7751 }
7752
7753 spec_info->mask = mask;
7754
7755 if (mask)
7756 {
7757 *flags |= USE_DEPS_LIST | DO_SPECULATION;
7758
7759 if (mask & BE_IN_SPEC)
7760 *flags |= NEW_BBS;
7761
7762 spec_info->flags = 0;
7763
7764 if ((mask & DATA_SPEC) && mflag_sched_prefer_non_data_spec_insns)
7765 spec_info->flags |= PREFER_NON_DATA_SPEC;
7766
7767 if (mask & CONTROL_SPEC)
7768 {
7769 if (mflag_sched_prefer_non_control_spec_insns)
7770 spec_info->flags |= PREFER_NON_CONTROL_SPEC;
7771
7772 if (sel_sched_p () && mflag_sel_sched_dont_check_control_spec)
7773 spec_info->flags |= SEL_SCHED_SPEC_DONT_CHECK_CONTROL;
7774 }
7775
7776 if (sched_verbose >= 1)
7777 spec_info->dump = sched_dump;
7778 else
7779 spec_info->dump = 0;
7780
7781 if (mflag_sched_count_spec_in_critical_path)
7782 spec_info->flags |= COUNT_SPEC_IN_CRITICAL_PATH;
7783 }
7784 }
7785 else
7786 spec_info->mask = 0;
7787 }
7788
7789 /* If INSN is an appropriate load return its mode.
7790 Return -1 otherwise. */
7791 static int
7792 get_mode_no_for_insn (rtx insn)
7793 {
7794 rtx reg, mem, mode_rtx;
7795 int mode_no;
7796 bool extend_p;
7797
7798 extract_insn_cached (insn);
7799
7800 /* We use WHICH_ALTERNATIVE only after reload. This will
7801 guarantee that reload won't touch a speculative insn. */
7802
7803 if (recog_data.n_operands != 2)
7804 return -1;
7805
7806 reg = recog_data.operand[0];
7807 mem = recog_data.operand[1];
7808
7809 /* We should use MEM's mode since REG's mode in presence of
7810 ZERO_EXTEND will always be DImode. */
7811 if (get_attr_speculable1 (insn) == SPECULABLE1_YES)
7812 /* Process non-speculative ld. */
7813 {
7814 if (!reload_completed)
7815 {
7816 /* Do not speculate into regs like ar.lc. */
7817 if (!REG_P (reg) || AR_REGNO_P (REGNO (reg)))
7818 return -1;
7819
7820 if (!MEM_P (mem))
7821 return -1;
7822
7823 {
7824 rtx mem_reg = XEXP (mem, 0);
7825
7826 if (!REG_P (mem_reg))
7827 return -1;
7828 }
7829
7830 mode_rtx = mem;
7831 }
7832 else if (get_attr_speculable2 (insn) == SPECULABLE2_YES)
7833 {
7834 gcc_assert (REG_P (reg) && MEM_P (mem));
7835 mode_rtx = mem;
7836 }
7837 else
7838 return -1;
7839 }
7840 else if (get_attr_data_speculative (insn) == DATA_SPECULATIVE_YES
7841 || get_attr_control_speculative (insn) == CONTROL_SPECULATIVE_YES
7842 || get_attr_check_load (insn) == CHECK_LOAD_YES)
7843 /* Process speculative ld or ld.c. */
7844 {
7845 gcc_assert (REG_P (reg) && MEM_P (mem));
7846 mode_rtx = mem;
7847 }
7848 else
7849 {
7850 enum attr_itanium_class attr_class = get_attr_itanium_class (insn);
7851
7852 if (attr_class == ITANIUM_CLASS_CHK_A
7853 || attr_class == ITANIUM_CLASS_CHK_S_I
7854 || attr_class == ITANIUM_CLASS_CHK_S_F)
7855 /* Process chk. */
7856 mode_rtx = reg;
7857 else
7858 return -1;
7859 }
7860
7861 mode_no = ia64_mode_to_int (GET_MODE (mode_rtx));
7862
7863 if (mode_no == SPEC_MODE_INVALID)
7864 return -1;
7865
7866 extend_p = (GET_MODE (reg) != GET_MODE (mode_rtx));
7867
7868 if (extend_p)
7869 {
7870 if (!(SPEC_MODE_FOR_EXTEND_FIRST <= mode_no
7871 && mode_no <= SPEC_MODE_FOR_EXTEND_LAST))
7872 return -1;
7873
7874 mode_no += SPEC_GEN_EXTEND_OFFSET;
7875 }
7876
7877 return mode_no;
7878 }
7879
7880 /* If X is an unspec part of a speculative load, return its code.
7881 Return -1 otherwise. */
7882 static int
7883 get_spec_unspec_code (const_rtx x)
7884 {
7885 if (GET_CODE (x) != UNSPEC)
7886 return -1;
7887
7888 {
7889 int code;
7890
7891 code = XINT (x, 1);
7892
7893 switch (code)
7894 {
7895 case UNSPEC_LDA:
7896 case UNSPEC_LDS:
7897 case UNSPEC_LDS_A:
7898 case UNSPEC_LDSA:
7899 return code;
7900
7901 default:
7902 return -1;
7903 }
7904 }
7905 }
7906
7907 /* Implement skip_rtx_p hook. */
7908 static bool
7909 ia64_skip_rtx_p (const_rtx x)
7910 {
7911 return get_spec_unspec_code (x) != -1;
7912 }
7913
7914 /* If INSN is a speculative load, return its UNSPEC code.
7915 Return -1 otherwise. */
7916 static int
7917 get_insn_spec_code (const_rtx insn)
7918 {
7919 rtx pat, reg, mem;
7920
7921 pat = PATTERN (insn);
7922
7923 if (GET_CODE (pat) == COND_EXEC)
7924 pat = COND_EXEC_CODE (pat);
7925
7926 if (GET_CODE (pat) != SET)
7927 return -1;
7928
7929 reg = SET_DEST (pat);
7930 if (!REG_P (reg))
7931 return -1;
7932
7933 mem = SET_SRC (pat);
7934 if (GET_CODE (mem) == ZERO_EXTEND)
7935 mem = XEXP (mem, 0);
7936
7937 return get_spec_unspec_code (mem);
7938 }
7939
7940 /* If INSN is a speculative load, return a ds with the speculation types.
7941 Otherwise [if INSN is a normal instruction] return 0. */
7942 static ds_t
7943 ia64_get_insn_spec_ds (rtx insn)
7944 {
7945 int code = get_insn_spec_code (insn);
7946
7947 switch (code)
7948 {
7949 case UNSPEC_LDA:
7950 return BEGIN_DATA;
7951
7952 case UNSPEC_LDS:
7953 case UNSPEC_LDS_A:
7954 return BEGIN_CONTROL;
7955
7956 case UNSPEC_LDSA:
7957 return BEGIN_DATA | BEGIN_CONTROL;
7958
7959 default:
7960 return 0;
7961 }
7962 }
7963
7964 /* If INSN is a speculative load, return a ds with the speculation types that
7965 will be checked.
7966 Otherwise [if INSN is a normal instruction] return 0. */
7967 static ds_t
7968 ia64_get_insn_checked_ds (rtx insn)
7969 {
7970 int code = get_insn_spec_code (insn);
7971
7972 switch (code)
7973 {
7974 case UNSPEC_LDA:
7975 return BEGIN_DATA | BEGIN_CONTROL;
7976
7977 case UNSPEC_LDS:
7978 return BEGIN_CONTROL;
7979
7980 case UNSPEC_LDS_A:
7981 case UNSPEC_LDSA:
7982 return BEGIN_DATA | BEGIN_CONTROL;
7983
7984 default:
7985 return 0;
7986 }
7987 }
7988
7989 /* Return a speculative load pattern for INSN, generated with speculation
7990 types TS and machine mode index MODE_NO. The operands are taken from
7991 recog_data, so the caller must already have extracted INSN's operands
7992 (see get_mode_no_for_insn). */
7993 static rtx
7994 ia64_gen_spec_load (rtx insn, ds_t ts, int mode_no)
7995 {
7996 rtx pat, new_pat;
7997 gen_func_t gen_load;
7998
7999 gen_load = get_spec_load_gen_function (ts, mode_no);
8000
8001 new_pat = gen_load (copy_rtx (recog_data.operand[0]),
8002 copy_rtx (recog_data.operand[1]));
8003
8004 pat = PATTERN (insn);
8005 if (GET_CODE (pat) == COND_EXEC)
8006 new_pat = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (COND_EXEC_TEST (pat)),
8007 new_pat);
8008
8009 return new_pat;
8010 }
8011
8012 static bool
8013 insn_can_be_in_speculative_p (rtx insn ATTRIBUTE_UNUSED,
8014 ds_t ds ATTRIBUTE_UNUSED)
8015 {
8016 return false;
8017 }
8018
8019 /* Implement targetm.sched.speculate_insn hook.
8020 Check if the INSN can be TS speculative.
8021 If 'no' - return -1.
8022 If 'yes' - generate speculative pattern in the NEW_PAT and return 1.
8023 If current pattern of the INSN already provides TS speculation,
8024 return 0. */
8025 static int
8026 ia64_speculate_insn (rtx insn, ds_t ts, rtx *new_pat)
8027 {
8028 int mode_no;
8029 int res;
8030
8031 gcc_assert (!(ts & ~SPECULATIVE));
8032
8033 if (ia64_spec_check_p (insn))
8034 return -1;
8035
8036 if ((ts & BE_IN_SPEC)
8037 && !insn_can_be_in_speculative_p (insn, ts))
8038 return -1;
8039
8040 mode_no = get_mode_no_for_insn (insn);
8041
8042 if (mode_no != SPEC_MODE_INVALID)
8043 {
8044 if (ia64_get_insn_spec_ds (insn) == ds_get_speculation_types (ts))
8045 res = 0;
8046 else
8047 {
8048 res = 1;
8049 *new_pat = ia64_gen_spec_load (insn, ts, mode_no);
8050 }
8051 }
8052 else
8053 res = -1;
8054
8055 return res;
8056 }
8057
8058 /* Return a function that will generate a check for speculation TS with mode
8059 MODE_NO.
8060 If simple check is needed, pass true for SIMPLE_CHECK_P.
8061 If clearing check is needed, pass true for CLEARING_CHECK_P. */
8062 static gen_func_t
8063 get_spec_check_gen_function (ds_t ts, int mode_no,
8064 bool simple_check_p, bool clearing_check_p)
8065 {
8066 static gen_func_t gen_ld_c_clr[] = {
8067 gen_movbi_clr,
8068 gen_movqi_clr,
8069 gen_movhi_clr,
8070 gen_movsi_clr,
8071 gen_movdi_clr,
8072 gen_movsf_clr,
8073 gen_movdf_clr,
8074 gen_movxf_clr,
8075 gen_movti_clr,
8076 gen_zero_extendqidi2_clr,
8077 gen_zero_extendhidi2_clr,
8078 gen_zero_extendsidi2_clr,
8079 };
8080 static gen_func_t gen_ld_c_nc[] = {
8081 gen_movbi_nc,
8082 gen_movqi_nc,
8083 gen_movhi_nc,
8084 gen_movsi_nc,
8085 gen_movdi_nc,
8086 gen_movsf_nc,
8087 gen_movdf_nc,
8088 gen_movxf_nc,
8089 gen_movti_nc,
8090 gen_zero_extendqidi2_nc,
8091 gen_zero_extendhidi2_nc,
8092 gen_zero_extendsidi2_nc,
8093 };
8094 static gen_func_t gen_chk_a_clr[] = {
8095 gen_advanced_load_check_clr_bi,
8096 gen_advanced_load_check_clr_qi,
8097 gen_advanced_load_check_clr_hi,
8098 gen_advanced_load_check_clr_si,
8099 gen_advanced_load_check_clr_di,
8100 gen_advanced_load_check_clr_sf,
8101 gen_advanced_load_check_clr_df,
8102 gen_advanced_load_check_clr_xf,
8103 gen_advanced_load_check_clr_ti,
8104 gen_advanced_load_check_clr_di,
8105 gen_advanced_load_check_clr_di,
8106 gen_advanced_load_check_clr_di,
8107 };
8108 static gen_func_t gen_chk_a_nc[] = {
8109 gen_advanced_load_check_nc_bi,
8110 gen_advanced_load_check_nc_qi,
8111 gen_advanced_load_check_nc_hi,
8112 gen_advanced_load_check_nc_si,
8113 gen_advanced_load_check_nc_di,
8114 gen_advanced_load_check_nc_sf,
8115 gen_advanced_load_check_nc_df,
8116 gen_advanced_load_check_nc_xf,
8117 gen_advanced_load_check_nc_ti,
8118 gen_advanced_load_check_nc_di,
8119 gen_advanced_load_check_nc_di,
8120 gen_advanced_load_check_nc_di,
8121 };
8122 static gen_func_t gen_chk_s[] = {
8123 gen_speculation_check_bi,
8124 gen_speculation_check_qi,
8125 gen_speculation_check_hi,
8126 gen_speculation_check_si,
8127 gen_speculation_check_di,
8128 gen_speculation_check_sf,
8129 gen_speculation_check_df,
8130 gen_speculation_check_xf,
8131 gen_speculation_check_ti,
8132 gen_speculation_check_di,
8133 gen_speculation_check_di,
8134 gen_speculation_check_di,
8135 };
8136
8137 gen_func_t *gen_check;
8138
8139 if (ts & BEGIN_DATA)
8140 {
8141 /* We don't need recovery because, even if this is ld.sa, the
8142 ALAT entry will be allocated only if the NAT bit is set to zero.
8143 So it is enough to use ld.c here. */
8144
8145 if (simple_check_p)
8146 {
8147 gcc_assert (mflag_sched_spec_ldc);
8148
8149 if (clearing_check_p)
8150 gen_check = gen_ld_c_clr;
8151 else
8152 gen_check = gen_ld_c_nc;
8153 }
8154 else
8155 {
8156 if (clearing_check_p)
8157 gen_check = gen_chk_a_clr;
8158 else
8159 gen_check = gen_chk_a_nc;
8160 }
8161 }
8162 else if (ts & BEGIN_CONTROL)
8163 {
8164 if (simple_check_p)
8165 /* We might want to use ld.sa -> ld.c instead of
8166 ld.s -> chk.s. */
8167 {
8168 gcc_assert (!ia64_needs_block_p (ts));
8169
8170 if (clearing_check_p)
8171 gen_check = gen_ld_c_clr;
8172 else
8173 gen_check = gen_ld_c_nc;
8174 }
8175 else
8176 {
8177 gen_check = gen_chk_s;
8178 }
8179 }
8180 else
8181 gcc_unreachable ();
8182
8183 gcc_assert (mode_no >= 0);
8184 return gen_check[mode_no];
8185 }
8186
8187 /* Return nonzero if speculation with types TS needs a branchy recovery check. */
8188 static bool
8189 ia64_needs_block_p (ds_t ts)
8190 {
8191 if (ts & BEGIN_DATA)
8192 return !mflag_sched_spec_ldc;
8193
8194 gcc_assert ((ts & BEGIN_CONTROL) != 0);
8195
8196 return !(mflag_sched_spec_control_ldc && mflag_sched_spec_ldc);
8197 }
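/* Put differently: data speculation can use a simple ld.c check whenever
   mflag_sched_spec_ldc is enabled, while control speculation can use a
   simple check only when both mflag_sched_spec_control_ldc and
   mflag_sched_spec_ldc are enabled; otherwise a branchy chk.a/chk.s recovery
   sequence is needed (see get_spec_check_gen_function above).  */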
8198
8199 /* Generate a recovery check pattern for INSN with speculation types DS.
8200 If LABEL is nonzero, generate a branchy recovery check.
8201 Otherwise, generate a simple check. */
8202 static rtx
8203 ia64_gen_spec_check (rtx insn, rtx label, ds_t ds)
8204 {
8205 rtx op1, pat, check_pat;
8206 gen_func_t gen_check;
8207 int mode_no;
8208
8209 mode_no = get_mode_no_for_insn (insn);
8210 gcc_assert (mode_no >= 0);
8211
8212 if (label)
8213 op1 = label;
8214 else
8215 {
8216 gcc_assert (!ia64_needs_block_p (ds));
8217 op1 = copy_rtx (recog_data.operand[1]);
8218 }
8219
8220 gen_check = get_spec_check_gen_function (ds, mode_no, label == NULL_RTX,
8221 true);
8222
8223 check_pat = gen_check (copy_rtx (recog_data.operand[0]), op1);
8224
8225 pat = PATTERN (insn);
8226 if (GET_CODE (pat) == COND_EXEC)
8227 check_pat = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (COND_EXEC_TEST (pat)),
8228 check_pat);
8229
8230 return check_pat;
8231 }
8232
8233 /* Return nonzero if X is a speculation (recovery) check. */
8234 static int
8235 ia64_spec_check_p (rtx x)
8236 {
8237 x = PATTERN (x);
8238 if (GET_CODE (x) == COND_EXEC)
8239 x = COND_EXEC_CODE (x);
8240 if (GET_CODE (x) == SET)
8241 return ia64_spec_check_src_p (SET_SRC (x));
8242 return 0;
8243 }
8244
8245 /* Return nonzero (the check's UNSPEC code) if SRC belongs to a recovery check. */
8246 static int
8247 ia64_spec_check_src_p (rtx src)
8248 {
8249 if (GET_CODE (src) == IF_THEN_ELSE)
8250 {
8251 rtx t;
8252
8253 t = XEXP (src, 0);
8254 if (GET_CODE (t) == NE)
8255 {
8256 t = XEXP (t, 0);
8257
8258 if (GET_CODE (t) == UNSPEC)
8259 {
8260 int code;
8261
8262 code = XINT (t, 1);
8263
8264 if (code == UNSPEC_LDCCLR
8265 || code == UNSPEC_LDCNC
8266 || code == UNSPEC_CHKACLR
8267 || code == UNSPEC_CHKANC
8268 || code == UNSPEC_CHKS)
8269 {
8270 gcc_assert (code != 0);
8271 return code;
8272 }
8273 }
8274 }
8275 }
8276 return 0;
8277 }
8278 \f
8279
8280 /* The following page contains abstract data `bundle states' which are
8281 used for bundling insns (inserting nops and template generation). */
8282
8283 /* The following describes state of insn bundling. */
8284
8285 struct bundle_state
8286 {
8287 /* Unique bundle state number to identify them in the debugging
8288 output */
8289 int unique_num;
8290 rtx insn; /* corresponding insn, NULL for the 1st and the last state */
8291 /* number of nops before and after the insn */
8292 short before_nops_num, after_nops_num;
8293 int insn_num; /* insn number (0 for the initial state, 1 for the 1st
8294 insn) */
8295 int cost; /* cost of the state in cycles */
8296 int accumulated_insns_num; /* number of all previous insns including
8297 nops; an L insn counts as 2 insns */
8298 int branch_deviation; /* deviation of previous branches from 3rd slots */
8299 int middle_bundle_stops; /* number of stop bits in the middle of bundles */
8300 struct bundle_state *next; /* next state with the same insn_num */
8301 struct bundle_state *originator; /* originator (previous insn state) */
8302 /* All bundle states are in the following chain. */
8303 struct bundle_state *allocated_states_chain;
8304 /* The DFA State after issuing the insn and the nops. */
8305 state_t dfa_state;
8306 };
8307
8308 /* The following maps an insn number to the corresponding bundle state. */
8309
8310 static struct bundle_state **index_to_bundle_states;
8311
8312 /* The unique number of next bundle state. */
8313
8314 static int bundle_states_num;
8315
8316 /* All allocated bundle states are in the following chain. */
8317
8318 static struct bundle_state *allocated_bundle_states_chain;
8319
8320 /* All allocated but not used bundle states are in the following
8321 chain. */
8322
8323 static struct bundle_state *free_bundle_state_chain;
8324
8325
8326 /* The following function returns a free bundle state. */
8327
8328 static struct bundle_state *
8329 get_free_bundle_state (void)
8330 {
8331 struct bundle_state *result;
8332
8333 if (free_bundle_state_chain != NULL)
8334 {
8335 result = free_bundle_state_chain;
8336 free_bundle_state_chain = result->next;
8337 }
8338 else
8339 {
8340 result = XNEW (struct bundle_state);
8341 result->dfa_state = xmalloc (dfa_state_size);
8342 result->allocated_states_chain = allocated_bundle_states_chain;
8343 allocated_bundle_states_chain = result;
8344 }
8345 result->unique_num = bundle_states_num++;
8346 return result;
8347
8348 }
8349
8350 /* The following function frees given bundle state. */
8351
8352 static void
8353 free_bundle_state (struct bundle_state *state)
8354 {
8355 state->next = free_bundle_state_chain;
8356 free_bundle_state_chain = state;
8357 }
8358
8359 /* Start work with abstract data `bundle states'. */
8360
8361 static void
8362 initiate_bundle_states (void)
8363 {
8364 bundle_states_num = 0;
8365 free_bundle_state_chain = NULL;
8366 allocated_bundle_states_chain = NULL;
8367 }
8368
8369 /* Finish work with abstract data `bundle states'. */
8370
8371 static void
8372 finish_bundle_states (void)
8373 {
8374 struct bundle_state *curr_state, *next_state;
8375
8376 for (curr_state = allocated_bundle_states_chain;
8377 curr_state != NULL;
8378 curr_state = next_state)
8379 {
8380 next_state = curr_state->allocated_states_chain;
8381 free (curr_state->dfa_state);
8382 free (curr_state);
8383 }
8384 }
8385
8386 /* Hash table of the bundle states. The key is dfa_state and insn_num
8387 of the bundle states. */
8388
8389 static htab_t bundle_state_table;
8390
8391 /* The function returns hash of BUNDLE_STATE. */
8392
8393 static unsigned
8394 bundle_state_hash (const void *bundle_state)
8395 {
8396 const struct bundle_state *const state
8397 = (const struct bundle_state *) bundle_state;
8398 unsigned result, i;
8399
8400 for (result = i = 0; i < dfa_state_size; i++)
8401 result += (((unsigned char *) state->dfa_state) [i]
8402 << ((i % CHAR_BIT) * 3 + CHAR_BIT));
8403 return result + state->insn_num;
8404 }
8405
8406 /* The function returns nonzero if the bundle state keys are equal. */
8407
8408 static int
8409 bundle_state_eq_p (const void *bundle_state_1, const void *bundle_state_2)
8410 {
8411 const struct bundle_state *const state1
8412 = (const struct bundle_state *) bundle_state_1;
8413 const struct bundle_state *const state2
8414 = (const struct bundle_state *) bundle_state_2;
8415
8416 return (state1->insn_num == state2->insn_num
8417 && memcmp (state1->dfa_state, state2->dfa_state,
8418 dfa_state_size) == 0);
8419 }
8420
8421 /* The function inserts the BUNDLE_STATE into the hash table. The
8422 function returns nonzero if the bundle has been inserted into the
8423 table. The table contains the best bundle state with given key. */
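/* "Best" here means the lexicographic minimum of (cost,
   accumulated_insns_num, branch_deviation, middle_bundle_stops), which is
   exactly the comparison spelled out in the condition below.  */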
8424
8425 static int
8426 insert_bundle_state (struct bundle_state *bundle_state)
8427 {
8428 void **entry_ptr;
8429
8430 entry_ptr = htab_find_slot (bundle_state_table, bundle_state, INSERT);
8431 if (*entry_ptr == NULL)
8432 {
8433 bundle_state->next = index_to_bundle_states [bundle_state->insn_num];
8434 index_to_bundle_states [bundle_state->insn_num] = bundle_state;
8435 *entry_ptr = (void *) bundle_state;
8436 return TRUE;
8437 }
8438 else if (bundle_state->cost < ((struct bundle_state *) *entry_ptr)->cost
8439 || (bundle_state->cost == ((struct bundle_state *) *entry_ptr)->cost
8440 && (((struct bundle_state *)*entry_ptr)->accumulated_insns_num
8441 > bundle_state->accumulated_insns_num
8442 || (((struct bundle_state *)
8443 *entry_ptr)->accumulated_insns_num
8444 == bundle_state->accumulated_insns_num
8445 && (((struct bundle_state *)
8446 *entry_ptr)->branch_deviation
8447 > bundle_state->branch_deviation
8448 || (((struct bundle_state *)
8449 *entry_ptr)->branch_deviation
8450 == bundle_state->branch_deviation
8451 && ((struct bundle_state *)
8452 *entry_ptr)->middle_bundle_stops
8453 > bundle_state->middle_bundle_stops))))))
8454
8455 {
8456 struct bundle_state temp;
8457
8458 temp = *(struct bundle_state *) *entry_ptr;
8459 *(struct bundle_state *) *entry_ptr = *bundle_state;
8460 ((struct bundle_state *) *entry_ptr)->next = temp.next;
8461 *bundle_state = temp;
8462 }
8463 return FALSE;
8464 }
8465
8466 /* Start work with the hash table. */
8467
8468 static void
8469 initiate_bundle_state_table (void)
8470 {
8471 bundle_state_table = htab_create (50, bundle_state_hash, bundle_state_eq_p,
8472 (htab_del) 0);
8473 }
8474
8475 /* Finish work with the hash table. */
8476
8477 static void
8478 finish_bundle_state_table (void)
8479 {
8480 htab_delete (bundle_state_table);
8481 }
8482
8483 \f
8484
8485 /* The following variable is an insn `nop' used to check bundle states
8486 with different numbers of inserted nops. */
8487
8488 static rtx ia64_nop;
8489
8490 /* The following function tries to issue NOPS_NUM nops for the current
8491 state without advancing processor cycle. If it failed, the
8492 function returns FALSE and frees the current state. */
8493
8494 static int
8495 try_issue_nops (struct bundle_state *curr_state, int nops_num)
8496 {
8497 int i;
8498
8499 for (i = 0; i < nops_num; i++)
8500 if (state_transition (curr_state->dfa_state, ia64_nop) >= 0)
8501 {
8502 free_bundle_state (curr_state);
8503 return FALSE;
8504 }
8505 return TRUE;
8506 }
8507
8508 /* The following function tries to issue INSN for the current
8509 state without advancing processor cycle. If it failed, the
8510 function returns FALSE and frees the current state. */
8511
8512 static int
8513 try_issue_insn (struct bundle_state *curr_state, rtx insn)
8514 {
8515 if (insn && state_transition (curr_state->dfa_state, insn) >= 0)
8516 {
8517 free_bundle_state (curr_state);
8518 return FALSE;
8519 }
8520 return TRUE;
8521 }
8522
8523 /* The following function tries to issue BEFORE_NOPS_NUM nops and INSN
8524 starting with ORIGINATOR without advancing the processor cycle. If
8525 TRY_BUNDLE_END_P is TRUE, the function also (or only, if
8526 ONLY_BUNDLE_END_P is TRUE) tries to issue nops to fill the whole bundle.
8527 If it was successful, the function creates a new bundle state and
8528 inserts it into the hash table and into `index_to_bundle_states'. */
8529
8530 static void
8531 issue_nops_and_insn (struct bundle_state *originator, int before_nops_num,
8532 rtx insn, int try_bundle_end_p, int only_bundle_end_p)
8533 {
8534 struct bundle_state *curr_state;
8535
8536 curr_state = get_free_bundle_state ();
8537 memcpy (curr_state->dfa_state, originator->dfa_state, dfa_state_size);
8538 curr_state->insn = insn;
8539 curr_state->insn_num = originator->insn_num + 1;
8540 curr_state->cost = originator->cost;
8541 curr_state->originator = originator;
8542 curr_state->before_nops_num = before_nops_num;
8543 curr_state->after_nops_num = 0;
8544 curr_state->accumulated_insns_num
8545 = originator->accumulated_insns_num + before_nops_num;
8546 curr_state->branch_deviation = originator->branch_deviation;
8547 curr_state->middle_bundle_stops = originator->middle_bundle_stops;
8548 gcc_assert (insn);
8549 if (INSN_CODE (insn) == CODE_FOR_insn_group_barrier)
8550 {
8551 gcc_assert (GET_MODE (insn) != TImode);
8552 if (!try_issue_nops (curr_state, before_nops_num))
8553 return;
8554 if (!try_issue_insn (curr_state, insn))
8555 return;
8556 memcpy (temp_dfa_state, curr_state->dfa_state, dfa_state_size);
8557 if (curr_state->accumulated_insns_num % 3 != 0)
8558 curr_state->middle_bundle_stops++;
8559 if (state_transition (temp_dfa_state, dfa_pre_cycle_insn) >= 0
8560 && curr_state->accumulated_insns_num % 3 != 0)
8561 {
8562 free_bundle_state (curr_state);
8563 return;
8564 }
8565 }
8566 else if (GET_MODE (insn) != TImode)
8567 {
8568 if (!try_issue_nops (curr_state, before_nops_num))
8569 return;
8570 if (!try_issue_insn (curr_state, insn))
8571 return;
8572 curr_state->accumulated_insns_num++;
8573 gcc_assert (GET_CODE (PATTERN (insn)) != ASM_INPUT
8574 && asm_noperands (PATTERN (insn)) < 0);
8575
8576 if (ia64_safe_type (insn) == TYPE_L)
8577 curr_state->accumulated_insns_num++;
8578 }
8579 else
8580 {
8581 /* If this is an insn that must be first in a group, then don't allow
8582 nops to be emitted before it. Currently, alloc is the only such
8583 supported instruction. */
8584 /* ??? The bundling automatons should handle this for us, but they do
8585 not yet have support for the first_insn attribute. */
8586 if (before_nops_num > 0 && get_attr_first_insn (insn) == FIRST_INSN_YES)
8587 {
8588 free_bundle_state (curr_state);
8589 return;
8590 }
8591
8592 state_transition (curr_state->dfa_state, dfa_pre_cycle_insn);
8593 state_transition (curr_state->dfa_state, NULL);
8594 curr_state->cost++;
8595 if (!try_issue_nops (curr_state, before_nops_num))
8596 return;
8597 if (!try_issue_insn (curr_state, insn))
8598 return;
8599 curr_state->accumulated_insns_num++;
8600 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
8601 || asm_noperands (PATTERN (insn)) >= 0)
8602 {
8603 /* Finish bundle containing asm insn. */
8604 curr_state->after_nops_num
8605 = 3 - curr_state->accumulated_insns_num % 3;
8606 curr_state->accumulated_insns_num
8607 += 3 - curr_state->accumulated_insns_num % 3;
8608 }
8609 else if (ia64_safe_type (insn) == TYPE_L)
8610 curr_state->accumulated_insns_num++;
8611 }
8612 if (ia64_safe_type (insn) == TYPE_B)
8613 curr_state->branch_deviation
8614 += 2 - (curr_state->accumulated_insns_num - 1) % 3;
8615 if (try_bundle_end_p && curr_state->accumulated_insns_num % 3 != 0)
8616 {
8617 if (!only_bundle_end_p && insert_bundle_state (curr_state))
8618 {
8619 state_t dfa_state;
8620 struct bundle_state *curr_state1;
8621 struct bundle_state *allocated_states_chain;
8622
8623 curr_state1 = get_free_bundle_state ();
8624 dfa_state = curr_state1->dfa_state;
8625 allocated_states_chain = curr_state1->allocated_states_chain;
8626 *curr_state1 = *curr_state;
8627 curr_state1->dfa_state = dfa_state;
8628 curr_state1->allocated_states_chain = allocated_states_chain;
8629 memcpy (curr_state1->dfa_state, curr_state->dfa_state,
8630 dfa_state_size);
8631 curr_state = curr_state1;
8632 }
8633 if (!try_issue_nops (curr_state,
8634 3 - curr_state->accumulated_insns_num % 3))
8635 return;
8636 curr_state->after_nops_num
8637 = 3 - curr_state->accumulated_insns_num % 3;
8638 curr_state->accumulated_insns_num
8639 += 3 - curr_state->accumulated_insns_num % 3;
8640 }
8641 if (!insert_bundle_state (curr_state))
8642 free_bundle_state (curr_state);
8643 return;
8644 }
8645
8646 /* The following function returns the position in the two-bundle window
8647 for the given STATE. */
8648
8649 static int
8650 get_max_pos (state_t state)
8651 {
8652 if (cpu_unit_reservation_p (state, pos_6))
8653 return 6;
8654 else if (cpu_unit_reservation_p (state, pos_5))
8655 return 5;
8656 else if (cpu_unit_reservation_p (state, pos_4))
8657 return 4;
8658 else if (cpu_unit_reservation_p (state, pos_3))
8659 return 3;
8660 else if (cpu_unit_reservation_p (state, pos_2))
8661 return 2;
8662 else if (cpu_unit_reservation_p (state, pos_1))
8663 return 1;
8664 else
8665 return 0;
8666 }
8667
8668 /* The function returns the code of a possible template for the given
8669 position and state. It should be called only with position values of
8670 3 or 6. We avoid generating F NOPs by putting templates containing F
8671 insns at the end of the template search, because of an undocumented
8672 anomaly in McKinley-derived cores which can cause stalls if an F-unit
8673 insn (including a NOP) is issued within a six-cycle window after
8674 reading certain application registers (such as ar.bsp). Furthermore,
8675 power considerations also argue against the use of F-unit instructions
8676 unless they're really needed. */
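/* The return codes correspond to the bundle templates suggested by the
   automaton unit names: 0 .mii, 1 .mmi, 2 .mfi, 3 .mmf, 4 .bbb, 5 .mbb,
   6 .mib, 7 .mmb, 8 .mfb, 9 .mlx (compare the template0 == 4 || template0 == 5
   test for .bbb/.mbb bundles in ia64_add_bundle_selector_before below).  */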
8677
8678 static int
8679 get_template (state_t state, int pos)
8680 {
8681 switch (pos)
8682 {
8683 case 3:
8684 if (cpu_unit_reservation_p (state, _0mmi_))
8685 return 1;
8686 else if (cpu_unit_reservation_p (state, _0mii_))
8687 return 0;
8688 else if (cpu_unit_reservation_p (state, _0mmb_))
8689 return 7;
8690 else if (cpu_unit_reservation_p (state, _0mib_))
8691 return 6;
8692 else if (cpu_unit_reservation_p (state, _0mbb_))
8693 return 5;
8694 else if (cpu_unit_reservation_p (state, _0bbb_))
8695 return 4;
8696 else if (cpu_unit_reservation_p (state, _0mmf_))
8697 return 3;
8698 else if (cpu_unit_reservation_p (state, _0mfi_))
8699 return 2;
8700 else if (cpu_unit_reservation_p (state, _0mfb_))
8701 return 8;
8702 else if (cpu_unit_reservation_p (state, _0mlx_))
8703 return 9;
8704 else
8705 gcc_unreachable ();
8706 case 6:
8707 if (cpu_unit_reservation_p (state, _1mmi_))
8708 return 1;
8709 else if (cpu_unit_reservation_p (state, _1mii_))
8710 return 0;
8711 else if (cpu_unit_reservation_p (state, _1mmb_))
8712 return 7;
8713 else if (cpu_unit_reservation_p (state, _1mib_))
8714 return 6;
8715 else if (cpu_unit_reservation_p (state, _1mbb_))
8716 return 5;
8717 else if (cpu_unit_reservation_p (state, _1bbb_))
8718 return 4;
8719 else if (_1mmf_ >= 0 && cpu_unit_reservation_p (state, _1mmf_))
8720 return 3;
8721 else if (cpu_unit_reservation_p (state, _1mfi_))
8722 return 2;
8723 else if (cpu_unit_reservation_p (state, _1mfb_))
8724 return 8;
8725 else if (cpu_unit_reservation_p (state, _1mlx_))
8726 return 9;
8727 else
8728 gcc_unreachable ();
8729 default:
8730 gcc_unreachable ();
8731 }
8732 }
8733
8734 /* True when INSN is important for bundling. */
8735 static bool
8736 important_for_bundling_p (rtx insn)
8737 {
8738 return (INSN_P (insn)
8739 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
8740 && GET_CODE (PATTERN (insn)) != USE
8741 && GET_CODE (PATTERN (insn)) != CLOBBER);
8742 }
8743
8744 /* The following function returns the first insn important for insn bundling
8745 at or after INSN and before TAIL. */
8746
8747 static rtx
8748 get_next_important_insn (rtx insn, rtx tail)
8749 {
8750 for (; insn && insn != tail; insn = NEXT_INSN (insn))
8751 if (important_for_bundling_p (insn))
8752 return insn;
8753 return NULL_RTX;
8754 }
8755
8756 /* Add a bundle selector TEMPLATE0 before INSN. */
8757
8758 static void
8759 ia64_add_bundle_selector_before (int template0, rtx insn)
8760 {
8761 rtx b = gen_bundle_selector (GEN_INT (template0));
8762
8763 ia64_emit_insn_before (b, insn);
8764 #if NR_BUNDLES == 10
8765 if ((template0 == 4 || template0 == 5)
8766 && ia64_except_unwind_info (&global_options) == UI_TARGET)
8767 {
8768 int i;
8769 rtx note = NULL_RTX;
8770
8771 /* In .mbb and .bbb bundles, check if CALL_INSN isn't in the
8772 first or second slot. If it is and has REG_EH_NOTE set, copy it
8773 to following nops, as br.call sets rp to the address of following
8774 bundle and therefore an EH region end must be on a bundle
8775 boundary. */
8776 insn = PREV_INSN (insn);
8777 for (i = 0; i < 3; i++)
8778 {
8779 do
8780 insn = next_active_insn (insn);
8781 while (GET_CODE (insn) == INSN
8782 && get_attr_empty (insn) == EMPTY_YES);
8783 if (GET_CODE (insn) == CALL_INSN)
8784 note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
8785 else if (note)
8786 {
8787 int code;
8788
8789 gcc_assert ((code = recog_memoized (insn)) == CODE_FOR_nop
8790 || code == CODE_FOR_nop_b);
8791 if (find_reg_note (insn, REG_EH_REGION, NULL_RTX))
8792 note = NULL_RTX;
8793 else
8794 add_reg_note (insn, REG_EH_REGION, XEXP (note, 0));
8795 }
8796 }
8797 }
8798 #endif
8799 }
8800
8801 /* The following function does insn bundling. Bundling means
8802 inserting templates and nop insns to fit insn groups into permitted
8803 templates. Instruction scheduling uses NDFA (non-deterministic
8804 finite automata) encoding informations about the templates and the
8805 inserted nops. Nondeterminism of the automata permits follows
8806 all possible insn sequences very fast.
8807
8808 Unfortunately it is not possible to get information about inserting
8809 nop insns and used templates from the automaton states. The
8810 automaton only says that we can issue an insn, possibly inserting
8811 some nops before it and using some template. Therefore insn
8812 bundling in this function is implemented using a DFA
8813 (deterministic finite automaton). We follow all possible insn
8814 sequences by inserting 0-2 nops (that is what the NDFA describes for
8815 insn scheduling) before/after each insn being bundled. We know the
8816 start of the simulated processor cycle from insn scheduling (an insn
8817 starting a new cycle has TImode).
8818
8819 A simple implementation of insn bundling would create an enormous
8820 number of possible insn sequences satisfying the information about new
8821 cycle ticks taken from the insn scheduling. To make the algorithm
8822 practical we use dynamic programming. Each decision (about
8823 inserting nops and implicitly about previous decisions) is described
8824 by structure bundle_state (see above). If we generate the same
8825 bundle state (the key is the automaton state after issuing the insns
8826 and nops for it), we reuse the already generated one. As a consequence
8827 we reject some decisions which cannot improve the solution and
8828 reduce the memory used by the algorithm.
8829
8830 When we reach the end of the EBB (extended basic block), we choose the
8831 best sequence and then, moving back through the EBB, insert templates
8832 for the best alternative. The templates are taken by querying the
8833 automaton state for each insn in the chosen bundle states.
8834
8835 So the algorithm makes two (forward and backward) passes through the
8836 EBB. */
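/* Sketch of the forward pass below: for each important insn I, and for each
   bundle state S reached after the previous insn, we try issuing I preceded
   by 2, 1 or 0 nops (the 2-nop variant only for F/B/L/S type insns),
   optionally padding the current bundle to a 3-slot boundary, and insert the
   resulting states keyed by (DFA state, insn number); for equal keys only
   the best state (see insert_bundle_state) is kept.  */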
8837
8838 static void
8839 bundling (FILE *dump, int verbose, rtx prev_head_insn, rtx tail)
8840 {
8841 struct bundle_state *curr_state, *next_state, *best_state;
8842 rtx insn, next_insn;
8843 int insn_num;
8844 int i, bundle_end_p, only_bundle_end_p, asm_p;
8845 int pos = 0, max_pos, template0, template1;
8846 rtx b;
8847 rtx nop;
8848 enum attr_type type;
8849
8850 insn_num = 0;
8851 /* Count insns in the EBB. */
8852 for (insn = NEXT_INSN (prev_head_insn);
8853 insn && insn != tail;
8854 insn = NEXT_INSN (insn))
8855 if (INSN_P (insn))
8856 insn_num++;
8857 if (insn_num == 0)
8858 return;
8859 bundling_p = 1;
8860 dfa_clean_insn_cache ();
8861 initiate_bundle_state_table ();
8862 index_to_bundle_states = XNEWVEC (struct bundle_state *, insn_num + 2);
8863 /* First (forward) pass -- generation of bundle states. */
8864 curr_state = get_free_bundle_state ();
8865 curr_state->insn = NULL;
8866 curr_state->before_nops_num = 0;
8867 curr_state->after_nops_num = 0;
8868 curr_state->insn_num = 0;
8869 curr_state->cost = 0;
8870 curr_state->accumulated_insns_num = 0;
8871 curr_state->branch_deviation = 0;
8872 curr_state->middle_bundle_stops = 0;
8873 curr_state->next = NULL;
8874 curr_state->originator = NULL;
8875 state_reset (curr_state->dfa_state);
8876 index_to_bundle_states [0] = curr_state;
8877 insn_num = 0;
8878 /* Shift cycle mark if it is put on insn which could be ignored. */
8879 for (insn = NEXT_INSN (prev_head_insn);
8880 insn != tail;
8881 insn = NEXT_INSN (insn))
8882 if (INSN_P (insn)
8883 && (ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
8884 || GET_CODE (PATTERN (insn)) == USE
8885 || GET_CODE (PATTERN (insn)) == CLOBBER)
8886 && GET_MODE (insn) == TImode)
8887 {
8888 PUT_MODE (insn, VOIDmode);
8889 for (next_insn = NEXT_INSN (insn);
8890 next_insn != tail;
8891 next_insn = NEXT_INSN (next_insn))
8892 if (INSN_P (next_insn)
8893 && ia64_safe_itanium_class (next_insn) != ITANIUM_CLASS_IGNORE
8894 && GET_CODE (PATTERN (next_insn)) != USE
8895 && GET_CODE (PATTERN (next_insn)) != CLOBBER
8896 && INSN_CODE (next_insn) != CODE_FOR_insn_group_barrier)
8897 {
8898 PUT_MODE (next_insn, TImode);
8899 break;
8900 }
8901 }
8902 /* Forward pass: generation of bundle states. */
8903 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
8904 insn != NULL_RTX;
8905 insn = next_insn)
8906 {
8907 gcc_assert (INSN_P (insn)
8908 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
8909 && GET_CODE (PATTERN (insn)) != USE
8910 && GET_CODE (PATTERN (insn)) != CLOBBER);
8911 type = ia64_safe_type (insn);
8912 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
8913 insn_num++;
8914 index_to_bundle_states [insn_num] = NULL;
8915 for (curr_state = index_to_bundle_states [insn_num - 1];
8916 curr_state != NULL;
8917 curr_state = next_state)
8918 {
8919 pos = curr_state->accumulated_insns_num % 3;
8920 next_state = curr_state->next;
8921 /* We must fill up the current bundle in order to start a
8922 subsequent asm insn in a new bundle. Asm insn is always
8923 placed in a separate bundle. */
8924 only_bundle_end_p
8925 = (next_insn != NULL_RTX
8926 && INSN_CODE (insn) == CODE_FOR_insn_group_barrier
8927 && ia64_safe_type (next_insn) == TYPE_UNKNOWN);
8928 /* We may fill up the current bundle if it is the cycle end
8929 without a group barrier. */
8930 bundle_end_p
8931 = (only_bundle_end_p || next_insn == NULL_RTX
8932 || (GET_MODE (next_insn) == TImode
8933 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier));
8934 if (type == TYPE_F || type == TYPE_B || type == TYPE_L
8935 || type == TYPE_S)
8936 issue_nops_and_insn (curr_state, 2, insn, bundle_end_p,
8937 only_bundle_end_p);
8938 issue_nops_and_insn (curr_state, 1, insn, bundle_end_p,
8939 only_bundle_end_p);
8940 issue_nops_and_insn (curr_state, 0, insn, bundle_end_p,
8941 only_bundle_end_p);
8942 }
8943 gcc_assert (index_to_bundle_states [insn_num]);
8944 for (curr_state = index_to_bundle_states [insn_num];
8945 curr_state != NULL;
8946 curr_state = curr_state->next)
8947 if (verbose >= 2 && dump)
8948 {
8949 /* This structure is taken from generated code of the
8950 pipeline hazard recognizer (see file insn-attrtab.c).
8951 Please don't forget to change the structure if a new
8952 automaton is added to .md file. */
8953 struct DFA_chip
8954 {
8955 unsigned short one_automaton_state;
8956 unsigned short oneb_automaton_state;
8957 unsigned short two_automaton_state;
8958 unsigned short twob_automaton_state;
8959 };
8960
8961 fprintf
8962 (dump,
8963 "// Bundle state %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, mid.stops %d state %d) for %d\n",
8964 curr_state->unique_num,
8965 (curr_state->originator == NULL
8966 ? -1 : curr_state->originator->unique_num),
8967 curr_state->cost,
8968 curr_state->before_nops_num, curr_state->after_nops_num,
8969 curr_state->accumulated_insns_num, curr_state->branch_deviation,
8970 curr_state->middle_bundle_stops,
8971 ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state,
8972 INSN_UID (insn));
8973 }
8974 }
8975
8976 /* We should find a solution because the 2nd insn scheduling has
8977 found one. */
8978 gcc_assert (index_to_bundle_states [insn_num]);
8979 /* Find a state corresponding to the best insn sequence. */
8980 best_state = NULL;
8981 for (curr_state = index_to_bundle_states [insn_num];
8982 curr_state != NULL;
8983 curr_state = curr_state->next)
8984 /* We only look at states whose last bundle is completely filled.
8985 Among those, we first prefer insn sequences with minimal cost,
8986 then with a minimal number of inserted nops, and finally with branch
8987 insns placed in the 3rd slots. */
8988 if (curr_state->accumulated_insns_num % 3 == 0
8989 && (best_state == NULL || best_state->cost > curr_state->cost
8990 || (best_state->cost == curr_state->cost
8991 && (curr_state->accumulated_insns_num
8992 < best_state->accumulated_insns_num
8993 || (curr_state->accumulated_insns_num
8994 == best_state->accumulated_insns_num
8995 && (curr_state->branch_deviation
8996 < best_state->branch_deviation
8997 || (curr_state->branch_deviation
8998 == best_state->branch_deviation
8999 && curr_state->middle_bundle_stops
9000 < best_state->middle_bundle_stops)))))))
9001 best_state = curr_state;
9002 /* Second (backward) pass: adding nops and templates. */
9003 gcc_assert (best_state);
9004 insn_num = best_state->before_nops_num;
9005 template0 = template1 = -1;
9006 for (curr_state = best_state;
9007 curr_state->originator != NULL;
9008 curr_state = curr_state->originator)
9009 {
9010 insn = curr_state->insn;
9011 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
9012 || asm_noperands (PATTERN (insn)) >= 0);
9013 insn_num++;
9014 if (verbose >= 2 && dump)
9015 {
9016 struct DFA_chip
9017 {
9018 unsigned short one_automaton_state;
9019 unsigned short oneb_automaton_state;
9020 unsigned short two_automaton_state;
9021 unsigned short twob_automaton_state;
9022 };
9023
9024 fprintf
9025 (dump,
9026 "// Best %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, mid.stops %d, state %d) for %d\n",
9027 curr_state->unique_num,
9028 (curr_state->originator == NULL
9029 ? -1 : curr_state->originator->unique_num),
9030 curr_state->cost,
9031 curr_state->before_nops_num, curr_state->after_nops_num,
9032 curr_state->accumulated_insns_num, curr_state->branch_deviation,
9033 curr_state->middle_bundle_stops,
9034 ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state,
9035 INSN_UID (insn));
9036 }
9037 /* Find the position in the current bundle window. The window can
9038 contain at most two bundles. A two-bundle window means that
9039 the processor will make two bundle rotations. */
9040 max_pos = get_max_pos (curr_state->dfa_state);
9041 if (max_pos == 6
9042 /* The following (negative template number) means that the
9043 processor did one bundle rotation. */
9044 || (max_pos == 3 && template0 < 0))
9045 {
9046 /* We are at the end of the window -- find template(s) for
9047 its bundle(s). */
9048 pos = max_pos;
9049 if (max_pos == 3)
9050 template0 = get_template (curr_state->dfa_state, 3);
9051 else
9052 {
9053 template1 = get_template (curr_state->dfa_state, 3);
9054 template0 = get_template (curr_state->dfa_state, 6);
9055 }
9056 }
9057 if (max_pos > 3 && template1 < 0)
9058 /* It may happen when we have the stop inside a bundle. */
9059 {
9060 gcc_assert (pos <= 3);
9061 template1 = get_template (curr_state->dfa_state, 3);
9062 pos += 3;
9063 }
9064 if (!asm_p)
9065 /* Emit nops after the current insn. */
9066 for (i = 0; i < curr_state->after_nops_num; i++)
9067 {
9068 nop = gen_nop ();
9069 emit_insn_after (nop, insn);
9070 pos--;
9071 gcc_assert (pos >= 0);
9072 if (pos % 3 == 0)
9073 {
9074 /* We are at the start of a bundle: emit the template
9075 (it should be defined). */
9076 gcc_assert (template0 >= 0);
9077 ia64_add_bundle_selector_before (template0, nop);
9078 /* If we have a two-bundle window, we make one bundle
9079 rotation. Otherwise template0 will be undefined
9080 (a negative value). */
9081 template0 = template1;
9082 template1 = -1;
9083 }
9084 }
9085 /* Move the position backward in the window. Group barrier has
9086 no slot. Asm insn takes all bundle. */
9087 if (INSN_CODE (insn) != CODE_FOR_insn_group_barrier
9088 && GET_CODE (PATTERN (insn)) != ASM_INPUT
9089 && asm_noperands (PATTERN (insn)) < 0)
9090 pos--;
9091 /* A long insn takes 2 slots. */
9092 if (ia64_safe_type (insn) == TYPE_L)
9093 pos--;
9094 gcc_assert (pos >= 0);
9095 if (pos % 3 == 0
9096 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier
9097 && GET_CODE (PATTERN (insn)) != ASM_INPUT
9098 && asm_noperands (PATTERN (insn)) < 0)
9099 {
9100 /* The current insn is at the bundle start: emit the
9101 template. */
9102 gcc_assert (template0 >= 0);
9103 ia64_add_bundle_selector_before (template0, insn);
9104 b = PREV_INSN (insn);
9105 insn = b;
9106 /* See comment above in analogous place for emitting nops
9107 after the insn. */
9108 template0 = template1;
9109 template1 = -1;
9110 }
9111 /* Emit nops before the current insn. */
9112 for (i = 0; i < curr_state->before_nops_num; i++)
9113 {
9114 nop = gen_nop ();
9115 ia64_emit_insn_before (nop, insn);
9116 nop = PREV_INSN (insn);
9117 insn = nop;
9118 pos--;
9119 gcc_assert (pos >= 0);
9120 if (pos % 3 == 0)
9121 {
9122 /* See comment above in analogous place for emitting nops
9123 after the insn. */
9124 gcc_assert (template0 >= 0);
9125 ia64_add_bundle_selector_before (template0, insn);
9126 b = PREV_INSN (insn);
9127 insn = b;
9128 template0 = template1;
9129 template1 = -1;
9130 }
9131 }
9132 }
9133
9134 #ifdef ENABLE_CHECKING
9135 {
9136 /* Assert that middle_bundle_stops was calculated correctly. */
9137 int num = best_state->middle_bundle_stops;
9138 bool start_bundle = true, end_bundle = false;
9139
9140 for (insn = NEXT_INSN (prev_head_insn);
9141 insn && insn != tail;
9142 insn = NEXT_INSN (insn))
9143 {
9144 if (!INSN_P (insn))
9145 continue;
9146 if (recog_memoized (insn) == CODE_FOR_bundle_selector)
9147 start_bundle = true;
9148 else
9149 {
9150 rtx next_insn;
9151
9152 for (next_insn = NEXT_INSN (insn);
9153 next_insn && next_insn != tail;
9154 next_insn = NEXT_INSN (next_insn))
9155 if (INSN_P (next_insn)
9156 && (ia64_safe_itanium_class (next_insn)
9157 != ITANIUM_CLASS_IGNORE
9158 || recog_memoized (next_insn)
9159 == CODE_FOR_bundle_selector)
9160 && GET_CODE (PATTERN (next_insn)) != USE
9161 && GET_CODE (PATTERN (next_insn)) != CLOBBER)
9162 break;
9163
9164 end_bundle = next_insn == NULL_RTX
9165 || next_insn == tail
9166 || (INSN_P (next_insn)
9167 && recog_memoized (next_insn)
9168 == CODE_FOR_bundle_selector);
9169 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier
9170 && !start_bundle && !end_bundle
9171 && next_insn
9172 && GET_CODE (PATTERN (next_insn)) != ASM_INPUT
9173 && asm_noperands (PATTERN (next_insn)) < 0)
9174 num--;
9175
9176 start_bundle = false;
9177 }
9178 }
9179
9180 gcc_assert (num == 0);
9181 }
9182 #endif
9183
9184 free (index_to_bundle_states);
9185 finish_bundle_state_table ();
9186 bundling_p = 0;
9187 dfa_clean_insn_cache ();
9188 }
9189
9190 /* The following function is called at the end of scheduling a BB or an
9191 EBB. After reload, it inserts stop bits and does insn bundling. */
9192
9193 static void
9194 ia64_sched_finish (FILE *dump, int sched_verbose)
9195 {
9196 if (sched_verbose)
9197 fprintf (dump, "// Finishing schedule.\n");
9198 if (!reload_completed)
9199 return;
9200 if (reload_completed)
9201 {
9202 final_emit_insn_group_barriers (dump);
9203 bundling (dump, sched_verbose, current_sched_info->prev_head,
9204 current_sched_info->next_tail);
9205 if (sched_verbose && dump)
9206 fprintf (dump, "// finishing %d-%d\n",
9207 INSN_UID (NEXT_INSN (current_sched_info->prev_head)),
9208 INSN_UID (PREV_INSN (current_sched_info->next_tail)));
9209
9210 return;
9211 }
9212 }
9213
9214 /* The following function inserts stop bits in a scheduled BB or EBB. */
9215
9216 static void
9217 final_emit_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
9218 {
9219 rtx insn;
9220 int need_barrier_p = 0;
9221 int seen_good_insn = 0;
9222
9223 init_insn_group_barriers ();
9224
9225 for (insn = NEXT_INSN (current_sched_info->prev_head);
9226 insn != current_sched_info->next_tail;
9227 insn = NEXT_INSN (insn))
9228 {
9229 if (GET_CODE (insn) == BARRIER)
9230 {
9231 rtx last = prev_active_insn (insn);
9232
9233 if (! last)
9234 continue;
9235 if (GET_CODE (last) == JUMP_INSN
9236 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
9237 last = prev_active_insn (last);
9238 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
9239 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
9240
9241 init_insn_group_barriers ();
9242 seen_good_insn = 0;
9243 need_barrier_p = 0;
9244 }
9245 else if (NONDEBUG_INSN_P (insn))
9246 {
9247 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
9248 {
9249 init_insn_group_barriers ();
9250 seen_good_insn = 0;
9251 need_barrier_p = 0;
9252 }
9253 else if (need_barrier_p || group_barrier_needed (insn)
9254 || (mflag_sched_stop_bits_after_every_cycle
9255 && GET_MODE (insn) == TImode
9256 && seen_good_insn))
9257 {
9258 if (TARGET_EARLY_STOP_BITS)
9259 {
9260 rtx last;
9261
9262 for (last = insn;
9263 last != current_sched_info->prev_head;
9264 last = PREV_INSN (last))
9265 if (INSN_P (last) && GET_MODE (last) == TImode
9266 && stops_p [INSN_UID (last)])
9267 break;
9268 if (last == current_sched_info->prev_head)
9269 last = insn;
9270 last = prev_active_insn (last);
9271 if (last
9272 && recog_memoized (last) != CODE_FOR_insn_group_barrier)
9273 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)),
9274 last);
9275 init_insn_group_barriers ();
9276 for (last = NEXT_INSN (last);
9277 last != insn;
9278 last = NEXT_INSN (last))
9279 if (INSN_P (last))
9280 {
9281 group_barrier_needed (last);
9282 if (recog_memoized (last) >= 0
9283 && important_for_bundling_p (last))
9284 seen_good_insn = 1;
9285 }
9286 }
9287 else
9288 {
9289 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
9290 insn);
9291 init_insn_group_barriers ();
9292 seen_good_insn = 0;
9293 }
9294 group_barrier_needed (insn);
9295 if (recog_memoized (insn) >= 0
9296 && important_for_bundling_p (insn))
9297 seen_good_insn = 1;
9298 }
9299 else if (recog_memoized (insn) >= 0
9300 && important_for_bundling_p (insn))
9301 seen_good_insn = 1;
9302 need_barrier_p = (GET_CODE (insn) == CALL_INSN
9303 || GET_CODE (PATTERN (insn)) == ASM_INPUT
9304 || asm_noperands (PATTERN (insn)) >= 0);
9305 }
9306 }
9307 }
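
/* Note: gen_insn_group_barrier (GEN_INT (3)), emitted above wherever a stop
   is needed, is the pseudo insn whose output template prints the ";;" stop
   bit in the assembly.  */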
9308
9309 \f
9310
9311 /* The following function returns the maximum lookahead depth used by the
9312 multipass DFA insn scheduler; a positive value enables it. */
9313
9314 static int
9315 ia64_first_cycle_multipass_dfa_lookahead (void)
9316 {
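  /* The post-reload value of 6 presumably matches the two-bundle (six-slot)
     window handled by the bundling code above; a smaller lookahead of 4 is
     used before reload.  */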
9317 return (reload_completed ? 6 : 4);
9318 }
9319
9320 /* The following function initializes the variable `dfa_pre_cycle_insn'. */
9321
9322 static void
9323 ia64_init_dfa_pre_cycle_insn (void)
9324 {
9325 if (temp_dfa_state == NULL)
9326 {
9327 dfa_state_size = state_size ();
9328 temp_dfa_state = xmalloc (dfa_state_size);
9329 prev_cycle_state = xmalloc (dfa_state_size);
9330 }
9331 dfa_pre_cycle_insn = make_insn_raw (gen_pre_cycle ());
9332 PREV_INSN (dfa_pre_cycle_insn) = NEXT_INSN (dfa_pre_cycle_insn) = NULL_RTX;
9333 recog_memoized (dfa_pre_cycle_insn);
9334 dfa_stop_insn = make_insn_raw (gen_insn_group_barrier (GEN_INT (3)));
9335 PREV_INSN (dfa_stop_insn) = NEXT_INSN (dfa_stop_insn) = NULL_RTX;
9336 recog_memoized (dfa_stop_insn);
9337 }
9338
9339 /* The following function returns the pseudo insn DFA_PRE_CYCLE_INSN
9340 used by the DFA insn scheduler. */
9341
9342 static rtx
9343 ia64_dfa_pre_cycle_insn (void)
9344 {
9345 return dfa_pre_cycle_insn;
9346 }
9347
9348 /* The following function returns TRUE if PRODUCER (of type ilog or
9349 ld) produces the address for CONSUMER (of type st or stf). */
9350
9351 int
9352 ia64_st_address_bypass_p (rtx producer, rtx consumer)
9353 {
9354 rtx dest, reg, mem;
9355
9356 gcc_assert (producer && consumer);
9357 dest = ia64_single_set (producer);
9358 gcc_assert (dest);
9359 reg = SET_DEST (dest);
9360 gcc_assert (reg);
9361 if (GET_CODE (reg) == SUBREG)
9362 reg = SUBREG_REG (reg);
9363 gcc_assert (GET_CODE (reg) == REG);
9364
9365 dest = ia64_single_set (consumer);
9366 gcc_assert (dest);
9367 mem = SET_DEST (dest);
9368 gcc_assert (mem && GET_CODE (mem) == MEM);
9369 return reg_mentioned_p (reg, mem);
9370 }
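
/* For example, in the sequence

       add r14 = r32, r33 ;;
       st8 [r14] = r35

   the add produces the store's address, so this predicate holds and the
   corresponding bypass applies (register numbers are illustrative).  */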
9371
9372 /* The following function returns TRUE if PRODUCER (of type ilog or
9373 ld) produces the address for CONSUMER (of type ld or fld). */
9374
9375 int
9376 ia64_ld_address_bypass_p (rtx producer, rtx consumer)
9377 {
9378 rtx dest, src, reg, mem;
9379
9380 gcc_assert (producer && consumer);
9381 dest = ia64_single_set (producer);
9382 gcc_assert (dest);
9383 reg = SET_DEST (dest);
9384 gcc_assert (reg);
9385 if (GET_CODE (reg) == SUBREG)
9386 reg = SUBREG_REG (reg);
9387 gcc_assert (GET_CODE (reg) == REG);
9388
9389 src = ia64_single_set (consumer);
9390 gcc_assert (src);
9391 mem = SET_SRC (src);
9392 gcc_assert (mem);
9393
9394 if (GET_CODE (mem) == UNSPEC && XVECLEN (mem, 0) > 0)
9395 mem = XVECEXP (mem, 0, 0);
9396 else if (GET_CODE (mem) == IF_THEN_ELSE)
9397 /* ??? Is this bypass necessary for ld.c? */
9398 {
9399 gcc_assert (XINT (XEXP (XEXP (mem, 0), 0), 1) == UNSPEC_LDCCLR);
9400 mem = XEXP (mem, 1);
9401 }
9402
9403 while (GET_CODE (mem) == SUBREG || GET_CODE (mem) == ZERO_EXTEND)
9404 mem = XEXP (mem, 0);
9405
9406 if (GET_CODE (mem) == UNSPEC)
9407 {
9408 int c = XINT (mem, 1);
9409
9410 gcc_assert (c == UNSPEC_LDA || c == UNSPEC_LDS || c == UNSPEC_LDS_A
9411 || c == UNSPEC_LDSA);
9412 mem = XVECEXP (mem, 0, 0);
9413 }
9414
9415 /* Note that LO_SUM is used for GOT loads. */
9416 gcc_assert (GET_CODE (mem) == LO_SUM || GET_CODE (mem) == MEM);
9417
9418 return reg_mentioned_p (reg, mem);
9419 }
9420
9421 /* The following function returns TRUE if INSN produces an address for a
9422 load/store insn. We place such insns into an M slot because that
9423 decreases their latency. */
9424
9425 int
9426 ia64_produce_address_p (rtx insn)
9427 {
9428 return insn->call;
9429 }
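
/* The insn's CALL flag is reused by this port as an "address producer"
   marker (it is set by the scheduler dependency hooks elsewhere in this
   file); it does not indicate a real call here.  */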
9430
9431 \f
9432 /* Emit pseudo-ops for the assembler to describe predicate relations.
9433 At present this assumes that we only consider predicate pairs to
9434 be mutex, and that the assembler can deduce proper values from
9435 straight-line code. */
9436
9437 static void
9438 emit_predicate_relation_info (void)
9439 {
9440 basic_block bb;
9441
9442 FOR_EACH_BB_REVERSE (bb)
9443 {
9444 int r;
9445 rtx head = BB_HEAD (bb);
9446
9447 /* We only need such notes at code labels. */
9448 if (GET_CODE (head) != CODE_LABEL)
9449 continue;
9450 if (NOTE_INSN_BASIC_BLOCK_P (NEXT_INSN (head)))
9451 head = NEXT_INSN (head);
9452
9453 /* Skip p0, which may be thought to be live due to (reg:DI p0)
9454 grabbing the entire block of predicate registers. */
9455 for (r = PR_REG (2); r < PR_REG (64); r += 2)
9456 if (REGNO_REG_SET_P (df_get_live_in (bb), r))
9457 {
9458 rtx p = gen_rtx_REG (BImode, r);
9459 rtx n = emit_insn_after (gen_pred_rel_mutex (p), head);
9460 if (head == BB_END (bb))
9461 BB_END (bb) = n;
9462 head = n;
9463 }
9464 }
9465
9466 /* Look for conditional calls that do not return, and protect predicate
9467 relations around them. Otherwise the assembler will assume the call
9468 returns, and complain about uses of call-clobbered predicates after
9469 the call. */
9470 FOR_EACH_BB_REVERSE (bb)
9471 {
9472 rtx insn = BB_HEAD (bb);
9473
9474 while (1)
9475 {
9476 if (GET_CODE (insn) == CALL_INSN
9477 && GET_CODE (PATTERN (insn)) == COND_EXEC
9478 && find_reg_note (insn, REG_NORETURN, NULL_RTX))
9479 {
9480 rtx b = emit_insn_before (gen_safe_across_calls_all (), insn);
9481 rtx a = emit_insn_after (gen_safe_across_calls_normal (), insn);
9482 if (BB_HEAD (bb) == insn)
9483 BB_HEAD (bb) = b;
9484 if (BB_END (bb) == insn)
9485 BB_END (bb) = a;
9486 }
9487
9488 if (insn == BB_END (bb))
9489 break;
9490 insn = NEXT_INSN (insn);
9491 }
9492 }
9493 }
9494
9495 /* Perform machine dependent operations on the rtl chain INSNS. */
9496
9497 static void
9498 ia64_reorg (void)
9499 {
9500 /* We are freeing block_for_insn in the toplev to keep compatibility
9501 with old MDEP_REORGS that are not CFG based. Recompute it now. */
9502 compute_bb_for_insn ();
9503
9504 /* If optimizing, we'll have split before scheduling. */
9505 if (optimize == 0)
9506 split_all_insns ();
9507
9508 if (optimize && flag_schedule_insns_after_reload
9509 && dbg_cnt (ia64_sched2))
9510 {
9511 basic_block bb;
9512 timevar_push (TV_SCHED2);
9513 ia64_final_schedule = 1;
9514
9515 /* We can't let modulo-sched prevent us from scheduling any bbs,
9516 since we need the final schedule to produce bundle information. */
9517 FOR_EACH_BB (bb)
9518 bb->flags &= ~BB_DISABLE_SCHEDULE;
9519
9520 initiate_bundle_states ();
9521 ia64_nop = make_insn_raw (gen_nop ());
9522 PREV_INSN (ia64_nop) = NEXT_INSN (ia64_nop) = NULL_RTX;
9523 recog_memoized (ia64_nop);
9524 clocks_length = get_max_uid () + 1;
9525 stops_p = XCNEWVEC (char, clocks_length);
9526
9527 if (ia64_tune == PROCESSOR_ITANIUM2)
9528 {
9529 pos_1 = get_cpu_unit_code ("2_1");
9530 pos_2 = get_cpu_unit_code ("2_2");
9531 pos_3 = get_cpu_unit_code ("2_3");
9532 pos_4 = get_cpu_unit_code ("2_4");
9533 pos_5 = get_cpu_unit_code ("2_5");
9534 pos_6 = get_cpu_unit_code ("2_6");
9535 _0mii_ = get_cpu_unit_code ("2b_0mii.");
9536 _0mmi_ = get_cpu_unit_code ("2b_0mmi.");
9537 _0mfi_ = get_cpu_unit_code ("2b_0mfi.");
9538 _0mmf_ = get_cpu_unit_code ("2b_0mmf.");
9539 _0bbb_ = get_cpu_unit_code ("2b_0bbb.");
9540 _0mbb_ = get_cpu_unit_code ("2b_0mbb.");
9541 _0mib_ = get_cpu_unit_code ("2b_0mib.");
9542 _0mmb_ = get_cpu_unit_code ("2b_0mmb.");
9543 _0mfb_ = get_cpu_unit_code ("2b_0mfb.");
9544 _0mlx_ = get_cpu_unit_code ("2b_0mlx.");
9545 _1mii_ = get_cpu_unit_code ("2b_1mii.");
9546 _1mmi_ = get_cpu_unit_code ("2b_1mmi.");
9547 _1mfi_ = get_cpu_unit_code ("2b_1mfi.");
9548 _1mmf_ = get_cpu_unit_code ("2b_1mmf.");
9549 _1bbb_ = get_cpu_unit_code ("2b_1bbb.");
9550 _1mbb_ = get_cpu_unit_code ("2b_1mbb.");
9551 _1mib_ = get_cpu_unit_code ("2b_1mib.");
9552 _1mmb_ = get_cpu_unit_code ("2b_1mmb.");
9553 _1mfb_ = get_cpu_unit_code ("2b_1mfb.");
9554 _1mlx_ = get_cpu_unit_code ("2b_1mlx.");
9555 }
9556 else
9557 {
9558 pos_1 = get_cpu_unit_code ("1_1");
9559 pos_2 = get_cpu_unit_code ("1_2");
9560 pos_3 = get_cpu_unit_code ("1_3");
9561 pos_4 = get_cpu_unit_code ("1_4");
9562 pos_5 = get_cpu_unit_code ("1_5");
9563 pos_6 = get_cpu_unit_code ("1_6");
9564 _0mii_ = get_cpu_unit_code ("1b_0mii.");
9565 _0mmi_ = get_cpu_unit_code ("1b_0mmi.");
9566 _0mfi_ = get_cpu_unit_code ("1b_0mfi.");
9567 _0mmf_ = get_cpu_unit_code ("1b_0mmf.");
9568 _0bbb_ = get_cpu_unit_code ("1b_0bbb.");
9569 _0mbb_ = get_cpu_unit_code ("1b_0mbb.");
9570 _0mib_ = get_cpu_unit_code ("1b_0mib.");
9571 _0mmb_ = get_cpu_unit_code ("1b_0mmb.");
9572 _0mfb_ = get_cpu_unit_code ("1b_0mfb.");
9573 _0mlx_ = get_cpu_unit_code ("1b_0mlx.");
9574 _1mii_ = get_cpu_unit_code ("1b_1mii.");
9575 _1mmi_ = get_cpu_unit_code ("1b_1mmi.");
9576 _1mfi_ = get_cpu_unit_code ("1b_1mfi.");
9577 _1mmf_ = get_cpu_unit_code ("1b_1mmf.");
9578 _1bbb_ = get_cpu_unit_code ("1b_1bbb.");
9579 _1mbb_ = get_cpu_unit_code ("1b_1mbb.");
9580 _1mib_ = get_cpu_unit_code ("1b_1mib.");
9581 _1mmb_ = get_cpu_unit_code ("1b_1mmb.");
9582 _1mfb_ = get_cpu_unit_code ("1b_1mfb.");
9583 _1mlx_ = get_cpu_unit_code ("1b_1mlx.");
9584 }
9585
9586 if (flag_selective_scheduling2
9587 && !maybe_skip_selective_scheduling ())
9588 run_selective_scheduling ();
9589 else
9590 schedule_ebbs ();
9591
9592 /* Redo the alignment computation, as it might have gone wrong. */
9593 compute_alignments ();
9594
9595 /* We cannot reuse this one because it has been corrupted by the
9596 evil glat. */
9597 finish_bundle_states ();
9598 free (stops_p);
9599 stops_p = NULL;
9600 emit_insn_group_barriers (dump_file);
9601
9602 ia64_final_schedule = 0;
9603 timevar_pop (TV_SCHED2);
9604 }
9605 else
9606 emit_all_insn_group_barriers (dump_file);
9607
9608 df_analyze ();
9609
9610 /* A call must not be the last instruction in a function, so that the
9611 return address stays within the function and unwinding works
9612 properly. Note that IA-64 differs from dwarf2 on this point. */
9613 if (ia64_except_unwind_info (&global_options) == UI_TARGET)
9614 {
9615 rtx insn;
9616 int saw_stop = 0;
9617
9618 insn = get_last_insn ();
9619 if (! INSN_P (insn))
9620 insn = prev_active_insn (insn);
9621 if (insn)
9622 {
9623 /* Skip over insns that expand to nothing. */
9624 while (GET_CODE (insn) == INSN
9625 && get_attr_empty (insn) == EMPTY_YES)
9626 {
9627 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
9628 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
9629 saw_stop = 1;
9630 insn = prev_active_insn (insn);
9631 }
9632 if (GET_CODE (insn) == CALL_INSN)
9633 {
9634 if (! saw_stop)
9635 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
9636 emit_insn (gen_break_f ());
9637 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
9638 }
9639 }
9640 }
9641
9642 emit_predicate_relation_info ();
9643
9644 if (flag_var_tracking)
9645 {
9646 timevar_push (TV_VAR_TRACKING);
9647 variable_tracking_main ();
9648 timevar_pop (TV_VAR_TRACKING);
9649 }
9650 df_finish_pass (false);
9651 }
9652 \f
9653 /* Return true if REGNO is used by the epilogue. */
9654
9655 int
9656 ia64_epilogue_uses (int regno)
9657 {
9658 switch (regno)
9659 {
9660 case R_GR (1):
9661 /* With a call to a function in another module, we will write a new
9662 value to "gp". After returning from such a call, we need to make
9663 sure the function restores the original gp-value, even if the
9664 function itself does not use the gp anymore. */
9665 return !(TARGET_AUTO_PIC || TARGET_NO_PIC);
9666
9667 case IN_REG (0): case IN_REG (1): case IN_REG (2): case IN_REG (3):
9668 case IN_REG (4): case IN_REG (5): case IN_REG (6): case IN_REG (7):
9669 /* For functions defined with the syscall_linkage attribute, all
9670 input registers are marked as live at all function exits. This
9671 prevents the register allocator from using the input registers,
9672 which in turn makes it possible to restart a system call after
9673 an interrupt without having to save/restore the input registers.
9674 This also prevents kernel data from leaking to application code. */
9675 return lookup_attribute ("syscall_linkage",
9676 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))) != NULL;
9677
9678 case R_BR (0):
9679 /* Conditional return patterns can't represent the use of `b0' as
9680 the return address, so we force the value live this way. */
9681 return 1;
9682
9683 case AR_PFS_REGNUM:
9684 /* Likewise for ar.pfs, which is used by br.ret. */
9685 return 1;
9686
9687 default:
9688 return 0;
9689 }
9690 }
9691
9692 /* Return true if REGNO is used by the frame unwinder. */
9693
9694 int
9695 ia64_eh_uses (int regno)
9696 {
9697 unsigned int r;
9698
9699 if (! reload_completed)
9700 return 0;
9701
9702 if (regno == 0)
9703 return 0;
9704
9705 for (r = reg_save_b0; r <= reg_save_ar_lc; r++)
9706 if (regno == current_frame_info.r[r]
9707 || regno == emitted_frame_related_regs[r])
9708 return 1;
9709
9710 return 0;
9711 }
9712 \f
9713 /* Return true if this goes in small data/bss. */
9714
9715 /* ??? We could also support our own long data here, generating movl/add/ld8
9716 instead of addl,ld8/ld8. This makes the code bigger, but should make the
9717 code faster because there is one less load. This also includes incomplete
9718 types, which can't go in sdata/sbss. */
9719
9720 static bool
9721 ia64_in_small_data_p (const_tree exp)
9722 {
9723 if (TARGET_NO_SDATA)
9724 return false;
9725
9726 /* We want to merge strings, so we never consider them small data. */
9727 if (TREE_CODE (exp) == STRING_CST)
9728 return false;
9729
9730 /* Functions are never small data. */
9731 if (TREE_CODE (exp) == FUNCTION_DECL)
9732 return false;
9733
9734 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
9735 {
9736 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
9737
9738 if (strcmp (section, ".sdata") == 0
9739 || strncmp (section, ".sdata.", 7) == 0
9740 || strncmp (section, ".gnu.linkonce.s.", 16) == 0
9741 || strcmp (section, ".sbss") == 0
9742 || strncmp (section, ".sbss.", 6) == 0
9743 || strncmp (section, ".gnu.linkonce.sb.", 17) == 0)
9744 return true;
9745 }
9746 else
9747 {
9748 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
9749
9750 /* If this is an incomplete type with size 0, then we can't put it
9751 in sdata because it might be too big when completed. */
9752 if (size > 0 && size <= ia64_section_threshold)
9753 return true;
9754 }
9755
9756 return false;
9757 }
9758 \f
9759 /* Output assembly directives for prologue regions. */
9760
9761 /* True if the current basic block is the last block of the function. */
9762
9763 static bool last_block;
9764
9765 /* True if we need a copy_state command at the start of the next block. */
9766
9767 static bool need_copy_state;
9768
9769 #ifndef MAX_ARTIFICIAL_LABEL_BYTES
9770 # define MAX_ARTIFICIAL_LABEL_BYTES 30
9771 #endif
9772
9773 /* The function emits unwind directives for the start of an epilogue. */
9774
9775 static void
9776 process_epilogue (FILE *asm_out_file, rtx insn ATTRIBUTE_UNUSED,
9777 bool unwind, bool frame ATTRIBUTE_UNUSED)
9778 {
9779 /* If this isn't the last block of the function, then we need to label the
9780 current state, and copy it back in at the start of the next block. */
9781
9782 if (!last_block)
9783 {
9784 if (unwind)
9785 fprintf (asm_out_file, "\t.label_state %d\n",
9786 ++cfun->machine->state_num);
9787 need_copy_state = true;
9788 }
9789
9790 if (unwind)
9791 fprintf (asm_out_file, "\t.restore sp\n");
9792 }
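
/* For example, for an epilogue that is not in the last block this emits

       .label_state 1
       .restore sp

   and the start of the next block then receives the matching

       .body
       .copy_state 1

   from ia64_asm_unwind_emit below (the state number is illustrative).  */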
9793
9794 /* This function processes a SET pattern for REG_CFA_ADJUST_CFA. */
9795
9796 static void
9797 process_cfa_adjust_cfa (FILE *asm_out_file, rtx pat, rtx insn,
9798 bool unwind, bool frame)
9799 {
9800 rtx dest = SET_DEST (pat);
9801 rtx src = SET_SRC (pat);
9802
9803 if (dest == stack_pointer_rtx)
9804 {
9805 if (GET_CODE (src) == PLUS)
9806 {
9807 rtx op0 = XEXP (src, 0);
9808 rtx op1 = XEXP (src, 1);
9809
9810 gcc_assert (op0 == dest && GET_CODE (op1) == CONST_INT);
9811
9812 if (INTVAL (op1) < 0)
9813 {
9814 gcc_assert (!frame_pointer_needed);
9815 if (unwind)
9816 fprintf (asm_out_file,
9817 "\t.fframe "HOST_WIDE_INT_PRINT_DEC"\n",
9818 -INTVAL (op1));
9819 }
9820 else
9821 process_epilogue (asm_out_file, insn, unwind, frame);
9822 }
9823 else
9824 {
9825 gcc_assert (src == hard_frame_pointer_rtx);
9826 process_epilogue (asm_out_file, insn, unwind, frame);
9827 }
9828 }
9829 else if (dest == hard_frame_pointer_rtx)
9830 {
9831 gcc_assert (src == stack_pointer_rtx);
9832 gcc_assert (frame_pointer_needed);
9833
9834 if (unwind)
9835 fprintf (asm_out_file, "\t.vframe r%d\n",
9836 ia64_dbx_register_number (REGNO (dest)));
9837 }
9838 else
9839 gcc_unreachable ();
9840 }
9841
9842 /* This function processes a SET pattern for REG_CFA_REGISTER. */
9843
9844 static void
9845 process_cfa_register (FILE *asm_out_file, rtx pat, bool unwind)
9846 {
9847 rtx dest = SET_DEST (pat);
9848 rtx src = SET_SRC (pat);
9849 int dest_regno = REGNO (dest);
9850 int src_regno;
9851
9852 if (src == pc_rtx)
9853 {
9854 /* Saving return address pointer. */
9855 if (unwind)
9856 fprintf (asm_out_file, "\t.save rp, r%d\n",
9857 ia64_dbx_register_number (dest_regno));
9858 return;
9859 }
9860
9861 src_regno = REGNO (src);
9862
9863 switch (src_regno)
9864 {
9865 case PR_REG (0):
9866 gcc_assert (dest_regno == current_frame_info.r[reg_save_pr]);
9867 if (unwind)
9868 fprintf (asm_out_file, "\t.save pr, r%d\n",
9869 ia64_dbx_register_number (dest_regno));
9870 break;
9871
9872 case AR_UNAT_REGNUM:
9873 gcc_assert (dest_regno == current_frame_info.r[reg_save_ar_unat]);
9874 if (unwind)
9875 fprintf (asm_out_file, "\t.save ar.unat, r%d\n",
9876 ia64_dbx_register_number (dest_regno));
9877 break;
9878
9879 case AR_LC_REGNUM:
9880 gcc_assert (dest_regno == current_frame_info.r[reg_save_ar_lc]);
9881 if (unwind)
9882 fprintf (asm_out_file, "\t.save ar.lc, r%d\n",
9883 ia64_dbx_register_number (dest_regno));
9884 break;
9885
9886 default:
9887 /* Everything else should indicate being stored to memory. */
9888 gcc_unreachable ();
9889 }
9890 }
9891
9892 /* This function processes a SET pattern for REG_CFA_OFFSET. */
9893
9894 static void
9895 process_cfa_offset (FILE *asm_out_file, rtx pat, bool unwind)
9896 {
9897 rtx dest = SET_DEST (pat);
9898 rtx src = SET_SRC (pat);
9899 int src_regno = REGNO (src);
9900 const char *saveop;
9901 HOST_WIDE_INT off;
9902 rtx base;
9903
9904 gcc_assert (MEM_P (dest));
9905 if (GET_CODE (XEXP (dest, 0)) == REG)
9906 {
9907 base = XEXP (dest, 0);
9908 off = 0;
9909 }
9910 else
9911 {
9912 gcc_assert (GET_CODE (XEXP (dest, 0)) == PLUS
9913 && GET_CODE (XEXP (XEXP (dest, 0), 1)) == CONST_INT);
9914 base = XEXP (XEXP (dest, 0), 0);
9915 off = INTVAL (XEXP (XEXP (dest, 0), 1));
9916 }
9917
9918 if (base == hard_frame_pointer_rtx)
9919 {
9920 saveop = ".savepsp";
9921 off = - off;
9922 }
9923 else
9924 {
9925 gcc_assert (base == stack_pointer_rtx);
9926 saveop = ".savesp";
9927 }
9928
9929 src_regno = REGNO (src);
9930 switch (src_regno)
9931 {
9932 case BR_REG (0):
9933 gcc_assert (!current_frame_info.r[reg_save_b0]);
9934 if (unwind)
9935 fprintf (asm_out_file, "\t%s rp, " HOST_WIDE_INT_PRINT_DEC "\n",
9936 saveop, off);
9937 break;
9938
9939 case PR_REG (0):
9940 gcc_assert (!current_frame_info.r[reg_save_pr]);
9941 if (unwind)
9942 fprintf (asm_out_file, "\t%s pr, " HOST_WIDE_INT_PRINT_DEC "\n",
9943 saveop, off);
9944 break;
9945
9946 case AR_LC_REGNUM:
9947 gcc_assert (!current_frame_info.r[reg_save_ar_lc]);
9948 if (unwind)
9949 fprintf (asm_out_file, "\t%s ar.lc, " HOST_WIDE_INT_PRINT_DEC "\n",
9950 saveop, off);
9951 break;
9952
9953 case AR_PFS_REGNUM:
9954 gcc_assert (!current_frame_info.r[reg_save_ar_pfs]);
9955 if (unwind)
9956 fprintf (asm_out_file, "\t%s ar.pfs, " HOST_WIDE_INT_PRINT_DEC "\n",
9957 saveop, off);
9958 break;
9959
9960 case AR_UNAT_REGNUM:
9961 gcc_assert (!current_frame_info.r[reg_save_ar_unat]);
9962 if (unwind)
9963 fprintf (asm_out_file, "\t%s ar.unat, " HOST_WIDE_INT_PRINT_DEC "\n",
9964 saveop, off);
9965 break;
9966
9967 case GR_REG (4):
9968 case GR_REG (5):
9969 case GR_REG (6):
9970 case GR_REG (7):
9971 if (unwind)
9972 fprintf (asm_out_file, "\t.save.g 0x%x\n",
9973 1 << (src_regno - GR_REG (4)));
9974 break;
9975
9976 case BR_REG (1):
9977 case BR_REG (2):
9978 case BR_REG (3):
9979 case BR_REG (4):
9980 case BR_REG (5):
9981 if (unwind)
9982 fprintf (asm_out_file, "\t.save.b 0x%x\n",
9983 1 << (src_regno - BR_REG (1)));
9984 break;
9985
9986 case FR_REG (2):
9987 case FR_REG (3):
9988 case FR_REG (4):
9989 case FR_REG (5):
9990 if (unwind)
9991 fprintf (asm_out_file, "\t.save.f 0x%x\n",
9992 1 << (src_regno - FR_REG (2)));
9993 break;
9994
9995 case FR_REG (16): case FR_REG (17): case FR_REG (18): case FR_REG (19):
9996 case FR_REG (20): case FR_REG (21): case FR_REG (22): case FR_REG (23):
9997 case FR_REG (24): case FR_REG (25): case FR_REG (26): case FR_REG (27):
9998 case FR_REG (28): case FR_REG (29): case FR_REG (30): case FR_REG (31):
9999 if (unwind)
10000 fprintf (asm_out_file, "\t.save.gf 0x0, 0x%x\n",
10001 1 << (src_regno - FR_REG (12)));
10002 break;
10003
10004 default:
10005 /* ??? For some reason we mark other general registers, even those
10006 we can't represent in the unwind info. Ignore them. */
10007 break;
10008 }
10009 }
10010
10011 /* This function looks at a single insn and emits any directives
10012 required to unwind this insn. */
10013
10014 static void
10015 ia64_asm_unwind_emit (FILE *asm_out_file, rtx insn)
10016 {
10017 bool unwind = ia64_except_unwind_info (&global_options) == UI_TARGET;
10018 bool frame = dwarf2out_do_frame ();
10019 rtx note, pat;
10020 bool handled_one;
10021
10022 if (!unwind && !frame)
10023 return;
10024
10025 if (NOTE_INSN_BASIC_BLOCK_P (insn))
10026 {
10027 last_block = NOTE_BASIC_BLOCK (insn)->next_bb == EXIT_BLOCK_PTR;
10028
10029 /* Restore unwind state from immediately before the epilogue. */
10030 if (need_copy_state)
10031 {
10032 if (unwind)
10033 {
10034 fprintf (asm_out_file, "\t.body\n");
10035 fprintf (asm_out_file, "\t.copy_state %d\n",
10036 cfun->machine->state_num);
10037 }
10038 need_copy_state = false;
10039 }
10040 }
10041
10042 if (GET_CODE (insn) == NOTE || ! RTX_FRAME_RELATED_P (insn))
10043 return;
10044
10045 /* Look for the ALLOC insn. */
10046 if (INSN_CODE (insn) == CODE_FOR_alloc)
10047 {
10048 rtx dest = SET_DEST (XVECEXP (PATTERN (insn), 0, 0));
10049 int dest_regno = REGNO (dest);
10050
10051 /* If this is the final destination for ar.pfs, then this must
10052 be the alloc in the prologue. */
10053 if (dest_regno == current_frame_info.r[reg_save_ar_pfs])
10054 {
10055 if (unwind)
10056 fprintf (asm_out_file, "\t.save ar.pfs, r%d\n",
10057 ia64_dbx_register_number (dest_regno));
10058 }
10059 else
10060 {
10061 /* This must be an alloc before a sibcall. We must drop the
10062 old frame info. The easiest way to drop the old frame
10063 info is to ensure we had a ".restore sp" directive
10064 followed by a new prologue. If the procedure doesn't
10065 have a memory-stack frame, we'll issue a dummy ".restore
10066 sp" now. */
10067 if (current_frame_info.total_size == 0 && !frame_pointer_needed)
10068 /* If we haven't done process_epilogue () yet, do it now. */
10069 process_epilogue (asm_out_file, insn, unwind, frame);
10070 if (unwind)
10071 fprintf (asm_out_file, "\t.prologue\n");
10072 }
10073 return;
10074 }
10075
10076 handled_one = false;
10077 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
10078 switch (REG_NOTE_KIND (note))
10079 {
10080 case REG_CFA_ADJUST_CFA:
10081 pat = XEXP (note, 0);
10082 if (pat == NULL)
10083 pat = PATTERN (insn);
10084 process_cfa_adjust_cfa (asm_out_file, pat, insn, unwind, frame);
10085 handled_one = true;
10086 break;
10087
10088 case REG_CFA_OFFSET:
10089 pat = XEXP (note, 0);
10090 if (pat == NULL)
10091 pat = PATTERN (insn);
10092 process_cfa_offset (asm_out_file, pat, unwind);
10093 handled_one = true;
10094 break;
10095
10096 case REG_CFA_REGISTER:
10097 pat = XEXP (note, 0);
10098 if (pat == NULL)
10099 pat = PATTERN (insn);
10100 process_cfa_register (asm_out_file, pat, unwind);
10101 handled_one = true;
10102 break;
10103
10104 case REG_FRAME_RELATED_EXPR:
10105 case REG_CFA_DEF_CFA:
10106 case REG_CFA_EXPRESSION:
10107 case REG_CFA_RESTORE:
10108 case REG_CFA_SET_VDRAP:
10109 /* Not used in the ia64 port. */
10110 gcc_unreachable ();
10111
10112 default:
10113 /* Not a frame-related note. */
10114 break;
10115 }
10116
10117 /* All REG_FRAME_RELATED_P insns, besides ALLOC, are marked with the
10118 explicit action to take. No guessing required. */
10119 gcc_assert (handled_one);
10120 }
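
/* Taken together, the routines above emit a directive stream along the
   lines of

       .prologue
       .save ar.pfs, r35
       .fframe 16
       .save rp, r36
       .body
       ...
       .restore sp

   for a typical frame; the register numbers and frame size shown are
   illustrative only.  */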
10121
10122 /* Implement TARGET_ASM_EMIT_EXCEPT_PERSONALITY. */
10123
10124 static void
10125 ia64_asm_emit_except_personality (rtx personality)
10126 {
10127 fputs ("\t.personality\t", asm_out_file);
10128 output_addr_const (asm_out_file, personality);
10129 fputc ('\n', asm_out_file);
10130 }
10131
10132 /* Implement TARGET_ASM_INITIALIZE_SECTIONS. */
10133
10134 static void
10135 ia64_asm_init_sections (void)
10136 {
10137 exception_section = get_unnamed_section (0, output_section_asm_op,
10138 "\t.handlerdata");
10139 }
10140
10141 /* Implement TARGET_DEBUG_UNWIND_INFO. */
10142
10143 static enum unwind_info_type
10144 ia64_debug_unwind_info (void)
10145 {
10146 return UI_TARGET;
10147 }
10148 \f
10149 enum ia64_builtins
10150 {
10151 IA64_BUILTIN_BSP,
10152 IA64_BUILTIN_COPYSIGNQ,
10153 IA64_BUILTIN_FABSQ,
10154 IA64_BUILTIN_FLUSHRS,
10155 IA64_BUILTIN_INFQ,
10156 IA64_BUILTIN_HUGE_VALQ,
10157 IA64_BUILTIN_max
10158 };
10159
10160 static GTY(()) tree ia64_builtins[(int) IA64_BUILTIN_max];
10161
10162 void
10163 ia64_init_builtins (void)
10164 {
10165 tree fpreg_type;
10166 tree float80_type;
10167 tree decl;
10168
10169 /* The __fpreg type. */
10170 fpreg_type = make_node (REAL_TYPE);
10171 TYPE_PRECISION (fpreg_type) = 82;
10172 layout_type (fpreg_type);
10173 (*lang_hooks.types.register_builtin_type) (fpreg_type, "__fpreg");
10174
10175 /* The __float80 type. */
10176 float80_type = make_node (REAL_TYPE);
10177 TYPE_PRECISION (float80_type) = 80;
10178 layout_type (float80_type);
10179 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
10180
10181 /* The __float128 type. */
10182 if (!TARGET_HPUX)
10183 {
10184 tree ftype;
10185 tree float128_type = make_node (REAL_TYPE);
10186
10187 TYPE_PRECISION (float128_type) = 128;
10188 layout_type (float128_type);
10189 (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
10190
10191 /* TFmode support builtins. */
10192 ftype = build_function_type_list (float128_type, NULL_TREE);
10193 decl = add_builtin_function ("__builtin_infq", ftype,
10194 IA64_BUILTIN_INFQ, BUILT_IN_MD,
10195 NULL, NULL_TREE);
10196 ia64_builtins[IA64_BUILTIN_INFQ] = decl;
10197
10198 decl = add_builtin_function ("__builtin_huge_valq", ftype,
10199 IA64_BUILTIN_HUGE_VALQ, BUILT_IN_MD,
10200 NULL, NULL_TREE);
10201 ia64_builtins[IA64_BUILTIN_HUGE_VALQ] = decl;
10202
10203 ftype = build_function_type_list (float128_type,
10204 float128_type,
10205 NULL_TREE);
10206 decl = add_builtin_function ("__builtin_fabsq", ftype,
10207 IA64_BUILTIN_FABSQ, BUILT_IN_MD,
10208 "__fabstf2", NULL_TREE);
10209 TREE_READONLY (decl) = 1;
10210 ia64_builtins[IA64_BUILTIN_FABSQ] = decl;
10211
10212 ftype = build_function_type_list (float128_type,
10213 float128_type,
10214 float128_type,
10215 NULL_TREE);
10216 decl = add_builtin_function ("__builtin_copysignq", ftype,
10217 IA64_BUILTIN_COPYSIGNQ, BUILT_IN_MD,
10218 "__copysigntf3", NULL_TREE);
10219 TREE_READONLY (decl) = 1;
10220 ia64_builtins[IA64_BUILTIN_COPYSIGNQ] = decl;
10221 }
10222 else
10223 /* Under HPUX, this is a synonym for "long double". */
10224 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
10225 "__float128");
10226
10227 /* Fwrite on VMS is non-standard. */
10228 #if TARGET_ABI_OPEN_VMS
10229 vms_patch_builtins ();
10230 #endif
10231
10232 #define def_builtin(name, type, code) \
10233 add_builtin_function ((name), (type), (code), BUILT_IN_MD, \
10234 NULL, NULL_TREE)
10235
10236 decl = def_builtin ("__builtin_ia64_bsp",
10237 build_function_type_list (ptr_type_node, NULL_TREE),
10238 IA64_BUILTIN_BSP);
10239 ia64_builtins[IA64_BUILTIN_BSP] = decl;
10240
10241 decl = def_builtin ("__builtin_ia64_flushrs",
10242 build_function_type_list (void_type_node, NULL_TREE),
10243 IA64_BUILTIN_FLUSHRS);
10244 ia64_builtins[IA64_BUILTIN_FLUSHRS] = decl;
10245
10246 #undef def_builtin
10247
10248 if (TARGET_HPUX)
10249 {
10250 if ((decl = builtin_decl_explicit (BUILT_IN_FINITE)) != NULL_TREE)
10251 set_user_assembler_name (decl, "_Isfinite");
10252 if ((decl = builtin_decl_explicit (BUILT_IN_FINITEF)) != NULL_TREE)
10253 set_user_assembler_name (decl, "_Isfinitef");
10254 if ((decl = builtin_decl_explicit (BUILT_IN_FINITEL)) != NULL_TREE)
10255 set_user_assembler_name (decl, "_Isfinitef128");
10256 }
10257 }
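
/* Example uses of the machine-specific builtins registered above
   (illustrative only; see ia64_expand_builtin for the expansions):

       void *bsp = __builtin_ia64_bsp ();   reads the RSE backing store pointer
       __builtin_ia64_flushrs ();           flushes the register stack to memory
       __float128 x = __builtin_infq ();    quad-precision infinity  */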
10258
10259 rtx
10260 ia64_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
10261 enum machine_mode mode ATTRIBUTE_UNUSED,
10262 int ignore ATTRIBUTE_UNUSED)
10263 {
10264 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10265 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10266
10267 switch (fcode)
10268 {
10269 case IA64_BUILTIN_BSP:
10270 if (! target || ! register_operand (target, DImode))
10271 target = gen_reg_rtx (DImode);
10272 emit_insn (gen_bsp_value (target));
10273 #ifdef POINTERS_EXTEND_UNSIGNED
10274 target = convert_memory_address (ptr_mode, target);
10275 #endif
10276 return target;
10277
10278 case IA64_BUILTIN_FLUSHRS:
10279 emit_insn (gen_flushrs ());
10280 return const0_rtx;
10281
10282 case IA64_BUILTIN_INFQ:
10283 case IA64_BUILTIN_HUGE_VALQ:
10284 {
10285 enum machine_mode target_mode = TYPE_MODE (TREE_TYPE (exp));
10286 REAL_VALUE_TYPE inf;
10287 rtx tmp;
10288
10289 real_inf (&inf);
10290 tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, target_mode);
10291
10292 tmp = validize_mem (force_const_mem (target_mode, tmp));
10293
10294 if (target == 0)
10295 target = gen_reg_rtx (target_mode);
10296
10297 emit_move_insn (target, tmp);
10298 return target;
10299 }
10300
10301 case IA64_BUILTIN_FABSQ:
10302 case IA64_BUILTIN_COPYSIGNQ:
10303 return expand_call (exp, target, ignore);
10304
10305 default:
10306 gcc_unreachable ();
10307 }
10308
10309 return NULL_RTX;
10310 }
10311
10312 /* Return the ia64 builtin for CODE. */
10313
10314 static tree
10315 ia64_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
10316 {
10317 if (code >= IA64_BUILTIN_max)
10318 return error_mark_node;
10319
10320 return ia64_builtins[code];
10321 }
10322
10323 /* On HP-UX IA64, aggregate parameters are passed in the
10324 most significant bits of the stack slot. */
10325
10326 enum direction
10327 ia64_hpux_function_arg_padding (enum machine_mode mode, const_tree type)
10328 {
10329 /* Exception to normal case for structures/unions/etc. */
10330
10331 if (type && AGGREGATE_TYPE_P (type)
10332 && int_size_in_bytes (type) < UNITS_PER_WORD)
10333 return upward;
10334
10335 /* Fall back to the default. */
10336 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
10337 }
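
/* For example, a 3-byte struct argument gets "upward" padding here, which
   (per the comment above) places it in the most significant bits of its
   stack slot; anything else falls through to DEFAULT_FUNCTION_ARG_PADDING.  */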
10338
10339 /* Emit text to declare externally defined variables and functions, because
10340 the Intel assembler does not support undefined externals. */
10341
10342 void
10343 ia64_asm_output_external (FILE *file, tree decl, const char *name)
10344 {
10345 /* We output the name if and only if TREE_SYMBOL_REFERENCED is
10346 set in order to avoid putting out names that are never really
10347 used. */
10348 if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
10349 {
10350 /* maybe_assemble_visibility will return 1 if the assembler
10351 visibility directive is output. */
10352 int need_visibility = ((*targetm.binds_local_p) (decl)
10353 && maybe_assemble_visibility (decl));
10354
10355 /* GNU as does not need anything here, but the HP linker does
10356 need something for external functions. */
10357 if ((TARGET_HPUX_LD || !TARGET_GNU_AS)
10358 && TREE_CODE (decl) == FUNCTION_DECL)
10359 (*targetm.asm_out.globalize_decl_name) (file, decl);
10360 else if (need_visibility && !TARGET_GNU_AS)
10361 (*targetm.asm_out.globalize_label) (file, name);
10362 }
10363 }
10364
10365 /* Set SImode div/mod functions; init_integral_libfuncs only initializes
10366 modes of word_mode and larger. Rename the TFmode libfuncs using the
10367 HPUX conventions. __divtf3 is used for XFmode. We need to keep it for
10368 backward compatibility. */
10369
10370 static void
10371 ia64_init_libfuncs (void)
10372 {
10373 set_optab_libfunc (sdiv_optab, SImode, "__divsi3");
10374 set_optab_libfunc (udiv_optab, SImode, "__udivsi3");
10375 set_optab_libfunc (smod_optab, SImode, "__modsi3");
10376 set_optab_libfunc (umod_optab, SImode, "__umodsi3");
10377
10378 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
10379 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
10380 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
10381 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
10382 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
10383
10384 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
10385 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
10386 set_conv_libfunc (sext_optab, TFmode, XFmode, "_U_Qfcnvff_f80_to_quad");
10387 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
10388 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
10389 set_conv_libfunc (trunc_optab, XFmode, TFmode, "_U_Qfcnvff_quad_to_f80");
10390
10391 set_conv_libfunc (sfix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_sgl");
10392 set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
10393 set_conv_libfunc (sfix_optab, TImode, TFmode, "_U_Qfcnvfxt_quad_to_quad");
10394 set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxut_quad_to_sgl");
10395 set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxut_quad_to_dbl");
10396
10397 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
10398 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
10399 set_conv_libfunc (sfloat_optab, TFmode, TImode, "_U_Qfcnvxf_quad_to_quad");
10400 /* HP-UX 11.23 libc does not have a function for unsigned
10401 SImode-to-TFmode conversion. */
10402 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_U_Qfcnvxuf_dbl_to_quad");
10403 }
10404
10405 /* Rename all the TFmode libfuncs using the HPUX conventions. */
10406
10407 static void
10408 ia64_hpux_init_libfuncs (void)
10409 {
10410 ia64_init_libfuncs ();
10411
10412 /* The HP SI millicode division and mod functions expect DI arguments.
10413 By turning them off completely we avoid using both libgcc and the
10414 non-standard millicode routines and use the HP DI millicode routines
10415 instead. */
10416
10417 set_optab_libfunc (sdiv_optab, SImode, 0);
10418 set_optab_libfunc (udiv_optab, SImode, 0);
10419 set_optab_libfunc (smod_optab, SImode, 0);
10420 set_optab_libfunc (umod_optab, SImode, 0);
10421
10422 set_optab_libfunc (sdiv_optab, DImode, "__milli_divI");
10423 set_optab_libfunc (udiv_optab, DImode, "__milli_divU");
10424 set_optab_libfunc (smod_optab, DImode, "__milli_remI");
10425 set_optab_libfunc (umod_optab, DImode, "__milli_remU");
10426
10427 /* HP-UX libc has TF min/max/abs routines in it. */
10428 set_optab_libfunc (smin_optab, TFmode, "_U_Qfmin");
10429 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
10430 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
10431
10432 /* ia64_expand_compare uses this. */
10433 cmptf_libfunc = init_one_libfunc ("_U_Qfcmp");
10434
10435 /* These should never be used. */
10436 set_optab_libfunc (eq_optab, TFmode, 0);
10437 set_optab_libfunc (ne_optab, TFmode, 0);
10438 set_optab_libfunc (gt_optab, TFmode, 0);
10439 set_optab_libfunc (ge_optab, TFmode, 0);
10440 set_optab_libfunc (lt_optab, TFmode, 0);
10441 set_optab_libfunc (le_optab, TFmode, 0);
10442 }
10443
10444 /* Rename the division and modulus functions in VMS. */
10445
10446 static void
10447 ia64_vms_init_libfuncs (void)
10448 {
10449 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
10450 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
10451 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
10452 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
10453 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
10454 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
10455 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
10456 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
10457 abort_libfunc = init_one_libfunc ("decc$abort");
10458 memcmp_libfunc = init_one_libfunc ("decc$memcmp");
10459 #ifdef MEM_LIBFUNCS_INIT
10460 MEM_LIBFUNCS_INIT;
10461 #endif
10462 }
10463
10464 /* Rename the TFmode libfuncs available from soft-fp in glibc using
10465 the HPUX conventions. */
10466
10467 static void
10468 ia64_sysv4_init_libfuncs (void)
10469 {
10470 ia64_init_libfuncs ();
10471
10472 /* These functions are not part of the HPUX TFmode interface. We
10473 use them instead of _U_Qfcmp, which doesn't work the way we
10474 expect. */
10475 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
10476 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
10477 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
10478 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
10479 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
10480 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
10481
10482 /* We leave out _U_Qfmin, _U_Qfmax and _U_Qfabs since soft-fp in
10483 glibc doesn't have them. */
10484 }
10485
10486 /* Use soft-fp. */
10487
10488 static void
10489 ia64_soft_fp_init_libfuncs (void)
10490 {
10491 }
10492
10493 static bool
10494 ia64_vms_valid_pointer_mode (enum machine_mode mode)
10495 {
10496 return (mode == SImode || mode == DImode);
10497 }
10498 \f
10499 /* For HPUX, it is illegal to have relocations in shared segments. */
10500
10501 static int
10502 ia64_hpux_reloc_rw_mask (void)
10503 {
10504 return 3;
10505 }
10506
10507 /* For others, relax this so that relocations to local data go in
10508 read-only segments, but we still cannot allow global relocations
10509 in read-only segments. */
10510
10511 static int
10512 ia64_reloc_rw_mask (void)
10513 {
10514 return flag_pic ? 3 : 2;
10515 }
10516
10517 /* Return the section to use for X. The only special thing we do here
10518 is to honor small data. */
10519
10520 static section *
10521 ia64_select_rtx_section (enum machine_mode mode, rtx x,
10522 unsigned HOST_WIDE_INT align)
10523 {
10524 if (GET_MODE_SIZE (mode) > 0
10525 && GET_MODE_SIZE (mode) <= ia64_section_threshold
10526 && !TARGET_NO_SDATA)
10527 return sdata_section;
10528 else
10529 return default_elf_select_rtx_section (mode, x, align);
10530 }
10531
10532 static unsigned int
10533 ia64_section_type_flags (tree decl, const char *name, int reloc)
10534 {
10535 unsigned int flags = 0;
10536
10537 if (strcmp (name, ".sdata") == 0
10538 || strncmp (name, ".sdata.", 7) == 0
10539 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
10540 || strncmp (name, ".sdata2.", 8) == 0
10541 || strncmp (name, ".gnu.linkonce.s2.", 17) == 0
10542 || strcmp (name, ".sbss") == 0
10543 || strncmp (name, ".sbss.", 6) == 0
10544 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
10545 flags = SECTION_SMALL;
10546
10547 #if TARGET_ABI_OPEN_VMS
10548 if (decl && DECL_ATTRIBUTES (decl)
10549 && lookup_attribute ("common_object", DECL_ATTRIBUTES (decl)))
10550 flags |= SECTION_VMS_OVERLAY;
10551 #endif
10552
10553 flags |= default_section_type_flags (decl, name, reloc);
10554 return flags;
10555 }
10556
10557 /* Returns true if FNTYPE (a FUNCTION_TYPE or a METHOD_TYPE) returns a
10558 structure type and that the address of that type should be passed
10559 in out0, rather than in r8. */
10560
10561 static bool
10562 ia64_struct_retval_addr_is_first_parm_p (tree fntype)
10563 {
10564 tree ret_type = TREE_TYPE (fntype);
10565
10566 /* The Itanium C++ ABI requires that out0, rather than r8, be used
10567 as the structure return address parameter, if the return value
10568 type has a non-trivial copy constructor or destructor. It is not
10569 clear if this same convention should be used for other
10570 programming languages. Until G++ 3.4, we incorrectly used r8 for
10571 these return values. */
10572 return (abi_version_at_least (2)
10573 && ret_type
10574 && TYPE_MODE (ret_type) == BLKmode
10575 && TREE_ADDRESSABLE (ret_type)
10576 && strcmp (lang_hooks.name, "GNU C++") == 0);
10577 }
10578
10579 /* Output the assembler code for a thunk function. THUNK_DECL is the
10580 declaration for the thunk function itself, FUNCTION is the decl for
10581 the target function. DELTA is an immediate constant offset to be
10582 added to THIS. If VCALL_OFFSET is nonzero, the word at
10583 *(*this + vcall_offset) should be added to THIS. */
10584
10585 static void
10586 ia64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
10587 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
10588 tree function)
10589 {
10590 rtx this_rtx, insn, funexp;
10591 unsigned int this_parmno;
10592 unsigned int this_regno;
10593 rtx delta_rtx;
10594
10595 reload_completed = 1;
10596 epilogue_completed = 1;
10597
10598 /* Set things up as ia64_expand_prologue might. */
10599 last_scratch_gr_reg = 15;
10600
10601 memset (&current_frame_info, 0, sizeof (current_frame_info));
10602 current_frame_info.spill_cfa_off = -16;
10603 current_frame_info.n_input_regs = 1;
10604 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
10605
10606 /* Mark the end of the (empty) prologue. */
10607 emit_note (NOTE_INSN_PROLOGUE_END);
10608
10609 /* Figure out whether "this" will be the first parameter (the
10610 typical case) or the second parameter (as happens when the
10611 virtual function returns certain class objects). */
10612 this_parmno
10613 = (ia64_struct_retval_addr_is_first_parm_p (TREE_TYPE (thunk))
10614 ? 1 : 0);
10615 this_regno = IN_REG (this_parmno);
10616 if (!TARGET_REG_NAMES)
10617 reg_names[this_regno] = ia64_reg_numbers[this_parmno];
10618
10619 this_rtx = gen_rtx_REG (Pmode, this_regno);
10620
10621 /* Apply the constant offset, if required. */
10622 delta_rtx = GEN_INT (delta);
10623 if (TARGET_ILP32)
10624 {
10625 rtx tmp = gen_rtx_REG (ptr_mode, this_regno);
10626 REG_POINTER (tmp) = 1;
10627 if (delta && satisfies_constraint_I (delta_rtx))
10628 {
10629 emit_insn (gen_ptr_extend_plus_imm (this_rtx, tmp, delta_rtx));
10630 delta = 0;
10631 }
10632 else
10633 emit_insn (gen_ptr_extend (this_rtx, tmp));
10634 }
10635 if (delta)
10636 {
10637 if (!satisfies_constraint_I (delta_rtx))
10638 {
10639 rtx tmp = gen_rtx_REG (Pmode, 2);
10640 emit_move_insn (tmp, delta_rtx);
10641 delta_rtx = tmp;
10642 }
10643 emit_insn (gen_adddi3 (this_rtx, this_rtx, delta_rtx));
10644 }
10645
10646 /* Apply the offset from the vtable, if required. */
10647 if (vcall_offset)
10648 {
10649 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
10650 rtx tmp = gen_rtx_REG (Pmode, 2);
10651
10652 if (TARGET_ILP32)
10653 {
10654 rtx t = gen_rtx_REG (ptr_mode, 2);
10655 REG_POINTER (t) = 1;
10656 emit_move_insn (t, gen_rtx_MEM (ptr_mode, this_rtx));
10657 if (satisfies_constraint_I (vcall_offset_rtx))
10658 {
10659 emit_insn (gen_ptr_extend_plus_imm (tmp, t, vcall_offset_rtx));
10660 vcall_offset = 0;
10661 }
10662 else
10663 emit_insn (gen_ptr_extend (tmp, t));
10664 }
10665 else
10666 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
10667
10668 if (vcall_offset)
10669 {
10670 if (!satisfies_constraint_J (vcall_offset_rtx))
10671 {
10672 rtx tmp2 = gen_rtx_REG (Pmode, next_scratch_gr_reg ());
10673 emit_move_insn (tmp2, vcall_offset_rtx);
10674 vcall_offset_rtx = tmp2;
10675 }
10676 emit_insn (gen_adddi3 (tmp, tmp, vcall_offset_rtx));
10677 }
10678
10679 if (TARGET_ILP32)
10680 emit_insn (gen_zero_extendsidi2 (tmp, gen_rtx_MEM (ptr_mode, tmp)));
10681 else
10682 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
10683
10684 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
10685 }
10686
10687 /* Generate a tail call to the target function. */
10688 if (! TREE_USED (function))
10689 {
10690 assemble_external (function);
10691 TREE_USED (function) = 1;
10692 }
10693 funexp = XEXP (DECL_RTL (function), 0);
10694 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
10695 ia64_expand_call (NULL_RTX, funexp, NULL_RTX, 1);
10696 insn = get_last_insn ();
10697 SIBLING_CALL_P (insn) = 1;
10698
10699 /* Code generation for calls relies on splitting. */
10700 reload_completed = 1;
10701 epilogue_completed = 1;
10702 try_split (PATTERN (insn), insn, 0);
10703
10704 emit_barrier ();
10705
10706 /* Run just enough of rest_of_compilation to get the insns emitted.
10707 There's not really enough bulk here to make other passes such as
10708 instruction scheduling worthwhile. Note that use_thunk calls
10709 assemble_start_function and assemble_end_function. */
10710
10711 insn_locators_alloc ();
10712 emit_all_insn_group_barriers (NULL);
10713 insn = get_insns ();
10714 shorten_branches (insn);
10715 final_start_function (insn, file, 1);
10716 final (insn, file, 1);
10717 final_end_function ();
10718
10719 reload_completed = 0;
10720 epilogue_completed = 0;
10721 }
10722
10723 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
10724
10725 static rtx
10726 ia64_struct_value_rtx (tree fntype,
10727 int incoming ATTRIBUTE_UNUSED)
10728 {
10729 if (TARGET_ABI_OPEN_VMS
10730 || (fntype && ia64_struct_retval_addr_is_first_parm_p (fntype)))
10731 return NULL_RTX;
10732 return gen_rtx_REG (Pmode, GR_REG (8));
10733 }
10734
10735 static bool
10736 ia64_scalar_mode_supported_p (enum machine_mode mode)
10737 {
10738 switch (mode)
10739 {
10740 case QImode:
10741 case HImode:
10742 case SImode:
10743 case DImode:
10744 case TImode:
10745 return true;
10746
10747 case SFmode:
10748 case DFmode:
10749 case XFmode:
10750 case RFmode:
10751 return true;
10752
10753 case TFmode:
10754 return true;
10755
10756 default:
10757 return false;
10758 }
10759 }
10760
10761 static bool
10762 ia64_vector_mode_supported_p (enum machine_mode mode)
10763 {
10764 switch (mode)
10765 {
10766 case V8QImode:
10767 case V4HImode:
10768 case V2SImode:
10769 return true;
10770
10771 case V2SFmode:
10772 return true;
10773
10774 default:
10775 return false;
10776 }
10777 }
10778
10779 /* Implement the FUNCTION_PROFILER macro. */
10780
10781 void
10782 ia64_output_function_profiler (FILE *file, int labelno)
10783 {
10784 bool indirect_call;
10785
10786 /* If the function needs a static chain and the static chain
10787 register is r15, we use an indirect call so as to bypass
10788 the PLT stub in case the executable is dynamically linked,
10789 because the stub clobbers r15 as per 5.3.6 of the psABI.
10790 We don't need to do that in non-canonical PIC mode. */
10791
10792 if (cfun->static_chain_decl && !TARGET_NO_PIC && !TARGET_AUTO_PIC)
10793 {
10794 gcc_assert (STATIC_CHAIN_REGNUM == 15);
10795 indirect_call = true;
10796 }
10797 else
10798 indirect_call = false;
10799
10800 if (TARGET_GNU_AS)
10801 fputs ("\t.prologue 4, r40\n", file);
10802 else
10803 fputs ("\t.prologue\n\t.save ar.pfs, r40\n", file);
10804 fputs ("\talloc out0 = ar.pfs, 8, 0, 4, 0\n", file);
10805
10806 if (NO_PROFILE_COUNTERS)
10807 fputs ("\tmov out3 = r0\n", file);
10808 else
10809 {
10810 char buf[20];
10811 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
10812
10813 if (TARGET_AUTO_PIC)
10814 fputs ("\tmovl out3 = @gprel(", file);
10815 else
10816 fputs ("\taddl out3 = @ltoff(", file);
10817 assemble_name (file, buf);
10818 if (TARGET_AUTO_PIC)
10819 fputs (")\n", file);
10820 else
10821 fputs ("), r1\n", file);
10822 }
10823
10824 if (indirect_call)
10825 fputs ("\taddl r14 = @ltoff(@fptr(_mcount)), r1\n", file);
10826 fputs ("\t;;\n", file);
10827
10828 fputs ("\t.save rp, r42\n", file);
10829 fputs ("\tmov out2 = b0\n", file);
10830 if (indirect_call)
10831 fputs ("\tld8 r14 = [r14]\n\t;;\n", file);
10832 fputs ("\t.body\n", file);
10833 fputs ("\tmov out1 = r1\n", file);
10834 if (indirect_call)
10835 {
10836 fputs ("\tld8 r16 = [r14], 8\n\t;;\n", file);
10837 fputs ("\tmov b6 = r16\n", file);
10838 fputs ("\tld8 r1 = [r14]\n", file);
10839 fputs ("\tbr.call.sptk.many b0 = b6\n\t;;\n", file);
10840 }
10841 else
10842 fputs ("\tbr.call.sptk.many b0 = _mcount\n\t;;\n", file);
10843 }
10844
10845 static GTY(()) rtx mcount_func_rtx;
10846 static rtx
10847 gen_mcount_func_rtx (void)
10848 {
10849 if (!mcount_func_rtx)
10850 mcount_func_rtx = init_one_libfunc ("_mcount");
10851 return mcount_func_rtx;
10852 }
10853
10854 void
10855 ia64_profile_hook (int labelno)
10856 {
10857 rtx label, ip;
10858
10859 if (NO_PROFILE_COUNTERS)
10860 label = const0_rtx;
10861 else
10862 {
10863 char buf[30];
10864 const char *label_name;
10865 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
10866 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
10867 label = gen_rtx_SYMBOL_REF (Pmode, label_name);
10868 SYMBOL_REF_FLAGS (label) = SYMBOL_FLAG_LOCAL;
10869 }
10870 ip = gen_reg_rtx (Pmode);
10871 emit_insn (gen_ip_value (ip));
10872 emit_library_call (gen_mcount_func_rtx (), LCT_NORMAL,
10873 VOIDmode, 3,
10874 gen_rtx_REG (Pmode, BR_REG (0)), Pmode,
10875 ip, Pmode,
10876 label, Pmode);
10877 }
10878
10879 /* Return the mangling of TYPE if it is an extended fundamental type. */
10880
10881 static const char *
10882 ia64_mangle_type (const_tree type)
10883 {
10884 type = TYPE_MAIN_VARIANT (type);
10885
10886 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
10887 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
10888 return NULL;
10889
10890 /* On HP-UX, "long double" is mangled as "e", so __float128 is also
10891 mangled as "e"; only the non-HP-UX case needs handling here. */
10892 if (!TARGET_HPUX && TYPE_MODE (type) == TFmode)
10893 return "g";
10894 /* On HP-UX, "e" is not available as a mangling of __float80 so use
10895 an extended mangling. Elsewhere, "e" is available since long
10896 double is 80 bits. */
10897 if (TYPE_MODE (type) == XFmode)
10898 return TARGET_HPUX ? "u9__float80" : "e";
10899 if (TYPE_MODE (type) == RFmode)
10900 return "u7__fpreg";
10901 return NULL;
10902 }
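
/* For example: __float128 (TFmode) mangles as "g" on non-HP-UX targets,
   __float80 (XFmode) mangles as "e" (or "u9__float80" on HP-UX), and
   __fpreg (RFmode) mangles as "u7__fpreg".  */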
10903
10904 /* Return the diagnostic message string if conversion from FROMTYPE to
10905 TOTYPE is not allowed, NULL otherwise. */
10906 static const char *
10907 ia64_invalid_conversion (const_tree fromtype, const_tree totype)
10908 {
10909 /* Reject nontrivial conversion to or from __fpreg. */
10910 if (TYPE_MODE (fromtype) == RFmode
10911 && TYPE_MODE (totype) != RFmode
10912 && TYPE_MODE (totype) != VOIDmode)
10913 return N_("invalid conversion from %<__fpreg%>");
10914 if (TYPE_MODE (totype) == RFmode
10915 && TYPE_MODE (fromtype) != RFmode)
10916 return N_("invalid conversion to %<__fpreg%>");
10917 return NULL;
10918 }
10919
10920 /* Return the diagnostic message string if the unary operation OP is
10921 not permitted on TYPE, NULL otherwise. */
10922 static const char *
10923 ia64_invalid_unary_op (int op, const_tree type)
10924 {
10925 /* Reject operations on __fpreg other than unary + or &. */
10926 if (TYPE_MODE (type) == RFmode
10927 && op != CONVERT_EXPR
10928 && op != ADDR_EXPR)
10929 return N_("invalid operation on %<__fpreg%>");
10930 return NULL;
10931 }
10932
10933 /* Return the diagnostic message string if the binary operation OP is
10934 not permitted on TYPE1 and TYPE2, NULL otherwise. */
10935 static const char *
10936 ia64_invalid_binary_op (int op ATTRIBUTE_UNUSED, const_tree type1, const_tree type2)
10937 {
10938 /* Reject operations on __fpreg. */
10939 if (TYPE_MODE (type1) == RFmode || TYPE_MODE (type2) == RFmode)
10940 return N_("invalid operation on %<__fpreg%>");
10941 return NULL;
10942 }
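/* Minimal usage sketch (hypothetical user code) for the two hooks above:
   only unary '+' and '&' are accepted on __fpreg, and no binary operation
   involving __fpreg is.  */
#if 0
__fpreg a, b, *p;
p = &a;       /* OK: ADDR_EXPR is allowed               */
b = -a;       /* error: invalid operation on '__fpreg'  */
b = a + b;    /* error: invalid operation on '__fpreg'  */
#endif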
10943
10944 /* HP-UX version_id attribute.
10945 For object foo, if the version_id is set to 1234 put out an alias
10946 of '.alias foo "foo{1234}"'. We can't use "foo{1234}" in anything
10947 other than an alias statement because it is not a legal symbol name. */
10948
10949 static tree
10950 ia64_handle_version_id_attribute (tree *node ATTRIBUTE_UNUSED,
10951 tree name ATTRIBUTE_UNUSED,
10952 tree args,
10953 int flags ATTRIBUTE_UNUSED,
10954 bool *no_add_attrs)
10955 {
10956 tree arg = TREE_VALUE (args);
10957
10958 if (TREE_CODE (arg) != STRING_CST)
10959 {
10960 error ("version attribute is not a string");
10961 *no_add_attrs = true;
10962 return NULL_TREE;
10963 }
10964 return NULL_TREE;
10965 }
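/* Usage sketch based on the comment above (hypothetical declaration; the
   actual '.alias' output is produced elsewhere in the HP-UX support).  */
#if 0
extern int foo __attribute__ ((version_id ("1234")));
/* expected assembler output:  .alias foo "foo{1234}"  */
#endif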
10966
10967 /* Target hook for c_mode_for_suffix. */
10968
10969 static enum machine_mode
10970 ia64_c_mode_for_suffix (char suffix)
10971 {
10972 if (suffix == 'q')
10973 return TFmode;
10974 if (suffix == 'w')
10975 return XFmode;
10976
10977 return VOIDmode;
10978 }
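/* Sketch of the effect on C constants (hypothetical user code): with this
   hook, the 'q' and 'w' literal suffixes select the 128-bit and 80-bit
   floating-point types.  */
#if 0
__float128 q = 1.0q;   /* 'q' suffix -> TFmode constant */
__float80  w = 1.0w;   /* 'w' suffix -> XFmode constant */
#endif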
10979
10980 static GTY(()) rtx ia64_dconst_0_5_rtx;
10981
10982 rtx
10983 ia64_dconst_0_5 (void)
10984 {
10985 if (! ia64_dconst_0_5_rtx)
10986 {
10987 REAL_VALUE_TYPE rv;
10988 real_from_string (&rv, "0.5");
10989 ia64_dconst_0_5_rtx = const_double_from_real_value (rv, DFmode);
10990 }
10991 return ia64_dconst_0_5_rtx;
10992 }
10993
10994 static GTY(()) rtx ia64_dconst_0_375_rtx;
10995
10996 rtx
10997 ia64_dconst_0_375 (void)
10998 {
10999 if (! ia64_dconst_0_375_rtx)
11000 {
11001 REAL_VALUE_TYPE rv;
11002 real_from_string (&rv, "0.375");
11003 ia64_dconst_0_375_rtx = const_double_from_real_value (rv, DFmode);
11004 }
11005 return ia64_dconst_0_375_rtx;
11006 }
11007
11008 static enum machine_mode
11009 ia64_get_reg_raw_mode (int regno)
11010 {
11011 if (FR_REGNO_P (regno))
11012 return XFmode;
11013 return default_get_reg_raw_mode (regno);
11014 }
11015
11016 /* Always default to .text section until HP-UX linker is fixed. */
11017
11018 ATTRIBUTE_UNUSED static section *
11019 ia64_hpux_function_section (tree decl ATTRIBUTE_UNUSED,
11020 enum node_frequency freq ATTRIBUTE_UNUSED,
11021 bool startup ATTRIBUTE_UNUSED,
11022 bool exit ATTRIBUTE_UNUSED)
11023 {
11024 return NULL;
11025 }
11026 \f
11027 /* Construct (set target (vec_select op0 (parallel perm))) and
11028 return true if that's a valid instruction in the active ISA. */
11029
11030 static bool
11031 expand_vselect (rtx target, rtx op0, const unsigned char *perm, unsigned nelt)
11032 {
11033 rtx rperm[MAX_VECT_LEN], x;
11034 unsigned i;
11035
11036 for (i = 0; i < nelt; ++i)
11037 rperm[i] = GEN_INT (perm[i]);
11038
11039 x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nelt, rperm));
11040 x = gen_rtx_VEC_SELECT (GET_MODE (target), op0, x);
11041 x = gen_rtx_SET (VOIDmode, target, x);
11042
11043 x = emit_insn (x);
11044 if (recog_memoized (x) < 0)
11045 {
11046 remove_insn (x);
11047 return false;
11048 }
11049 return true;
11050 }
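/* Illustrative sketch of the insn built above (register numbers are
   hypothetical): for a V4HImode op0 and perm = {1, 0, 3, 2} the emitted
   pattern is

     (set (reg:V4HI 340)
          (vec_select:V4HI (reg:V4HI 341)
            (parallel [(const_int 1) (const_int 0)
                       (const_int 3) (const_int 2)])))

   If no mux/mix pattern in ia64.md recognizes it, recog_memoized fails
   and the insn is removed again so the caller can try another strategy.  */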
11051
11052 /* Similar, but generate a vec_concat from op0 and op1 as well. */
11053
11054 static bool
11055 expand_vselect_vconcat (rtx target, rtx op0, rtx op1,
11056 const unsigned char *perm, unsigned nelt)
11057 {
11058 enum machine_mode v2mode;
11059 rtx x;
11060
11061 v2mode = GET_MODE_2XWIDER_MODE (GET_MODE (op0));
11062 x = gen_rtx_VEC_CONCAT (v2mode, op0, op1);
11063 return expand_vselect (target, x, perm, nelt);
11064 }
11065
11066 /* Try to expand a no-op permutation. */
11067
11068 static bool
11069 expand_vec_perm_identity (struct expand_vec_perm_d *d)
11070 {
11071 unsigned i, nelt = d->nelt;
11072
11073 for (i = 0; i < nelt; ++i)
11074 if (d->perm[i] != i)
11075 return false;
11076
11077 if (!d->testing_p)
11078 emit_move_insn (d->target, d->op0);
11079
11080 return true;
11081 }
11082
11083 /* Try to expand D via a shrp instruction. */
11084
11085 static bool
11086 expand_vec_perm_shrp (struct expand_vec_perm_d *d)
11087 {
11088 unsigned i, nelt = d->nelt, shift, mask;
11089 rtx tmp, hi, lo;
11090
11091 /* ??? Don't force V2SFmode into the integer registers. */
11092 if (d->vmode == V2SFmode)
11093 return false;
11094
11095 mask = (d->one_operand_p ? nelt - 1 : 2 * nelt - 1);
11096
11097 shift = d->perm[0];
11098 if (BYTES_BIG_ENDIAN && shift > nelt)
11099 return false;
11100
11101 for (i = 1; i < nelt; ++i)
11102 if (d->perm[i] != ((shift + i) & mask))
11103 return false;
11104
11105 if (d->testing_p)
11106 return true;
11107
11108 hi = shift < nelt ? d->op1 : d->op0;
11109 lo = shift < nelt ? d->op0 : d->op1;
11110
11111 shift %= nelt;
11112
11113 shift *= GET_MODE_UNIT_SIZE (d->vmode) * BITS_PER_UNIT;
11114
11115 /* We've eliminated the shift 0 case via expand_vec_perm_identity. */
11116 gcc_assert (IN_RANGE (shift, 1, 63));
11117
11118 /* Recall that big-endian elements are numbered starting at the top of
11119 the register. Ideally we'd have a shift-left-pair. But since we
11120 don't, convert to a shift the other direction. */
11121 if (BYTES_BIG_ENDIAN)
11122 shift = 64 - shift;
11123
11124 tmp = gen_reg_rtx (DImode);
11125 hi = gen_lowpart (DImode, hi);
11126 lo = gen_lowpart (DImode, lo);
11127 emit_insn (gen_shrp (tmp, hi, lo, GEN_INT (shift)));
11128
11129 emit_move_insn (d->target, gen_lowpart (d->vmode, tmp));
11130 return true;
11131 }
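/* Worked example (little-endian V4HImode, two operands): perm = {1,2,3,4}
   gives shift = 1, so the result is elements 1..3 of op0 followed by
   element 0 of op1.  With hi = op1 and lo = op0 viewed as DImode, that is
   shrp tmp = hi, lo, 16 (one 16-bit element).  */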
11132
11133 /* Try to instantiate D in a single instruction. */
11134
11135 static bool
11136 expand_vec_perm_1 (struct expand_vec_perm_d *d)
11137 {
11138 unsigned i, nelt = d->nelt;
11139 unsigned char perm2[MAX_VECT_LEN];
11140
11141 /* Try single-operand selections. */
11142 if (d->one_operand_p)
11143 {
11144 if (expand_vec_perm_identity (d))
11145 return true;
11146 if (expand_vselect (d->target, d->op0, d->perm, nelt))
11147 return true;
11148 }
11149
11150 /* Try two operand selections. */
11151 if (expand_vselect_vconcat (d->target, d->op0, d->op1, d->perm, nelt))
11152 return true;
11153
11154 /* Recognize interleave style patterns with reversed operands. */
11155 if (!d->one_operand_p)
11156 {
11157 for (i = 0; i < nelt; ++i)
11158 {
11159 unsigned e = d->perm[i];
11160 if (e >= nelt)
11161 e -= nelt;
11162 else
11163 e += nelt;
11164 perm2[i] = e;
11165 }
11166
11167 if (expand_vselect_vconcat (d->target, d->op1, d->op0, perm2, nelt))
11168 return true;
11169 }
11170
11171 if (expand_vec_perm_shrp (d))
11172 return true;
11173
11174 /* ??? Look for deposit-like permutations where most of the result
11175 comes from one vector unchanged and the rest comes from a
11176 sequential hunk of the other vector. */
11177
11178 return false;
11179 }
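/* Worked example of the reversed-operand case above (V4HImode):
   perm = {4,0,5,1} selecting from (op0, op1) is rewritten as
   perm2 = {0,4,1,5} selecting from (op1, op0), i.e. an ordinary
   interleave-low, which the port's mix/unpack patterns can typically
   match as a single insn.  */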
11180
11181 /* Pattern match broadcast permutations. */
11182
11183 static bool
11184 expand_vec_perm_broadcast (struct expand_vec_perm_d *d)
11185 {
11186 unsigned i, elt, nelt = d->nelt;
11187 unsigned char perm2[2];
11188 rtx temp;
11189 bool ok;
11190
11191 if (!d->one_operand_p)
11192 return false;
11193
11194 elt = d->perm[0];
11195 for (i = 1; i < nelt; ++i)
11196 if (d->perm[i] != elt)
11197 return false;
11198
11199 switch (d->vmode)
11200 {
11201 case V2SImode:
11202 case V2SFmode:
11203 /* Implementable by interleave. */
11204 perm2[0] = elt;
11205 perm2[1] = elt + 2;
11206 ok = expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, 2);
11207 gcc_assert (ok);
11208 break;
11209
11210 case V8QImode:
11211 /* Implementable by extract + broadcast. */
11212 if (BYTES_BIG_ENDIAN)
11213 elt = 7 - elt;
11214 elt *= BITS_PER_UNIT;
11215 temp = gen_reg_rtx (DImode);
11216 emit_insn (gen_extzv (temp, gen_lowpart (DImode, d->op0),
11217 GEN_INT (8), GEN_INT (elt)));
11218 emit_insn (gen_mux1_brcst_qi (d->target, gen_lowpart (QImode, temp)));
11219 break;
11220
11221 case V4HImode:
11222 /* Should have been matched directly by vec_select. */
11223 default:
11224 gcc_unreachable ();
11225 }
11226
11227 return true;
11228 }
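/* Worked example (little-endian V8QImode): perm = {3,3,3,3,3,3,3,3} gives
   elt = 3, so the code extracts bits 24..31 of op0 into a scratch DImode
   register and then replicates that byte into every lane with the mux1
   broadcast form.  */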
11229
11230 /* A subroutine of ia64_expand_vec_perm_const_1. Try to simplify a
11231 two vector permutation into a single vector permutation by using
11232 an interleave operation to merge the vectors. */
11233
11234 static bool
11235 expand_vec_perm_interleave_2 (struct expand_vec_perm_d *d)
11236 {
11237 struct expand_vec_perm_d dremap, dfinal;
11238 unsigned char remap[2 * MAX_VECT_LEN];
11239 unsigned contents, i, nelt, nelt2;
11240 unsigned h0, h1, h2, h3;
11241 rtx seq;
11242 bool ok;
11243
11244 if (d->one_operand_p)
11245 return false;
11246
11247 nelt = d->nelt;
11248 nelt2 = nelt / 2;
11249
11250 /* Examine from whence the elements come. */
11251 contents = 0;
11252 for (i = 0; i < nelt; ++i)
11253 contents |= 1u << d->perm[i];
11254
11255 memset (remap, 0xff, sizeof (remap));
11256 dremap = *d;
11257
11258 h0 = (1u << nelt2) - 1;
11259 h1 = h0 << nelt2;
11260 h2 = h0 << nelt;
11261 h3 = h0 << (nelt + nelt2);
11262
11263 if ((contents & (h0 | h2)) == contents) /* punpck even halves */
11264 {
11265 for (i = 0; i < nelt; ++i)
11266 {
11267 unsigned which = i / 2 + (i & 1 ? nelt : 0);
11268 remap[which] = i;
11269 dremap.perm[i] = which;
11270 }
11271 }
11272 else if ((contents & (h1 | h3)) == contents) /* punpck odd halves */
11273 {
11274 for (i = 0; i < nelt; ++i)
11275 {
11276 unsigned which = i / 2 + nelt2 + (i & 1 ? nelt : 0);
11277 remap[which] = i;
11278 dremap.perm[i] = which;
11279 }
11280 }
11281 else if ((contents & 0x5555) == contents) /* mix even elements */
11282 {
11283 for (i = 0; i < nelt; ++i)
11284 {
11285 unsigned which = (i & ~1) + (i & 1 ? nelt : 0);
11286 remap[which] = i;
11287 dremap.perm[i] = which;
11288 }
11289 }
11290 else if ((contents & 0xaaaa) == contents) /* mix odd elements */
11291 {
11292 for (i = 0; i < nelt; ++i)
11293 {
11294 unsigned which = (i | 1) + (i & 1 ? nelt : 0);
11295 remap[which] = i;
11296 dremap.perm[i] = which;
11297 }
11298 }
11299 else if (floor_log2 (contents) - ctz_hwi (contents) < (int) nelt) /* shrp */
11300 {
11301 unsigned shift = ctz_hwi (contents);
11302 for (i = 0; i < nelt; ++i)
11303 {
11304 unsigned which = (i + shift) & (2 * nelt - 1);
11305 remap[which] = i;
11306 dremap.perm[i] = which;
11307 }
11308 }
11309 else
11310 return false;
11311
11312 /* Use the remapping array set up above to move the elements from their
11313 swizzled locations into their final destinations. */
11314 dfinal = *d;
11315 for (i = 0; i < nelt; ++i)
11316 {
11317 unsigned e = remap[d->perm[i]];
11318 gcc_assert (e < nelt);
11319 dfinal.perm[i] = e;
11320 }
11321 dfinal.op0 = gen_reg_rtx (dfinal.vmode);
11322 dfinal.op1 = dfinal.op0;
11323 dfinal.one_operand_p = true;
11324 dremap.target = dfinal.op0;
11325
11326 /* Test if the final remap can be done with a single insn. For V4HImode
11327 this *will* succeed. For V8QImode or V2SImode it may not. */
11328 start_sequence ();
11329 ok = expand_vec_perm_1 (&dfinal);
11330 seq = get_insns ();
11331 end_sequence ();
11332 if (!ok)
11333 return false;
11334 if (d->testing_p)
11335 return true;
11336
11337 ok = expand_vec_perm_1 (&dremap);
11338 gcc_assert (ok);
11339
11340 emit_insn (seq);
11341 return true;
11342 }
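/* Worked example (V4HImode): perm = {2,6,0,4} uses only even-numbered
   elements, so the "mix even elements" case applies.  The remap step
   builds an intermediate t = {op0[0], op1[0], op0[2], op1[2]} (i.e.
   dremap.perm = {0,4,2,6}) with one two-operand insn, and the final step
   applies the single-operand permutation {2,3,0,1} to t, swapping its
   halves to produce {op0[2], op1[2], op0[0], op1[0]}.  */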
11343
11344 /* A subroutine of ia64_expand_vec_perm_const_1. Emit a full V4HImode
11345 constant permutation via two mux2 and a merge. */
11346
11347 static bool
11348 expand_vec_perm_v4hi_5 (struct expand_vec_perm_d *d)
11349 {
11350 unsigned char perm2[4];
11351 rtx rmask[4];
11352 unsigned i;
11353 rtx t0, t1, mask, x;
11354 bool ok;
11355
11356 if (d->vmode != V4HImode || d->one_operand_p)
11357 return false;
11358 if (d->testing_p)
11359 return true;
11360
11361 for (i = 0; i < 4; ++i)
11362 {
11363 perm2[i] = d->perm[i] & 3;
11364 rmask[i] = (d->perm[i] & 4 ? const0_rtx : constm1_rtx);
11365 }
11366 mask = gen_rtx_CONST_VECTOR (V4HImode, gen_rtvec_v (4, rmask));
11367 mask = force_reg (V4HImode, mask);
11368
11369 t0 = gen_reg_rtx (V4HImode);
11370 t1 = gen_reg_rtx (V4HImode);
11371
11372 ok = expand_vselect (t0, d->op0, perm2, 4);
11373 gcc_assert (ok);
11374 ok = expand_vselect (t1, d->op1, perm2, 4);
11375 gcc_assert (ok);
11376
11377 x = gen_rtx_AND (V4HImode, mask, t0);
11378 emit_insn (gen_rtx_SET (VOIDmode, t0, x));
11379
11380 x = gen_rtx_NOT (V4HImode, mask);
11381 x = gen_rtx_AND (V4HImode, x, t1);
11382 emit_insn (gen_rtx_SET (VOIDmode, t1, x));
11383
11384 x = gen_rtx_IOR (V4HImode, t0, t1);
11385 emit_insn (gen_rtx_SET (VOIDmode, d->target, x));
11386
11387 return true;
11388 }
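/* Worked example: perm = {0,5,6,3}, which is unlikely to be matched by the
   cheaper strategies.  Both mux2 selections use perm2 = {0,1,2,3} (so
   t0 = op0 and t1 = op1 up to copies), the mask is {-1,0,0,-1}, and the
   and/and-not/or merge yields {op0[0], op1[1], op1[2], op0[3]}.  */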
11389
11390 /* The guts of ia64_expand_vec_perm_const, also used by the ok hook.
11391 With all of the interface bits taken care of, perform the expansion
11392 in D and return true on success. */
11393
11394 static bool
11395 ia64_expand_vec_perm_const_1 (struct expand_vec_perm_d *d)
11396 {
11397 if (expand_vec_perm_1 (d))
11398 return true;
11399 if (expand_vec_perm_broadcast (d))
11400 return true;
11401 if (expand_vec_perm_interleave_2 (d))
11402 return true;
11403 if (expand_vec_perm_v4hi_5 (d))
11404 return true;
11405 return false;
11406 }
11407
11408 bool
11409 ia64_expand_vec_perm_const (rtx operands[4])
11410 {
11411 struct expand_vec_perm_d d;
11412 unsigned char perm[MAX_VECT_LEN];
11413 int i, nelt, which;
11414 rtx sel;
11415
11416 d.target = operands[0];
11417 d.op0 = operands[1];
11418 d.op1 = operands[2];
11419 sel = operands[3];
11420
11421 d.vmode = GET_MODE (d.target);
11422 gcc_assert (VECTOR_MODE_P (d.vmode));
11423 d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
11424 d.testing_p = false;
11425
11426 gcc_assert (GET_CODE (sel) == CONST_VECTOR);
11427 gcc_assert (XVECLEN (sel, 0) == nelt);
11428 gcc_checking_assert (sizeof (d.perm) == sizeof (perm));
11429
11430 for (i = which = 0; i < nelt; ++i)
11431 {
11432 rtx e = XVECEXP (sel, 0, i);
11433 int ei = INTVAL (e) & (2 * nelt - 1);
11434
11435 which |= (ei < nelt ? 1 : 2);
11436 d.perm[i] = ei;
11437 perm[i] = ei;
11438 }
11439
11440 switch (which)
11441 {
11442 default:
11443 gcc_unreachable ();
11444
11445 case 3:
11446 if (!rtx_equal_p (d.op0, d.op1))
11447 {
11448 d.one_operand_p = false;
11449 break;
11450 }
11451
11452 /* The elements of PERM do not suggest that only the first operand
11453 is used, but both operands are identical. Allow easier matching
11454 of the permutation by folding the permutation into the single
11455 input vector. */
11456 for (i = 0; i < nelt; ++i)
11457 if (d.perm[i] >= nelt)
11458 d.perm[i] -= nelt;
11459 /* FALLTHRU */
11460
11461 case 1:
11462 d.op1 = d.op0;
11463 d.one_operand_p = true;
11464 break;
11465
11466 case 2:
11467 for (i = 0; i < nelt; ++i)
11468 d.perm[i] -= nelt;
11469 d.op0 = d.op1;
11470 d.one_operand_p = true;
11471 break;
11472 }
11473
11474 if (ia64_expand_vec_perm_const_1 (&d))
11475 return true;
11476
11477 /* If the mask says both arguments are needed, but they are the same,
11478 the above tried to expand with one_operand_p true. If that didn't
11479 work, retry with one_operand_p false, as that's what we used in _ok. */
11480 if (which == 3 && d.one_operand_p)
11481 {
11482 memcpy (d.perm, perm, sizeof (perm));
11483 d.one_operand_p = false;
11484 return ia64_expand_vec_perm_const_1 (&d);
11485 }
11486
11487 return false;
11488 }
11489
11490 /* Implement targetm.vectorize.vec_perm_const_ok. */
11491
11492 static bool
11493 ia64_vectorize_vec_perm_const_ok (enum machine_mode vmode,
11494 const unsigned char *sel)
11495 {
11496 struct expand_vec_perm_d d;
11497 unsigned int i, nelt, which;
11498 bool ret;
11499
11500 d.vmode = vmode;
11501 d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
11502 d.testing_p = true;
11503
11504 /* Extract the values from the vector CST into the permutation
11505 array in D. */
11506 memcpy (d.perm, sel, nelt);
11507 for (i = which = 0; i < nelt; ++i)
11508 {
11509 unsigned char e = d.perm[i];
11510 gcc_assert (e < 2 * nelt);
11511 which |= (e < nelt ? 1 : 2);
11512 }
11513
11514 /* If all elements come from the second vector, fold them into the first. */
11515 if (which == 2)
11516 for (i = 0; i < nelt; ++i)
11517 d.perm[i] -= nelt;
11518
11519 /* Check whether the mask can be applied to the vector type. */
11520 d.one_operand_p = (which != 3);
11521
11522 /* Otherwise we have to go through the motions and see if we can
11523 figure out how to generate the requested permutation. */
11524 d.target = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 1);
11525 d.op1 = d.op0 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 2);
11526 if (!d.one_operand_p)
11527 d.op1 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 3);
11528
11529 start_sequence ();
11530 ret = ia64_expand_vec_perm_const_1 (&d);
11531 end_sequence ();
11532
11533 return ret;
11534 }
11535
11536 void
11537 ia64_expand_vec_setv2sf (rtx operands[3])
11538 {
11539 struct expand_vec_perm_d d;
11540 unsigned int which;
11541 bool ok;
11542
11543 d.target = operands[0];
11544 d.op0 = operands[0];
11545 d.op1 = gen_reg_rtx (V2SFmode);
11546 d.vmode = V2SFmode;
11547 d.nelt = 2;
11548 d.one_operand_p = false;
11549 d.testing_p = false;
11550
11551 which = INTVAL (operands[2]);
11552 gcc_assert (which <= 1);
11553 d.perm[0] = 1 - which;
11554 d.perm[1] = which + 2;
11555
11556 emit_insn (gen_fpack (d.op1, operands[1], CONST0_RTX (SFmode)));
11557
11558 ok = ia64_expand_vec_perm_const_1 (&d);
11559 gcc_assert (ok);
11560 }
11561
11562 void
11563 ia64_expand_vec_perm_even_odd (rtx target, rtx op0, rtx op1, int odd)
11564 {
11565 struct expand_vec_perm_d d;
11566 enum machine_mode vmode = GET_MODE (target);
11567 unsigned int i, nelt = GET_MODE_NUNITS (vmode);
11568 bool ok;
11569
11570 d.target = target;
11571 d.op0 = op0;
11572 d.op1 = op1;
11573 d.vmode = vmode;
11574 d.nelt = nelt;
11575 d.one_operand_p = false;
11576 d.testing_p = false;
11577
11578 for (i = 0; i < nelt; ++i)
11579 d.perm[i] = i * 2 + odd;
11580
11581 ok = ia64_expand_vec_perm_const_1 (&d);
11582 gcc_assert (ok);
11583 }
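/* For example, with V4HImode inputs and odd == 0 this requests
   perm = {0,2,4,6}, i.e. the even-numbered elements of op0 followed by
   those of op1; with odd == 1 it requests {1,3,5,7}.  */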
11584
11585 #include "gt-ia64.h"