1 /* Subroutines for insn-output.c for HPPA.
2 Copyright (C) 1992-2018 Free Software Foundation, Inc.
3 Contributed by Tim Moore (moore@cs.utah.edu), based on sparc.c
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #define IN_TARGET_CODE 1
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "memmodel.h"
27 #include "backend.h"
28 #include "target.h"
29 #include "rtl.h"
30 #include "tree.h"
31 #include "df.h"
32 #include "tm_p.h"
33 #include "stringpool.h"
34 #include "attribs.h"
35 #include "optabs.h"
36 #include "regs.h"
37 #include "emit-rtl.h"
38 #include "recog.h"
39 #include "diagnostic-core.h"
40 #include "insn-attr.h"
41 #include "alias.h"
42 #include "fold-const.h"
43 #include "stor-layout.h"
44 #include "varasm.h"
45 #include "calls.h"
46 #include "output.h"
47 #include "except.h"
48 #include "explow.h"
49 #include "expr.h"
50 #include "reload.h"
51 #include "common/common-target.h"
52 #include "langhooks.h"
53 #include "cfgrtl.h"
54 #include "opts.h"
55 #include "builtins.h"
56
57 /* This file should be included last. */
58 #include "target-def.h"
59
60 /* Return nonzero if there is a bypass for the output of
61 OUT_INSN and the fp store IN_INSN. */
62 int
63 pa_fpstore_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
64 {
65 machine_mode store_mode;
66 machine_mode other_mode;
67 rtx set;
68
69 if (recog_memoized (in_insn) < 0
70 || (get_attr_type (in_insn) != TYPE_FPSTORE
71 && get_attr_type (in_insn) != TYPE_FPSTORE_LOAD)
72 || recog_memoized (out_insn) < 0)
73 return 0;
74
75 store_mode = GET_MODE (SET_SRC (PATTERN (in_insn)));
76
77 set = single_set (out_insn);
78 if (!set)
79 return 0;
80
81 other_mode = GET_MODE (SET_SRC (set));
82
83 return (GET_MODE_SIZE (store_mode) == GET_MODE_SIZE (other_mode));
84 }
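/* Illustration (not from the original source): a DFmode FP
   computation feeding a DFmode floating point store has matching
   8-byte modes, so the bypass applies; if the store were SFmode,
   the 4-byte size would not match and the bypass would not apply.  */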
85
86
87 #ifndef DO_FRAME_NOTES
88 #ifdef INCOMING_RETURN_ADDR_RTX
89 #define DO_FRAME_NOTES 1
90 #else
91 #define DO_FRAME_NOTES 0
92 #endif
93 #endif
94
95 static void pa_option_override (void);
96 static void copy_reg_pointer (rtx, rtx);
97 static void fix_range (const char *);
98 static int hppa_register_move_cost (machine_mode mode, reg_class_t,
99 reg_class_t);
100 static int hppa_address_cost (rtx, machine_mode mode, addr_space_t, bool);
101 static bool hppa_rtx_costs (rtx, machine_mode, int, int, int *, bool);
102 static inline rtx force_mode (machine_mode, rtx);
103 static void pa_reorg (void);
104 static void pa_combine_instructions (void);
105 static int pa_can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, int, rtx,
106 rtx, rtx);
107 static bool forward_branch_p (rtx_insn *);
108 static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
109 static void compute_zdepdi_operands (unsigned HOST_WIDE_INT, unsigned *);
110 static int compute_movmem_length (rtx_insn *);
111 static int compute_clrmem_length (rtx_insn *);
112 static bool pa_assemble_integer (rtx, unsigned int, int);
113 static void remove_useless_addtr_insns (int);
114 static void store_reg (int, HOST_WIDE_INT, int);
115 static void store_reg_modify (int, int, HOST_WIDE_INT);
116 static void load_reg (int, HOST_WIDE_INT, int);
117 static void set_reg_plus_d (int, int, HOST_WIDE_INT, int);
118 static rtx pa_function_value (const_tree, const_tree, bool);
119 static rtx pa_libcall_value (machine_mode, const_rtx);
120 static bool pa_function_value_regno_p (const unsigned int);
121 static void pa_output_function_prologue (FILE *);
122 static void update_total_code_bytes (unsigned int);
123 static void pa_output_function_epilogue (FILE *);
124 static int pa_adjust_cost (rtx_insn *, int, rtx_insn *, int, unsigned int);
125 static int pa_adjust_priority (rtx_insn *, int);
126 static int pa_issue_rate (void);
127 static int pa_reloc_rw_mask (void);
128 static void pa_som_asm_init_sections (void) ATTRIBUTE_UNUSED;
129 static section *pa_som_tm_clone_table_section (void) ATTRIBUTE_UNUSED;
130 static section *pa_select_section (tree, int, unsigned HOST_WIDE_INT)
131 ATTRIBUTE_UNUSED;
132 static void pa_encode_section_info (tree, rtx, int);
133 static const char *pa_strip_name_encoding (const char *);
134 static bool pa_function_ok_for_sibcall (tree, tree);
135 static void pa_globalize_label (FILE *, const char *)
136 ATTRIBUTE_UNUSED;
137 static void pa_asm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
138 HOST_WIDE_INT, tree);
139 #if !defined(USE_COLLECT2)
140 static void pa_asm_out_constructor (rtx, int);
141 static void pa_asm_out_destructor (rtx, int);
142 #endif
143 static void pa_init_builtins (void);
144 static rtx pa_expand_builtin (tree, rtx, rtx, machine_mode mode, int);
145 static rtx hppa_builtin_saveregs (void);
146 static void hppa_va_start (tree, rtx);
147 static tree hppa_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *);
148 static bool pa_scalar_mode_supported_p (scalar_mode);
149 static bool pa_commutative_p (const_rtx x, int outer_code);
150 static void copy_fp_args (rtx_insn *) ATTRIBUTE_UNUSED;
151 static int length_fp_args (rtx_insn *) ATTRIBUTE_UNUSED;
152 static rtx hppa_legitimize_address (rtx, rtx, machine_mode);
153 static inline void pa_file_start_level (void) ATTRIBUTE_UNUSED;
154 static inline void pa_file_start_space (int) ATTRIBUTE_UNUSED;
155 static inline void pa_file_start_file (int) ATTRIBUTE_UNUSED;
156 static inline void pa_file_start_mcount (const char*) ATTRIBUTE_UNUSED;
157 static void pa_elf_file_start (void) ATTRIBUTE_UNUSED;
158 static void pa_som_file_start (void) ATTRIBUTE_UNUSED;
159 static void pa_linux_file_start (void) ATTRIBUTE_UNUSED;
160 static void pa_hpux64_gas_file_start (void) ATTRIBUTE_UNUSED;
161 static void pa_hpux64_hpas_file_start (void) ATTRIBUTE_UNUSED;
162 static void output_deferred_plabels (void);
163 static void output_deferred_profile_counters (void) ATTRIBUTE_UNUSED;
164 static void pa_file_end (void);
165 static void pa_init_libfuncs (void);
166 static rtx pa_struct_value_rtx (tree, int);
167 static bool pa_pass_by_reference (cumulative_args_t, machine_mode,
168 const_tree, bool);
169 static int pa_arg_partial_bytes (cumulative_args_t, machine_mode,
170 tree, bool);
171 static void pa_function_arg_advance (cumulative_args_t, machine_mode,
172 const_tree, bool);
173 static rtx pa_function_arg (cumulative_args_t, machine_mode,
174 const_tree, bool);
175 static pad_direction pa_function_arg_padding (machine_mode, const_tree);
176 static unsigned int pa_function_arg_boundary (machine_mode, const_tree);
177 static struct machine_function * pa_init_machine_status (void);
178 static reg_class_t pa_secondary_reload (bool, rtx, reg_class_t,
179 machine_mode,
180 secondary_reload_info *);
181 static bool pa_secondary_memory_needed (machine_mode,
182 reg_class_t, reg_class_t);
183 static void pa_extra_live_on_entry (bitmap);
184 static machine_mode pa_promote_function_mode (const_tree,
185 machine_mode, int *,
186 const_tree, int);
187
188 static void pa_asm_trampoline_template (FILE *);
189 static void pa_trampoline_init (rtx, tree, rtx);
190 static rtx pa_trampoline_adjust_address (rtx);
191 static rtx pa_delegitimize_address (rtx);
192 static bool pa_print_operand_punct_valid_p (unsigned char);
193 static rtx pa_internal_arg_pointer (void);
194 static bool pa_can_eliminate (const int, const int);
195 static void pa_conditional_register_usage (void);
196 static machine_mode pa_c_mode_for_suffix (char);
197 static section *pa_function_section (tree, enum node_frequency, bool, bool);
198 static bool pa_cannot_force_const_mem (machine_mode, rtx);
199 static bool pa_legitimate_constant_p (machine_mode, rtx);
200 static unsigned int pa_section_type_flags (tree, const char *, int);
201 static bool pa_legitimate_address_p (machine_mode, rtx, bool);
202 static bool pa_callee_copies (cumulative_args_t, machine_mode,
203 const_tree, bool);
204 static unsigned int pa_hard_regno_nregs (unsigned int, machine_mode);
205 static bool pa_hard_regno_mode_ok (unsigned int, machine_mode);
206 static bool pa_modes_tieable_p (machine_mode, machine_mode);
207 static bool pa_can_change_mode_class (machine_mode, machine_mode, reg_class_t);
208 static HOST_WIDE_INT pa_starting_frame_offset (void);
209
210 /* The following extra sections are only used for SOM. */
211 static GTY(()) section *som_readonly_data_section;
212 static GTY(()) section *som_one_only_readonly_data_section;
213 static GTY(()) section *som_one_only_data_section;
214 static GTY(()) section *som_tm_clone_table_section;
215
216 /* Counts of the callee-saved general and floating point registers
217 saved by the current function's prologue. */
218 static int gr_saved, fr_saved;
219
220 /* Boolean indicating whether the return pointer was saved by the
221 current function's prologue. */
222 static bool rp_saved;
223
224 static rtx find_addr_reg (rtx);
225
226 /* Keep track of the number of bytes we have output in the CODE subspace
227 during this compilation so we'll know when to emit inline long-calls. */
228 unsigned long total_code_bytes;
229
230 /* The last address of the previous function plus the number of bytes in
231 associated thunks that have been output. This is used to determine if
232 a thunk can use an IA-relative branch to reach its target function. */
233 static unsigned int last_address;
234
235 /* Variables to handle plabels that we discover are necessary at assembly
236 output time. They are output after the current function. */
237 struct GTY(()) deferred_plabel
238 {
239 rtx internal_label;
240 rtx symbol;
241 };
242 static GTY((length ("n_deferred_plabels"))) struct deferred_plabel *
243 deferred_plabels;
244 static size_t n_deferred_plabels = 0;
245 \f
246 /* Initialize the GCC target structure. */
247
248 #undef TARGET_OPTION_OVERRIDE
249 #define TARGET_OPTION_OVERRIDE pa_option_override
250
251 #undef TARGET_ASM_ALIGNED_HI_OP
252 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
253 #undef TARGET_ASM_ALIGNED_SI_OP
254 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
255 #undef TARGET_ASM_ALIGNED_DI_OP
256 #define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
257 #undef TARGET_ASM_UNALIGNED_HI_OP
258 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
259 #undef TARGET_ASM_UNALIGNED_SI_OP
260 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
261 #undef TARGET_ASM_UNALIGNED_DI_OP
262 #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
263 #undef TARGET_ASM_INTEGER
264 #define TARGET_ASM_INTEGER pa_assemble_integer
265
266 #undef TARGET_ASM_FUNCTION_PROLOGUE
267 #define TARGET_ASM_FUNCTION_PROLOGUE pa_output_function_prologue
268 #undef TARGET_ASM_FUNCTION_EPILOGUE
269 #define TARGET_ASM_FUNCTION_EPILOGUE pa_output_function_epilogue
270
271 #undef TARGET_FUNCTION_VALUE
272 #define TARGET_FUNCTION_VALUE pa_function_value
273 #undef TARGET_LIBCALL_VALUE
274 #define TARGET_LIBCALL_VALUE pa_libcall_value
275 #undef TARGET_FUNCTION_VALUE_REGNO_P
276 #define TARGET_FUNCTION_VALUE_REGNO_P pa_function_value_regno_p
277
278 #undef TARGET_LEGITIMIZE_ADDRESS
279 #define TARGET_LEGITIMIZE_ADDRESS hppa_legitimize_address
280
281 #undef TARGET_SCHED_ADJUST_COST
282 #define TARGET_SCHED_ADJUST_COST pa_adjust_cost
283 #undef TARGET_SCHED_ADJUST_PRIORITY
284 #define TARGET_SCHED_ADJUST_PRIORITY pa_adjust_priority
285 #undef TARGET_SCHED_ISSUE_RATE
286 #define TARGET_SCHED_ISSUE_RATE pa_issue_rate
287
288 #undef TARGET_ENCODE_SECTION_INFO
289 #define TARGET_ENCODE_SECTION_INFO pa_encode_section_info
290 #undef TARGET_STRIP_NAME_ENCODING
291 #define TARGET_STRIP_NAME_ENCODING pa_strip_name_encoding
292
293 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
294 #define TARGET_FUNCTION_OK_FOR_SIBCALL pa_function_ok_for_sibcall
295
296 #undef TARGET_COMMUTATIVE_P
297 #define TARGET_COMMUTATIVE_P pa_commutative_p
298
299 #undef TARGET_ASM_OUTPUT_MI_THUNK
300 #define TARGET_ASM_OUTPUT_MI_THUNK pa_asm_output_mi_thunk
301 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
302 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall
303
304 #undef TARGET_ASM_FILE_END
305 #define TARGET_ASM_FILE_END pa_file_end
306
307 #undef TARGET_ASM_RELOC_RW_MASK
308 #define TARGET_ASM_RELOC_RW_MASK pa_reloc_rw_mask
309
310 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
311 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P pa_print_operand_punct_valid_p
312
313 #if !defined(USE_COLLECT2)
314 #undef TARGET_ASM_CONSTRUCTOR
315 #define TARGET_ASM_CONSTRUCTOR pa_asm_out_constructor
316 #undef TARGET_ASM_DESTRUCTOR
317 #define TARGET_ASM_DESTRUCTOR pa_asm_out_destructor
318 #endif
319
320 #undef TARGET_INIT_BUILTINS
321 #define TARGET_INIT_BUILTINS pa_init_builtins
322
323 #undef TARGET_EXPAND_BUILTIN
324 #define TARGET_EXPAND_BUILTIN pa_expand_builtin
325
326 #undef TARGET_REGISTER_MOVE_COST
327 #define TARGET_REGISTER_MOVE_COST hppa_register_move_cost
328 #undef TARGET_RTX_COSTS
329 #define TARGET_RTX_COSTS hppa_rtx_costs
330 #undef TARGET_ADDRESS_COST
331 #define TARGET_ADDRESS_COST hppa_address_cost
332
333 #undef TARGET_MACHINE_DEPENDENT_REORG
334 #define TARGET_MACHINE_DEPENDENT_REORG pa_reorg
335
336 #undef TARGET_INIT_LIBFUNCS
337 #define TARGET_INIT_LIBFUNCS pa_init_libfuncs
338
339 #undef TARGET_PROMOTE_FUNCTION_MODE
340 #define TARGET_PROMOTE_FUNCTION_MODE pa_promote_function_mode
341 #undef TARGET_PROMOTE_PROTOTYPES
342 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
343
344 #undef TARGET_STRUCT_VALUE_RTX
345 #define TARGET_STRUCT_VALUE_RTX pa_struct_value_rtx
346 #undef TARGET_RETURN_IN_MEMORY
347 #define TARGET_RETURN_IN_MEMORY pa_return_in_memory
348 #undef TARGET_MUST_PASS_IN_STACK
349 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
350 #undef TARGET_PASS_BY_REFERENCE
351 #define TARGET_PASS_BY_REFERENCE pa_pass_by_reference
352 #undef TARGET_CALLEE_COPIES
353 #define TARGET_CALLEE_COPIES pa_callee_copies
354 #undef TARGET_ARG_PARTIAL_BYTES
355 #define TARGET_ARG_PARTIAL_BYTES pa_arg_partial_bytes
356 #undef TARGET_FUNCTION_ARG
357 #define TARGET_FUNCTION_ARG pa_function_arg
358 #undef TARGET_FUNCTION_ARG_ADVANCE
359 #define TARGET_FUNCTION_ARG_ADVANCE pa_function_arg_advance
360 #undef TARGET_FUNCTION_ARG_PADDING
361 #define TARGET_FUNCTION_ARG_PADDING pa_function_arg_padding
362 #undef TARGET_FUNCTION_ARG_BOUNDARY
363 #define TARGET_FUNCTION_ARG_BOUNDARY pa_function_arg_boundary
364
365 #undef TARGET_EXPAND_BUILTIN_SAVEREGS
366 #define TARGET_EXPAND_BUILTIN_SAVEREGS hppa_builtin_saveregs
367 #undef TARGET_EXPAND_BUILTIN_VA_START
368 #define TARGET_EXPAND_BUILTIN_VA_START hppa_va_start
369 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
370 #define TARGET_GIMPLIFY_VA_ARG_EXPR hppa_gimplify_va_arg_expr
371
372 #undef TARGET_SCALAR_MODE_SUPPORTED_P
373 #define TARGET_SCALAR_MODE_SUPPORTED_P pa_scalar_mode_supported_p
374
375 #undef TARGET_CANNOT_FORCE_CONST_MEM
376 #define TARGET_CANNOT_FORCE_CONST_MEM pa_cannot_force_const_mem
377
378 #undef TARGET_SECONDARY_RELOAD
379 #define TARGET_SECONDARY_RELOAD pa_secondary_reload
380 #undef TARGET_SECONDARY_MEMORY_NEEDED
381 #define TARGET_SECONDARY_MEMORY_NEEDED pa_secondary_memory_needed
382
383 #undef TARGET_EXTRA_LIVE_ON_ENTRY
384 #define TARGET_EXTRA_LIVE_ON_ENTRY pa_extra_live_on_entry
385
386 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
387 #define TARGET_ASM_TRAMPOLINE_TEMPLATE pa_asm_trampoline_template
388 #undef TARGET_TRAMPOLINE_INIT
389 #define TARGET_TRAMPOLINE_INIT pa_trampoline_init
390 #undef TARGET_TRAMPOLINE_ADJUST_ADDRESS
391 #define TARGET_TRAMPOLINE_ADJUST_ADDRESS pa_trampoline_adjust_address
392 #undef TARGET_DELEGITIMIZE_ADDRESS
393 #define TARGET_DELEGITIMIZE_ADDRESS pa_delegitimize_address
394 #undef TARGET_INTERNAL_ARG_POINTER
395 #define TARGET_INTERNAL_ARG_POINTER pa_internal_arg_pointer
396 #undef TARGET_CAN_ELIMINATE
397 #define TARGET_CAN_ELIMINATE pa_can_eliminate
398 #undef TARGET_CONDITIONAL_REGISTER_USAGE
399 #define TARGET_CONDITIONAL_REGISTER_USAGE pa_conditional_register_usage
400 #undef TARGET_C_MODE_FOR_SUFFIX
401 #define TARGET_C_MODE_FOR_SUFFIX pa_c_mode_for_suffix
402 #undef TARGET_ASM_FUNCTION_SECTION
403 #define TARGET_ASM_FUNCTION_SECTION pa_function_section
404
405 #undef TARGET_LEGITIMATE_CONSTANT_P
406 #define TARGET_LEGITIMATE_CONSTANT_P pa_legitimate_constant_p
407 #undef TARGET_SECTION_TYPE_FLAGS
408 #define TARGET_SECTION_TYPE_FLAGS pa_section_type_flags
409 #undef TARGET_LEGITIMATE_ADDRESS_P
410 #define TARGET_LEGITIMATE_ADDRESS_P pa_legitimate_address_p
411
412 #undef TARGET_LRA_P
413 #define TARGET_LRA_P hook_bool_void_false
414
415 #undef TARGET_HARD_REGNO_NREGS
416 #define TARGET_HARD_REGNO_NREGS pa_hard_regno_nregs
417 #undef TARGET_HARD_REGNO_MODE_OK
418 #define TARGET_HARD_REGNO_MODE_OK pa_hard_regno_mode_ok
419 #undef TARGET_MODES_TIEABLE_P
420 #define TARGET_MODES_TIEABLE_P pa_modes_tieable_p
421
422 #undef TARGET_CAN_CHANGE_MODE_CLASS
423 #define TARGET_CAN_CHANGE_MODE_CLASS pa_can_change_mode_class
424
425 #undef TARGET_CONSTANT_ALIGNMENT
426 #define TARGET_CONSTANT_ALIGNMENT constant_alignment_word_strings
427
428 #undef TARGET_STARTING_FRAME_OFFSET
429 #define TARGET_STARTING_FRAME_OFFSET pa_starting_frame_offset
430
431 struct gcc_target targetm = TARGET_INITIALIZER;
432 \f
433 /* Parse the -mfixed-range= option string. */
434
435 static void
436 fix_range (const char *const_str)
437 {
438 int i, first, last;
439 char *str, *dash, *comma;
440
441 /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
442 REG2 are either register names or register numbers. The effect
443 of this option is to mark the registers in the range from REG1 to
444 REG2 as ``fixed'' so they won't be used by the compiler. This is
445 used, e.g., to ensure that kernel mode code doesn't use fr4-fr31. */
446
447 i = strlen (const_str);
448 str = (char *) alloca (i + 1);
449 memcpy (str, const_str, i + 1);
450
451 while (1)
452 {
453 dash = strchr (str, '-');
454 if (!dash)
455 {
456 warning (0, "value of -mfixed-range must have form REG1-REG2");
457 return;
458 }
459 *dash = '\0';
460
461 comma = strchr (dash + 1, ',');
462 if (comma)
463 *comma = '\0';
464
465 first = decode_reg_name (str);
466 if (first < 0)
467 {
468 warning (0, "unknown register name: %s", str);
469 return;
470 }
471
472 last = decode_reg_name (dash + 1);
473 if (last < 0)
474 {
475 warning (0, "unknown register name: %s", dash + 1);
476 return;
477 }
478
479 *dash = '-';
480
481 if (first > last)
482 {
483 warning (0, "%s-%s is an empty range", str, dash + 1);
484 return;
485 }
486
487 for (i = first; i <= last; ++i)
488 fixed_regs[i] = call_used_regs[i] = 1;
489
490 if (!comma)
491 break;
492
493 *comma = ',';
494 str = comma + 1;
495 }
496
497 /* Check if all floating point registers have been fixed. */
498 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
499 if (!fixed_regs[i])
500 break;
501
502 if (i > FP_REG_LAST)
503 target_flags |= MASK_DISABLE_FPREGS;
504 }
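/* Usage sketch (illustrative option string, not from the original
   source): "-mfixed-range=fr4-fr7,r20-r22" makes two passes through
   the loop above; each pass splits one REG1-REG2 group at the dash,
   decodes both names with decode_reg_name, and marks every register
   in the inclusive range in fixed_regs[] and call_used_regs[].  If
   that ends up fixing every FP register, MASK_DISABLE_FPREGS is set
   at the end.  */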
505
506 /* Implement the TARGET_OPTION_OVERRIDE hook. */
507
508 static void
509 pa_option_override (void)
510 {
511 unsigned int i;
512 cl_deferred_option *opt;
513 vec<cl_deferred_option> *v
514 = (vec<cl_deferred_option> *) pa_deferred_options;
515
516 if (v)
517 FOR_EACH_VEC_ELT (*v, i, opt)
518 {
519 switch (opt->opt_index)
520 {
521 case OPT_mfixed_range_:
522 fix_range (opt->arg);
523 break;
524
525 default:
526 gcc_unreachable ();
527 }
528 }
529
530 if (flag_pic && TARGET_PORTABLE_RUNTIME)
531 {
532 warning (0, "PIC code generation is not supported in the portable runtime model");
533 }
534
535 if (flag_pic && TARGET_FAST_INDIRECT_CALLS)
536 {
537 warning (0, "PIC code generation is not compatible with fast indirect calls");
538 }
539
540 if (! TARGET_GAS && write_symbols != NO_DEBUG)
541 {
542 warning (0, "-g is only supported when using GAS on this processor,");
543 warning (0, "-g option disabled");
544 write_symbols = NO_DEBUG;
545 }
546
547 /* We only support the "big PIC" model now, and we always generate PIC
548 code when in 64-bit mode. */
549 if (flag_pic == 1 || TARGET_64BIT)
550 flag_pic = 2;
551
552 /* Disable -freorder-blocks-and-partition as we don't support hot and
553 cold partitioning. */
554 if (flag_reorder_blocks_and_partition)
555 {
556 inform (input_location,
557 "-freorder-blocks-and-partition does not work "
558 "on this architecture");
559 flag_reorder_blocks_and_partition = 0;
560 flag_reorder_blocks = 1;
561 }
562
563 /* We can't guarantee that .dword is available for 32-bit targets. */
564 if (UNITS_PER_WORD == 4)
565 targetm.asm_out.aligned_op.di = NULL;
566
567 /* The unaligned ops are only available when using GAS. */
568 if (!TARGET_GAS)
569 {
570 targetm.asm_out.unaligned_op.hi = NULL;
571 targetm.asm_out.unaligned_op.si = NULL;
572 targetm.asm_out.unaligned_op.di = NULL;
573 }
574
575 init_machine_status = pa_init_machine_status;
576 }
577
578 enum pa_builtins
579 {
580 PA_BUILTIN_COPYSIGNQ,
581 PA_BUILTIN_FABSQ,
582 PA_BUILTIN_INFQ,
583 PA_BUILTIN_HUGE_VALQ,
584 PA_BUILTIN_max
585 };
586
587 static GTY(()) tree pa_builtins[(int) PA_BUILTIN_max];
588
589 static void
590 pa_init_builtins (void)
591 {
592 #ifdef DONT_HAVE_FPUTC_UNLOCKED
593 {
594 tree decl = builtin_decl_explicit (BUILT_IN_PUTC_UNLOCKED);
595 set_builtin_decl (BUILT_IN_FPUTC_UNLOCKED, decl,
596 builtin_decl_implicit_p (BUILT_IN_PUTC_UNLOCKED));
597 }
598 #endif
599 #if TARGET_HPUX_11
600 {
601 tree decl;
602
603 if ((decl = builtin_decl_explicit (BUILT_IN_FINITE)) != NULL_TREE)
604 set_user_assembler_name (decl, "_Isfinite");
605 if ((decl = builtin_decl_explicit (BUILT_IN_FINITEF)) != NULL_TREE)
606 set_user_assembler_name (decl, "_Isfinitef");
607 }
608 #endif
609
610 if (HPUX_LONG_DOUBLE_LIBRARY)
611 {
612 tree decl, ftype;
613
614 /* Under HPUX, the __float128 type is a synonym for "long double". */
615 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
616 "__float128");
617
618 /* TFmode support builtins. */
619 ftype = build_function_type_list (long_double_type_node,
620 long_double_type_node,
621 NULL_TREE);
622 decl = add_builtin_function ("__builtin_fabsq", ftype,
623 PA_BUILTIN_FABSQ, BUILT_IN_MD,
624 "_U_Qfabs", NULL_TREE);
625 TREE_READONLY (decl) = 1;
626 pa_builtins[PA_BUILTIN_FABSQ] = decl;
627
628 ftype = build_function_type_list (long_double_type_node,
629 long_double_type_node,
630 long_double_type_node,
631 NULL_TREE);
632 decl = add_builtin_function ("__builtin_copysignq", ftype,
633 PA_BUILTIN_COPYSIGNQ, BUILT_IN_MD,
634 "_U_Qfcopysign", NULL_TREE);
635 TREE_READONLY (decl) = 1;
636 pa_builtins[PA_BUILTIN_COPYSIGNQ] = decl;
637
638 ftype = build_function_type_list (long_double_type_node, NULL_TREE);
639 decl = add_builtin_function ("__builtin_infq", ftype,
640 PA_BUILTIN_INFQ, BUILT_IN_MD,
641 NULL, NULL_TREE);
642 pa_builtins[PA_BUILTIN_INFQ] = decl;
643
644 decl = add_builtin_function ("__builtin_huge_valq", ftype,
645 PA_BUILTIN_HUGE_VALQ, BUILT_IN_MD,
646 NULL, NULL_TREE);
647 pa_builtins[PA_BUILTIN_HUGE_VALQ] = decl;
648 }
649 }
650
651 static rtx
652 pa_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
653 machine_mode mode ATTRIBUTE_UNUSED,
654 int ignore ATTRIBUTE_UNUSED)
655 {
656 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
657 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
658
659 switch (fcode)
660 {
661 case PA_BUILTIN_FABSQ:
662 case PA_BUILTIN_COPYSIGNQ:
663 return expand_call (exp, target, ignore);
664
665 case PA_BUILTIN_INFQ:
666 case PA_BUILTIN_HUGE_VALQ:
667 {
668 machine_mode target_mode = TYPE_MODE (TREE_TYPE (exp));
669 REAL_VALUE_TYPE inf;
670 rtx tmp;
671
672 real_inf (&inf);
673 tmp = const_double_from_real_value (inf, target_mode);
674
675 tmp = validize_mem (force_const_mem (target_mode, tmp));
676
677 if (target == 0)
678 target = gen_reg_rtx (target_mode);
679
680 emit_move_insn (target, tmp);
681 return target;
682 }
683
684 default:
685 gcc_unreachable ();
686 }
687
688 return NULL_RTX;
689 }
690
691 /* Function to init struct machine_function.
692 This will be called, via a pointer variable,
693 from push_function_context. */
694
695 static struct machine_function *
696 pa_init_machine_status (void)
697 {
698 return ggc_cleared_alloc<machine_function> ();
699 }
700
701 /* If FROM is a probable pointer register, mark TO as a probable
702 pointer register with the same pointer alignment as FROM. */
703
704 static void
705 copy_reg_pointer (rtx to, rtx from)
706 {
707 if (REG_POINTER (from))
708 mark_reg_pointer (to, REGNO_POINTER_ALIGN (REGNO (from)));
709 }
710
711 /* Return 1 if X contains a symbolic expression. We know these
712 expressions will have one of a few well-defined forms, so
713 we need only check those forms. */
714 int
715 pa_symbolic_expression_p (rtx x)
716 {
717
718 /* Strip off any HIGH. */
719 if (GET_CODE (x) == HIGH)
720 x = XEXP (x, 0);
721
722 return symbolic_operand (x, VOIDmode);
723 }
724
725 /* Accept any constant that can be moved in one instruction into a
726 general register. */
727 int
728 pa_cint_ok_for_move (unsigned HOST_WIDE_INT ival)
729 {
730 /* OK if ldo, ldil, or zdepi, can be used. */
731 return (VAL_14_BITS_P (ival)
732 || pa_ldil_cint_p (ival)
733 || pa_zdepi_cint_p (ival));
734 }
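/* Illustration (example values, not from the original source):
   5 fits in 14 bits, so "ldo 5(%r0),reg" loads it; 0x12345000 has
   its low 11 bits clear, so pa_ldil_cint_p accepts it for ldil; and
   0x1f0 is a shifted 5-bit field, so pa_zdepi_cint_p accepts it.  */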
735 \f
736 /* True iff ldil can be used to load this CONST_INT. The least
737 significant 11 bits of the value must be zero and the value must
738 not change sign when extended from 32 to 64 bits. */
739 int
740 pa_ldil_cint_p (unsigned HOST_WIDE_INT ival)
741 {
742 unsigned HOST_WIDE_INT x;
743
744 x = ival & (((unsigned HOST_WIDE_INT) -1 << 31) | 0x7ff);
745 return x == 0 || x == ((unsigned HOST_WIDE_INT) -1 << 31);
746 }
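/* Worked example (illustrative): for ival = 0x12345800, the masked
   value x is 0 (bits 0..10 and 31..63 are all clear), so ldil can
   load it.  For ival = 0x12345801, bit 0 survives the mask and the
   test fails.  */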
747
748 /* True iff zdepi can be used to generate this CONST_INT.
749 zdepi first sign extends a 5-bit signed number to a given field
750 length, then places this field anywhere in a zero. */
751 int
752 pa_zdepi_cint_p (unsigned HOST_WIDE_INT x)
753 {
754 unsigned HOST_WIDE_INT lsb_mask, t;
755
756 /* This might not be obvious, but it's at least fast.
757 This function is performance critical; we can't afford a loop here. */
758 lsb_mask = x & -x;
759 t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1);
760 /* Return true iff t is a power of two. */
761 return ((t & (t - 1)) == 0);
762 }
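/* Worked example (illustrative): x = 0x1f0 gives lsb_mask = 0x10 and
   t = ((0x1f + 0x10) & ~0xf) = 0x20, a power of two, so zdepi can
   deposit the sign-extended 5-bit field 0b11111 at bit 4.  For
   x = 0x210, t = 0x30, which is not a power of two, so it fails.  */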
763
764 /* True iff depi or extru can be used to compute (reg & mask).
765 Accept bit pattern like these:
766 0....01....1
767 1....10....0
768 1..10..01..1 */
769 int
770 pa_and_mask_p (unsigned HOST_WIDE_INT mask)
771 {
772 mask = ~mask;
773 mask += mask & -mask;
774 return (mask & (mask - 1)) == 0;
775 }
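/* Worked example (illustrative, using 64-bit HOST_WIDE_INT values):
   mask = 0xfffffffffffff0ff matches the 1..10..01..1 pattern: ~mask
   is 0xf00, and adding its low set bit 0x100 yields 0x1000, a power
   of two.  mask = 0xffffffffffff0ff0 has two runs of zeros, so the
   sum 0xf010 is not a power of two and the test fails.  */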
776
777 /* True iff depi can be used to compute (reg | MASK). */
778 int
779 pa_ior_mask_p (unsigned HOST_WIDE_INT mask)
780 {
781 mask += mask & -mask;
782 return (mask & (mask - 1)) == 0;
783 }
784 \f
785 /* Legitimize PIC addresses. If the address is already
786 position-independent, we return ORIG. Newly generated
787 position-independent addresses go to REG. If we need more
788 than one register, we lose. */
789
790 static rtx
791 legitimize_pic_address (rtx orig, machine_mode mode, rtx reg)
792 {
793 rtx pic_ref = orig;
794
795 gcc_assert (!PA_SYMBOL_REF_TLS_P (orig));
796
797 /* Labels need special handling. */
798 if (pic_label_operand (orig, mode))
799 {
800 rtx_insn *insn;
801
802 /* We do not want to go through the movXX expanders here since that
803 would create recursion.
804
805 Nor do we really want to call a generator for a named pattern
806 since that requires multiple patterns if we want to support
807 multiple word sizes.
808
809 So instead we just emit the raw set, which avoids the movXX
810 expanders completely. */
811 mark_reg_pointer (reg, BITS_PER_UNIT);
812 insn = emit_insn (gen_rtx_SET (reg, orig));
813
814 /* Put a REG_EQUAL note on this insn, so that it can be optimized. */
815 add_reg_note (insn, REG_EQUAL, orig);
816
817 /* During and after reload, we need to generate a REG_LABEL_OPERAND note
818 and update LABEL_NUSES because this is not done automatically. */
819 if (reload_in_progress || reload_completed)
820 {
821 /* Extract LABEL_REF. */
822 if (GET_CODE (orig) == CONST)
823 orig = XEXP (XEXP (orig, 0), 0);
824 /* Extract CODE_LABEL. */
825 orig = XEXP (orig, 0);
826 add_reg_note (insn, REG_LABEL_OPERAND, orig);
827 /* Make sure we have a label and not a note. */
828 if (LABEL_P (orig))
829 LABEL_NUSES (orig)++;
830 }
831 crtl->uses_pic_offset_table = 1;
832 return reg;
833 }
834 if (GET_CODE (orig) == SYMBOL_REF)
835 {
836 rtx_insn *insn;
837 rtx tmp_reg;
838
839 gcc_assert (reg);
840
841 /* Before reload, allocate a temporary register for the intermediate
842 result. This allows the sequence to be deleted when the final
843 result is unused and the insns are trivially dead. */
844 tmp_reg = ((reload_in_progress || reload_completed)
845 ? reg : gen_reg_rtx (Pmode));
846
847 if (function_label_operand (orig, VOIDmode))
848 {
849 /* Force function label into memory in word mode. */
850 orig = XEXP (force_const_mem (word_mode, orig), 0);
851 /* Load plabel address from DLT. */
852 emit_move_insn (tmp_reg,
853 gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
854 gen_rtx_HIGH (word_mode, orig)));
855 pic_ref
856 = gen_const_mem (Pmode,
857 gen_rtx_LO_SUM (Pmode, tmp_reg,
858 gen_rtx_UNSPEC (Pmode,
859 gen_rtvec (1, orig),
860 UNSPEC_DLTIND14R)));
861 emit_move_insn (reg, pic_ref);
862 /* Now load address of function descriptor. */
863 pic_ref = gen_rtx_MEM (Pmode, reg);
864 }
865 else
866 {
867 /* Load symbol reference from DLT. */
868 emit_move_insn (tmp_reg,
869 gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
870 gen_rtx_HIGH (word_mode, orig)));
871 pic_ref
872 = gen_const_mem (Pmode,
873 gen_rtx_LO_SUM (Pmode, tmp_reg,
874 gen_rtx_UNSPEC (Pmode,
875 gen_rtvec (1, orig),
876 UNSPEC_DLTIND14R)));
877 }
878
879 crtl->uses_pic_offset_table = 1;
880 mark_reg_pointer (reg, BITS_PER_UNIT);
881 insn = emit_move_insn (reg, pic_ref);
882
883 /* Put a REG_EQUAL note on this insn, so that it can be optimized. */
884 set_unique_reg_note (insn, REG_EQUAL, orig);
885
886 return reg;
887 }
888 else if (GET_CODE (orig) == CONST)
889 {
890 rtx base;
891
892 if (GET_CODE (XEXP (orig, 0)) == PLUS
893 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
894 return orig;
895
896 gcc_assert (reg);
897 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
898
899 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
900 orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
901 base == reg ? 0 : reg);
902
903 if (GET_CODE (orig) == CONST_INT)
904 {
905 if (INT_14_BITS (orig))
906 return plus_constant (Pmode, base, INTVAL (orig));
907 orig = force_reg (Pmode, orig);
908 }
909 pic_ref = gen_rtx_PLUS (Pmode, base, orig);
910 /* Likewise, should we set special REG_NOTEs here? */
911 }
912
913 return pic_ref;
914 }
915
916 static GTY(()) rtx gen_tls_tga;
917
918 static rtx
919 gen_tls_get_addr (void)
920 {
921 if (!gen_tls_tga)
922 gen_tls_tga = init_one_libfunc ("__tls_get_addr");
923 return gen_tls_tga;
924 }
925
926 static rtx
927 hppa_tls_call (rtx arg)
928 {
929 rtx ret;
930
931 ret = gen_reg_rtx (Pmode);
932 emit_library_call_value (gen_tls_get_addr (), ret,
933 LCT_CONST, Pmode, arg, Pmode);
934
935 return ret;
936 }
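/* Note (informal): the libcall above is emitted with LCT_CONST, so
   two calls resolving the same TLS argument can be combined by later
   optimizations.  */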
937
938 static rtx
939 legitimize_tls_address (rtx addr)
940 {
941 rtx ret, tmp, t1, t2, tp;
942 rtx_insn *insn;
943
944 /* Currently, we can't handle anything but a SYMBOL_REF. */
945 if (GET_CODE (addr) != SYMBOL_REF)
946 return addr;
947
948 switch (SYMBOL_REF_TLS_MODEL (addr))
949 {
950 case TLS_MODEL_GLOBAL_DYNAMIC:
951 tmp = gen_reg_rtx (Pmode);
952 if (flag_pic)
953 emit_insn (gen_tgd_load_pic (tmp, addr));
954 else
955 emit_insn (gen_tgd_load (tmp, addr));
956 ret = hppa_tls_call (tmp);
957 break;
958
959 case TLS_MODEL_LOCAL_DYNAMIC:
960 ret = gen_reg_rtx (Pmode);
961 tmp = gen_reg_rtx (Pmode);
962 start_sequence ();
963 if (flag_pic)
964 emit_insn (gen_tld_load_pic (tmp, addr));
965 else
966 emit_insn (gen_tld_load (tmp, addr));
967 t1 = hppa_tls_call (tmp);
968 insn = get_insns ();
969 end_sequence ();
970 t2 = gen_reg_rtx (Pmode);
971 emit_libcall_block (insn, t2, t1,
972 gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
973 UNSPEC_TLSLDBASE));
974 emit_insn (gen_tld_offset_load (ret, addr, t2));
975 break;
976
977 case TLS_MODEL_INITIAL_EXEC:
978 tp = gen_reg_rtx (Pmode);
979 tmp = gen_reg_rtx (Pmode);
980 ret = gen_reg_rtx (Pmode);
981 emit_insn (gen_tp_load (tp));
982 if (flag_pic)
983 emit_insn (gen_tie_load_pic (tmp, addr));
984 else
985 emit_insn (gen_tie_load (tmp, addr));
986 emit_move_insn (ret, gen_rtx_PLUS (Pmode, tp, tmp));
987 break;
988
989 case TLS_MODEL_LOCAL_EXEC:
990 tp = gen_reg_rtx (Pmode);
991 ret = gen_reg_rtx (Pmode);
992 emit_insn (gen_tp_load (tp));
993 emit_insn (gen_tle_load (ret, addr, tp));
994 break;
995
996 default:
997 gcc_unreachable ();
998 }
999
1000 return ret;
1001 }
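/* Informal summary of the cases above: GLOBAL_DYNAMIC and
   LOCAL_DYNAMIC resolve the address through a runtime call to
   __tls_get_addr, while INITIAL_EXEC and LOCAL_EXEC load the thread
   pointer with gen_tp_load and add the symbol's offset directly.  */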
1002
1003 /* Helper for hppa_legitimize_address. Given X, return true if it
1004 is a left shift by 1, 2 or 3 positions or a multiply by 2, 4 or 8.
1005
1006 These respectively represent canonical shift-add rtxs and scaled
1007 memory addresses. */
1008 static bool
1009 mem_shadd_or_shadd_rtx_p (rtx x)
1010 {
1011 return ((GET_CODE (x) == ASHIFT
1012 || GET_CODE (x) == MULT)
1013 && GET_CODE (XEXP (x, 1)) == CONST_INT
1014 && ((GET_CODE (x) == ASHIFT
1015 && pa_shadd_constant_p (INTVAL (XEXP (x, 1))))
1016 || (GET_CODE (x) == MULT
1017 && pa_mem_shadd_constant_p (INTVAL (XEXP (x, 1))))));
1018 }
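/* For instance (illustrative rtxs, not from the original source),
   both (ashift (reg) (const_int 3)) and (mult (reg) (const_int 8))
   satisfy this predicate; the former is the canonical shift-add
   form and the latter is how scaled addresses arrive inside a MEM.  */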
1019
1020 /* Try machine-dependent ways of modifying an illegitimate address
1021 to be legitimate. If we find one, return the new, valid address.
1022 This macro is used in only one place: `memory_address' in explow.c.
1023
1024 OLDX is the address as it was before break_out_memory_refs was called.
1025 In some cases it is useful to look at this to decide what needs to be done.
1026
1027 It is always safe for this macro to do nothing. It exists to recognize
1028 opportunities to optimize the output.
1029
1030 For the PA, transform:
1031
1032 memory(X + <large int>)
1033
1034 into:
1035
1036 if (<large int> & mask) >= 16
1037 Y = (<large int> & ~mask) + mask + 1 Round up.
1038 else
1039 Y = (<large int> & ~mask) Round down.
1040 Z = X + Y
1041 memory (Z + (<large int> - Y));
1042
1043 This is for CSE to find several similar references, and only use one Z.
1044
1045 X can either be a SYMBOL_REF or REG, but because combine cannot
1046 perform a 4->2 combination we do nothing for SYMBOL_REF + D where
1047 D will not fit in 14 bits.
1048
1049 MODE_FLOAT references allow displacements which fit in 5 bits, so use
1050 0x1f as the mask.
1051
1052 MODE_INT references allow displacements which fit in 14 bits, so use
1053 0x3fff as the mask.
1054
1055 This relies on the fact that most mode MODE_FLOAT references will use FP
1056 registers and most mode MODE_INT references will use integer registers.
1057 (In the rare case of an FP register used in an integer MODE, we depend
1058 on secondary reloads to clean things up.)
1059
1060
1061 It is also beneficial to handle (plus (mult (X) (Y)) (Z)) in a special
1062 manner if Y is 2, 4, or 8. (allows more shadd insns and shifted indexed
1063 addressing modes to be used).
1064
1065 Note that the addresses passed into hppa_legitimize_address always
1066 come from a MEM, so we only have to match the MULT form on incoming
1067 addresses. But to be future proof we also match the ASHIFT form.
1068
1069 However, this routine always places those shift-add sequences into
1070 registers, so we have to generate the ASHIFT form as our output.
1071
1072 Put X and Z into registers. Then put the entire expression into
1073 a register. */
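/* Worked example of the transformation above (illustrative numbers,
   not from the original source): for a MODE_INT reference
   memory (X + 0x123456), the mask is 0x3fff; the low bits 0x3456
   are >= 0x2000, so Y rounds up to 0x124000, Z = X + 0x124000, and
   the residual displacement 0x123456 - 0x124000 = -0xbaa fits in
   14 bits for the final memory reference.  */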
1074
1075 rtx
1076 hppa_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
1077 machine_mode mode)
1078 {
1079 rtx orig = x;
1080
1081 /* We need to canonicalize the order of operands in unscaled indexed
1082 addresses since the code that checks if an address is valid doesn't
1083 always try both orders. */
1084 if (!TARGET_NO_SPACE_REGS
1085 && GET_CODE (x) == PLUS
1086 && GET_MODE (x) == Pmode
1087 && REG_P (XEXP (x, 0))
1088 && REG_P (XEXP (x, 1))
1089 && REG_POINTER (XEXP (x, 0))
1090 && !REG_POINTER (XEXP (x, 1)))
1091 return gen_rtx_PLUS (Pmode, XEXP (x, 1), XEXP (x, 0));
1092
1093 if (tls_referenced_p (x))
1094 return legitimize_tls_address (x);
1095 else if (flag_pic)
1096 return legitimize_pic_address (x, mode, gen_reg_rtx (Pmode));
1097
1098 /* Strip off CONST. */
1099 if (GET_CODE (x) == CONST)
1100 x = XEXP (x, 0);
1101
1102 /* Special case. Get the SYMBOL_REF into a register and use indexing.
1103 That should always be safe. */
1104 if (GET_CODE (x) == PLUS
1105 && GET_CODE (XEXP (x, 0)) == REG
1106 && GET_CODE (XEXP (x, 1)) == SYMBOL_REF)
1107 {
1108 rtx reg = force_reg (Pmode, XEXP (x, 1));
1109 return force_reg (Pmode, gen_rtx_PLUS (Pmode, reg, XEXP (x, 0)));
1110 }
1111
1112 /* Note we must reject symbols which represent function addresses
1113 since the assembler/linker can't handle arithmetic on plabels. */
1114 if (GET_CODE (x) == PLUS
1115 && GET_CODE (XEXP (x, 1)) == CONST_INT
1116 && ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
1117 && !FUNCTION_NAME_P (XSTR (XEXP (x, 0), 0)))
1118 || GET_CODE (XEXP (x, 0)) == REG))
1119 {
1120 rtx int_part, ptr_reg;
1121 int newoffset;
1122 int offset = INTVAL (XEXP (x, 1));
1123 int mask;
1124
1125 mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
1126 && !INT14_OK_STRICT ? 0x1f : 0x3fff);
1127
1128 /* Choose which way to round the offset. Round up if we
1129 are >= halfway to the next boundary. */
1130 if ((offset & mask) >= ((mask + 1) / 2))
1131 newoffset = (offset & ~ mask) + mask + 1;
1132 else
1133 newoffset = (offset & ~ mask);
1134
1135 /* If the newoffset will not fit in 14 bits (ldo), then
1136 handling this would take 4 or 5 instructions (2 to load
1137 the SYMBOL_REF + 1 or 2 to load the newoffset + 1 to
1138 add the new offset and the SYMBOL_REF.) Combine can
1139 not handle 4->2 or 5->2 combinations, so do not create
1140 them. */
1141 if (! VAL_14_BITS_P (newoffset)
1142 && GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
1143 {
1144 rtx const_part = plus_constant (Pmode, XEXP (x, 0), newoffset);
1145 rtx tmp_reg
1146 = force_reg (Pmode,
1147 gen_rtx_HIGH (Pmode, const_part));
1148 ptr_reg
1149 = force_reg (Pmode,
1150 gen_rtx_LO_SUM (Pmode,
1151 tmp_reg, const_part));
1152 }
1153 else
1154 {
1155 if (! VAL_14_BITS_P (newoffset))
1156 int_part = force_reg (Pmode, GEN_INT (newoffset));
1157 else
1158 int_part = GEN_INT (newoffset);
1159
1160 ptr_reg = force_reg (Pmode,
1161 gen_rtx_PLUS (Pmode,
1162 force_reg (Pmode, XEXP (x, 0)),
1163 int_part));
1164 }
1165 return plus_constant (Pmode, ptr_reg, offset - newoffset);
1166 }
1167
1168 /* Handle (plus (mult (a) (mem_shadd_constant)) (b)). */
1169
1170 if (GET_CODE (x) == PLUS
1171 && mem_shadd_or_shadd_rtx_p (XEXP (x, 0))
1172 && (OBJECT_P (XEXP (x, 1))
1173 || GET_CODE (XEXP (x, 1)) == SUBREG)
1174 && GET_CODE (XEXP (x, 1)) != CONST)
1175 {
1176 /* If we were given a MULT, we must fix the constant
1177 as we're going to create the ASHIFT form. */
1178 int shift_val = INTVAL (XEXP (XEXP (x, 0), 1));
1179 if (GET_CODE (XEXP (x, 0)) == MULT)
1180 shift_val = exact_log2 (shift_val);
1181
1182 rtx reg1, reg2;
1183 reg1 = XEXP (x, 1);
1184 if (GET_CODE (reg1) != REG)
1185 reg1 = force_reg (Pmode, force_operand (reg1, 0));
1186
1187 reg2 = XEXP (XEXP (x, 0), 0);
1188 if (GET_CODE (reg2) != REG)
1189 reg2 = force_reg (Pmode, force_operand (reg2, 0));
1190
1191 return force_reg (Pmode,
1192 gen_rtx_PLUS (Pmode,
1193 gen_rtx_ASHIFT (Pmode, reg2,
1194 GEN_INT (shift_val)),
1195 reg1));
1196 }
1197
1198 /* Similarly for (plus (plus (mult (a) (mem_shadd_constant)) (b)) (c)).
1199
1200 Only do so for floating point modes since this is more speculative
1201 and we lose if it's an integer store. */
1202 if (GET_CODE (x) == PLUS
1203 && GET_CODE (XEXP (x, 0)) == PLUS
1204 && mem_shadd_or_shadd_rtx_p (XEXP (XEXP (x, 0), 0))
1205 && (mode == SFmode || mode == DFmode))
1206 {
1207 int shift_val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
1208
1209 /* If we were given a MULT, we must fix the constant
1210 as we're going to create the ASHIFT form. */
1211 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
1212 shift_val = exact_log2 (shift_val);
1213
1214 /* Try and figure out what to use as a base register. */
1215 rtx reg1, reg2, base, idx;
1216
1217 reg1 = XEXP (XEXP (x, 0), 1);
1218 reg2 = XEXP (x, 1);
1219 base = NULL_RTX;
1220 idx = NULL_RTX;
1221
1222 /* Make sure they're both regs. If one was a SYMBOL_REF [+ const],
1223 then pa_emit_move_sequence will turn on REG_POINTER so we'll know
1224 it's a base register below. */
1225 if (GET_CODE (reg1) != REG)
1226 reg1 = force_reg (Pmode, force_operand (reg1, 0));
1227
1228 if (GET_CODE (reg2) != REG)
1229 reg2 = force_reg (Pmode, force_operand (reg2, 0));
1230
1231 /* Figure out what the base and index are. */
1232
1233 if (GET_CODE (reg1) == REG
1234 && REG_POINTER (reg1))
1235 {
1236 base = reg1;
1237 idx = gen_rtx_PLUS (Pmode,
1238 gen_rtx_ASHIFT (Pmode,
1239 XEXP (XEXP (XEXP (x, 0), 0), 0),
1240 GEN_INT (shift_val)),
1241 XEXP (x, 1));
1242 }
1243 else if (GET_CODE (reg2) == REG
1244 && REG_POINTER (reg2))
1245 {
1246 base = reg2;
1247 idx = XEXP (x, 0);
1248 }
1249
1250 if (base == 0)
1251 return orig;
1252
1253 /* If the index adds a large constant, try to scale the
1254 constant so that it can be loaded with only one insn. */
1255 if (GET_CODE (XEXP (idx, 1)) == CONST_INT
1256 && VAL_14_BITS_P (INTVAL (XEXP (idx, 1))
1257 / INTVAL (XEXP (XEXP (idx, 0), 1)))
1258 && INTVAL (XEXP (idx, 1)) % INTVAL (XEXP (XEXP (idx, 0), 1)) == 0)
1259 {
1260 /* Divide the CONST_INT by the scale factor, then add it to A. */
1261 int val = INTVAL (XEXP (idx, 1));
1262 val /= (1 << shift_val);
1263
1264 reg1 = XEXP (XEXP (idx, 0), 0);
1265 if (GET_CODE (reg1) != REG)
1266 reg1 = force_reg (Pmode, force_operand (reg1, 0));
1267
1268 reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, reg1, GEN_INT (val)));
1269
1270 /* We can now generate a simple scaled indexed address. */
1271 return
1272 force_reg
1273 (Pmode, gen_rtx_PLUS (Pmode,
1274 gen_rtx_ASHIFT (Pmode, reg1,
1275 GEN_INT (shift_val)),
1276 base));
1277 }
1278
1279 /* If B + C is still a valid base register, then add them. */
1280 if (GET_CODE (XEXP (idx, 1)) == CONST_INT
1281 && INTVAL (XEXP (idx, 1)) <= 4096
1282 && INTVAL (XEXP (idx, 1)) >= -4096)
1283 {
1284 rtx reg1, reg2;
1285
1286 reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, XEXP (idx, 1)));
1287
1288 reg2 = XEXP (XEXP (idx, 0), 0);
1289 if (GET_CODE (reg2) != CONST_INT)
1290 reg2 = force_reg (Pmode, force_operand (reg2, 0));
1291
1292 return force_reg (Pmode,
1293 gen_rtx_PLUS (Pmode,
1294 gen_rtx_ASHIFT (Pmode, reg2,
1295 GEN_INT (shift_val)),
1296 reg1));
1297 }
1298
1299 /* Get the index into a register, then add the base + index and
1300 return a register holding the result. */
1301
1302 /* First get A into a register. */
1303 reg1 = XEXP (XEXP (idx, 0), 0);
1304 if (GET_CODE (reg1) != REG)
1305 reg1 = force_reg (Pmode, force_operand (reg1, 0));
1306
1307 /* And get B into a register. */
1308 reg2 = XEXP (idx, 1);
1309 if (GET_CODE (reg2) != REG)
1310 reg2 = force_reg (Pmode, force_operand (reg2, 0));
1311
1312 reg1 = force_reg (Pmode,
1313 gen_rtx_PLUS (Pmode,
1314 gen_rtx_ASHIFT (Pmode, reg1,
1315 GEN_INT (shift_val)),
1316 reg2));
1317
1318 /* Add the result to our base register and return. */
1319 return force_reg (Pmode, gen_rtx_PLUS (Pmode, base, reg1));
1320
1321 }
1322
1323 /* Uh-oh. We might have an address for x[n-100000]. This needs
1324 special handling to avoid creating an indexed memory address
1325 with x-100000 as the base.
1326
1327 If the constant part is small enough, then it's still safe because
1328 there is a guard page at the beginning and end of the data segment.
1329
1330 Scaled references are common enough that we want to try and rearrange the
1331 terms so that we can use indexing for these addresses too. Only
1332 do the optimization for floating point modes. */
1333
1334 if (GET_CODE (x) == PLUS
1335 && pa_symbolic_expression_p (XEXP (x, 1)))
1336 {
1337 /* Ugly. We modify things here so that the address offset specified
1338 by the index expression is computed first, then added to x to form
1339 the entire address. */
1340
1341 rtx regx1, regx2, regy1, regy2, y;
1342
1343 /* Strip off any CONST. */
1344 y = XEXP (x, 1);
1345 if (GET_CODE (y) == CONST)
1346 y = XEXP (y, 0);
1347
1348 if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
1349 {
1350 /* See if this looks like
1351 (plus (mult (reg) (mem_shadd_const))
1352 (const (plus (symbol_ref) (const_int))))
1353
1354 Where const_int is small. In that case the const
1355 expression is a valid pointer for indexing.
1356
1357 If const_int is big but divides evenly by shadd_const, it is
1358 divided and added to (reg). This allows more scaled indexed addresses. */
1359 if (GET_CODE (XEXP (y, 0)) == SYMBOL_REF
1360 && mem_shadd_or_shadd_rtx_p (XEXP (x, 0))
1361 && GET_CODE (XEXP (y, 1)) == CONST_INT
1362 && INTVAL (XEXP (y, 1)) >= -4096
1363 && INTVAL (XEXP (y, 1)) <= 4095)
1364 {
1365 int shift_val = INTVAL (XEXP (XEXP (x, 0), 1));
1366
1367 /* If we were given a MULT, we must fix the constant
1368 as we're going to create the ASHIFT form. */
1369 if (GET_CODE (XEXP (x, 0)) == MULT)
1370 shift_val = exact_log2 (shift_val);
1371
1372 rtx reg1, reg2;
1373
1374 reg1 = XEXP (x, 1);
1375 if (GET_CODE (reg1) != REG)
1376 reg1 = force_reg (Pmode, force_operand (reg1, 0));
1377
1378 reg2 = XEXP (XEXP (x, 0), 0);
1379 if (GET_CODE (reg2) != REG)
1380 reg2 = force_reg (Pmode, force_operand (reg2, 0));
1381
1382 return
1383 force_reg (Pmode,
1384 gen_rtx_PLUS (Pmode,
1385 gen_rtx_ASHIFT (Pmode,
1386 reg2,
1387 GEN_INT (shift_val)),
1388 reg1));
1389 }
1390 else if ((mode == DFmode || mode == SFmode)
1391 && GET_CODE (XEXP (y, 0)) == SYMBOL_REF
1392 && mem_shadd_or_shadd_rtx_p (XEXP (x, 0))
1393 && GET_CODE (XEXP (y, 1)) == CONST_INT
1394 && INTVAL (XEXP (y, 1)) % (1 << INTVAL (XEXP (XEXP (x, 0), 1))) == 0)
1395 {
1396 int shift_val = INTVAL (XEXP (XEXP (x, 0), 1));
1397
1398 /* If we were given a MULT, we must fix the constant
1399 as we're going to create the ASHIFT form. */
1400 if (GET_CODE (XEXP (x, 0)) == MULT)
1401 shift_val = exact_log2 (shift_val);
1402
1403 regx1
1404 = force_reg (Pmode, GEN_INT (INTVAL (XEXP (y, 1))
1405 / INTVAL (XEXP (XEXP (x, 0), 1))));
1406 regx2 = XEXP (XEXP (x, 0), 0);
1407 if (GET_CODE (regx2) != REG)
1408 regx2 = force_reg (Pmode, force_operand (regx2, 0));
1409 regx2 = force_reg (Pmode, gen_rtx_fmt_ee (GET_CODE (y), Pmode,
1410 regx2, regx1));
1411 return
1412 force_reg (Pmode,
1413 gen_rtx_PLUS (Pmode,
1414 gen_rtx_ASHIFT (Pmode, regx2,
1415 GEN_INT (shift_val)),
1416 force_reg (Pmode, XEXP (y, 0))));
1417 }
1418 else if (GET_CODE (XEXP (y, 1)) == CONST_INT
1419 && INTVAL (XEXP (y, 1)) >= -4096
1420 && INTVAL (XEXP (y, 1)) <= 4095)
1421 {
1422 /* This is safe because of the guard page at the
1423 beginning and end of the data space. Just
1424 return the original address. */
1425 return orig;
1426 }
1427 else
1428 {
1429 /* Doesn't look like one we can optimize. */
1430 regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
1431 regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
1432 regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
1433 regx1 = force_reg (Pmode,
1434 gen_rtx_fmt_ee (GET_CODE (y), Pmode,
1435 regx1, regy2));
1436 return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
1437 }
1438 }
1439 }
1440
1441 return orig;
1442 }
1443
1444 /* Implement the TARGET_REGISTER_MOVE_COST hook.
1445
1446 Compute extra cost of moving data between one register class
1447 and another.
1448
1449 Make moves from SAR so expensive they should never happen. We used to
1450 have 0xffff here, but that generates overflow in rare cases.
1451
1452 Copies involving a FP register and a non-FP register are relatively
1453 expensive because they must go through memory.
1454
1455 Other copies are reasonably cheap. */
1456
1457 static int
1458 hppa_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
1459 reg_class_t from, reg_class_t to)
1460 {
1461 if (from == SHIFT_REGS)
1462 return 0x100;
1463 else if (to == SHIFT_REGS && FP_REG_CLASS_P (from))
1464 return 18;
1465 else if ((FP_REG_CLASS_P (from) && ! FP_REG_CLASS_P (to))
1466 || (FP_REG_CLASS_P (to) && ! FP_REG_CLASS_P (from)))
1467 return 16;
1468 else
1469 return 2;
1470 }
1471
1472 /* For the HPPA, REG, REG+CONST and LO_SUM addresses have cost 1,
1473 HIGH has cost 2, and other addresses (including symbolic constants) have cost 4.
1474
1475 PIC addresses are very expensive.
1476
1477 It is no coincidence that this has the same structure
1478 as pa_legitimate_address_p. */
1479
1480 static int
1481 hppa_address_cost (rtx X, machine_mode mode ATTRIBUTE_UNUSED,
1482 addr_space_t as ATTRIBUTE_UNUSED,
1483 bool speed ATTRIBUTE_UNUSED)
1484 {
1485 switch (GET_CODE (X))
1486 {
1487 case REG:
1488 case PLUS:
1489 case LO_SUM:
1490 return 1;
1491 case HIGH:
1492 return 2;
1493 default:
1494 return 4;
1495 }
1496 }
1497
1498 /* Compute a (partial) cost for rtx X. Return true if the complete
1499 cost has been computed, and false if subexpressions should be
1500 scanned. In either case, *TOTAL contains the cost result. */
1501
1502 static bool
1503 hppa_rtx_costs (rtx x, machine_mode mode, int outer_code,
1504 int opno ATTRIBUTE_UNUSED,
1505 int *total, bool speed ATTRIBUTE_UNUSED)
1506 {
1507 int factor;
1508 int code = GET_CODE (x);
1509
1510 switch (code)
1511 {
1512 case CONST_INT:
1513 if (INTVAL (x) == 0)
1514 *total = 0;
1515 else if (INT_14_BITS (x))
1516 *total = 1;
1517 else
1518 *total = 2;
1519 return true;
1520
1521 case HIGH:
1522 *total = 2;
1523 return true;
1524
1525 case CONST:
1526 case LABEL_REF:
1527 case SYMBOL_REF:
1528 *total = 4;
1529 return true;
1530
1531 case CONST_DOUBLE:
1532 if ((x == CONST0_RTX (DFmode) || x == CONST0_RTX (SFmode))
1533 && outer_code != SET)
1534 *total = 0;
1535 else
1536 *total = 8;
1537 return true;
1538
1539 case MULT:
1540 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
1541 {
1542 *total = COSTS_N_INSNS (3);
1543 return true;
1544 }
1545
1546 /* A mode size N times larger than SImode needs O(N*N) more insns. */
1547 factor = GET_MODE_SIZE (mode) / 4;
1548 if (factor == 0)
1549 factor = 1;
1550
1551 if (TARGET_PA_11 && !TARGET_DISABLE_FPREGS && !TARGET_SOFT_FLOAT)
1552 *total = factor * factor * COSTS_N_INSNS (8);
1553 else
1554 *total = factor * factor * COSTS_N_INSNS (20);
1555 return true;
1556
1557 case DIV:
1558 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
1559 {
1560 *total = COSTS_N_INSNS (14);
1561 return true;
1562 }
1563 /* FALLTHRU */
1564
1565 case UDIV:
1566 case MOD:
1567 case UMOD:
1568 /* A mode size N times larger than SImode needs O(N*N) more insns. */
1569 factor = GET_MODE_SIZE (mode) / 4;
1570 if (factor == 0)
1571 factor = 1;
1572
1573 *total = factor * factor * COSTS_N_INSNS (60);
1574 return true;
1575
1576 case PLUS: /* this includes shNadd insns */
1577 case MINUS:
1578 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
1579 {
1580 *total = COSTS_N_INSNS (3);
1581 return true;
1582 }
1583
1584 /* A size N times larger than UNITS_PER_WORD needs N times as
1585 many insns, taking N times as long. */
1586 factor = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
1587 if (factor == 0)
1588 factor = 1;
1589 *total = factor * COSTS_N_INSNS (1);
1590 return true;
1591
1592 case ASHIFT:
1593 case ASHIFTRT:
1594 case LSHIFTRT:
1595 *total = COSTS_N_INSNS (1);
1596 return true;
1597
1598 default:
1599 return false;
1600 }
1601 }
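/* Example of the scaling above (illustrative): a DImode MULT on a
   32-bit target has factor = 8/4 = 2, so it costs 2*2 = 4 times the
   base multiply cost, i.e. COSTS_N_INSNS (32) with a PA 1.1 FPU
   enabled or COSTS_N_INSNS (80) without one.  */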
1602
1603 /* Ensure mode of ORIG, a REG rtx, is MODE. Returns either ORIG or a
1604 new rtx with the correct mode. */
1605 static inline rtx
1606 force_mode (machine_mode mode, rtx orig)
1607 {
1608 if (mode == GET_MODE (orig))
1609 return orig;
1610
1611 gcc_assert (REGNO (orig) < FIRST_PSEUDO_REGISTER);
1612
1613 return gen_rtx_REG (mode, REGNO (orig));
1614 }
1615
1616 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
1617
1618 static bool
1619 pa_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
1620 {
1621 return tls_referenced_p (x);
1622 }
1623
1624 /* Emit insns to move operands[1] into operands[0].
1625
1626 Return 1 if we have written out everything that needs to be done to
1627 do the move. Otherwise, return 0 and the caller will emit the move
1628 normally.
1629
1630 Note SCRATCH_REG may not be in the proper mode depending on how it
1631 will be used. This routine is responsible for creating a new copy
1632 of SCRATCH_REG in the proper mode. */
1633
1634 int
1635 pa_emit_move_sequence (rtx *operands, machine_mode mode, rtx scratch_reg)
1636 {
1637 register rtx operand0 = operands[0];
1638 register rtx operand1 = operands[1];
1639 register rtx tem;
1640
1641 /* We can only handle indexed addresses in the destination operand
1642 of floating point stores. Thus, we need to break out indexed
1643 addresses from the destination operand. */
1644 if (GET_CODE (operand0) == MEM && IS_INDEX_ADDR_P (XEXP (operand0, 0)))
1645 {
1646 gcc_assert (can_create_pseudo_p ());
1647
1648 tem = copy_to_mode_reg (Pmode, XEXP (operand0, 0));
1649 operand0 = replace_equiv_address (operand0, tem);
1650 }
1651
1652 /* On targets with non-equivalent space registers, break out unscaled
1653 indexed addresses from the source operand before the final CSE.
1654 We have to do this because the REG_POINTER flag is not correctly
1655 carried through various optimization passes and CSE may substitute
1656 a pseudo without the pointer set for one with the pointer set. As
1657 a result, we lose various opportunities to create insns with
1658 unscaled indexed addresses. */
1659 if (!TARGET_NO_SPACE_REGS
1660 && !cse_not_expected
1661 && GET_CODE (operand1) == MEM
1662 && GET_CODE (XEXP (operand1, 0)) == PLUS
1663 && REG_P (XEXP (XEXP (operand1, 0), 0))
1664 && REG_P (XEXP (XEXP (operand1, 0), 1)))
1665 operand1
1666 = replace_equiv_address (operand1,
1667 copy_to_mode_reg (Pmode, XEXP (operand1, 0)));
1668
1669 if (scratch_reg
1670 && reload_in_progress && GET_CODE (operand0) == REG
1671 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
1672 operand0 = reg_equiv_mem (REGNO (operand0));
1673 else if (scratch_reg
1674 && reload_in_progress && GET_CODE (operand0) == SUBREG
1675 && GET_CODE (SUBREG_REG (operand0)) == REG
1676 && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
1677 {
1678 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
1679 the code which tracks sets/uses for delete_output_reload. */
1680 rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
1681 reg_equiv_mem (REGNO (SUBREG_REG (operand0))),
1682 SUBREG_BYTE (operand0));
1683 operand0 = alter_subreg (&temp, true);
1684 }
1685
1686 if (scratch_reg
1687 && reload_in_progress && GET_CODE (operand1) == REG
1688 && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
1689 operand1 = reg_equiv_mem (REGNO (operand1));
1690 else if (scratch_reg
1691 && reload_in_progress && GET_CODE (operand1) == SUBREG
1692 && GET_CODE (SUBREG_REG (operand1)) == REG
1693 && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
1694 {
1695 /* We must not alter SUBREG_BYTE (operand1) since that would confuse
1696 the code which tracks sets/uses for delete_output_reload. */
1697 rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
1698 reg_equiv_mem (REGNO (SUBREG_REG (operand1))),
1699 SUBREG_BYTE (operand1));
1700 operand1 = alter_subreg (&temp, true);
1701 }
1702
1703 if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
1704 && ((tem = find_replacement (&XEXP (operand0, 0)))
1705 != XEXP (operand0, 0)))
1706 operand0 = replace_equiv_address (operand0, tem);
1707
1708 if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
1709 && ((tem = find_replacement (&XEXP (operand1, 0)))
1710 != XEXP (operand1, 0)))
1711 operand1 = replace_equiv_address (operand1, tem);
1712
1713 /* Handle secondary reloads for loads/stores of FP registers from
1714 REG+D addresses where D does not fit in 5 or 14 bits, including
1715 (subreg (mem (addr))) cases, and reloads for other unsupported
1716 memory operands. */
1717 if (scratch_reg
1718 && FP_REG_P (operand0)
1719 && (MEM_P (operand1)
1720 || (GET_CODE (operand1) == SUBREG
1721 && MEM_P (XEXP (operand1, 0)))))
1722 {
1723 rtx op1 = operand1;
1724
1725 if (GET_CODE (op1) == SUBREG)
1726 op1 = XEXP (op1, 0);
1727
1728 if (reg_plus_base_memory_operand (op1, GET_MODE (op1)))
1729 {
1730 if (!(TARGET_PA_20
1731 && !TARGET_ELF32
1732 && INT_14_BITS (XEXP (XEXP (op1, 0), 1)))
1733 && !INT_5_BITS (XEXP (XEXP (op1, 0), 1)))
1734 {
1735 /* SCRATCH_REG will hold an address and maybe the actual data.
1736 We want it in WORD_MODE regardless of what mode it was
1737 originally given to us. */
1738 scratch_reg = force_mode (word_mode, scratch_reg);
1739
1740 /* D might not fit in 14 bits either; for such cases load D
1741 into scratch reg. */
1742 if (!INT_14_BITS (XEXP (XEXP (op1, 0), 1)))
1743 {
1744 emit_move_insn (scratch_reg, XEXP (XEXP (op1, 0), 1));
1745 emit_move_insn (scratch_reg,
1746 gen_rtx_fmt_ee (GET_CODE (XEXP (op1, 0)),
1747 Pmode,
1748 XEXP (XEXP (op1, 0), 0),
1749 scratch_reg));
1750 }
1751 else
1752 emit_move_insn (scratch_reg, XEXP (op1, 0));
1753 emit_insn (gen_rtx_SET (operand0,
1754 replace_equiv_address (op1, scratch_reg)));
1755 return 1;
1756 }
1757 }
1758 else if ((!INT14_OK_STRICT && symbolic_memory_operand (op1, VOIDmode))
1759 || IS_LO_SUM_DLT_ADDR_P (XEXP (op1, 0))
1760 || IS_INDEX_ADDR_P (XEXP (op1, 0)))
1761 {
1762 /* Load memory address into SCRATCH_REG. */
1763 scratch_reg = force_mode (word_mode, scratch_reg);
1764 emit_move_insn (scratch_reg, XEXP (op1, 0));
1765 emit_insn (gen_rtx_SET (operand0,
1766 replace_equiv_address (op1, scratch_reg)));
1767 return 1;
1768 }
1769 }
1770 else if (scratch_reg
1771 && FP_REG_P (operand1)
1772 && (MEM_P (operand0)
1773 || (GET_CODE (operand0) == SUBREG
1774 && MEM_P (XEXP (operand0, 0)))))
1775 {
1776 rtx op0 = operand0;
1777
1778 if (GET_CODE (op0) == SUBREG)
1779 op0 = XEXP (op0, 0);
1780
1781 if (reg_plus_base_memory_operand (op0, GET_MODE (op0)))
1782 {
1783 if (!(TARGET_PA_20
1784 && !TARGET_ELF32
1785 && INT_14_BITS (XEXP (XEXP (op0, 0), 1)))
1786 && !INT_5_BITS (XEXP (XEXP (op0, 0), 1)))
1787 {
1788 /* SCRATCH_REG will hold an address and maybe the actual data.
1789 We want it in WORD_MODE regardless of what mode it was
1790 originally given to us. */
1791 scratch_reg = force_mode (word_mode, scratch_reg);
1792
1793 /* D might not fit in 14 bits either; for such cases load D
1794 into scratch reg. */
1795 if (!INT_14_BITS (XEXP (XEXP (op0, 0), 1)))
1796 {
1797 emit_move_insn (scratch_reg, XEXP (XEXP (op0, 0), 1));
1798 emit_move_insn (scratch_reg,
1799 gen_rtx_fmt_ee (GET_CODE (XEXP (op0, 0)),
1800 Pmode,
1801 XEXP (XEXP (op0, 0), 0),
1802 scratch_reg));
1803 }
1804 else
1805 emit_move_insn (scratch_reg, XEXP (op0, 0));
1806 emit_insn (gen_rtx_SET (replace_equiv_address (op0, scratch_reg),
1807 operand1));
1808 return 1;
1809 }
1810 }
1811 else if ((!INT14_OK_STRICT && symbolic_memory_operand (op0, VOIDmode))
1812 || IS_LO_SUM_DLT_ADDR_P (XEXP (op0, 0))
1813 || IS_INDEX_ADDR_P (XEXP (op0, 0)))
1814 {
1815 /* Load memory address into SCRATCH_REG. */
1816 scratch_reg = force_mode (word_mode, scratch_reg);
1817 emit_move_insn (scratch_reg, XEXP (op0, 0));
1818 emit_insn (gen_rtx_SET (replace_equiv_address (op0, scratch_reg),
1819 operand1));
1820 return 1;
1821 }
1822 }
1823 /* Handle secondary reloads for loads of FP registers from constant
1824 expressions by forcing the constant into memory. For the most part,
1825 this is only necessary for SImode and DImode.
1826
1827 Use scratch_reg to hold the address of the memory location. */
1828 else if (scratch_reg
1829 && CONSTANT_P (operand1)
1830 && FP_REG_P (operand0))
1831 {
1832 rtx const_mem, xoperands[2];
1833
1834 if (operand1 == CONST0_RTX (mode))
1835 {
1836 emit_insn (gen_rtx_SET (operand0, operand1));
1837 return 1;
1838 }
1839
1840 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1841 it in WORD_MODE regardless of what mode it was originally given
1842 to us. */
1843 scratch_reg = force_mode (word_mode, scratch_reg);
1844
1845 /* Force the constant into memory and put the address of the
1846 memory location into scratch_reg. */
1847 const_mem = force_const_mem (mode, operand1);
1848 xoperands[0] = scratch_reg;
1849 xoperands[1] = XEXP (const_mem, 0);
1850 pa_emit_move_sequence (xoperands, Pmode, 0);
1851
1852 /* Now load the destination register. */
1853 emit_insn (gen_rtx_SET (operand0,
1854 replace_equiv_address (const_mem, scratch_reg)));
1855 return 1;
1856 }
1857 /* Handle secondary reloads for SAR. These occur when trying to load
1858 the SAR from memory or a constant. */
1859 else if (scratch_reg
1860 && GET_CODE (operand0) == REG
1861 && REGNO (operand0) < FIRST_PSEUDO_REGISTER
1862 && REGNO_REG_CLASS (REGNO (operand0)) == SHIFT_REGS
1863 && (GET_CODE (operand1) == MEM || GET_CODE (operand1) == CONST_INT))
1864 {
1865 /* D might not fit in 14 bits either; for such cases load D into
1866 scratch reg. */
1867 if (GET_CODE (operand1) == MEM
1868 && !memory_address_p (GET_MODE (operand0), XEXP (operand1, 0)))
1869 {
1870 /* We are reloading the address into the scratch register, so we
1871 want to make sure the scratch register is a full register. */
1872 scratch_reg = force_mode (word_mode, scratch_reg);
1873
1874 emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
1875 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1,
1876 0)),
1877 Pmode,
1878 XEXP (XEXP (operand1, 0),
1879 0),
1880 scratch_reg));
1881
1882 /* Now we are going to load the scratch register from memory,
1883 we want to load it in the same width as the original MEM,
1884 which must be the same as the width of the ultimate destination,
1885 OPERAND0. */
1886 scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);
1887
1888 emit_move_insn (scratch_reg,
1889 replace_equiv_address (operand1, scratch_reg));
1890 }
1891 else
1892 {
1893 /* We want to load the scratch register using the same mode as
1894 the ultimate destination. */
1895 scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);
1896
1897 emit_move_insn (scratch_reg, operand1);
1898 }
1899
1900 /* And emit the insn to set the ultimate destination. We know that
1901 the scratch register has the same mode as the destination at this
1902 point. */
1903 emit_move_insn (operand0, scratch_reg);
1904 return 1;
1905 }
1906
1907 /* Handle the most common case: storing into a register. */
1908 if (register_operand (operand0, mode))
1909 {
1910 /* Legitimize TLS symbol references. This happens for references
1911 that aren't a legitimate constant. */
1912 if (PA_SYMBOL_REF_TLS_P (operand1))
1913 operand1 = legitimize_tls_address (operand1);
1914
1915 if (register_operand (operand1, mode)
1916 || (GET_CODE (operand1) == CONST_INT
1917 && pa_cint_ok_for_move (UINTVAL (operand1)))
1918 || (operand1 == CONST0_RTX (mode))
1919 || (GET_CODE (operand1) == HIGH
1920 && !symbolic_operand (XEXP (operand1, 0), VOIDmode))
1921 /* Only `general_operands' can come here, so MEM is ok. */
1922 || GET_CODE (operand1) == MEM)
1923 {
1924 /* Various sets are created during RTL generation which don't
1925 have the REG_POINTER flag correctly set. After the CSE pass,
1926 instruction recognition can fail if we don't consistently
1927 set this flag when performing register copies. This should
1928 also improve the opportunities for creating insns that use
1929 unscaled indexing. */
1930 if (REG_P (operand0) && REG_P (operand1))
1931 {
1932 if (REG_POINTER (operand1)
1933 && !REG_POINTER (operand0)
1934 && !HARD_REGISTER_P (operand0))
1935 copy_reg_pointer (operand0, operand1);
1936 }
1937
1938 /* When MEMs are broken out, the REG_POINTER flag doesn't
1939 get set. In some cases, we can set the REG_POINTER flag
1940 from the declaration for the MEM. */
1941 if (REG_P (operand0)
1942 && GET_CODE (operand1) == MEM
1943 && !REG_POINTER (operand0))
1944 {
1945 tree decl = MEM_EXPR (operand1);
1946
1947 /* Set the register pointer flag and register alignment
1948 if the declaration for this memory reference is a
1949 pointer type. */
1950 if (decl)
1951 {
1952 tree type;
1953
1954 /* If this is a COMPONENT_REF, use the FIELD_DECL from
1955 tree operand 1. */
1956 if (TREE_CODE (decl) == COMPONENT_REF)
1957 decl = TREE_OPERAND (decl, 1);
1958
1959 type = TREE_TYPE (decl);
1960 type = strip_array_types (type);
1961
1962 if (POINTER_TYPE_P (type))
1963 mark_reg_pointer (operand0, BITS_PER_UNIT);
1964 }
1965 }
1966
1967 emit_insn (gen_rtx_SET (operand0, operand1));
1968 return 1;
1969 }
1970 }
1971 else if (GET_CODE (operand0) == MEM)
1972 {
1973 if (mode == DFmode && operand1 == CONST0_RTX (mode)
1974 && !(reload_in_progress || reload_completed))
1975 {
1976 rtx temp = gen_reg_rtx (DFmode);
1977
1978 emit_insn (gen_rtx_SET (temp, operand1));
1979 emit_insn (gen_rtx_SET (operand0, temp));
1980 return 1;
1981 }
1982 if (register_operand (operand1, mode) || operand1 == CONST0_RTX (mode))
1983 {
1984 /* Run this case quickly. */
1985 emit_insn (gen_rtx_SET (operand0, operand1));
1986 return 1;
1987 }
1988 if (! (reload_in_progress || reload_completed))
1989 {
1990 operands[0] = validize_mem (operand0);
1991 operands[1] = operand1 = force_reg (mode, operand1);
1992 }
1993 }
1994
1995 /* Simplify the source if we need to.
1996 Note we do have to handle function labels here, even though we do
1997 not consider them legitimate constants. Loop optimizations can
1998 call emit_move_xxx with one as a source. */
1999 if ((GET_CODE (operand1) != HIGH && immediate_operand (operand1, mode))
2000 || (GET_CODE (operand1) == HIGH
2001 && symbolic_operand (XEXP (operand1, 0), mode))
2002 || function_label_operand (operand1, VOIDmode)
2003 || tls_referenced_p (operand1))
2004 {
2005 int ishighonly = 0;
2006
2007 if (GET_CODE (operand1) == HIGH)
2008 {
2009 ishighonly = 1;
2010 operand1 = XEXP (operand1, 0);
2011 }
2012 if (symbolic_operand (operand1, mode))
2013 {
2014 /* Argh. The assembler and linker can't handle arithmetic
2015 involving plabels.
2016
2017 So we force the plabel into memory, load operand0 from
2018 the memory location, then add in the constant part. */
2019 if ((GET_CODE (operand1) == CONST
2020 && GET_CODE (XEXP (operand1, 0)) == PLUS
2021 && function_label_operand (XEXP (XEXP (operand1, 0), 0),
2022 VOIDmode))
2023 || function_label_operand (operand1, VOIDmode))
2024 {
2025 rtx temp, const_part;
2026
2027 /* Figure out what (if any) scratch register to use. */
2028 if (reload_in_progress || reload_completed)
2029 {
2030 scratch_reg = scratch_reg ? scratch_reg : operand0;
2031 /* SCRATCH_REG will hold an address and maybe the actual
2032 data. We want it in WORD_MODE regardless of what mode it
2033 was originally given to us. */
2034 scratch_reg = force_mode (word_mode, scratch_reg);
2035 }
2036 else if (flag_pic)
2037 scratch_reg = gen_reg_rtx (Pmode);
2038
2039 if (GET_CODE (operand1) == CONST)
2040 {
2041 /* Save away the constant part of the expression. */
2042 const_part = XEXP (XEXP (operand1, 0), 1);
2043 gcc_assert (GET_CODE (const_part) == CONST_INT);
2044
2045 /* Force the function label into memory. */
2046 temp = force_const_mem (mode, XEXP (XEXP (operand1, 0), 0));
2047 }
2048 else
2049 {
2050 /* No constant part. */
2051 const_part = NULL_RTX;
2052
2053 /* Force the function label into memory. */
2054 temp = force_const_mem (mode, operand1);
2055 }
2056
2057
2058 /* Get the address of the memory location. PIC-ify it if
2059 necessary. */
2060 temp = XEXP (temp, 0);
2061 if (flag_pic)
2062 temp = legitimize_pic_address (temp, mode, scratch_reg);
2063
2064 /* Put the address of the memory location into our destination
2065 register. */
2066 operands[1] = temp;
2067 pa_emit_move_sequence (operands, mode, scratch_reg);
2068
2069 /* Now load from the memory location into our destination
2070 register. */
2071 operands[1] = gen_rtx_MEM (Pmode, operands[0]);
2072 pa_emit_move_sequence (operands, mode, scratch_reg);
2073
2074 /* And add back in the constant part. */
2075 if (const_part != NULL_RTX)
2076 expand_inc (operand0, const_part);
2077
2078 return 1;
2079 }
2080
2081 if (flag_pic)
2082 {
2083 rtx_insn *insn;
2084 rtx temp;
2085
2086 if (reload_in_progress || reload_completed)
2087 {
2088 temp = scratch_reg ? scratch_reg : operand0;
2089 /* TEMP will hold an address and maybe the actual
2090 data. We want it in WORD_MODE regardless of what mode it
2091 was originally given to us. */
2092 temp = force_mode (word_mode, temp);
2093 }
2094 else
2095 temp = gen_reg_rtx (Pmode);
2096
2097 /* Force (const (plus (symbol) (const_int))) to memory
2098 if the const_int will not fit in 14 bits. Although
2099 this requires a relocation, the instruction sequence
2100 needed to load the value is shorter. */
2101 if (GET_CODE (operand1) == CONST
2102 && GET_CODE (XEXP (operand1, 0)) == PLUS
2103 && GET_CODE (XEXP (XEXP (operand1, 0), 1)) == CONST_INT
2104 && !INT_14_BITS (XEXP (XEXP (operand1, 0), 1)))
2105 {
2106 rtx x, m = force_const_mem (mode, operand1);
2107
2108 x = legitimize_pic_address (XEXP (m, 0), mode, temp);
2109 x = replace_equiv_address (m, x);
2110 insn = emit_move_insn (operand0, x);
2111 }
2112 else
2113 {
2114 operands[1] = legitimize_pic_address (operand1, mode, temp);
2115 if (REG_P (operand0) && REG_P (operands[1]))
2116 copy_reg_pointer (operand0, operands[1]);
2117 insn = emit_move_insn (operand0, operands[1]);
2118 }
2119
2120 /* Put a REG_EQUAL note on this insn. */
2121 set_unique_reg_note (insn, REG_EQUAL, operand1);
2122 }
2123 /* On the HPPA, references to data space are supposed to use dp,
2124 register 27, but showing it in the RTL inhibits various cse
2125 and loop optimizations. */
2126 else
2127 {
2128 rtx temp, set;
2129
2130 if (reload_in_progress || reload_completed)
2131 {
2132 temp = scratch_reg ? scratch_reg : operand0;
2133 /* TEMP will hold an address and maybe the actual
2134 data. We want it in WORD_MODE regardless of what mode it
2135 was originally given to us. */
2136 temp = force_mode (word_mode, temp);
2137 }
2138 else
2139 temp = gen_reg_rtx (mode);
2140
2141 /* Loading a SYMBOL_REF into a register makes that register
2142 safe to be used as the base in an indexed address.
2143
2144 Don't mark hard registers though. That loses. */
2145 if (GET_CODE (operand0) == REG
2146 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
2147 mark_reg_pointer (operand0, BITS_PER_UNIT);
2148 if (REGNO (temp) >= FIRST_PSEUDO_REGISTER)
2149 mark_reg_pointer (temp, BITS_PER_UNIT);
2150
2151 if (ishighonly)
2152 set = gen_rtx_SET (operand0, temp);
2153 else
2154 set = gen_rtx_SET (operand0,
2155 gen_rtx_LO_SUM (mode, temp, operand1));
2156
2157 emit_insn (gen_rtx_SET (temp, gen_rtx_HIGH (mode, operand1)));
2158 emit_insn (set);
2159
2160 }
2161 return 1;
2162 }
2163 else if (tls_referenced_p (operand1))
2164 {
2165 rtx tmp = operand1;
2166 rtx addend = NULL;
2167
2168 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
2169 {
2170 addend = XEXP (XEXP (tmp, 0), 1);
2171 tmp = XEXP (XEXP (tmp, 0), 0);
2172 }
2173
2174 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
2175 tmp = legitimize_tls_address (tmp);
2176 if (addend)
2177 {
2178 tmp = gen_rtx_PLUS (mode, tmp, addend);
2179 tmp = force_operand (tmp, operands[0]);
2180 }
2181 operands[1] = tmp;
2182 }
2183 else if (GET_CODE (operand1) != CONST_INT
2184 || !pa_cint_ok_for_move (UINTVAL (operand1)))
2185 {
2186 rtx temp;
2187 rtx_insn *insn;
2188 rtx op1 = operand1;
2189 HOST_WIDE_INT value = 0;
2190 HOST_WIDE_INT insv = 0;
2191 int insert = 0;
2192
2193 if (GET_CODE (operand1) == CONST_INT)
2194 value = INTVAL (operand1);
2195
2196 if (TARGET_64BIT
2197 && GET_CODE (operand1) == CONST_INT
2198 && HOST_BITS_PER_WIDE_INT > 32
2199 && GET_MODE_BITSIZE (GET_MODE (operand0)) > 32)
2200 {
2201 HOST_WIDE_INT nval;
2202
2203 /* Extract the low order 32 bits of the value and sign extend.
2204 If the new value is the same as the original value, we can
2205 use the original value as-is. If the new value is
2206 different, we use it and insert the most-significant 32-bits
2207 of the original value into the final result. */
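/* For example, value 0x123456789 yields nval = 0x23456789 and
   insv = 0x1; the low word is loaded first and the 0x1 is later
   deposited into the upper 32 bits of the destination.  */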
2208 nval = ((value & (((HOST_WIDE_INT) 2 << 31) - 1))
2209 ^ ((HOST_WIDE_INT) 1 << 31)) - ((HOST_WIDE_INT) 1 << 31);
2210 if (value != nval)
2211 {
2212 #if HOST_BITS_PER_WIDE_INT > 32
2213 insv = value >= 0 ? value >> 32 : ~(~value >> 32);
2214 #endif
2215 insert = 1;
2216 value = nval;
2217 operand1 = GEN_INT (nval);
2218 }
2219 }
2220
2221 if (reload_in_progress || reload_completed)
2222 temp = scratch_reg ? scratch_reg : operand0;
2223 else
2224 temp = gen_reg_rtx (mode);
2225
2226 /* We don't directly split DImode constants on 32-bit targets
2227 because PLUS uses an 11-bit immediate and the insn sequence
2228 generated is not as efficient as the one using HIGH/LO_SUM. */
2229 if (GET_CODE (operand1) == CONST_INT
2230 && GET_MODE_BITSIZE (mode) <= BITS_PER_WORD
2231 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2232 && !insert)
2233 {
2234 /* Directly break constant into high and low parts. This
2235 provides better optimization opportunities because various
2236 passes recognize constants split with PLUS but not LO_SUM.
2237 We use a 14-bit signed low part except when the addition
2238 of 0x4000 to the high part might change the sign of the
2239 high part. */
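/* For example, value 0x12345 gives low = 0x2345 and high = 0x10000;
   since low >= 0x2000, high is bumped to 0x14000 and the final
   low part is 0x12345 - 0x14000 = -0x1cbb, which still fits in the
   14-bit signed immediate.  */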
2240 HOST_WIDE_INT low = value & 0x3fff;
2241 HOST_WIDE_INT high = value & ~ 0x3fff;
2242
2243 if (low >= 0x2000)
2244 {
2245 if (high == 0x7fffc000 || (mode == HImode && high == 0x4000))
2246 high += 0x2000;
2247 else
2248 high += 0x4000;
2249 }
2250
2251 low = value - high;
2252
2253 emit_insn (gen_rtx_SET (temp, GEN_INT (high)));
2254 operands[1] = gen_rtx_PLUS (mode, temp, GEN_INT (low));
2255 }
2256 else
2257 {
2258 emit_insn (gen_rtx_SET (temp, gen_rtx_HIGH (mode, operand1)));
2259 operands[1] = gen_rtx_LO_SUM (mode, temp, operand1);
2260 }
2261
2262 insn = emit_move_insn (operands[0], operands[1]);
2263
2264 /* Now insert the most significant 32 bits of the value
2265 into the register. When we don't have a second register
2266 available, it could take up to nine instructions to load
2267 a 64-bit integer constant. Prior to reload, we force
2268 constants that would take more than three instructions
2269 to load to the constant pool. During and after reload,
2270 we have to handle all possible values. */
2271 if (insert)
2272 {
2273 /* Use a HIGH/LO_SUM/INSV sequence if we have a second
2274 register and the value to be inserted is outside the
2275 range that can be loaded with three depdi instructions. */
2276 if (temp != operand0 && (insv >= 16384 || insv < -16384))
2277 {
2278 operand1 = GEN_INT (insv);
2279
2280 emit_insn (gen_rtx_SET (temp,
2281 gen_rtx_HIGH (mode, operand1)));
2282 emit_move_insn (temp, gen_rtx_LO_SUM (mode, temp, operand1));
2283 if (mode == DImode)
2284 insn = emit_insn (gen_insvdi (operand0, GEN_INT (32),
2285 const0_rtx, temp));
2286 else
2287 insn = emit_insn (gen_insvsi (operand0, GEN_INT (32),
2288 const0_rtx, temp));
2289 }
2290 else
2291 {
2292 int len = 5, pos = 27;
2293
2294 /* Insert the bits using the depdi instruction. */
2295 while (pos >= 0)
2296 {
2297 HOST_WIDE_INT v5 = ((insv & 31) ^ 16) - 16;
2298 HOST_WIDE_INT sign = v5 < 0;
2299
2300 /* Left extend the insertion. */
2301 insv = (insv >= 0 ? insv >> len : ~(~insv >> len));
2302 while (pos > 0 && (insv & 1) == sign)
2303 {
2304 insv = (insv >= 0 ? insv >> 1 : ~(~insv >> 1));
2305 len += 1;
2306 pos -= 1;
2307 }
2308
2309 if (mode == DImode)
2310 insn = emit_insn (gen_insvdi (operand0,
2311 GEN_INT (len),
2312 GEN_INT (pos),
2313 GEN_INT (v5)));
2314 else
2315 insn = emit_insn (gen_insvsi (operand0,
2316 GEN_INT (len),
2317 GEN_INT (pos),
2318 GEN_INT (v5)));
2319
2320 len = pos > 0 && pos < 5 ? pos : 5;
2321 pos -= len;
2322 }
2323 }
2324 }
2325
2326 set_unique_reg_note (insn, REG_EQUAL, op1);
2327
2328 return 1;
2329 }
2330 }
2331 /* Now have insn-emit do whatever it normally does. */
2332 return 0;
2333 }
2334
2335 /* Examine EXP and return nonzero if it contains an ADDR_EXPR (meaning
2336 it will need a link/runtime reloc). */
2337
2338 int
2339 pa_reloc_needed (tree exp)
2340 {
2341 int reloc = 0;
2342
2343 switch (TREE_CODE (exp))
2344 {
2345 case ADDR_EXPR:
2346 return 1;
2347
2348 case POINTER_PLUS_EXPR:
2349 case PLUS_EXPR:
2350 case MINUS_EXPR:
2351 reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
2352 reloc |= pa_reloc_needed (TREE_OPERAND (exp, 1));
2353 break;
2354
2355 CASE_CONVERT:
2356 case NON_LVALUE_EXPR:
2357 reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
2358 break;
2359
2360 case CONSTRUCTOR:
2361 {
2362 tree value;
2363 unsigned HOST_WIDE_INT ix;
2364
2365 FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), ix, value)
2366 if (value)
2367 reloc |= pa_reloc_needed (value);
2368 }
2369 break;
2370
2371 case ERROR_MARK:
2372 break;
2373
2374 default:
2375 break;
2376 }
2377 return reloc;
2378 }
2379
2380 \f
2381 /* Return the best assembler insn template
2382 for moving operands[1] into operands[0] as a fullword. */
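/* For example, a constant such as 0x1234 fits in 14 bits and loads
   with a single ldi; a constant whose low 11 bits are zero needs
   only ldil; anything else takes an ldil/ldo pair unless it
   satisfies pa_zdepi_cint_p and can be deposited directly.  */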
2383 const char *
2384 pa_singlemove_string (rtx *operands)
2385 {
2386 HOST_WIDE_INT intval;
2387
2388 if (GET_CODE (operands[0]) == MEM)
2389 return "stw %r1,%0";
2390 if (GET_CODE (operands[1]) == MEM)
2391 return "ldw %1,%0";
2392 if (GET_CODE (operands[1]) == CONST_DOUBLE)
2393 {
2394 long i;
2395
2396 gcc_assert (GET_MODE (operands[1]) == SFmode);
2397
2398 /* Translate the CONST_DOUBLE to a CONST_INT with the same target
2399 bit pattern. */
2400 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (operands[1]), i);
2401
2402 operands[1] = GEN_INT (i);
2403 /* Fall through to CONST_INT case. */
2404 }
2405 if (GET_CODE (operands[1]) == CONST_INT)
2406 {
2407 intval = INTVAL (operands[1]);
2408
2409 if (VAL_14_BITS_P (intval))
2410 return "ldi %1,%0";
2411 else if ((intval & 0x7ff) == 0)
2412 return "ldil L'%1,%0";
2413 else if (pa_zdepi_cint_p (intval))
2414 return "{zdepi %Z1,%0|depwi,z %Z1,%0}";
2415 else
2416 return "ldil L'%1,%0\n\tldo R'%1(%0),%0";
2417 }
2418 return "copy %1,%0";
2419 }
2420 \f
2421
2422 /* Compute position (in OP[1]) and width (in OP[2])
2423 useful for copying IMM to a register using the zdepi
2424 instructions. Store the immediate value to insert in OP[0]. */
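/* For example, IMM = 0x1f0 is a string of five 1 bits starting at
   bit 4, so this computes OP[0] = -1, OP[1] = 27 and OP[2] = 5;
   "zdepi -1,27,5" then reconstructs the value in the target
   register.  */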
2425 static void
2426 compute_zdepwi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2427 {
2428 int lsb, len;
2429
2430 /* Find the least significant set bit in IMM. */
2431 for (lsb = 0; lsb < 32; lsb++)
2432 {
2433 if ((imm & 1) != 0)
2434 break;
2435 imm >>= 1;
2436 }
2437
2438 /* Choose variants based on *sign* of the 5-bit field. */
2439 if ((imm & 0x10) == 0)
2440 len = (lsb <= 28) ? 4 : 32 - lsb;
2441 else
2442 {
2443 /* Find the width of the bitstring in IMM. */
2444 for (len = 5; len < 32 - lsb; len++)
2445 {
2446 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2447 break;
2448 }
2449
2450 /* Sign extend IMM as a 5-bit value. */
2451 imm = (imm & 0xf) - 0x10;
2452 }
2453
2454 op[0] = imm;
2455 op[1] = 31 - lsb;
2456 op[2] = len;
2457 }
2458
2459 /* Compute position (in OP[1]) and width (in OP[2])
2460 useful for copying IMM to a register using the depdi,z
2461 instructions. Store the immediate value to insert in OP[0]. */
2462
2463 static void
2464 compute_zdepdi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2465 {
2466 int lsb, len, maxlen;
2467
2468 maxlen = MIN (HOST_BITS_PER_WIDE_INT, 64);
2469
2470 /* Find the least significant set bit in IMM. */
2471 for (lsb = 0; lsb < maxlen; lsb++)
2472 {
2473 if ((imm & 1) != 0)
2474 break;
2475 imm >>= 1;
2476 }
2477
2478 /* Choose variants based on *sign* of the 5-bit field. */
2479 if ((imm & 0x10) == 0)
2480 len = (lsb <= maxlen - 4) ? 4 : maxlen - lsb;
2481 else
2482 {
2483 /* Find the width of the bitstring in IMM. */
2484 for (len = 5; len < maxlen - lsb; len++)
2485 {
2486 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2487 break;
2488 }
2489
2490 /* Extend length if host is narrow and IMM is negative. */
2491 if (HOST_BITS_PER_WIDE_INT == 32 && len == maxlen - lsb)
2492 len += 32;
2493
2494 /* Sign extend IMM as a 5-bit value. */
2495 imm = (imm & 0xf) - 0x10;
2496 }
2497
2498 op[0] = imm;
2499 op[1] = 63 - lsb;
2500 op[2] = len;
2501 }
2502
2503 /* Output assembler code to perform a doubleword move insn
2504 with operands OPERANDS. */
2505
2506 const char *
2507 pa_output_move_double (rtx *operands)
2508 {
2509 enum { REGOP, OFFSOP, MEMOP, CNSTOP, RNDOP } optype0, optype1;
2510 rtx latehalf[2];
2511 rtx addreg0 = 0, addreg1 = 0;
2512 int highonly = 0;
2513
2514 /* First classify both operands. */
2515
2516 if (REG_P (operands[0]))
2517 optype0 = REGOP;
2518 else if (offsettable_memref_p (operands[0]))
2519 optype0 = OFFSOP;
2520 else if (GET_CODE (operands[0]) == MEM)
2521 optype0 = MEMOP;
2522 else
2523 optype0 = RNDOP;
2524
2525 if (REG_P (operands[1]))
2526 optype1 = REGOP;
2527 else if (CONSTANT_P (operands[1]))
2528 optype1 = CNSTOP;
2529 else if (offsettable_memref_p (operands[1]))
2530 optype1 = OFFSOP;
2531 else if (GET_CODE (operands[1]) == MEM)
2532 optype1 = MEMOP;
2533 else
2534 optype1 = RNDOP;
2535
2536 /* Check for the cases that the operand constraints are not
2537 supposed to allow. */
2538 gcc_assert (optype0 == REGOP || optype1 == REGOP);
2539
2540 /* Handle copies between general and floating registers. */
2541
2542 if (optype0 == REGOP && optype1 == REGOP
2543 && FP_REG_P (operands[0]) ^ FP_REG_P (operands[1]))
2544 {
2545 if (FP_REG_P (operands[0]))
2546 {
2547 output_asm_insn ("{stws|stw} %1,-16(%%sp)", operands);
2548 output_asm_insn ("{stws|stw} %R1,-12(%%sp)", operands);
2549 return "{fldds|fldd} -16(%%sp),%0";
2550 }
2551 else
2552 {
2553 output_asm_insn ("{fstds|fstd} %1,-16(%%sp)", operands);
2554 output_asm_insn ("{ldws|ldw} -16(%%sp),%0", operands);
2555 return "{ldws|ldw} -12(%%sp),%R0";
2556 }
2557 }
2558
2559 /* Handle auto decrementing and incrementing loads and stores
2560 specifically, since the structure of the function doesn't work
2561 for them without major modification. Do it better when we teach
2562 this port about the general inc/dec addressing of PA.
2563 (This was written by tege. Chide him if it doesn't work.) */
2564
2565 if (optype0 == MEMOP)
2566 {
2567 /* We have to output the address syntax ourselves, since print_operand
2568 doesn't deal with the addresses we want to use. Fix this later. */
2569
2570 rtx addr = XEXP (operands[0], 0);
2571 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2572 {
2573 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2574
2575 operands[0] = XEXP (addr, 0);
2576 gcc_assert (GET_CODE (operands[1]) == REG
2577 && GET_CODE (operands[0]) == REG);
2578
2579 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2580
2581 /* No overlap between high target register and address
2582 register. (We do this in a non-obvious way to
2583 save a register file writeback) */
2584 if (GET_CODE (addr) == POST_INC)
2585 return "{stws|stw},ma %1,8(%0)\n\tstw %R1,-4(%0)";
2586 return "{stws|stw},ma %1,-8(%0)\n\tstw %R1,12(%0)";
2587 }
2588 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2589 {
2590 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2591
2592 operands[0] = XEXP (addr, 0);
2593 gcc_assert (GET_CODE (operands[1]) == REG
2594 && GET_CODE (operands[0]) == REG);
2595
2596 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2597 /* No overlap between high target register and address
2598 register. (We do this in a non-obvious way to save a
2599 register file writeback) */
2600 if (GET_CODE (addr) == PRE_INC)
2601 return "{stws|stw},mb %1,8(%0)\n\tstw %R1,4(%0)";
2602 return "{stws|stw},mb %1,-8(%0)\n\tstw %R1,4(%0)";
2603 }
2604 }
2605 if (optype1 == MEMOP)
2606 {
2607 /* We have to output the address syntax ourselves, since print_operand
2608 doesn't deal with the addresses we want to use. Fix this later. */
2609
2610 rtx addr = XEXP (operands[1], 0);
2611 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2612 {
2613 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2614
2615 operands[1] = XEXP (addr, 0);
2616 gcc_assert (GET_CODE (operands[0]) == REG
2617 && GET_CODE (operands[1]) == REG);
2618
2619 if (!reg_overlap_mentioned_p (high_reg, addr))
2620 {
2621 /* No overlap between high target register and address
2622 register. (We do this in a non-obvious way to
2623 save a register file writeback) */
2624 if (GET_CODE (addr) == POST_INC)
2625 return "{ldws|ldw},ma 8(%1),%0\n\tldw -4(%1),%R0";
2626 return "{ldws|ldw},ma -8(%1),%0\n\tldw 12(%1),%R0";
2627 }
2628 else
2629 {
2630 /* This is an undefined situation. We should load into the
2631 address register *and* update that register. Probably
2632 we don't need to handle this at all. */
2633 if (GET_CODE (addr) == POST_INC)
2634 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma 8(%1),%0";
2635 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma -8(%1),%0";
2636 }
2637 }
2638 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2639 {
2640 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2641
2642 operands[1] = XEXP (addr, 0);
2643 gcc_assert (GET_CODE (operands[0]) == REG
2644 && GET_CODE (operands[1]) == REG);
2645
2646 if (!reg_overlap_mentioned_p (high_reg, addr))
2647 {
2648 /* No overlap between high target register and address
2649 register. (We do this in a non-obvious way to
2650 save a register file writeback) */
2651 if (GET_CODE (addr) == PRE_INC)
2652 return "{ldws|ldw},mb 8(%1),%0\n\tldw 4(%1),%R0";
2653 return "{ldws|ldw},mb -8(%1),%0\n\tldw 4(%1),%R0";
2654 }
2655 else
2656 {
2657 /* This is an undefined situation. We should load into the
2658 address register *and* update that register. Probably
2659 we don't need to handle this at all. */
2660 if (GET_CODE (addr) == PRE_INC)
2661 return "ldw 12(%1),%R0\n\t{ldws|ldw},mb 8(%1),%0";
2662 return "ldw -4(%1),%R0\n\t{ldws|ldw},mb -8(%1),%0";
2663 }
2664 }
2665 else if (GET_CODE (addr) == PLUS
2666 && GET_CODE (XEXP (addr, 0)) == MULT)
2667 {
2668 rtx xoperands[4];
2669
2670 /* Load address into left half of destination register. */
2671 xoperands[0] = gen_rtx_SUBREG (SImode, operands[0], 0);
2672 xoperands[1] = XEXP (addr, 1);
2673 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2674 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2675 output_asm_insn ("{sh%O3addl %2,%1,%0|shladd,l %2,%O3,%1,%0}",
2676 xoperands);
2677 return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
2678 }
2679 else if (GET_CODE (addr) == PLUS
2680 && REG_P (XEXP (addr, 0))
2681 && REG_P (XEXP (addr, 1)))
2682 {
2683 rtx xoperands[3];
2684
2685 /* Load address into left half of destination register. */
2686 xoperands[0] = gen_rtx_SUBREG (SImode, operands[0], 0);
2687 xoperands[1] = XEXP (addr, 0);
2688 xoperands[2] = XEXP (addr, 1);
2689 output_asm_insn ("{addl|add,l} %1,%2,%0",
2690 xoperands);
2691 return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
2692 }
2693 }
2694
2695 /* If an operand is an unoffsettable memory ref, find a register
2696 we can increment temporarily to make it refer to the second word. */
2697
2698 if (optype0 == MEMOP)
2699 addreg0 = find_addr_reg (XEXP (operands[0], 0));
2700
2701 if (optype1 == MEMOP)
2702 addreg1 = find_addr_reg (XEXP (operands[1], 0));
2703
2704 /* Ok, we can do one word at a time.
2705 Normally we do the low-numbered word first.
2706
2707 In either case, set up in LATEHALF the operands to use
2708 for the high-numbered word and in some cases alter the
2709 operands in OPERANDS to be suitable for the low-numbered word. */
2710
2711 if (optype0 == REGOP)
2712 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2713 else if (optype0 == OFFSOP)
2714 latehalf[0] = adjust_address_nv (operands[0], SImode, 4);
2715 else
2716 latehalf[0] = operands[0];
2717
2718 if (optype1 == REGOP)
2719 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
2720 else if (optype1 == OFFSOP)
2721 latehalf[1] = adjust_address_nv (operands[1], SImode, 4);
2722 else if (optype1 == CNSTOP)
2723 {
2724 if (GET_CODE (operands[1]) == HIGH)
2725 {
2726 operands[1] = XEXP (operands[1], 0);
2727 highonly = 1;
2728 }
2729 split_double (operands[1], &operands[1], &latehalf[1]);
2730 }
2731 else
2732 latehalf[1] = operands[1];
2733
2734 /* If the first move would clobber the source of the second one,
2735 do them in the other order.
2736
2737 This can happen in two cases:
2738
2739 mem -> register where the first half of the destination register
2740 is the same register used in the memory's address. Reload
2741 can create such insns.
2742
2743 mem in this case will be either register indirect or register
2744 indirect plus a valid offset.
2745
2746 register -> register move where REGNO(dst) == REGNO(src) + 1.
2747 Someone (Tim/Tege?) claimed this can happen for parameter loads.
2748
2749 Handle mem -> register case first. */
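/* For example, a double load from 0(%r4) into %r4/%r5 must emit
   "ldw 4(%r4),%r5" before "ldw 0(%r4),%r4"; loading %r4 first
   would destroy the base register.  */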
2750 if (optype0 == REGOP
2751 && (optype1 == MEMOP || optype1 == OFFSOP)
2752 && refers_to_regno_p (REGNO (operands[0]), operands[1]))
2753 {
2754 /* Do the late half first. */
2755 if (addreg1)
2756 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2757 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2758
2759 /* Then clobber. */
2760 if (addreg1)
2761 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2762 return pa_singlemove_string (operands);
2763 }
2764
2765 /* Now handle register -> register case. */
2766 if (optype0 == REGOP && optype1 == REGOP
2767 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
2768 {
2769 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2770 return pa_singlemove_string (operands);
2771 }
2772
2773 /* Normal case: do the two words, low-numbered first. */
2774
2775 output_asm_insn (pa_singlemove_string (operands), operands);
2776
2777 /* Make any unoffsettable addresses point at high-numbered word. */
2778 if (addreg0)
2779 output_asm_insn ("ldo 4(%0),%0", &addreg0);
2780 if (addreg1)
2781 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2782
2783 /* Do high-numbered word. */
2784 if (highonly)
2785 output_asm_insn ("ldil L'%1,%0", latehalf);
2786 else
2787 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2788
2789 /* Undo the adds we just did. */
2790 if (addreg0)
2791 output_asm_insn ("ldo -4(%0),%0", &addreg0);
2792 if (addreg1)
2793 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2794
2795 return "";
2796 }
2797 \f
2798 const char *
2799 pa_output_fp_move_double (rtx *operands)
2800 {
2801 if (FP_REG_P (operands[0]))
2802 {
2803 if (FP_REG_P (operands[1])
2804 || operands[1] == CONST0_RTX (GET_MODE (operands[0])))
2805 output_asm_insn ("fcpy,dbl %f1,%0", operands);
2806 else
2807 output_asm_insn ("fldd%F1 %1,%0", operands);
2808 }
2809 else if (FP_REG_P (operands[1]))
2810 {
2811 output_asm_insn ("fstd%F0 %1,%0", operands);
2812 }
2813 else
2814 {
2815 rtx xoperands[2];
2816
2817 gcc_assert (operands[1] == CONST0_RTX (GET_MODE (operands[0])));
2818
2819 /* This is a pain. You have to be prepared to deal with an
2820 arbitrary address here including pre/post increment/decrement.
2821
2822 So avoid this in the MD. */
2823 gcc_assert (GET_CODE (operands[0]) == REG);
2824
2825 xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2826 xoperands[0] = operands[0];
2827 output_asm_insn ("copy %%r0,%0\n\tcopy %%r0,%1", xoperands);
2828 }
2829 return "";
2830 }
2831 \f
2832 /* Return a REG that occurs in ADDR with coefficient 1.
2833 ADDR can be effectively incremented by incrementing REG. */
2834
2835 static rtx
2836 find_addr_reg (rtx addr)
2837 {
2838 while (GET_CODE (addr) == PLUS)
2839 {
2840 if (GET_CODE (XEXP (addr, 0)) == REG)
2841 addr = XEXP (addr, 0);
2842 else if (GET_CODE (XEXP (addr, 1)) == REG)
2843 addr = XEXP (addr, 1);
2844 else if (CONSTANT_P (XEXP (addr, 0)))
2845 addr = XEXP (addr, 1);
2846 else if (CONSTANT_P (XEXP (addr, 1)))
2847 addr = XEXP (addr, 0);
2848 else
2849 gcc_unreachable ();
2850 }
2851 gcc_assert (GET_CODE (addr) == REG);
2852 return addr;
2853 }
2854
2855 /* Emit code to perform a block move.
2856
2857 OPERANDS[0] is the destination pointer as a REG, clobbered.
2858 OPERANDS[1] is the source pointer as a REG, clobbered.
2859 OPERANDS[2] is a register for temporary storage.
2860 OPERANDS[3] is a register for temporary storage.
2861 OPERANDS[4] is the size as a CONST_INT
2862 OPERANDS[5] is the alignment safe to use, as a CONST_INT.
2863 OPERANDS[6] is another temporary register. */
2864
2865 const char *
2866 pa_output_block_move (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2867 {
2868 int align = INTVAL (operands[5]);
2869 unsigned long n_bytes = INTVAL (operands[4]);
2870
2871 /* We can't move more than a word at a time because the PA
2872 has no integer move insns longer than a word. (Could use fp mem ops?) */
2873 if (align > (TARGET_64BIT ? 8 : 4))
2874 align = (TARGET_64BIT ? 8 : 4);
2875
2876 /* Note that we know each loop below will execute at least twice
2877 (else we would have open-coded the copy). */
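/* In each aligned case below, the loop counter is preset to
   n_bytes - 2 * align and the addib,>= step subtracts 2 * align,
   so the loop keeps iterating while at least 2 * align bytes
   remain to copy.  */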
2878 switch (align)
2879 {
2880 case 8:
2881 /* Pre-adjust the loop counter. */
2882 operands[4] = GEN_INT (n_bytes - 16);
2883 output_asm_insn ("ldi %4,%2", operands);
2884
2885 /* Copying loop. */
2886 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2887 output_asm_insn ("ldd,ma 8(%1),%6", operands);
2888 output_asm_insn ("std,ma %3,8(%0)", operands);
2889 output_asm_insn ("addib,>= -16,%2,.-12", operands);
2890 output_asm_insn ("std,ma %6,8(%0)", operands);
2891
2892 /* Handle the residual. There could be up to 7 bytes of
2893 residual to copy! */
2894 if (n_bytes % 16 != 0)
2895 {
2896 operands[4] = GEN_INT (n_bytes % 8);
2897 if (n_bytes % 16 >= 8)
2898 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2899 if (n_bytes % 8 != 0)
2900 output_asm_insn ("ldd 0(%1),%6", operands);
2901 if (n_bytes % 16 >= 8)
2902 output_asm_insn ("std,ma %3,8(%0)", operands);
2903 if (n_bytes % 8 != 0)
2904 output_asm_insn ("stdby,e %6,%4(%0)", operands);
2905 }
2906 return "";
2907
2908 case 4:
2909 /* Pre-adjust the loop counter. */
2910 operands[4] = GEN_INT (n_bytes - 8);
2911 output_asm_insn ("ldi %4,%2", operands);
2912
2913 /* Copying loop. */
2914 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2915 output_asm_insn ("{ldws|ldw},ma 4(%1),%6", operands);
2916 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2917 output_asm_insn ("addib,>= -8,%2,.-12", operands);
2918 output_asm_insn ("{stws|stw},ma %6,4(%0)", operands);
2919
2920 /* Handle the residual. There could be up to 7 bytes of
2921 residual to copy! */
2922 if (n_bytes % 8 != 0)
2923 {
2924 operands[4] = GEN_INT (n_bytes % 4);
2925 if (n_bytes % 8 >= 4)
2926 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2927 if (n_bytes % 4 != 0)
2928 output_asm_insn ("ldw 0(%1),%6", operands);
2929 if (n_bytes % 8 >= 4)
2930 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2931 if (n_bytes % 4 != 0)
2932 output_asm_insn ("{stbys|stby},e %6,%4(%0)", operands);
2933 }
2934 return "";
2935
2936 case 2:
2937 /* Pre-adjust the loop counter. */
2938 operands[4] = GEN_INT (n_bytes - 4);
2939 output_asm_insn ("ldi %4,%2", operands);
2940
2941 /* Copying loop. */
2942 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2943 output_asm_insn ("{ldhs|ldh},ma 2(%1),%6", operands);
2944 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2945 output_asm_insn ("addib,>= -4,%2,.-12", operands);
2946 output_asm_insn ("{sths|sth},ma %6,2(%0)", operands);
2947
2948 /* Handle the residual. */
2949 if (n_bytes % 4 != 0)
2950 {
2951 if (n_bytes % 4 >= 2)
2952 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2953 if (n_bytes % 2 != 0)
2954 output_asm_insn ("ldb 0(%1),%6", operands);
2955 if (n_bytes % 4 >= 2)
2956 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2957 if (n_bytes % 2 != 0)
2958 output_asm_insn ("stb %6,0(%0)", operands);
2959 }
2960 return "";
2961
2962 case 1:
2963 /* Pre-adjust the loop counter. */
2964 operands[4] = GEN_INT (n_bytes - 2);
2965 output_asm_insn ("ldi %4,%2", operands);
2966
2967 /* Copying loop. */
2968 output_asm_insn ("{ldbs|ldb},ma 1(%1),%3", operands);
2969 output_asm_insn ("{ldbs|ldb},ma 1(%1),%6", operands);
2970 output_asm_insn ("{stbs|stb},ma %3,1(%0)", operands);
2971 output_asm_insn ("addib,>= -2,%2,.-12", operands);
2972 output_asm_insn ("{stbs|stb},ma %6,1(%0)", operands);
2973
2974 /* Handle the residual. */
2975 if (n_bytes % 2 != 0)
2976 {
2977 output_asm_insn ("ldb 0(%1),%3", operands);
2978 output_asm_insn ("stb %3,0(%0)", operands);
2979 }
2980 return "";
2981
2982 default:
2983 gcc_unreachable ();
2984 }
2985 }
2986
2987 /* Count the number of insns necessary to handle this block move.
2988
2989 Basic structure is the same as pa_output_block_move, except that we
2990 count insns rather than emit them. */
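/* For example, with 4-byte alignment and n_bytes = 22, the copying
   loop costs 6 insns, the word residual (22 % 8 >= 4) adds 2 and
   the sub-word residual (22 % 4 != 0) adds 2 more, giving 10 insns
   or 40 bytes.  */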
2991
2992 static int
2993 compute_movmem_length (rtx_insn *insn)
2994 {
2995 rtx pat = PATTERN (insn);
2996 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
2997 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 6), 0));
2998 unsigned int n_insns = 0;
2999
3000 /* We can't move more than a word at a time because the PA
3001 has no integer move insns longer than a word. (Could use fp mem ops?) */
3002 if (align > (TARGET_64BIT ? 8 : 4))
3003 align = (TARGET_64BIT ? 8 : 4);
3004
3005 /* The basic copying loop. */
3006 n_insns = 6;
3007
3008 /* Residuals. */
3009 if (n_bytes % (2 * align) != 0)
3010 {
3011 if ((n_bytes % (2 * align)) >= align)
3012 n_insns += 2;
3013
3014 if ((n_bytes % align) != 0)
3015 n_insns += 2;
3016 }
3017
3018 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
3019 return n_insns * 4;
3020 }
3021
3022 /* Emit code to perform a block clear.
3023
3024 OPERANDS[0] is the destination pointer as a REG, clobbered.
3025 OPERANDS[1] is a register for temporary storage.
3026 OPERANDS[2] is the size as a CONST_INT
3027 OPERANDS[3] is the alignment safe to use, as a CONST_INT. */
3028
3029 const char *
3030 pa_output_block_clear (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
3031 {
3032 int align = INTVAL (operands[3]);
3033 unsigned long n_bytes = INTVAL (operands[2]);
3034
3035 /* We can't clear more than a word at a time because the PA
3036 has no integer move insns longer than a word. */
3037 if (align > (TARGET_64BIT ? 8 : 4))
3038 align = (TARGET_64BIT ? 8 : 4);
3039
3040 /* Note that we know each loop below will execute at least twice
3041 (else we would have open-coded the copy). */
3042 switch (align)
3043 {
3044 case 8:
3045 /* Pre-adjust the loop counter. */
3046 operands[2] = GEN_INT (n_bytes - 16);
3047 output_asm_insn ("ldi %2,%1", operands);
3048
3049 /* Loop. */
3050 output_asm_insn ("std,ma %%r0,8(%0)", operands);
3051 output_asm_insn ("addib,>= -16,%1,.-4", operands);
3052 output_asm_insn ("std,ma %%r0,8(%0)", operands);
3053
3054 /* Handle the residual. There could be up to 7 bytes of
3055 residual to copy! */
3056 if (n_bytes % 16 != 0)
3057 {
3058 operands[2] = GEN_INT (n_bytes % 8);
3059 if (n_bytes % 16 >= 8)
3060 output_asm_insn ("std,ma %%r0,8(%0)", operands);
3061 if (n_bytes % 8 != 0)
3062 output_asm_insn ("stdby,e %%r0,%2(%0)", operands);
3063 }
3064 return "";
3065
3066 case 4:
3067 /* Pre-adjust the loop counter. */
3068 operands[2] = GEN_INT (n_bytes - 8);
3069 output_asm_insn ("ldi %2,%1", operands);
3070
3071 /* Loop. */
3072 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
3073 output_asm_insn ("addib,>= -8,%1,.-4", operands);
3074 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
3075
3076 /* Handle the residual. There could be up to 7 bytes of
3077 residual to copy! */
3078 if (n_bytes % 8 != 0)
3079 {
3080 operands[2] = GEN_INT (n_bytes % 4);
3081 if (n_bytes % 8 >= 4)
3082 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
3083 if (n_bytes % 4 != 0)
3084 output_asm_insn ("{stbys|stby},e %%r0,%2(%0)", operands);
3085 }
3086 return "";
3087
3088 case 2:
3089 /* Pre-adjust the loop counter. */
3090 operands[2] = GEN_INT (n_bytes - 4);
3091 output_asm_insn ("ldi %2,%1", operands);
3092
3093 /* Loop. */
3094 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
3095 output_asm_insn ("addib,>= -4,%1,.-4", operands);
3096 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
3097
3098 /* Handle the residual. */
3099 if (n_bytes % 4 != 0)
3100 {
3101 if (n_bytes % 4 >= 2)
3102 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
3103 if (n_bytes % 2 != 0)
3104 output_asm_insn ("stb %%r0,0(%0)", operands);
3105 }
3106 return "";
3107
3108 case 1:
3109 /* Pre-adjust the loop counter. */
3110 operands[2] = GEN_INT (n_bytes - 2);
3111 output_asm_insn ("ldi %2,%1", operands);
3112
3113 /* Loop. */
3114 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
3115 output_asm_insn ("addib,>= -2,%1,.-4", operands);
3116 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
3117
3118 /* Handle the residual. */
3119 if (n_bytes % 2 != 0)
3120 output_asm_insn ("stb %%r0,0(%0)", operands);
3121
3122 return "";
3123
3124 default:
3125 gcc_unreachable ();
3126 }
3127 }
3128
3129 /* Count the number of insns necessary to handle this block clear.
3130
3131 Basic structure is the same as pa_output_block_clear, except that we
3132 count insns rather than emit them. */
3133
3134 static int
3135 compute_clrmem_length (rtx_insn *insn)
3136 {
3137 rtx pat = PATTERN (insn);
3138 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0));
3139 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 3), 0));
3140 unsigned int n_insns = 0;
3141
3142 /* We can't clear more than a word at a time because the PA
3143 has no integer move insns longer than a word. */
3144 if (align > (TARGET_64BIT ? 8 : 4))
3145 align = (TARGET_64BIT ? 8 : 4);
3146
3147 /* The basic loop. */
3148 n_insns = 4;
3149
3150 /* Residuals. */
3151 if (n_bytes % (2 * align) != 0)
3152 {
3153 if ((n_bytes % (2 * align)) >= align)
3154 n_insns++;
3155
3156 if ((n_bytes % align) != 0)
3157 n_insns++;
3158 }
3159
3160 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
3161 return n_insns * 4;
3162 }
3163 \f
3164
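/* Return a string to perform a bitwise-and of operands[1] with
   operands[2], storing the result in operands[0].  A constant mask
   whose clear bits form one contiguous field is handled with a
   single extract or deposit; e.g., mask 0xffffff00 yields
   "{depi|depwi} 0,31,8,%0", which clears the low 8 bits.  */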
3165 const char *
3166 pa_output_and (rtx *operands)
3167 {
3168 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
3169 {
3170 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3171 int ls0, ls1, ms0, p, len;
3172
3173 for (ls0 = 0; ls0 < 32; ls0++)
3174 if ((mask & (1 << ls0)) == 0)
3175 break;
3176
3177 for (ls1 = ls0; ls1 < 32; ls1++)
3178 if ((mask & (1 << ls1)) != 0)
3179 break;
3180
3181 for (ms0 = ls1; ms0 < 32; ms0++)
3182 if ((mask & (1 << ms0)) == 0)
3183 break;
3184
3185 gcc_assert (ms0 == 32);
3186
3187 if (ls1 == 32)
3188 {
3189 len = ls0;
3190
3191 gcc_assert (len);
3192
3193 operands[2] = GEN_INT (len);
3194 return "{extru|extrw,u} %1,31,%2,%0";
3195 }
3196 else
3197 {
3198 /* We could use this `depi' for the case above as well, but `depi'
3199 requires one more register file access than an `extru'. */
3200
3201 p = 31 - ls0;
3202 len = ls1 - ls0;
3203
3204 operands[2] = GEN_INT (p);
3205 operands[3] = GEN_INT (len);
3206 return "{depi|depwi} 0,%2,%3,%0";
3207 }
3208 }
3209 else
3210 return "and %1,%2,%0";
3211 }
3212
3213 /* Return a string to perform a bitwise-and of operands[1] with operands[2]
3214 storing the result in operands[0]. */
3215 const char *
3216 pa_output_64bit_and (rtx *operands)
3217 {
3218 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
3219 {
3220 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3221 int ls0, ls1, ms0, p, len;
3222
3223 for (ls0 = 0; ls0 < HOST_BITS_PER_WIDE_INT; ls0++)
3224 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls0)) == 0)
3225 break;
3226
3227 for (ls1 = ls0; ls1 < HOST_BITS_PER_WIDE_INT; ls1++)
3228 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls1)) != 0)
3229 break;
3230
3231 for (ms0 = ls1; ms0 < HOST_BITS_PER_WIDE_INT; ms0++)
3232 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ms0)) == 0)
3233 break;
3234
3235 gcc_assert (ms0 == HOST_BITS_PER_WIDE_INT);
3236
3237 if (ls1 == HOST_BITS_PER_WIDE_INT)
3238 {
3239 len = ls0;
3240
3241 gcc_assert (len);
3242
3243 operands[2] = GEN_INT (len);
3244 return "extrd,u %1,63,%2,%0";
3245 }
3246 else
3247 {
3248 /* We could use this `depi' for the case above as well, but `depi'
3249 requires one more register file access than an `extru'. */
3250
3251 p = 63 - ls0;
3252 len = ls1 - ls0;
3253
3254 operands[2] = GEN_INT (p);
3255 operands[3] = GEN_INT (len);
3256 return "depdi 0,%2,%3,%0";
3257 }
3258 }
3259 else
3260 return "and %1,%2,%0";
3261 }
3262
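/* Return a string to perform a bitwise-or of operands[1] with
   operands[2], storing the result in operands[0].  A zero mask is
   a plain copy; otherwise the constant mask must be one contiguous
   string of 1 bits, handled by a single deposit; e.g., mask 0xff00
   yields "{depi|depwi} -1,23,8,%0".  */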
3263 const char *
3264 pa_output_ior (rtx *operands)
3265 {
3266 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3267 int bs0, bs1, p, len;
3268
3269 if (INTVAL (operands[2]) == 0)
3270 return "copy %1,%0";
3271
3272 for (bs0 = 0; bs0 < 32; bs0++)
3273 if ((mask & (1 << bs0)) != 0)
3274 break;
3275
3276 for (bs1 = bs0; bs1 < 32; bs1++)
3277 if ((mask & (1 << bs1)) == 0)
3278 break;
3279
3280 gcc_assert (bs1 == 32 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3281
3282 p = 31 - bs0;
3283 len = bs1 - bs0;
3284
3285 operands[2] = GEN_INT (p);
3286 operands[3] = GEN_INT (len);
3287 return "{depi|depwi} -1,%2,%3,%0";
3288 }
3289
3290 /* Return a string to perform a bitwise-or of operands[1] with operands[2]
3291 storing the result in operands[0]. */
3292 const char *
3293 pa_output_64bit_ior (rtx *operands)
3294 {
3295 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3296 int bs0, bs1, p, len;
3297
3298 if (INTVAL (operands[2]) == 0)
3299 return "copy %1,%0";
3300
3301 for (bs0 = 0; bs0 < HOST_BITS_PER_WIDE_INT; bs0++)
3302 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs0)) != 0)
3303 break;
3304
3305 for (bs1 = bs0; bs1 < HOST_BITS_PER_WIDE_INT; bs1++)
3306 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs1)) == 0)
3307 break;
3308
3309 gcc_assert (bs1 == HOST_BITS_PER_WIDE_INT
3310 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3311
3312 p = 63 - bs0;
3313 len = bs1 - bs0;
3314
3315 operands[2] = GEN_INT (p);
3316 operands[3] = GEN_INT (len);
3317 return "depdi -1,%2,%3,%0";
3318 }
3319 \f
3320 /* Target hook for assembling integer objects. This code handles
3321 aligned SI and DI integers specially since function references
3322 must be preceded by P%. */
3323
3324 static bool
3325 pa_assemble_integer (rtx x, unsigned int size, int aligned_p)
3326 {
3327 bool result;
3328 tree decl = NULL;
3329
3330 /* When we have a SYMBOL_REF with a SYMBOL_REF_DECL, we need to
3331 call assemble_external and set the SYMBOL_REF_DECL to NULL before
3332 calling output_addr_const. Otherwise, it may call assemble_external
3333 in the midst of outputting the assembler code for the SYMBOL_REF.
3334 We restore the SYMBOL_REF_DECL after the output is done. */
3335 if (GET_CODE (x) == SYMBOL_REF)
3336 {
3337 decl = SYMBOL_REF_DECL (x);
3338 if (decl)
3339 {
3340 assemble_external (decl);
3341 SET_SYMBOL_REF_DECL (x, NULL);
3342 }
3343 }
3344
3345 if (size == UNITS_PER_WORD
3346 && aligned_p
3347 && function_label_operand (x, VOIDmode))
3348 {
3349 fputs (size == 8 ? "\t.dword\t" : "\t.word\t", asm_out_file);
3350
3351 /* We don't want an OPD when generating fast indirect calls. */
3352 if (!TARGET_FAST_INDIRECT_CALLS)
3353 fputs ("P%", asm_out_file);
3354
3355 output_addr_const (asm_out_file, x);
3356 fputc ('\n', asm_out_file);
3357 result = true;
3358 }
3359 else
3360 result = default_assemble_integer (x, size, aligned_p);
3361
3362 if (decl)
3363 SET_SYMBOL_REF_DECL (x, decl);
3364
3365 return result;
3366 }
3367 \f
3368 /* Output an ascii string. */
3369 void
3370 pa_output_ascii (FILE *file, const char *p, int size)
3371 {
3372 int i;
3373 int chars_output;
3374 unsigned char partial_output[16]; /* Max space 4 chars can occupy. */
3375
3376 /* The HP assembler can only take strings of 256 characters at one
3377 time. This is a limitation on input line length, *not* the
3378 length of the string. Sigh. Even worse, it seems that the
3379 restriction is in number of input characters (see \xnn &
3380 \whatever). So we have to do this very carefully. */
3381
3382 fputs ("\t.STRING \"", file);
3383
3384 chars_output = 0;
3385 for (i = 0; i < size; i += 4)
3386 {
3387 int co = 0;
3388 int io = 0;
3389 for (io = 0, co = 0; io < MIN (4, size - i); io++)
3390 {
3391 register unsigned int c = (unsigned char) p[i + io];
3392
3393 if (c == '\"' || c == '\\')
3394 partial_output[co++] = '\\';
3395 if (c >= ' ' && c < 0177)
3396 partial_output[co++] = c;
3397 else
3398 {
3399 unsigned int hexd;
3400 partial_output[co++] = '\\';
3401 partial_output[co++] = 'x';
3402 hexd = c / 16 + '0';
3403 if (hexd > '9')
3404 hexd -= '9' - 'a' + 1;
3405 partial_output[co++] = hexd;
3406 hexd = c % 16 + '0';
3407 if (hexd > '9')
3408 hexd -= '9' - 'a' + 1;
3409 partial_output[co++] = hexd;
3410 }
3411 }
3412 if (chars_output + co > 243)
3413 {
3414 fputs ("\"\n\t.STRING \"", file);
3415 chars_output = 0;
3416 }
3417 fwrite (partial_output, 1, (size_t) co, file);
3418 chars_output += co;
3419 co = 0;
3420 }
3421 fputs ("\"\n", file);
3422 }
3423
3424 /* Try to rewrite floating point comparisons & branches to avoid
3425 useless add,tr insns.
3426
3427 CHECK_NOTES is nonzero if we should examine REG_DEAD notes
3428 to see if FPCC is dead. CHECK_NOTES is nonzero for the
3429 first attempt to remove useless add,tr insns. It is zero
3430 for the second pass as reorg sometimes leaves bogus REG_DEAD
3431 notes lying around.
3432
3433 When CHECK_NOTES is zero we can only eliminate add,tr insns
3434 when there's a 1:1 correspondence between fcmp and ftest/fbranch
3435 instructions. */
3436 static void
3437 remove_useless_addtr_insns (int check_notes)
3438 {
3439 rtx_insn *insn;
3440 static int pass = 0;
3441
3442 /* This is fairly cheap, so always run it when optimizing. */
3443 if (optimize > 0)
3444 {
3445 int fcmp_count = 0;
3446 int fbranch_count = 0;
3447
3448 /* Walk all the insns in this function looking for fcmp & fbranch
3449 instructions. Keep track of how many of each we find. */
3450 for (insn = get_insns (); insn; insn = next_insn (insn))
3451 {
3452 rtx tmp;
3453
3454 /* Ignore anything that isn't an INSN or a JUMP_INSN. */
3455 if (! NONJUMP_INSN_P (insn) && ! JUMP_P (insn))
3456 continue;
3457
3458 tmp = PATTERN (insn);
3459
3460 /* It must be a set. */
3461 if (GET_CODE (tmp) != SET)
3462 continue;
3463
3464 /* If the destination is CCFP, then we've found an fcmp insn. */
3465 tmp = SET_DEST (tmp);
3466 if (GET_CODE (tmp) == REG && REGNO (tmp) == 0)
3467 {
3468 fcmp_count++;
3469 continue;
3470 }
3471
3472 tmp = PATTERN (insn);
3473 /* If this is an fbranch instruction, bump the fbranch counter. */
3474 if (GET_CODE (tmp) == SET
3475 && SET_DEST (tmp) == pc_rtx
3476 && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
3477 && GET_CODE (XEXP (SET_SRC (tmp), 0)) == NE
3478 && GET_CODE (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == REG
3479 && REGNO (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == 0)
3480 {
3481 fbranch_count++;
3482 continue;
3483 }
3484 }
3485
3486
3487 /* Find all floating point compare + branch insns. If possible,
3488 reverse the comparison & the branch to avoid add,tr insns. */
3489 for (insn = get_insns (); insn; insn = next_insn (insn))
3490 {
3491 rtx tmp;
3492 rtx_insn *next;
3493
3494 /* Ignore anything that isn't an INSN. */
3495 if (! NONJUMP_INSN_P (insn))
3496 continue;
3497
3498 tmp = PATTERN (insn);
3499
3500 /* It must be a set. */
3501 if (GET_CODE (tmp) != SET)
3502 continue;
3503
3504 /* The destination must be CCFP, which is register zero. */
3505 tmp = SET_DEST (tmp);
3506 if (GET_CODE (tmp) != REG || REGNO (tmp) != 0)
3507 continue;
3508
3509 /* INSN should be a set of CCFP.
3510
3511 See if the result of this insn is used in a reversed FP
3512 conditional branch. If so, reverse our condition and
3513 the branch. Doing so avoids useless add,tr insns. */
3514 next = next_insn (insn);
3515 while (next)
3516 {
3517 /* Jumps, calls and labels stop our search. */
3518 if (JUMP_P (next) || CALL_P (next) || LABEL_P (next))
3519 break;
3520
3521 /* As does another fcmp insn. */
3522 if (NONJUMP_INSN_P (next)
3523 && GET_CODE (PATTERN (next)) == SET
3524 && GET_CODE (SET_DEST (PATTERN (next))) == REG
3525 && REGNO (SET_DEST (PATTERN (next))) == 0)
3526 break;
3527
3528 next = next_insn (next);
3529 }
3530
3531 /* Is NEXT a branch? */
3532 if (next && JUMP_P (next))
3533 {
3534 rtx pattern = PATTERN (next);
3535
3536 /* If it is a reversed fp conditional branch (e.g. uses add,tr)
3537 and CCFP dies, then reverse our conditional and the branch
3538 to avoid the add,tr. */
3539 if (GET_CODE (pattern) == SET
3540 && SET_DEST (pattern) == pc_rtx
3541 && GET_CODE (SET_SRC (pattern)) == IF_THEN_ELSE
3542 && GET_CODE (XEXP (SET_SRC (pattern), 0)) == NE
3543 && GET_CODE (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == REG
3544 && REGNO (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == 0
3545 && GET_CODE (XEXP (SET_SRC (pattern), 1)) == PC
3546 && (fcmp_count == fbranch_count
3547 || (check_notes
3548 && find_regno_note (next, REG_DEAD, 0))))
3549 {
3550 /* Reverse the branch. */
3551 tmp = XEXP (SET_SRC (pattern), 1);
3552 XEXP (SET_SRC (pattern), 1) = XEXP (SET_SRC (pattern), 2);
3553 XEXP (SET_SRC (pattern), 2) = tmp;
3554 INSN_CODE (next) = -1;
3555
3556 /* Reverse our condition. */
3557 tmp = PATTERN (insn);
3558 PUT_CODE (XEXP (tmp, 1),
3559 (reverse_condition_maybe_unordered
3560 (GET_CODE (XEXP (tmp, 1)))));
3561 }
3562 }
3563 }
3564 }
3565
3566 pass = !pass;
3567
3568 }
3569 \f
3570 /* You may have trouble believing this, but this is the 32 bit HP-PA
3571 stack layout. Wow.
3572
3573 Offset Contents
3574
3575 Variable arguments (optional; any number may be allocated)
3576
3577 SP-(4*(N+9)) arg word N
3578 : :
3579 SP-56 arg word 5
3580 SP-52 arg word 4
3581
3582 Fixed arguments (must be allocated; may remain unused)
3583
3584 SP-48 arg word 3
3585 SP-44 arg word 2
3586 SP-40 arg word 1
3587 SP-36 arg word 0
3588
3589 Frame Marker
3590
3591 SP-32 External Data Pointer (DP)
3592 SP-28 External sr4
3593 SP-24 External/stub RP (RP')
3594 SP-20 Current RP
3595 SP-16 Static Link
3596 SP-12 Clean up
3597 SP-8 Calling Stub RP (RP'')
3598 SP-4 Previous SP
3599
3600 Top of Frame
3601
3602 SP-0 Stack Pointer (points to next available address)
3603
3604 */
3605
3606 /* This function saves registers as follows. Registers marked with ' are
3607 this function's registers (as opposed to the previous function's).
3608 If a frame_pointer isn't needed, r4 is saved as a general register;
3609 the space for the frame pointer is still allocated, though, to keep
3610 things simple.
3611
3612
3613 Top of Frame
3614
3615 SP (FP') Previous FP
3616 SP + 4 Alignment filler (sigh)
3617 SP + 8 Space for locals reserved here.
3618 .
3619 .
3620 .
3621 SP + n All call-saved general registers used.
3622 .
3623 .
3624 .
3625 SP + o All call-saved fp registers used.
3626 .
3627 .
3628 .
3629 SP + p (SP') points to next available address.
3630
3631 */
3632
3633 /* Global variables set by pa_expand_prologue(). */
3634 /* Size of frame. Need to know this to emit return insns from
3635 leaf procedures. */
3636 static HOST_WIDE_INT actual_fsize, local_fsize;
3637 static int save_fregs;
3638
3639 /* Emit RTL to store REG at the memory location specified by BASE+DISP.
3640 Handle case where DISP > 8k by using the add_high_const patterns.
3641
3642 Note that in the DISP > 8k case, we leave the high part of the address
3643 in %r1. There is code in pa_expand_{prologue,epilogue} that knows this. */
3644
3645 static void
3646 store_reg (int reg, HOST_WIDE_INT disp, int base)
3647 {
3648 rtx dest, src, basereg;
3649 rtx_insn *insn;
3650
3651 src = gen_rtx_REG (word_mode, reg);
3652 basereg = gen_rtx_REG (Pmode, base);
3653 if (VAL_14_BITS_P (disp))
3654 {
3655 dest = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
3656 insn = emit_move_insn (dest, src);
3657 }
3658 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3659 {
3660 rtx delta = GEN_INT (disp);
3661 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3662
3663 emit_move_insn (tmpreg, delta);
3664 insn = emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3665 if (DO_FRAME_NOTES)
3666 {
3667 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3668 gen_rtx_SET (tmpreg,
3669 gen_rtx_PLUS (Pmode, basereg, delta)));
3670 RTX_FRAME_RELATED_P (insn) = 1;
3671 }
3672 dest = gen_rtx_MEM (word_mode, tmpreg);
3673 insn = emit_move_insn (dest, src);
3674 }
3675 else
3676 {
3677 rtx delta = GEN_INT (disp);
3678 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
3679 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3680
3681 emit_move_insn (tmpreg, high);
3682 dest = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3683 insn = emit_move_insn (dest, src);
3684 if (DO_FRAME_NOTES)
3685 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3686 gen_rtx_SET (gen_rtx_MEM (word_mode,
3687 gen_rtx_PLUS (word_mode,
3688 basereg,
3689 delta)),
3690 src));
3691 }
3692
3693 if (DO_FRAME_NOTES)
3694 RTX_FRAME_RELATED_P (insn) = 1;
3695 }
3696
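/* Illustration (not part of the original sources): on a 32-bit target,
   the cases in store_reg above correspond roughly to these assembly
   shapes when storing %r3 at sp+DISP; register choices are illustrative:

     DISP fits in 14 bits:  stw %r3,DISP(%r30)

     otherwise:             addil L'DISP,%r30     ; %r1 = %r30 + high part
                            stw %r3,R'DISP(%r1)   ; low part folded into store

   The 64-bit !VAL_32_BITS_P case instead materializes the full
   displacement in %r1 and adds the base register to it before the
   store.  */
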
3697 /* Emit RTL to store REG at the memory location specified by BASE and then
3698 add MOD to BASE. MOD must be <= 8k. */
3699
3700 static void
3701 store_reg_modify (int base, int reg, HOST_WIDE_INT mod)
3702 {
3703 rtx basereg, srcreg, delta;
3704 rtx_insn *insn;
3705
3706 gcc_assert (VAL_14_BITS_P (mod));
3707
3708 basereg = gen_rtx_REG (Pmode, base);
3709 srcreg = gen_rtx_REG (word_mode, reg);
3710 delta = GEN_INT (mod);
3711
3712 insn = emit_insn (gen_post_store (basereg, srcreg, delta));
3713 if (DO_FRAME_NOTES)
3714 {
3715 RTX_FRAME_RELATED_P (insn) = 1;
3716
3717 /* RTX_FRAME_RELATED_P must be set on each frame related set
3718 in a parallel with more than one element. */
3719 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 0)) = 1;
3720 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
3721 }
3722 }
3723
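/* Sketch of the effect of store_reg_modify, assuming the pa.md
   post_store pattern expands to the usual store-and-modify insn:
   store_reg_modify (STACK_POINTER_REGNUM, 1, 64) yields a single

     stwm %r1,64(%sp)

   which stores %r1 at *sp and then post-increments %sp by 64 as one
   atomic operation.  */
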
3724 /* Emit RTL to set REG to the value specified by BASE+DISP. Handle case
3725 where DISP > 8k by using the add_high_const patterns. NOTE indicates
3726 whether to add a frame note or not.
3727
3728 In the DISP > 8k case, we leave the high part of the address in %r1.
3729 There is code in pa_expand_{prologue,epilogue} that knows about this. */
3730
3731 static void
3732 set_reg_plus_d (int reg, int base, HOST_WIDE_INT disp, int note)
3733 {
3734 rtx_insn *insn;
3735
3736 if (VAL_14_BITS_P (disp))
3737 {
3738 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3739 plus_constant (Pmode,
3740 gen_rtx_REG (Pmode, base), disp));
3741 }
3742 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3743 {
3744 rtx basereg = gen_rtx_REG (Pmode, base);
3745 rtx delta = GEN_INT (disp);
3746 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3747
3748 emit_move_insn (tmpreg, delta);
3749 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3750 gen_rtx_PLUS (Pmode, tmpreg, basereg));
3751 if (DO_FRAME_NOTES)
3752 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3753 gen_rtx_SET (tmpreg,
3754 gen_rtx_PLUS (Pmode, basereg, delta)));
3755 }
3756 else
3757 {
3758 rtx basereg = gen_rtx_REG (Pmode, base);
3759 rtx delta = GEN_INT (disp);
3760 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3761
3762 emit_move_insn (tmpreg,
3763 gen_rtx_PLUS (Pmode, basereg,
3764 gen_rtx_HIGH (Pmode, delta)));
3765 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3766 gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3767 }
3768
3769 if (DO_FRAME_NOTES && note)
3770 RTX_FRAME_RELATED_P (insn) = 1;
3771 }
3772
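/* Illustration only: for a displacement too large for 14 bits, say
   set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM, 16384, 1)
   on a 32-bit target, the two moves above correspond roughly to

     addil L'16384,%r30      ; %r1 = %r30 + left (high) part
     ldo R'16384(%r1),%r30   ; %r30 = %r1 + right (low) part

   leaving the high part of the address in %r1 as noted above.  */
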
3773 HOST_WIDE_INT
3774 pa_compute_frame_size (poly_int64 size, int *fregs_live)
3775 {
3776 int freg_saved = 0;
3777 int i, j;
3778
3779 /* The code in pa_expand_prologue and pa_expand_epilogue must
3780 be consistent with the rounding and size calculation done here.
3781 Change them at the same time. */
3782
3783 /* We do our own stack alignment. First, round the size of the
3784 stack locals up to a word boundary. */
3785 size = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3786
3787 /* Space for previous frame pointer + filler. If any frame is
3788 allocated, we need to add in the TARGET_STARTING_FRAME_OFFSET. We
3789 waste some space here for the sake of HP compatibility. The
3790 first slot is only used when the frame pointer is needed. */
3791 if (size || frame_pointer_needed)
3792 size += pa_starting_frame_offset ();
3793
3794 /* If the current function calls __builtin_eh_return, then we need
3795 to allocate stack space for registers that will hold data for
3796 the exception handler. */
3797 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3798 {
3799 unsigned int i;
3800
3801 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
3802 continue;
3803 size += i * UNITS_PER_WORD;
3804 }
3805
3806 /* Account for space used by the callee general register saves. */
3807 for (i = 18, j = frame_pointer_needed ? 4 : 3; i >= j; i--)
3808 if (df_regs_ever_live_p (i))
3809 size += UNITS_PER_WORD;
3810
3811 /* Account for space used by the callee floating point register saves. */
3812 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3813 if (df_regs_ever_live_p (i)
3814 || (!TARGET_64BIT && df_regs_ever_live_p (i + 1)))
3815 {
3816 freg_saved = 1;
3817
3818 /* We always save both halves of the FP register, so always
3819 increment the frame size by 8 bytes. */
3820 size += 8;
3821 }
3822
3823 /* If any of the floating registers are saved, account for the
3824 alignment needed for the floating point register save block. */
3825 if (freg_saved)
3826 {
3827 size = (size + 7) & ~7;
3828 if (fregs_live)
3829 *fregs_live = 1;
3830 }
3831
3832 /* The various ABIs include space for the outgoing parameters in the
3833 size of the current function's stack frame. We don't need to align
3834 for the outgoing arguments as their alignment is set by the final
3835 rounding for the frame as a whole. */
3836 size += crtl->outgoing_args_size;
3837
3838 /* Allocate space for the fixed frame marker. This space must be
3839 allocated for any function that makes calls or allocates
3840 stack space. */
3841 if (!crtl->is_leaf || size)
3842 size += TARGET_64BIT ? 48 : 32;
3843
3844 /* Finally, round to the preferred stack boundary. */
3845 return ((size + PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1)
3846 & ~(PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1));
3847 }
3848
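/* A worked example of the computation above, assuming 32-bit defaults
   (UNITS_PER_WORD == 4, starting frame offset 8, 32-byte frame marker)
   and, purely for illustration, a 64-byte preferred stack boundary:

     locals                        100
     round to word boundary     -> 100
     + frame pointer/filler     -> 108
     + two GR saves             -> 116
     + outgoing args (0)        -> 116
     + frame marker (non-leaf)  -> 148
     final: (148 + 63) & ~63    -> 192  */
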
3849 /* On HP-PA, move-double insns between fpu and cpu need an 8-byte block
3850 of memory. If any fpu reg is used in the function, we allocate
3851 such a block here, at the bottom of the frame, just in case it's needed.
3852
3853 If this function is a leaf procedure, then we may choose not
3854 to do a "save" insn. The decision about whether or not
3855 to do this is made in regclass.c. */
3856
3857 static void
3858 pa_output_function_prologue (FILE *file)
3859 {
3860 /* The function's label and associated .PROC must never be
3861 separated and must be output *after* any profiling declarations
3862 to avoid changing spaces/subspaces within a procedure. */
3863 ASM_OUTPUT_LABEL (file, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
3864 fputs ("\t.PROC\n", file);
3865
3866 /* pa_expand_prologue does the dirty work now. We just need
3867 to output the assembler directives which denote the start
3868 of a function. */
3869 fprintf (file, "\t.CALLINFO FRAME=" HOST_WIDE_INT_PRINT_DEC, actual_fsize);
3870 if (crtl->is_leaf)
3871 fputs (",NO_CALLS", file);
3872 else
3873 fputs (",CALLS", file);
3874 if (rp_saved)
3875 fputs (",SAVE_RP", file);
3876
3877 /* The SAVE_SP flag is used to indicate that register %r3 is stored
3878 at the beginning of the frame and that it is used as the frame
3879 pointer for the frame. We do this because our current frame
3880 layout doesn't conform to that specified in the HP runtime
3881 documentation and we need a way to indicate to programs such as
3882 GDB where %r3 is saved. The SAVE_SP flag was chosen because it
3883 isn't used by HP compilers but is supported by the assembler.
3884 However, SAVE_SP is supposed to indicate that the previous stack
3885 pointer has been saved in the frame marker. */
3886 if (frame_pointer_needed)
3887 fputs (",SAVE_SP", file);
3888
3889 /* Pass on information about the number of callee register saves
3890 performed in the prologue.
3891
3892 The compiler is supposed to pass the highest register number
3893 saved, the assembler then has to adjust that number before
3894 entering it into the unwind descriptor (to account for any
3895 caller saved registers with lower register numbers than the
3896 first callee saved register). */
3897 if (gr_saved)
3898 fprintf (file, ",ENTRY_GR=%d", gr_saved + 2);
3899
3900 if (fr_saved)
3901 fprintf (file, ",ENTRY_FR=%d", fr_saved + 11);
3902
3903 fputs ("\n\t.ENTRY\n", file);
3904
3905 remove_useless_addtr_insns (0);
3906 }
3907
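/* A representative (hypothetical) directive sequence emitted by the
   function above, following the function's label, for a non-leaf
   function with a 128-byte frame that saves RP and the three callee
   registers %r3..%r5 (so gr_saved == 3):

           .PROC
           .CALLINFO FRAME=128,CALLS,SAVE_RP,ENTRY_GR=5
           .ENTRY

   The assembler adjusts the ENTRY_GR operand before entering it into
   the unwind descriptor, as described above.  */
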
3908 void
3909 pa_expand_prologue (void)
3910 {
3911 int merge_sp_adjust_with_store = 0;
3912 HOST_WIDE_INT size = get_frame_size ();
3913 HOST_WIDE_INT offset;
3914 int i;
3915 rtx tmpreg;
3916 rtx_insn *insn;
3917
3918 gr_saved = 0;
3919 fr_saved = 0;
3920 save_fregs = 0;
3921
3922 /* Compute total size for frame pointer, filler, locals and rounding to
3923 the next word boundary. Similar code appears in pa_compute_frame_size
3924 and must be changed in tandem with this code. */
3925 local_fsize = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3926 if (local_fsize || frame_pointer_needed)
3927 local_fsize += pa_starting_frame_offset ();
3928
3929 actual_fsize = pa_compute_frame_size (size, &save_fregs);
3930 if (flag_stack_usage_info)
3931 current_function_static_stack_size = actual_fsize;
3932
3933 /* Compute a few things we will use often. */
3934 tmpreg = gen_rtx_REG (word_mode, 1);
3935
3936 /* Save RP first. The calling conventions manual states RP will
3937 always be stored into the caller's frame at sp - 20 or sp - 16
3938 depending on which ABI is in use. */
3939 if (df_regs_ever_live_p (2) || crtl->calls_eh_return)
3940 {
3941 store_reg (2, TARGET_64BIT ? -16 : -20, STACK_POINTER_REGNUM);
3942 rp_saved = true;
3943 }
3944 else
3945 rp_saved = false;
3946
3947 /* Allocate the local frame and set up the frame pointer if needed. */
3948 if (actual_fsize != 0)
3949 {
3950 if (frame_pointer_needed)
3951 {
3952 /* Copy the old frame pointer temporarily into %r1. Set up the
3953 new frame pointer, then store away the saved old frame pointer
3954 into the stack at sp, updating the stack pointer by actual_fsize
3955 bytes at the same time. There are two versions: the first
3956 handles small (<8k) frames, the second handles large (>=8k)
3957 frames. */
3958 insn = emit_move_insn (tmpreg, hard_frame_pointer_rtx);
3959 if (DO_FRAME_NOTES)
3960 RTX_FRAME_RELATED_P (insn) = 1;
3961
3962 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
3963 if (DO_FRAME_NOTES)
3964 RTX_FRAME_RELATED_P (insn) = 1;
3965
3966 if (VAL_14_BITS_P (actual_fsize))
3967 store_reg_modify (STACK_POINTER_REGNUM, 1, actual_fsize);
3968 else
3969 {
3970 /* It is incorrect to store the saved frame pointer at *sp,
3971 then increment sp (writes beyond the current stack boundary).
3972
3973 So instead use stwm to store at *sp and post-increment the
3974 stack pointer as an atomic operation. Then increment sp to
3975 finish allocating the new frame. */
3976 HOST_WIDE_INT adjust1 = 8192 - 64;
3977 HOST_WIDE_INT adjust2 = actual_fsize - adjust1;
3978
3979 store_reg_modify (STACK_POINTER_REGNUM, 1, adjust1);
3980 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3981 adjust2, 1);
3982 }
3983
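      /* Sketch (illustrative) of the two-step sequence for, e.g.,
	 actual_fsize == 16384: adjust1 == 8192 - 64 == 8128 and
	 adjust2 == 8256, giving roughly

	   stwm %r1,8128(%sp)      ; store saved FP at *sp, sp += 8128
	   addil L'8256,%r30       ; raise sp by the remaining
	   ldo R'8256(%r1),%r30    ; 8256 bytes

	 so the store never writes beyond the allocated stack.  */
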
3984 /* We set SAVE_SP in frames that need a frame pointer. Thus,
3985 we need to store the previous stack pointer (frame pointer)
3986 into the frame marker on targets that use the HP unwind
3987 library. This allows the HP unwind library to be used to
3988 unwind GCC frames. However, we are not fully compatible
3989 with the HP library because our frame layout differs from
3990 that specified in the HP runtime specification.
3991
3992 We don't want a frame note on this instruction as the frame
3993 marker moves during dynamic stack allocation.
3994
3995 This instruction also serves as a blockage to prevent
3996 register spills from being scheduled before the stack
3997 pointer is raised. This is necessary as we store
3998 registers using the frame pointer as a base register,
3999 and the frame pointer is set before sp is raised. */
4000 if (TARGET_HPUX_UNWIND_LIBRARY)
4001 {
4002 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx,
4003 GEN_INT (TARGET_64BIT ? -8 : -4));
4004
4005 emit_move_insn (gen_rtx_MEM (word_mode, addr),
4006 hard_frame_pointer_rtx);
4007 }
4008 else
4009 emit_insn (gen_blockage ());
4010 }
4011 /* No frame pointer needed. */
4012 else
4013 {
4014 /* In some cases we can perform the first callee register save
4015 and allocate the stack frame at the same time. If so, just
4016 make a note of it and defer allocating the frame until saving
4017 the callee registers. */
4018 if (VAL_14_BITS_P (actual_fsize) && local_fsize == 0)
4019 merge_sp_adjust_with_store = 1;
4020 /* Cannot optimize. Adjust the stack frame by actual_fsize
4021 bytes. */
4022 else
4023 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4024 actual_fsize, 1);
4025 }
4026 }
4027
4028 /* Normal register save.
4029
4030 Do not save the frame pointer in the frame_pointer_needed case. It
4031 was done earlier. */
4032 if (frame_pointer_needed)
4033 {
4034 offset = local_fsize;
4035
4036 /* Saving the EH return data registers in the frame is the simplest
4037 way to get the frame unwind information emitted. We put them
4038 just before the general registers. */
4039 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4040 {
4041 unsigned int i, regno;
4042
4043 for (i = 0; ; ++i)
4044 {
4045 regno = EH_RETURN_DATA_REGNO (i);
4046 if (regno == INVALID_REGNUM)
4047 break;
4048
4049 store_reg (regno, offset, HARD_FRAME_POINTER_REGNUM);
4050 offset += UNITS_PER_WORD;
4051 }
4052 }
4053
4054 for (i = 18; i >= 4; i--)
4055 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4056 {
4057 store_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
4058 offset += UNITS_PER_WORD;
4059 gr_saved++;
4060 }
4061 /* Account for %r3 which is saved in a special place. */
4062 gr_saved++;
4063 }
4064 /* No frame pointer needed. */
4065 else
4066 {
4067 offset = local_fsize - actual_fsize;
4068
4069 /* Saving the EH return data registers in the frame is the simplest
4070 way to get the frame unwind information emitted. */
4071 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4072 {
4073 unsigned int i, regno;
4074
4075 for (i = 0; ; ++i)
4076 {
4077 regno = EH_RETURN_DATA_REGNO (i);
4078 if (regno == INVALID_REGNUM)
4079 break;
4080
4081 /* If merge_sp_adjust_with_store is nonzero, then we can
4082 optimize the first save. */
4083 if (merge_sp_adjust_with_store)
4084 {
4085 store_reg_modify (STACK_POINTER_REGNUM, regno, -offset);
4086 merge_sp_adjust_with_store = 0;
4087 }
4088 else
4089 store_reg (regno, offset, STACK_POINTER_REGNUM);
4090 offset += UNITS_PER_WORD;
4091 }
4092 }
4093
4094 for (i = 18; i >= 3; i--)
4095 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4096 {
4097 /* If merge_sp_adjust_with_store is nonzero, then we can
4098 optimize the first GR save. */
4099 if (merge_sp_adjust_with_store)
4100 {
4101 store_reg_modify (STACK_POINTER_REGNUM, i, -offset);
4102 merge_sp_adjust_with_store = 0;
4103 }
4104 else
4105 store_reg (i, offset, STACK_POINTER_REGNUM);
4106 offset += UNITS_PER_WORD;
4107 gr_saved++;
4108 }
4109
4110 /* If we wanted to merge the SP adjustment with a GR save, but we never
4111 did any GR saves, then just emit the adjustment here. */
4112 if (merge_sp_adjust_with_store)
4113 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4114 actual_fsize, 1);
4115 }
4116
4117 /* The hppa calling conventions say that %r19, the pic offset
4118 register, is saved at sp - 32 (in this function's frame)
4119 when generating PIC code. FIXME: What is the correct thing
4120 to do for functions which make no calls and allocate no
4121 frame? Do we need to allocate a frame, or can we just omit
4122 the save? For now we'll just omit the save.
4123
4124 We don't want a note on this insn as the frame marker can
4125 move if there is a dynamic stack allocation. */
4126 if (flag_pic && actual_fsize != 0 && !TARGET_64BIT)
4127 {
4128 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx, GEN_INT (-32));
4129
4130 emit_move_insn (gen_rtx_MEM (word_mode, addr), pic_offset_table_rtx);
4131
4132 }
4133
4134 /* Align the offset to a doubleword boundary. */
4135 offset = (offset + 7) & ~7;
4136
4137 /* Floating point register store. */
4138 if (save_fregs)
4139 {
4140 rtx base;
4141
4142 /* First get the frame or stack pointer to the start of the FP register
4143 save area. */
4144 if (frame_pointer_needed)
4145 {
4146 set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
4147 base = hard_frame_pointer_rtx;
4148 }
4149 else
4150 {
4151 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4152 base = stack_pointer_rtx;
4153 }
4154
4155 /* Now actually save the FP registers. */
4156 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4157 {
4158 if (df_regs_ever_live_p (i)
4159 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
4160 {
4161 rtx addr, reg;
4162 rtx_insn *insn;
4163 addr = gen_rtx_MEM (DFmode,
4164 gen_rtx_POST_INC (word_mode, tmpreg));
4165 reg = gen_rtx_REG (DFmode, i);
4166 insn = emit_move_insn (addr, reg);
4167 if (DO_FRAME_NOTES)
4168 {
4169 RTX_FRAME_RELATED_P (insn) = 1;
4170 if (TARGET_64BIT)
4171 {
4172 rtx mem = gen_rtx_MEM (DFmode,
4173 plus_constant (Pmode, base,
4174 offset));
4175 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4176 gen_rtx_SET (mem, reg));
4177 }
4178 else
4179 {
4180 rtx meml = gen_rtx_MEM (SFmode,
4181 plus_constant (Pmode, base,
4182 offset));
4183 rtx memr = gen_rtx_MEM (SFmode,
4184 plus_constant (Pmode, base,
4185 offset + 4));
4186 rtx regl = gen_rtx_REG (SFmode, i);
4187 rtx regr = gen_rtx_REG (SFmode, i + 1);
4188 rtx setl = gen_rtx_SET (meml, regl);
4189 rtx setr = gen_rtx_SET (memr, regr);
4190 rtvec vec;
4191
4192 RTX_FRAME_RELATED_P (setl) = 1;
4193 RTX_FRAME_RELATED_P (setr) = 1;
4194 vec = gen_rtvec (2, setl, setr);
4195 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4196 gen_rtx_SEQUENCE (VOIDmode, vec));
4197 }
4198 }
4199 offset += GET_MODE_SIZE (DFmode);
4200 fr_saved++;
4201 }
4202 }
4203 }
4204 }
4205
4206 /* Emit RTL to load REG from the memory location specified by BASE+DISP.
4207 Handle case where DISP > 8k by using the add_high_const patterns. */
4208
4209 static void
4210 load_reg (int reg, HOST_WIDE_INT disp, int base)
4211 {
4212 rtx dest = gen_rtx_REG (word_mode, reg);
4213 rtx basereg = gen_rtx_REG (Pmode, base);
4214 rtx src;
4215
4216 if (VAL_14_BITS_P (disp))
4217 src = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
4218 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
4219 {
4220 rtx delta = GEN_INT (disp);
4221 rtx tmpreg = gen_rtx_REG (Pmode, 1);
4222
4223 emit_move_insn (tmpreg, delta);
4224 if (TARGET_DISABLE_INDEXING)
4225 {
4226 emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
4227 src = gen_rtx_MEM (word_mode, tmpreg);
4228 }
4229 else
4230 src = gen_rtx_MEM (word_mode, gen_rtx_PLUS (Pmode, tmpreg, basereg));
4231 }
4232 else
4233 {
4234 rtx delta = GEN_INT (disp);
4235 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
4236 rtx tmpreg = gen_rtx_REG (Pmode, 1);
4237
4238 emit_move_insn (tmpreg, high);
4239 src = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
4240 }
4241
4242 emit_move_insn (dest, src);
4243 }
4244
4245 /* Update the total code bytes output to the text section. */
4246
4247 static void
4248 update_total_code_bytes (unsigned int nbytes)
4249 {
4250 if ((TARGET_PORTABLE_RUNTIME || !TARGET_GAS || !TARGET_SOM)
4251 && !IN_NAMED_SECTION_P (cfun->decl))
4252 {
4253 unsigned int old_total = total_code_bytes;
4254
4255 total_code_bytes += nbytes;
4256
4257 /* Be prepared to handle overflows. */
4258 if (old_total > total_code_bytes)
4259 total_code_bytes = UINT_MAX;
4260 }
4261 }
4262
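/* Example of the saturation above: if total_code_bytes is UINT_MAX - 16
   and nbytes is 64, the unsigned addition wraps around; old_total is
   then greater than the new total, so the counter is clamped to
   UINT_MAX rather than restarting near zero.  */
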
4263 /* This function generates the assembly code for function exit.
4264 Args are as for pa_output_function_prologue ().
4265
4266 The function epilogue should not depend on the current stack
4267 pointer! It should use the frame pointer only. This is mandatory
4268 because of alloca; we also take advantage of it to omit stack
4269 adjustments before returning. */
4270
4271 static void
4272 pa_output_function_epilogue (FILE *file)
4273 {
4274 rtx_insn *insn = get_last_insn ();
4275 bool extra_nop;
4276
4277 /* pa_expand_epilogue does the dirty work now. We just need
4278 to output the assembler directives which denote the end
4279 of a function.
4280
4281 To make debuggers happy, emit a nop if the epilogue was completely
4282 eliminated due to a volatile call as the last insn in the
4283 current function. That way the return address (in %r2) will
4284 always point to a valid instruction in the current function. */
4285
4286 /* Get the last real insn. */
4287 if (NOTE_P (insn))
4288 insn = prev_real_insn (insn);
4289
4290 /* If it is a sequence, then look inside. */
4291 if (insn && NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
4292 insn = as_a <rtx_sequence *> (PATTERN (insn))-> insn (0);
4293
4294 /* If insn is a CALL_INSN, then it must be a call to a volatile
4295 function (otherwise there would be epilogue insns). */
4296 if (insn && CALL_P (insn))
4297 {
4298 fputs ("\tnop\n", file);
4299 extra_nop = true;
4300 }
4301 else
4302 extra_nop = false;
4303
4304 fputs ("\t.EXIT\n\t.PROCEND\n", file);
4305
4306 if (TARGET_SOM && TARGET_GAS)
4307 {
4308 /* We are done with this subspace except possibly for some additional
4309 debug information. Forget that we are in this subspace to ensure
4310 that the next function is output in its own subspace. */
4311 in_section = NULL;
4312 cfun->machine->in_nsubspa = 2;
4313 }
4314
4315 /* Thunks do their own insn accounting. */
4316 if (cfun->is_thunk)
4317 return;
4318
4319 if (INSN_ADDRESSES_SET_P ())
4320 {
4321 last_address = extra_nop ? 4 : 0;
4322 insn = get_last_nonnote_insn ();
4323 if (insn)
4324 {
4325 last_address += INSN_ADDRESSES (INSN_UID (insn));
4326 if (INSN_P (insn))
4327 last_address += insn_default_length (insn);
4328 }
4329 last_address = ((last_address + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
4330 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
4331 }
4332 else
4333 last_address = UINT_MAX;
4334
4335 /* Finally, update the total number of code bytes output so far. */
4336 update_total_code_bytes (last_address);
4337 }
4338
4339 void
4340 pa_expand_epilogue (void)
4341 {
4342 rtx tmpreg;
4343 HOST_WIDE_INT offset;
4344 HOST_WIDE_INT ret_off = 0;
4345 int i;
4346 int merge_sp_adjust_with_load = 0;
4347
4348 /* We will use this often. */
4349 tmpreg = gen_rtx_REG (word_mode, 1);
4350
4351 /* Try to restore RP early to avoid load/use interlocks when
4352 RP gets used in the return (bv) instruction. This appears to still
4353 be necessary even when we schedule the prologue and epilogue. */
4354 if (rp_saved)
4355 {
4356 ret_off = TARGET_64BIT ? -16 : -20;
4357 if (frame_pointer_needed)
4358 {
4359 load_reg (2, ret_off, HARD_FRAME_POINTER_REGNUM);
4360 ret_off = 0;
4361 }
4362 else
4363 {
4364 /* No frame pointer, and stack is smaller than 8k. */
4365 if (VAL_14_BITS_P (ret_off - actual_fsize))
4366 {
4367 load_reg (2, ret_off - actual_fsize, STACK_POINTER_REGNUM);
4368 ret_off = 0;
4369 }
4370 }
4371 }
4372
4373 /* General register restores. */
4374 if (frame_pointer_needed)
4375 {
4376 offset = local_fsize;
4377
4378 /* If the current function calls __builtin_eh_return, then we need
4379 to restore the saved EH data registers. */
4380 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4381 {
4382 unsigned int i, regno;
4383
4384 for (i = 0; ; ++i)
4385 {
4386 regno = EH_RETURN_DATA_REGNO (i);
4387 if (regno == INVALID_REGNUM)
4388 break;
4389
4390 load_reg (regno, offset, HARD_FRAME_POINTER_REGNUM);
4391 offset += UNITS_PER_WORD;
4392 }
4393 }
4394
4395 for (i = 18; i >= 4; i--)
4396 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4397 {
4398 load_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
4399 offset += UNITS_PER_WORD;
4400 }
4401 }
4402 else
4403 {
4404 offset = local_fsize - actual_fsize;
4405
4406 /* If the current function calls __builtin_eh_return, then we need
4407 to restore the saved EH data registers. */
4408 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4409 {
4410 unsigned int i, regno;
4411
4412 for (i = 0; ; ++i)
4413 {
4414 regno = EH_RETURN_DATA_REGNO (i);
4415 if (regno == INVALID_REGNUM)
4416 break;
4417
4418 /* Only for the first load.
4419 merge_sp_adjust_with_load holds the number of the register
4420 whose load we will merge with the sp adjustment. */
4421 if (merge_sp_adjust_with_load == 0
4422 && local_fsize == 0
4423 && VAL_14_BITS_P (-actual_fsize))
4424 merge_sp_adjust_with_load = regno;
4425 else
4426 load_reg (regno, offset, STACK_POINTER_REGNUM);
4427 offset += UNITS_PER_WORD;
4428 }
4429 }
4430
4431 for (i = 18; i >= 3; i--)
4432 {
4433 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4434 {
4435 /* Only for the first load.
4436 merge_sp_adjust_with_load holds the number of the register
4437 whose load we will merge with the sp adjustment. */
4438 if (merge_sp_adjust_with_load == 0
4439 && local_fsize == 0
4440 && VAL_14_BITS_P (-actual_fsize))
4441 merge_sp_adjust_with_load = i;
4442 else
4443 load_reg (i, offset, STACK_POINTER_REGNUM);
4444 offset += UNITS_PER_WORD;
4445 }
4446 }
4447 }
4448
4449 /* Align the offset to a doubleword boundary. */
4450 offset = (offset + 7) & ~7;
4451
4452 /* FP register restores. */
4453 if (save_fregs)
4454 {
4455 /* Adjust the register to index off of. */
4456 if (frame_pointer_needed)
4457 set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
4458 else
4459 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4460
4461 /* Actually do the restores now. */
4462 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4463 if (df_regs_ever_live_p (i)
4464 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
4465 {
4466 rtx src = gen_rtx_MEM (DFmode,
4467 gen_rtx_POST_INC (word_mode, tmpreg));
4468 rtx dest = gen_rtx_REG (DFmode, i);
4469 emit_move_insn (dest, src);
4470 }
4471 }
4472
4473 /* Emit a blockage insn here to keep these insns from being moved to
4474 an earlier spot in the epilogue, or into the main instruction stream.
4475
4476 This is necessary as we must not cut the stack back before all the
4477 restores are finished. */
4478 emit_insn (gen_blockage ());
4479
4480 /* Reset stack pointer (and possibly frame pointer). The stack
4481 pointer is initially set to fp + 64 to avoid a race condition. */
4482 if (frame_pointer_needed)
4483 {
4484 rtx delta = GEN_INT (-64);
4485
4486 set_reg_plus_d (STACK_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM, 64, 0);
4487 emit_insn (gen_pre_load (hard_frame_pointer_rtx,
4488 stack_pointer_rtx, delta));
4489 }
4490 /* If we were deferring a callee register restore, do it now. */
4491 else if (merge_sp_adjust_with_load)
4492 {
4493 rtx delta = GEN_INT (-actual_fsize);
4494 rtx dest = gen_rtx_REG (word_mode, merge_sp_adjust_with_load);
4495
4496 emit_insn (gen_pre_load (dest, stack_pointer_rtx, delta));
4497 }
4498 else if (actual_fsize != 0)
4499 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4500 - actual_fsize, 0);
4501
4502 /* If we haven't restored %r2 yet (no frame pointer, and a stack
4503 frame greater than 8k), do so now. */
4504 if (ret_off != 0)
4505 load_reg (2, ret_off, STACK_POINTER_REGNUM);
4506
4507 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4508 {
4509 rtx sa = EH_RETURN_STACKADJ_RTX;
4510
4511 emit_insn (gen_blockage ());
4512 emit_insn (TARGET_64BIT
4513 ? gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, sa)
4514 : gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, sa));
4515 }
4516 }
4517
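/* Illustrative shape of the merged restore above, assuming the pa.md
   pre_load pattern emits a load-and-modify insn: with actual_fsize == 64
   and the restore of %r3 deferred, the epilogue ends with a single

     ldwm -64(%sp),%r3

   which loads %r3 from sp-64 and decrements %sp by 64 in one insn,
   instead of a separate ldw and ldo pair.  */
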
4518 bool
4519 pa_can_use_return_insn (void)
4520 {
4521 if (!reload_completed)
4522 return false;
4523
4524 if (frame_pointer_needed)
4525 return false;
4526
4527 if (df_regs_ever_live_p (2))
4528 return false;
4529
4530 if (crtl->profile)
4531 return false;
4532
4533 return pa_compute_frame_size (get_frame_size (), 0) == 0;
4534 }
4535
4536 rtx
4537 hppa_pic_save_rtx (void)
4538 {
4539 return get_hard_reg_initial_val (word_mode, PIC_OFFSET_TABLE_REGNUM);
4540 }
4541
4542 #ifndef NO_DEFERRED_PROFILE_COUNTERS
4543 #define NO_DEFERRED_PROFILE_COUNTERS 0
4544 #endif
4545
4546
4547 /* Vector of funcdef numbers. */
4548 static vec<int> funcdef_nos;
4549
4550 /* Output deferred profile counters. */
4551 static void
4552 output_deferred_profile_counters (void)
4553 {
4554 unsigned int i;
4555 int align, n;
4556
4557 if (funcdef_nos.is_empty ())
4558 return;
4559
4560 switch_to_section (data_section);
4561 align = MIN (BIGGEST_ALIGNMENT, LONG_TYPE_SIZE);
4562 ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT));
4563
4564 for (i = 0; funcdef_nos.iterate (i, &n); i++)
4565 {
4566 targetm.asm_out.internal_label (asm_out_file, "LP", n);
4567 assemble_integer (const0_rtx, LONG_TYPE_SIZE / BITS_PER_UNIT, align, 1);
4568 }
4569
4570 funcdef_nos.release ();
4571 }
4572
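/* Schematic output for two deferred counters (funcdef numbers 0 and 3);
   the exact label spelling comes from targetm.asm_out.internal_label and
   is target-dependent, so this is illustrative only:

           .data
           .align 4
   LP0     .word 0
   LP3     .word 0  */
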
4573 void
4574 hppa_profile_hook (int label_no)
4575 {
4576 /* We use SImode for the address of the function in both 32 and
4577 64-bit code to avoid having to provide DImode versions of the
4578 lcla2 and load_offset_label_address insn patterns. */
4579 rtx reg = gen_reg_rtx (SImode);
4580 rtx_code_label *label_rtx = gen_label_rtx ();
4581 int reg_parm_stack_space = REG_PARM_STACK_SPACE (NULL_TREE);
4582 rtx arg_bytes, begin_label_rtx, mcount, sym;
4583 rtx_insn *call_insn;
4584 char begin_label_name[16];
4585 bool use_mcount_pcrel_call;
4586
4587 /* Set up call destination. */
4588 sym = gen_rtx_SYMBOL_REF (Pmode, "_mcount");
4589 pa_encode_label (sym);
4590 mcount = gen_rtx_MEM (Pmode, sym);
4591
4592 /* If we can reach _mcount with a pc-relative call, we can optimize
4593 loading the address of the current function. This requires linker
4594 long branch stub support. */
4595 if (!TARGET_PORTABLE_RUNTIME
4596 && !TARGET_LONG_CALLS
4597 && (TARGET_SOM || flag_function_sections))
4598 use_mcount_pcrel_call = true;
4599 else
4600 use_mcount_pcrel_call = false;
4601
4602 ASM_GENERATE_INTERNAL_LABEL (begin_label_name, FUNC_BEGIN_PROLOG_LABEL,
4603 label_no);
4604 begin_label_rtx = gen_rtx_SYMBOL_REF (SImode, ggc_strdup (begin_label_name));
4605
4606 emit_move_insn (gen_rtx_REG (word_mode, 26), gen_rtx_REG (word_mode, 2));
4607
4608 if (!use_mcount_pcrel_call)
4609 {
4610 /* The address of the function is loaded into %r25 with an instruction-
4611 relative sequence that avoids the use of relocations. The sequence
4612 is split so that the load_offset_label_address instruction can
4613 occupy the delay slot of the call to _mcount. */
4614 if (TARGET_PA_20)
4615 emit_insn (gen_lcla2 (reg, label_rtx));
4616 else
4617 emit_insn (gen_lcla1 (reg, label_rtx));
4618
4619 emit_insn (gen_load_offset_label_address (gen_rtx_REG (SImode, 25),
4620 reg,
4621 begin_label_rtx,
4622 label_rtx));
4623 }
4624
4625 if (!NO_DEFERRED_PROFILE_COUNTERS)
4626 {
4627 rtx count_label_rtx, addr, r24;
4628 char count_label_name[16];
4629
4630 funcdef_nos.safe_push (label_no);
4631 ASM_GENERATE_INTERNAL_LABEL (count_label_name, "LP", label_no);
4632 count_label_rtx = gen_rtx_SYMBOL_REF (Pmode,
4633 ggc_strdup (count_label_name));
4634
4635 addr = force_reg (Pmode, count_label_rtx);
4636 r24 = gen_rtx_REG (Pmode, 24);
4637 emit_move_insn (r24, addr);
4638
4639 arg_bytes = GEN_INT (TARGET_64BIT ? 24 : 12);
4640 if (use_mcount_pcrel_call)
4641 call_insn = emit_call_insn (gen_call_mcount (mcount, arg_bytes,
4642 begin_label_rtx));
4643 else
4644 call_insn = emit_call_insn (gen_call (mcount, arg_bytes));
4645
4646 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), r24);
4647 }
4648 else
4649 {
4650 arg_bytes = GEN_INT (TARGET_64BIT ? 16 : 8);
4651 if (use_mcount_pcrel_call)
4652 call_insn = emit_call_insn (gen_call_mcount (mcount, arg_bytes,
4653 begin_label_rtx));
4654 else
4655 call_insn = emit_call_insn (gen_call (mcount, arg_bytes));
4656 }
4657
4658 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 25));
4659 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 26));
4660
4661 /* Indicate the _mcount call cannot throw, nor will it execute a
4662 non-local goto. */
4663 make_reg_eh_region_note_nothrow_nononlocal (call_insn);
4664
4665 /* Allocate space for fixed arguments. */
4666 if (reg_parm_stack_space > crtl->outgoing_args_size)
4667 crtl->outgoing_args_size = reg_parm_stack_space;
4668 }
4669
4670 /* Fetch the return address for the frame COUNT steps up from
4671 the current frame, after the prologue. FRAMEADDR is the
4672 frame pointer of the COUNT frame.
4673
4674 We want to ignore any export stub remnants here. To handle this,
4675 we examine the code at the return address, and if it is an export
4676 stub, we return a memory rtx for the stub return address stored
4677 at frame-24.
4678
4679 The value returned is used in two different ways:
4680
4681 1. To find a function's caller.
4682
4683 2. To change the return address for a function.
4684
4685 This function handles most instances of case 1; however, it will
4686 fail if there are two levels of stubs to execute on the return
4687 path. The only way I believe that can happen is if the return value
4688 needs a parameter relocation, which never happens for C code.
4689
4690 This function handles most instances of case 2; however, it will
4691 fail if we did not originally have stub code on the return path
4692 but will need stub code on the new return path. This can happen if
4693 the caller & callee are both in the main program, but the new
4694 return location is in a shared library. */
4695
4696 rtx
4697 pa_return_addr_rtx (int count, rtx frameaddr)
4698 {
4699 rtx label;
4700 rtx rp;
4701 rtx saved_rp;
4702 rtx ins;
4703
4704 /* The instruction stream at the return address of a PA1.X export stub is:
4705
4706 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4707 0x004010a1 | stub+12: ldsid (sr0,rp),r1
4708 0x00011820 | stub+16: mtsp r1,sr0
4709 0xe0400002 | stub+20: be,n 0(sr0,rp)
4710
4711 0xe0400002 must be specified as -532676606 so that it won't be
4712 rejected as an invalid immediate operand on 64-bit hosts.
4713
4714 The instruction stream at the return address of a PA2.0 export stub is:
4715
4716 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4717 0xe840d002 | stub+12: bve,n (rp)
4718 */
4719
4720 HOST_WIDE_INT insns[4];
4721 int i, len;
4722
4723 if (count != 0)
4724 return NULL_RTX;
4725
4726 rp = get_hard_reg_initial_val (Pmode, 2);
4727
4728 if (TARGET_64BIT || TARGET_NO_SPACE_REGS)
4729 return rp;
4730
4731 /* If there is no export stub then just use the value saved from
4732 the return pointer register. */
4733
4734 saved_rp = gen_reg_rtx (Pmode);
4735 emit_move_insn (saved_rp, rp);
4736
4737 /* Get pointer to the instruction stream. We have to mask out the
4738 privilege level from the two low order bits of the return address
4739 pointer here so that ins will point to the start of the first
4740 instruction that would have been executed if we returned. */
4741 ins = copy_to_reg (gen_rtx_AND (Pmode, rp, MASK_RETURN_ADDR));
4742 label = gen_label_rtx ();
4743
4744 if (TARGET_PA_20)
4745 {
4746 insns[0] = 0x4bc23fd1;
4747 insns[1] = -398405630;
4748 len = 2;
4749 }
4750 else
4751 {
4752 insns[0] = 0x4bc23fd1;
4753 insns[1] = 0x004010a1;
4754 insns[2] = 0x00011820;
4755 insns[3] = -532676606;
4756 len = 4;
4757 }
4758
4759 /* Check the instruction stream at the normal return address for the
4760 export stub. If it is an export stub, then our return address is
4761 really in -24[frameaddr]. */
4762
4763 for (i = 0; i < len; i++)
4764 {
4765 rtx op0 = gen_rtx_MEM (SImode, plus_constant (Pmode, ins, i * 4));
4766 rtx op1 = GEN_INT (insns[i]);
4767 emit_cmp_and_jump_insns (op0, op1, NE, NULL, SImode, 0, label);
4768 }
4769
4770 /* Here we know that our return address points to an export
4771 stub. We don't want to return the address of the export stub,
4772 but rather the return address of the export stub. That return
4773 address is stored at -24[frameaddr]. */
4774
4775 emit_move_insn (saved_rp,
4776 gen_rtx_MEM (Pmode,
4777 memory_address (Pmode,
4778 plus_constant (Pmode, frameaddr,
4779 -24))));
4780
4781 emit_label (label);
4782
4783 return saved_rp;
4784 }
4785
4786 void
4787 pa_emit_bcond_fp (rtx operands[])
4788 {
4789 enum rtx_code code = GET_CODE (operands[0]);
4790 rtx operand0 = operands[1];
4791 rtx operand1 = operands[2];
4792 rtx label = operands[3];
4793
4794 emit_insn (gen_rtx_SET (gen_rtx_REG (CCFPmode, 0),
4795 gen_rtx_fmt_ee (code, CCFPmode, operand0, operand1)));
4796
4797 emit_jump_insn (gen_rtx_SET (pc_rtx,
4798 gen_rtx_IF_THEN_ELSE (VOIDmode,
4799 gen_rtx_fmt_ee (NE,
4800 VOIDmode,
4801 gen_rtx_REG (CCFPmode, 0),
4802 const0_rtx),
4803 gen_rtx_LABEL_REF (VOIDmode, label),
4804 pc_rtx)));
4805
4806 }
4807
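/* The two insns emitted above have, schematically, the RTL shape
   (register and operand choices illustrative):

     (set (reg:CCFP 0) (lt:CCFP OP0 OP1))
     (set (pc) (if_then_else (ne (reg:CCFP 0) (const_int 0))
                             (label_ref LABEL)
                             (pc)))

   i.e. an fcmp setting CCFP (register zero) followed by a branch on the
   NE test of that register -- exactly the pair that
   remove_useless_addtr_insns recognizes and may reverse.  */
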
4808 /* Adjust the cost of a scheduling dependency. Return the new cost of
4809 a dependency of type DEP_TYPE of INSN on DEP_INSN. COST is the current cost. */
4810
4811 static int
4812 pa_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
4813 unsigned int)
4814 {
4815 enum attr_type attr_type;
4816
4817 /* Don't adjust costs for a pa8000 chip, also do not adjust any
4818 true dependencies as they are described with bypasses now. */
4819 if (pa_cpu >= PROCESSOR_8000 || dep_type == 0)
4820 return cost;
4821
4822 if (! recog_memoized (insn))
4823 return 0;
4824
4825 attr_type = get_attr_type (insn);
4826
4827 switch (dep_type)
4828 {
4829 case REG_DEP_ANTI:
4830 /* Anti dependency; DEP_INSN reads a register that INSN writes some
4831 cycles later. */
4832
4833 if (attr_type == TYPE_FPLOAD)
4834 {
4835 rtx pat = PATTERN (insn);
4836 rtx dep_pat = PATTERN (dep_insn);
4837 if (GET_CODE (pat) == PARALLEL)
4838 {
4839 /* This happens for the fldXs,mb patterns. */
4840 pat = XVECEXP (pat, 0, 0);
4841 }
4842 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4843 /* If this happens, we have to extend this to schedule
4844 optimally. Return 0 for now. */
4845 return 0;
4846
4847 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4848 {
4849 if (! recog_memoized (dep_insn))
4850 return 0;
4851 switch (get_attr_type (dep_insn))
4852 {
4853 case TYPE_FPALU:
4854 case TYPE_FPMULSGL:
4855 case TYPE_FPMULDBL:
4856 case TYPE_FPDIVSGL:
4857 case TYPE_FPDIVDBL:
4858 case TYPE_FPSQRTSGL:
4859 case TYPE_FPSQRTDBL:
4860 /* A fpload can't be issued until one cycle before a
4861 preceding arithmetic operation has finished if
4862 the target of the fpload is any of the sources
4863 (or destination) of the arithmetic operation. */
4864 return insn_default_latency (dep_insn) - 1;
4865
4866 default:
4867 return 0;
4868 }
4869 }
4870 }
4871 else if (attr_type == TYPE_FPALU)
4872 {
4873 rtx pat = PATTERN (insn);
4874 rtx dep_pat = PATTERN (dep_insn);
4875 if (GET_CODE (pat) == PARALLEL)
4876 {
4877 /* This happens for the fldXs,mb patterns. */
4878 pat = XVECEXP (pat, 0, 0);
4879 }
4880 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4881 /* If this happens, we have to extend this to schedule
4882 optimally. Return 0 for now. */
4883 return 0;
4884
4885 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4886 {
4887 if (! recog_memoized (dep_insn))
4888 return 0;
4889 switch (get_attr_type (dep_insn))
4890 {
4891 case TYPE_FPDIVSGL:
4892 case TYPE_FPDIVDBL:
4893 case TYPE_FPSQRTSGL:
4894 case TYPE_FPSQRTDBL:
4895 /* An ALU flop can't be issued until two cycles before a
4896 preceding divide or sqrt operation has finished if
4897 the target of the ALU flop is any of the sources
4898 (or destination) of the divide or sqrt operation. */
4899 return insn_default_latency (dep_insn) - 2;
4900
4901 default:
4902 return 0;
4903 }
4904 }
4905 }
4906
4907 /* For other anti dependencies, the cost is 0. */
4908 return 0;
4909
4910 case REG_DEP_OUTPUT:
4911 /* Output dependency; DEP_INSN writes a register that INSN writes some
4912 cycles later. */
4913 if (attr_type == TYPE_FPLOAD)
4914 {
4915 rtx pat = PATTERN (insn);
4916 rtx dep_pat = PATTERN (dep_insn);
4917 if (GET_CODE (pat) == PARALLEL)
4918 {
4919 /* This happens for the fldXs,mb patterns. */
4920 pat = XVECEXP (pat, 0, 0);
4921 }
4922 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4923 /* If this happens, we have to extend this to schedule
4924 optimally. Return 0 for now. */
4925 return 0;
4926
4927 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4928 {
4929 if (! recog_memoized (dep_insn))
4930 return 0;
4931 switch (get_attr_type (dep_insn))
4932 {
4933 case TYPE_FPALU:
4934 case TYPE_FPMULSGL:
4935 case TYPE_FPMULDBL:
4936 case TYPE_FPDIVSGL:
4937 case TYPE_FPDIVDBL:
4938 case TYPE_FPSQRTSGL:
4939 case TYPE_FPSQRTDBL:
4940 /* A fpload can't be issued until one cycle before a
4941 preceding arithmetic operation has finished if
4942 the target of the fpload is the destination of the
4943 arithmetic operation.
4944
4945 Exception: For PA7100LC, PA7200 and PA7300, the cost
4946 is 3 cycles, unless they bundle together. We also
4947 pay the penalty if the second insn is a fpload. */
4948 return insn_default_latency (dep_insn) - 1;
4949
4950 default:
4951 return 0;
4952 }
4953 }
4954 }
4955 else if (attr_type == TYPE_FPALU)
4956 {
4957 rtx pat = PATTERN (insn);
4958 rtx dep_pat = PATTERN (dep_insn);
4959 if (GET_CODE (pat) == PARALLEL)
4960 {
4961 /* This happens for the fldXs,mb patterns. */
4962 pat = XVECEXP (pat, 0, 0);
4963 }
4964 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4965 /* If this happens, we have to extend this to schedule
4966 optimally. Return 0 for now. */
4967 return 0;
4968
4969 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4970 {
4971 if (! recog_memoized (dep_insn))
4972 return 0;
4973 switch (get_attr_type (dep_insn))
4974 {
4975 case TYPE_FPDIVSGL:
4976 case TYPE_FPDIVDBL:
4977 case TYPE_FPSQRTSGL:
4978 case TYPE_FPSQRTDBL:
4979 /* An ALU flop can't be issued until two cycles before a
4980 preceding divide or sqrt operation has finished if
4981 the target of the ALU flop is also the target of
4982 the divide or sqrt operation. */
4983 return insn_default_latency (dep_insn) - 2;
4984
4985 default:
4986 return 0;
4987 }
4988 }
4989 }
4990
4991 /* For other output dependencies, the cost is 0. */
4992 return 0;
4993
4994 default:
4995 gcc_unreachable ();
4996 }
4997 }
4998
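/* Worked example for the anti-dependence case above: if an fpload
   targets a register that is a source of a preceding FPALU insn whose
   default latency is, say, 3 cycles, the adjusted cost is 3 - 1 == 2,
   letting the load issue one cycle before the arithmetic insn
   completes.  (The latency value is illustrative.)  */
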
4999 /* Adjust scheduling priorities. We use this to try to keep addil
5000 and the next use of %r1 close together. */
5001 static int
5002 pa_adjust_priority (rtx_insn *insn, int priority)
5003 {
5004 rtx set = single_set (insn);
5005 rtx src, dest;
5006 if (set)
5007 {
5008 src = SET_SRC (set);
5009 dest = SET_DEST (set);
5010 if (GET_CODE (src) == LO_SUM
5011 && symbolic_operand (XEXP (src, 1), VOIDmode)
5012 && ! read_only_operand (XEXP (src, 1), VOIDmode))
5013 priority >>= 3;
5014
5015 else if (GET_CODE (src) == MEM
5016 && GET_CODE (XEXP (src, 0)) == LO_SUM
5017 && symbolic_operand (XEXP (XEXP (src, 0), 1), VOIDmode)
5018 && ! read_only_operand (XEXP (XEXP (src, 0), 1), VOIDmode))
5019 priority >>= 1;
5020
5021 else if (GET_CODE (dest) == MEM
5022 && GET_CODE (XEXP (dest, 0)) == LO_SUM
5023 && symbolic_operand (XEXP (XEXP (dest, 0), 1), VOIDmode)
5024 && ! read_only_operand (XEXP (XEXP (dest, 0), 1), VOIDmode))
5025 priority >>= 3;
5026 }
5027 return priority;
5028 }
5029
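/* E.g., an insn whose source is a LO_SUM of a writable symbolic address
   and whose computed priority is 32 is reduced to 32 >> 3 == 4, the
   intent being to keep the addil that sets %r1 from being scheduled far
   ahead of this use of %r1.  */
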
5030 /* The 700 can only issue a single insn at a time.
5031 The 7XXX processors can issue two insns at a time.
5032 The 8000 can issue 4 insns at a time. */
5033 static int
5034 pa_issue_rate (void)
5035 {
5036 switch (pa_cpu)
5037 {
5038 case PROCESSOR_700: return 1;
5039 case PROCESSOR_7100: return 2;
5040 case PROCESSOR_7100LC: return 2;
5041 case PROCESSOR_7200: return 2;
5042 case PROCESSOR_7300: return 2;
5043 case PROCESSOR_8000: return 4;
5044
5045 default:
5046 gcc_unreachable ();
5047 }
5048 }
5049
5050
5051
5052 /* Return the length of INSN, whose length has already been computed
5053 as LENGTH, plus any adjustment needed. Return LENGTH if no
5054 adjustment is necessary.
5055
5056 Also compute the length of an inline block move here as it is too
5057 complicated to express as a length attribute in pa.md. */
5058 int
5059 pa_adjust_insn_length (rtx_insn *insn, int length)
5060 {
5061 rtx pat = PATTERN (insn);
5062
5063 /* If length is negative or undefined, provide initial length. */
5064 if ((unsigned int) length >= INT_MAX)
5065 {
5066 if (GET_CODE (pat) == SEQUENCE)
5067 insn = as_a <rtx_insn *> (XVECEXP (pat, 0, 0));
5068
5069 switch (get_attr_type (insn))
5070 {
5071 case TYPE_MILLI:
5072 length = pa_attr_length_millicode_call (insn);
5073 break;
5074 case TYPE_CALL:
5075 length = pa_attr_length_call (insn, 0);
5076 break;
5077 case TYPE_SIBCALL:
5078 length = pa_attr_length_call (insn, 1);
5079 break;
5080 case TYPE_DYNCALL:
5081 length = pa_attr_length_indirect_call (insn);
5082 break;
5083 case TYPE_SH_FUNC_ADRS:
5084 length = pa_attr_length_millicode_call (insn) + 20;
5085 break;
5086 default:
5087 gcc_unreachable ();
5088 }
5089 }
5090
5091 /* Block move pattern. */
5092 if (NONJUMP_INSN_P (insn)
5093 && GET_CODE (pat) == PARALLEL
5094 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
5095 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
5096 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 1)) == MEM
5097 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode
5098 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 1)) == BLKmode)
5099 length += compute_movmem_length (insn) - 4;
5100 /* Block clear pattern. */
5101 else if (NONJUMP_INSN_P (insn)
5102 && GET_CODE (pat) == PARALLEL
5103 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
5104 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
5105 && XEXP (XVECEXP (pat, 0, 0), 1) == const0_rtx
5106 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode)
5107 length += compute_clrmem_length (insn) - 4;
5108 /* Conditional branch with an unfilled delay slot. */
5109 else if (JUMP_P (insn) && ! simplejump_p (insn))
5110 {
5111 /* Adjust a short backwards conditional with an unfilled delay slot. */
5112 if (GET_CODE (pat) == SET
5113 && length == 4
5114 && JUMP_LABEL (insn) != NULL_RTX
5115 && ! forward_branch_p (insn))
5116 length += 4;
5117 else if (GET_CODE (pat) == PARALLEL
5118 && get_attr_type (insn) == TYPE_PARALLEL_BRANCH
5119 && length == 4)
5120 length += 4;
5121 /* Adjust dbra insn with short backwards conditional branch with
5122 unfilled delay slot -- only for the case where the counter is in a
5123 general register. */
5124 else if (GET_CODE (pat) == PARALLEL
5125 && GET_CODE (XVECEXP (pat, 0, 1)) == SET
5126 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == REG
5127 && ! FP_REG_P (XEXP (XVECEXP (pat, 0, 1), 0))
5128 && length == 4
5129 && ! forward_branch_p (insn))
5130 length += 4;
5131 }
5132 return length;
5133 }
5134
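/* Worked example: a short backward conditional branch with an unfilled
   delay slot has a computed length of 4; the adjustment above grows it
   to 8 to account for the nop that must fill the delay slot, since a
   backward conditional branch nullifies the slot only when the branch
   is not taken.  */
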
5135 /* Implement the TARGET_PRINT_OPERAND_PUNCT_VALID_P hook. */
5136
5137 static bool
5138 pa_print_operand_punct_valid_p (unsigned char code)
5139 {
5140 if (code == '@'
5141 || code == '#'
5142 || code == '*'
5143 || code == '^')
5144 return true;
5145
5146 return false;
5147 }
5148
5149 /* Print operand X (an rtx) in assembler syntax to file FILE.
5150 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
5151 For `%' followed by punctuation, CODE is the punctuation and X is null. */
5152
5153 void
5154 pa_print_operand (FILE *file, rtx x, int code)
5155 {
5156 switch (code)
5157 {
5158 case '#':
5159 /* Output a 'nop' if there's nothing for the delay slot. */
5160 if (dbr_sequence_length () == 0)
5161 fputs ("\n\tnop", file);
5162 return;
5163 case '*':
5164 /* Output a nullification completer if there's nothing for the
5165 delay slot or nullification is requested. */
5166 if (dbr_sequence_length () == 0
5167 || (final_sequence
5168 && INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))))
5169 fputs (",n", file);
5170 return;
5171 case 'R':
5172 /* Print out the second register name of a register pair.
5173 I.e., R (6) => 7. */
5174 fputs (reg_names[REGNO (x) + 1], file);
5175 return;
5176 case 'r':
5177 /* A register or zero. */
5178 if (x == const0_rtx
5179 || (x == CONST0_RTX (DFmode))
5180 || (x == CONST0_RTX (SFmode)))
5181 {
5182 fputs ("%r0", file);
5183 return;
5184 }
5185 else
5186 break;
5187 case 'f':
5188 /* A register or zero (floating point). */
5189 if (x == const0_rtx
5190 || (x == CONST0_RTX (DFmode))
5191 || (x == CONST0_RTX (SFmode)))
5192 {
5193 fputs ("%fr0", file);
5194 return;
5195 }
5196 else
5197 break;
5198 case 'A':
5199 {
5200 rtx xoperands[2];
5201
5202 xoperands[0] = XEXP (XEXP (x, 0), 0);
5203 xoperands[1] = XVECEXP (XEXP (XEXP (x, 0), 1), 0, 0);
5204 pa_output_global_address (file, xoperands[1], 0);
5205 fprintf (file, "(%s)", reg_names [REGNO (xoperands[0])]);
5206 return;
5207 }
5208
5209 case 'C': /* Plain (C)ondition */
5210 case 'X':
5211 switch (GET_CODE (x))
5212 {
5213 case EQ:
5214 fputs ("=", file); break;
5215 case NE:
5216 fputs ("<>", file); break;
5217 case GT:
5218 fputs (">", file); break;
5219 case GE:
5220 fputs (">=", file); break;
5221 case GEU:
5222 fputs (">>=", file); break;
5223 case GTU:
5224 fputs (">>", file); break;
5225 case LT:
5226 fputs ("<", file); break;
5227 case LE:
5228 fputs ("<=", file); break;
5229 case LEU:
5230 fputs ("<<=", file); break;
5231 case LTU:
5232 fputs ("<<", file); break;
5233 default:
5234 gcc_unreachable ();
5235 }
5236 return;
5237 case 'N': /* Condition, (N)egated */
5238 switch (GET_CODE (x))
5239 {
5240 case EQ:
5241 fputs ("<>", file); break;
5242 case NE:
5243 fputs ("=", file); break;
5244 case GT:
5245 fputs ("<=", file); break;
5246 case GE:
5247 fputs ("<", file); break;
5248 case GEU:
5249 fputs ("<<", file); break;
5250 case GTU:
5251 fputs ("<<=", file); break;
5252 case LT:
5253 fputs (">=", file); break;
5254 case LE:
5255 fputs (">", file); break;
5256 case LEU:
5257 fputs (">>", file); break;
5258 case LTU:
5259 fputs (">>=", file); break;
5260 default:
5261 gcc_unreachable ();
5262 }
5263 return;
5264 /* For floating point comparisons. Note that the output
5265 predicates are the complement of the desired mode. The
5266 conditions for GT, GE, LT, LE and LTGT cause an invalid
5267 operation exception if the result is unordered and this
5268 exception is enabled in the floating-point status register. */
5269 case 'Y':
5270 switch (GET_CODE (x))
5271 {
5272 case EQ:
5273 fputs ("!=", file); break;
5274 case NE:
5275 fputs ("=", file); break;
5276 case GT:
5277 fputs ("!>", file); break;
5278 case GE:
5279 fputs ("!>=", file); break;
5280 case LT:
5281 fputs ("!<", file); break;
5282 case LE:
5283 fputs ("!<=", file); break;
5284 case LTGT:
5285 fputs ("!<>", file); break;
5286 case UNLE:
5287 fputs ("!?<=", file); break;
5288 case UNLT:
5289 fputs ("!?<", file); break;
5290 case UNGE:
5291 fputs ("!?>=", file); break;
5292 case UNGT:
5293 fputs ("!?>", file); break;
5294 case UNEQ:
5295 fputs ("!?=", file); break;
5296 case UNORDERED:
5297 fputs ("!?", file); break;
5298 case ORDERED:
5299 fputs ("?", file); break;
5300 default:
5301 gcc_unreachable ();
5302 }
5303 return;
5304 case 'S': /* Condition, operands are (S)wapped. */
5305 switch (GET_CODE (x))
5306 {
5307 case EQ:
5308 fputs ("=", file); break;
5309 case NE:
5310 fputs ("<>", file); break;
5311 case GT:
5312 fputs ("<", file); break;
5313 case GE:
5314 fputs ("<=", file); break;
5315 case GEU:
5316 fputs ("<<=", file); break;
5317 case GTU:
5318 fputs ("<<", file); break;
5319 case LT:
5320 fputs (">", file); break;
5321 case LE:
5322 fputs (">=", file); break;
5323 case LEU:
5324 fputs (">>=", file); break;
5325 case LTU:
5326 fputs (">>", file); break;
5327 default:
5328 gcc_unreachable ();
5329 }
5330 return;
5331 case 'B': /* Condition, (B)oth swapped and negate. */
5332 switch (GET_CODE (x))
5333 {
5334 case EQ:
5335 fputs ("<>", file); break;
5336 case NE:
5337 fputs ("=", file); break;
5338 case GT:
5339 fputs (">=", file); break;
5340 case GE:
5341 fputs (">", file); break;
5342 case GEU:
5343 fputs (">>", file); break;
5344 case GTU:
5345 fputs (">>=", file); break;
5346 case LT:
5347 fputs ("<=", file); break;
5348 case LE:
5349 fputs ("<", file); break;
5350 case LEU:
5351 fputs ("<<", file); break;
5352 case LTU:
5353 fputs ("<<=", file); break;
5354 default:
5355 gcc_unreachable ();
5356 }
5357 return;
5358 case 'k':
5359 gcc_assert (GET_CODE (x) == CONST_INT);
5360 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~INTVAL (x));
5361 return;
5362 case 'Q':
5363 gcc_assert (GET_CODE (x) == CONST_INT);
5364 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - (INTVAL (x) & 63));
5365 return;
5366 case 'L':
5367 gcc_assert (GET_CODE (x) == CONST_INT);
5368 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - (INTVAL (x) & 31));
5369 return;
5370 case 'o':
5371 gcc_assert (GET_CODE (x) == CONST_INT
5372 && (INTVAL (x) == 1 || INTVAL (x) == 2 || INTVAL (x) == 3));
5373 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
5374 return;
5375 case 'O':
5376 gcc_assert (GET_CODE (x) == CONST_INT && exact_log2 (INTVAL (x)) >= 0);
5377 fprintf (file, "%d", exact_log2 (INTVAL (x)));
5378 return;
5379 case 'p':
5380 gcc_assert (GET_CODE (x) == CONST_INT);
5381 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 63 - (INTVAL (x) & 63));
5382 return;
5383 case 'P':
5384 gcc_assert (GET_CODE (x) == CONST_INT);
5385 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 31 - (INTVAL (x) & 31));
5386 return;
5387 case 'I':
5388 if (GET_CODE (x) == CONST_INT)
5389 fputs ("i", file);
5390 return;
5391 case 'M':
5392 case 'F':
5393 switch (GET_CODE (XEXP (x, 0)))
5394 {
5395 case PRE_DEC:
5396 case PRE_INC:
5397 if (ASSEMBLER_DIALECT == 0)
5398 fputs ("s,mb", file);
5399 else
5400 fputs (",mb", file);
5401 break;
5402 case POST_DEC:
5403 case POST_INC:
5404 if (ASSEMBLER_DIALECT == 0)
5405 fputs ("s,ma", file);
5406 else
5407 fputs (",ma", file);
5408 break;
5409 case PLUS:
5410 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5411 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5412 {
5413 if (ASSEMBLER_DIALECT == 0)
5414 fputs ("x", file);
5415 }
5416 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
5417 || GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5418 {
5419 if (ASSEMBLER_DIALECT == 0)
5420 fputs ("x,s", file);
5421 else
5422 fputs (",s", file);
5423 }
5424 else if (code == 'F' && ASSEMBLER_DIALECT == 0)
5425 fputs ("s", file);
5426 break;
5427 default:
5428 if (code == 'F' && ASSEMBLER_DIALECT == 0)
5429 fputs ("s", file);
5430 break;
5431 }
5432 return;
5433 case 'G':
5434 pa_output_global_address (file, x, 0);
5435 return;
5436 case 'H':
5437 pa_output_global_address (file, x, 1);
5438 return;
5439 case 0: /* Don't do anything special */
5440 break;
5441 case 'Z':
5442 {
5443 unsigned op[3];
5444 compute_zdepwi_operands (INTVAL (x), op);
5445 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5446 return;
5447 }
5448 case 'z':
5449 {
5450 unsigned op[3];
5451 compute_zdepdi_operands (INTVAL (x), op);
5452 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5453 return;
5454 }
5455 case 'c':
5456 /* We can get here from a .vtable_inherit due to our
5457 CONSTANT_ADDRESS_P rejecting perfectly good constant
5458 addresses. */
5459 break;
5460 default:
5461 gcc_unreachable ();
5462 }
5463 if (GET_CODE (x) == REG)
5464 {
5465 fputs (reg_names [REGNO (x)], file);
5466 if (TARGET_64BIT && FP_REG_P (x) && GET_MODE_SIZE (GET_MODE (x)) <= 4)
5467 {
5468 fputs ("R", file);
5469 return;
5470 }
5471 if (FP_REG_P (x)
5472 && GET_MODE_SIZE (GET_MODE (x)) <= 4
5473 && (REGNO (x) & 1) == 0)
5474 fputs ("L", file);
5475 }
5476 else if (GET_CODE (x) == MEM)
5477 {
5478 int size = GET_MODE_SIZE (GET_MODE (x));
5479 rtx base = NULL_RTX;
5480 switch (GET_CODE (XEXP (x, 0)))
5481 {
5482 case PRE_DEC:
5483 case POST_DEC:
5484 base = XEXP (XEXP (x, 0), 0);
5485 fprintf (file, "-%d(%s)", size, reg_names [REGNO (base)]);
5486 break;
5487 case PRE_INC:
5488 case POST_INC:
5489 base = XEXP (XEXP (x, 0), 0);
5490 fprintf (file, "%d(%s)", size, reg_names [REGNO (base)]);
5491 break;
5492 case PLUS:
5493 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
5494 fprintf (file, "%s(%s)",
5495 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 0), 0))],
5496 reg_names [REGNO (XEXP (XEXP (x, 0), 1))]);
5497 else if (GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5498 fprintf (file, "%s(%s)",
5499 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 1), 0))],
5500 reg_names [REGNO (XEXP (XEXP (x, 0), 0))]);
5501 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5502 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5503 {
5504 /* Because the REG_POINTER flag can get lost during reload,
5505 pa_legitimate_address_p canonicalizes the order of the
5506 index and base registers in the combined move patterns. */
5507 rtx base = XEXP (XEXP (x, 0), 1);
5508 rtx index = XEXP (XEXP (x, 0), 0);
5509
5510 fprintf (file, "%s(%s)",
5511 reg_names [REGNO (index)], reg_names [REGNO (base)]);
5512 }
5513 else
5514 output_address (GET_MODE (x), XEXP (x, 0));
5515 break;
5516 default:
5517 output_address (GET_MODE (x), XEXP (x, 0));
5518 break;
5519 }
5520 }
5521 else
5522 output_addr_const (file, x);
5523 }
5524
5525 /* Output a SYMBOL_REF or a CONST expression involving a SYMBOL_REF. */
5526
5527 void
5528 pa_output_global_address (FILE *file, rtx x, int round_constant)
5529 {
5530
5531 /* Imagine (high (const (plus ...))). */
5532 if (GET_CODE (x) == HIGH)
5533 x = XEXP (x, 0);
5534
5535 if (GET_CODE (x) == SYMBOL_REF && read_only_operand (x, VOIDmode))
5536 output_addr_const (file, x);
5537 else if (GET_CODE (x) == SYMBOL_REF && !flag_pic)
5538 {
5539 output_addr_const (file, x);
5540 fputs ("-$global$", file);
5541 }
5542 else if (GET_CODE (x) == CONST)
5543 {
5544 const char *sep = "";
5545 int offset = 0; /* assembler wants -$global$ at end */
5546 rtx base = NULL_RTX;
5547
5548 switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
5549 {
5550 case LABEL_REF:
5551 case SYMBOL_REF:
5552 base = XEXP (XEXP (x, 0), 0);
5553 output_addr_const (file, base);
5554 break;
5555 case CONST_INT:
5556 offset = INTVAL (XEXP (XEXP (x, 0), 0));
5557 break;
5558 default:
5559 gcc_unreachable ();
5560 }
5561
5562 switch (GET_CODE (XEXP (XEXP (x, 0), 1)))
5563 {
5564 case LABEL_REF:
5565 case SYMBOL_REF:
5566 base = XEXP (XEXP (x, 0), 1);
5567 output_addr_const (file, base);
5568 break;
5569 case CONST_INT:
5570 offset = INTVAL (XEXP (XEXP (x, 0), 1));
5571 break;
5572 default:
5573 gcc_unreachable ();
5574 }
5575
5576 /* How bogus. The compiler is apparently responsible for
5577 rounding the constant if it uses an LR field selector.
5578
5579 The linker and/or assembler seem a better place since
5580 they have to do this kind of thing already.
5581
5582 If we fail to do this, HP's optimizing linker may eliminate
5583 an addil, but not update the ldw/stw/ldo instruction that
5584 uses the result of the addil. */
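/* This rounds OFFSET to the nearest multiple of 0x2000; for example,
   0xfff rounds to 0 while 0x1000 rounds up to 0x2000.  */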
5585 if (round_constant)
5586 offset = ((offset + 0x1000) & ~0x1fff);
5587
5588 switch (GET_CODE (XEXP (x, 0)))
5589 {
5590 case PLUS:
5591 if (offset < 0)
5592 {
5593 offset = -offset;
5594 sep = "-";
5595 }
5596 else
5597 sep = "+";
5598 break;
5599
5600 case MINUS:
5601 gcc_assert (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF);
5602 sep = "-";
5603 break;
5604
5605 default:
5606 gcc_unreachable ();
5607 }
5608
5609 if (!read_only_operand (base, VOIDmode) && !flag_pic)
5610 fputs ("-$global$", file);
5611 if (offset)
5612 fprintf (file, "%s%d", sep, offset);
5613 }
5614 else
5615 output_addr_const (file, x);
5616 }
5617
5618 /* Output boilerplate text to appear at the beginning of the file.
5619 There are several possible versions. */
5620 #define aputs(x) fputs(x, asm_out_file)
5621 static inline void
5622 pa_file_start_level (void)
5623 {
5624 if (TARGET_64BIT)
5625 aputs ("\t.LEVEL 2.0w\n");
5626 else if (TARGET_PA_20)
5627 aputs ("\t.LEVEL 2.0\n");
5628 else if (TARGET_PA_11)
5629 aputs ("\t.LEVEL 1.1\n");
5630 else
5631 aputs ("\t.LEVEL 1.0\n");
5632 }
5633
5634 static inline void
5635 pa_file_start_space (int sortspace)
5636 {
5637 aputs ("\t.SPACE $PRIVATE$");
5638 if (sortspace)
5639 aputs (",SORT=16");
5640 aputs ("\n\t.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31");
5641 if (flag_tm)
5642 aputs ("\n\t.SUBSPA $TM_CLONE_TABLE$,QUAD=1,ALIGN=8,ACCESS=31");
5643 aputs ("\n\t.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82"
5644 "\n\t.SPACE $TEXT$");
5645 if (sortspace)
5646 aputs (",SORT=8");
5647 aputs ("\n\t.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44"
5648 "\n\t.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY\n");
5649 }
5650
5651 static inline void
5652 pa_file_start_file (int want_version)
5653 {
5654 if (write_symbols != NO_DEBUG)
5655 {
5656 output_file_directive (asm_out_file, main_input_filename);
5657 if (want_version)
5658 aputs ("\t.version\t\"01.01\"\n");
5659 }
5660 }
5661
5662 static inline void
5663 pa_file_start_mcount (const char *aswhat)
5664 {
5665 if (profile_flag)
5666 fprintf (asm_out_file, "\t.IMPORT _mcount,%s\n", aswhat);
5667 }
5668
5669 static void
5670 pa_elf_file_start (void)
5671 {
5672 pa_file_start_level ();
5673 pa_file_start_mcount ("ENTRY");
5674 pa_file_start_file (0);
5675 }
5676
5677 static void
5678 pa_som_file_start (void)
5679 {
5680 pa_file_start_level ();
5681 pa_file_start_space (0);
5682 aputs ("\t.IMPORT $global$,DATA\n"
5683 "\t.IMPORT $$dyncall,MILLICODE\n");
5684 pa_file_start_mcount ("CODE");
5685 pa_file_start_file (0);
5686 }
5687
5688 static void
5689 pa_linux_file_start (void)
5690 {
5691 pa_file_start_file (1);
5692 pa_file_start_level ();
5693 pa_file_start_mcount ("CODE");
5694 }
5695
5696 static void
5697 pa_hpux64_gas_file_start (void)
5698 {
5699 pa_file_start_level ();
5700 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
5701 if (profile_flag)
5702 ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, "_mcount", "function");
5703 #endif
5704 pa_file_start_file (1);
5705 }
5706
5707 static void
5708 pa_hpux64_hpas_file_start (void)
5709 {
5710 pa_file_start_level ();
5711 pa_file_start_space (1);
5712 pa_file_start_mcount ("CODE");
5713 pa_file_start_file (0);
5714 }
5715 #undef aputs
5716
5717 /* Search the deferred plabel list for SYMBOL and return its internal
5718 label. If an entry for SYMBOL is not found, a new entry is created. */
5719
5720 rtx
5721 pa_get_deferred_plabel (rtx symbol)
5722 {
5723 const char *fname = XSTR (symbol, 0);
5724 size_t i;
5725
5726 /* See if we have already put this function on the list of deferred
5727 plabels. This list is generally small, so a linear search is not
5728 too ugly. If it proves too slow, replace it with something faster. */
5729 for (i = 0; i < n_deferred_plabels; i++)
5730 if (strcmp (fname, XSTR (deferred_plabels[i].symbol, 0)) == 0)
5731 break;
5732
5733 /* If the deferred plabel list is empty, or this entry was not found
5734 on the list, create a new entry on the list. */
5735 if (deferred_plabels == NULL || i == n_deferred_plabels)
5736 {
5737 tree id;
5738
5739 if (deferred_plabels == 0)
5740 deferred_plabels = ggc_alloc<deferred_plabel> ();
5741 else
5742 deferred_plabels = GGC_RESIZEVEC (struct deferred_plabel,
5743 deferred_plabels,
5744 n_deferred_plabels + 1);
5745
5746 i = n_deferred_plabels++;
5747 deferred_plabels[i].internal_label = gen_label_rtx ();
5748 deferred_plabels[i].symbol = symbol;
5749
5750 /* Gross. We have just implicitly taken the address of this
5751 function. Mark it in the same manner as assemble_name. */
5752 id = maybe_get_identifier (targetm.strip_name_encoding (fname));
5753 if (id)
5754 mark_referenced (id);
5755 }
5756
5757 return deferred_plabels[i].internal_label;
5758 }
5759
5760 static void
5761 output_deferred_plabels (void)
5762 {
5763 size_t i;
5764
5765 /* If we have some deferred plabels, then we need to switch into the
5766 data or readonly data section, and align it to a word boundary (4 or
5767 8 bytes) before outputting the deferred plabels. */
5768 if (n_deferred_plabels)
5769 {
5770 switch_to_section (flag_pic ? data_section : readonly_data_section);
5771 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
5772 }
5773
5774 /* Now output the deferred plabels. */
5775 for (i = 0; i < n_deferred_plabels; i++)
5776 {
5777 targetm.asm_out.internal_label (asm_out_file, "L",
5778 CODE_LABEL_NUMBER (deferred_plabels[i].internal_label));
5779 assemble_integer (deferred_plabels[i].symbol,
5780 TARGET_64BIT ? 8 : 4, TARGET_64BIT ? 64 : 32, 1);
5781 }
5782 }
5783
5784 /* Initialize optabs to point to emulation routines. */
5785
5786 static void
5787 pa_init_libfuncs (void)
5788 {
5789 if (HPUX_LONG_DOUBLE_LIBRARY)
5790 {
5791 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
5792 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
5793 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
5794 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
5795 set_optab_libfunc (smin_optab, TFmode, "_U_Qmin");
5796 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
5797 set_optab_libfunc (sqrt_optab, TFmode, "_U_Qfsqrt");
5798 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
5799 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
5800
5801 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
5802 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
5803 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
5804 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
5805 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
5806 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
5807 set_optab_libfunc (unord_optab, TFmode, "_U_Qfunord");
5808
5809 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
5810 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
5811 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
5812 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
5813
5814 set_conv_libfunc (sfix_optab, SImode, TFmode,
5815 TARGET_64BIT ? "__U_Qfcnvfxt_quad_to_sgl"
5816 : "_U_Qfcnvfxt_quad_to_sgl");
5817 set_conv_libfunc (sfix_optab, DImode, TFmode,
5818 "_U_Qfcnvfxt_quad_to_dbl");
5819 set_conv_libfunc (ufix_optab, SImode, TFmode,
5820 "_U_Qfcnvfxt_quad_to_usgl");
5821 set_conv_libfunc (ufix_optab, DImode, TFmode,
5822 "_U_Qfcnvfxt_quad_to_udbl");
5823
5824 set_conv_libfunc (sfloat_optab, TFmode, SImode,
5825 "_U_Qfcnvxf_sgl_to_quad");
5826 set_conv_libfunc (sfloat_optab, TFmode, DImode,
5827 "_U_Qfcnvxf_dbl_to_quad");
5828 set_conv_libfunc (ufloat_optab, TFmode, SImode,
5829 "_U_Qfcnvxf_usgl_to_quad");
5830 set_conv_libfunc (ufloat_optab, TFmode, DImode,
5831 "_U_Qfcnvxf_udbl_to_quad");
5832 }
5833
5834 if (TARGET_SYNC_LIBCALL)
5835 init_sync_libfuncs (8);
5836 }
5837
5838 /* HP's millicode routines mean something special to the assembler.
5839 Keep track of which ones we have used. */
5840
5841 enum millicodes { remI, remU, divI, divU, mulI, end1000 };
5842 static void import_milli (enum millicodes);
5843 static char imported[(int) end1000];
5844 static const char * const milli_names[] = {"remI", "remU", "divI", "divU", "mulI"};
5845 static const char import_string[] = ".IMPORT $$....,MILLICODE";
5846 #define MILLI_START 10
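/* MILLI_START is the offset of the "...." placeholder in
   IMPORT_STRING, where the four-character millicode name is spliced
   in by import_milli below.  */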
5847
5848 static void
5849 import_milli (enum millicodes code)
5850 {
5851 char str[sizeof (import_string)];
5852
5853 if (!imported[(int) code])
5854 {
5855 imported[(int) code] = 1;
5856 strcpy (str, import_string);
5857 strncpy (str + MILLI_START, milli_names[(int) code], 4);
5858 output_asm_insn (str, 0);
5859 }
5860 }
5861
5862 /* The register constraints have put the operands and return value in
5863 the proper registers. */
5864
5865 const char *
5866 pa_output_mul_insn (int unsignedp ATTRIBUTE_UNUSED, rtx_insn *insn)
5867 {
5868 import_milli (mulI);
5869 return pa_output_millicode_call (insn, gen_rtx_SYMBOL_REF (Pmode, "$$mulI"));
5870 }
5871
5872 /* Emit the rtl for doing a division by a constant. */
5873
5874 /* Do magic division millicodes exist for this value? */
5875 const int pa_magic_milli[]= {0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1};
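/* Note the entries for powers of two are zero; presumably those
   divisions are reduced to shifts before this code is reached, so no
   magic millicode is needed for them.  */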
5876
5877 /* We'll use an array to keep track of the magic millicodes and
5878 whether or not we've used them already. [n][0] is signed, [n][1] is
5879 unsigned. */
5880
5881 static int div_milli[16][2];
5882
5883 int
5884 pa_emit_hpdiv_const (rtx *operands, int unsignedp)
5885 {
5886 if (GET_CODE (operands[2]) == CONST_INT
5887 && INTVAL (operands[2]) > 0
5888 && INTVAL (operands[2]) < 16
5889 && pa_magic_milli[INTVAL (operands[2])])
5890 {
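/* The millicode convention here: the dividend is passed in %r26, the
   quotient comes back in %r29, and the call clobbers %r25, %r26 and
   the return link (%r31, or %r2 in the 64-bit runtime), all of which
   the PARALLEL below records.  */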
5891 rtx ret = gen_rtx_REG (SImode, TARGET_64BIT ? 2 : 31);
5892
5893 emit_move_insn (gen_rtx_REG (SImode, 26), operands[1]);
5894 emit
5895 (gen_rtx_PARALLEL
5896 (VOIDmode,
5897 gen_rtvec (6, gen_rtx_SET (gen_rtx_REG (SImode, 29),
5898 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
5899 SImode,
5900 gen_rtx_REG (SImode, 26),
5901 operands[2])),
5902 gen_rtx_CLOBBER (VOIDmode, operands[4]),
5903 gen_rtx_CLOBBER (VOIDmode, operands[3]),
5904 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 26)),
5905 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 25)),
5906 gen_rtx_CLOBBER (VOIDmode, ret))));
5907 emit_move_insn (operands[0], gen_rtx_REG (SImode, 29));
5908 return 1;
5909 }
5910 return 0;
5911 }
5912
5913 const char *
5914 pa_output_div_insn (rtx *operands, int unsignedp, rtx_insn *insn)
5915 {
5916 int divisor;
5917
5918 /* If the divisor is a constant, try to use one of the special
5919 opcodes. */
5920 if (GET_CODE (operands[0]) == CONST_INT)
5921 {
5922 static char buf[100];
5923 divisor = INTVAL (operands[0]);
5924 if (!div_milli[divisor][unsignedp])
5925 {
5926 div_milli[divisor][unsignedp] = 1;
5927 if (unsignedp)
5928 output_asm_insn (".IMPORT $$divU_%0,MILLICODE", operands);
5929 else
5930 output_asm_insn (".IMPORT $$divI_%0,MILLICODE", operands);
5931 }
5932 if (unsignedp)
5933 {
5934 sprintf (buf, "$$divU_" HOST_WIDE_INT_PRINT_DEC,
5935 INTVAL (operands[0]));
5936 return pa_output_millicode_call (insn,
5937 gen_rtx_SYMBOL_REF (SImode, buf));
5938 }
5939 else
5940 {
5941 sprintf (buf, "$$divI_" HOST_WIDE_INT_PRINT_DEC,
5942 INTVAL (operands[0]));
5943 return pa_output_millicode_call (insn,
5944 gen_rtx_SYMBOL_REF (SImode, buf));
5945 }
5946 }
5947 /* Divisor isn't a special constant. */
5948 else
5949 {
5950 if (unsignedp)
5951 {
5952 import_milli (divU);
5953 return pa_output_millicode_call (insn,
5954 gen_rtx_SYMBOL_REF (SImode, "$$divU"));
5955 }
5956 else
5957 {
5958 import_milli (divI);
5959 return pa_output_millicode_call (insn,
5960 gen_rtx_SYMBOL_REF (SImode, "$$divI"));
5961 }
5962 }
5963 }
5964
5965 /* Output a $$rem millicode to do mod. */
5966
5967 const char *
5968 pa_output_mod_insn (int unsignedp, rtx_insn *insn)
5969 {
5970 if (unsignedp)
5971 {
5972 import_milli (remU);
5973 return pa_output_millicode_call (insn,
5974 gen_rtx_SYMBOL_REF (SImode, "$$remU"));
5975 }
5976 else
5977 {
5978 import_milli (remI);
5979 return pa_output_millicode_call (insn,
5980 gen_rtx_SYMBOL_REF (SImode, "$$remI"));
5981 }
5982 }
5983
5984 void
5985 pa_output_arg_descriptor (rtx_insn *call_insn)
5986 {
5987 const char *arg_regs[4];
5988 machine_mode arg_mode;
5989 rtx link;
5990 int i, output_flag = 0;
5991 int regno;
5992
5993 /* We neither need nor want argument location descriptors for the
5994 64-bit runtime environment or the ELF32 environment. */
5995 if (TARGET_64BIT || TARGET_ELF32)
5996 return;
5997
5998 for (i = 0; i < 4; i++)
5999 arg_regs[i] = 0;
6000
6001 /* Specify explicitly that no argument relocations should take place
6002 if using the portable runtime calling conventions. */
6003 if (TARGET_PORTABLE_RUNTIME)
6004 {
6005 fputs ("\t.CALL ARGW0=NO,ARGW1=NO,ARGW2=NO,ARGW3=NO,RETVAL=NO\n",
6006 asm_out_file);
6007 return;
6008 }
6009
6010 gcc_assert (CALL_P (call_insn));
6011 for (link = CALL_INSN_FUNCTION_USAGE (call_insn);
6012 link; link = XEXP (link, 1))
6013 {
6014 rtx use = XEXP (link, 0);
6015
6016 if (! (GET_CODE (use) == USE
6017 && GET_CODE (XEXP (use, 0)) == REG
6018 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
6019 continue;
6020
6021 arg_mode = GET_MODE (XEXP (use, 0));
6022 regno = REGNO (XEXP (use, 0));
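/* ARGW0 corresponds to %r26 and ARGW3 to %r23, hence the "26 - regno"
   indexing below; a DImode argument occupies two consecutive words.
   FP argument words are described as "FR", with the two halves of a
   double marked "FR" and "FU".  */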
6023 if (regno >= 23 && regno <= 26)
6024 {
6025 arg_regs[26 - regno] = "GR";
6026 if (arg_mode == DImode)
6027 arg_regs[25 - regno] = "GR";
6028 }
6029 else if (regno >= 32 && regno <= 39)
6030 {
6031 if (arg_mode == SFmode)
6032 arg_regs[(regno - 32) / 2] = "FR";
6033 else
6034 {
6035 #ifndef HP_FP_ARG_DESCRIPTOR_REVERSED
6036 arg_regs[(regno - 34) / 2] = "FR";
6037 arg_regs[(regno - 34) / 2 + 1] = "FU";
6038 #else
6039 arg_regs[(regno - 34) / 2] = "FU";
6040 arg_regs[(regno - 34) / 2 + 1] = "FR";
6041 #endif
6042 }
6043 }
6044 }
6045 fputs ("\t.CALL ", asm_out_file);
6046 for (i = 0; i < 4; i++)
6047 {
6048 if (arg_regs[i])
6049 {
6050 if (output_flag++)
6051 fputc (',', asm_out_file);
6052 fprintf (asm_out_file, "ARGW%d=%s", i, arg_regs[i]);
6053 }
6054 }
6055 fputc ('\n', asm_out_file);
6056 }
6057 \f
6058 /* Inform reload about cases where moving X with a mode MODE to or from
6059 a register in RCLASS requires an extra scratch or immediate register.
6060 Return the class needed for the immediate register. */
6061
6062 static reg_class_t
6063 pa_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
6064 machine_mode mode, secondary_reload_info *sri)
6065 {
6066 int regno;
6067 enum reg_class rclass = (enum reg_class) rclass_i;
6068
6069 /* Handle the easy stuff first. */
6070 if (rclass == R1_REGS)
6071 return NO_REGS;
6072
6073 if (REG_P (x))
6074 {
6075 regno = REGNO (x);
6076 if (rclass == BASE_REG_CLASS && regno < FIRST_PSEUDO_REGISTER)
6077 return NO_REGS;
6078 }
6079 else
6080 regno = -1;
6081
6082 /* If we have something like (mem (mem (...))), we can safely assume the
6083 inner MEM will end up in a general register after reloading, so there's
6084 no need for a secondary reload. */
6085 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == MEM)
6086 return NO_REGS;
6087
6088 /* Trying to load a constant into a FP register during PIC code
6089 generation requires %r1 as a scratch register. For float modes,
6090 the only legitimate constant is CONST0_RTX. However, there are
6091 a few patterns that accept constant double operands. */
6092 if (flag_pic
6093 && FP_REG_CLASS_P (rclass)
6094 && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
6095 {
6096 switch (mode)
6097 {
6098 case E_SImode:
6099 sri->icode = CODE_FOR_reload_insi_r1;
6100 break;
6101
6102 case E_DImode:
6103 sri->icode = CODE_FOR_reload_indi_r1;
6104 break;
6105
6106 case E_SFmode:
6107 sri->icode = CODE_FOR_reload_insf_r1;
6108 break;
6109
6110 case E_DFmode:
6111 sri->icode = CODE_FOR_reload_indf_r1;
6112 break;
6113
6114 default:
6115 gcc_unreachable ();
6116 }
6117 return NO_REGS;
6118 }
6119
6120 /* Secondary reloads of symbolic expressions require %r1 as a scratch
6121 register when we're generating PIC code or when the operand isn't
6122 readonly. */
6123 if (pa_symbolic_expression_p (x))
6124 {
6125 if (GET_CODE (x) == HIGH)
6126 x = XEXP (x, 0);
6127
6128 if (flag_pic || !read_only_operand (x, VOIDmode))
6129 {
6130 switch (mode)
6131 {
6132 case E_SImode:
6133 sri->icode = CODE_FOR_reload_insi_r1;
6134 break;
6135
6136 case E_DImode:
6137 sri->icode = CODE_FOR_reload_indi_r1;
6138 break;
6139
6140 default:
6141 gcc_unreachable ();
6142 }
6143 return NO_REGS;
6144 }
6145 }
6146
6147 /* Profiling showed the PA port spends about 1.3% of its compilation
6148 time in true_regnum from calls inside pa_secondary_reload_class. */
6149 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
6150 regno = true_regnum (x);
6151
6152 /* Handle reloads for floating point loads and stores. */
6153 if ((regno >= FIRST_PSEUDO_REGISTER || regno == -1)
6154 && FP_REG_CLASS_P (rclass))
6155 {
6156 if (MEM_P (x))
6157 {
6158 x = XEXP (x, 0);
6159
6160 /* We don't need a secondary reload for indexed memory addresses.
6161
6162 When INT14_OK_STRICT is true, it might appear that we could
6163 directly allow register indirect memory addresses. However,
6164 this doesn't work because we don't support SUBREGs in
6165 floating-point register copies and reload doesn't tell us
6166 when it's going to use a SUBREG. */
6167 if (IS_INDEX_ADDR_P (x))
6168 return NO_REGS;
6169 }
6170
6171 /* Request a secondary reload with a general scratch register
6172 for everything else. ??? Could symbolic operands be handled
6173 directly when generating non-pic PA 2.0 code? */
6174 sri->icode = (in_p
6175 ? direct_optab_handler (reload_in_optab, mode)
6176 : direct_optab_handler (reload_out_optab, mode));
6177 return NO_REGS;
6178 }
6179
6180 /* A SAR<->FP register copy requires an intermediate general register
6181 and secondary memory. We need a secondary reload with a general
6182 scratch register for spills. */
6183 if (rclass == SHIFT_REGS)
6184 {
6185 /* Handle spill. */
6186 if (regno >= FIRST_PSEUDO_REGISTER || regno < 0)
6187 {
6188 sri->icode = (in_p
6189 ? direct_optab_handler (reload_in_optab, mode)
6190 : direct_optab_handler (reload_out_optab, mode));
6191 return NO_REGS;
6192 }
6193
6194 /* Handle FP copy. */
6195 if (FP_REG_CLASS_P (REGNO_REG_CLASS (regno)))
6196 return GENERAL_REGS;
6197 }
6198
6199 if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER
6200 && REGNO_REG_CLASS (regno) == SHIFT_REGS
6201 && FP_REG_CLASS_P (rclass))
6202 return GENERAL_REGS;
6203
6204 return NO_REGS;
6205 }
6206
6207 /* Implement TARGET_SECONDARY_MEMORY_NEEDED. */
6208
6209 static bool
6210 pa_secondary_memory_needed (machine_mode mode ATTRIBUTE_UNUSED,
6211 reg_class_t class1 ATTRIBUTE_UNUSED,
6212 reg_class_t class2 ATTRIBUTE_UNUSED)
6213 {
6214 #ifdef PA_SECONDARY_MEMORY_NEEDED
6215 return PA_SECONDARY_MEMORY_NEEDED (mode, class1, class2);
6216 #else
6217 return false;
6218 #endif
6219 }
6220
6221 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. The argument pointer
6222 is only marked as live on entry by df-scan when it is a fixed
6223 register. It isn't a fixed register in the 64-bit runtime,
6224 so we need to mark it here. */
6225
6226 static void
6227 pa_extra_live_on_entry (bitmap regs)
6228 {
6229 if (TARGET_64BIT)
6230 bitmap_set_bit (regs, ARG_POINTER_REGNUM);
6231 }
6232
6233 /* Implement EH_RETURN_HANDLER_RTX. The MEM needs to be volatile
6234 to prevent it from being deleted. */
6235
6236 rtx
6237 pa_eh_return_handler_rtx (void)
6238 {
6239 rtx tmp;
6240
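/* The unwinder stores the handler address in the frame marker's
   return pointer slot: -16 from the entry stack pointer in the
   64-bit runtime and -20 in the 32-bit runtime (hard_frame_pointer_rtx
   corresponds to the value of %r30 on entry).  These are the same
   slots pa_output_lbranch uses below.  */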
6241 tmp = gen_rtx_PLUS (word_mode, hard_frame_pointer_rtx,
6242 TARGET_64BIT ? GEN_INT (-16) : GEN_INT (-20));
6243 tmp = gen_rtx_MEM (word_mode, tmp);
6244 tmp->volatil = 1;
6245 return tmp;
6246 }
6247
6248 /* In the 32-bit runtime, arguments larger than eight bytes are passed
6249 by invisible reference. As a GCC extension, we also pass anything
6250 with a zero or variable size by reference.
6251
6252 The 64-bit runtime does not describe passing any types by invisible
6253 reference. The internals of GCC can't currently handle passing
6254 empty structures, and zero or variable length arrays when they are
6255 not passed entirely on the stack or by reference. Thus, as a GCC
6256 extension, we pass these types by reference. The HP compiler doesn't
6257 support these types, so hopefully there shouldn't be any compatibility
6258 issues. This may have to be revisited when HP releases a C99 compiler
6259 or updates the ABI. */
6260
6261 static bool
6262 pa_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
6263 machine_mode mode, const_tree type,
6264 bool named ATTRIBUTE_UNUSED)
6265 {
6266 HOST_WIDE_INT size;
6267
6268 if (type)
6269 size = int_size_in_bytes (type);
6270 else
6271 size = GET_MODE_SIZE (mode);
6272
6273 if (TARGET_64BIT)
6274 return size <= 0;
6275 else
6276 return size <= 0 || size > 8;
6277 }
6278
6279 /* Implement TARGET_FUNCTION_ARG_PADDING. */
6280
6281 static pad_direction
6282 pa_function_arg_padding (machine_mode mode, const_tree type)
6283 {
6284 if (mode == BLKmode
6285 || (TARGET_64BIT
6286 && type
6287 && (AGGREGATE_TYPE_P (type)
6288 || TREE_CODE (type) == COMPLEX_TYPE
6289 || TREE_CODE (type) == VECTOR_TYPE)))
6290 {
6291 /* Return PAD_NONE if justification is not required. */
6292 if (type
6293 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
6294 && (int_size_in_bytes (type) * BITS_PER_UNIT) % PARM_BOUNDARY == 0)
6295 return PAD_NONE;
6296
6297 /* The directions set here are ignored when a BLKmode argument larger
6298 than a word is placed in a register. Different code is used for
6299 the stack and registers. This makes it difficult to have a
6300 consistent data representation for both the stack and registers.
6301 For both runtimes, the justification and padding for arguments on
6302 the stack and in registers should be identical. */
6303 if (TARGET_64BIT)
6304 /* The 64-bit runtime specifies left justification for aggregates. */
6305 return PAD_UPWARD;
6306 else
6307 /* The 32-bit runtime architecture specifies right justification.
6308 When the argument is passed on the stack, the argument is padded
6309 with garbage on the left. The HP compiler pads with zeros. */
6310 return PAD_DOWNWARD;
6311 }
6312
6313 if (GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
6314 return PAD_DOWNWARD;
6315 else
6316 return PAD_NONE;
6317 }
6318
6319 \f
6320 /* Do what is necessary for `va_start'. We look at the current function
6321 to determine if stdargs or varargs is used and fill in an initial
6322 va_list. A pointer to this constructor is returned. */
6323
6324 static rtx
6325 hppa_builtin_saveregs (void)
6326 {
6327 rtx offset, dest;
6328 tree fntype = TREE_TYPE (current_function_decl);
6329 int argadj = ((!stdarg_p (fntype))
6330 ? UNITS_PER_WORD : 0);
6331
6332 if (argadj)
6333 offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, argadj);
6334 else
6335 offset = crtl->args.arg_offset_rtx;
6336
6337 if (TARGET_64BIT)
6338 {
6339 int i, off;
6340
6341 /* Adjust for varargs/stdarg differences. */
6342 if (argadj)
6343 offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, -argadj);
6344 else
6345 offset = crtl->args.arg_offset_rtx;
6346
6347 /* We need to save %r26 .. %r19 inclusive starting at offset -64
6348 from the incoming arg pointer and growing to larger addresses. */
6349 for (i = 26, off = -64; i >= 19; i--, off += 8)
6350 emit_move_insn (gen_rtx_MEM (word_mode,
6351 plus_constant (Pmode,
6352 arg_pointer_rtx, off)),
6353 gen_rtx_REG (word_mode, i));
6354
6355 /* The incoming args pointer points just beyond the flushback area;
6356 normally this is not a serious concern. However, when we are doing
6357 varargs/stdargs we want to make the arg pointer point to the start
6358 of the incoming argument area. */
6359 emit_move_insn (virtual_incoming_args_rtx,
6360 plus_constant (Pmode, arg_pointer_rtx, -64));
6361
6362 /* Now return a pointer to the first anonymous argument. */
6363 return copy_to_reg (expand_binop (Pmode, add_optab,
6364 virtual_incoming_args_rtx,
6365 offset, 0, 0, OPTAB_LIB_WIDEN));
6366 }
6367
6368 /* Store general registers on the stack. */
6369 dest = gen_rtx_MEM (BLKmode,
6370 plus_constant (Pmode, crtl->args.internal_arg_pointer,
6371 -16));
6372 set_mem_alias_set (dest, get_varargs_alias_set ());
6373 set_mem_align (dest, BITS_PER_WORD);
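/* This copies %r23 through %r26 into their four word-sized stack
   home slots at -16 through -4 from the internal arg pointer, so
   anonymous arguments can later be fetched from memory.  */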
6374 move_block_from_reg (23, dest, 4);
6375
6376 /* move_block_from_reg will emit code to store the argument registers
6377 individually as scalar stores.
6378
6379 However, other insns may later load from the same addresses for
6380 a structure load (passing a struct to a varargs routine).
6381
6382 The alias code assumes that such aliasing can never happen, so we
6383 have to keep memory referencing insns from moving up beyond the
6384 last argument register store. So we emit a blockage insn here. */
6385 emit_insn (gen_blockage ());
6386
6387 return copy_to_reg (expand_binop (Pmode, add_optab,
6388 crtl->args.internal_arg_pointer,
6389 offset, 0, 0, OPTAB_LIB_WIDEN));
6390 }
6391
6392 static void
6393 hppa_va_start (tree valist, rtx nextarg)
6394 {
6395 nextarg = expand_builtin_saveregs ();
6396 std_expand_builtin_va_start (valist, nextarg);
6397 }
6398
6399 static tree
6400 hppa_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
6401 gimple_seq *post_p)
6402 {
6403 if (TARGET_64BIT)
6404 {
6405 /* Args grow upward. We can use the generic routines. */
6406 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6407 }
6408 else /* !TARGET_64BIT */
6409 {
6410 tree ptr = build_pointer_type (type);
6411 tree valist_type;
6412 tree t, u;
6413 unsigned int size, ofs;
6414 bool indirect;
6415
6416 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
6417 if (indirect)
6418 {
6419 type = ptr;
6420 ptr = build_pointer_type (type);
6421 }
6422 size = int_size_in_bytes (type);
6423 valist_type = TREE_TYPE (valist);
6424
6425 /* Args grow down. Not handled by generic routines. */
6426
6427 u = fold_convert (sizetype, size_in_bytes (type));
6428 u = fold_build1 (NEGATE_EXPR, sizetype, u);
6429 t = fold_build_pointer_plus (valist, u);
6430
6431 /* Align to 4 or 8 byte boundary depending on argument size. */
6432
6433 u = build_int_cst (TREE_TYPE (t), (HOST_WIDE_INT)(size > 4 ? -8 : -4));
6434 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t, u);
6435 t = fold_convert (valist_type, t);
6436
6437 t = build2 (MODIFY_EXPR, valist_type, valist, t);
6438
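/* For example, for a 1-byte argument the pointer drops by 1, is
   rounded down to a 4-byte boundary, and OFS = 3 then advances it to
   the last byte of the slot, matching the right justification of
   small arguments in the 32-bit runtime.  */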
6439 ofs = (8 - size) % 4;
6440 if (ofs != 0)
6441 t = fold_build_pointer_plus_hwi (t, ofs);
6442
6443 t = fold_convert (ptr, t);
6444 t = build_va_arg_indirect_ref (t);
6445
6446 if (indirect)
6447 t = build_va_arg_indirect_ref (t);
6448
6449 return t;
6450 }
6451 }
6452
6453 /* True if MODE is valid for the target. By "valid", we mean able to
6454 be manipulated in non-trivial ways. In particular, this means all
6455 the arithmetic is supported.
6456
6457 Currently, TImode is not valid as the HP 64-bit runtime documentation
6458 doesn't specify the alignment and calling conventions for this type.
6459 Thus, we return false when PRECISION is 2 * BITS_PER_WORD and
6460 2 * BITS_PER_WORD isn't equal to LONG_LONG_TYPE_SIZE. */
6461
6462 static bool
6463 pa_scalar_mode_supported_p (scalar_mode mode)
6464 {
6465 int precision = GET_MODE_PRECISION (mode);
6466
6467 switch (GET_MODE_CLASS (mode))
6468 {
6469 case MODE_PARTIAL_INT:
6470 case MODE_INT:
6471 if (precision == CHAR_TYPE_SIZE)
6472 return true;
6473 if (precision == SHORT_TYPE_SIZE)
6474 return true;
6475 if (precision == INT_TYPE_SIZE)
6476 return true;
6477 if (precision == LONG_TYPE_SIZE)
6478 return true;
6479 if (precision == LONG_LONG_TYPE_SIZE)
6480 return true;
6481 return false;
6482
6483 case MODE_FLOAT:
6484 if (precision == FLOAT_TYPE_SIZE)
6485 return true;
6486 if (precision == DOUBLE_TYPE_SIZE)
6487 return true;
6488 if (precision == LONG_DOUBLE_TYPE_SIZE)
6489 return true;
6490 return false;
6491
6492 case MODE_DECIMAL_FLOAT:
6493 return false;
6494
6495 default:
6496 gcc_unreachable ();
6497 }
6498 }
6499
6500 /* Return TRUE if INSN, a jump insn, has an unfilled delay slot and
6501 it branches into the delay slot. Otherwise, return FALSE. */
6502
6503 static bool
6504 branch_to_delay_slot_p (rtx_insn *insn)
6505 {
6506 rtx_insn *jump_insn;
6507
6508 if (dbr_sequence_length ())
6509 return FALSE;
6510
6511 jump_insn = next_active_insn (JUMP_LABEL_AS_INSN (insn));
6512 while (insn)
6513 {
6514 insn = next_active_insn (insn);
6515 if (jump_insn == insn)
6516 return TRUE;
6517
6518 /* We can't rely on the length of asms. So, we return FALSE when
6519 the branch is followed by an asm. */
6520 if (!insn
6521 || GET_CODE (PATTERN (insn)) == ASM_INPUT
6522 || asm_noperands (PATTERN (insn)) >= 0
6523 || get_attr_length (insn) > 0)
6524 break;
6525 }
6526
6527 return FALSE;
6528 }
6529
6530 /* Return TRUE if INSN, a forward jump insn, needs a nop in its delay slot.
6531
6532 This occurs when INSN has an unfilled delay slot and is followed
6533 by an asm. Disaster can occur if the asm is empty and the jump
6534 branches into the delay slot. So, we add a nop in the delay slot
6535 when this occurs. */
6536
6537 static bool
6538 branch_needs_nop_p (rtx_insn *insn)
6539 {
6540 rtx_insn *jump_insn;
6541
6542 if (dbr_sequence_length ())
6543 return FALSE;
6544
6545 jump_insn = next_active_insn (JUMP_LABEL_AS_INSN (insn));
6546 while (insn)
6547 {
6548 insn = next_active_insn (insn);
6549 if (!insn || jump_insn == insn)
6550 return TRUE;
6551
6552 if (!(GET_CODE (PATTERN (insn)) == ASM_INPUT
6553 || asm_noperands (PATTERN (insn)) >= 0)
6554 && get_attr_length (insn) > 0)
6555 break;
6556 }
6557
6558 return FALSE;
6559 }
6560
6561 /* Return TRUE if INSN, a forward jump insn, can use nullification
6562 to skip the following instruction. This avoids an extra cycle due
6563 to a mis-predicted branch when we fall through. */
6564
6565 static bool
6566 use_skip_p (rtx_insn *insn)
6567 {
6568 rtx_insn *jump_insn = next_active_insn (JUMP_LABEL_AS_INSN (insn));
6569
6570 while (insn)
6571 {
6572 insn = next_active_insn (insn);
6573
6574 /* We can't rely on the length of asms, so we can't skip asms. */
6575 if (!insn
6576 || GET_CODE (PATTERN (insn)) == ASM_INPUT
6577 || asm_noperands (PATTERN (insn)) >= 0)
6578 break;
6579 if (get_attr_length (insn) == 4
6580 && jump_insn == next_active_insn (insn))
6581 return TRUE;
6582 if (get_attr_length (insn) > 0)
6583 break;
6584 }
6585
6586 return FALSE;
6587 }
6588
6589 /* This routine handles all the normal conditional branch sequences we
6590 might need to generate. It handles compare immediate vs compare
6591 register, nullification of delay slots, varying length branches,
6592 negated branches, and all combinations of the above. It returns the
6593 output appropriate to emit the branch corresponding to all given
6594 parameters. */
6595
6596 const char *
6597 pa_output_cbranch (rtx *operands, int negated, rtx_insn *insn)
6598 {
6599 static char buf[100];
6600 bool useskip;
6601 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6602 int length = get_attr_length (insn);
6603 int xdelay;
6604
6605 /* A conditional branch to the following instruction (e.g. the delay slot)
6606 is asking for a disaster. This can happen when not optimizing and
6607 when jump optimization fails.
6608
6609 While it is usually safe to emit nothing, this can fail if the
6610 preceding instruction is a nullified branch with an empty delay
6611 slot and the same branch target as this branch. We could check
6612 for this but jump optimization should eliminate nop jumps. It
6613 is always safe to emit a nop. */
6614 if (branch_to_delay_slot_p (insn))
6615 return "nop";
6616
6617 /* The doubleword form of the cmpib instruction doesn't have the LEU
6618 and GTU conditions while the cmpb instruction does. Since we accept
6619 zero for cmpb, we must ensure that we use cmpb for the comparison. */
6620 if (GET_MODE (operands[1]) == DImode && operands[2] == const0_rtx)
6621 operands[2] = gen_rtx_REG (DImode, 0);
6622 if (GET_MODE (operands[2]) == DImode && operands[1] == const0_rtx)
6623 operands[1] = gen_rtx_REG (DImode, 0);
6624
6625 /* If this is a long branch with its delay slot unfilled, set `nullify'
6626 as it can nullify the delay slot and save a nop. */
6627 if (length == 8 && dbr_sequence_length () == 0)
6628 nullify = 1;
6629
6630 /* If this is a short forward conditional branch which did not get
6631 its delay slot filled, the delay slot can still be nullified. */
6632 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6633 nullify = forward_branch_p (insn);
6634
6635 /* A forward branch over a single nullified insn can be done with a
6636 comclr instruction. This avoids a single cycle penalty due to
6637 mis-predicted branch if we fall through (branch not taken). */
6638 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6639
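/* In the templates below, "{x|y}" selects between the PA 1.x and
   PA 2.0 assembler dialects according to ASSEMBLER_DIALECT, and %I2,
   %S3 and %B3 are the print_operand codes handled earlier in this
   file.  */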
6640 switch (length)
6641 {
6642 /* All short conditional branches except backwards with an unfilled
6643 delay slot. */
6644 case 4:
6645 if (useskip)
6646 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6647 else
6648 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6649 if (GET_MODE (operands[1]) == DImode)
6650 strcat (buf, "*");
6651 if (negated)
6652 strcat (buf, "%B3");
6653 else
6654 strcat (buf, "%S3");
6655 if (useskip)
6656 strcat (buf, " %2,%r1,%%r0");
6657 else if (nullify)
6658 {
6659 if (branch_needs_nop_p (insn))
6660 strcat (buf, ",n %2,%r1,%0%#");
6661 else
6662 strcat (buf, ",n %2,%r1,%0");
6663 }
6664 else
6665 strcat (buf, " %2,%r1,%0");
6666 break;
6667
6668 /* All long conditionals. Note a short backward branch with an
6669 unfilled delay slot is treated just like a long backward branch
6670 with an unfilled delay slot. */
6671 case 8:
6672 /* Handle weird backwards branch with a filled delay slot
6673 which is nullified. */
6674 if (dbr_sequence_length () != 0
6675 && ! forward_branch_p (insn)
6676 && nullify)
6677 {
6678 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6679 if (GET_MODE (operands[1]) == DImode)
6680 strcat (buf, "*");
6681 if (negated)
6682 strcat (buf, "%S3");
6683 else
6684 strcat (buf, "%B3");
6685 strcat (buf, ",n %2,%r1,.+12\n\tb %0");
6686 }
6687 /* Handle short backwards branch with an unfilled delay slot.
6688 Using a comb;nop rather than comiclr;bl saves 1 cycle for both
6689 taken and untaken branches. */
6690 else if (dbr_sequence_length () == 0
6691 && ! forward_branch_p (insn)
6692 && INSN_ADDRESSES_SET_P ()
6693 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6694 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6695 {
6696 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6697 if (GET_MODE (operands[1]) == DImode)
6698 strcat (buf, "*");
6699 if (negated)
6700 strcat (buf, "%B3 %2,%r1,%0%#");
6701 else
6702 strcat (buf, "%S3 %2,%r1,%0%#");
6703 }
6704 else
6705 {
6706 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6707 if (GET_MODE (operands[1]) == DImode)
6708 strcat (buf, "*");
6709 if (negated)
6710 strcat (buf, "%S3");
6711 else
6712 strcat (buf, "%B3");
6713 if (nullify)
6714 strcat (buf, " %2,%r1,%%r0\n\tb,n %0");
6715 else
6716 strcat (buf, " %2,%r1,%%r0\n\tb %0");
6717 }
6718 break;
6719
6720 default:
6721 /* The reversed conditional branch must branch over one additional
6722 instruction if the delay slot is filled and needs to be extracted
6723 by pa_output_lbranch. If the delay slot is empty or this is a
6724 nullified forward branch, the instruction after the reversed
6725 condition branch must be nullified. */
6726 if (dbr_sequence_length () == 0
6727 || (nullify && forward_branch_p (insn)))
6728 {
6729 nullify = 1;
6730 xdelay = 0;
6731 operands[4] = GEN_INT (length);
6732 }
6733 else
6734 {
6735 xdelay = 1;
6736 operands[4] = GEN_INT (length + 4);
6737 }
6738
6739 /* Create a reversed conditional branch which branches around
6740 the following insns. */
6741 if (GET_MODE (operands[1]) != DImode)
6742 {
6743 if (nullify)
6744 {
6745 if (negated)
6746 strcpy (buf,
6747 "{com%I2b,%S3,n %2,%r1,.+%4|cmp%I2b,%S3,n %2,%r1,.+%4}");
6748 else
6749 strcpy (buf,
6750 "{com%I2b,%B3,n %2,%r1,.+%4|cmp%I2b,%B3,n %2,%r1,.+%4}");
6751 }
6752 else
6753 {
6754 if (negated)
6755 strcpy (buf,
6756 "{com%I2b,%S3 %2,%r1,.+%4|cmp%I2b,%S3 %2,%r1,.+%4}");
6757 else
6758 strcpy (buf,
6759 "{com%I2b,%B3 %2,%r1,.+%4|cmp%I2b,%B3 %2,%r1,.+%4}");
6760 }
6761 }
6762 else
6763 {
6764 if (nullify)
6765 {
6766 if (negated)
6767 strcpy (buf,
6768 "{com%I2b,*%S3,n %2,%r1,.+%4|cmp%I2b,*%S3,n %2,%r1,.+%4}");
6769 else
6770 strcpy (buf,
6771 "{com%I2b,*%B3,n %2,%r1,.+%4|cmp%I2b,*%B3,n %2,%r1,.+%4}");
6772 }
6773 else
6774 {
6775 if (negated)
6776 strcpy (buf,
6777 "{com%I2b,*%S3 %2,%r1,.+%4|cmp%I2b,*%S3 %2,%r1,.+%4}");
6778 else
6779 strcpy (buf,
6780 "{com%I2b,*%B3 %2,%r1,.+%4|cmp%I2b,*%B3 %2,%r1,.+%4}");
6781 }
6782 }
6783
6784 output_asm_insn (buf, operands);
6785 return pa_output_lbranch (operands[0], insn, xdelay);
6786 }
6787 return buf;
6788 }
6789
6790 /* Output a PIC pc-relative instruction sequence to load the address of
6791 OPERANDS[0] to register OPERANDS[2]. OPERANDS[0] is a symbol ref
6792 or a code label. OPERANDS[1] specifies the register to use to load
6794 the program counter. OPERANDS[3] may be used for label generation.
6794 The sequence is always three instructions in length. The program
6795 counter recorded for PA 1.X is eight bytes more than that for PA 2.0.
6796 Register %r1 is clobbered. */
6797
6798 static void
6799 pa_output_pic_pcrel_sequence (rtx *operands)
6800 {
6801 gcc_assert (SYMBOL_REF_P (operands[0]) || LABEL_P (operands[0]));
6802 if (TARGET_PA_20)
6803 {
6804 /* We can use mfia to determine the current program counter. */
6805 if (TARGET_SOM || !TARGET_GAS)
6806 {
6807 operands[3] = gen_label_rtx ();
6808 targetm.asm_out.internal_label (asm_out_file, "L",
6809 CODE_LABEL_NUMBER (operands[3]));
6810 output_asm_insn ("mfia %1", operands);
6811 output_asm_insn ("addil L'%0-%l3,%1", operands);
6812 output_asm_insn ("ldo R'%0-%l3(%%r1),%2", operands);
6813 }
6814 else
6815 {
6816 output_asm_insn ("mfia %1", operands);
6817 output_asm_insn ("addil L'%0-$PIC_pcrel$0+12,%1", operands);
6818 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+16(%%r1),%2", operands);
6819 }
6820 }
6821 else
6822 {
6823 /* We need to use a branch to determine the current program counter. */
6824 output_asm_insn ("{bl|b,l} .+8,%1", operands);
6825 if (TARGET_SOM || !TARGET_GAS)
6826 {
6827 operands[3] = gen_label_rtx ();
6828 output_asm_insn ("addil L'%0-%l3,%1", operands);
6829 targetm.asm_out.internal_label (asm_out_file, "L",
6830 CODE_LABEL_NUMBER (operands[3]));
6831 output_asm_insn ("ldo R'%0-%l3(%%r1),%2", operands);
6832 }
6833 else
6834 {
6835 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%1", operands);
6836 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%2", operands);
6837 }
6838 }
6839 }
6840
6841 /* This routine handles output of long unconditional branches that
6842 exceed the maximum range of a simple branch instruction. Since
6843 we don't have a register available for the branch, we save register
6844 %r1 in the frame marker, load the branch destination DEST into %r1,
6845 execute the branch, and restore %r1 in the delay slot of the branch.
6846
6847 Since long branches may have an insn in the delay slot and the
6848 delay slot is used to restore %r1, we in general need to extract
6849 this insn and execute it before the branch. However, to facilitate
6850 use of this function by conditional branches, we also provide an
6851 option to not extract the delay insn so that it will be emitted
6852 after the long branch. So, if there is an insn in the delay slot,
6853 it is extracted if XDELAY is nonzero.
6854
6855 The lengths of the various long-branch sequences are 20, 16 and 24
6856 bytes for the portable runtime, non-PIC and PIC cases, respectively. */
6857
6858 const char *
6859 pa_output_lbranch (rtx dest, rtx_insn *insn, int xdelay)
6860 {
6861 rtx xoperands[4];
6862
6863 xoperands[0] = dest;
6864
6865 /* First, free up the delay slot. */
6866 if (xdelay && dbr_sequence_length () != 0)
6867 {
6868 /* We can't handle a jump in the delay slot. */
6869 gcc_assert (! JUMP_P (NEXT_INSN (insn)));
6870
6871 final_scan_insn (NEXT_INSN (insn), asm_out_file,
6872 optimize, 0, NULL);
6873
6874 /* Now delete the delay insn. */
6875 SET_INSN_DELETED (NEXT_INSN (insn));
6876 }
6877
6878 /* Output an insn to save %r1. The runtime documentation doesn't
6879 specify whether the "Clean Up" slot in the caller's frame can
6880 be clobbered by the callee. It isn't copied by HP's builtin
6881 alloca, so this suggests that it can be clobbered if necessary.
6882 The "Static Link" location is copied by HP builtin alloca, so
6883 we avoid using it. Using the cleanup slot might be a problem
6884 if we have to interoperate with languages that pass cleanup
6885 information. However, it should be possible to handle these
6886 situations with GCC's asm feature.
6887
6888 The "Current RP" slot is reserved for the called procedure, so
6889 we try to use it when we don't have a frame of our own. It's
6890 rather unlikely that we won't have a frame when we need to emit
6891 a very long branch.
6892
6893 Really the way to go long term is a register scavenger; go to
6894 the target of the jump and find a register which we can use
6895 as a scratch to hold the value in %r1. Then, we wouldn't have
6896 to free up the delay slot or clobber a slot that may be needed
6897 for other purposes. */
6898 if (TARGET_64BIT)
6899 {
6900 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6901 /* Use the return pointer slot in the frame marker. */
6902 output_asm_insn ("std %%r1,-16(%%r30)", xoperands);
6903 else
6904 /* Use the slot at -40 in the frame marker since HP builtin
6905 alloca doesn't copy it. */
6906 output_asm_insn ("std %%r1,-40(%%r30)", xoperands);
6907 }
6908 else
6909 {
6910 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6911 /* Use the return pointer slot in the frame marker. */
6912 output_asm_insn ("stw %%r1,-20(%%r30)", xoperands);
6913 else
6914 /* Use the "Clean Up" slot in the frame marker. In GCC,
6915 the only other use of this location is for copying a
6916 floating point double argument from a floating-point
6917 register to two general registers. The copy is done
6918 as an "atomic" operation when outputting a call, so it
6919 won't interfere with our using the location here. */
6920 output_asm_insn ("stw %%r1,-12(%%r30)", xoperands);
6921 }
6922
6923 if (TARGET_PORTABLE_RUNTIME)
6924 {
6925 output_asm_insn ("ldil L'%0,%%r1", xoperands);
6926 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
6927 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6928 }
6929 else if (flag_pic)
6930 {
6931 xoperands[1] = gen_rtx_REG (Pmode, 1);
6932 xoperands[2] = xoperands[1];
6933 pa_output_pic_pcrel_sequence (xoperands);
6934 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6935 }
6936 else
6937 /* Now output a very long branch to the original target. */
6938 output_asm_insn ("ldil L'%l0,%%r1\n\tbe R'%l0(%%sr4,%%r1)", xoperands);
6939
6940 /* Now restore the value of %r1 in the delay slot. */
6941 if (TARGET_64BIT)
6942 {
6943 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6944 return "ldd -16(%%r30),%%r1";
6945 else
6946 return "ldd -40(%%r30),%%r1";
6947 }
6948 else
6949 {
6950 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6951 return "ldw -20(%%r30),%%r1";
6952 else
6953 return "ldw -12(%%r30),%%r1";
6954 }
6955 }
6956
6957 /* This routine handles all the branch-on-bit conditional branch sequences we
6958 might need to generate. It handles nullification of delay slots,
6959 varying length branches, negated branches and all combinations of the
6960 above. It returns the appropriate output template to emit the branch. */
6961
6962 const char *
6963 pa_output_bb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx_insn *insn, int which)
6964 {
6965 static char buf[100];
6966 bool useskip;
6967 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6968 int length = get_attr_length (insn);
6969 int xdelay;
6970
6971 /* A conditional branch to the following instruction (e.g. the delay slot) is
6972 asking for a disaster. I do not think this can happen as this pattern
6973 is only used when optimizing; jump optimization should eliminate the
6974 jump. But be prepared just in case. */
6975
6976 if (branch_to_delay_slot_p (insn))
6977 return "nop";
6978
6979 /* If this is a long branch with its delay slot unfilled, set `nullify'
6980 as it can nullify the delay slot and save a nop. */
6981 if (length == 8 && dbr_sequence_length () == 0)
6982 nullify = 1;
6983
6984 /* If this is a short forward conditional branch which did not get
6985 its delay slot filled, the delay slot can still be nullified. */
6986 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6987 nullify = forward_branch_p (insn);
6988
6989 /* A forward branch over a single nullified insn can be done with an
6990 extrs instruction. This avoids a single cycle penalty due to
6991 mis-predicted branch if we fall through (branch not taken). */
6992 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6993
6994 switch (length)
6995 {
6996
6997 /* All short conditional branches except backwards with an unfilled
6998 delay slot. */
6999 case 4:
7000 if (useskip)
7001 strcpy (buf, "{extrs,|extrw,s,}");
7002 else
7003 strcpy (buf, "bb,");
7004 if (useskip && GET_MODE (operands[0]) == DImode)
7005 strcpy (buf, "extrd,s,*");
7006 else if (GET_MODE (operands[0]) == DImode)
7007 strcpy (buf, "bb,*");
7008 if ((which == 0 && negated)
7009 || (which == 1 && ! negated))
7010 strcat (buf, ">=");
7011 else
7012 strcat (buf, "<");
7013 if (useskip)
7014 strcat (buf, " %0,%1,1,%%r0");
7015 else if (nullify && negated)
7016 {
7017 if (branch_needs_nop_p (insn))
7018 strcat (buf, ",n %0,%1,%3%#");
7019 else
7020 strcat (buf, ",n %0,%1,%3");
7021 }
7022 else if (nullify && ! negated)
7023 {
7024 if (branch_needs_nop_p (insn))
7025 strcat (buf, ",n %0,%1,%2%#");
7026 else
7027 strcat (buf, ",n %0,%1,%2");
7028 }
7029 else if (! nullify && negated)
7030 strcat (buf, " %0,%1,%3");
7031 else if (! nullify && ! negated)
7032 strcat (buf, " %0,%1,%2");
7033 break;
7034
7035 /* All long conditionals. Note a short backward branch with an
7036 unfilled delay slot is treated just like a long backward branch
7037 with an unfilled delay slot. */
7038 case 8:
7039 /* Handle weird backwards branch with a filled delay slot
7040 which is nullified. */
7041 if (dbr_sequence_length () != 0
7042 && ! forward_branch_p (insn)
7043 && nullify)
7044 {
7045 strcpy (buf, "bb,");
7046 if (GET_MODE (operands[0]) == DImode)
7047 strcat (buf, "*");
7048 if ((which == 0 && negated)
7049 || (which == 1 && ! negated))
7050 strcat (buf, "<");
7051 else
7052 strcat (buf, ">=");
7053 if (negated)
7054 strcat (buf, ",n %0,%1,.+12\n\tb %3");
7055 else
7056 strcat (buf, ",n %0,%1,.+12\n\tb %2");
7057 }
7058 /* Handle short backwards branch with an unfilled delay slot.
7059 Using a bb;nop rather than extrs;bl saves 1 cycle for both
7060 taken and untaken branches. */
7061 else if (dbr_sequence_length () == 0
7062 && ! forward_branch_p (insn)
7063 && INSN_ADDRESSES_SET_P ()
7064 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7065 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7066 {
7067 strcpy (buf, "bb,");
7068 if (GET_MODE (operands[0]) == DImode)
7069 strcat (buf, "*");
7070 if ((which == 0 && negated)
7071 || (which == 1 && ! negated))
7072 strcat (buf, ">=");
7073 else
7074 strcat (buf, "<");
7075 if (negated)
7076 strcat (buf, " %0,%1,%3%#");
7077 else
7078 strcat (buf, " %0,%1,%2%#");
7079 }
7080 else
7081 {
7082 if (GET_MODE (operands[0]) == DImode)
7083 strcpy (buf, "extrd,s,*");
7084 else
7085 strcpy (buf, "{extrs,|extrw,s,}");
7086 if ((which == 0 && negated)
7087 || (which == 1 && ! negated))
7088 strcat (buf, "<");
7089 else
7090 strcat (buf, ">=");
7091 if (nullify && negated)
7092 strcat (buf, " %0,%1,1,%%r0\n\tb,n %3");
7093 else if (nullify && ! negated)
7094 strcat (buf, " %0,%1,1,%%r0\n\tb,n %2");
7095 else if (negated)
7096 strcat (buf, " %0,%1,1,%%r0\n\tb %3");
7097 else
7098 strcat (buf, " %0,%1,1,%%r0\n\tb %2");
7099 }
7100 break;
7101
7102 default:
7103 /* The reversed conditional branch must branch over one additional
7104 instruction if the delay slot is filled and needs to be extracted
7105 by pa_output_lbranch. If the delay slot is empty or this is a
7106 nullified forward branch, the instruction after the reversed
7107 condition branch must be nullified. */
7108 if (dbr_sequence_length () == 0
7109 || (nullify && forward_branch_p (insn)))
7110 {
7111 nullify = 1;
7112 xdelay = 0;
7113 operands[4] = GEN_INT (length);
7114 }
7115 else
7116 {
7117 xdelay = 1;
7118 operands[4] = GEN_INT (length + 4);
7119 }
7120
7121 if (GET_MODE (operands[0]) == DImode)
7122 strcpy (buf, "bb,*");
7123 else
7124 strcpy (buf, "bb,");
7125 if ((which == 0 && negated)
7126 || (which == 1 && !negated))
7127 strcat (buf, "<");
7128 else
7129 strcat (buf, ">=");
7130 if (nullify)
7131 strcat (buf, ",n %0,%1,.+%4");
7132 else
7133 strcat (buf, " %0,%1,.+%4");
7134 output_asm_insn (buf, operands);
7135 return pa_output_lbranch (negated ? operands[3] : operands[2],
7136 insn, xdelay);
7137 }
7138 return buf;
7139 }
7140
7141 /* This routine handles all the branch-on-variable-bit conditional branch
7142 sequences we might need to generate. It handles nullification of delay
7143 slots, varying length branches, negated branches and all combinations
7144 of the above. It returns the appropriate output template to emit the
7145 branch. */
7146
7147 const char *
7148 pa_output_bvb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx_insn *insn,
7149 int which)
7150 {
7151 static char buf[100];
7152 bool useskip;
7153 int nullify = INSN_ANNULLED_BRANCH_P (insn);
7154 int length = get_attr_length (insn);
7155 int xdelay;
7156
7157 /* A conditional branch to the following instruction (e.g. the delay slot) is
7158 asking for a disaster. I do not think this can happen as this pattern
7159 is only used when optimizing; jump optimization should eliminate the
7160 jump. But be prepared just in case. */
7161
7162 if (branch_to_delay_slot_p (insn))
7163 return "nop";
7164
7165 /* If this is a long branch with its delay slot unfilled, set `nullify'
7166 as it can nullify the delay slot and save a nop. */
7167 if (length == 8 && dbr_sequence_length () == 0)
7168 nullify = 1;
7169
7170 /* If this is a short forward conditional branch which did not get
7171 its delay slot filled, the delay slot can still be nullified. */
7172 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7173 nullify = forward_branch_p (insn);
7174
7175 /* A forward branch over a single nullified insn can be done with an
7176 extrs instruction. This avoids a single cycle penalty due to
7177 mis-predicted branch if we fall through (branch not taken). */
7178 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
7179
7180 switch (length)
7181 {
7182
7183 /* All short conditional branches except backwards with an unfilled
7184 delay slot. */
7185 case 4:
7186 if (useskip)
7187 strcpy (buf, "{vextrs,|extrw,s,}");
7188 else
7189 strcpy (buf, "{bvb,|bb,}");
7190 if (useskip && GET_MODE (operands[0]) == DImode)
7191 strcpy (buf, "extrd,s,*");
7192 else if (GET_MODE (operands[0]) == DImode)
7193 strcpy (buf, "bb,*");
7194 if ((which == 0 && negated)
7195 || (which == 1 && ! negated))
7196 strcat (buf, ">=");
7197 else
7198 strcat (buf, "<");
7199 if (useskip)
7200 strcat (buf, "{ %0,1,%%r0| %0,%%sar,1,%%r0}");
7201 else if (nullify && negated)
7202 {
7203 if (branch_needs_nop_p (insn))
7204 strcat (buf, "{,n %0,%3%#|,n %0,%%sar,%3%#}");
7205 else
7206 strcat (buf, "{,n %0,%3|,n %0,%%sar,%3}");
7207 }
7208 else if (nullify && ! negated)
7209 {
7210 if (branch_needs_nop_p (insn))
7211 strcat (buf, "{,n %0,%2%#|,n %0,%%sar,%2%#}");
7212 else
7213 strcat (buf, "{,n %0,%2|,n %0,%%sar,%2}");
7214 }
7215 else if (! nullify && negated)
7216 strcat (buf, "{ %0,%3| %0,%%sar,%3}");
7217 else if (! nullify && ! negated)
7218 strcat (buf, "{ %0,%2| %0,%%sar,%2}");
7219 break;
7220
7221 /* All long conditionals. Note a short backward branch with an
7222 unfilled delay slot is treated just like a long backward branch
7223 with an unfilled delay slot. */
7224 case 8:
7225 /* Handle weird backwards branch with a filled delay slot
7226 which is nullified. */
7227 if (dbr_sequence_length () != 0
7228 && ! forward_branch_p (insn)
7229 && nullify)
7230 {
7231 strcpy (buf, "{bvb,|bb,}");
7232 if (GET_MODE (operands[0]) == DImode)
7233 strcat (buf, "*");
7234 if ((which == 0 && negated)
7235 || (which == 1 && ! negated))
7236 strcat (buf, "<");
7237 else
7238 strcat (buf, ">=");
7239 if (negated)
7240 strcat (buf, "{,n %0,.+12\n\tb %3|,n %0,%%sar,.+12\n\tb %3}");
7241 else
7242 strcat (buf, "{,n %0,.+12\n\tb %2|,n %0,%%sar,.+12\n\tb %2}");
7243 }
7244 /* Handle short backwards branch with an unfilled delay slot.
7245 Using a bb;nop rather than extrs;bl saves 1 cycle for both
7246 taken and untaken branches. */
7247 else if (dbr_sequence_length () == 0
7248 && ! forward_branch_p (insn)
7249 && INSN_ADDRESSES_SET_P ()
7250 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7251 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7252 {
7253 strcpy (buf, "{bvb,|bb,}");
7254 if (GET_MODE (operands[0]) == DImode)
7255 strcat (buf, "*");
7256 if ((which == 0 && negated)
7257 || (which == 1 && ! negated))
7258 strcat (buf, ">=");
7259 else
7260 strcat (buf, "<");
7261 if (negated)
7262 strcat (buf, "{ %0,%3%#| %0,%%sar,%3%#}");
7263 else
7264 strcat (buf, "{ %0,%2%#| %0,%%sar,%2%#}");
7265 }
7266 else
7267 {
7268 strcpy (buf, "{vextrs,|extrw,s,}");
7269 if (GET_MODE (operands[0]) == DImode)
7270 strcpy (buf, "extrd,s,*");
7271 if ((which == 0 && negated)
7272 || (which == 1 && ! negated))
7273 strcat (buf, "<");
7274 else
7275 strcat (buf, ">=");
7276 if (nullify && negated)
7277 strcat (buf, "{ %0,1,%%r0\n\tb,n %3| %0,%%sar,1,%%r0\n\tb,n %3}");
7278 else if (nullify && ! negated)
7279 strcat (buf, "{ %0,1,%%r0\n\tb,n %2| %0,%%sar,1,%%r0\n\tb,n %2}");
7280 else if (negated)
7281 strcat (buf, "{ %0,1,%%r0\n\tb %3| %0,%%sar,1,%%r0\n\tb %3}");
7282 else
7283 strcat (buf, "{ %0,1,%%r0\n\tb %2| %0,%%sar,1,%%r0\n\tb %2}");
7284 }
7285 break;
7286
7287 default:
7288 /* The reversed conditional branch must branch over one additional
7289 instruction if the delay slot is filled and needs to be extracted
7290 by pa_output_lbranch. If the delay slot is empty or this is a
7291 nullified forward branch, the instruction after the reversed
7292 conditional branch must be nullified. */
7293 if (dbr_sequence_length () == 0
7294 || (nullify && forward_branch_p (insn)))
7295 {
7296 nullify = 1;
7297 xdelay = 0;
7298 operands[4] = GEN_INT (length);
7299 }
7300 else
7301 {
7302 xdelay = 1;
7303 operands[4] = GEN_INT (length + 4);
7304 }
7305
7306 if (GET_MODE (operands[0]) == DImode)
7307 strcpy (buf, "bb,*");
7308 else
7309 strcpy (buf, "{bvb,|bb,}");
7310 if ((which == 0 && negated)
7311 || (which == 1 && !negated))
7312 strcat (buf, "<");
7313 else
7314 strcat (buf, ">=");
7315 if (nullify)
7316 strcat (buf, ",n {%0,.+%4|%0,%%sar,.+%4}");
7317 else
7318 strcat (buf, " {%0,.+%4|%0,%%sar,.+%4}");
7319 output_asm_insn (buf, operands);
7320 return pa_output_lbranch (negated ? operands[3] : operands[2],
7321 insn, xdelay);
7322 }
7323 return buf;
7324 }
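/* For illustration (hypothetical operand values): with which == 0,
   negated == 0, nullify == 0 and a 32-bit operand, the length == 4
   case above builds the template

	{bvb,< %0,%2|bb,< %0,%%sar,%2}

   i.e. "bvb,< %0,%2" with the legacy mnemonics and
   "bb,< %0,%sar,%2" with the PA 2.0 mnemonics.  */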
7325
7326 /* Return the output template for emitting a dbra type insn.
7327
7328 Note it may perform some output operations on its own before
7329 returning the final output string. */
7330 const char *
7331 pa_output_dbra (rtx *operands, rtx_insn *insn, int which_alternative)
7332 {
7333 int length = get_attr_length (insn);
7334
7335 /* A conditional branch to the following instruction (e.g. the delay slot) is
7336 asking for a disaster. Be prepared! */
7337
7338 if (branch_to_delay_slot_p (insn))
7339 {
7340 if (which_alternative == 0)
7341 return "ldo %1(%0),%0";
7342 else if (which_alternative == 1)
7343 {
7344 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)", operands);
7345 output_asm_insn ("ldw -16(%%r30),%4", operands);
7346 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
7347 return "{fldws|fldw} -16(%%r30),%0";
7348 }
7349 else
7350 {
7351 output_asm_insn ("ldw %0,%4", operands);
7352 return "ldo %1(%4),%4\n\tstw %4,%0";
7353 }
7354 }
7355
7356 if (which_alternative == 0)
7357 {
7358 int nullify = INSN_ANNULLED_BRANCH_P (insn);
7359 int xdelay;
7360
7361 /* If this is a long branch with its delay slot unfilled, set `nullify'
7362 as it can nullify the delay slot and save a nop. */
7363 if (length == 8 && dbr_sequence_length () == 0)
7364 nullify = 1;
7365
7366 /* If this is a short forward conditional branch which did not get
7367 its delay slot filled, the delay slot can still be nullified. */
7368 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7369 nullify = forward_branch_p (insn);
7370
7371 switch (length)
7372 {
7373 case 4:
7374 if (nullify)
7375 {
7376 if (branch_needs_nop_p (insn))
7377 return "addib,%C2,n %1,%0,%3%#";
7378 else
7379 return "addib,%C2,n %1,%0,%3";
7380 }
7381 else
7382 return "addib,%C2 %1,%0,%3";
7383
7384 case 8:
7385 /* Handle weird backwards branch with a filled delay slot
7386 which is nullified. */
7387 if (dbr_sequence_length () != 0
7388 && ! forward_branch_p (insn)
7389 && nullify)
7390 return "addib,%N2,n %1,%0,.+12\n\tb %3";
7391 /* Handle short backwards branch with an unfilled delay slot.
7392 Using an addb;nop rather than addi;bl saves 1 cycle for both
7393 taken and untaken branches. */
7394 else if (dbr_sequence_length () == 0
7395 && ! forward_branch_p (insn)
7396 && INSN_ADDRESSES_SET_P ()
7397 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7398 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7399 return "addib,%C2 %1,%0,%3%#";
7400
7401 /* Handle normal cases. */
7402 if (nullify)
7403 return "addi,%N2 %1,%0,%0\n\tb,n %3";
7404 else
7405 return "addi,%N2 %1,%0,%0\n\tb %3";
7406
7407 default:
7408 /* The reversed conditional branch must branch over one additional
7409 instruction if the delay slot is filled and needs to be extracted
7410 by pa_output_lbranch. If the delay slot is empty or this is a
7411 nullified forward branch, the instruction after the reversed
7412 conditional branch must be nullified. */
7413 if (dbr_sequence_length () == 0
7414 || (nullify && forward_branch_p (insn)))
7415 {
7416 nullify = 1;
7417 xdelay = 0;
7418 operands[4] = GEN_INT (length);
7419 }
7420 else
7421 {
7422 xdelay = 1;
7423 operands[4] = GEN_INT (length + 4);
7424 }
7425
7426 if (nullify)
7427 output_asm_insn ("addib,%N2,n %1,%0,.+%4", operands);
7428 else
7429 output_asm_insn ("addib,%N2 %1,%0,.+%4", operands);
7430
7431 return pa_output_lbranch (operands[3], insn, xdelay);
7432 }
7433
7434 }
7435 /* Deal with gross reload from FP register case. */
7436 else if (which_alternative == 1)
7437 {
7438 /* Move loop counter from FP register to MEM then into a GR,
7439 increment the GR, store the GR into MEM, and finally reload
7440 the FP register from MEM from within the branch's delay slot. */
7441 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)\n\tldw -16(%%r30),%4",
7442 operands);
7443 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
7444 if (length == 24)
7445 return "{comb|cmpb},%S2 %%r0,%4,%3\n\t{fldws|fldw} -16(%%r30),%0";
7446 else if (length == 28)
7447 return "{comclr|cmpclr},%B2 %%r0,%4,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
7448 else
7449 {
7450 operands[5] = GEN_INT (length - 16);
7451 output_asm_insn ("{comb|cmpb},%B2 %%r0,%4,.+%5", operands);
7452 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
7453 return pa_output_lbranch (operands[3], insn, 0);
7454 }
7455 }
7456 /* Deal with gross reload from memory case. */
7457 else
7458 {
7459 /* Reload loop counter from memory, the store back to memory
7460 happens in the branch's delay slot. */
7461 output_asm_insn ("ldw %0,%4", operands);
7462 if (length == 12)
7463 return "addib,%C2 %1,%4,%3\n\tstw %4,%0";
7464 else if (length == 16)
7465 return "addi,%N2 %1,%4,%4\n\tb %3\n\tstw %4,%0";
7466 else
7467 {
7468 operands[5] = GEN_INT (length - 4);
7469 output_asm_insn ("addib,%N2 %1,%4,.+%5\n\tstw %4,%0", operands);
7470 return pa_output_lbranch (operands[3], insn, 0);
7471 }
7472 }
7473 }
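/* For illustration (hypothetical operands): in the common case
   (which_alternative == 0, length == 4, no nullification) the template
   "addib,%C2 %1,%0,%3" performs the add and the branch in one insn,
   e.g.

	addib,<> -1,%r26,L$loop

   decrements %r26 and branches to L$loop while the result is
   nonzero.  */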
7474
7475 /* Return the output template for emitting a movb type insn.
7476
7477 Note it may perform some output operations on its own before
7478 returning the final output string. */
7479 const char *
7480 pa_output_movb (rtx *operands, rtx_insn *insn, int which_alternative,
7481 int reverse_comparison)
7482 {
7483 int length = get_attr_length (insn);
7484
7485 /* A conditional branch to the following instruction (e.g. the delay slot) is
7486 asking for a disaster. Be prepared! */
7487
7488 if (branch_to_delay_slot_p (insn))
7489 {
7490 if (which_alternative == 0)
7491 return "copy %1,%0";
7492 else if (which_alternative == 1)
7493 {
7494 output_asm_insn ("stw %1,-16(%%r30)", operands);
7495 return "{fldws|fldw} -16(%%r30),%0";
7496 }
7497 else if (which_alternative == 2)
7498 return "stw %1,%0";
7499 else
7500 return "mtsar %r1";
7501 }
7502
7503 /* Support the second variant. */
7504 if (reverse_comparison)
7505 PUT_CODE (operands[2], reverse_condition (GET_CODE (operands[2])));
7506
7507 if (which_alternative == 0)
7508 {
7509 int nullify = INSN_ANNULLED_BRANCH_P (insn);
7510 int xdelay;
7511
7512 /* If this is a long branch with its delay slot unfilled, set `nullify'
7513 as it can nullify the delay slot and save a nop. */
7514 if (length == 8 && dbr_sequence_length () == 0)
7515 nullify = 1;
7516
7517 /* If this is a short forward conditional branch which did not get
7518 its delay slot filled, the delay slot can still be nullified. */
7519 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7520 nullify = forward_branch_p (insn);
7521
7522 switch (length)
7523 {
7524 case 4:
7525 if (nullify)
7526 {
7527 if (branch_needs_nop_p (insn))
7528 return "movb,%C2,n %1,%0,%3%#";
7529 else
7530 return "movb,%C2,n %1,%0,%3";
7531 }
7532 else
7533 return "movb,%C2 %1,%0,%3";
7534
7535 case 8:
7536 /* Handle weird backwards branch with a filled delay slot
7537 which is nullified. */
7538 if (dbr_sequence_length () != 0
7539 && ! forward_branch_p (insn)
7540 && nullify)
7541 return "movb,%N2,n %1,%0,.+12\n\tb %3";
7542
7543 /* Handle short backwards branch with an unfilled delay slot.
7544 Using a movb;nop rather than or;bl saves 1 cycle for both
7545 taken and untaken branches. */
7546 else if (dbr_sequence_length () == 0
7547 && ! forward_branch_p (insn)
7548 && INSN_ADDRESSES_SET_P ()
7549 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7550 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7551 return "movb,%C2 %1,%0,%3%#";
7552 /* Handle normal cases. */
7553 if (nullify)
7554 return "or,%N2 %1,%%r0,%0\n\tb,n %3";
7555 else
7556 return "or,%N2 %1,%%r0,%0\n\tb %3";
7557
7558 default:
7559 /* The reversed conditional branch must branch over one additional
7560 instruction if the delay slot is filled and needs to be extracted
7561 by pa_output_lbranch. If the delay slot is empty or this is a
7562 nullified forward branch, the instruction after the reversed
7563 conditional branch must be nullified. */
7564 if (dbr_sequence_length () == 0
7565 || (nullify && forward_branch_p (insn)))
7566 {
7567 nullify = 1;
7568 xdelay = 0;
7569 operands[4] = GEN_INT (length);
7570 }
7571 else
7572 {
7573 xdelay = 1;
7574 operands[4] = GEN_INT (length + 4);
7575 }
7576
7577 if (nullify)
7578 output_asm_insn ("movb,%N2,n %1,%0,.+%4", operands);
7579 else
7580 output_asm_insn ("movb,%N2 %1,%0,.+%4", operands);
7581
7582 return pa_output_lbranch (operands[3], insn, xdelay);
7583 }
7584 }
7585 /* Deal with gross reload for FP destination register case. */
7586 else if (which_alternative == 1)
7587 {
7588 /* Move source register to MEM, perform the branch test, then
7589 finally load the FP register from MEM from within the branch's
7590 delay slot. */
7591 output_asm_insn ("stw %1,-16(%%r30)", operands);
7592 if (length == 12)
7593 return "{comb|cmpb},%S2 %%r0,%1,%3\n\t{fldws|fldw} -16(%%r30),%0";
7594 else if (length == 16)
7595 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
7596 else
7597 {
7598 operands[4] = GEN_INT (length - 4);
7599 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4", operands);
7600 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
7601 return pa_output_lbranch (operands[3], insn, 0);
7602 }
7603 }
7604 /* Deal with gross reload from memory case. */
7605 else if (which_alternative == 2)
7606 {
7607 /* Reload loop counter from memory, the store back to memory
7608 happens in the branch's delay slot. */
7609 if (length == 8)
7610 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tstw %1,%0";
7611 else if (length == 12)
7612 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tstw %1,%0";
7613 else
7614 {
7615 operands[4] = GEN_INT (length);
7616 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tstw %1,%0",
7617 operands);
7618 return pa_output_lbranch (operands[3], insn, 0);
7619 }
7620 }
7621 /* Handle SAR as a destination. */
7622 else
7623 {
7624 if (length == 8)
7625 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tmtsar %r1";
7626 else if (length == 12)
7627 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tmtsar %r1";
7628 else
7629 {
7630 operands[4] = GEN_INT (length);
7631 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tmtsar %r1",
7632 operands);
7633 return pa_output_lbranch (operands[3], insn, 0);
7634 }
7635 }
7636 }
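/* For illustration (hypothetical operands): the common case
   (which_alternative == 0, length == 4, no nullification) yields the
   template "movb,%C2 %1,%0,%3", e.g.

	movb,= %r4,%r26,L$done

   which copies %r4 into %r26 and branches to L$done if the moved
   value is zero.  */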
7637
7638 /* Copy any FP arguments in INSN into integer registers. */
7639 static void
7640 copy_fp_args (rtx_insn *insn)
7641 {
7642 rtx link;
7643 rtx xoperands[2];
7644
7645 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7646 {
7647 int arg_mode, regno;
7648 rtx use = XEXP (link, 0);
7649
7650 if (! (GET_CODE (use) == USE
7651 && GET_CODE (XEXP (use, 0)) == REG
7652 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7653 continue;
7654
7655 arg_mode = GET_MODE (XEXP (use, 0));
7656 regno = REGNO (XEXP (use, 0));
7657
7658 /* Is it a floating point register? */
7659 if (regno >= 32 && regno <= 39)
7660 {
7661 /* Copy the FP register into an integer register via memory. */
7662 if (arg_mode == SFmode)
7663 {
7664 xoperands[0] = XEXP (use, 0);
7665 xoperands[1] = gen_rtx_REG (SImode, 26 - (regno - 32) / 2);
7666 output_asm_insn ("{fstws|fstw} %0,-16(%%sr0,%%r30)", xoperands);
7667 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
7668 }
7669 else
7670 {
7671 xoperands[0] = XEXP (use, 0);
7672 xoperands[1] = gen_rtx_REG (DImode, 25 - (regno - 34) / 2);
7673 output_asm_insn ("{fstds|fstd} %0,-16(%%sr0,%%r30)", xoperands);
7674 output_asm_insn ("ldw -12(%%sr0,%%r30),%R1", xoperands);
7675 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
7676 }
7677 }
7678 }
7679 }
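/* For illustration (assuming hard register 32 prints as %fr4): an
   SFmode argument in %fr4 is copied to %r26 by the sequence above as

	fstw %fr4,-16(%sr0,%r30)
	ldw -16(%sr0,%r30),%r26

   (fstws with the legacy mnemonics), while a DFmode argument is stored
   with fstd and reloaded into a GR pair with two ldw's.  */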
7680
7681 /* Compute length of the FP argument copy sequence for INSN. */
7682 static int
7683 length_fp_args (rtx_insn *insn)
7684 {
7685 int length = 0;
7686 rtx link;
7687
7688 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7689 {
7690 int arg_mode, regno;
7691 rtx use = XEXP (link, 0);
7692
7693 if (! (GET_CODE (use) == USE
7694 && GET_CODE (XEXP (use, 0)) == REG
7695 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7696 continue;
7697
7698 arg_mode = GET_MODE (XEXP (use, 0));
7699 regno = REGNO (XEXP (use, 0));
7700
7701 /* Is it a floating point register? */
7702 if (regno >= 32 && regno <= 39)
7703 {
7704 if (arg_mode == SFmode)
7705 length += 8;
7706 else
7707 length += 12;
7708 }
7709 }
7710
7711 return length;
7712 }
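/* For example, a call with one SFmode argument in %fr4 and one DFmode
   argument in %fr5 gets a copy sequence of 8 + 12 = 20 bytes.  */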
7713
7714 /* Return the attribute length for the millicode call instruction INSN.
7715 The length must match the code generated by pa_output_millicode_call.
7716 We include the delay slot in the returned length as it is better to
7717 overestimate the length than to underestimate it. */
7718
7719 int
7720 pa_attr_length_millicode_call (rtx_insn *insn)
7721 {
7722 unsigned long distance = -1;
7723 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7724
7725 if (INSN_ADDRESSES_SET_P ())
7726 {
7727 distance = (total + insn_current_reference_address (insn));
7728 if (distance < total)
7729 distance = -1;
7730 }
7731
7732 if (TARGET_64BIT)
7733 {
7734 if (!TARGET_LONG_CALLS && distance < 7600000)
7735 return 8;
7736
7737 return 20;
7738 }
7739 else if (TARGET_PORTABLE_RUNTIME)
7740 return 24;
7741 else
7742 {
7743 if (!TARGET_LONG_CALLS && distance < MAX_PCREL17F_OFFSET)
7744 return 8;
7745
7746 if (!flag_pic)
7747 return 12;
7748
7749 return 24;
7750 }
7751 }
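/* For example, on a 32-bit non-PIC target, a millicode call within
   MAX_PCREL17F_OFFSET is a single bl plus its delay slot (8 bytes);
   out of range it is the ldil/ble sequence plus delay slot
   (12 bytes).  */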
7752
7753 /* INSN is a function call.
7754
7755 CALL_DEST is the routine we are calling. */
7756
7757 const char *
7758 pa_output_millicode_call (rtx_insn *insn, rtx call_dest)
7759 {
7760 int attr_length = get_attr_length (insn);
7761 int seq_length = dbr_sequence_length ();
7762 rtx xoperands[4];
7763
7764 xoperands[0] = call_dest;
7765
7766 /* Handle the common case where we are sure that the branch will
7767 reach the beginning of the $CODE$ subspace. The within-reach
7768 form of the $$sh_func_adrs call has a length of 28. Because it
7769 has an attribute type of sh_func_adrs, it never has a nonzero
7770 sequence length (i.e., the delay slot is never filled). */
7771 if (!TARGET_LONG_CALLS
7772 && (attr_length == 8
7773 || (attr_length == 28
7774 && get_attr_type (insn) == TYPE_SH_FUNC_ADRS)))
7775 {
7776 xoperands[1] = gen_rtx_REG (Pmode, TARGET_64BIT ? 2 : 31);
7777 output_asm_insn ("{bl|b,l} %0,%1", xoperands);
7778 }
7779 else
7780 {
7781 if (TARGET_64BIT)
7782 {
7783 /* It might seem that one insn could be saved by accessing
7784 the millicode function using the linkage table. However,
7785 this doesn't work in shared libraries and other dynamically
7786 loaded objects. Using a pc-relative sequence also avoids
7787 problems related to the implicit use of the gp register. */
7788 xoperands[1] = gen_rtx_REG (Pmode, 1);
7789 xoperands[2] = xoperands[1];
7790 pa_output_pic_pcrel_sequence (xoperands);
7791 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7792 }
7793 else if (TARGET_PORTABLE_RUNTIME)
7794 {
7795 /* Pure portable runtime doesn't allow be/ble; we also don't
7796 have PIC support in the assembler/linker, so this sequence
7797 is needed. */
7798
7799 /* Get the address of our target into %r1. */
7800 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7801 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
7802
7803 /* Get our return address into %r31. */
7804 output_asm_insn ("{bl|b,l} .+8,%%r31", xoperands);
7805 output_asm_insn ("addi 8,%%r31,%%r31", xoperands);
7806
7807 /* Jump to our target address in %r1. */
7808 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7809 }
7810 else if (!flag_pic)
7811 {
7812 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7813 if (TARGET_PA_20)
7814 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31", xoperands);
7815 else
7816 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7817 }
7818 else
7819 {
7820 xoperands[1] = gen_rtx_REG (Pmode, 31);
7821 xoperands[2] = gen_rtx_REG (Pmode, 1);
7822 pa_output_pic_pcrel_sequence (xoperands);
7823
7824 /* Adjust return address. */
7825 output_asm_insn ("ldo {16|24}(%%r31),%%r31", xoperands);
7826
7827 /* Jump to our target address in %r1. */
7828 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7829 }
7830 }
7831
7832 if (seq_length == 0)
7833 output_asm_insn ("nop", xoperands);
7834
7835 return "";
7836 }
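/* As a sketch, using the millicode routine $$mulI for illustration,
   the common in-reach case above emits

	bl $$mulI,%r31
	nop

   on a 32-bit target, with the nop only present when the delay slot
   was not filled.  */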
7837
7838 /* Return the attribute length of the call instruction INSN. The SIBCALL
7839 flag indicates whether INSN is a regular call or a sibling call. The
7840 length returned must be longer than the code actually generated by
7841 pa_output_call. Since branch shortening is done before delay branch
7842 sequencing, there is no way to determine whether or not the delay
7843 slot will be filled during branch shortening. Even when the delay
7844 slot is filled, we may have to add a nop if the delay slot contains
7845 a branch that can't reach its target. Thus, we always have to include
7846 the delay slot in the length estimate. This used to be done in
7847 pa_adjust_insn_length but we do it here now as some sequences always
7848 fill the delay slot and we can save four bytes in the estimate for
7849 these sequences. */
7850
7851 int
7852 pa_attr_length_call (rtx_insn *insn, int sibcall)
7853 {
7854 int local_call;
7855 rtx call, call_dest;
7856 tree call_decl;
7857 int length = 0;
7858 rtx pat = PATTERN (insn);
7859 unsigned long distance = -1;
7860
7861 gcc_assert (CALL_P (insn));
7862
7863 if (INSN_ADDRESSES_SET_P ())
7864 {
7865 unsigned long total;
7866
7867 total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7868 distance = (total + insn_current_reference_address (insn));
7869 if (distance < total)
7870 distance = -1;
7871 }
7872
7873 gcc_assert (GET_CODE (pat) == PARALLEL);
7874
7875 /* Get the call rtx. */
7876 call = XVECEXP (pat, 0, 0);
7877 if (GET_CODE (call) == SET)
7878 call = SET_SRC (call);
7879
7880 gcc_assert (GET_CODE (call) == CALL);
7881
7882 /* Determine if this is a local call. */
7883 call_dest = XEXP (XEXP (call, 0), 0);
7884 call_decl = SYMBOL_REF_DECL (call_dest);
7885 local_call = call_decl && targetm.binds_local_p (call_decl);
7886
7887 /* pc-relative branch. */
7888 if (!TARGET_LONG_CALLS
7889 && ((TARGET_PA_20 && !sibcall && distance < 7600000)
7890 || distance < MAX_PCREL17F_OFFSET))
7891 length += 8;
7892
7893 /* 64-bit plabel sequence. */
7894 else if (TARGET_64BIT && !local_call)
7895 length += sibcall ? 28 : 24;
7896
7897 /* non-pic long absolute branch sequence. */
7898 else if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7899 length += 12;
7900
7901 /* long pc-relative branch sequence. */
7902 else if (TARGET_LONG_PIC_SDIFF_CALL
7903 || (TARGET_GAS && !TARGET_SOM && local_call))
7904 {
7905 length += 20;
7906
7907 if (!TARGET_PA_20 && !TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
7908 length += 8;
7909 }
7910
7911 /* 32-bit plabel sequence. */
7912 else
7913 {
7914 length += 32;
7915
7916 if (TARGET_SOM)
7917 length += length_fp_args (insn);
7918
7919 if (flag_pic)
7920 length += 4;
7921
7922 if (!TARGET_PA_20)
7923 {
7924 if (!sibcall)
7925 length += 8;
7926
7927 if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
7928 length += 8;
7929 }
7930 }
7931
7932 return length;
7933 }
7934
7935 /* INSN is a function call.
7936
7937 CALL_DEST is the routine we are calling. */
7938
7939 const char *
7940 pa_output_call (rtx_insn *insn, rtx call_dest, int sibcall)
7941 {
7942 int seq_length = dbr_sequence_length ();
7943 tree call_decl = SYMBOL_REF_DECL (call_dest);
7944 int local_call = call_decl && targetm.binds_local_p (call_decl);
7945 rtx xoperands[4];
7946
7947 xoperands[0] = call_dest;
7948
7949 /* Handle the common case where we're sure that the branch will reach
7950 the beginning of the "$CODE$" subspace. This is the beginning of
7951 the current function if we are in a named section. */
7952 if (!TARGET_LONG_CALLS && pa_attr_length_call (insn, sibcall) == 8)
7953 {
7954 xoperands[1] = gen_rtx_REG (word_mode, sibcall ? 0 : 2);
7955 output_asm_insn ("{bl|b,l} %0,%1", xoperands);
7956 }
7957 else
7958 {
7959 if (TARGET_64BIT && !local_call)
7960 {
7961 /* ??? As far as I can tell, the HP linker doesn't support the
7962 long pc-relative sequence described in the 64-bit runtime
7963 architecture. So, we use a slightly longer indirect call. */
7964 xoperands[0] = pa_get_deferred_plabel (call_dest);
7965 xoperands[1] = gen_label_rtx ();
7966
7967 /* If this isn't a sibcall, we put the load of %r27 into the
7968 delay slot. We can't do this in a sibcall as we don't
7969 have a second call-clobbered scratch register available.
7970 We don't need to do anything when generating fast indirect
7971 calls. */
7972 if (seq_length != 0 && !sibcall)
7973 {
7974 final_scan_insn (NEXT_INSN (insn), asm_out_file,
7975 optimize, 0, NULL);
7976
7977 /* Now delete the delay insn. */
7978 SET_INSN_DELETED (NEXT_INSN (insn));
7979 seq_length = 0;
7980 }
7981
7982 output_asm_insn ("addil LT'%0,%%r27", xoperands);
7983 output_asm_insn ("ldd RT'%0(%%r1),%%r1", xoperands);
7984 output_asm_insn ("ldd 0(%%r1),%%r1", xoperands);
7985
7986 if (sibcall)
7987 {
7988 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7989 output_asm_insn ("ldd 16(%%r1),%%r1", xoperands);
7990 output_asm_insn ("bve (%%r1)", xoperands);
7991 }
7992 else
7993 {
7994 output_asm_insn ("ldd 16(%%r1),%%r2", xoperands);
7995 output_asm_insn ("bve,l (%%r2),%%r2", xoperands);
7996 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7997 seq_length = 1;
7998 }
7999 }
8000 else
8001 {
8002 int indirect_call = 0;
8003
8004 /* Emit a long call. There are several different sequences
8005 of increasing length and complexity. In most cases,
8006 they don't allow an instruction in the delay slot. */
8007 if (!((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
8008 && !TARGET_LONG_PIC_SDIFF_CALL
8009 && !(TARGET_GAS && !TARGET_SOM && local_call)
8010 && !TARGET_64BIT)
8011 indirect_call = 1;
8012
8013 if (seq_length != 0
8014 && !sibcall
8015 && (!TARGET_PA_20
8016 || indirect_call
8017 || ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)))
8018 {
8019 /* A non-jump insn in the delay slot. By definition we can
8020 emit this insn before the call (and in fact before argument
8021 relocation). */
8022 final_scan_insn (NEXT_INSN (insn), asm_out_file, optimize, 0,
8023 NULL);
8024
8025 /* Now delete the delay insn. */
8026 SET_INSN_DELETED (NEXT_INSN (insn));
8027 seq_length = 0;
8028 }
8029
8030 if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
8031 {
8032 /* This is the best sequence for making long calls in
8033 non-pic code. Unfortunately, GNU ld doesn't provide
8034 the stub needed for external calls, and GAS's support
8035 for this with the SOM linker is buggy. It is safe
8036 to use this for local calls. */
8037 output_asm_insn ("ldil L'%0,%%r1", xoperands);
8038 if (sibcall)
8039 output_asm_insn ("be R'%0(%%sr4,%%r1)", xoperands);
8040 else
8041 {
8042 if (TARGET_PA_20)
8043 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31",
8044 xoperands);
8045 else
8046 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
8047
8048 output_asm_insn ("copy %%r31,%%r2", xoperands);
8049 seq_length = 1;
8050 }
8051 }
8052 else
8053 {
8054 /* The HP assembler and linker can handle relocations for
8055 the difference of two symbols. The HP assembler
8056 recognizes the sequence as a pc-relative call and
8057 the linker provides stubs when needed. */
8058
8059 /* GAS currently can't generate the relocations that
8060 are needed for the SOM linker under HP-UX using this
8061 sequence. The GNU linker doesn't generate the stubs
8062 that are needed for external calls on TARGET_ELF32
8063 with this sequence. For now, we have to use a longer
8064 plabel sequence when using GAS for non local calls. */
8065 if (TARGET_LONG_PIC_SDIFF_CALL
8066 || (TARGET_GAS && !TARGET_SOM && local_call))
8067 {
8068 xoperands[1] = gen_rtx_REG (Pmode, 1);
8069 xoperands[2] = xoperands[1];
8070 pa_output_pic_pcrel_sequence (xoperands);
8071 }
8072 else
8073 {
8074 /* Emit a long plabel-based call sequence. This is
8075 essentially an inline implementation of $$dyncall.
8076 We don't actually try to call $$dyncall as this is
8077 as difficult as calling the function itself. */
8078 xoperands[0] = pa_get_deferred_plabel (call_dest);
8079 xoperands[1] = gen_label_rtx ();
8080
8081 /* Since the call is indirect, FP arguments in registers
8082 need to be copied to the general registers. Then, the
8083 argument relocation stub will copy them back. */
8084 if (TARGET_SOM)
8085 copy_fp_args (insn);
8086
8087 if (flag_pic)
8088 {
8089 output_asm_insn ("addil LT'%0,%%r19", xoperands);
8090 output_asm_insn ("ldw RT'%0(%%r1),%%r1", xoperands);
8091 output_asm_insn ("ldw 0(%%r1),%%r1", xoperands);
8092 }
8093 else
8094 {
8095 output_asm_insn ("addil LR'%0-$global$,%%r27",
8096 xoperands);
8097 output_asm_insn ("ldw RR'%0-$global$(%%r1),%%r1",
8098 xoperands);
8099 }
8100
8101 output_asm_insn ("bb,>=,n %%r1,30,.+16", xoperands);
8102 output_asm_insn ("depi 0,31,2,%%r1", xoperands);
8103 output_asm_insn ("ldw 4(%%sr0,%%r1),%%r19", xoperands);
8104 output_asm_insn ("ldw 0(%%sr0,%%r1),%%r1", xoperands);
8105
8106 if (!sibcall && !TARGET_PA_20)
8107 {
8108 output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
8109 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
8110 output_asm_insn ("addi 8,%%r2,%%r2", xoperands);
8111 else
8112 output_asm_insn ("addi 16,%%r2,%%r2", xoperands);
8113 }
8114 }
8115
8116 if (TARGET_PA_20)
8117 {
8118 if (sibcall)
8119 output_asm_insn ("bve (%%r1)", xoperands);
8120 else
8121 {
8122 if (indirect_call)
8123 {
8124 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
8125 output_asm_insn ("stw %%r2,-24(%%sp)", xoperands);
8126 seq_length = 1;
8127 }
8128 else
8129 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
8130 }
8131 }
8132 else
8133 {
8134 if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
8135 output_asm_insn ("ldsid (%%r1),%%r31\n\tmtsp %%r31,%%sr0",
8136 xoperands);
8137
8138 if (sibcall)
8139 {
8140 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
8141 output_asm_insn ("be 0(%%sr4,%%r1)", xoperands);
8142 else
8143 output_asm_insn ("be 0(%%sr0,%%r1)", xoperands);
8144 }
8145 else
8146 {
8147 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
8148 output_asm_insn ("ble 0(%%sr4,%%r1)", xoperands);
8149 else
8150 output_asm_insn ("ble 0(%%sr0,%%r1)", xoperands);
8151
8152 if (indirect_call)
8153 output_asm_insn ("stw %%r31,-24(%%sp)", xoperands);
8154 else
8155 output_asm_insn ("copy %%r31,%%r2", xoperands);
8156 seq_length = 1;
8157 }
8158 }
8159 }
8160 }
8161 }
8162
8163 if (seq_length == 0)
8164 output_asm_insn ("nop", xoperands);
8165
8166 return "";
8167 }
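/* As a sketch, assuming the callee "foo" is within reach, the common
   case above emits

	bl foo,%r2
	nop

   ("b,l" with the PA 2.0 mnemonics), using %r0 as the link register
   for a sibcall and emitting the nop only when the delay slot is
   empty.  */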
8168
8169 /* Return the attribute length of the indirect call instruction INSN.
8170 The length must match the code generated by pa_output_indirect_call.
8171 The returned length includes the delay slot. Currently, the delay
8172 slot of an indirect call sequence is not exposed and it is used by
8173 the sequence itself. */
8174
8175 int
8176 pa_attr_length_indirect_call (rtx_insn *insn)
8177 {
8178 unsigned long distance = -1;
8179 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
8180
8181 if (INSN_ADDRESSES_SET_P ())
8182 {
8183 distance = (total + insn_current_reference_address (insn));
8184 if (distance < total)
8185 distance = -1;
8186 }
8187
8188 if (TARGET_64BIT)
8189 return 12;
8190
8191 if (TARGET_FAST_INDIRECT_CALLS)
8192 return 8;
8193
8194 if (TARGET_PORTABLE_RUNTIME)
8195 return 16;
8196
8197 /* Inline version of $$dyncall. */
8198 if ((TARGET_NO_SPACE_REGS || TARGET_PA_20) && !optimize_size)
8199 return 20;
8200
8201 if (!TARGET_LONG_CALLS
8202 && ((TARGET_PA_20 && !TARGET_SOM && distance < 7600000)
8203 || distance < MAX_PCREL17F_OFFSET))
8204 return 8;
8205
8206 /* Out of reach, can use ble. */
8207 if (!flag_pic)
8208 return 12;
8209
8210 /* Inline version of $$dyncall. */
8211 if (TARGET_NO_SPACE_REGS || TARGET_PA_20)
8212 return 20;
8213
8214 if (!optimize_size)
8215 return 36;
8216
8217 /* Long PIC pc-relative call. */
8218 return 20;
8219 }
8220
8221 const char *
8222 pa_output_indirect_call (rtx_insn *insn, rtx call_dest)
8223 {
8224 rtx xoperands[4];
8225 int length;
8226
8227 if (TARGET_64BIT)
8228 {
8229 xoperands[0] = call_dest;
8230 output_asm_insn ("ldd 16(%0),%%r2\n\t"
8231 "bve,l (%%r2),%%r2\n\t"
8232 "ldd 24(%0),%%r27", xoperands);
8233 return "";
8234 }
8235
8236 /* First the special case for kernels, level 0 systems, etc. */
8237 if (TARGET_FAST_INDIRECT_CALLS)
8238 {
8239 pa_output_arg_descriptor (insn);
8240 if (TARGET_PA_20)
8241 return "bve,l,n (%%r22),%%r2\n\tnop";
8242 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
8243 }
8244
8245 if (TARGET_PORTABLE_RUNTIME)
8246 {
8247 output_asm_insn ("ldil L'$$dyncall,%%r31\n\t"
8248 "ldo R'$$dyncall(%%r31),%%r31", xoperands);
8249 pa_output_arg_descriptor (insn);
8250 return "blr %%r0,%%r2\n\tbv,n %%r0(%%r31)";
8251 }
8252
8253 /* Maybe emit a fast inline version of $$dyncall. */
8254 if ((TARGET_NO_SPACE_REGS || TARGET_PA_20) && !optimize_size)
8255 {
8256 output_asm_insn ("bb,>=,n %%r22,30,.+12\n\t"
8257 "ldw 2(%%r22),%%r19\n\t"
8258 "ldw -2(%%r22),%%r22", xoperands);
8259 pa_output_arg_descriptor (insn);
8260 if (TARGET_NO_SPACE_REGS)
8261 {
8262 if (TARGET_PA_20)
8263 return "bve,l,n (%%r22),%%r2\n\tnop";
8264 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
8265 }
8266 return "bve,l (%%r22),%%r2\n\tstw %%r2,-24(%%sp)";
8267 }
8268
8269 /* Now the normal case -- we can reach $$dyncall directly or
8270 we're sure that we can get there via a long-branch stub.
8271
8272 No need to check target flags as the length uniquely identifies
8273 the remaining cases. */
8274 length = pa_attr_length_indirect_call (insn);
8275 if (length == 8)
8276 {
8277 pa_output_arg_descriptor (insn);
8278
8279 /* The HP linker sometimes substitutes a BLE for BL/B,L calls to
8280 $$dyncall. Since BLE uses %r31 as the link register, the 22-bit
8281 variant of the B,L instruction can't be used on the SOM target. */
8282 if (TARGET_PA_20 && !TARGET_SOM)
8283 return "b,l,n $$dyncall,%%r2\n\tnop";
8284 else
8285 return "bl $$dyncall,%%r31\n\tcopy %%r31,%%r2";
8286 }
8287
8288 /* Long millicode call, but we are not generating PIC or portable runtime
8289 code. */
8290 if (length == 12)
8291 {
8292 output_asm_insn ("ldil L'$$dyncall,%%r2", xoperands);
8293 pa_output_arg_descriptor (insn);
8294 return "ble R'$$dyncall(%%sr4,%%r2)\n\tcopy %%r31,%%r2";
8295 }
8296
8297 /* Maybe emit a fast inline version of $$dyncall. The long PIC
8298 pc-relative call sequence is five instructions. The inline PA 2.0
8299 version of $$dyncall is also five instructions. The PA 1.X versions
8300 are longer but still an overall win. */
8301 if (TARGET_NO_SPACE_REGS || TARGET_PA_20 || !optimize_size)
8302 {
8303 output_asm_insn ("bb,>=,n %%r22,30,.+12\n\t"
8304 "ldw 2(%%r22),%%r19\n\t"
8305 "ldw -2(%%r22),%%r22", xoperands);
8306 if (TARGET_NO_SPACE_REGS)
8307 {
8308 pa_output_arg_descriptor (insn);
8309 if (TARGET_PA_20)
8310 return "bve,l,n (%%r22),%%r2\n\tnop";
8311 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
8312 }
8313 if (TARGET_PA_20)
8314 {
8315 pa_output_arg_descriptor (insn);
8316 return "bve,l (%%r22),%%r2\n\tstw %%r2,-24(%%sp)";
8317 }
8318 output_asm_insn ("bl .+8,%%r2\n\t"
8319 "ldo 16(%%r2),%%r2\n\t"
8320 "ldsid (%%r22),%%r1\n\t"
8321 "mtsp %%r1,%%sr0", xoperands);
8322 pa_output_arg_descriptor (insn);
8323 return "be 0(%%sr0,%%r22)\n\tstw %%r2,-24(%%sp)";
8324 }
8325
8326 /* We need a long PIC call to $$dyncall. */
8327 xoperands[0] = gen_rtx_SYMBOL_REF (Pmode, "$$dyncall");
8328 xoperands[1] = gen_rtx_REG (Pmode, 2);
8329 xoperands[2] = gen_rtx_REG (Pmode, 1);
8330 pa_output_pic_pcrel_sequence (xoperands);
8331 pa_output_arg_descriptor (insn);
8332 return "bv %%r0(%%r1)\n\tldo {12|20}(%%r2),%%r2";
8333 }
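/* For illustration, with TARGET_FAST_INDIRECT_CALLS on a pre-2.0
   target the entire indirect call above is

	ble 0(%sr4,%r22)
	copy %r31,%r2

   which is the 8 bytes pa_attr_length_indirect_call reports for that
   case.  */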
8334
8335 /* In HPUX 8.0's shared library scheme, special relocations are needed
8336 for function labels if they might be passed to a function
8337 in a shared library (because shared libraries don't live in code
8338 space), and special magic is needed to construct their address. */
8339
8340 void
8341 pa_encode_label (rtx sym)
8342 {
8343 const char *str = XSTR (sym, 0);
8344 int len = strlen (str) + 1;
8345 char *newstr, *p;
8346
8347 p = newstr = XALLOCAVEC (char, len + 1);
8348 *p++ = '@';
8349 strcpy (p, str);
8350
8351 XSTR (sym, 0) = ggc_alloc_string (newstr, len);
8352 }
8353
8354 static void
8355 pa_encode_section_info (tree decl, rtx rtl, int first)
8356 {
8357 int old_referenced = 0;
8358
8359 if (!first && MEM_P (rtl) && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF)
8360 old_referenced
8361 = SYMBOL_REF_FLAGS (XEXP (rtl, 0)) & SYMBOL_FLAG_REFERENCED;
8362
8363 default_encode_section_info (decl, rtl, first);
8364
8365 if (first && TEXT_SPACE_P (decl))
8366 {
8367 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
8368 if (TREE_CODE (decl) == FUNCTION_DECL)
8369 pa_encode_label (XEXP (rtl, 0));
8370 }
8371 else if (old_referenced)
8372 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= old_referenced;
8373 }
8374
8375 /* This is sort of inverse to pa_encode_section_info. */
8376
8377 static const char *
8378 pa_strip_name_encoding (const char *str)
8379 {
8380 str += (*str == '@');
8381 str += (*str == '*');
8382 return str;
8383 }
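/* For example, pa_encode_label rewrites the symbol "foo" as "@foo",
   and pa_strip_name_encoding ("@foo") yields "foo" again (it also
   skips the generic '*' user-label prefix).  */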
8384
8385 /* Returns 1 if OP is a function label involved in a simple addition
8386 with a constant. Used to keep certain patterns from matching
8387 during instruction combination. */
8388 int
8389 pa_is_function_label_plus_const (rtx op)
8390 {
8391 /* Strip off any CONST. */
8392 if (GET_CODE (op) == CONST)
8393 op = XEXP (op, 0);
8394
8395 return (GET_CODE (op) == PLUS
8396 && function_label_operand (XEXP (op, 0), VOIDmode)
8397 && GET_CODE (XEXP (op, 1)) == CONST_INT);
8398 }
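/* For example, (const (plus (symbol_ref "@foo") (const_int 4))), a
   function label plus the constant 4, makes this return 1, while a
   plain (symbol_ref "@foo") or a PLUS with a register operand
   returns 0.  */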
8399
8400 /* Output assembly code for a thunk to FUNCTION. */
8401
8402 static void
8403 pa_asm_output_mi_thunk (FILE *file, tree thunk_fndecl, HOST_WIDE_INT delta,
8404 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
8405 tree function)
8406 {
8407 static unsigned int current_thunk_number;
8408 int val_14 = VAL_14_BITS_P (delta);
8409 unsigned int old_last_address = last_address, nbytes = 0;
8410 char label[17];
8411 rtx xoperands[4];
8412
8413 xoperands[0] = XEXP (DECL_RTL (function), 0);
8414 xoperands[1] = XEXP (DECL_RTL (thunk_fndecl), 0);
8415 xoperands[2] = GEN_INT (delta);
8416
8417 final_start_function (emit_barrier (), file, 1);
8418
8419 /* Output the thunk. We know that the function is in the same
8420 translation unit (i.e., the same space) as the thunk, and that
8421 thunks are output after their method. Thus, we don't need an
8422 external branch to reach the function. With SOM and GAS,
8423 functions and thunks are effectively in different sections.
8424 Thus, we can always use an IA-relative branch and the linker
8425 will add a long branch stub if necessary.
8426
8427 However, we have to be careful when generating PIC code on the
8428 SOM port to ensure that the sequence does not transfer to an
8429 import stub for the target function as this could clobber the
8430 return value saved at SP-24. This would also apply to the
8431 32-bit linux port if the multi-space model is implemented. */
8432 if ((!TARGET_LONG_CALLS && TARGET_SOM && !TARGET_PORTABLE_RUNTIME
8433 && !(flag_pic && TREE_PUBLIC (function))
8434 && (TARGET_GAS || last_address < 262132))
8435 || (!TARGET_LONG_CALLS && !TARGET_SOM && !TARGET_PORTABLE_RUNTIME
8436 && ((targetm_common.have_named_sections
8437 && DECL_SECTION_NAME (thunk_fndecl) != NULL
8438 /* The GNU 64-bit linker has rather poor stub management.
8439 So, we use a long branch from thunks that aren't in
8440 the same section as the target function. */
8441 && ((!TARGET_64BIT
8442 && (DECL_SECTION_NAME (thunk_fndecl)
8443 != DECL_SECTION_NAME (function)))
8444 || ((DECL_SECTION_NAME (thunk_fndecl)
8445 == DECL_SECTION_NAME (function))
8446 && last_address < 262132)))
8447 /* In this case, we need to be able to reach the start of
8448 the stub table even though the function is likely closer
8449 and can be jumped to directly. */
8450 || (targetm_common.have_named_sections
8451 && DECL_SECTION_NAME (thunk_fndecl) == NULL
8452 && DECL_SECTION_NAME (function) == NULL
8453 && total_code_bytes < MAX_PCREL17F_OFFSET)
8454 /* Likewise. */
8455 || (!targetm_common.have_named_sections
8456 && total_code_bytes < MAX_PCREL17F_OFFSET))))
8457 {
8458 if (!val_14)
8459 output_asm_insn ("addil L'%2,%%r26", xoperands);
8460
8461 output_asm_insn ("b %0", xoperands);
8462
8463 if (val_14)
8464 {
8465 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8466 nbytes += 8;
8467 }
8468 else
8469 {
8470 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8471 nbytes += 12;
8472 }
8473 }
8474 else if (TARGET_64BIT)
8475 {
8476 rtx xop[4];
8477
8478 /* We only have one call-clobbered scratch register, so we can't
8479 make use of the delay slot if delta doesn't fit in 14 bits. */
8480 if (!val_14)
8481 {
8482 output_asm_insn ("addil L'%2,%%r26", xoperands);
8483 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8484 }
8485
8486 /* Load function address into %r1. */
8487 xop[0] = xoperands[0];
8488 xop[1] = gen_rtx_REG (Pmode, 1);
8489 xop[2] = xop[1];
8490 pa_output_pic_pcrel_sequence (xop);
8491
8492 if (val_14)
8493 {
8494 output_asm_insn ("bv %%r0(%%r1)", xoperands);
8495 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8496 nbytes += 20;
8497 }
8498 else
8499 {
8500 output_asm_insn ("bv,n %%r0(%%r1)", xoperands);
8501 nbytes += 24;
8502 }
8503 }
8504 else if (TARGET_PORTABLE_RUNTIME)
8505 {
8506 output_asm_insn ("ldil L'%0,%%r1", xoperands);
8507 output_asm_insn ("ldo R'%0(%%r1),%%r22", xoperands);
8508
8509 if (!val_14)
8510 output_asm_insn ("ldil L'%2,%%r26", xoperands);
8511
8512 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8513
8514 if (val_14)
8515 {
8516 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8517 nbytes += 16;
8518 }
8519 else
8520 {
8521 output_asm_insn ("ldo R'%2(%%r26),%%r26", xoperands);
8522 nbytes += 20;
8523 }
8524 }
8525 else if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8526 {
8527 /* The function is accessible from outside this module. The only
8528 way to avoid an import stub between the thunk and function is to
8529 call the function directly with an indirect sequence similar to
8530 that used by $$dyncall. This is possible because $$dyncall acts
8531 as the import stub in an indirect call. */
8532 ASM_GENERATE_INTERNAL_LABEL (label, "LTHN", current_thunk_number);
8533 xoperands[3] = gen_rtx_SYMBOL_REF (Pmode, label);
8534 output_asm_insn ("addil LT'%3,%%r19", xoperands);
8535 output_asm_insn ("ldw RT'%3(%%r1),%%r22", xoperands);
8536 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8537 output_asm_insn ("bb,>=,n %%r22,30,.+16", xoperands);
8538 output_asm_insn ("depi 0,31,2,%%r22", xoperands);
8539 output_asm_insn ("ldw 4(%%sr0,%%r22),%%r19", xoperands);
8540 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8541
8542 if (!val_14)
8543 {
8544 output_asm_insn ("addil L'%2,%%r26", xoperands);
8545 nbytes += 4;
8546 }
8547
8548 if (TARGET_PA_20)
8549 {
8550 output_asm_insn ("bve (%%r22)", xoperands);
8551 nbytes += 36;
8552 }
8553 else if (TARGET_NO_SPACE_REGS)
8554 {
8555 output_asm_insn ("be 0(%%sr4,%%r22)", xoperands);
8556 nbytes += 36;
8557 }
8558 else
8559 {
8560 output_asm_insn ("ldsid (%%sr0,%%r22),%%r21", xoperands);
8561 output_asm_insn ("mtsp %%r21,%%sr0", xoperands);
8562 output_asm_insn ("be 0(%%sr0,%%r22)", xoperands);
8563 nbytes += 44;
8564 }
8565
8566 if (val_14)
8567 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8568 else
8569 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8570 }
8571 else if (flag_pic)
8572 {
8573 rtx xop[4];
8574
8575 /* Load function address into %r22. */
8576 xop[0] = xoperands[0];
8577 xop[1] = gen_rtx_REG (Pmode, 1);
8578 xop[2] = gen_rtx_REG (Pmode, 22);
8579 pa_output_pic_pcrel_sequence (xop);
8580
8581 if (!val_14)
8582 output_asm_insn ("addil L'%2,%%r26", xoperands);
8583
8584 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8585
8586 if (val_14)
8587 {
8588 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8589 nbytes += 20;
8590 }
8591 else
8592 {
8593 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8594 nbytes += 24;
8595 }
8596 }
8597 else
8598 {
8599 if (!val_14)
8600 output_asm_insn ("addil L'%2,%%r26", xoperands);
8601
8602 output_asm_insn ("ldil L'%0,%%r22", xoperands);
8603 output_asm_insn ("be R'%0(%%sr4,%%r22)", xoperands);
8604
8605 if (val_14)
8606 {
8607 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8608 nbytes += 12;
8609 }
8610 else
8611 {
8612 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8613 nbytes += 16;
8614 }
8615 }
8616
8617 final_end_function ();
8618
8619 if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8620 {
8621 switch_to_section (data_section);
8622 output_asm_insn (".align 4", xoperands);
8623 ASM_OUTPUT_LABEL (file, label);
8624 output_asm_insn (".word P'%0", xoperands);
8625 }
8626
8627 current_thunk_number++;
8628 nbytes = ((nbytes + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
8629 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
8630 last_address += nbytes;
8631 if (old_last_address > last_address)
8632 last_address = UINT_MAX;
8633 update_total_code_bytes (nbytes);
8634 }
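/* As a sketch (with "foo" and "delta" standing in for the target
   function and DELTA), the simplest case above, a directly reachable
   function and a 14-bit delta, produces just

	b foo
	ldo delta(%r26),%r26

   adjusting the hidden this pointer in %r26 from the delay slot.  */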
8635
8636 /* Only direct calls to static functions are allowed to be sibling (tail)
8637 call optimized.
8638
8639 This restriction is necessary because some linker generated stubs will
8640 store return pointers into 'rp' in some cases which might clobber a
8641 live value already in 'rp'.
8642
8643 In a sibcall the current function and the target function share stack
8644 space. Thus if the path to the current function and the path to the
8645 target function save a value in 'rp', they save the value into the
8646 same stack slot, which has undesirable consequences.
8647
8648 Because of the deferred binding nature of shared libraries any function
8649 with external scope could be in a different load module and thus require
8650 'rp' to be saved when calling that function. So sibcall optimizations
8651 can only be safe for static functions.
8652
8653 Note that GCC never needs return value relocations, so we don't have to
8654 worry about static calls with return value relocations (which require
8655 saving 'rp').
8656
8657 It is safe to perform a sibcall optimization when the target function
8658 will never return. */
8659 static bool
8660 pa_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
8661 {
8662 /* Sibcalls are not ok because the arg pointer register is not a fixed
8663 register. This prevents the sibcall optimization from occurring. In
8664 addition, there are problems with stub placement using GNU ld. This
8665 is because a normal sibcall branch uses a 17-bit relocation while
8666 a regular call branch uses a 22-bit relocation. As a result, more
8667 care needs to be taken in the placement of long-branch stubs. */
8668 if (TARGET_64BIT)
8669 return false;
8670
8671 if (TARGET_PORTABLE_RUNTIME)
8672 return false;
8673
8674 /* Sibcalls are only ok within a translation unit. */
8675 return decl && targetm.binds_local_p (decl);
8676 }
8677
8678 /* ??? Addition is not commutative on the PA due to the weird implicit
8679 space register selection rules for memory addresses. Therefore, we
8680 don't consider a + b == b + a, as this might be inside a MEM. */
8681 static bool
8682 pa_commutative_p (const_rtx x, int outer_code)
8683 {
8684 return (COMMUTATIVE_P (x)
8685 && (TARGET_NO_SPACE_REGS
8686 || (outer_code != UNKNOWN && outer_code != MEM)
8687 || GET_CODE (x) != PLUS));
8688 }
8689
8690 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8691 use in fmpyadd instructions. */
8692 int
8693 pa_fmpyaddoperands (rtx *operands)
8694 {
8695 machine_mode mode = GET_MODE (operands[0]);
8696
8697 /* Must be a floating point mode. */
8698 if (mode != SFmode && mode != DFmode)
8699 return 0;
8700
8701 /* All modes must be the same. */
8702 if (! (mode == GET_MODE (operands[1])
8703 && mode == GET_MODE (operands[2])
8704 && mode == GET_MODE (operands[3])
8705 && mode == GET_MODE (operands[4])
8706 && mode == GET_MODE (operands[5])))
8707 return 0;
8708
8709 /* All operands must be registers. */
8710 if (! (GET_CODE (operands[1]) == REG
8711 && GET_CODE (operands[2]) == REG
8712 && GET_CODE (operands[3]) == REG
8713 && GET_CODE (operands[4]) == REG
8714 && GET_CODE (operands[5]) == REG))
8715 return 0;
8716
8717 /* Only 2 real operands to the addition. One of the input operands must
8718 be the same as the output operand. */
8719 if (! rtx_equal_p (operands[3], operands[4])
8720 && ! rtx_equal_p (operands[3], operands[5]))
8721 return 0;
8722
8723 /* Inout operand of add cannot conflict with any operands from multiply. */
8724 if (rtx_equal_p (operands[3], operands[0])
8725 || rtx_equal_p (operands[3], operands[1])
8726 || rtx_equal_p (operands[3], operands[2]))
8727 return 0;
8728
8729 /* multiply cannot feed into addition operands. */
8730 if (rtx_equal_p (operands[4], operands[0])
8731 || rtx_equal_p (operands[5], operands[0]))
8732 return 0;
8733
8734 /* SFmode limits the registers to the upper 32 of the 32-bit FP regs. */
8735 if (mode == SFmode
8736 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8737 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8738 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8739 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8740 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8741 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8742 return 0;
8743
8744 /* Passed. Operands are suitable for fmpyadd. */
8745 return 1;
8746 }
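/* For a DFmode illustration (hypothetical registers): the operand
   vector {%fr4, %fr5, %fr6, %fr7, %fr7, %fr8}, i.e. %fr4 = %fr5 * %fr6
   combined with %fr7 = %fr7 + %fr8, satisfies every check above, while
   reusing %fr4 in the add would fail the independence tests.  */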
8747
8748 #if !defined(USE_COLLECT2)
8749 static void
8750 pa_asm_out_constructor (rtx symbol, int priority)
8751 {
8752 if (!function_label_operand (symbol, VOIDmode))
8753 pa_encode_label (symbol);
8754
8755 #ifdef CTORS_SECTION_ASM_OP
8756 default_ctor_section_asm_out_constructor (symbol, priority);
8757 #else
8758 # ifdef TARGET_ASM_NAMED_SECTION
8759 default_named_section_asm_out_constructor (symbol, priority);
8760 # else
8761 default_stabs_asm_out_constructor (symbol, priority);
8762 # endif
8763 #endif
8764 }
8765
8766 static void
8767 pa_asm_out_destructor (rtx symbol, int priority)
8768 {
8769 if (!function_label_operand (symbol, VOIDmode))
8770 pa_encode_label (symbol);
8771
8772 #ifdef DTORS_SECTION_ASM_OP
8773 default_dtor_section_asm_out_destructor (symbol, priority);
8774 #else
8775 # ifdef TARGET_ASM_NAMED_SECTION
8776 default_named_section_asm_out_destructor (symbol, priority);
8777 # else
8778 default_stabs_asm_out_destructor (symbol, priority);
8779 # endif
8780 #endif
8781 }
8782 #endif
8783
8784 /* This function places uninitialized global data in the bss section.
8785 The ASM_OUTPUT_ALIGNED_BSS macro needs to be defined to call this
8786 function on the SOM port to prevent uninitialized global data from
8787 being placed in the data section. */
8788
8789 void
8790 pa_asm_output_aligned_bss (FILE *stream,
8791 const char *name,
8792 unsigned HOST_WIDE_INT size,
8793 unsigned int align)
8794 {
8795 switch_to_section (bss_section);
8796 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8797
8798 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
8799 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
8800 #endif
8801
8802 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
8803 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
8804 #endif
8805
8806 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8807 ASM_OUTPUT_LABEL (stream, name);
8808 fprintf (stream, "\t.block " HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8809 }
8810
8811 /* Both the HP and GNU assemblers under HP-UX provide a .comm directive
8812 that doesn't allow the alignment of global common storage to be directly
8813 specified. The SOM linker aligns common storage based on the rounded
8814 value of the NUM_BYTES parameter in the .comm directive. It's not
8815 possible to use the .align directive as it doesn't affect the alignment
8816 of the label associated with a .comm directive. */
8817
8818 void
8819 pa_asm_output_aligned_common (FILE *stream,
8820 const char *name,
8821 unsigned HOST_WIDE_INT size,
8822 unsigned int align)
8823 {
8824 unsigned int max_common_align;
8825
8826 max_common_align = TARGET_64BIT ? 128 : (size >= 4096 ? 256 : 64);
8827 if (align > max_common_align)
8828 {
8829 warning (0, "alignment (%u) for %s exceeds maximum alignment "
8830 "for global common data. Using %u",
8831 align / BITS_PER_UNIT, name, max_common_align / BITS_PER_UNIT);
8832 align = max_common_align;
8833 }
8834
8835 switch_to_section (bss_section);
8836
8837 assemble_name (stream, name);
8838 fprintf (stream, "\t.comm " HOST_WIDE_INT_PRINT_UNSIGNED"\n",
8839 MAX (size, align / BITS_PER_UNIT));
8840 }
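/* For example, a 16-byte common symbol "buf" requesting 64-bit
   alignment is emitted as

	buf	.comm 16

   where the rounded size, not an .align directive, determines the
   alignment the SOM linker gives the symbol.  */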
8841
8842 /* We can't use .comm for local common storage as the SOM linker effectively
8843 treats the symbol as universal and uses the same storage for local symbols
8844 with the same name in different object files. The .block directive
8845 reserves an uninitialized block of storage. However, it's not common
8846 storage. Fortunately, GCC never requests common storage with the same
8847 name in any given translation unit. */
8848
8849 void
8850 pa_asm_output_aligned_local (FILE *stream,
8851 const char *name,
8852 unsigned HOST_WIDE_INT size,
8853 unsigned int align)
8854 {
8855 switch_to_section (bss_section);
8856 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8857
8858 #ifdef LOCAL_ASM_OP
8859 fprintf (stream, "%s", LOCAL_ASM_OP);
8860 assemble_name (stream, name);
8861 fprintf (stream, "\n");
8862 #endif
8863
8864 ASM_OUTPUT_LABEL (stream, name);
8865 fprintf (stream, "\t.block " HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8866 }
8867
8868 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8869 use in fmpysub instructions. */
8870 int
8871 pa_fmpysuboperands (rtx *operands)
8872 {
8873 machine_mode mode = GET_MODE (operands[0]);
8874
8875 /* Must be a floating point mode. */
8876 if (mode != SFmode && mode != DFmode)
8877 return 0;
8878
8879 /* All modes must be the same. */
8880 if (! (mode == GET_MODE (operands[1])
8881 && mode == GET_MODE (operands[2])
8882 && mode == GET_MODE (operands[3])
8883 && mode == GET_MODE (operands[4])
8884 && mode == GET_MODE (operands[5])))
8885 return 0;
8886
8887 /* All operands must be registers. */
8888 if (! (GET_CODE (operands[1]) == REG
8889 && GET_CODE (operands[2]) == REG
8890 && GET_CODE (operands[3]) == REG
8891 && GET_CODE (operands[4]) == REG
8892 && GET_CODE (operands[5]) == REG))
8893 return 0;
8894
8895 /* Only 2 real operands to the subtraction. Subtraction is not a commutative
8896 operation, so operands[4] must be the same as operands[3]. */
8897 if (! rtx_equal_p (operands[3], operands[4]))
8898 return 0;
8899
8900 /* multiply cannot feed into subtraction. */
8901 if (rtx_equal_p (operands[5], operands[0]))
8902 return 0;
8903
8904 /* Inout operand of sub cannot conflict with any operands from multiply. */
8905 if (rtx_equal_p (operands[3], operands[0])
8906 || rtx_equal_p (operands[3], operands[1])
8907 || rtx_equal_p (operands[3], operands[2]))
8908 return 0;
8909
8910 /* SFmode limits the registers to the upper 32 of the 32-bit FP regs. */
8911 if (mode == SFmode
8912 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8913 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8914 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8915 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8916 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8917 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8918 return 0;
8919
8920 /* Passed. Operands are suitable for fmpysub. */
8921 return 1;
8922 }
8923
8924 /* Return 1 if the given constant is 2, 4, or 8. These are the valid
8925 constants for a MULT embedded inside a memory address. */
8926 int
8927 pa_mem_shadd_constant_p (int val)
8928 {
8929 if (val == 2 || val == 4 || val == 8)
8930 return 1;
8931 else
8932 return 0;
8933 }
8934
8935 /* Return 1 if the given constant is 1, 2, or 3. These are the valid
8936 constants for shadd instructions. */
8937 int
8938 pa_shadd_constant_p (int val)
8939 {
8940 if (val == 1 || val == 2 || val == 3)
8941 return 1;
8942 else
8943 return 0;
8944 }
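/* The two sets correspond: a shadd shift count of 1, 2 or 3 scales by
   2, 4 or 8. E.g. (mult (reg) (const_int 8)) inside a memory address
   satisfies pa_mem_shadd_constant_p, while the equivalent
   (ashift (reg) (const_int 3)) outside an address satisfies
   pa_shadd_constant_p.  */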
8945
8946 /* Return TRUE if INSN branches forward. */
8947
8948 static bool
8949 forward_branch_p (rtx_insn *insn)
8950 {
8951 rtx lab = JUMP_LABEL (insn);
8952
8953 /* The INSN must have a jump label. */
8954 gcc_assert (lab != NULL_RTX);
8955
8956 if (INSN_ADDRESSES_SET_P ())
8957 return INSN_ADDRESSES (INSN_UID (lab)) > INSN_ADDRESSES (INSN_UID (insn));
8958
8959 while (insn)
8960 {
8961 if (insn == lab)
8962 return true;
8963 else
8964 insn = NEXT_INSN (insn);
8965 }
8966
8967 return false;
8968 }
8969
8970 /* Output an unconditional move and branch insn. */
8971
8972 const char *
8973 pa_output_parallel_movb (rtx *operands, rtx_insn *insn)
8974 {
8975 int length = get_attr_length (insn);
8976
8977 /* These are the cases in which we win. */
8978 if (length == 4)
8979 return "mov%I1b,tr %1,%0,%2";
8980
8981 /* None of the following cases win, but they don't lose either. */
8982 if (length == 8)
8983 {
8984 if (dbr_sequence_length () == 0)
8985 {
8986 /* Nothing in the delay slot, fake it by putting the combined
8987 insn (the copy or add) in the delay slot of a bl. */
8988 if (GET_CODE (operands[1]) == CONST_INT)
8989 return "b %2\n\tldi %1,%0";
8990 else
8991 return "b %2\n\tcopy %1,%0";
8992 }
8993 else
8994 {
8995 /* Something in the delay slot, but we've got a long branch. */
8996 if (GET_CODE (operands[1]) == CONST_INT)
8997 return "ldi %1,%0\n\tb %2";
8998 else
8999 return "copy %1,%0\n\tb %2";
9000 }
9001 }
9002
9003 if (GET_CODE (operands[1]) == CONST_INT)
9004 output_asm_insn ("ldi %1,%0", operands);
9005 else
9006 output_asm_insn ("copy %1,%0", operands);
9007 return pa_output_lbranch (operands[2], insn, 1);
9008 }
9009
9010 /* Output an unconditional add and branch insn. */
9011
9012 const char *
9013 pa_output_parallel_addb (rtx *operands, rtx_insn *insn)
9014 {
9015 int length = get_attr_length (insn);
9016
9017 /* To make life easy we want operand0 to be the shared input/output
9018 operand and operand1 to be the readonly operand. */
9019 if (operands[0] == operands[1])
9020 operands[1] = operands[2];
9021
9022 /* These are the cases in which we win. */
9023 if (length == 4)
9024 return "add%I1b,tr %1,%0,%3";
9025
9026 /* None of the following cases win, but they don't lose either. */
9027 if (length == 8)
9028 {
9029 if (dbr_sequence_length () == 0)
9030 /* Nothing in the delay slot, fake it by putting the combined
9031 insn (the copy or add) in the delay slot of a bl. */
9032 return "b %3\n\tadd%I1 %1,%0,%0";
9033 else
9034 /* Something in the delay slot, but we've got a long branch. */
9035 return "add%I1 %1,%0,%0\n\tb %3";
9036 }
9037
9038 output_asm_insn ("add%I1 %1,%0,%0", operands);
9039 return pa_output_lbranch (operands[3], insn, 1);
9040 }
9041
9042 /* We use this hook to perform a PA specific optimization which is difficult
9043 to do in earlier passes. */
9044
9045 static void
9046 pa_reorg (void)
9047 {
9048 remove_useless_addtr_insns (1);
9049
9050 if (pa_cpu < PROCESSOR_8000)
9051 pa_combine_instructions ();
9052 }
9053
9054 /* The PA has a number of odd instructions which can perform multiple
9055 tasks at once. On first generation PA machines (PA1.0 and PA1.1)
9056 it may be profitable to combine two instructions into one instruction
9057 with two outputs. It's not profitable on PA2.0 machines because the
9058 two outputs would take two slots in the reorder buffers.
9059
9060 This routine finds instructions which can be combined and combines
9061 them. We only support some of the potential combinations, and we
9062 only try common ways to find suitable instructions.
9063
9064 * addb can add two registers or a register and a small integer
9065 and jump to a nearby (+-8k) location. Normally the jump to the
9066 nearby location is conditional on the result of the add, but by
9067 using the "true" condition we can make the jump unconditional.
9068 Thus addb can perform two independent operations in one insn.
9069
9070 * movb is similar to addb in that it can perform a reg->reg
9071 or small immediate->reg copy and jump to a nearby (+-8k) location.
9072
9073 * fmpyadd and fmpysub can perform a FP multiply and either an
9074 FP add or FP sub if the operands of the multiply and add/sub are
9075 independent (there are other minor restrictions). Note both
9076 the fmpy and fadd/fsub can in theory move to better spots according
9077 to data dependencies, but for now we require the fmpy stay at a
9078 fixed location.
9079
9080 * Many of the memory operations can perform pre & post updates
9081 of index registers. GCC's pre/post increment/decrement addressing
9082 is far too simple to take advantage of all the possibilities. This
9083 pass may not be suitable since those insns may not be independent.
9084
9085 * comclr can compare two registers or a register and an immediate, nullify
9086 the following instruction and zero some other register. This
9087 is more difficult to use as it's harder to find an insn which
9088 will generate a comclr than finding something like an unconditional
9089 branch. (Conditional moves & long branches create comclr insns.)
9090
9091 * Most arithmetic operations can conditionally skip the next
9092 instruction. They can be viewed as "perform this operation
9093 and conditionally jump to this nearby location" (where nearby
9094 is one insn away). These are difficult to use due to the
9095 branch length restrictions. */
9096
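/* Illustrative sketch (editorial): the combination below works by
   wrapping the two candidate patterns in a PARALLEL and asking recog
   whether pa.md has a matching two-output insn.  For example, an
   independent FP multiply and add such as

       (parallel [(set (reg:DF fr6) (mult:DF (reg:DF fr4) (reg:DF fr5)))
                  (set (reg:DF fr9) (plus:DF (reg:DF fr7) (reg:DF fr8)))])

   matches the fmpyadd pattern and is emitted as a single instruction
   when the operand independence checks in pa_can_combine_p succeed.  */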
9097 static void
9098 pa_combine_instructions (void)
9099 {
9100 rtx_insn *anchor;
9101
9102 /* This can get expensive since the basic algorithm is
9103 O(n^2) (or worse). Only do it for -O2 or higher
9104 levels of optimization. */
9105 if (optimize < 2)
9106 return;
9107
9108 /* Walk down the list of insns looking for "anchor" insns which
9109 may be combined with "floating" insns. As the name implies,
9110 "anchor" instructions don't move, while "floating" insns may
9111 move around. */
9112 rtx par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
9113 rtx_insn *new_rtx = make_insn_raw (par);
9114
9115 for (anchor = get_insns (); anchor; anchor = NEXT_INSN (anchor))
9116 {
9117 enum attr_pa_combine_type anchor_attr;
9118 enum attr_pa_combine_type floater_attr;
9119
9120 /* We only care about INSNs, JUMP_INSNs, and CALL_INSNs.
9121 Also ignore any special USE insns. */
9122 if ((! NONJUMP_INSN_P (anchor) && ! JUMP_P (anchor) && ! CALL_P (anchor))
9123 || GET_CODE (PATTERN (anchor)) == USE
9124 || GET_CODE (PATTERN (anchor)) == CLOBBER)
9125 continue;
9126
9127 anchor_attr = get_attr_pa_combine_type (anchor);
9128 /* See if anchor is an insn suitable for combination. */
9129 if (anchor_attr == PA_COMBINE_TYPE_FMPY
9130 || anchor_attr == PA_COMBINE_TYPE_FADDSUB
9131 || (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
9132 && ! forward_branch_p (anchor)))
9133 {
9134 rtx_insn *floater;
9135
9136 for (floater = PREV_INSN (anchor);
9137 floater;
9138 floater = PREV_INSN (floater))
9139 {
9140 if (NOTE_P (floater)
9141 || (NONJUMP_INSN_P (floater)
9142 && (GET_CODE (PATTERN (floater)) == USE
9143 || GET_CODE (PATTERN (floater)) == CLOBBER)))
9144 continue;
9145
9146 /* Anything except a regular INSN will stop our search. */
9147 if (! NONJUMP_INSN_P (floater))
9148 {
9149 floater = NULL;
9150 break;
9151 }
9152
9153 /* See if FLOATER is suitable for combination with the
9154 anchor. */
9155 floater_attr = get_attr_pa_combine_type (floater);
9156 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
9157 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
9158 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9159 && floater_attr == PA_COMBINE_TYPE_FMPY))
9160 {
9161 /* If ANCHOR and FLOATER can be combined, then we're
9162 done with this pass. */
9163 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9164 SET_DEST (PATTERN (floater)),
9165 XEXP (SET_SRC (PATTERN (floater)), 0),
9166 XEXP (SET_SRC (PATTERN (floater)), 1)))
9167 break;
9168 }
9169
9170 else if (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
9171 && floater_attr == PA_COMBINE_TYPE_ADDMOVE)
9172 {
9173 if (GET_CODE (SET_SRC (PATTERN (floater))) == PLUS)
9174 {
9175 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9176 SET_DEST (PATTERN (floater)),
9177 XEXP (SET_SRC (PATTERN (floater)), 0),
9178 XEXP (SET_SRC (PATTERN (floater)), 1)))
9179 break;
9180 }
9181 else
9182 {
9183 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9184 SET_DEST (PATTERN (floater)),
9185 SET_SRC (PATTERN (floater)),
9186 SET_SRC (PATTERN (floater))))
9187 break;
9188 }
9189 }
9190 }
9191
9192 /* If we didn't find anything on the backwards scan try forwards. */
9193 if (!floater
9194 && (anchor_attr == PA_COMBINE_TYPE_FMPY
9195 || anchor_attr == PA_COMBINE_TYPE_FADDSUB))
9196 {
9197 for (floater = anchor; floater; floater = NEXT_INSN (floater))
9198 {
9199 if (NOTE_P (floater)
9200 || (NONJUMP_INSN_P (floater)
9201 && (GET_CODE (PATTERN (floater)) == USE
9202 || GET_CODE (PATTERN (floater)) == CLOBBER)))
9204 continue;
9205
9206 /* Anything except a regular INSN will stop our search. */
9207 if (! NONJUMP_INSN_P (floater))
9208 {
9209 floater = NULL;
9210 break;
9211 }
9212
9213 /* See if FLOATER is suitable for combination with the
9214 anchor. */
9215 floater_attr = get_attr_pa_combine_type (floater);
9216 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
9217 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
9218 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9219 && floater_attr == PA_COMBINE_TYPE_FMPY))
9220 {
9221 /* If ANCHOR and FLOATER can be combined, then we're
9222 done with this pass. */
9223 if (pa_can_combine_p (new_rtx, anchor, floater, 1,
9224 SET_DEST (PATTERN (floater)),
9225 XEXP (SET_SRC (PATTERN (floater)),
9226 0),
9227 XEXP (SET_SRC (PATTERN (floater)),
9228 1)))
9229 break;
9230 }
9231 }
9232 }
9233
9234 /* FLOATER will be nonzero if we found a suitable floating
9235 insn for combination with ANCHOR. */
9236 if (floater
9237 && (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9238 || anchor_attr == PA_COMBINE_TYPE_FMPY))
9239 {
9240 /* Emit the new instruction and delete the old anchor. */
9241 rtvec vtemp = gen_rtvec (2, copy_rtx (PATTERN (anchor)),
9242 copy_rtx (PATTERN (floater)));
9243 rtx temp = gen_rtx_PARALLEL (VOIDmode, vtemp);
9244 emit_insn_before (temp, anchor);
9245
9246 SET_INSN_DELETED (anchor);
9247
9248 /* Emit a special USE insn for FLOATER, then delete
9249 the floating insn. */
9250 temp = copy_rtx (PATTERN (floater));
9251 emit_insn_before (gen_rtx_USE (VOIDmode, temp), floater);
9252 delete_insn (floater);
9253
9254 continue;
9255 }
9256 else if (floater
9257 && anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH)
9258 {
9259 /* Emit the new_jump instruction and delete the old anchor. */
9260 rtvec vtemp = gen_rtvec (2, copy_rtx (PATTERN (anchor)),
9261 copy_rtx (PATTERN (floater)));
9262 rtx temp = gen_rtx_PARALLEL (VOIDmode, vtemp);
9263 temp = emit_jump_insn_before (temp, anchor);
9264
9265 JUMP_LABEL (temp) = JUMP_LABEL (anchor);
9266 SET_INSN_DELETED (anchor);
9267
9268 /* Emit a special USE insn for FLOATER, then delete
9269 the floating insn. */
9270 temp = copy_rtx (PATTERN (floater));
9271 emit_insn_before (gen_rtx_USE (VOIDmode, temp), floater);
9272 delete_insn (floater);
9273 continue;
9274 }
9275 }
9276 }
9277 }
9278
9279 static int
9280 pa_can_combine_p (rtx_insn *new_rtx, rtx_insn *anchor, rtx_insn *floater,
9281 int reversed, rtx dest,
9282 rtx src1, rtx src2)
9283 {
9284 int insn_code_number;
9285 rtx_insn *start, *end;
9286
9287 /* Create a PARALLEL with the patterns of ANCHOR and
9288 FLOATER, try to recognize it, then test constraints
9289 for the resulting pattern.
9290
9291 If the pattern doesn't match or the constraints
9292 aren't met keep searching for a suitable floater
9293 insn. */
9294 XVECEXP (PATTERN (new_rtx), 0, 0) = PATTERN (anchor);
9295 XVECEXP (PATTERN (new_rtx), 0, 1) = PATTERN (floater);
9296 INSN_CODE (new_rtx) = -1;
9297 insn_code_number = recog_memoized (new_rtx);
9298 basic_block bb = BLOCK_FOR_INSN (anchor);
9299 if (insn_code_number < 0
9300 || (extract_insn (new_rtx),
9301 !constrain_operands (1, get_preferred_alternatives (new_rtx, bb))))
9302 return 0;
9303
9304 if (reversed)
9305 {
9306 start = anchor;
9307 end = floater;
9308 }
9309 else
9310 {
9311 start = floater;
9312 end = anchor;
9313 }
9314
9315 /* There are up to three operands to consider. One
9316 output and two inputs.
9317
9318 The output must not be used between FLOATER & ANCHOR
9319 exclusive. The inputs must not be set between
9320 FLOATER and ANCHOR exclusive. */
9321
9322 if (reg_used_between_p (dest, start, end))
9323 return 0;
9324
9325 if (reg_set_between_p (src1, start, end))
9326 return 0;
9327
9328 if (reg_set_between_p (src2, start, end))
9329 return 0;
9330
9331 /* If we get here, then everything is good. */
9332 return 1;
9333 }
9334
9335 /* Return nonzero if references for INSN are delayed.
9336
9337 Millicode insns are actually function calls with some special
9338 constraints on arguments and register usage.
9339
9340 Millicode calls always expect their arguments in the integer argument
9341 registers, and always return their result in %r29 (ret1). They
9342 are expected to clobber their arguments, %r1, %r29, and the return
9343 pointer, which is %r31 on 32-bit and %r2 on 64-bit, and nothing else.
9344
9345 This function tells reorg that the references to arguments and
9346 millicode calls do not appear to happen until after the millicode call.
9347 This allows reorg to put insns which set the argument registers into the
9348 delay slot of the millicode call -- thus they act more like traditional
9349 CALL_INSNs.
9350
9351 Note we cannot consider side effects of the insn to be delayed because
9352 the branch and link insn will clobber the return pointer. If we happened
9353 to use the return pointer in the delay slot of the call, then we lose.
9354
9355 get_attr_type will try to recognize the given insn, so make sure to
9356 filter out things it will not accept -- SEQUENCE, USE and CLOBBER insns
9357 in particular. */
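/* Illustrative sequence (editorial; the register choices are
   hypothetical, following the conventions described above):

       ldi 42,%r25               ; second argument
       bl $$mulI,%r31            ; millicode call, return pointer in %r31
       copy %r4,%r26             ; argument setup filled into the delay slot
       copy %r29,%r28            ; result comes back in %r29 (ret1)
 */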
9358 int
9359 pa_insn_refs_are_delayed (rtx_insn *insn)
9360 {
9361 return ((NONJUMP_INSN_P (insn)
9362 && GET_CODE (PATTERN (insn)) != SEQUENCE
9363 && GET_CODE (PATTERN (insn)) != USE
9364 && GET_CODE (PATTERN (insn)) != CLOBBER
9365 && get_attr_type (insn) == TYPE_MILLI));
9366 }
9367
9368 /* Promote the return value, but not the arguments. */
9369
9370 static machine_mode
9371 pa_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
9372 machine_mode mode,
9373 int *punsignedp ATTRIBUTE_UNUSED,
9374 const_tree fntype ATTRIBUTE_UNUSED,
9375 int for_return)
9376 {
9377 if (for_return == 0)
9378 return mode;
9379 return promote_mode (type, mode, punsignedp);
9380 }
9381
9382 /* On the HP-PA the value is found in register(s) 28(-29), unless
9383 the mode is SF or DF. Then the value is returned in fr4 (32).
9384
9385 This must perform the same promotions as PROMOTE_MODE, else promoting
9386 return values in TARGET_PROMOTE_FUNCTION_MODE will not work correctly.
9387
9388 Small structures must be returned in a PARALLEL on PA64 in order
9389 to match the HP Compiler ABI. */
9390
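/* Editorial sketch: on PA64 a 12-byte aggregate is returned
   left-justified in GR 28-29 as

       (parallel:BLK [(expr_list (reg:DI 28) (const_int 0))
                      (expr_list (reg:DI 29) (const_int 8))])

   which matches the PARALLEL built below.  */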
9391 static rtx
9392 pa_function_value (const_tree valtype,
9393 const_tree func ATTRIBUTE_UNUSED,
9394 bool outgoing ATTRIBUTE_UNUSED)
9395 {
9396 machine_mode valmode;
9397
9398 if (AGGREGATE_TYPE_P (valtype)
9399 || TREE_CODE (valtype) == COMPLEX_TYPE
9400 || TREE_CODE (valtype) == VECTOR_TYPE)
9401 {
9402 HOST_WIDE_INT valsize = int_size_in_bytes (valtype);
9403
9404 /* Handle aggregates that fit exactly in a word or double word. */
9405 if ((valsize & (UNITS_PER_WORD - 1)) == 0)
9406 return gen_rtx_REG (TYPE_MODE (valtype), 28);
9407
9408 if (TARGET_64BIT)
9409 {
9410 /* Aggregates with a size less than or equal to 128 bits are
9411 returned in GR 28(-29). They are left justified. The pad
9412 bits are undefined. Larger aggregates are returned in
9413 memory. */
9414 rtx loc[2];
9415 int i, offset = 0;
9416 int ub = valsize <= UNITS_PER_WORD ? 1 : 2;
9417
9418 for (i = 0; i < ub; i++)
9419 {
9420 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9421 gen_rtx_REG (DImode, 28 + i),
9422 GEN_INT (offset));
9423 offset += 8;
9424 }
9425
9426 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (ub, loc));
9427 }
9428 else if (valsize > UNITS_PER_WORD)
9429 {
9430 /* Aggregates 5 to 8 bytes in size are returned in general
9431 registers r28-r29 in the same manner as other non
9432 floating-point objects. The data is right-justified and
9433 zero-extended to 64 bits. This is opposite to the normal
9434 justification used on big-endian targets and requires
9435 special treatment. */
9436 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9437 gen_rtx_REG (DImode, 28), const0_rtx);
9438 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9439 }
9440 }
9441
9442 if ((INTEGRAL_TYPE_P (valtype)
9443 && GET_MODE_BITSIZE (TYPE_MODE (valtype)) < BITS_PER_WORD)
9444 || POINTER_TYPE_P (valtype))
9445 valmode = word_mode;
9446 else
9447 valmode = TYPE_MODE (valtype);
9448
9449 if (TREE_CODE (valtype) == REAL_TYPE
9450 && !AGGREGATE_TYPE_P (valtype)
9451 && TYPE_MODE (valtype) != TFmode
9452 && !TARGET_SOFT_FLOAT)
9453 return gen_rtx_REG (valmode, 32);
9454
9455 return gen_rtx_REG (valmode, 28);
9456 }
9457
9458 /* Implement the TARGET_LIBCALL_VALUE hook. */
9459
9460 static rtx
9461 pa_libcall_value (machine_mode mode,
9462 const_rtx fun ATTRIBUTE_UNUSED)
9463 {
9464 if (! TARGET_SOFT_FLOAT
9465 && (mode == SFmode || mode == DFmode))
9466 return gen_rtx_REG (mode, 32);
9467 else
9468 return gen_rtx_REG (mode, 28);
9469 }
9470
9471 /* Implement the TARGET_FUNCTION_VALUE_REGNO_P hook. */
9472
9473 static bool
9474 pa_function_value_regno_p (const unsigned int regno)
9475 {
9476 if (regno == 28
9477 || (! TARGET_SOFT_FLOAT && regno == 32))
9478 return true;
9479
9480 return false;
9481 }
9482
9483 /* Update the data in CUM to advance over an argument
9484 of mode MODE and data type TYPE.
9485 (TYPE is null for libcalls where that information may not be available.) */
9486
9487 static void
9488 pa_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
9489 const_tree type, bool named ATTRIBUTE_UNUSED)
9490 {
9491 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9492 int arg_size = pa_function_arg_size (mode, type);
9493
9494 cum->nargs_prototype--;
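/* Multiword arguments are double-word aligned, so skip a padding
   slot when the current offset is odd.  Libcalls (null TYPE) are
   not padded (editorial comment).  */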
9495 cum->words += (arg_size
9496 + ((cum->words & 01)
9497 && type != NULL_TREE
9498 && arg_size > 1));
9499 }
9500
9501 /* Return the location of a parameter that is passed in a register or NULL
9502 if the parameter has any component that is passed in memory.
9503
9504 This is new code and will be pushed into the net sources after
9505 further testing.
9506
9507 ??? We might want to restructure this so that it looks more like other
9508 ports. */
9509 static rtx
9510 pa_function_arg (cumulative_args_t cum_v, machine_mode mode,
9511 const_tree type, bool named ATTRIBUTE_UNUSED)
9512 {
9513 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9514 int max_arg_words = (TARGET_64BIT ? 8 : 4);
9515 int alignment = 0;
9516 int arg_size;
9517 int fpr_reg_base;
9518 int gpr_reg_base;
9519 rtx retval;
9520
9521 if (mode == VOIDmode)
9522 return NULL_RTX;
9523
9524 arg_size = pa_function_arg_size (mode, type);
9525
9526 /* If this arg would be passed partially or totally on the stack, then
9527 this routine should return zero. pa_arg_partial_bytes will
9528 handle arguments which are split between regs and stack slots if
9529 the ABI mandates split arguments. */
9530 if (!TARGET_64BIT)
9531 {
9532 /* The 32-bit ABI does not split arguments. */
9533 if (cum->words + arg_size > max_arg_words)
9534 return NULL_RTX;
9535 }
9536 else
9537 {
9538 if (arg_size > 1)
9539 alignment = cum->words & 1;
9540 if (cum->words + alignment >= max_arg_words)
9541 return NULL_RTX;
9542 }
9543
9544 /* The 32-bit and 64-bit ABIs are rather different,
9545 particularly in their handling of FP registers. We might
9546 be able to cleverly share code between them, but I'm not
9547 going to bother in the hope that splitting them up results
9548 in code that is more easily understood. */
9549
9550 if (TARGET_64BIT)
9551 {
9552 /* Advance the base registers to their current locations.
9553
9554 Remember, gprs grow towards smaller register numbers while
9555 fprs grow to higher register numbers. Also remember that
9556 although FP regs are 32-bit addressable, we pretend that
9557 the registers are 64-bits wide. */
9558 gpr_reg_base = 26 - cum->words;
9559 fpr_reg_base = 32 + cum->words;
9560
9561 /* Arguments wider than one word and small aggregates need special
9562 treatment. */
9563 if (arg_size > 1
9564 || mode == BLKmode
9565 || (type && (AGGREGATE_TYPE_P (type)
9566 || TREE_CODE (type) == COMPLEX_TYPE
9567 || TREE_CODE (type) == VECTOR_TYPE)))
9568 {
9569 /* Double-extended precision (80-bit), quad-precision (128-bit)
9570 and aggregates including complex numbers are aligned on
9571 128-bit boundaries. The first eight 64-bit argument slots
9572 are associated one-to-one, with general registers r26
9573 through r19, and also with floating-point registers fr4
9574 through fr11. Arguments larger than one word are always
9575 passed in general registers.
9576
9577 Using a PARALLEL with a word mode register results in left
9578 justified data on a big-endian target. */
9579
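/* E.g. (editorial): a 16-byte aggregate starting at slot 0 yields

       (parallel [(expr_list (reg:DI 26) (const_int 0))
                  (expr_list (reg:DI 25) (const_int 8))])

   i.e. the data is spread left-justified across r26 and r25.  */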
9580 rtx loc[8];
9581 int i, offset = 0, ub = arg_size;
9582
9583 /* Align the base register. */
9584 gpr_reg_base -= alignment;
9585
9586 ub = MIN (ub, max_arg_words - cum->words - alignment);
9587 for (i = 0; i < ub; i++)
9588 {
9589 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9590 gen_rtx_REG (DImode, gpr_reg_base),
9591 GEN_INT (offset));
9592 gpr_reg_base -= 1;
9593 offset += 8;
9594 }
9595
9596 return gen_rtx_PARALLEL (mode, gen_rtvec_v (ub, loc));
9597 }
9598 }
9599 else
9600 {
9601 /* If the argument is larger than a word, then we know precisely
9602 which registers we must use. */
9603 if (arg_size > 1)
9604 {
9605 if (cum->words)
9606 {
9607 gpr_reg_base = 23;
9608 fpr_reg_base = 38;
9609 }
9610 else
9611 {
9612 gpr_reg_base = 25;
9613 fpr_reg_base = 34;
9614 }
9615
9616 /* Structures 5 to 8 bytes in size are passed in the general
9617 registers in the same manner as other non floating-point
9618 objects. The data is right-justified and zero-extended
9619 to 64 bits. This is opposite to the normal justification
9620 used on big-endian targets and requires special treatment.
9621 We now define BLOCK_REG_PADDING to pad these objects.
9622 Aggregates, complex and vector types are passed in the same
9623 manner as structures. */
9624 if (mode == BLKmode
9625 || (type && (AGGREGATE_TYPE_P (type)
9626 || TREE_CODE (type) == COMPLEX_TYPE
9627 || TREE_CODE (type) == VECTOR_TYPE)))
9628 {
9629 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9630 gen_rtx_REG (DImode, gpr_reg_base),
9631 const0_rtx);
9632 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9633 }
9634 }
9635 else
9636 {
9637 /* We have a single word (32 bits). A simple computation
9638 will get us the register #s we need. */
9639 gpr_reg_base = 26 - cum->words;
9640 fpr_reg_base = 32 + 2 * cum->words;
9641 }
9642 }
9643
9644 /* Determine if the argument needs to be passed in both general and
9645 floating point registers. */
9646 if (((TARGET_PORTABLE_RUNTIME || TARGET_64BIT || TARGET_ELF32)
9647 /* If we are doing soft-float with portable runtime, then there
9648 is no need to worry about FP regs. */
9649 && !TARGET_SOFT_FLOAT
9650 /* The parameter must be some kind of scalar float, else we just
9651 pass it in integer registers. */
9652 && GET_MODE_CLASS (mode) == MODE_FLOAT
9653 /* The target function must not have a prototype. */
9654 && cum->nargs_prototype <= 0
9655 /* libcalls do not need to pass items in both FP and general
9656 registers. */
9657 && type != NULL_TREE
9658 /* All this hair applies to "outgoing" args only. This includes
9659 sibcall arguments setup with FUNCTION_INCOMING_ARG. */
9660 && !cum->incoming)
9661 /* Also pass outgoing floating arguments in both registers in indirect
9662 calls with the 32-bit ABI and the HP assembler since there is no
9663 way to specify the argument locations in static functions. */
9664 || (!TARGET_64BIT
9665 && !TARGET_GAS
9666 && !cum->incoming
9667 && cum->indirect
9668 && GET_MODE_CLASS (mode) == MODE_FLOAT))
9669 {
9670 retval
9671 = gen_rtx_PARALLEL
9672 (mode,
9673 gen_rtvec (2,
9674 gen_rtx_EXPR_LIST (VOIDmode,
9675 gen_rtx_REG (mode, fpr_reg_base),
9676 const0_rtx),
9677 gen_rtx_EXPR_LIST (VOIDmode,
9678 gen_rtx_REG (mode, gpr_reg_base),
9679 const0_rtx)));
9680 }
9681 else
9682 {
9683 /* See if we should pass this parameter in a general register. */
9684 if (TARGET_SOFT_FLOAT
9685 /* Indirect calls in the normal 32bit ABI require all arguments
9686 to be passed in general registers. */
9687 || (!TARGET_PORTABLE_RUNTIME
9688 && !TARGET_64BIT
9689 && !TARGET_ELF32
9690 && cum->indirect)
9691 /* If the parameter is not a scalar floating-point parameter,
9692 then it belongs in GPRs. */
9693 || GET_MODE_CLASS (mode) != MODE_FLOAT
9694 /* Structure with single SFmode field belongs in GPR. */
9695 || (type && AGGREGATE_TYPE_P (type)))
9696 retval = gen_rtx_REG (mode, gpr_reg_base);
9697 else
9698 retval = gen_rtx_REG (mode, fpr_reg_base);
9699 }
9700 return retval;
9701 }
9702
9703 /* Arguments larger than one word are double-word aligned. */
9704
9705 static unsigned int
9706 pa_function_arg_boundary (machine_mode mode, const_tree type)
9707 {
9708 bool singleword = (type
9709 ? (integer_zerop (TYPE_SIZE (type))
9710 || !TREE_CONSTANT (TYPE_SIZE (type))
9711 || int_size_in_bytes (type) <= UNITS_PER_WORD)
9712 : GET_MODE_SIZE (mode) <= UNITS_PER_WORD);
9713
9714 return singleword ? PARM_BOUNDARY : MAX_PARM_BOUNDARY;
9715 }
9716
9717 /* If this arg would be passed totally in registers or totally on the stack,
9718 then this routine should return zero. */
9719
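/* Editorial example: on the 64-bit target, a four-word (32-byte)
   argument starting at slot 6 has its first two words in registers
   and the rest on the stack, so 2 * UNITS_PER_WORD = 16 bytes are
   reported as the partial (in-register) portion.  */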
9720 static int
9721 pa_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
9722 tree type, bool named ATTRIBUTE_UNUSED)
9723 {
9724 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9725 unsigned int max_arg_words = 8;
9726 unsigned int offset = 0;
9727
9728 if (!TARGET_64BIT)
9729 return 0;
9730
9731 if (pa_function_arg_size (mode, type) > 1 && (cum->words & 1))
9732 offset = 1;
9733
9734 if (cum->words + offset + pa_function_arg_size (mode, type) <= max_arg_words)
9735 /* Arg fits fully into registers. */
9736 return 0;
9737 else if (cum->words + offset >= max_arg_words)
9738 /* Arg fully on the stack. */
9739 return 0;
9740 else
9741 /* Arg is split. */
9742 return (max_arg_words - cum->words - offset) * UNITS_PER_WORD;
9743 }
9744
9745
9746 /* A get_unnamed_section callback for switching to the text section.
9747
9748 This function is only used with SOM. Because we don't support
9749 named subspaces, we can only create a new subspace or switch back
9750 to the default text subspace. */
9751
9752 static void
9753 som_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9754 {
9755 gcc_assert (TARGET_SOM);
9756 if (TARGET_GAS)
9757 {
9758 if (cfun && cfun->machine && !cfun->machine->in_nsubspa)
9759 {
9760 /* We only want to emit a .nsubspa directive once at the
9761 start of the function. */
9762 cfun->machine->in_nsubspa = 1;
9763
9764 /* Create a new subspace for the text. This provides
9765 better stub placement and one-only functions. */
9766 if (cfun->decl
9767 && DECL_ONE_ONLY (cfun->decl)
9768 && !DECL_WEAK (cfun->decl))
9769 {
9770 output_section_asm_op ("\t.SPACE $TEXT$\n"
9771 "\t.NSUBSPA $CODE$,QUAD=0,ALIGN=8,"
9772 "ACCESS=44,SORT=24,COMDAT");
9773 return;
9774 }
9775 }
9776 else
9777 {
9778 /* There isn't a current function or the body of the current
9779 function has been completed. So, we are changing to the
9780 text section to output debugging information. Thus, we
9781 need to forget that we are in the text section so that
9782 varasm.c will call us when text_section is selected again. */
9783 gcc_assert (!cfun || !cfun->machine
9784 || cfun->machine->in_nsubspa == 2);
9785 in_section = NULL;
9786 }
9787 output_section_asm_op ("\t.SPACE $TEXT$\n\t.NSUBSPA $CODE$");
9788 return;
9789 }
9790 output_section_asm_op ("\t.SPACE $TEXT$\n\t.SUBSPA $CODE$");
9791 }
9792
9793 /* A get_unnamed_section callback for switching to comdat data
9794 sections. This function is only used with SOM. */
9795
9796 static void
9797 som_output_comdat_data_section_asm_op (const void *data)
9798 {
9799 in_section = NULL;
9800 output_section_asm_op (data);
9801 }
9802
9803 /* Implement TARGET_ASM_INIT_SECTIONS. */
9804
9805 static void
9806 pa_som_asm_init_sections (void)
9807 {
9808 text_section
9809 = get_unnamed_section (0, som_output_text_section_asm_op, NULL);
9810
9811 /* SOM puts readonly data in the default $LIT$ subspace when PIC code
9812 is not being generated. */
9813 som_readonly_data_section
9814 = get_unnamed_section (0, output_section_asm_op,
9815 "\t.SPACE $TEXT$\n\t.SUBSPA $LIT$");
9816
9817 /* When secondary definitions are not supported, SOM makes readonly
9818 data one-only by creating a new $LIT$ subspace in $TEXT$ with
9819 the comdat flag. */
9820 som_one_only_readonly_data_section
9821 = get_unnamed_section (0, som_output_comdat_data_section_asm_op,
9822 "\t.SPACE $TEXT$\n"
9823 "\t.NSUBSPA $LIT$,QUAD=0,ALIGN=8,"
9824 "ACCESS=0x2c,SORT=16,COMDAT");
9825
9827 /* When secondary definitions are not supported, SOM makes data one-only
9828 by creating a new $DATA$ subspace in $PRIVATE$ with the comdat flag. */
9829 som_one_only_data_section
9830 = get_unnamed_section (SECTION_WRITE,
9831 som_output_comdat_data_section_asm_op,
9832 "\t.SPACE $PRIVATE$\n"
9833 "\t.NSUBSPA $DATA$,QUAD=1,ALIGN=8,"
9834 "ACCESS=31,SORT=24,COMDAT");
9835
9836 if (flag_tm)
9837 som_tm_clone_table_section
9838 = get_unnamed_section (0, output_section_asm_op,
9839 "\t.SPACE $PRIVATE$\n\t.SUBSPA $TM_CLONE_TABLE$");
9840
9841 /* FIXME: HPUX ld generates incorrect GOT entries for "T" fixups
9842 which reference data within the $TEXT$ space (for example constant
9843 strings in the $LIT$ subspace).
9844
9845 The assemblers (GAS and HP as) both have problems with handling
9846 the difference of two symbols which is the other correct way to
9847 reference constant data during PIC code generation.
9848
9849 So, there's no way to reference constant data which is in the
9850 $TEXT$ space during PIC generation. Instead place all constant
9851 data into the $PRIVATE$ subspace (this reduces sharing, but it
9852 works correctly). */
9853 readonly_data_section = flag_pic ? data_section : som_readonly_data_section;
9854
9855 /* We must not have a reference to an external symbol defined in a
9856 shared library in a readonly section, else the SOM linker will
9857 complain.
9858
9859 So, we force exception information into the data section. */
9860 exception_section = data_section;
9861 }
9862
9863 /* Implement TARGET_ASM_TM_CLONE_TABLE_SECTION. */
9864
9865 static section *
9866 pa_som_tm_clone_table_section (void)
9867 {
9868 return som_tm_clone_table_section;
9869 }
9870
9871 /* On hpux10, the linker will give an error if we have a reference
9872 in the read-only data section to a symbol defined in a shared
9873 library. Therefore, expressions that might require a reloc can
9874 not be placed in the read-only data section. */
9875
9876 static section *
9877 pa_select_section (tree exp, int reloc,
9878 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
9879 {
9880 if (TREE_CODE (exp) == VAR_DECL
9881 && TREE_READONLY (exp)
9882 && !TREE_THIS_VOLATILE (exp)
9883 && DECL_INITIAL (exp)
9884 && (DECL_INITIAL (exp) == error_mark_node
9885 || TREE_CONSTANT (DECL_INITIAL (exp)))
9886 && !reloc)
9887 {
9888 if (TARGET_SOM
9889 && DECL_ONE_ONLY (exp)
9890 && !DECL_WEAK (exp))
9891 return som_one_only_readonly_data_section;
9892 else
9893 return readonly_data_section;
9894 }
9895 else if (CONSTANT_CLASS_P (exp) && !reloc)
9896 return readonly_data_section;
9897 else if (TARGET_SOM
9898 && TREE_CODE (exp) == VAR_DECL
9899 && DECL_ONE_ONLY (exp)
9900 && !DECL_WEAK (exp))
9901 return som_one_only_data_section;
9902 else
9903 return data_section;
9904 }
9905
9906 /* Implement pa_reloc_rw_mask. */
9907
9908 static int
9909 pa_reloc_rw_mask (void)
9910 {
9911 /* We force (const (plus (symbol) (const_int))) to memory when the
9912 const_int doesn't fit in a 14-bit integer. The SOM linker can't
9913 handle this construct in read-only memory and we want to avoid
9914 this for ELF. So, we always force an RTX needing relocation to
9915 the data section. */
9916 return 3;
9917 }
9918
9919 static void
9920 pa_globalize_label (FILE *stream, const char *name)
9921 {
9922 /* We only handle DATA objects here; functions are globalized in
9923 ASM_DECLARE_FUNCTION_NAME. */
9924 if (! FUNCTION_NAME_P (name))
9925 {
9926 fputs ("\t.EXPORT ", stream);
9927 assemble_name (stream, name);
9928 fputs (",DATA\n", stream);
9929 }
9930 }
9931
9932 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
9933
9934 static rtx
9935 pa_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
9936 int incoming ATTRIBUTE_UNUSED)
9937 {
9938 return gen_rtx_REG (Pmode, PA_STRUCT_VALUE_REGNUM);
9939 }
9940
9941 /* Worker function for TARGET_RETURN_IN_MEMORY. */
9942
9943 bool
9944 pa_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
9945 {
9946 /* SOM ABI says that objects larger than 64 bits are returned in memory.
9947 PA64 ABI says that objects larger than 128 bits are returned in memory.
9948 Note, int_size_in_bytes can return -1 if the size of the object is
9949 variable or larger than the maximum value that can be expressed as
9950 a HOST_WIDE_INT. It can also return zero for an empty type. The
9951 simplest way to handle variable and empty types is to pass them in
9952 memory. This avoids problems in defining the boundaries of argument
9953 slots, allocating registers, etc. */
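/* E.g. (editorial): a 12-byte struct is returned in memory on the
   32-bit target (12 > 8) but in GR 28-29 on PA64 (12 <= 16).  */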
9954 return (int_size_in_bytes (type) > (TARGET_64BIT ? 16 : 8)
9955 || int_size_in_bytes (type) <= 0);
9956 }
9957
9958 /* Structure to hold declaration and name of external symbols that are
9959 emitted by GCC. We generate a vector of these symbols and output them
9960 at the end of the file if and only if SYMBOL_REF_REFERENCED_P is true.
9961 This avoids putting out names that are never really used. */
9962
9963 typedef struct GTY(()) extern_symbol
9964 {
9965 tree decl;
9966 const char *name;
9967 } extern_symbol;
9968
9969 /* Define gc'd vector type for extern_symbol. */
9970
9971 /* Vector of extern_symbol pointers. */
9972 static GTY(()) vec<extern_symbol, va_gc> *extern_symbols;
9973
9974 #ifdef ASM_OUTPUT_EXTERNAL_REAL
9975 /* Mark DECL (name NAME) as an external reference (assembler output
9976 file FILE). This saves the names to output at the end of the file
9977 if actually referenced. */
9978
9979 void
9980 pa_hpux_asm_output_external (FILE *file, tree decl, const char *name)
9981 {
9982 gcc_assert (file == asm_out_file);
9983 extern_symbol p = {decl, name};
9984 vec_safe_push (extern_symbols, p);
9985 }
9986 #endif
9987
9988 /* Output text required at the end of an assembler file.
9989 This includes deferred plabels and .import directives for
9990 all external symbols that were actually referenced. */
9991
9992 static void
9993 pa_file_end (void)
9994 {
9995 #ifdef ASM_OUTPUT_EXTERNAL_REAL
9996 unsigned int i;
9997 extern_symbol *p;
9998
9999 if (!NO_DEFERRED_PROFILE_COUNTERS)
10000 output_deferred_profile_counters ();
10001 #endif
10002
10003 output_deferred_plabels ();
10004
10005 #ifdef ASM_OUTPUT_EXTERNAL_REAL
10006 for (i = 0; vec_safe_iterate (extern_symbols, i, &p); i++)
10007 {
10008 tree decl = p->decl;
10009
10010 if (!TREE_ASM_WRITTEN (decl)
10011 && SYMBOL_REF_REFERENCED_P (XEXP (DECL_RTL (decl), 0)))
10012 ASM_OUTPUT_EXTERNAL_REAL (asm_out_file, decl, p->name);
10013 }
10014
10015 vec_free (extern_symbols);
10016 #endif
10017
10018 if (NEED_INDICATE_EXEC_STACK)
10019 file_end_indicate_exec_stack ();
10020 }
10021
10022 /* Implement TARGET_CAN_CHANGE_MODE_CLASS. */
10023
10024 static bool
10025 pa_can_change_mode_class (machine_mode from, machine_mode to,
10026 reg_class_t rclass)
10027 {
10028 if (from == to)
10029 return true;
10030
10031 if (GET_MODE_SIZE (from) == GET_MODE_SIZE (to))
10032 return true;
10033
10034 /* Reject changes to/from modes with zero size. */
10035 if (!GET_MODE_SIZE (from) || !GET_MODE_SIZE (to))
10036 return false;
10037
10038 /* Reject changes to/from complex and vector modes. */
10039 if (COMPLEX_MODE_P (from) || VECTOR_MODE_P (from)
10040 || COMPLEX_MODE_P (to) || VECTOR_MODE_P (to))
10041 return false;
10042
10043 /* There is no way to load QImode or HImode values directly from memory
10044 to a FP register. SImode loads to the FP registers are not zero
10045 extended. On the 64-bit target, this conflicts with the definition
10046 of LOAD_EXTEND_OP. Thus, we can't allow changing between modes with
10047 different sizes in the floating-point registers. */
10048 if (MAYBE_FP_REG_CLASS_P (rclass))
10049 return false;
10050
10051 /* TARGET_HARD_REGNO_MODE_OK places modes with sizes larger than a word
10052 in specific sets of registers. Thus, we cannot allow changing
10053 to a larger mode when it's larger than a word. */
10054 if (GET_MODE_SIZE (to) > UNITS_PER_WORD
10055 && GET_MODE_SIZE (to) > GET_MODE_SIZE (from))
10056 return false;
10057
10058 return true;
10059 }
10060
10061 /* Implement TARGET_MODES_TIEABLE_P.
10062
10063 We should return FALSE for QImode and HImode because these modes
10064 are not ok in the floating-point registers. However, this prevents
10065 tying these modes to SImode and DImode in the general registers.
10066 So, this isn't a good idea. We rely on TARGET_HARD_REGNO_MODE_OK and
10067 TARGET_CAN_CHANGE_MODE_CLASS to prevent these modes from being used
10068 in the floating-point registers. */
10069
10070 static bool
10071 pa_modes_tieable_p (machine_mode mode1, machine_mode mode2)
10072 {
10073 /* Don't tie modes in different classes. */
10074 if (GET_MODE_CLASS (mode1) != GET_MODE_CLASS (mode2))
10075 return false;
10076
10077 return true;
10078 }
10079
10080 \f
10081 /* Length in units of the trampoline instruction code. */
10082
10083 #define TRAMPOLINE_CODE_SIZE (TARGET_64BIT ? 24 : (TARGET_PA_20 ? 32 : 40))
10084
10086 /* Output assembler code for a block containing the constant parts
10087 of a trampoline, leaving space for the variable parts.
10088
10089 The trampoline sets the static chain pointer to STATIC_CHAIN_REGNUM
10090 and then branches to the specified routine.
10091
10092 This code template is copied from the text segment to a stack location
10093 and then patched by pa_trampoline_init to contain valid values,
10094 and then entered as a subroutine.
10095
10096 It is best to keep this as small as possible to avoid having to
10097 flush multiple lines in the cache. */
10098
10099 static void
10100 pa_asm_trampoline_template (FILE *f)
10101 {
10102 if (!TARGET_64BIT)
10103 {
10104 fputs ("\tldw 36(%r22),%r21\n", f);
10105 fputs ("\tbb,>=,n %r21,30,.+16\n", f);
10106 if (ASSEMBLER_DIALECT == 0)
10107 fputs ("\tdepi 0,31,2,%r21\n", f);
10108 else
10109 fputs ("\tdepwi 0,31,2,%r21\n", f);
10110 fputs ("\tldw 4(%r21),%r19\n", f);
10111 fputs ("\tldw 0(%r21),%r21\n", f);
10112 if (TARGET_PA_20)
10113 {
10114 fputs ("\tbve (%r21)\n", f);
10115 fputs ("\tldw 40(%r22),%r29\n", f);
10116 fputs ("\t.word 0\n", f);
10117 fputs ("\t.word 0\n", f);
10118 }
10119 else
10120 {
10121 fputs ("\tldsid (%r21),%r1\n", f);
10122 fputs ("\tmtsp %r1,%sr0\n", f);
10123 fputs ("\tbe 0(%sr0,%r21)\n", f);
10124 fputs ("\tldw 40(%r22),%r29\n", f);
10125 }
10126 fputs ("\t.word 0\n", f);
10127 fputs ("\t.word 0\n", f);
10128 fputs ("\t.word 0\n", f);
10129 fputs ("\t.word 0\n", f);
10130 }
10131 else
10132 {
10133 fputs ("\t.dword 0\n", f);
10134 fputs ("\t.dword 0\n", f);
10135 fputs ("\t.dword 0\n", f);
10136 fputs ("\t.dword 0\n", f);
10137 fputs ("\tmfia %r31\n", f);
10138 fputs ("\tldd 24(%r31),%r1\n", f);
10139 fputs ("\tldd 24(%r1),%r27\n", f);
10140 fputs ("\tldd 16(%r1),%r1\n", f);
10141 fputs ("\tbve (%r1)\n", f);
10142 fputs ("\tldd 32(%r31),%r31\n", f);
10143 fputs ("\t.dword 0 ; fptr\n", f);
10144 fputs ("\t.dword 0 ; static link\n", f);
10145 }
10146 }
10147
10148 /* Emit RTL insns to initialize the variable parts of a trampoline.
10149 FNADDR is an RTX for the address of the function's pure code.
10150 CXT is an RTX for the static chain value for the function.
10151
10152 Move the function address to the trampoline template at offset 36.
10153 Move the static chain value to trampoline template at offset 40.
10154 Move the trampoline address to trampoline template at offset 44.
10155 Move r19 to trampoline template at offset 48. The latter two
10156 words create a plabel for the indirect call to the trampoline.
10157
10158 A similar sequence is used for the 64-bit port but the plabel is
10159 at the beginning of the trampoline.
10160
10161 Finally, the cache entries for the trampoline code are flushed.
10162 This is necessary to ensure that the trampoline instruction sequence
10163 is written to memory prior to any attempts at prefetching the code
10164 sequence. */
10165
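/* Editorial summary of the 32-bit layout patched below:

       offset  0..35   instruction template
       offset 36       target function address
       offset 40       static chain value
       offset 44       trampoline address  \  plabel used for the
       offset 48       global pointer %r19 /  indirect call
 */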
10166 static void
10167 pa_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
10168 {
10169 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
10170 rtx start_addr = gen_reg_rtx (Pmode);
10171 rtx end_addr = gen_reg_rtx (Pmode);
10172 rtx line_length = gen_reg_rtx (Pmode);
10173 rtx r_tramp, tmp;
10174
10175 emit_block_move (m_tramp, assemble_trampoline_template (),
10176 GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);
10177 r_tramp = force_reg (Pmode, XEXP (m_tramp, 0));
10178
10179 if (!TARGET_64BIT)
10180 {
10181 tmp = adjust_address (m_tramp, Pmode, 36);
10182 emit_move_insn (tmp, fnaddr);
10183 tmp = adjust_address (m_tramp, Pmode, 40);
10184 emit_move_insn (tmp, chain_value);
10185
10186 /* Create a fat pointer for the trampoline. */
10187 tmp = adjust_address (m_tramp, Pmode, 44);
10188 emit_move_insn (tmp, r_tramp);
10189 tmp = adjust_address (m_tramp, Pmode, 48);
10190 emit_move_insn (tmp, gen_rtx_REG (Pmode, 19));
10191
10192 /* fdc and fic only use registers for the address to flush,
10193 they do not accept integer displacements. We align the
10194 start and end addresses to the beginning of their respective
10195 cache lines to minimize the number of lines flushed. */
10196 emit_insn (gen_andsi3 (start_addr, r_tramp,
10197 GEN_INT (-MIN_CACHELINE_SIZE)));
10198 tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp,
10199 TRAMPOLINE_CODE_SIZE-1));
10200 emit_insn (gen_andsi3 (end_addr, tmp,
10201 GEN_INT (-MIN_CACHELINE_SIZE)));
10202 emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
10203 emit_insn (gen_dcacheflushsi (start_addr, end_addr, line_length));
10204 emit_insn (gen_icacheflushsi (start_addr, end_addr, line_length,
10205 gen_reg_rtx (Pmode),
10206 gen_reg_rtx (Pmode)));
10207 }
10208 else
10209 {
10210 tmp = adjust_address (m_tramp, Pmode, 56);
10211 emit_move_insn (tmp, fnaddr);
10212 tmp = adjust_address (m_tramp, Pmode, 64);
10213 emit_move_insn (tmp, chain_value);
10214
10215 /* Create a fat pointer for the trampoline. */
10216 tmp = adjust_address (m_tramp, Pmode, 16);
10217 emit_move_insn (tmp, force_reg (Pmode, plus_constant (Pmode,
10218 r_tramp, 32)));
10219 tmp = adjust_address (m_tramp, Pmode, 24);
10220 emit_move_insn (tmp, gen_rtx_REG (Pmode, 27));
10221
10222 /* fdc and fic only use registers for the address to flush,
10223 they do not accept integer displacements. We align the
10224 start and end addresses to the beginning of their respective
10225 cache lines to minimize the number of lines flushed. */
10226 tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp, 32));
10227 emit_insn (gen_anddi3 (start_addr, tmp,
10228 GEN_INT (-MIN_CACHELINE_SIZE)));
10229 tmp = force_reg (Pmode, plus_constant (Pmode, tmp,
10230 TRAMPOLINE_CODE_SIZE - 1));
10231 emit_insn (gen_anddi3 (end_addr, tmp,
10232 GEN_INT (-MIN_CACHELINE_SIZE)));
10233 emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
10234 emit_insn (gen_dcacheflushdi (start_addr, end_addr, line_length));
10235 emit_insn (gen_icacheflushdi (start_addr, end_addr, line_length,
10236 gen_reg_rtx (Pmode),
10237 gen_reg_rtx (Pmode)));
10238 }
10239
10240 #ifdef HAVE_ENABLE_EXECUTE_STACK
10241 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
10242 LCT_NORMAL, VOIDmode, XEXP (m_tramp, 0), Pmode);
10243 #endif
10244 }
10245
10246 /* Perform any machine-specific adjustment in the address of the trampoline.
10247 ADDR contains the address that was passed to pa_trampoline_init.
10248 Adjust the trampoline address to point to the plabel at offset 44. */
10249
10250 static rtx
10251 pa_trampoline_adjust_address (rtx addr)
10252 {
10253 if (!TARGET_64BIT)
10254 addr = memory_address (Pmode, plus_constant (Pmode, addr, 46));
10255 return addr;
10256 }
10257
10258 static rtx
10259 pa_delegitimize_address (rtx orig_x)
10260 {
10261 rtx x = delegitimize_mem_from_attrs (orig_x);
10262
10263 if (GET_CODE (x) == LO_SUM
10264 && GET_CODE (XEXP (x, 1)) == UNSPEC
10265 && XINT (XEXP (x, 1), 1) == UNSPEC_DLTIND14R)
10266 return gen_const_mem (Pmode, XVECEXP (XEXP (x, 1), 0, 0));
10267 return x;
10268 }
10269 \f
10270 static rtx
10271 pa_internal_arg_pointer (void)
10272 {
10273 /* The argument pointer and the hard frame pointer are the same in
10274 the 32-bit runtime, so we don't need a copy. */
10275 if (TARGET_64BIT)
10276 return copy_to_reg (virtual_incoming_args_rtx);
10277 else
10278 return virtual_incoming_args_rtx;
10279 }
10280
10281 /* Given FROM and TO register numbers, say whether this elimination is allowed.
10282 Frame pointer elimination is automatically handled. */
10283
10284 static bool
10285 pa_can_eliminate (const int from, const int to)
10286 {
10287 /* The argument cannot be eliminated in the 64-bit runtime. */
10288 if (TARGET_64BIT && from == ARG_POINTER_REGNUM)
10289 return false;
10290
10291 return (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM
10292 ? ! frame_pointer_needed
10293 : true);
10294 }
10295
10296 /* Define the offset between two registers, FROM to be eliminated and its
10297 replacement TO, at the start of a routine. */
10298 HOST_WIDE_INT
10299 pa_initial_elimination_offset (int from, int to)
10300 {
10301 HOST_WIDE_INT offset;
10302
10303 if ((from == HARD_FRAME_POINTER_REGNUM || from == FRAME_POINTER_REGNUM)
10304 && to == STACK_POINTER_REGNUM)
10305 offset = -pa_compute_frame_size (get_frame_size (), 0);
10306 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
10307 offset = 0;
10308 else
10309 gcc_unreachable ();
10310
10311 return offset;
10312 }
10313
10314 static void
10315 pa_conditional_register_usage (void)
10316 {
10317 int i;
10318
10319 if (!TARGET_64BIT && !TARGET_PA_11)
10320 {
10321 for (i = 56; i <= FP_REG_LAST; i++)
10322 fixed_regs[i] = call_used_regs[i] = 1;
10323 for (i = 33; i < 56; i += 2)
10324 fixed_regs[i] = call_used_regs[i] = 1;
10325 }
10326 if (TARGET_DISABLE_FPREGS || TARGET_SOFT_FLOAT)
10327 {
10328 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
10329 fixed_regs[i] = call_used_regs[i] = 1;
10330 }
10331 if (flag_pic)
10332 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
10333 }
10334
10335 /* Target hook for c_mode_for_suffix. */
10336
10337 static machine_mode
10338 pa_c_mode_for_suffix (char suffix)
10339 {
10340 if (HPUX_LONG_DOUBLE_LIBRARY)
10341 {
10342 if (suffix == 'q')
10343 return TFmode;
10344 }
10345
10346 return VOIDmode;
10347 }
10348
10349 /* Target hook for function_section. */
10350
10351 static section *
10352 pa_function_section (tree decl, enum node_frequency freq,
10353 bool startup, bool exit)
10354 {
10355 /* Put functions in text section if target doesn't have named sections. */
10356 if (!targetm_common.have_named_sections)
10357 return text_section;
10358
10359 /* Force nested functions into the same section as the containing
10360 function. */
10361 if (decl
10362 && DECL_SECTION_NAME (decl) == NULL
10363 && DECL_CONTEXT (decl) != NULL_TREE
10364 && TREE_CODE (DECL_CONTEXT (decl)) == FUNCTION_DECL
10365 && DECL_SECTION_NAME (DECL_CONTEXT (decl)) == NULL)
10366 return function_section (DECL_CONTEXT (decl));
10367
10368 /* Otherwise, use the default function section. */
10369 return default_function_section (decl, freq, startup, exit);
10370 }
10371
10372 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
10373
10374 In 64-bit mode, we reject CONST_DOUBLES. We also reject CONST_INTS
10375 that need more than three instructions to load prior to reload. This
10376 limit is somewhat arbitrary. It takes three instructions to load a
10377 CONST_INT from memory but two are memory accesses. It may be better
10378 to increase the allowed range for CONST_INTS. We may also be able
10379 to handle CONST_DOUBLES. */
10380
10381 static bool
10382 pa_legitimate_constant_p (machine_mode mode, rtx x)
10383 {
10384 if (GET_MODE_CLASS (mode) == MODE_FLOAT && x != CONST0_RTX (mode))
10385 return false;
10386
10387 if (!NEW_HP_ASSEMBLER && !TARGET_GAS && GET_CODE (x) == LABEL_REF)
10388 return false;
10389
10390 /* TLS_MODEL_GLOBAL_DYNAMIC and TLS_MODEL_LOCAL_DYNAMIC are not
10391 legitimate constants. The other variants can't be handled by
10392 the move patterns after reload starts. */
10393 if (tls_referenced_p (x))
10394 return false;
10395
10396 if (TARGET_64BIT && GET_CODE (x) == CONST_DOUBLE)
10397 return false;
10398
10399 if (TARGET_64BIT
10400 && HOST_BITS_PER_WIDE_INT > 32
10401 && GET_CODE (x) == CONST_INT
10402 && !reload_in_progress
10403 && !reload_completed
10404 && !LEGITIMATE_64BIT_CONST_INT_P (INTVAL (x))
10405 && !pa_cint_ok_for_move (UINTVAL (x)))
10406 return false;
10407
10408 if (function_label_operand (x, mode))
10409 return false;
10410
10411 return true;
10412 }
10413
10414 /* Implement TARGET_SECTION_TYPE_FLAGS. */
10415
10416 static unsigned int
10417 pa_section_type_flags (tree decl, const char *name, int reloc)
10418 {
10419 unsigned int flags;
10420
10421 flags = default_section_type_flags (decl, name, reloc);
10422
10423 /* Function labels are placed in the constant pool. This can
10424 cause a section conflict if decls are put in ".data.rel.ro"
10425 or ".data.rel.ro.local" using the __attribute__ construct. */
10426 if (strcmp (name, ".data.rel.ro") == 0
10427 || strcmp (name, ".data.rel.ro.local") == 0)
10428 flags |= SECTION_WRITE | SECTION_RELRO;
10429
10430 return flags;
10431 }
10432
10433 /* pa_legitimate_address_p recognizes an RTL expression that is a
10434 valid memory address for an instruction. The MODE argument is the
10435 machine mode for the MEM expression that wants to use this address.
10436
10437 On HP PA-RISC, the legitimate address forms are REG+SMALLINT,
10438 REG+REG, and REG+(REG*SCALE). The indexed address forms are only
10439 available with floating point loads and stores, and integer loads.
10440 We get better code by allowing indexed addresses in the initial
10441 RTL generation.
10442
10443 The acceptance of indexed addresses as legitimate implies that we
10444 must provide patterns for doing indexed integer stores, or the move
10445 expanders must force the address of an indexed store to a register.
10446 We have adopted the latter approach.
10447
10448 Another function of pa_legitimate_address_p is to ensure that
10449 the base register is a valid pointer for indexed instructions.
10450 On targets that have non-equivalent space registers, we have to
10451 know at the time of assembler output which register in a REG+REG
10452 pair is the base register. The REG_POINTER flag is sometimes lost
10453 in reload and the following passes, so it can't be relied on during
10454 code generation. Thus, we either have to canonicalize the order
10455 of the registers in REG+REG indexed addresses, or treat REG+REG
10456 addresses separately and provide patterns for both permutations.
10457
10458 The latter approach requires several hundred additional lines of
10459 code in pa.md. The downside to canonicalizing is that a PLUS
10460 in the wrong order can't combine to form a scaled indexed
10461 memory operand. As we won't need to canonicalize the operands if
10462 the REG_POINTER lossage can be fixed, it seems better to canonicalize.
10463
10464 We initially break out scaled indexed addresses in canonical order
10465 in pa_emit_move_sequence. LEGITIMIZE_ADDRESS also canonicalizes
10466 scaled indexed addresses during RTL generation. However, fold_rtx
10467 has its own opinion on how the operands of a PLUS should be ordered.
10468 If one of the operands is equivalent to a constant, it will make
10469 that operand the second operand. As the base register is likely to
10470 be equivalent to a SYMBOL_REF, we have made it the second operand.
10471
10472 pa_legitimate_address_p accepts REG+REG as legitimate when the
10473 operands are in the order INDEX+BASE on targets with non-equivalent
10474 space registers, and in any order on targets with equivalent space
10475 registers. It accepts both MULT+BASE and BASE+MULT for scaled indexing.
10476
10477 We treat a SYMBOL_REF as legitimate if it is part of the current
10478 function's constant-pool, because such addresses can actually be
10479 output as REG+SMALLINT. */
10480
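/* Editorial examples of addresses accepted below:

       (mem:SI (plus:SI (reg %r25) (const_int 12)))           REG+SMALLINT
       (mem:SF (plus:SI (reg idx) (reg/f base)))              REG+REG
       (mem:SI (plus:SI (mult:SI (reg idx) (const_int 4))
                        (reg/f base)))                        scaled index
 */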
10481 static bool
10482 pa_legitimate_address_p (machine_mode mode, rtx x, bool strict)
10483 {
10484 if ((REG_P (x)
10485 && (strict ? STRICT_REG_OK_FOR_BASE_P (x)
10486 : REG_OK_FOR_BASE_P (x)))
10487 || ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_DEC
10488 || GET_CODE (x) == PRE_INC || GET_CODE (x) == POST_INC)
10489 && REG_P (XEXP (x, 0))
10490 && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 0))
10491 : REG_OK_FOR_BASE_P (XEXP (x, 0)))))
10492 return true;
10493
10494 if (GET_CODE (x) == PLUS)
10495 {
10496 rtx base, index;
10497
10498 /* For REG+REG, the base register should be in XEXP (x, 1),
10499 so check it first. */
10500 if (REG_P (XEXP (x, 1))
10501 && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 1))
10502 : REG_OK_FOR_BASE_P (XEXP (x, 1))))
10503 base = XEXP (x, 1), index = XEXP (x, 0);
10504 else if (REG_P (XEXP (x, 0))
10505 && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 0))
10506 : REG_OK_FOR_BASE_P (XEXP (x, 0))))
10507 base = XEXP (x, 0), index = XEXP (x, 1);
10508 else
10509 return false;
10510
10511 if (GET_CODE (index) == CONST_INT)
10512 {
10513 if (INT_5_BITS (index))
10514 return true;
10515
10516 /* When INT14_OK_STRICT is false, a secondary reload is needed
10517 to adjust the displacement of SImode and DImode floating point
10518 instructions but this may fail when the register also needs
10519 reloading. So, we return false when STRICT is true. We
10520 also reject long displacements for float mode addresses since
10521 the majority of accesses will use floating point instructions
10522 that don't support 14-bit offsets. */
10523 if (!INT14_OK_STRICT
10524 && (strict || !(reload_in_progress || reload_completed))
10525 && mode != QImode
10526 && mode != HImode)
10527 return false;
10528
10529 return base14_operand (index, mode);
10530 }
10531
10532 if (!TARGET_DISABLE_INDEXING
10533 /* Only accept the "canonical" INDEX+BASE operand order
10534 on targets with non-equivalent space registers. */
10535 && (TARGET_NO_SPACE_REGS
10536 ? REG_P (index)
10537 : (base == XEXP (x, 1) && REG_P (index)
10538 && (reload_completed
10539 || (reload_in_progress && HARD_REGISTER_P (base))
10540 || REG_POINTER (base))
10541 && (reload_completed
10542 || (reload_in_progress && HARD_REGISTER_P (index))
10543 || !REG_POINTER (index))))
10544 && MODE_OK_FOR_UNSCALED_INDEXING_P (mode)
10545 && (strict ? STRICT_REG_OK_FOR_INDEX_P (index)
10546 : REG_OK_FOR_INDEX_P (index))
10547 && borx_reg_operand (base, Pmode)
10548 && borx_reg_operand (index, Pmode))
10549 return true;
10550
10551 if (!TARGET_DISABLE_INDEXING
10552 && GET_CODE (index) == MULT
10553 /* Only accept base operands with the REG_POINTER flag prior to
10554 reload on targets with non-equivalent space registers. */
10555 && (TARGET_NO_SPACE_REGS
10556 || (base == XEXP (x, 1)
10557 && (reload_completed
10558 || (reload_in_progress && HARD_REGISTER_P (base))
10559 || REG_POINTER (base))))
10560 && REG_P (XEXP (index, 0))
10561 && GET_MODE (XEXP (index, 0)) == Pmode
10562 && MODE_OK_FOR_SCALED_INDEXING_P (mode)
10563 && (strict ? STRICT_REG_OK_FOR_INDEX_P (XEXP (index, 0))
10564 : REG_OK_FOR_INDEX_P (XEXP (index, 0)))
10565 && GET_CODE (XEXP (index, 1)) == CONST_INT
10566 && INTVAL (XEXP (index, 1))
10567 == (HOST_WIDE_INT) GET_MODE_SIZE (mode)
10568 && borx_reg_operand (base, Pmode))
10569 return true;
10570
10571 return false;
10572 }
10573
10574 if (GET_CODE (x) == LO_SUM)
10575 {
10576 rtx y = XEXP (x, 0);
10577
10578 if (GET_CODE (y) == SUBREG)
10579 y = SUBREG_REG (y);
10580
10581 if (REG_P (y)
10582 && (strict ? STRICT_REG_OK_FOR_BASE_P (y)
10583 : REG_OK_FOR_BASE_P (y)))
10584 {
10585 /* Needed for -fPIC */
10586 if (mode == Pmode
10587 && GET_CODE (XEXP (x, 1)) == UNSPEC)
10588 return true;
10589
10590 if (!INT14_OK_STRICT
10591 && (strict || !(reload_in_progress || reload_completed))
10592 && mode != QImode
10593 && mode != HImode)
10594 return false;
10595
10596 if (CONSTANT_P (XEXP (x, 1)))
10597 return true;
10598 }
10599 return false;
10600 }
10601
10602 if (GET_CODE (x) == CONST_INT && INT_5_BITS (x))
10603 return true;
10604
10605 return false;
10606 }
10607
10608 /* Look for machine dependent ways to make the invalid address AD a
10609 valid address.
10610
10611 For the PA, transform:
10612
10613 memory(X + <large int>)
10614
10615 into:
10616
10617 if (<large int> & mask) >= 16
10618 Y = (<large int> & ~mask) + mask + 1 Round up.
10619 else
10620 Y = (<large int> & ~mask) Round down.
10621 Z = X + Y
10622 memory (Z + (<large int> - Y));
10623
10624 This makes reload inheritance and reload_cse work better since Z
10625 can be reused.
10626
10627 There may be more opportunities to improve code with this hook. */
10628
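/* Concrete case (editorial): for an SFmode access at X + 101 the mask
   is 0x1f, and 101 & 0x1f = 5 < 16, so we round down: Y = 96 is
   reloaded into Z = X + 96 and the access becomes memory (Z + 5),
   a valid 5-bit displacement.  Z can then be inherited by nearby
   reloads with similar offsets.  */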
rtx
pa_legitimize_reload_address (rtx ad, machine_mode mode,
			      int opnum, int type,
			      int ind_levels ATTRIBUTE_UNUSED)
{
  HOST_WIDE_INT offset, newoffset, mask;
  rtx new_rtx, temp = NULL_RTX;

  mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
	  && !INT14_OK_STRICT ? 0x1f : 0x3fff);

  if (optimize && GET_CODE (ad) == PLUS)
    temp = simplify_binary_operation (PLUS, Pmode,
				      XEXP (ad, 0), XEXP (ad, 1));

  new_rtx = temp ? temp : ad;

  if (optimize
      && GET_CODE (new_rtx) == PLUS
      && GET_CODE (XEXP (new_rtx, 0)) == REG
      && GET_CODE (XEXP (new_rtx, 1)) == CONST_INT)
    {
      offset = INTVAL (XEXP (new_rtx, 1));

      /* Choose rounding direction.  Round up if we are >= halfway.  */
      if ((offset & mask) >= ((mask + 1) / 2))
	newoffset = (offset & ~mask) + mask + 1;
      else
	newoffset = offset & ~mask;

      /* Ensure that long displacements are aligned.  */
      if (mask == 0x3fff
	  && (GET_MODE_CLASS (mode) == MODE_FLOAT
	      || (TARGET_64BIT && mode == DImode)))
	newoffset &= ~(GET_MODE_SIZE (mode) - 1);

      if (newoffset != 0 && VAL_14_BITS_P (newoffset))
	{
	  temp = gen_rtx_PLUS (Pmode, XEXP (new_rtx, 0),
			       GEN_INT (newoffset));
	  ad = gen_rtx_PLUS (Pmode, temp, GEN_INT (offset - newoffset));
	  push_reload (XEXP (ad, 0), 0, &XEXP (ad, 0), 0,
		       BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
		       opnum, (enum reload_type) type);
	  return ad;
	}
    }

  return NULL_RTX;
}

/* Output address vector.  */

void
pa_output_addr_vec (rtx lab, rtx body)
{
  int idx, vlen = XVECLEN (body, 0);

  targetm.asm_out.internal_label (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
  if (TARGET_GAS)
    fputs ("\t.begin_brtab\n", asm_out_file);
  for (idx = 0; idx < vlen; idx++)
    {
      ASM_OUTPUT_ADDR_VEC_ELT
	(asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
    }
  if (TARGET_GAS)
    fputs ("\t.end_brtab\n", asm_out_file);
}

/* Output address difference vector.  */

void
pa_output_addr_diff_vec (rtx lab, rtx body)
{
  rtx base = XEXP (XEXP (body, 0), 0);
  int idx, vlen = XVECLEN (body, 1);

  targetm.asm_out.internal_label (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
  if (TARGET_GAS)
    fputs ("\t.begin_brtab\n", asm_out_file);
  for (idx = 0; idx < vlen; idx++)
    {
      ASM_OUTPUT_ADDR_DIFF_ELT
	(asm_out_file,
	 body,
	 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
	 CODE_LABEL_NUMBER (base));
    }
  if (TARGET_GAS)
    fputs ("\t.end_brtab\n", asm_out_file);
}
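
/* Illustrative sketch of the emitted table (assuming the usual L$nnnn
   internal-label spelling and .word table entries of the 32-bit port):
   a three-entry jump table from pa_output_addr_vec comes out roughly as

	L$0040:
		.begin_brtab
		.word L$0041
		.word L$0042
		.word L$0043
		.end_brtab

   where .begin_brtab/.end_brtab mark the extent of the branch table so
   the assembler and linker do not treat the entries as code.  */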

/* This is a helper function for the other atomic operations.  It emits
   a loop containing SEQ that iterates until the compare-and-swap
   operation at the end succeeds.  MEM is the memory to be modified.  SEQ is
   a set of instructions that takes a value from OLD_REG as an input and
   produces a value in NEW_REG as an output.  Before SEQ, OLD_REG will be
   set to the current contents of MEM.  After SEQ, a compare-and-swap will
   attempt to update MEM with NEW_REG.  The function returns true when the
   loop was generated successfully.  */

static bool
pa_expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
{
  machine_mode mode = GET_MODE (mem);
  rtx_code_label *label;
  rtx cmp_reg, success, oldval;

  /* The loop we want to generate looks like

	cmp_reg = mem;
      label:
	old_reg = cmp_reg;
	seq;
	(success, cmp_reg) = compare-and-swap (mem, old_reg, new_reg)
	if (!success)
	  goto label;

     Note that we only do the plain load from memory once.  Subsequent
     iterations use the value loaded by the compare-and-swap pattern.  */

  label = gen_label_rtx ();
  cmp_reg = gen_reg_rtx (mode);

  emit_move_insn (cmp_reg, mem);
  emit_label (label);
  emit_move_insn (old_reg, cmp_reg);
  if (seq)
    emit_insn (seq);

  success = NULL_RTX;
  oldval = cmp_reg;
  if (!expand_atomic_compare_and_swap (&success, &oldval, mem, old_reg,
				       new_reg, false, MEMMODEL_SYNC_SEQ_CST,
				       MEMMODEL_RELAXED))
    return false;

  if (oldval != cmp_reg)
    emit_move_insn (cmp_reg, oldval);

  /* Mark this jump predicted not taken.  */
  emit_cmp_and_jump_insns (success, const0_rtx, EQ, const0_rtx,
			   GET_MODE (success), 1, label,
			   profile_probability::guessed_never ());
  return true;
}
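
/* A minimal sketch of a hypothetical caller (expand_atomic_or_via_cas_loop
   is illustrative only, not part of this port): build SEQ to compute
   NEW_REG = OLD_REG | VAL, then let the loop above retry until the
   compare-and-swap commits.  */
#if 0
static rtx
expand_atomic_or_via_cas_loop (rtx mem, rtx val)
{
  machine_mode mode = GET_MODE (mem);
  rtx old_reg = gen_reg_rtx (mode);
  rtx new_reg = gen_reg_rtx (mode);
  rtx tmp, seq;

  /* Capture the loop body as a detached insn sequence.  */
  start_sequence ();
  tmp = expand_simple_binop (mode, IOR, old_reg, val, new_reg,
			     1, OPTAB_LIB_WIDEN);
  if (tmp != new_reg)
    emit_move_insn (new_reg, tmp);
  seq = get_insns ();
  end_sequence ();

  /* Emit the retry loop; OLD_REG ends up holding the value fetched
     before the successful update, as a fetch-and-or would return.  */
  if (!pa_expand_compare_and_swap_loop (mem, old_reg, new_reg, seq))
    return NULL_RTX;
  return old_reg;
}
#endif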

/* This function tries to implement an atomic exchange operation using a
   compare_and_swap loop.  VAL is written to *MEM.  The previous contents of
   *MEM are returned, using TARGET if possible.  No memory model is required
   since a compare_and_swap loop is seq-cst.  */

rtx
pa_maybe_emit_compare_and_swap_exchange_loop (rtx target, rtx mem, rtx val)
{
  machine_mode mode = GET_MODE (mem);

  if (can_compare_and_swap_p (mode, true))
    {
      if (!target || !register_operand (target, mode))
	target = gen_reg_rtx (mode);
      if (pa_expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
	return target;
    }

  return NULL_RTX;
}
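
/* For illustration: no extra SEQ is needed for exchange because the loop
   helper already copies the current memory contents into OLD_REG (here
   TARGET) before each compare-and-swap attempt; passing VAL directly as
   NEW_REG makes each iteration try to store VAL while capturing the
   previous value in TARGET.  */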

/* Implement TARGET_CALLEE_COPIES.  The callee is responsible for copying
   arguments passed by hidden reference in the 32-bit HP runtime.  Users
   can override this behavior for better compatibility with OpenMP at the
   risk of library incompatibilities.  Arguments are always passed by value
   in the 64-bit HP runtime.  */

static bool
pa_callee_copies (cumulative_args_t cum ATTRIBUTE_UNUSED,
		  machine_mode mode ATTRIBUTE_UNUSED,
		  const_tree type ATTRIBUTE_UNUSED,
		  bool named ATTRIBUTE_UNUSED)
{
  return !TARGET_CALLER_COPIES;
}

/* Implement TARGET_HARD_REGNO_NREGS.  */

static unsigned int
pa_hard_regno_nregs (unsigned int regno ATTRIBUTE_UNUSED, machine_mode mode)
{
  return PA_HARD_REGNO_NREGS (regno, mode);
}

/* Implement TARGET_HARD_REGNO_MODE_OK.  */

static bool
pa_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
{
  return PA_HARD_REGNO_MODE_OK (regno, mode);
}

/* Implement TARGET_STARTING_FRAME_OFFSET.

   On the 32-bit ports, we reserve one slot for the previous frame
   pointer and one fill slot.  The fill slot is for compatibility
   with HP compiled programs.  On the 64-bit ports, we reserve one
   slot for the previous frame pointer.  Either way the reserved
   space is 8 bytes: two 4-byte slots on the 32-bit ports, one
   8-byte slot on the 64-bit ports.  */

static HOST_WIDE_INT
pa_starting_frame_offset (void)
{
  return 8;
}

/* Figure out the size in words of the function argument.  The size
   returned by this function should always be greater than zero because
   we pass variable and zero sized objects by reference.  */

HOST_WIDE_INT
pa_function_arg_size (machine_mode mode, const_tree type)
{
  HOST_WIDE_INT size;

  size = mode != BLKmode ? GET_MODE_SIZE (mode) : int_size_in_bytes (type);
  return CEIL (size, UNITS_PER_WORD);
}

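/* A worked example (illustrative only): on the 32-bit ports, where
   UNITS_PER_WORD is 4, a DImode argument has GET_MODE_SIZE == 8 and so
   occupies CEIL (8, 4) == 2 words, while a 3-byte BLKmode struct
   occupies CEIL (3, 4) == 1 word.  Variable-sized and zero-sized
   objects never reach the CEIL computation because they are passed by
   reference, keeping the result strictly positive.  */
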
#include "gt-pa.h"