/* Subroutines used for code generation on IBM S/390 and zSeries
   Copyright (C) 1999-2013 Free Software Foundation, Inc.
   Contributed by Hartmut Penner (hpenner@de.ibm.com) and
                  Ulrich Weigand (uweigand@de.ibm.com) and
                  Andreas Krebbel (Andreas.Krebbel@de.ibm.com).

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "diagnostic-core.h"
#include "basic-block.h"
#include "target-def.h"
#include "langhooks.h"
/* Define the specific costs for a given cpu.  */

struct processor_costs
{
  /* multiplication */
  const int m;        /* cost of an M instruction.  */
  const int mghi;     /* cost of an MGHI instruction.  */
  const int mh;       /* cost of an MH instruction.  */
  const int mhi;      /* cost of an MHI instruction.  */
  const int ml;       /* cost of an ML instruction.  */
  const int mr;       /* cost of an MR instruction.  */
  const int ms;       /* cost of an MS instruction.  */
  const int msg;      /* cost of an MSG instruction.  */
  const int msgf;     /* cost of an MSGF instruction.  */
  const int msgfr;    /* cost of an MSGFR instruction.  */
  const int msgr;     /* cost of an MSGR instruction.  */
  const int msr;      /* cost of an MSR instruction.  */
  const int mult_df;  /* cost of multiplication in DFmode.  */
  const int mxbr;
  /* square root */
  const int sqxbr;    /* cost of square root in TFmode.  */
  const int sqdbr;    /* cost of square root in DFmode.  */
  const int sqebr;    /* cost of square root in SFmode.  */
  /* multiply and add */
  const int madbr;    /* cost of multiply and add in DFmode.  */
  const int maebr;    /* cost of multiply and add in SFmode.  */
  /* division */
  const int dxbr;
  const int ddbr;
  const int debr;
  const int dlgr;
  const int dlr;
  const int dr;
  const int dsgfr;
  const int dsgr;
};
const struct processor_costs *s390_cost;
static const
struct processor_costs z900_cost =
{
  COSTS_N_INSNS (5),     /* M     */
  COSTS_N_INSNS (10),    /* MGHI  */
  COSTS_N_INSNS (5),     /* MH    */
  COSTS_N_INSNS (4),     /* MHI   */
  COSTS_N_INSNS (5),     /* ML    */
  COSTS_N_INSNS (5),     /* MR    */
  COSTS_N_INSNS (4),     /* MS    */
  COSTS_N_INSNS (15),    /* MSG   */
  COSTS_N_INSNS (7),     /* MSGF  */
  COSTS_N_INSNS (7),     /* MSGFR */
  COSTS_N_INSNS (10),    /* MSGR  */
  COSTS_N_INSNS (4),     /* MSR   */
  COSTS_N_INSNS (7),     /* multiplication in DFmode */
  COSTS_N_INSNS (13),    /* MXBR */
  COSTS_N_INSNS (136),   /* SQXBR */
  COSTS_N_INSNS (44),    /* SQDBR */
  COSTS_N_INSNS (35),    /* SQEBR */
  COSTS_N_INSNS (18),    /* MADBR */
  COSTS_N_INSNS (13),    /* MAEBR */
  COSTS_N_INSNS (134),   /* DXBR */
  COSTS_N_INSNS (30),    /* DDBR */
  COSTS_N_INSNS (27),    /* DEBR */
  COSTS_N_INSNS (220),   /* DLGR */
  COSTS_N_INSNS (34),    /* DLR */
  COSTS_N_INSNS (34),    /* DR */
  COSTS_N_INSNS (32),    /* DSGFR */
  COSTS_N_INSNS (32),    /* DSGR */
};
static const
struct processor_costs z990_cost =
{
  COSTS_N_INSNS (4),     /* M     */
  COSTS_N_INSNS (2),     /* MGHI  */
  COSTS_N_INSNS (2),     /* MH    */
  COSTS_N_INSNS (2),     /* MHI   */
  COSTS_N_INSNS (4),     /* ML    */
  COSTS_N_INSNS (4),     /* MR    */
  COSTS_N_INSNS (5),     /* MS    */
  COSTS_N_INSNS (6),     /* MSG   */
  COSTS_N_INSNS (4),     /* MSGF  */
  COSTS_N_INSNS (4),     /* MSGFR */
  COSTS_N_INSNS (4),     /* MSGR  */
  COSTS_N_INSNS (4),     /* MSR   */
  COSTS_N_INSNS (1),     /* multiplication in DFmode */
  COSTS_N_INSNS (28),    /* MXBR */
  COSTS_N_INSNS (130),   /* SQXBR */
  COSTS_N_INSNS (66),    /* SQDBR */
  COSTS_N_INSNS (38),    /* SQEBR */
  COSTS_N_INSNS (1),     /* MADBR */
  COSTS_N_INSNS (1),     /* MAEBR */
  COSTS_N_INSNS (60),    /* DXBR */
  COSTS_N_INSNS (40),    /* DDBR */
  COSTS_N_INSNS (26),    /* DEBR */
  COSTS_N_INSNS (176),   /* DLGR */
  COSTS_N_INSNS (31),    /* DLR */
  COSTS_N_INSNS (31),    /* DR */
  COSTS_N_INSNS (31),    /* DSGFR */
  COSTS_N_INSNS (31),    /* DSGR */
};
static const
struct processor_costs z9_109_cost =
{
  COSTS_N_INSNS (4),     /* M     */
  COSTS_N_INSNS (2),     /* MGHI  */
  COSTS_N_INSNS (2),     /* MH    */
  COSTS_N_INSNS (2),     /* MHI   */
  COSTS_N_INSNS (4),     /* ML    */
  COSTS_N_INSNS (4),     /* MR    */
  COSTS_N_INSNS (5),     /* MS    */
  COSTS_N_INSNS (6),     /* MSG   */
  COSTS_N_INSNS (4),     /* MSGF  */
  COSTS_N_INSNS (4),     /* MSGFR */
  COSTS_N_INSNS (4),     /* MSGR  */
  COSTS_N_INSNS (4),     /* MSR   */
  COSTS_N_INSNS (1),     /* multiplication in DFmode */
  COSTS_N_INSNS (28),    /* MXBR */
  COSTS_N_INSNS (130),   /* SQXBR */
  COSTS_N_INSNS (66),    /* SQDBR */
  COSTS_N_INSNS (38),    /* SQEBR */
  COSTS_N_INSNS (1),     /* MADBR */
  COSTS_N_INSNS (1),     /* MAEBR */
  COSTS_N_INSNS (60),    /* DXBR */
  COSTS_N_INSNS (40),    /* DDBR */
  COSTS_N_INSNS (26),    /* DEBR */
  COSTS_N_INSNS (30),    /* DLGR */
  COSTS_N_INSNS (23),    /* DLR */
  COSTS_N_INSNS (23),    /* DR */
  COSTS_N_INSNS (24),    /* DSGFR */
  COSTS_N_INSNS (24),    /* DSGR */
};
static const
struct processor_costs z10_cost =
{
  COSTS_N_INSNS (10),    /* M     */
  COSTS_N_INSNS (10),    /* MGHI  */
  COSTS_N_INSNS (10),    /* MH    */
  COSTS_N_INSNS (10),    /* MHI   */
  COSTS_N_INSNS (10),    /* ML    */
  COSTS_N_INSNS (10),    /* MR    */
  COSTS_N_INSNS (10),    /* MS    */
  COSTS_N_INSNS (10),    /* MSG   */
  COSTS_N_INSNS (10),    /* MSGF  */
  COSTS_N_INSNS (10),    /* MSGFR */
  COSTS_N_INSNS (10),    /* MSGR  */
  COSTS_N_INSNS (10),    /* MSR   */
  COSTS_N_INSNS (1),     /* multiplication in DFmode */
  COSTS_N_INSNS (50),    /* MXBR */
  COSTS_N_INSNS (120),   /* SQXBR */
  COSTS_N_INSNS (52),    /* SQDBR */
  COSTS_N_INSNS (38),    /* SQEBR */
  COSTS_N_INSNS (1),     /* MADBR */
  COSTS_N_INSNS (1),     /* MAEBR */
  COSTS_N_INSNS (111),   /* DXBR */
  COSTS_N_INSNS (39),    /* DDBR */
  COSTS_N_INSNS (32),    /* DEBR */
  COSTS_N_INSNS (160),   /* DLGR */
  COSTS_N_INSNS (71),    /* DLR */
  COSTS_N_INSNS (71),    /* DR */
  COSTS_N_INSNS (71),    /* DSGFR */
  COSTS_N_INSNS (71),    /* DSGR */
};
static const
struct processor_costs z196_cost =
{
  COSTS_N_INSNS (7),     /* M     */
  COSTS_N_INSNS (5),     /* MGHI  */
  COSTS_N_INSNS (5),     /* MH    */
  COSTS_N_INSNS (5),     /* MHI   */
  COSTS_N_INSNS (7),     /* ML    */
  COSTS_N_INSNS (7),     /* MR    */
  COSTS_N_INSNS (6),     /* MS    */
  COSTS_N_INSNS (8),     /* MSG   */
  COSTS_N_INSNS (6),     /* MSGF  */
  COSTS_N_INSNS (6),     /* MSGFR */
  COSTS_N_INSNS (8),     /* MSGR  */
  COSTS_N_INSNS (6),     /* MSR   */
  COSTS_N_INSNS (1),     /* multiplication in DFmode */
  COSTS_N_INSNS (40),    /* MXBR B+40 */
  COSTS_N_INSNS (100),   /* SQXBR B+100 */
  COSTS_N_INSNS (42),    /* SQDBR B+42 */
  COSTS_N_INSNS (28),    /* SQEBR B+28 */
  COSTS_N_INSNS (1),     /* MADBR B */
  COSTS_N_INSNS (1),     /* MAEBR B */
  COSTS_N_INSNS (101),   /* DXBR B+101 */
  COSTS_N_INSNS (29),    /* DDBR */
  COSTS_N_INSNS (22),    /* DEBR */
  COSTS_N_INSNS (160),   /* DLGR cracked */
  COSTS_N_INSNS (160),   /* DLR cracked */
  COSTS_N_INSNS (160),   /* DR expanded */
  COSTS_N_INSNS (160),   /* DSGFR cracked */
  COSTS_N_INSNS (160),   /* DSGR cracked */
};
static const
struct processor_costs zEC12_cost =
{
  COSTS_N_INSNS (7),     /* M     */
  COSTS_N_INSNS (5),     /* MGHI  */
  COSTS_N_INSNS (5),     /* MH    */
  COSTS_N_INSNS (5),     /* MHI   */
  COSTS_N_INSNS (7),     /* ML    */
  COSTS_N_INSNS (7),     /* MR    */
  COSTS_N_INSNS (6),     /* MS    */
  COSTS_N_INSNS (8),     /* MSG   */
  COSTS_N_INSNS (6),     /* MSGF  */
  COSTS_N_INSNS (6),     /* MSGFR */
  COSTS_N_INSNS (8),     /* MSGR  */
  COSTS_N_INSNS (6),     /* MSR   */
  COSTS_N_INSNS (1),     /* multiplication in DFmode */
  COSTS_N_INSNS (40),    /* MXBR B+40 */
  COSTS_N_INSNS (100),   /* SQXBR B+100 */
  COSTS_N_INSNS (42),    /* SQDBR B+42 */
  COSTS_N_INSNS (28),    /* SQEBR B+28 */
  COSTS_N_INSNS (1),     /* MADBR B */
  COSTS_N_INSNS (1),     /* MAEBR B */
  COSTS_N_INSNS (131),   /* DXBR B+131 */
  COSTS_N_INSNS (29),    /* DDBR */
  COSTS_N_INSNS (22),    /* DEBR */
  COSTS_N_INSNS (160),   /* DLGR cracked */
  COSTS_N_INSNS (160),   /* DLR cracked */
  COSTS_N_INSNS (160),   /* DR expanded */
  COSTS_N_INSNS (160),   /* DSGFR cracked */
  COSTS_N_INSNS (160),   /* DSGR cracked */
};
extern int reload_completed;
/* Kept up to date using the SCHED_VARIABLE_ISSUE hook.  */
static rtx last_scheduled_insn;
/* Structure used to hold the components of a S/390 memory
   address.  A legitimate address on S/390 is of the general
   form
        base + index + displacement
   where any of the components is optional.

   base and index are registers of the class ADDR_REGS,
   displacement is an unsigned 12-bit immediate constant.  */

struct s390_address
{
  rtx base;
  rtx indx;
  rtx disp;
  bool pointer;
  bool literal_pool;
};
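/* For example (illustrative): the address RTX
   (plus:DI (plus:DI (reg:DI 2) (reg:DI 3)) (const_int 40))
   decomposes into index (reg 2), base (reg 3) and displacement 40;
   see s390_decompose_address below.  */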
/* The following structure is embedded in the machine
   specific part of struct function.  */

struct GTY (()) s390_frame_layout
{
  /* Offset within stack frame.  */
  HOST_WIDE_INT gprs_offset;
  HOST_WIDE_INT f0_offset;
  HOST_WIDE_INT f4_offset;
  HOST_WIDE_INT f8_offset;
  HOST_WIDE_INT backchain_offset;

  /* Numbers of the first and last GPRs for which slots in the
     register save area are reserved.  */
  int first_save_gpr_slot;
  int last_save_gpr_slot;

  /* Numbers of the first and last GPRs to be saved / restored.  */
  int first_save_gpr;
  int first_restore_gpr;
  int last_save_gpr;
  int last_restore_gpr;

  /* Bits standing for floating point registers.  Set, if the
     respective register has to be saved.  Starting with reg 16 (f0)
     at the rightmost bit.
     Bit 15 -  8  7  6  5  4  3  2  1  0
     fpr 15 -  8  7  5  3  1  6  4  2  0
     reg 31 - 24 23 22 21 20 19 18 17 16  */
  unsigned int fpr_bitmap;
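  /* Worked example (illustrative): f4 is hard reg 18 and maps to
     bit 2 in the table above, so a frame that saves only f4 has
     fpr_bitmap == 0x04.  */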
  /* Number of floating point registers f8-f15 which must be saved.  */
  int high_fprs;

  /* Set if return address needs to be saved.
     This flag is set by s390_return_addr_rtx if it could not use
     the initial value of r14 and therefore depends on r14 saved
     to the stack.  */
  bool save_return_addr_p;

  /* Size of stack frame.  */
  HOST_WIDE_INT frame_size;
};
/* Define the structure for the machine field in struct function.  */

struct GTY(()) machine_function
{
  struct s390_frame_layout frame_layout;

  /* Literal pool base register.  */
  rtx base_reg;

  /* True if we may need to perform branch splitting.  */
  bool split_branches_pending_p;

  /* Some local-dynamic TLS symbol name.  */
  const char *some_ld_name;

  bool has_landing_pad_p;
};
/* A few accessor macros for struct cfun->machine->s390_frame_layout.  */

#define cfun_frame_layout (cfun->machine->frame_layout)
#define cfun_save_high_fprs_p (!!cfun_frame_layout.high_fprs)
#define cfun_gprs_save_area_size ((cfun_frame_layout.last_save_gpr_slot - \
  cfun_frame_layout.first_save_gpr_slot + 1) * UNITS_PER_LONG)
#define cfun_set_fpr_bit(BITNUM) (cfun->machine->frame_layout.fpr_bitmap |= \
  (1 << (BITNUM)))
#define cfun_fpr_bit_p(BITNUM) (!!(cfun->machine->frame_layout.fpr_bitmap & \
  (1 << (BITNUM))))
/* Number of GPRs and FPRs used for argument passing.  */
#define GP_ARG_NUM_REG 5
#define FP_ARG_NUM_REG (TARGET_64BIT ? 4 : 2)
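/* For illustration (per the S/390 ELF ABI): the argument GPRs are
   r2-r6; the argument FPRs are f0, f2, f4 and f6 in 64-bit mode,
   and f0 and f2 in 31-bit mode.  */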
/* A couple of shortcuts.  */
#define CONST_OK_FOR_J(x) \
	CONST_OK_FOR_CONSTRAINT_P((x), 'J', "J")
#define CONST_OK_FOR_K(x) \
	CONST_OK_FOR_CONSTRAINT_P((x), 'K', "K")
#define CONST_OK_FOR_Os(x) \
	CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
#define CONST_OK_FOR_Op(x) \
	CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
#define CONST_OK_FOR_On(x) \
	CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")

#define REGNO_PAIR_OK(REGNO, MODE) \
  (HARD_REGNO_NREGS ((REGNO), (MODE)) == 1 || !((REGNO) & 1))
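/* For example (illustrative): a mode that needs a GPR pair is only
   accepted by REGNO_PAIR_OK on an even-numbered first register;
   modes occupying a single register are accepted anywhere.  */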
/* That's the read ahead of the dynamic branch prediction unit in
   bytes on a z10 (or higher) CPU.  */
#define PREDICT_DISTANCE (TARGET_Z10 ? 384 : 2048)
/* Return the alignment for LABEL.  We default to the -falign-labels
   value except for the literal pool base label.  */
int
s390_label_align (rtx label)
{
  rtx prev_insn = prev_active_insn (label);

  if (prev_insn == NULL_RTX)
    goto old;

  prev_insn = single_set (prev_insn);

  if (prev_insn == NULL_RTX)
    goto old;

  prev_insn = SET_SRC (prev_insn);

  /* Don't align literal pool base labels.  */
  if (GET_CODE (prev_insn) == UNSPEC
      && XINT (prev_insn, 1) == UNSPEC_MAIN_BASE)
    return 0;

 old:
  return align_labels_log;
}
static enum machine_mode
s390_libgcc_cmp_return_mode (void)
{
  return TARGET_64BIT ? DImode : SImode;
}

static enum machine_mode
s390_libgcc_shift_count_mode (void)
{
  return TARGET_64BIT ? DImode : SImode;
}

static enum machine_mode
s390_unwind_word_mode (void)
{
  return TARGET_64BIT ? DImode : SImode;
}
/* Return true if the back end supports mode MODE.  */
static bool
s390_scalar_mode_supported_p (enum machine_mode mode)
{
  /* In contrast to the default implementation reject TImode constants on 31bit
     TARGET_ZARCH for ABI compliance.  */
  if (!TARGET_64BIT && TARGET_ZARCH && mode == TImode)
    return false;

  if (DECIMAL_FLOAT_MODE_P (mode))
    return default_decimal_float_supported_p ();

  return default_scalar_mode_supported_p (mode);
}
/* Set the has_landing_pad_p flag in struct machine_function to VALUE.  */

void
s390_set_has_landing_pad_p (bool value)
{
  cfun->machine->has_landing_pad_p = value;
}
/* If two condition code modes are compatible, return a condition code
   mode which is compatible with both.  Otherwise, return
   VOIDmode.  */

static enum machine_mode
s390_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
{
  if (m1 == m2)
    return m1;

  switch (m1)
    {
    case CCZmode:
      if (m2 == CCUmode || m2 == CCTmode || m2 == CCZ1mode
	  || m2 == CCSmode || m2 == CCSRmode || m2 == CCURmode)
	return m2;
      return VOIDmode;

    default:
      return VOIDmode;
    }
}
/* Return true if SET either doesn't set the CC register, or else
   the source and destination have matching CC modes and that
   CC mode is at least as constrained as REQ_MODE.  */

static bool
s390_match_ccmode_set (rtx set, enum machine_mode req_mode)
{
  enum machine_mode set_mode;

  gcc_assert (GET_CODE (set) == SET);

  if (GET_CODE (SET_DEST (set)) != REG || !CC_REGNO_P (REGNO (SET_DEST (set))))
    return 1;

  set_mode = GET_MODE (SET_DEST (set));
  switch (set_mode)
    {
    case CCSmode:
    case CCSRmode:
    case CCUmode:
    case CCURmode:
    case CCLmode:
    case CCL1mode:
    case CCL2mode:
    case CCL3mode:
    case CCT1mode:
    case CCT2mode:
    case CCT3mode:
      if (req_mode != set_mode)
	return 0;
      break;

    case CCZmode:
      if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode
	  && req_mode != CCSRmode && req_mode != CCURmode)
	return 0;
      break;

    case CCAPmode:
    case CCANmode:
      if (req_mode != CCAmode)
	return 0;
      break;

    default:
      gcc_unreachable ();
    }

  return (GET_MODE (SET_SRC (set)) == set_mode);
}
/* Return true if every SET in INSN that sets the CC register
   has source and destination with matching CC modes and that
   CC mode is at least as constrained as REQ_MODE.
   If REQ_MODE is VOIDmode, always return false.  */

bool
s390_match_ccmode (rtx insn, enum machine_mode req_mode)
{
  int i;

  /* s390_tm_ccmode returns VOIDmode to indicate failure.  */
  if (req_mode == VOIDmode)
    return false;

  if (GET_CODE (PATTERN (insn)) == SET)
    return s390_match_ccmode_set (PATTERN (insn), req_mode);

  if (GET_CODE (PATTERN (insn)) == PARALLEL)
    for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
      {
	rtx set = XVECEXP (PATTERN (insn), 0, i);
	if (GET_CODE (set) == SET)
	  if (!s390_match_ccmode_set (set, req_mode))
	    return false;
      }

  return true;
}
/* If a test-under-mask instruction can be used to implement
   (compare (and ... OP1) OP2), return the CC mode required
   to do that.  Otherwise, return VOIDmode.
   MIXED is true if the instruction can distinguish between
   CC1 and CC2 for mixed selected bits (TMxx), it is false
   if the instruction cannot (TM).  */

enum machine_mode
s390_tm_ccmode (rtx op1, rtx op2, bool mixed)
{
  int bit0, bit1;

  /* ??? Fixme: should work on CONST_DOUBLE as well.  */
  if (GET_CODE (op1) != CONST_INT || GET_CODE (op2) != CONST_INT)
    return VOIDmode;

  /* Selected bits all zero: CC0.
     e.g.: int a; if ((a & (16 + 128)) == 0) */
  if (INTVAL (op2) == 0)
    return CCTmode;

  /* Selected bits all one: CC3.
     e.g.: int a; if ((a & (16 + 128)) == 16 + 128) */
  if (INTVAL (op2) == INTVAL (op1))
    return CCT3mode;

  /* Exactly two bits selected, mixed zeroes and ones: CC1 or CC2. e.g.:
     int a;
     if ((a & (16 + 128)) == 16) -> CCT1
     if ((a & (16 + 128)) == 128) -> CCT2 */
  if (mixed)
    {
      bit1 = exact_log2 (INTVAL (op2));
      bit0 = exact_log2 (INTVAL (op1) ^ INTVAL (op2));
      if (bit0 != -1 && bit1 != -1)
	return bit0 > bit1 ? CCT1mode : CCT2mode;
    }

  return VOIDmode;
}
/* Given a comparison code OP (EQ, NE, etc.) and the operands
   OP0 and OP1 of a COMPARE, return the mode to be used for the
   comparison.  */

enum machine_mode
s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
{
  switch (code)
    {
      case EQ:
      case NE:
	if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
	    && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
	  return CCAPmode;
	if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
	    && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
	  return CCAPmode;
	if ((GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
	     || GET_CODE (op1) == NEG)
	    && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
	  return CCLmode;

	if (GET_CODE (op0) == AND)
	  {
	    /* Check whether we can potentially do it via TM.  */
	    enum machine_mode ccmode;
	    ccmode = s390_tm_ccmode (XEXP (op0, 1), op1, 1);
	    if (ccmode != VOIDmode)
	      {
		/* Relax CCTmode to CCZmode to allow fall-back to AND
		   if that turns out to be beneficial.  */
		return ccmode == CCTmode ? CCZmode : ccmode;
	      }
	  }

	if (register_operand (op0, HImode)
	    && GET_CODE (op1) == CONST_INT
	    && (INTVAL (op1) == -1 || INTVAL (op1) == 65535))
	  return CCT3mode;
	if (register_operand (op0, QImode)
	    && GET_CODE (op1) == CONST_INT
	    && (INTVAL (op1) == -1 || INTVAL (op1) == 255))
	  return CCT3mode;

	return CCZmode;

      case LE:
      case LT:
      case GE:
      case GT:
	/* The only overflow condition of NEG and ABS happens when
	   -INT_MAX is used as parameter, which stays negative. So
	   we have an overflow from a positive value to a negative.
	   Using CCAP mode the resulting cc can be used for comparisons.  */
	if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
	    && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
	  return CCAPmode;

	/* If constants are involved in an add instruction it is possible to use
	   the resulting cc for comparisons with zero. Knowing the sign of the
	   constant the overflow behavior gets predictable. e.g.:
	     int a, b; if ((b = a + c) > 0)
	   with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP  */
	if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
	    && (CONST_OK_FOR_K (INTVAL (XEXP (op0, 1)))
		|| (CONST_OK_FOR_CONSTRAINT_P (INTVAL (XEXP (op0, 1)), 'O', "Os")
		    /* Avoid INT32_MIN on 32 bit.  */
		    && (!TARGET_ZARCH
			|| INTVAL (XEXP (op0, 1)) != -0x7fffffff - 1))))
	  {
	    if (INTVAL (XEXP ((op0), 1)) < 0)
	      return CCANmode;
	    else
	      return CCAPmode;
	  }
	/* Fall through.  */
      case UNORDERED:
      case ORDERED:
      case UNEQ:
      case UNLE:
      case UNLT:
      case UNGE:
      case UNGT:
      case LTGT:
	if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
	    && GET_CODE (op1) != CONST_INT)
	  return CCSRmode;
	return CCSmode;

      case LTU:
      case GEU:
	if (GET_CODE (op0) == PLUS
	    && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
	  return CCL1mode;

	if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
	    && GET_CODE (op1) != CONST_INT)
	  return CCURmode;
	return CCUmode;

      case LEU:
      case GTU:
	if (GET_CODE (op0) == MINUS
	    && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
	  return CCL2mode;

	if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
	    && GET_CODE (op1) != CONST_INT)
	  return CCURmode;
	return CCUmode;

      default:
	gcc_unreachable ();
    }
}
/* Replace the comparison OP0 CODE OP1 by a semantically equivalent one
   that we can implement more efficiently.  */

static void
s390_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
			      bool op0_preserve_value)
{
  if (op0_preserve_value)
    return;

  /* Convert ZERO_EXTRACT back to AND to enable TM patterns.  */
  if ((*code == EQ || *code == NE)
      && *op1 == const0_rtx
      && GET_CODE (*op0) == ZERO_EXTRACT
      && GET_CODE (XEXP (*op0, 1)) == CONST_INT
      && GET_CODE (XEXP (*op0, 2)) == CONST_INT
      && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
    {
      rtx inner = XEXP (*op0, 0);
      HOST_WIDE_INT modesize = GET_MODE_BITSIZE (GET_MODE (inner));
      HOST_WIDE_INT len = INTVAL (XEXP (*op0, 1));
      HOST_WIDE_INT pos = INTVAL (XEXP (*op0, 2));

      if (len > 0 && len < modesize
	  && pos >= 0 && pos + len <= modesize
	  && modesize <= HOST_BITS_PER_WIDE_INT)
	{
	  unsigned HOST_WIDE_INT block;
	  block = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
	  block <<= modesize - pos - len;

	  *op0 = gen_rtx_AND (GET_MODE (inner), inner,
			      gen_int_mode (block, GET_MODE (inner)));
	}
    }

  /* Narrow AND of memory against immediate to enable TM.  */
  if ((*code == EQ || *code == NE)
      && *op1 == const0_rtx
      && GET_CODE (*op0) == AND
      && GET_CODE (XEXP (*op0, 1)) == CONST_INT
      && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
    {
      rtx inner = XEXP (*op0, 0);
      rtx mask = XEXP (*op0, 1);

      /* Ignore paradoxical SUBREGs if all extra bits are masked out.  */
      if (GET_CODE (inner) == SUBREG
	  && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (inner)))
	  && (GET_MODE_SIZE (GET_MODE (inner))
	      >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
	  && ((INTVAL (mask)
	       & GET_MODE_MASK (GET_MODE (inner))
	       & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
	      == 0))
	inner = SUBREG_REG (inner);

      /* Do not change volatile MEMs.  */
      if (MEM_P (inner) && !MEM_VOLATILE_P (inner))
	{
	  int part = s390_single_part (XEXP (*op0, 1),
				       GET_MODE (inner), QImode, 0);
	  if (part >= 0)
	    {
	      mask = gen_int_mode (s390_extract_part (mask, QImode, 0), QImode);
	      inner = adjust_address_nv (inner, QImode, part);
	      *op0 = gen_rtx_AND (QImode, inner, mask);
	    }
	}
    }

  /* Narrow comparisons against 0xffff to HImode if possible.  */
  if ((*code == EQ || *code == NE)
      && GET_CODE (*op1) == CONST_INT
      && INTVAL (*op1) == 0xffff
      && SCALAR_INT_MODE_P (GET_MODE (*op0))
      && (nonzero_bits (*op0, GET_MODE (*op0))
	  & ~(unsigned HOST_WIDE_INT) 0xffff) == 0)
    {
      *op0 = gen_lowpart (HImode, *op0);
      *op1 = constm1_rtx;
    }

  /* Remove redundant UNSPEC_CCU_TO_INT conversions if possible.  */
  if (GET_CODE (*op0) == UNSPEC
      && XINT (*op0, 1) == UNSPEC_CCU_TO_INT
      && XVECLEN (*op0, 0) == 1
      && GET_MODE (XVECEXP (*op0, 0, 0)) == CCUmode
      && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
      && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
      && *op1 == const0_rtx)
    {
      enum rtx_code new_code = UNKNOWN;
      switch (*code)
	{
	  case EQ: new_code = EQ;  break;
	  case NE: new_code = NE;  break;
	  case LT: new_code = GTU; break;
	  case GT: new_code = LTU; break;
	  case LE: new_code = GEU; break;
	  case GE: new_code = LEU; break;
	  default: break;
	}

      if (new_code != UNKNOWN)
	{
	  *op0 = XVECEXP (*op0, 0, 0);
	  *code = new_code;
	}
    }

  /* Remove redundant UNSPEC_CCZ_TO_INT conversions if possible.  */
  if (GET_CODE (*op0) == UNSPEC
      && XINT (*op0, 1) == UNSPEC_CCZ_TO_INT
      && XVECLEN (*op0, 0) == 1
      && GET_MODE (XVECEXP (*op0, 0, 0)) == CCZmode
      && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
      && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
      && *op1 == const0_rtx)
    {
      enum rtx_code new_code = UNKNOWN;
      switch (*code)
	{
	  case EQ: new_code = EQ;  break;
	  case NE: new_code = NE;  break;
	  default: break;
	}

      if (new_code != UNKNOWN)
	{
	  *op0 = XVECEXP (*op0, 0, 0);
	  *code = new_code;
	}
    }

  /* Simplify cascaded EQ, NE with const0_rtx.  */
  if ((*code == NE || *code == EQ)
      && (GET_CODE (*op0) == EQ || GET_CODE (*op0) == NE)
      && GET_MODE (*op0) == SImode
      && GET_MODE (XEXP (*op0, 0)) == CCZ1mode
      && REG_P (XEXP (*op0, 0))
      && XEXP (*op0, 1) == const0_rtx
      && *op1 == const0_rtx)
    {
      if ((*code == EQ && GET_CODE (*op0) == NE)
	  || (*code == NE && GET_CODE (*op0) == EQ))
	*code = EQ;
      else
	*code = NE;

      *op0 = XEXP (*op0, 0);
    }

  /* Prefer register over memory as first operand.  */
  if (MEM_P (*op0) && REG_P (*op1))
    {
      rtx tem = *op0; *op0 = *op1; *op1 = tem;
      *code = (int)swap_condition ((enum rtx_code)*code);
    }
}
/* Emit a compare instruction suitable to implement the comparison
   OP0 CODE OP1.  Return the correct condition RTL to be placed in
   the IF_THEN_ELSE of the conditional branch testing the result.  */

rtx
s390_emit_compare (enum rtx_code code, rtx op0, rtx op1)
{
  enum machine_mode mode = s390_select_ccmode (code, op0, op1);
  rtx cc;

  /* Do not output a redundant compare instruction if a compare_and_swap
     pattern already computed the result and the machine modes are compatible.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
    {
      gcc_assert (s390_cc_modes_compatible (GET_MODE (op0), mode)
		  == GET_MODE (op0));
      cc = op0;
    }
  else
    {
      cc = gen_rtx_REG (mode, CC_REGNUM);
      emit_insn (gen_rtx_SET (VOIDmode, cc, gen_rtx_COMPARE (mode, op0, op1)));
    }

  return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
}
/* Emit a SImode compare and swap instruction setting MEM to NEW_RTX if OLD
   matches CMP.
   Return the correct condition RTL to be placed in the IF_THEN_ELSE of the
   conditional branch testing the result.  */

static rtx
s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem,
			    rtx cmp, rtx new_rtx)
{
  emit_insn (gen_atomic_compare_and_swapsi_internal (old, mem, cmp, new_rtx));
  return s390_emit_compare (code, gen_rtx_REG (CCZ1mode, CC_REGNUM),
			    const0_rtx);
}
/* Emit a jump instruction to TARGET.  If COND is NULL_RTX, emit an
   unconditional jump, else a conditional jump under condition COND.  */

void
s390_emit_jump (rtx target, rtx cond)
{
  rtx insn;

  target = gen_rtx_LABEL_REF (VOIDmode, target);
  if (cond)
    target = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, target, pc_rtx);

  insn = gen_rtx_SET (VOIDmode, pc_rtx, target);
  emit_jump_insn (insn);
}
/* Return branch condition mask to implement a branch
   specified by CODE.  Return -1 for invalid comparisons.  */

static int
s390_branch_condition_mask (rtx code)
{
  const int CC0 = 1 << 3;
  const int CC1 = 1 << 2;
  const int CC2 = 1 << 1;
  const int CC3 = 1 << 0;

  gcc_assert (GET_CODE (XEXP (code, 0)) == REG);
  gcc_assert (REGNO (XEXP (code, 0)) == CC_REGNUM);
  gcc_assert (XEXP (code, 1) == const0_rtx);

  switch (GET_MODE (XEXP (code, 0)))
    {
    case CCZmode:
    case CCZ1mode:
      switch (GET_CODE (code))
	{
	case EQ:	return CC0;
	case NE:	return CC1 | CC2 | CC3;
	default:	return -1;
	}
      break;

    case CCT1mode:
      switch (GET_CODE (code))
	{
	case EQ:	return CC1;
	case NE:	return CC0 | CC2 | CC3;
	default:	return -1;
	}
      break;

    case CCT2mode:
      switch (GET_CODE (code))
	{
	case EQ:	return CC2;
	case NE:	return CC0 | CC1 | CC3;
	default:	return -1;
	}
      break;

    case CCT3mode:
      switch (GET_CODE (code))
	{
	case EQ:	return CC3;
	case NE:	return CC0 | CC1 | CC2;
	default:	return -1;
	}
      break;

    case CCLmode:
      switch (GET_CODE (code))
	{
	case EQ:	return CC0 | CC2;
	case NE:	return CC1 | CC3;
	default:	return -1;
	}
      break;

    case CCL1mode:
      switch (GET_CODE (code))
	{
	case LTU:	return CC2 | CC3;  /* carry */
	case GEU:	return CC0 | CC1;  /* no carry */
	default:	return -1;
	}
      break;

    case CCL2mode:
      switch (GET_CODE (code))
	{
	case GTU:	return CC0 | CC1;  /* borrow */
	case LEU:	return CC2 | CC3;  /* no borrow */
	default:	return -1;
	}
      break;

    case CCL3mode:
      switch (GET_CODE (code))
	{
	case EQ:	return CC0 | CC2;
	case NE:	return CC1 | CC3;
	case LTU:	return CC1;
	case GTU:	return CC3;
	case LEU:	return CC1 | CC2;
	case GEU:	return CC2 | CC3;
	default:	return -1;
	}
      break;

    case CCUmode:
      switch (GET_CODE (code))
	{
	case EQ:	return CC0;
	case NE:	return CC1 | CC2 | CC3;
	case LTU:	return CC1;
	case GTU:	return CC2;
	case LEU:	return CC0 | CC1;
	case GEU:	return CC0 | CC2;
	default:	return -1;
	}
      break;

    case CCURmode:
      switch (GET_CODE (code))
	{
	case EQ:	return CC0;
	case NE:	return CC2 | CC1 | CC3;
	case LTU:	return CC2;
	case GTU:	return CC1;
	case LEU:	return CC0 | CC2;
	case GEU:	return CC0 | CC1;
	default:	return -1;
	}
      break;

    case CCAPmode:
      switch (GET_CODE (code))
	{
	case EQ:	return CC0;
	case NE:	return CC1 | CC2 | CC3;
	case LT:	return CC1 | CC3;
	case GT:	return CC2;
	case LE:	return CC0 | CC1 | CC3;
	case GE:	return CC0 | CC2;
	default:	return -1;
	}
      break;

    case CCANmode:
      switch (GET_CODE (code))
	{
	case EQ:	return CC0;
	case NE:	return CC1 | CC2 | CC3;
	case LT:	return CC1;
	case GT:	return CC2 | CC3;
	case LE:	return CC0 | CC1;
	case GE:	return CC0 | CC2 | CC3;
	default:	return -1;
	}
      break;

    case CCSmode:
      switch (GET_CODE (code))
	{
	case EQ:	return CC0;
	case NE:	return CC1 | CC2 | CC3;
	case LT:	return CC1;
	case GT:	return CC2;
	case LE:	return CC0 | CC1;
	case GE:	return CC0 | CC2;
	case UNORDERED:	return CC3;
	case ORDERED:	return CC0 | CC1 | CC2;
	case UNEQ:	return CC0 | CC3;
	case UNLT:	return CC1 | CC3;
	case UNGT:	return CC2 | CC3;
	case UNLE:	return CC0 | CC1 | CC3;
	case UNGE:	return CC0 | CC2 | CC3;
	case LTGT:	return CC1 | CC2;
	default:	return -1;
	}
      break;

    case CCSRmode:
      switch (GET_CODE (code))
	{
	case EQ:	return CC0;
	case NE:	return CC2 | CC1 | CC3;
	case LT:	return CC2;
	case GT:	return CC1;
	case LE:	return CC0 | CC2;
	case GE:	return CC0 | CC1;
	case UNORDERED:	return CC3;
	case ORDERED:	return CC0 | CC2 | CC1;
	case UNEQ:	return CC0 | CC3;
	case UNLT:	return CC2 | CC3;
	case UNGT:	return CC1 | CC3;
	case UNLE:	return CC0 | CC2 | CC3;
	case UNGE:	return CC0 | CC1 | CC3;
	case LTGT:	return CC2 | CC1;
	default:	return -1;
	}
      break;

    default:
      return -1;
    }
}
/* Return branch condition mask to implement a compare and branch
   specified by CODE.  Return -1 for invalid comparisons.  */

int
s390_compare_and_branch_condition_mask (rtx code)
{
  const int CC0 = 1 << 3;
  const int CC1 = 1 << 2;
  const int CC2 = 1 << 1;

  switch (GET_CODE (code))
    {
    case EQ:
      return CC0;
    case NE:
      return CC1 | CC2;
    case LT:
    case LTU:
      return CC1;
    case GT:
    case GTU:
      return CC2;
    case LE:
    case LEU:
      return CC0 | CC1;
    case GE:
    case GEU:
      return CC0 | CC2;
    default:
      return -1;
    }
}
/* If INV is false, return assembler mnemonic string to implement
   a branch specified by CODE.  If INV is true, return mnemonic
   for the corresponding inverted branch.  */

static const char *
s390_branch_condition_mnemonic (rtx code, int inv)
{
  int mask;

  static const char *const mnemonic[16] =
    {
      NULL, "o", "h", "nle",
      "l", "nhe", "lh", "ne",
      "e", "nlh", "he", "nl",
      "le", "nh", "no", NULL
    };
  if (GET_CODE (XEXP (code, 0)) == REG
      && REGNO (XEXP (code, 0)) == CC_REGNUM
      && XEXP (code, 1) == const0_rtx)
    mask = s390_branch_condition_mask (code);
  else
    mask = s390_compare_and_branch_condition_mask (code);

  gcc_assert (mask >= 0);

  if (inv)
    mask ^= 15;

  gcc_assert (mask >= 1 && mask <= 14);

  return mnemonic[mask];
}
/* Return the part of op which has a value different from def.
   The size of the part is determined by mode.
   Use this function only if you already know that op really
   contains such a part.  */
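/* Worked example (illustrative, assuming a 64-bit HOST_WIDE_INT):
   for op == (const_int 0x12340000), mode == HImode and def == 0,
   the low 16-bit part is zero and is skipped; the next part,
   0x1234, differs from DEF and is returned.  */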
unsigned HOST_WIDE_INT
s390_extract_part (rtx op, enum machine_mode mode, int def)
{
  unsigned HOST_WIDE_INT value = 0;
  int max_parts = HOST_BITS_PER_WIDE_INT / GET_MODE_BITSIZE (mode);
  int part_bits = GET_MODE_BITSIZE (mode);
  unsigned HOST_WIDE_INT part_mask
    = ((unsigned HOST_WIDE_INT)1 << part_bits) - 1;
  int i;

  for (i = 0; i < max_parts; i++)
    {
      if (i == 0)
	value = (unsigned HOST_WIDE_INT) INTVAL (op);
      else
	value >>= part_bits;

      if ((value & part_mask) != (def & part_mask))
	return value & part_mask;
    }

  gcc_unreachable ();
}
/* If OP is an integer constant of mode MODE with exactly one
   part of mode PART_MODE unequal to DEF, return the number of that
   part.  Otherwise, return -1.  */
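/* Worked example (illustrative): op == (const_int 0xff00) with
   mode == SImode, part_mode == HImode and def == 0 has exactly one
   non-zero 16-bit part, found at i == 0; the big-endian part number
   returned is n_parts - 1 - 0 == 1.  */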
int
s390_single_part (rtx op,
		  enum machine_mode mode,
		  enum machine_mode part_mode,
		  int def)
{
  unsigned HOST_WIDE_INT value = 0;
  int n_parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (part_mode);
  unsigned HOST_WIDE_INT part_mask
    = ((unsigned HOST_WIDE_INT)1 << GET_MODE_BITSIZE (part_mode)) - 1;
  int i, part = -1;

  if (GET_CODE (op) != CONST_INT)
    return -1;

  for (i = 0; i < n_parts; i++)
    {
      if (i == 0)
	value = (unsigned HOST_WIDE_INT) INTVAL (op);
      else
	value >>= GET_MODE_BITSIZE (part_mode);

      if ((value & part_mask) != (def & part_mask))
	{
	  if (part != -1)
	    return -1;
	  else
	    part = i;
	}
    }

  return part == -1 ? -1 : n_parts - 1 - part;
}
/* Return true if IN contains a contiguous bitfield in the lower SIZE
   bits and no other bits are set in IN.  POS and LENGTH can be used
   to obtain the start position and the length of the bitfield.

   POS gives the position of the first bit of the bitfield counting
   from the lowest order bit starting with zero.  In order to use this
   value for S/390 instructions this has to be converted to "bits big
   endian" style.  */
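/* Worked example (illustrative): in == 0x0ff0 with size == 64 is a
   contiguous bitmask, so true is returned with *pos == 4 and
   *length == 8; in == 0x0f0f is rejected because its set bits are
   not contiguous.  */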
bool
s390_contiguous_bitmask_p (unsigned HOST_WIDE_INT in, int size,
			   int *pos, int *length)
{
  int tmp_pos = 0;
  int tmp_length = 0;
  int i;
  unsigned HOST_WIDE_INT mask = 1ULL;
  bool contiguous = false;

  for (i = 0; i < size; mask <<= 1, i++)
    {
      if (contiguous)
	{
	  if (mask & in)
	    tmp_length++;
	  else
	    break;
	}
      else
	{
	  if (mask & in)
	    {
	      contiguous = true;
	      tmp_length++;
	    }
	  else
	    tmp_pos++;
	}
    }

  if (!tmp_length)
    return false;

  /* Calculate a mask for all bits beyond the contiguous bits.  */
  mask = (-1LL & ~(((1ULL << (tmp_length + tmp_pos - 1)) << 1) - 1));

  if (mask & in)
    return false;

  if (tmp_length + tmp_pos - 1 > size)
    return false;

  if (length)
    *length = tmp_length;

  if (pos)
    *pos = tmp_pos;

  return true;
}
/* Check whether a rotate of ROTL followed by an AND of CONTIG is
   equivalent to a shift followed by the AND.  In particular, CONTIG
   should not overlap the (rotated) bit 0/bit 63 gap.  Negative values
   for ROTL indicate a rotate to the right.  */
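/* Worked example (illustrative): with bitsize == 64 and
   contig == 0xff0 (pos 4, len 8), a left rotate of up to 4 keeps the
   field clear of the bit 0/bit 63 gap, so rotl == 4 is accepted
   while rotl == 5 is not.  */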
bool
s390_extzv_shift_ok (int bitsize, int rotl, unsigned HOST_WIDE_INT contig)
{
  int pos, len;
  bool ok;

  ok = s390_contiguous_bitmask_p (contig, bitsize, &pos, &len);
  gcc_assert (ok);

  return ((rotl >= 0 && rotl <= pos)
	  || (rotl < 0 && -rotl <= bitsize - len - pos));
}
/* Check whether we can (and want to) split a double-word
   move in mode MODE from SRC to DST into two single-word
   moves, moving the subword FIRST_SUBWORD first.  */

bool
s390_split_ok_p (rtx dst, rtx src, enum machine_mode mode, int first_subword)
{
  /* Floating point registers cannot be split.  */
  if (FP_REG_P (src) || FP_REG_P (dst))
    return false;

  /* We don't need to split if operands are directly accessible.  */
  if (s_operand (src, mode) || s_operand (dst, mode))
    return false;

  /* Non-offsettable memory references cannot be split.  */
  if ((GET_CODE (src) == MEM && !offsettable_memref_p (src))
      || (GET_CODE (dst) == MEM && !offsettable_memref_p (dst)))
    return false;

  /* Moving the first subword must not clobber a register
     needed to move the second subword.  */
  if (register_operand (dst, mode))
    {
      rtx subreg = operand_subword (dst, first_subword, 0, mode);
      if (reg_overlap_mentioned_p (subreg, src))
	return false;
    }

  return true;
}
/* Return true if it can be proven that [MEM1, MEM1 + SIZE]
   and [MEM2, MEM2 + SIZE] do overlap and false
   otherwise.  */

bool
s390_overlap_p (rtx mem1, rtx mem2, HOST_WIDE_INT size)
{
  rtx addr1, addr2, addr_delta;
  HOST_WIDE_INT delta;

  if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
    return true;

  if (size == 0)
    return false;

  addr1 = XEXP (mem1, 0);
  addr2 = XEXP (mem2, 0);

  addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);

  /* This overlapping check is used by peepholes merging memory block operations.
     Overlapping operations would otherwise be recognized by the S/390 hardware
     and would fall back to a slower implementation. Allowing overlapping
     operations would lead to slow code but not to wrong code. Therefore we are
     somewhat optimistic if we cannot prove that the memory blocks are
     disjoint.
     That's why we return false here although this may accept operations on
     overlapping memory areas.  */
  if (!addr_delta || GET_CODE (addr_delta) != CONST_INT)
    return false;

  delta = INTVAL (addr_delta);

  if (delta == 0
      || (delta > 0 && delta < size)
      || (delta < 0 && -delta < size))
    return true;

  return false;
}
/* Check whether the address of memory reference MEM2 equals exactly
   the address of memory reference MEM1 plus DELTA.  Return true if
   we can prove this to be the case, false otherwise.  */

bool
s390_offset_p (rtx mem1, rtx mem2, rtx delta)
{
  rtx addr1, addr2, addr_delta;

  if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
    return false;

  addr1 = XEXP (mem1, 0);
  addr2 = XEXP (mem2, 0);

  addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
  if (!addr_delta || !rtx_equal_p (addr_delta, delta))
    return false;

  return true;
}
/* Expand logical operator CODE in mode MODE with operands OPERANDS.  */

void
s390_expand_logical_operator (enum rtx_code code, enum machine_mode mode,
			      rtx *operands)
{
  enum machine_mode wmode = mode;
  rtx dst = operands[0];
  rtx src1 = operands[1];
  rtx src2 = operands[2];
  rtx op, clob, tem;

  /* If we cannot handle the operation directly, use a temp register.  */
  if (!s390_logical_operator_ok_p (operands))
    dst = gen_reg_rtx (mode);

  /* QImode and HImode patterns make sense only if we have a destination
     in memory.  Otherwise perform the operation in SImode.  */
  if ((mode == QImode || mode == HImode) && GET_CODE (dst) != MEM)
    wmode = SImode;

  /* Widen operands if required.  */
  if (mode != wmode)
    {
      if (GET_CODE (dst) == SUBREG
	  && (tem = simplify_subreg (wmode, dst, mode, 0)) != 0)
	dst = tem;
      else if (REG_P (dst))
	dst = gen_rtx_SUBREG (wmode, dst, 0);
      else
	dst = gen_reg_rtx (wmode);

      if (GET_CODE (src1) == SUBREG
	  && (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)
	src1 = tem;
      else if (GET_MODE (src1) != VOIDmode)
	src1 = gen_rtx_SUBREG (wmode, force_reg (mode, src1), 0);

      if (GET_CODE (src2) == SUBREG
	  && (tem = simplify_subreg (wmode, src2, mode, 0)) != 0)
	src2 = tem;
      else if (GET_MODE (src2) != VOIDmode)
	src2 = gen_rtx_SUBREG (wmode, force_reg (mode, src2), 0);
    }

  /* Emit the instruction.  */
  op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, wmode, src1, src2));
  clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
  emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));

  /* Fix up the destination if needed.  */
  if (dst != operands[0])
    emit_move_insn (operands[0], gen_lowpart (mode, dst));
}
/* Check whether OPERANDS are OK for a logical operation (AND, IOR, XOR).  */

bool
s390_logical_operator_ok_p (rtx *operands)
{
  /* If the destination operand is in memory, it needs to coincide
     with one of the source operands.  After reload, it has to be
     the first source operand.  */
  if (GET_CODE (operands[0]) == MEM)
    return rtx_equal_p (operands[0], operands[1])
	   || (!reload_completed && rtx_equal_p (operands[0], operands[2]));

  return true;
}
/* Narrow logical operation CODE of memory operand MEMOP with immediate
   operand IMMOP to switch from SS to SI type instructions.  */

void
s390_narrow_logical_operator (enum rtx_code code, rtx *memop, rtx *immop)
{
  int def = code == AND ? -1 : 0;
  HOST_WIDE_INT mask;
  int part;

  gcc_assert (GET_CODE (*memop) == MEM);
  gcc_assert (!MEM_VOLATILE_P (*memop));

  mask = s390_extract_part (*immop, QImode, def);
  part = s390_single_part (*immop, GET_MODE (*memop), QImode, def);
  gcc_assert (part >= 0);

  *memop = adjust_address (*memop, QImode, part);
  *immop = gen_int_mode (mask, QImode);
}
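/* Worked example (illustrative): for an SImode AND of a memory
   operand with (const_int 0xffffff00), DEF is -1 and the single byte
   differing from 0xff is the least significant one (part 3 in
   big-endian numbering), so the operation narrows to a QImode AND
   of that byte with 0.  */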
/* How to allocate a 'struct machine_function'.  */

static struct machine_function *
s390_init_machine_status (void)
{
  return ggc_alloc_cleared_machine_function ();
}
static void
s390_option_override (void)
{
  /* Set up function hooks.  */
  init_machine_status = s390_init_machine_status;

  /* Architecture mode defaults according to ABI.  */
  if (!(target_flags_explicit & MASK_ZARCH))
    {
      if (TARGET_64BIT)
	target_flags |= MASK_ZARCH;
      else
	target_flags &= ~MASK_ZARCH;
    }

  /* Set the march default in case it hasn't been specified on
     the command line.  */
  if (s390_arch == PROCESSOR_max)
    {
      s390_arch_string = TARGET_ZARCH ? "z900" : "g5";
      s390_arch = TARGET_ZARCH ? PROCESSOR_2064_Z900 : PROCESSOR_9672_G5;
      s390_arch_flags = processor_flags_table[(int)s390_arch];
    }

  /* Determine processor to tune for.  */
  if (s390_tune == PROCESSOR_max)
    {
      s390_tune = s390_arch;
      s390_tune_flags = s390_arch_flags;
    }

  /* Sanity checks.  */
  if (TARGET_ZARCH && !TARGET_CPU_ZARCH)
    error ("z/Architecture mode not supported on %s", s390_arch_string);
  if (TARGET_64BIT && !TARGET_ZARCH)
    error ("64-bit ABI not supported in ESA/390 mode");

  /* Use hardware DFP if available and not explicitly disabled by
     user.  E.g. with -m31 -march=z10 -mzarch  */
  if (!(target_flags_explicit & MASK_HARD_DFP) && TARGET_DFP)
    target_flags |= MASK_HARD_DFP;

  if (TARGET_HARD_DFP && !TARGET_DFP)
    {
      if (target_flags_explicit & MASK_HARD_DFP)
	{
	  if (!TARGET_CPU_DFP)
	    error ("hardware decimal floating point instructions"
		   " not available on %s", s390_arch_string);
	  if (!TARGET_ZARCH)
	    error ("hardware decimal floating point instructions"
		   " not available in ESA/390 mode");
	}
      else
	target_flags &= ~MASK_HARD_DFP;
    }

  if ((target_flags_explicit & MASK_SOFT_FLOAT) && TARGET_SOFT_FLOAT)
    {
      if ((target_flags_explicit & MASK_HARD_DFP) && TARGET_HARD_DFP)
	error ("-mhard-dfp can%'t be used in conjunction with -msoft-float");

      target_flags &= ~MASK_HARD_DFP;
    }

  /* Set processor cost function.  */
  switch (s390_tune)
    {
    case PROCESSOR_2084_Z990:
      s390_cost = &z990_cost;
      break;
    case PROCESSOR_2094_Z9_109:
      s390_cost = &z9_109_cost;
      break;
    case PROCESSOR_2097_Z10:
      s390_cost = &z10_cost;
      break;
    case PROCESSOR_2817_Z196:
      s390_cost = &z196_cost;
      break;
    case PROCESSOR_2827_ZEC12:
      s390_cost = &zEC12_cost;
      break;
    default:
      s390_cost = &z900_cost;
    }

  if (TARGET_BACKCHAIN && TARGET_PACKED_STACK && TARGET_HARD_FLOAT)
    error ("-mbackchain -mpacked-stack -mhard-float are not supported "
	   "in combination");

  if (s390_stack_size)
    {
      if (s390_stack_guard >= s390_stack_size)
	error ("stack size must be greater than the stack guard value");
      else if (s390_stack_size > 1 << 16)
	error ("stack size must not be greater than 64k");
    }
  else if (s390_stack_guard)
    error ("-mstack-guard implies use of -mstack-size");

#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
  if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
    target_flags |= MASK_LONG_DOUBLE_128;
#endif

  if (s390_tune == PROCESSOR_2097_Z10
      || s390_tune == PROCESSOR_2817_Z196
      || s390_tune == PROCESSOR_2827_ZEC12)
    {
      maybe_set_param_value (PARAM_MAX_UNROLLED_INSNS, 100,
			     global_options.x_param_values,
			     global_options_set.x_param_values);
      maybe_set_param_value (PARAM_MAX_UNROLL_TIMES, 32,
			     global_options.x_param_values,
			     global_options_set.x_param_values);
      maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 2000,
			     global_options.x_param_values,
			     global_options_set.x_param_values);
      maybe_set_param_value (PARAM_MAX_COMPLETELY_PEEL_TIMES, 64,
			     global_options.x_param_values,
			     global_options_set.x_param_values);
    }

  maybe_set_param_value (PARAM_MAX_PENDING_LIST_LENGTH, 256,
			 global_options.x_param_values,
			 global_options_set.x_param_values);
  /* values for loop prefetching */
  maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, 256,
			 global_options.x_param_values,
			 global_options_set.x_param_values);
  maybe_set_param_value (PARAM_L1_CACHE_SIZE, 128,
			 global_options.x_param_values,
			 global_options_set.x_param_values);
  /* s390 has more than 2 levels and the size is much larger.  Since
     we are always running virtualized assume that we only get a small
     part of the caches above l1.  */
  maybe_set_param_value (PARAM_L2_CACHE_SIZE, 1500,
			 global_options.x_param_values,
			 global_options_set.x_param_values);
  maybe_set_param_value (PARAM_PREFETCH_MIN_INSN_TO_MEM_RATIO, 2,
			 global_options.x_param_values,
			 global_options_set.x_param_values);
  maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6,
			 global_options.x_param_values,
			 global_options_set.x_param_values);

  /* This cannot reside in s390_option_optimization_table since HAVE_prefetch
     requires the arch flags to be evaluated already.  Since prefetching
     is beneficial on s390, we enable it if available.  */
  if (flag_prefetch_loop_arrays < 0 && HAVE_prefetch && optimize >= 3)
    flag_prefetch_loop_arrays = 1;

  /* Use the alternative scheduling-pressure algorithm by default.  */
  maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM, 2,
			 global_options.x_param_values,
			 global_options_set.x_param_values);

  if (TARGET_TPF)
    {
      /* Don't emit DWARF3/4 unless specifically selected.  The TPF
	 debuggers do not yet support DWARF 3/4.  */
      if (!global_options_set.x_dwarf_strict)
	dwarf_strict = 1;
      if (!global_options_set.x_dwarf_version)
	dwarf_version = 2;
    }
}
/* Map for smallest class containing reg regno.  */

const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
{ GENERAL_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
  ADDR_REGS,    ADDR_REGS, ADDR_REGS, ADDR_REGS,
  ADDR_REGS,    ADDR_REGS, ADDR_REGS, ADDR_REGS,
  ADDR_REGS,    ADDR_REGS, ADDR_REGS, ADDR_REGS,
  FP_REGS,      FP_REGS,   FP_REGS,   FP_REGS,
  FP_REGS,      FP_REGS,   FP_REGS,   FP_REGS,
  FP_REGS,      FP_REGS,   FP_REGS,   FP_REGS,
  FP_REGS,      FP_REGS,   FP_REGS,   FP_REGS,
  ADDR_REGS,    CC_REGS,   ADDR_REGS, ADDR_REGS,
  ACCESS_REGS,  ACCESS_REGS
};
/* Return attribute type of insn.  */

static enum attr_type
s390_safe_attr_type (rtx insn)
{
  if (recog_memoized (insn) >= 0)
    return get_attr_type (insn);
  else
    return TYPE_NONE;
}
/* Return true if DISP is a valid short displacement.  */

static bool
s390_short_displacement (rtx disp)
{
  /* No displacement is OK.  */
  if (!disp)
    return true;

  /* Without the long displacement facility we don't need to
     distinguish between long and short displacement.  */
  if (!TARGET_LONG_DISPLACEMENT)
    return true;

  /* Integer displacement in range.  */
  if (GET_CODE (disp) == CONST_INT)
    return INTVAL (disp) >= 0 && INTVAL (disp) < 4096;

  /* GOT offset is not OK, the GOT can be large.  */
  if (GET_CODE (disp) == CONST
      && GET_CODE (XEXP (disp, 0)) == UNSPEC
      && (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
	  || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
    return false;

  /* All other symbolic constants are literal pool references,
     which are OK as the literal pool must be small.  */
  if (GET_CODE (disp) == CONST)
    return true;

  return false;
}
/* Decompose a RTL expression ADDR for a memory address into
   its components, returned in OUT.

   Returns false if ADDR is not a valid memory address, true
   otherwise.  If OUT is NULL, don't return the components,
   but check for validity only.

   Note: Only addresses in canonical form are recognized.
   LEGITIMIZE_ADDRESS should convert non-canonical forms to the
   canonical form so that they will be recognized.  */

static bool
s390_decompose_address (rtx addr, struct s390_address *out)
{
  HOST_WIDE_INT offset = 0;
  rtx base = NULL_RTX;
  rtx indx = NULL_RTX;
  rtx disp = NULL_RTX;
  rtx orig_disp;
  bool pointer = false;
  bool base_ptr = false;
  bool indx_ptr = false;
  bool literal_pool = false;

  /* We may need to substitute the literal pool base register into the address
     below.  However, at this point we do not know which register is going to
     be used as base, so we substitute the arg pointer register.  This is going
     to be treated as holding a pointer below -- it shouldn't be used for any
     other purpose.  */
  rtx fake_pool_base = gen_rtx_REG (Pmode, ARG_POINTER_REGNUM);

  /* Decompose address into base + index + displacement.  */

  if (GET_CODE (addr) == REG || GET_CODE (addr) == UNSPEC)
    base = addr;

  else if (GET_CODE (addr) == PLUS)
    {
      rtx op0 = XEXP (addr, 0);
      rtx op1 = XEXP (addr, 1);
      enum rtx_code code0 = GET_CODE (op0);
      enum rtx_code code1 = GET_CODE (op1);

      if (code0 == REG || code0 == UNSPEC)
	{
	  if (code1 == REG || code1 == UNSPEC)
	    {
	      indx = op0;	/* index + base */
	      base = op1;
	    }
	  else
	    {
	      base = op0;	/* base + displacement */
	      disp = op1;
	    }
	}
      else if (code0 == PLUS)
	{
	  indx = XEXP (op0, 0);	/* index + base + disp */
	  base = XEXP (op0, 1);
	  disp = op1;
	}
      else
	return false;
    }

  else
    disp = addr;		/* displacement */

  /* Extract integer part of displacement.  */
  orig_disp = disp;
  if (disp)
    {
      if (GET_CODE (disp) == CONST_INT)
	{
	  offset = INTVAL (disp);
	  disp = NULL_RTX;
	}
      else if (GET_CODE (disp) == CONST
	       && GET_CODE (XEXP (disp, 0)) == PLUS
	       && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
	{
	  offset = INTVAL (XEXP (XEXP (disp, 0), 1));
	  disp = XEXP (XEXP (disp, 0), 0);
	}
    }

  /* Strip off CONST here to avoid special case tests later.  */
  if (disp && GET_CODE (disp) == CONST)
    disp = XEXP (disp, 0);

  /* We can convert literal pool addresses to
     displacements by basing them off the base register.  */
  if (disp && GET_CODE (disp) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (disp))
    {
      /* Either base or index must be free to hold the base register.  */
      if (!base)
	base = fake_pool_base, literal_pool = true;
      else if (!indx)
	indx = fake_pool_base, literal_pool = true;
      else
	return false;

      /* Mark up the displacement.  */
      disp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, disp),
			     UNSPEC_LTREL_OFFSET);
    }

  /* Validate base register.  */
  if (base)
    {
      if (GET_CODE (base) == UNSPEC)
	switch (XINT (base, 1))
	  {
	  case UNSPEC_LTREF:
	    if (!disp)
	      disp = gen_rtx_UNSPEC (Pmode,
				     gen_rtvec (1, XVECEXP (base, 0, 0)),
				     UNSPEC_LTREL_OFFSET);
	    else
	      return false;

	    base = XVECEXP (base, 0, 1);
	    break;

	  case UNSPEC_LTREL_BASE:
	    if (XVECLEN (base, 0) == 1)
	      base = fake_pool_base, literal_pool = true;
	    else
	      base = XVECEXP (base, 0, 1);
	    break;

	  default:
	    return false;
	  }

      if (!REG_P (base)
	  || (GET_MODE (base) != SImode
	      && GET_MODE (base) != Pmode))
	return false;

      if (REGNO (base) == STACK_POINTER_REGNUM
	  || REGNO (base) == FRAME_POINTER_REGNUM
	  || ((reload_completed || reload_in_progress)
	      && frame_pointer_needed
	      && REGNO (base) == HARD_FRAME_POINTER_REGNUM)
	  || REGNO (base) == ARG_POINTER_REGNUM
	  || (flag_pic
	      && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
	pointer = base_ptr = true;

      if ((reload_completed || reload_in_progress)
	  && base == cfun->machine->base_reg)
	pointer = base_ptr = literal_pool = true;
    }

  /* Validate index register.  */
  if (indx)
    {
      if (GET_CODE (indx) == UNSPEC)
	switch (XINT (indx, 1))
	  {
	  case UNSPEC_LTREF:
	    if (!disp)
	      disp = gen_rtx_UNSPEC (Pmode,
				     gen_rtvec (1, XVECEXP (indx, 0, 0)),
				     UNSPEC_LTREL_OFFSET);
	    else
	      return false;

	    indx = XVECEXP (indx, 0, 1);
	    break;

	  case UNSPEC_LTREL_BASE:
	    if (XVECLEN (indx, 0) == 1)
	      indx = fake_pool_base, literal_pool = true;
	    else
	      indx = XVECEXP (indx, 0, 1);
	    break;

	  default:
	    return false;
	  }

      if (!REG_P (indx)
	  || (GET_MODE (indx) != SImode
	      && GET_MODE (indx) != Pmode))
	return false;

      if (REGNO (indx) == STACK_POINTER_REGNUM
	  || REGNO (indx) == FRAME_POINTER_REGNUM
	  || ((reload_completed || reload_in_progress)
	      && frame_pointer_needed
	      && REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
	  || REGNO (indx) == ARG_POINTER_REGNUM
	  || (flag_pic
	      && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
	pointer = indx_ptr = true;

      if ((reload_completed || reload_in_progress)
	  && indx == cfun->machine->base_reg)
	pointer = indx_ptr = literal_pool = true;
    }

  /* Prefer to use pointer as base, not index.  */
  if (base && indx && !base_ptr
      && (indx_ptr || (!REG_POINTER (base) && REG_POINTER (indx))))
    {
      rtx tmp = base;
      base = indx;
      indx = tmp;
    }

  /* Validate displacement.  */
  if (!disp)
    {
      /* If virtual registers are involved, the displacement will change later
	 anyway as the virtual registers get eliminated.  This could make a
	 valid displacement invalid, but it is more likely to make an invalid
	 displacement valid, because we sometimes access the register save area
	 via negative offsets to one of those registers.
	 Thus we don't check the displacement for validity here.  If after
	 elimination the displacement turns out to be invalid after all,
	 this is fixed up by reload in any case.  */
      /* LRA maintains always displacements up to date and we need to
	 know the displacement is right during all LRA not only at the
	 final elimination.  */
      if (lra_in_progress
	  || (base != arg_pointer_rtx
	      && indx != arg_pointer_rtx
	      && base != return_address_pointer_rtx
	      && indx != return_address_pointer_rtx
	      && base != frame_pointer_rtx
	      && indx != frame_pointer_rtx
	      && base != virtual_stack_vars_rtx
	      && indx != virtual_stack_vars_rtx))
	if (!DISP_IN_RANGE (offset))
	  return false;
    }
  else
    {
      /* All the special cases are pointers.  */
      pointer = true;

      /* In the small-PIC case, the linker converts @GOT
	 and @GOTNTPOFF offsets to possible displacements.  */
      if (GET_CODE (disp) == UNSPEC
	  && (XINT (disp, 1) == UNSPEC_GOT
	      || XINT (disp, 1) == UNSPEC_GOTNTPOFF)
	  && flag_pic == 1)
	{
	  ;
	}

      /* Accept pool label offsets.  */
      else if (GET_CODE (disp) == UNSPEC
	       && XINT (disp, 1) == UNSPEC_POOL_OFFSET)
	;

      /* Accept literal pool references.  */
      else if (GET_CODE (disp) == UNSPEC
	       && XINT (disp, 1) == UNSPEC_LTREL_OFFSET)
	{
	  /* In case CSE pulled a non literal pool reference out of
	     the pool we have to reject the address.  This is
	     especially important when loading the GOT pointer on non
	     zarch CPUs.  In this case the literal pool contains an lt
	     relative offset to the _GLOBAL_OFFSET_TABLE_ label which
	     will most likely exceed the displacement.  */
	  if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
	      || !CONSTANT_POOL_ADDRESS_P (XVECEXP (disp, 0, 0)))
	    return false;

	  orig_disp = gen_rtx_CONST (Pmode, disp);
	  if (offset)
	    {
	      /* If we have an offset, make sure it does not
		 exceed the size of the constant pool entry.  */
	      rtx sym = XVECEXP (disp, 0, 0);
	      if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))
		return false;

	      orig_disp = plus_constant (Pmode, orig_disp, offset);
	    }
	}

      else
	return false;
    }

  if (!base && !indx)
    pointer = true;

  if (out)
    {
      out->base = base;
      out->indx = indx;
      out->disp = orig_disp;
      out->pointer = pointer;
      out->literal_pool = literal_pool;
    }

  return true;
}
/* Decompose a RTL expression OP for a shift count into its components,
   and return the base register in BASE and the offset in OFFSET.

   Return true if OP is a valid shift count, false if not.  */

static bool
s390_decompose_shift_count (rtx op, rtx *base, HOST_WIDE_INT *offset)
{
  HOST_WIDE_INT off = 0;

  /* We can have an integer constant, an address register,
     or a sum of the two.  */
  if (GET_CODE (op) == CONST_INT)
    {
      off = INTVAL (op);
      op = NULL_RTX;
    }
  if (op && GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT)
    {
      off = INTVAL (XEXP (op, 1));
      op = XEXP (op, 0);
    }
  while (op && GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);

  if (op && GET_CODE (op) != REG)
    return false;

  if (offset)
    *offset = off;
  if (base)
    *base = op;

  return true;
}
/* Return true if CODE is a valid address without index.  */

bool
s390_legitimate_address_without_index_p (rtx op)
{
  struct s390_address addr;

  if (!s390_decompose_address (XEXP (op, 0), &addr))
    return false;
  if (addr.indx)
    return false;

  return true;
}
/* Return TRUE if ADDR is an operand valid for a load/store relative
   instruction.  Be aware that the alignment of the operand needs to
   be checked separately.
   Valid addresses are single references or a sum of a reference and a
   constant integer.  Return these parts in SYMREF and ADDEND.  You can
   pass NULL in REF and/or ADDEND if you are not interested in these
   values.  Literal pool references are *not* considered symbol
   references.  */

static bool
s390_loadrelative_operand_p (rtx addr, rtx *symref, HOST_WIDE_INT *addend)
{
  HOST_WIDE_INT tmpaddend = 0;

  if (GET_CODE (addr) == CONST)
    addr = XEXP (addr, 0);

  if (GET_CODE (addr) == PLUS)
    {
      if (!CONST_INT_P (XEXP (addr, 1)))
	return false;

      tmpaddend = INTVAL (XEXP (addr, 1));
      addr = XEXP (addr, 0);
    }

  if ((GET_CODE (addr) == SYMBOL_REF && !CONSTANT_POOL_ADDRESS_P (addr))
      || (GET_CODE (addr) == UNSPEC
	  && (XINT (addr, 1) == UNSPEC_GOTENT
	      || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
    {
      if (symref)
	*symref = addr;
      if (addend)
	*addend = tmpaddend;

      return true;
    }

  return false;
}
/* Return true if the address in OP is valid for constraint letter C
   if wrapped in a MEM rtx.  Set LIT_POOL_OK to true if literal
   pool MEMs should be accepted.  Only the Q, R, S, T constraint
   letters are allowed for C.  */

static int
s390_check_qrst_address (char c, rtx op, bool lit_pool_ok)
{
  struct s390_address addr;
  bool decomposed = false;

  /* This check makes sure that no symbolic address (except literal
     pool references) are accepted by the R or T constraints.  */
  if (s390_loadrelative_operand_p (op, NULL, NULL))
    return 0;

  /* Ensure literal pool references are only accepted if LIT_POOL_OK.  */
  if (!lit_pool_ok)
    {
      if (!s390_decompose_address (op, &addr))
	return 0;
      if (addr.literal_pool)
	return 0;
      decomposed = true;
    }

  switch (c)
    {
    case 'Q': /* no index short displacement */
      if (!decomposed && !s390_decompose_address (op, &addr))
	return 0;
      if (addr.indx)
	return 0;
      if (!s390_short_displacement (addr.disp))
	return 0;
      break;

    case 'R': /* with index short displacement */
      if (TARGET_LONG_DISPLACEMENT)
	{
	  if (!decomposed && !s390_decompose_address (op, &addr))
	    return 0;
	  if (!s390_short_displacement (addr.disp))
	    return 0;
	}
      /* Any invalid address here will be fixed up by reload,
	 so accept it for the most generic constraint.  */
      break;

    case 'S': /* no index long displacement */
      if (!TARGET_LONG_DISPLACEMENT)
	return 0;
      if (!decomposed && !s390_decompose_address (op, &addr))
	return 0;
      if (addr.indx)
	return 0;
      if (s390_short_displacement (addr.disp))
	return 0;
      break;

    case 'T': /* with index long displacement */
      if (!TARGET_LONG_DISPLACEMENT)
	return 0;
      /* Any invalid address here will be fixed up by reload,
	 so accept it for the most generic constraint.  */
      if ((decomposed || s390_decompose_address (op, &addr))
	  && s390_short_displacement (addr.disp))
	return 0;
      break;

    default:
      return 0;
    }

  return 1;
}
/* Evaluates constraint strings described by the regular expression
   ([A|B|Z](Q|R|S|T))|U|W|Y and returns 1 if OP is a valid operand for
   the constraint given in STR, or 0 else.  */

int
s390_mem_constraint (const char *str, rtx op)
{
  char c = str[0];

  switch (c)
    {
    case 'A':
      /* Check for offsettable variants of memory constraints.  */
      if (!MEM_P (op) || MEM_VOLATILE_P (op))
	return 0;
      if ((reload_completed || reload_in_progress)
	  ? !offsettable_memref_p (op) : !offsettable_nonstrict_memref_p (op))
	return 0;
      return s390_check_qrst_address (str[1], XEXP (op, 0), true);
    case 'B':
      /* Check for non-literal-pool variants of memory constraints.  */
      if (!MEM_P (op))
	return 0;
      return s390_check_qrst_address (str[1], XEXP (op, 0), false);
    case 'Q':
    case 'R':
    case 'S':
    case 'T':
      if (GET_CODE (op) != MEM)
	return 0;
      return s390_check_qrst_address (c, XEXP (op, 0), true);
    case 'U':
      return (s390_check_qrst_address ('Q', op, true)
	      || s390_check_qrst_address ('R', op, true));
    case 'W':
      return (s390_check_qrst_address ('S', op, true)
	      || s390_check_qrst_address ('T', op, true));
    case 'Y':
      /* Simply check for the basic form of a shift count.  Reload will
	 take care of making sure we have a proper base register.  */
      if (!s390_decompose_shift_count (op, NULL, NULL))
	return 0;
      break;
    case 'Z':
      return s390_check_qrst_address (str[1], op, true);
    default:
      return 0;
    }

  return 1;
}
/* Evaluates constraint strings starting with letter O.  Input
   parameter C is the second letter following the "O" in the constraint
   string.  Returns 1 if VALUE meets the respective constraint and 0
   otherwise.  */

int
s390_O_constraint_str (const char c, HOST_WIDE_INT value)
{
  if (!TARGET_EXTIMM)
    return 0;

  switch (c)
    {
    case 's':
      return trunc_int_for_mode (value, SImode) == value;

    case 'p':
      return value == 0
             || s390_single_part (GEN_INT (value), DImode, SImode, 0) == 1;

    case 'n':
      return s390_single_part (GEN_INT (value - 1), DImode, SImode, -1) == 1;

    default:
      gcc_unreachable ();
    }
}

/* Evaluates constraint strings starting with letter N.  Parameter STR
   contains the letters following letter "N" in the constraint string.
   Returns true if VALUE matches the constraint.  */

int
s390_N_constraint_str (const char *str, HOST_WIDE_INT value)
{
  enum machine_mode mode, part_mode;
  int def;
  int part, part_goal;

  if (str[0] == 'x')
    part_goal = -1;
  else
    part_goal = str[0] - '0';

  switch (str[1])
    {
    case 'Q':
      part_mode = QImode;
      break;
    case 'H':
      part_mode = HImode;
      break;
    case 'S':
      part_mode = SImode;
      break;
    default:
      return 0;
    }

  switch (str[2])
    {
    case 'H':
      mode = HImode;
      break;
    case 'S':
      mode = SImode;
      break;
    case 'D':
      mode = DImode;
      break;
    default:
      return 0;
    }

  switch (str[3])
    {
    case '0':
      def = 0;
      break;
    case 'F':
      def = -1;
      break;
    default:
      return 0;
    }

  if (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (part_mode))
    return 0;

  part = s390_single_part (GEN_INT (value), mode, part_mode, def);
  if (part < 0)
    return 0;
  if (part_goal != -1 && part_goal != part)
    return 0;

  return 1;
}

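/* Illustrative standalone sketch (ours, not GCC's) of the "single part"
   idea used by s390_single_part above: a value matches an N constraint if,
   when cut into PART_MODE-sized chunks, exactly one chunk differs from the
   default pattern (all zeros or all ones).  Shown for 16-bit parts of a
   64-bit value, assuming two's complement.  */
#if 0
#include <stdint.h>

/* Return the index of the only 16-bit part of VALUE differing from DEF
   (0x0000 or 0xffff), or -1 if no part or more than one part differs.  */
static int
single_hi_part (uint64_t value, uint16_t def)
{
  int i, found = -1;
  for (i = 0; i < 4; i++)
    {
      uint16_t part = (uint16_t) (value >> (16 * (3 - i)));
      if (part != def)
        {
          if (found >= 0)
            return -1;          /* more than one non-default part */
          found = i;
        }
    }
  return found;
}
#endif
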
/* Returns true if the input parameter VALUE is a float zero.  */

int
s390_float_const_zero_p (rtx value)
{
  return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
          && value == CONST0_RTX (GET_MODE (value)));
}

/* Implement TARGET_REGISTER_MOVE_COST.  */

static int
s390_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
                         reg_class_t from, reg_class_t to)
{
  /* On s390, copy between fprs and gprs is expensive as long as no
     ldgr/lgdr can be used.  */
  if ((!TARGET_Z10 || GET_MODE_SIZE (mode) != 8)
      && ((reg_classes_intersect_p (from, GENERAL_REGS)
           && reg_classes_intersect_p (to, FP_REGS))
          || (reg_classes_intersect_p (from, FP_REGS)
              && reg_classes_intersect_p (to, GENERAL_REGS))))
    return 10;

  return 1;
}

/* Implement TARGET_MEMORY_MOVE_COST.  */

static int
s390_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
                       reg_class_t rclass ATTRIBUTE_UNUSED,
                       bool in ATTRIBUTE_UNUSED)
{
  return 1;
}

/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.
   CODE contains GET_CODE (x), OUTER_CODE contains the code
   of the superexpression of x.  */

static bool
s390_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
                int *total, bool speed ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case CONST:
    case CONST_INT:
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST_DOUBLE:
    case MEM:
      *total = 0;
      return true;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
    case ROTATE:
    case ROTATERT:
    case AND:
    case IOR:
    case XOR:
    case NEG:
    case NOT:
      *total = COSTS_N_INSNS (1);
      return false;

    case PLUS:
    case MINUS:
      *total = COSTS_N_INSNS (1);
      return false;

    case MULT:
      switch (GET_MODE (x))
        {
        case SImode:
          {
            rtx left = XEXP (x, 0);
            rtx right = XEXP (x, 1);
            if (GET_CODE (right) == CONST_INT
                && CONST_OK_FOR_K (INTVAL (right)))
              *total = s390_cost->mhi;
            else if (GET_CODE (left) == SIGN_EXTEND)
              *total = s390_cost->mh;
            else
              *total = s390_cost->ms;  /* msr, ms, msy */
            break;
          }
        case DImode:
          {
            rtx left = XEXP (x, 0);
            rtx right = XEXP (x, 1);
            if (TARGET_ZARCH)
              {
                if (GET_CODE (right) == CONST_INT
                    && CONST_OK_FOR_K (INTVAL (right)))
                  *total = s390_cost->mghi;
                else if (GET_CODE (left) == SIGN_EXTEND)
                  *total = s390_cost->msgf;
                else
                  *total = s390_cost->msg;  /* msgr, msg */
              }
            else /* TARGET_31BIT */
              {
                if (GET_CODE (left) == SIGN_EXTEND
                    && GET_CODE (right) == SIGN_EXTEND)
                  /* mulsidi case: mr, m */
                  *total = s390_cost->m;
                else if (GET_CODE (left) == ZERO_EXTEND
                         && GET_CODE (right) == ZERO_EXTEND
                         && TARGET_CPU_ZARCH)
                  /* umulsidi case: ml, mlr */
                  *total = s390_cost->ml;
                else
                  /* Complex calculation is required.  */
                  *total = COSTS_N_INSNS (40);
              }
            break;
          }
        case SFmode:
        case DFmode:
          *total = s390_cost->mult_df;
          break;
        case TFmode:
          *total = s390_cost->mxbr;
          break;
        default:
          return false;
        }
      return false;

    case FMA:
      switch (GET_MODE (x))
        {
        case DFmode:
          *total = s390_cost->madbr;
          break;
        case SFmode:
          *total = s390_cost->maebr;
          break;
        default:
          return false;
        }
      /* Negate in the third argument is free: FMSUB.  */
      if (GET_CODE (XEXP (x, 2)) == NEG)
        {
          *total += (rtx_cost (XEXP (x, 0), FMA, 0, speed)
                     + rtx_cost (XEXP (x, 1), FMA, 1, speed)
                     + rtx_cost (XEXP (XEXP (x, 2), 0), FMA, 2, speed));
          return true;
        }
      return false;

    case UDIV:
    case UMOD:
      if (GET_MODE (x) == TImode)              /* 128 bit division */
        *total = s390_cost->dlgr;
      else if (GET_MODE (x) == DImode)
        {
          rtx right = XEXP (x, 1);
          if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
            *total = s390_cost->dlr;
          else                                 /* 64 by 64 bit division */
            *total = s390_cost->dlgr;
        }
      else if (GET_MODE (x) == SImode)         /* 32 bit division */
        *total = s390_cost->dlr;
      return false;

    case DIV:
    case MOD:
      if (GET_MODE (x) == DImode)
        {
          rtx right = XEXP (x, 1);
          if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
            if (TARGET_ZARCH)
              *total = s390_cost->dsgfr;
            else
              *total = s390_cost->dr;
          else                                 /* 64 by 64 bit division */
            *total = s390_cost->dsgr;
        }
      else if (GET_MODE (x) == SImode)         /* 32 bit division */
        *total = s390_cost->dlr;
      else if (GET_MODE (x) == SFmode)
        *total = s390_cost->debr;
      else if (GET_MODE (x) == DFmode)
        *total = s390_cost->ddbr;
      else if (GET_MODE (x) == TFmode)
        *total = s390_cost->dxbr;
      return false;

    case SQRT:
      if (GET_MODE (x) == SFmode)
        *total = s390_cost->sqebr;
      else if (GET_MODE (x) == DFmode)
        *total = s390_cost->sqdbr;
      else /* TFmode */
        *total = s390_cost->sqxbr;
      return false;

    case SIGN_EXTEND:
    case ZERO_EXTEND:
      if (outer_code == MULT || outer_code == DIV || outer_code == MOD
          || outer_code == PLUS || outer_code == MINUS
          || outer_code == COMPARE)
        *total = 0;
      return false;

    case COMPARE:
      *total = COSTS_N_INSNS (1);
      if (GET_CODE (XEXP (x, 0)) == AND
          && GET_CODE (XEXP (x, 1)) == CONST_INT
          && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
        {
          rtx op0 = XEXP (XEXP (x, 0), 0);
          rtx op1 = XEXP (XEXP (x, 0), 1);
          rtx op2 = XEXP (x, 1);
          if (memory_operand (op0, GET_MODE (op0))
              && s390_tm_ccmode (op1, op2, 0) != VOIDmode)
            return true;
          if (register_operand (op0, GET_MODE (op0))
              && s390_tm_ccmode (op1, op2, 1) != VOIDmode)
            return true;
        }
      return false;

    default:
      return false;
    }
}

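/* Note on units (illustrative, ours): COSTS_N_INSNS is GCC's rtl.h macro
   ((N) * 4), so the costs returned above are expressed in quarters of a
   simple instruction.  A standalone sketch of the scale:  */
#if 0
#define COSTS_N_INSNS(N) ((N) * 4)   /* as defined in GCC's rtl.h */

/* COSTS_N_INSNS (1) == 4: one simple insn;
   COSTS_N_INSNS (40) == 160: the "complex calculation" fallback above
   weighs forty times a simple insn.  */
#endif
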
/* Return the cost of an address rtx ADDR.  */

static int
s390_address_cost (rtx addr, enum machine_mode mode ATTRIBUTE_UNUSED,
                   addr_space_t as ATTRIBUTE_UNUSED,
                   bool speed ATTRIBUTE_UNUSED)
{
  struct s390_address ad;

  if (!s390_decompose_address (addr, &ad))
    return 1000;

  return ad.indx ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (1);
}

/* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
   otherwise return 0.  */

static int
tls_symbolic_operand (rtx op)
{
  if (GET_CODE (op) != SYMBOL_REF)
    return 0;
  return SYMBOL_REF_TLS_MODEL (op);
}

/* Split DImode access register reference REG (on 64-bit) into its constituent
   low and high parts, and store them into LO and HI.  Note that gen_lowpart/
   gen_highpart cannot be used as they assume all registers are word-sized,
   while our access registers have only half that size.  */

void
s390_split_access_reg (rtx reg, rtx *lo, rtx *hi)
{
  gcc_assert (TARGET_64BIT);
  gcc_assert (ACCESS_REG_P (reg));
  gcc_assert (GET_MODE (reg) == DImode);
  gcc_assert (!(REGNO (reg) & 1));

  *lo = gen_rtx_REG (SImode, REGNO (reg) + 1);
  *hi = gen_rtx_REG (SImode, REGNO (reg));
}

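/* Worked example (illustrative only): with REG an even/odd access-register
   pair (reg:DI N), N even, the function stores HI = (reg:SI N) and
   LO = (reg:SI N+1); i.e. the more significant half lives in the
   lower-numbered (even) register, big-endian style.  */
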
/* Return true if OP contains a symbol reference */

bool
symbolic_reference_mentioned_p (rtx op)
{
  const char *fmt;
  int i;

  if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
    return true;

  fmt = GET_RTX_FORMAT (GET_CODE (op));
  for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
        {
          int j;

          for (j = XVECLEN (op, i) - 1; j >= 0; j--)
            if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
              return true;
        }

      else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
        return true;
    }

  return false;
}

/* Return true if OP contains a reference to a thread-local symbol.  */

bool
tls_symbolic_reference_mentioned_p (rtx op)
{
  const char *fmt;
  int i;

  if (GET_CODE (op) == SYMBOL_REF)
    return tls_symbolic_operand (op);

  fmt = GET_RTX_FORMAT (GET_CODE (op));
  for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
        {
          int j;

          for (j = XVECLEN (op, i) - 1; j >= 0; j--)
            if (tls_symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
              return true;
        }

      else if (fmt[i] == 'e' && tls_symbolic_reference_mentioned_p (XEXP (op, i)))
        return true;
    }

  return false;
}

/* Return true if OP is a legitimate general operand when
   generating PIC code.  It is given that flag_pic is on
   and that OP satisfies CONSTANT_P or is a CONST_DOUBLE.  */

int
legitimate_pic_operand_p (rtx op)
{
  /* Accept all non-symbolic constants.  */
  if (!SYMBOLIC_CONST (op))
    return 1;

  /* Reject everything else; must be handled
     via emit_symbolic_move.  */
  return 0;
}

/* Returns true if the constant value OP is a legitimate general operand.
   It is given that OP satisfies CONSTANT_P or is a CONST_DOUBLE.  */

static bool
s390_legitimate_constant_p (enum machine_mode mode, rtx op)
{
  /* Accept all non-symbolic constants.  */
  if (!SYMBOLIC_CONST (op))
    return true;

  /* Accept immediate LARL operands.  */
  if (TARGET_CPU_ZARCH && larl_operand (op, mode))
    return true;

  /* Thread-local symbols are never legal constants.  This is
     so that emit_call knows that computing such addresses
     might require a function call.  */
  if (TLS_SYMBOLIC_CONST (op))
    return false;

  /* In the PIC case, symbolic constants must *not* be
     forced into the literal pool.  We accept them here,
     so that they will be handled by emit_symbolic_move.  */
  if (flag_pic)
    return true;

  /* All remaining non-PIC symbolic constants are
     forced into the literal pool.  */
  return false;
}

/* Determine if it's legal to put X into the constant pool.  This
   is not possible if X contains the address of a symbol that is
   not constant (TLS) or not known at final link time (PIC).  */

static bool
s390_cannot_force_const_mem (enum machine_mode mode, rtx x)
{
  switch (GET_CODE (x))
    {
    case CONST_INT:
    case CONST_DOUBLE:
      /* Accept all non-symbolic constants.  */
      return false;

    case LABEL_REF:
      /* Labels are OK iff we are non-PIC.  */
      return flag_pic != 0;

    case SYMBOL_REF:
      /* 'Naked' TLS symbol references are never OK,
         non-TLS symbols are OK iff we are non-PIC.  */
      if (tls_symbolic_operand (x))
        return true;
      else
        return flag_pic != 0;

    case CONST:
      return s390_cannot_force_const_mem (mode, XEXP (x, 0));
    case PLUS:
    case MINUS:
      return s390_cannot_force_const_mem (mode, XEXP (x, 0))
             || s390_cannot_force_const_mem (mode, XEXP (x, 1));

    case UNSPEC:
      switch (XINT (x, 1))
        {
        /* Only lt-relative or GOT-relative UNSPECs are OK.  */
        case UNSPEC_LTREL_OFFSET:
        case UNSPEC_GOT:
        case UNSPEC_GOTOFF:
        case UNSPEC_PLTOFF:
        case UNSPEC_TLSGD:
        case UNSPEC_TLSLDM:
        case UNSPEC_NTPOFF:
        case UNSPEC_DTPOFF:
        case UNSPEC_GOTNTPOFF:
        case UNSPEC_INDNTPOFF:
          return false;

        /* If the literal pool shares the code section, execute
           template placeholders may be put into the pool as well.  */
        case UNSPEC_INSN:
          return TARGET_CPU_ZARCH;

        default:
          return true;
        }
      break;

    default:
      gcc_unreachable ();
    }
}

/* Returns true if the constant value OP is a legitimate general
   operand during and after reload.  The difference to
   legitimate_constant_p is that this function will not accept
   a constant that would need to be forced to the literal pool
   before it can be used as operand.
   This function accepts all constants which can be loaded directly
   into a GPR.  */

static bool
legitimate_reload_constant_p (rtx op)
{
  /* Accept la(y) operands.  */
  if (GET_CODE (op) == CONST_INT
      && DISP_IN_RANGE (INTVAL (op)))
    return true;

  /* Accept l(g)hi/l(g)fi operands.  */
  if (GET_CODE (op) == CONST_INT
      && (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_Os (INTVAL (op))))
    return true;

  /* Accept lliXX operands.  */
  if (TARGET_ZARCH
      && GET_CODE (op) == CONST_INT
      && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
      && s390_single_part (op, word_mode, HImode, 0) >= 0)
    return true;

  if (TARGET_EXTIMM
      && GET_CODE (op) == CONST_INT
      && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
      && s390_single_part (op, word_mode, SImode, 0) >= 0)
    return true;

  /* Accept larl operands.  */
  if (TARGET_CPU_ZARCH
      && larl_operand (op, VOIDmode))
    return true;

  /* Accept floating-point zero operands that fit into a single GPR.  */
  if (GET_CODE (op) == CONST_DOUBLE
      && s390_float_const_zero_p (op)
      && GET_MODE_SIZE (GET_MODE (op)) <= UNITS_PER_WORD)
    return true;

  /* Accept double-word operands that can be split.  */
  if (GET_CODE (op) == CONST_INT
      && trunc_int_for_mode (INTVAL (op), word_mode) != INTVAL (op))
    {
      enum machine_mode dword_mode = word_mode == SImode ? DImode : TImode;
      rtx hi = operand_subword (op, 0, 0, dword_mode);
      rtx lo = operand_subword (op, 1, 0, dword_mode);
      return legitimate_reload_constant_p (hi)
             && legitimate_reload_constant_p (lo);
    }

  /* Everything else cannot be handled without reload.  */
  return false;
}

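/* Illustrative standalone sketch (ours): how the double-word case above
   splits a constant.  On a 31-bit target (word_mode == SImode), a DImode
   value such as 0x00012345deadbeef is treated as the two SImode words
   0x00012345 and 0xdeadbeef, each of which must itself pass
   legitimate_reload_constant_p.  */
#if 0
#include <stdint.h>

static void
split_dword (uint64_t v, uint32_t *hi, uint32_t *lo)
{
  *hi = (uint32_t) (v >> 32);   /* operand_subword (op, 0, ...) */
  *lo = (uint32_t) v;           /* operand_subword (op, 1, ...) */
}
#endif
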
/* Returns true if the constant value OP is a legitimate fp operand
   during and after reload.
   This function accepts all constants which can be loaded directly
   into an FPR.  */

static bool
legitimate_reload_fp_constant_p (rtx op)
{
  /* Accept floating-point zero operands if the load zero instruction
     can be used.  Prior to z196 the load fp zero instruction caused a
     performance penalty if the result is used as BFP number.  */
  if (TARGET_Z196
      && GET_CODE (op) == CONST_DOUBLE
      && s390_float_const_zero_p (op))
    return true;

  return false;
}

/* Given an rtx OP being reloaded into a reg required to be in class RCLASS,
   return the class of reg to actually use.  */

static reg_class_t
s390_preferred_reload_class (rtx op, reg_class_t rclass)
{
  switch (GET_CODE (op))
    {
      /* Constants we cannot reload into general registers
         must be forced into the literal pool.  */
      case CONST_DOUBLE:
      case CONST_INT:
        if (reg_class_subset_p (GENERAL_REGS, rclass)
            && legitimate_reload_constant_p (op))
          return GENERAL_REGS;
        else if (reg_class_subset_p (ADDR_REGS, rclass)
                 && legitimate_reload_constant_p (op))
          return ADDR_REGS;
        else if (reg_class_subset_p (FP_REGS, rclass)
                 && legitimate_reload_fp_constant_p (op))
          return FP_REGS;
        return NO_REGS;

      /* If a symbolic constant or a PLUS is reloaded,
         it is most likely being used as an address, so
         prefer ADDR_REGS.  If 'class' is not a superset
         of ADDR_REGS, e.g. FP_REGS, reject this reload.  */
      case CONST:
        /* A larl operand with odd addend will get fixed via secondary
           reload.  So don't request it to be pushed into literal
           pool.  */
        if (TARGET_CPU_ZARCH
            && GET_CODE (XEXP (op, 0)) == PLUS
            && GET_CODE (XEXP (XEXP(op, 0), 0)) == SYMBOL_REF
            && GET_CODE (XEXP (XEXP(op, 0), 1)) == CONST_INT)
          {
            if (reg_class_subset_p (ADDR_REGS, rclass))
              return ADDR_REGS;
            else
              return NO_REGS;
          }
        /* fallthrough */
      case LABEL_REF:
      case SYMBOL_REF:
        if (!legitimate_reload_constant_p (op))
          return NO_REGS;
        /* fallthrough */
      case PLUS:
        /* load address will be used.  */
        if (reg_class_subset_p (ADDR_REGS, rclass))
          return ADDR_REGS;
        else
          return NO_REGS;

      default:
        break;
    }

  return rclass;
}

/* Return true if ADDR is SYMBOL_REF + addend with addend being a
   multiple of ALIGNMENT and the SYMBOL_REF being naturally
   aligned.  */

bool
s390_check_symref_alignment (rtx addr, HOST_WIDE_INT alignment)
{
  HOST_WIDE_INT addend;
  rtx symref;

  if (!s390_loadrelative_operand_p (addr, &symref, &addend))
    return false;

  if (addend & (alignment - 1))
    return false;

  if (GET_CODE (symref) == SYMBOL_REF
      && !SYMBOL_REF_NOT_NATURALLY_ALIGNED_P (symref))
    return true;

  if (GET_CODE (symref) == UNSPEC
      && alignment <= UNITS_PER_LONG)
    return true;

  return false;
}

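/* The addend test above is the usual power-of-two alignment check: ADDEND
   is a multiple of ALIGNMENT iff its low bits are clear.  Standalone sketch
   (ours), assuming ALIGNMENT is a power of two:  */
#if 0
static int
is_aligned (long addend, long alignment)
{
  return (addend & (alignment - 1)) == 0;  /* e.g. 8-byte: low 3 bits clear */
}
#endif
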
/* ADDR is moved into REG using larl.  If ADDR isn't a valid larl
   operand SCRATCH is used to reload the even part of the address and
   the addend is added afterwards.  */

void
s390_reload_larl_operand (rtx reg, rtx addr, rtx scratch)
{
  HOST_WIDE_INT addend;
  rtx symref;

  if (!s390_loadrelative_operand_p (addr, &symref, &addend))
    gcc_unreachable ();

  if (!(addend & 1))
    /* Easy case.  The addend is even so larl will do fine.  */
    emit_move_insn (reg, addr);
  else
    {
      /* We can leave the scratch register untouched if the target
         register is a valid base register.  */
      if (REGNO (reg) < FIRST_PSEUDO_REGISTER
          && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS)
        scratch = reg;

      gcc_assert (REGNO (scratch) < FIRST_PSEUDO_REGISTER);
      gcc_assert (REGNO_REG_CLASS (REGNO (scratch)) == ADDR_REGS);

      if (addend != 1)
        emit_move_insn (scratch,
                        gen_rtx_CONST (Pmode,
                                       gen_rtx_PLUS (Pmode, symref,
                                                     GEN_INT (addend - 1))));
      else
        emit_move_insn (scratch, symref);

      /* Increment the address using la in order to avoid clobbering cc.  */
      s390_load_address (reg, gen_rtx_PLUS (Pmode, scratch, const1_rtx));
    }
}

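/* Worked example (ours, register chosen arbitrarily): LARL's immediate is
   a halfword-scaled PC-relative offset, so it can only materialize
   sym + even addend.  For ADDR = sym + 5 the code above emits, in effect:

        larl    %r1, sym+4      ; even part via SCRATCH
        la      %r1, 1(%r1)     ; +1, without touching the CC

   The la variant is used precisely because an add instruction would
   clobber the condition code.  */
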
/* Generate what is necessary to move between REG and MEM using
   SCRATCH.  The direction is given by TOMEM.  */

void
s390_reload_symref_address (rtx reg, rtx mem, rtx scratch, bool tomem)
{
  /* Reload might have pulled a constant out of the literal pool.
     Force it back in.  */
  if (CONST_INT_P (mem) || GET_CODE (mem) == CONST_DOUBLE
      || GET_CODE (mem) == CONST)
    mem = force_const_mem (GET_MODE (reg), mem);

  gcc_assert (MEM_P (mem));

  /* For a load from memory we can leave the scratch register
     untouched if the target register is a valid base register.  */
  if (!tomem
      && REGNO (reg) < FIRST_PSEUDO_REGISTER
      && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS
      && GET_MODE (reg) == GET_MODE (scratch))
    scratch = reg;

  /* Load address into scratch register.  Since we can't have a
     secondary reload for a secondary reload we have to cover the case
     where larl would need a secondary reload here as well.  */
  s390_reload_larl_operand (scratch, XEXP (mem, 0), scratch);

  /* Now we can use a standard load/store to do the move.  */
  if (tomem)
    emit_move_insn (replace_equiv_address (mem, scratch), reg);
  else
    emit_move_insn (reg, replace_equiv_address (mem, scratch));
}

/* Inform reload about cases where moving X with a mode MODE to a register in
   RCLASS requires an extra scratch or immediate register.  Return the class
   needed for the immediate register.  */

static reg_class_t
s390_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
                       enum machine_mode mode, secondary_reload_info *sri)
{
  enum reg_class rclass = (enum reg_class) rclass_i;

  /* Intermediate register needed.  */
  if (reg_classes_intersect_p (CC_REGS, rclass))
    return GENERAL_REGS;

  if (TARGET_Z10)
    {
      HOST_WIDE_INT offset;
      rtx symref;

      /* On z10 several optimizer steps may generate larl operands with
         an odd addend.  */
      if (in_p
          && s390_loadrelative_operand_p (x, &symref, &offset)
          && mode == Pmode
          && !SYMBOL_REF_ALIGN1_P (symref)
          && (offset & 1) == 1)
        sri->icode = ((mode == DImode) ? CODE_FOR_reloaddi_larl_odd_addend_z10
                      : CODE_FOR_reloadsi_larl_odd_addend_z10);

      /* On z10 we need a scratch register when moving QI, TI or floating
         point mode values from or to a memory location with a SYMBOL_REF
         or if the symref addend of a SI or DI move is not aligned to the
         width of the access.  */
      if (MEM_P (x)
          && s390_loadrelative_operand_p (XEXP (x, 0), NULL, NULL)
          && (mode == QImode || mode == TImode || FLOAT_MODE_P (mode)
              || (!TARGET_ZARCH && mode == DImode)
              || ((mode == HImode || mode == SImode || mode == DImode)
                  && (!s390_check_symref_alignment (XEXP (x, 0),
                                                    GET_MODE_SIZE (mode))))))
        {
#define __SECONDARY_RELOAD_CASE(M,m)                                    \
          case M##mode:                                                 \
            if (TARGET_64BIT)                                           \
              sri->icode = in_p ? CODE_FOR_reload##m##di_toreg_z10 :    \
                                  CODE_FOR_reload##m##di_tomem_z10;     \
            else                                                        \
              sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 :    \
                                  CODE_FOR_reload##m##si_tomem_z10;     \
          break;

          switch (GET_MODE (x))
            {
              __SECONDARY_RELOAD_CASE (QI, qi);
              __SECONDARY_RELOAD_CASE (HI, hi);
              __SECONDARY_RELOAD_CASE (SI, si);
              __SECONDARY_RELOAD_CASE (DI, di);
              __SECONDARY_RELOAD_CASE (TI, ti);
              __SECONDARY_RELOAD_CASE (SF, sf);
              __SECONDARY_RELOAD_CASE (DF, df);
              __SECONDARY_RELOAD_CASE (TF, tf);
              __SECONDARY_RELOAD_CASE (SD, sd);
              __SECONDARY_RELOAD_CASE (DD, dd);
              __SECONDARY_RELOAD_CASE (TD, td);

            default:
              gcc_unreachable ();
            }
#undef __SECONDARY_RELOAD_CASE
        }
    }

  /* We need a scratch register when loading a PLUS expression which
     is not a legitimate operand of the LOAD ADDRESS instruction.  */
  /* LRA can deal with transformation of plus op very well -- so we
     don't need to prompt LRA in this case.  */
  if (! lra_in_progress && in_p && s390_plus_operand (x, mode))
    sri->icode = (TARGET_64BIT ?
                  CODE_FOR_reloaddi_plus : CODE_FOR_reloadsi_plus);

  /* Performing a multiword move from or to memory we have to make sure the
     second chunk in memory is addressable without causing a displacement
     overflow.  If that would be the case we calculate the address in
     a scratch register.  */
  if (MEM_P (x)
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && !DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1))
                         + GET_MODE_SIZE (mode) - 1))
    {
      /* For GENERAL_REGS a displacement overflow is no problem if occurring
         in a s_operand address since we may fallback to lm/stm.  So we only
         have to care about overflows in the b+i+d case.  */
      if ((reg_classes_intersect_p (GENERAL_REGS, rclass)
           && s390_class_max_nregs (GENERAL_REGS, mode) > 1
           && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
          /* For FP_REGS no lm/stm is available so this check is triggered
             for displacement overflows in b+i+d and b+d like addresses.  */
          || (reg_classes_intersect_p (FP_REGS, rclass)
              && s390_class_max_nregs (FP_REGS, mode) > 1))
        {
          if (in_p)
            sri->icode = (TARGET_64BIT ?
                          CODE_FOR_reloaddi_nonoffmem_in :
                          CODE_FOR_reloadsi_nonoffmem_in);
          else
            sri->icode = (TARGET_64BIT ?
                          CODE_FOR_reloaddi_nonoffmem_out :
                          CODE_FOR_reloadsi_nonoffmem_out);
        }
    }

  /* A scratch address register is needed when a symbolic constant is
     copied to r0 compiling with -fPIC.  In other cases the target
     register might be used as temporary (see legitimize_pic_address).  */
  if (in_p && SYMBOLIC_CONST (x) && flag_pic == 2 && rclass != ADDR_REGS)
    sri->icode = (TARGET_64BIT ?
                  CODE_FOR_reloaddi_PIC_addr :
                  CODE_FOR_reloadsi_PIC_addr);

  /* Either scratch or no register needed.  */
  return NO_REGS;
}

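/* For reference, one expansion of the helper macro above, written out by
   hand for the QImode instance:

        case QImode:
          if (TARGET_64BIT)
            sri->icode = in_p ? CODE_FOR_reloadqidi_toreg_z10
                              : CODE_FOR_reloadqidi_tomem_z10;
          else
            sri->icode = in_p ? CODE_FOR_reloadqisi_toreg_z10
                              : CODE_FOR_reloadqisi_tomem_z10;
          break;
*/
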
/* Generate code to load SRC, which is PLUS that is not a
   legitimate operand for the LA instruction, into TARGET.
   SCRATCH may be used as scratch register.  */

void
s390_expand_plus_operand (rtx target, rtx src,
                          rtx scratch)
{
  rtx sum1, sum2;
  struct s390_address ad;

  /* src must be a PLUS; get its two operands.  */
  gcc_assert (GET_CODE (src) == PLUS);
  gcc_assert (GET_MODE (src) == Pmode);

  /* Check if any of the two operands is already scheduled
     for replacement by reload.  This can happen e.g. when
     float registers occur in an address.  */
  sum1 = find_replacement (&XEXP (src, 0));
  sum2 = find_replacement (&XEXP (src, 1));
  src = gen_rtx_PLUS (Pmode, sum1, sum2);

  /* If the address is already strictly valid, there's nothing to do.  */
  if (!s390_decompose_address (src, &ad)
      || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
      || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
    {
      /* Otherwise, one of the operands cannot be an address register;
         we reload its value into the scratch register.  */
      if (true_regnum (sum1) < 1 || true_regnum (sum1) > 15)
        {
          emit_move_insn (scratch, sum1);
          sum1 = scratch;
        }
      if (true_regnum (sum2) < 1 || true_regnum (sum2) > 15)
        {
          emit_move_insn (scratch, sum2);
          sum2 = scratch;
        }

      /* According to the way these invalid addresses are generated
         in reload.c, it should never happen (at least on s390) that
         *neither* of the PLUS components, after find_replacements
         was applied, is an address register.  */
      if (sum1 == scratch && sum2 == scratch)
        {
          debug_rtx (src);
          gcc_unreachable ();
        }

      src = gen_rtx_PLUS (Pmode, sum1, sum2);
    }

  /* Emit the LOAD ADDRESS pattern.  Note that reload of PLUS
     is only ever performed on addresses, so we can mark the
     sum as legitimate for LA in any case.  */
  s390_load_address (target, src);
}

/* Return true if ADDR is a valid memory address.
   STRICT specifies whether strict register checking applies.  */

static bool
s390_legitimate_address_p (enum machine_mode mode, rtx addr, bool strict)
{
  struct s390_address ad;

  if (TARGET_Z10
      && larl_operand (addr, VOIDmode)
      && (mode == VOIDmode
          || s390_check_symref_alignment (addr, GET_MODE_SIZE (mode))))
    return true;

  if (!s390_decompose_address (addr, &ad))
    return false;

  if (strict)
    {
      if (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
        return false;

      if (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx)))
        return false;
    }
  else
    {
      if (ad.base
          && !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
               || REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
        return false;

      if (ad.indx
          && !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
               || REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
        return false;
    }
  return true;
}

/* Return true if OP is a valid operand for the LA instruction.
   In 31-bit, we need to prove that the result is used as an
   address, as LA performs only a 31-bit addition.  */

bool
legitimate_la_operand_p (rtx op)
{
  struct s390_address addr;
  if (!s390_decompose_address (op, &addr))
    return false;

  return (TARGET_64BIT || addr.pointer);
}

/* Return true if it is valid *and* preferable to use LA to
   compute the sum of OP1 and OP2.  */

bool
preferred_la_operand_p (rtx op1, rtx op2)
{
  struct s390_address addr;

  if (op2 != const0_rtx)
    op1 = gen_rtx_PLUS (Pmode, op1, op2);

  if (!s390_decompose_address (op1, &addr))
    return false;
  if (addr.base && !REGNO_OK_FOR_BASE_P (REGNO (addr.base)))
    return false;
  if (addr.indx && !REGNO_OK_FOR_INDEX_P (REGNO (addr.indx)))
    return false;

  /* Avoid LA instructions with index register on z196; it is
     preferable to use regular add instructions when possible.
     Starting with zEC12 the la with index register is "uncracked"
     again.  */
  if (addr.indx && s390_tune == PROCESSOR_2817_Z196)
    return false;

  if (!TARGET_64BIT && !addr.pointer)
    return false;

  if (addr.pointer)
    return true;

  if ((addr.base && REG_P (addr.base) && REG_POINTER (addr.base))
      || (addr.indx && REG_P (addr.indx) && REG_POINTER (addr.indx)))
    return true;

  return false;
}

/* Emit a forced load-address operation to load SRC into DST.
   This will use the LOAD ADDRESS instruction even in situations
   where legitimate_la_operand_p (SRC) returns false.  */

void
s390_load_address (rtx dst, rtx src)
{
  if (TARGET_64BIT)
    emit_move_insn (dst, src);
  else
    emit_insn (gen_force_la_31 (dst, src));
}

/* Return a legitimate reference for ORIG (an address) using the
   register REG.  If REG is 0, a new pseudo is generated.

   There are two types of references that must be handled:

   1. Global data references must load the address from the GOT, via
      the PIC reg.  An insn is emitted to do this load, and the reg is
      returned.

   2. Static data references, constant pool addresses, and code labels
      compute the address as an offset from the GOT, whose base is in
      the PIC reg.  Static data objects have SYMBOL_FLAG_LOCAL set to
      differentiate them from global data objects.  The returned
      address is the PIC reg + an unspec constant.

   TARGET_LEGITIMIZE_ADDRESS_P rejects symbolic references unless the PIC
   reg also appears in the address.  */

rtx
legitimize_pic_address (rtx orig, rtx reg)
{
  rtx addr = orig;
  rtx addend = const0_rtx;
  rtx new_rtx = orig;

  gcc_assert (!TLS_SYMBOLIC_CONST (addr));

  if (GET_CODE (addr) == CONST)
    addr = XEXP (addr, 0);

  if (GET_CODE (addr) == PLUS)
    {
      addend = XEXP (addr, 1);
      addr = XEXP (addr, 0);
    }

  if ((GET_CODE (addr) == LABEL_REF
       || (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (addr))
       || (GET_CODE (addr) == UNSPEC &&
           (XINT (addr, 1) == UNSPEC_GOTENT
            || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
      && GET_CODE (addend) == CONST_INT)
    {
      /* This can be locally addressed.  */

      /* larl_operand requires UNSPECs to be wrapped in a const rtx.  */
      rtx const_addr = (GET_CODE (addr) == UNSPEC ?
                        gen_rtx_CONST (Pmode, addr) : addr);

      if (TARGET_CPU_ZARCH
          && larl_operand (const_addr, VOIDmode)
          && INTVAL (addend) < (HOST_WIDE_INT)1 << 31
          && INTVAL (addend) >= -((HOST_WIDE_INT)1 << 31))
        {
          if (INTVAL (addend) & 1)
            {
              /* LARL can't handle odd offsets, so emit a pair of LARL
                 and LA.  */
              rtx temp = reg ? reg : gen_reg_rtx (Pmode);

              if (!DISP_IN_RANGE (INTVAL (addend)))
                {
                  HOST_WIDE_INT even = INTVAL (addend) - 1;
                  addr = gen_rtx_PLUS (Pmode, addr, GEN_INT (even));
                  addr = gen_rtx_CONST (Pmode, addr);
                  addend = const1_rtx;
                }

              emit_move_insn (temp, addr);
              new_rtx = gen_rtx_PLUS (Pmode, temp, addend);

              if (reg != 0)
                {
                  s390_load_address (reg, new_rtx);
                  new_rtx = reg;
                }
            }
          else
            {
              /* If the offset is even, we can just use LARL.  This
                 will happen automatically.  */
            }
        }
      else
        {
          /* No larl - Access local symbols relative to the GOT.  */

          rtx temp = reg ? reg : gen_reg_rtx (Pmode);

          if (reload_in_progress || reload_completed)
            df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);

          addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
          if (addend != const0_rtx)
            addr = gen_rtx_PLUS (Pmode, addr, addend);
          addr = gen_rtx_CONST (Pmode, addr);
          addr = force_const_mem (Pmode, addr);
          emit_move_insn (temp, addr);

          new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
          if (reg != 0)
            {
              s390_load_address (reg, new_rtx);
              new_rtx = reg;
            }
        }
    }
  else if (GET_CODE (addr) == SYMBOL_REF && addend == const0_rtx)
    {
      /* A non-local symbol reference without addend.

         The symbol ref is wrapped into an UNSPEC to make sure the
         proper operand modifier (@GOT or @GOTENT) will be emitted.
         This will tell the linker to put the symbol into the GOT.

         Additionally the code dereferencing the GOT slot is emitted here.

         An addend to the symref needs to be added afterwards.
         legitimize_pic_address calls itself recursively to handle
         that case.  So no need to do it here.  */

      if (reg == 0)
        reg = gen_reg_rtx (Pmode);

      if (TARGET_Z10)
        {
          /* Use load relative if possible.
             lgrl <target>, sym@GOTENT  */
          new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
          new_rtx = gen_rtx_CONST (Pmode, new_rtx);
          new_rtx = gen_const_mem (GET_MODE (reg), new_rtx);

          emit_move_insn (reg, new_rtx);
          new_rtx = reg;
        }
      else if (flag_pic == 1)
        {
          /* Assume GOT offset is a valid displacement operand (< 4k
             or < 512k with z990).  This is handled the same way in
             both 31- and 64-bit code (@GOT).
             lg <target>, sym@GOT(r12)  */

          if (reload_in_progress || reload_completed)
            df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);

          new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
          new_rtx = gen_rtx_CONST (Pmode, new_rtx);
          new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
          new_rtx = gen_const_mem (Pmode, new_rtx);
          emit_move_insn (reg, new_rtx);
          new_rtx = reg;
        }
      else if (TARGET_CPU_ZARCH)
        {
          /* If the GOT offset might be >= 4k, we determine the position
             of the GOT entry via a PC-relative LARL (@GOTENT).
             larl temp, sym@GOTENT
             lg   <target>, 0(temp) */

          rtx temp = reg ? reg : gen_reg_rtx (Pmode);

          gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
                      || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);

          new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
          new_rtx = gen_rtx_CONST (Pmode, new_rtx);
          emit_move_insn (temp, new_rtx);

          new_rtx = gen_const_mem (Pmode, temp);
          emit_move_insn (reg, new_rtx);

          new_rtx = reg;
        }
      else
        {
          /* If the GOT offset might be >= 4k, we have to load it
             from the literal pool (@GOT).

             lg temp, lit-litbase(r13)
             lg <target>, 0(temp)
             lit:  .long sym@GOT  */

          rtx temp = reg ? reg : gen_reg_rtx (Pmode);

          gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
                      || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);

          if (reload_in_progress || reload_completed)
            df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);

          addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
          addr = gen_rtx_CONST (Pmode, addr);
          addr = force_const_mem (Pmode, addr);
          emit_move_insn (temp, addr);

          new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
          new_rtx = gen_const_mem (Pmode, new_rtx);
          emit_move_insn (reg, new_rtx);
          new_rtx = reg;
        }
    }
  else if (GET_CODE (addr) == UNSPEC && GET_CODE (addend) == CONST_INT)
    {
      gcc_assert (XVECLEN (addr, 0) == 1);
      switch (XINT (addr, 1))
        {
          /* These UNSPECs address symbols (or PLT slots) relative to the
             GOT (not GOT slots!).  In general this will exceed the
             displacement range, so these values belong in the literal
             pool.  */
        case UNSPEC_GOTOFF:
        case UNSPEC_PLTOFF:
          new_rtx = force_const_mem (Pmode, orig);
          break;

          /* For -fPIC the GOT size might exceed the displacement
             range so make sure the value is in the literal pool.  */
        case UNSPEC_GOT:
          if (flag_pic == 2)
            new_rtx = force_const_mem (Pmode, orig);
          break;

          /* For @GOTENT larl is used.  This is handled like local
             symbol refs.  */
        case UNSPEC_GOTENT:
          gcc_unreachable ();
          break;

          /* @PLT is OK as is on 64-bit, must be converted to
             GOT-relative @PLTOFF on 31-bit.  */
        case UNSPEC_PLT:
          if (!TARGET_CPU_ZARCH)
            {
              rtx temp = reg ? reg : gen_reg_rtx (Pmode);

              if (reload_in_progress || reload_completed)
                df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);

              addr = XVECEXP (addr, 0, 0);
              addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr),
                                     UNSPEC_PLTOFF);
              if (addend != const0_rtx)
                addr = gen_rtx_PLUS (Pmode, addr, addend);
              addr = gen_rtx_CONST (Pmode, addr);
              addr = force_const_mem (Pmode, addr);
              emit_move_insn (temp, addr);

              new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
              if (reg != 0)
                {
                  s390_load_address (reg, new_rtx);
                  new_rtx = reg;
                }
            }
          else
            /* On 64 bit larl can be used.  This case is handled like
               local symbol refs.  */
            gcc_unreachable ();
          break;

          /* Everything else cannot happen.  */
        default:
          gcc_unreachable ();
        }
    }
  else if (addend != const0_rtx)
    {
      /* Otherwise, compute the sum.  */

      rtx base = legitimize_pic_address (addr, reg);
      new_rtx  = legitimize_pic_address (addend,
                                         base == reg ? NULL_RTX : reg);
      if (GET_CODE (new_rtx) == CONST_INT)
        new_rtx = plus_constant (Pmode, base, INTVAL (new_rtx));
      else
        {
          if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
            {
              base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
              new_rtx = XEXP (new_rtx, 1);
            }
          new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
        }

      if (GET_CODE (new_rtx) == CONST)
        new_rtx = XEXP (new_rtx, 0);
      new_rtx = force_operand (new_rtx, 0);
    }

  return new_rtx;
}

/* Load the thread pointer into a register.  */

rtx
s390_get_thread_pointer (void)
{
  rtx tp = gen_reg_rtx (Pmode);

  emit_move_insn (tp, gen_rtx_REG (Pmode, TP_REGNUM));
  mark_reg_pointer (tp, BITS_PER_WORD);

  return tp;
}

/* Emit a tls call insn.  The call target is the SYMBOL_REF stored
   in s390_tls_symbol which always refers to __tls_get_offset.
   The returned offset is written to RESULT_REG and an USE rtx is
   generated for TLS_CALL.  */

static GTY(()) rtx s390_tls_symbol;

static void
s390_emit_tls_call_insn (rtx result_reg, rtx tls_call)
{
  rtx insn;

  if (!flag_pic)
    emit_insn (s390_load_got ());

  if (!s390_tls_symbol)
    s390_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_offset");

  insn = s390_emit_call (s390_tls_symbol, tls_call, result_reg,
                         gen_rtx_REG (Pmode, RETURN_REGNUM));

  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), result_reg);
  RTL_CONST_CALL_P (insn) = 1;
}

/* ADDR contains a thread-local SYMBOL_REF.  Generate code to compute
   this (thread-local) address.  REG may be used as temporary.  */

static rtx
legitimize_tls_address (rtx addr, rtx reg)
{
  rtx new_rtx, tls_call, temp, base, r2, insn;

  if (GET_CODE (addr) == SYMBOL_REF)
    switch (tls_symbolic_operand (addr))
      {
      case TLS_MODEL_GLOBAL_DYNAMIC:
        start_sequence ();
        r2 = gen_rtx_REG (Pmode, 2);
        tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_TLSGD);
        new_rtx = gen_rtx_CONST (Pmode, tls_call);
        new_rtx = force_const_mem (Pmode, new_rtx);
        emit_move_insn (r2, new_rtx);
        s390_emit_tls_call_insn (r2, tls_call);
        insn = get_insns ();
        end_sequence ();

        new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
        temp = gen_reg_rtx (Pmode);
        emit_libcall_block (insn, temp, r2, new_rtx);

        new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
        if (reg != 0)
          {
            s390_load_address (reg, new_rtx);
            new_rtx = reg;
          }
        break;

      case TLS_MODEL_LOCAL_DYNAMIC:
        start_sequence ();
        r2 = gen_rtx_REG (Pmode, 2);
        tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM);
        new_rtx = gen_rtx_CONST (Pmode, tls_call);
        new_rtx = force_const_mem (Pmode, new_rtx);
        emit_move_insn (r2, new_rtx);
        s390_emit_tls_call_insn (r2, tls_call);
        insn = get_insns ();
        end_sequence ();

        new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM_NTPOFF);
        temp = gen_reg_rtx (Pmode);
        emit_libcall_block (insn, temp, r2, new_rtx);

        new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
        base = gen_reg_rtx (Pmode);
        s390_load_address (base, new_rtx);

        new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_DTPOFF);
        new_rtx = gen_rtx_CONST (Pmode, new_rtx);
        new_rtx = force_const_mem (Pmode, new_rtx);
        temp = gen_reg_rtx (Pmode);
        emit_move_insn (temp, new_rtx);

        new_rtx = gen_rtx_PLUS (Pmode, base, temp);
        if (reg != 0)
          {
            s390_load_address (reg, new_rtx);
            new_rtx = reg;
          }
        break;

      case TLS_MODEL_INITIAL_EXEC:
        if (flag_pic == 1)
          {
            /* Assume GOT offset < 4k.  This is handled the same way
               in both 31- and 64-bit code.  */

            if (reload_in_progress || reload_completed)
              df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);

            new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
            new_rtx = gen_rtx_CONST (Pmode, new_rtx);
            new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
            new_rtx = gen_const_mem (Pmode, new_rtx);
            temp = gen_reg_rtx (Pmode);
            emit_move_insn (temp, new_rtx);
          }
        else if (TARGET_CPU_ZARCH)
          {
            /* If the GOT offset might be >= 4k, we determine the position
               of the GOT entry via a PC-relative LARL.  */

            new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
            new_rtx = gen_rtx_CONST (Pmode, new_rtx);
            temp = gen_reg_rtx (Pmode);
            emit_move_insn (temp, new_rtx);

            new_rtx = gen_const_mem (Pmode, temp);
            temp = gen_reg_rtx (Pmode);
            emit_move_insn (temp, new_rtx);
          }
        else if (flag_pic)
          {
            /* If the GOT offset might be >= 4k, we have to load it
               from the literal pool.  */

            if (reload_in_progress || reload_completed)
              df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);

            new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
            new_rtx = gen_rtx_CONST (Pmode, new_rtx);
            new_rtx = force_const_mem (Pmode, new_rtx);
            temp = gen_reg_rtx (Pmode);
            emit_move_insn (temp, new_rtx);

            new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
            new_rtx = gen_const_mem (Pmode, new_rtx);

            new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
            temp = gen_reg_rtx (Pmode);
            emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
          }
        else
          {
            /* In position-dependent code, load the absolute address of
               the GOT entry from the literal pool.  */

            new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
            new_rtx = gen_rtx_CONST (Pmode, new_rtx);
            new_rtx = force_const_mem (Pmode, new_rtx);
            temp = gen_reg_rtx (Pmode);
            emit_move_insn (temp, new_rtx);

            new_rtx = temp;
            new_rtx = gen_const_mem (Pmode, new_rtx);
            new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
            temp = gen_reg_rtx (Pmode);
            emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
          }

        new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
        if (reg != 0)
          {
            s390_load_address (reg, new_rtx);
            new_rtx = reg;
          }
        break;

      case TLS_MODEL_LOCAL_EXEC:
        new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
        new_rtx = gen_rtx_CONST (Pmode, new_rtx);
        new_rtx = force_const_mem (Pmode, new_rtx);
        temp = gen_reg_rtx (Pmode);
        emit_move_insn (temp, new_rtx);

        new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
        if (reg != 0)
          {
            s390_load_address (reg, new_rtx);
            new_rtx = reg;
          }
        break;

      default:
        gcc_unreachable ();
      }

  else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == UNSPEC)
    {
      switch (XINT (XEXP (addr, 0), 1))
        {
        case UNSPEC_INDNTPOFF:
          gcc_assert (TARGET_CPU_ZARCH);
          new_rtx = addr;
          break;

        default:
          gcc_unreachable ();
        }
    }

  else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
           && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
    {
      new_rtx = XEXP (XEXP (addr, 0), 0);
      if (GET_CODE (new_rtx) != SYMBOL_REF)
        new_rtx = gen_rtx_CONST (Pmode, new_rtx);

      new_rtx = legitimize_tls_address (new_rtx, reg);
      new_rtx = plus_constant (Pmode, new_rtx,
                               INTVAL (XEXP (XEXP (addr, 0), 1)));
      new_rtx = force_operand (new_rtx, 0);
    }

  else
    gcc_unreachable ();  /* for now ... */

  return new_rtx;
}

/* Emit insns making the address in operands[1] valid for a standard
   move to operands[0].  operands[1] is replaced by an address which
   should be used instead of the former RTX to emit the move
   pattern.  */

void
emit_symbolic_move (rtx *operands)
{
  rtx temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);

  if (GET_CODE (operands[0]) == MEM)
    operands[1] = force_reg (Pmode, operands[1]);
  else if (TLS_SYMBOLIC_CONST (operands[1]))
    operands[1] = legitimize_tls_address (operands[1], temp);
  else if (flag_pic)
    operands[1] = legitimize_pic_address (operands[1], temp);
}

/* Try machine-dependent ways of modifying an illegitimate address X
   to be legitimate.  If we find one, return the new, valid address.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   MODE is the mode of the operand pointed to by X.

   When -fpic is used, special handling is needed for symbolic references.
   See comments by legitimize_pic_address for details.  */

static rtx
s390_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
                         enum machine_mode mode ATTRIBUTE_UNUSED)
{
  rtx constant_term = const0_rtx;

  if (TLS_SYMBOLIC_CONST (x))
    {
      x = legitimize_tls_address (x, 0);

      if (s390_legitimate_address_p (mode, x, FALSE))
        return x;
    }
  else if (GET_CODE (x) == PLUS
           && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
               || TLS_SYMBOLIC_CONST (XEXP (x, 1))))
    {
      return x;
    }
  else if (flag_pic)
    {
      if (SYMBOLIC_CONST (x)
          || (GET_CODE (x) == PLUS
              && (SYMBOLIC_CONST (XEXP (x, 0))
                  || SYMBOLIC_CONST (XEXP (x, 1)))))
        x = legitimize_pic_address (x, 0);

      if (s390_legitimate_address_p (mode, x, FALSE))
        return x;
    }

  x = eliminate_constant_term (x, &constant_term);

  /* Optimize loading of large displacements by splitting them
     into the multiple of 4K and the rest; this allows the
     former to be CSE'd if possible.

     Don't do this if the displacement is added to a register
     pointing into the stack frame, as the offsets will
     change later anyway.  */

  if (GET_CODE (constant_term) == CONST_INT
      && !TARGET_LONG_DISPLACEMENT
      && !DISP_IN_RANGE (INTVAL (constant_term))
      && !(REG_P (x) && REGNO_PTR_FRAME_P (REGNO (x))))
    {
      HOST_WIDE_INT lower = INTVAL (constant_term) & 0xfff;
      HOST_WIDE_INT upper = INTVAL (constant_term) ^ lower;

      rtx temp = gen_reg_rtx (Pmode);
      rtx val  = force_operand (GEN_INT (upper), temp);
      if (val != temp)
        emit_move_insn (temp, val);

      x = gen_rtx_PLUS (Pmode, x, temp);
      constant_term = GEN_INT (lower);
    }

  if (GET_CODE (x) == PLUS)
    {
      if (GET_CODE (XEXP (x, 0)) == REG)
        {
          rtx temp = gen_reg_rtx (Pmode);
          rtx val  = force_operand (XEXP (x, 1), temp);
          if (val != temp)
            emit_move_insn (temp, val);

          x = gen_rtx_PLUS (Pmode, XEXP (x, 0), temp);
        }

      else if (GET_CODE (XEXP (x, 1)) == REG)
        {
          rtx temp = gen_reg_rtx (Pmode);
          rtx val  = force_operand (XEXP (x, 0), temp);
          if (val != temp)
            emit_move_insn (temp, val);

          x = gen_rtx_PLUS (Pmode, temp, XEXP (x, 1));
        }
    }

  if (constant_term != const0_rtx)
    x = gen_rtx_PLUS (Pmode, x, constant_term);

  return x;
}

/* Try a machine-dependent way of reloading an illegitimate address AD
   operand.  If we find one, push the reload and return the new address.

   MODE is the mode of the enclosing MEM.  OPNUM is the operand number
   and TYPE is the reload type of the current reload.  */

rtx
legitimize_reload_address (rtx ad, enum machine_mode mode ATTRIBUTE_UNUSED,
                           int opnum, int type)
{
  if (!optimize || TARGET_LONG_DISPLACEMENT)
    return NULL_RTX;

  if (GET_CODE (ad) == PLUS)
    {
      rtx tem = simplify_binary_operation (PLUS, Pmode,
                                           XEXP (ad, 0), XEXP (ad, 1));
      if (tem)
        ad = tem;
    }

  if (GET_CODE (ad) == PLUS
      && GET_CODE (XEXP (ad, 0)) == REG
      && GET_CODE (XEXP (ad, 1)) == CONST_INT
      && !DISP_IN_RANGE (INTVAL (XEXP (ad, 1))))
    {
      HOST_WIDE_INT lower = INTVAL (XEXP (ad, 1)) & 0xfff;
      HOST_WIDE_INT upper = INTVAL (XEXP (ad, 1)) ^ lower;
      rtx cst, tem, new_rtx;

      cst = GEN_INT (upper);
      if (!legitimate_reload_constant_p (cst))
        cst = force_const_mem (Pmode, cst);

      tem = gen_rtx_PLUS (Pmode, XEXP (ad, 0), cst);
      new_rtx = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));

      push_reload (XEXP (tem, 1), 0, &XEXP (tem, 1), 0,
                   BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
                   opnum, (enum reload_type) type);
      return new_rtx;
    }

  return NULL_RTX;
}

/* Emit code to move LEN bytes from SRC to DST.  */

void
s390_expand_movmem (rtx dst, rtx src, rtx len)
{
  /* When tuning for z10 or higher we rely on the Glibc functions to
     do the right thing.  Only for constant lengths below 64k we will
     generate inline code.  */
  if (s390_tune >= PROCESSOR_2097_Z10
      && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
    return;

  if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
    {
      if (INTVAL (len) > 0)
        emit_insn (gen_movmem_short (dst, src, GEN_INT (INTVAL (len) - 1)));
    }

  else if (TARGET_MVCLE)
    {
      emit_insn (gen_movmem_long (dst, src, convert_to_mode (Pmode, len, 1)));
    }

  else
    {
      rtx dst_addr, src_addr, count, blocks, temp;
      rtx loop_start_label = gen_label_rtx ();
      rtx loop_end_label = gen_label_rtx ();
      rtx end_label = gen_label_rtx ();
      enum machine_mode mode;

      mode = GET_MODE (len);
      if (mode == VOIDmode)
        mode = Pmode;

      dst_addr = gen_reg_rtx (Pmode);
      src_addr = gen_reg_rtx (Pmode);
      count = gen_reg_rtx (mode);
      blocks = gen_reg_rtx (mode);

      convert_move (count, len, 1);
      emit_cmp_and_jump_insns (count, const0_rtx,
                               EQ, NULL_RTX, mode, 1, end_label);

      emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
      emit_move_insn (src_addr, force_operand (XEXP (src, 0), NULL_RTX));
      dst = change_address (dst, VOIDmode, dst_addr);
      src = change_address (src, VOIDmode, src_addr);

      temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
                           OPTAB_DIRECT);
      if (temp != count)
        emit_move_insn (count, temp);

      temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
                           OPTAB_DIRECT);
      if (temp != blocks)
        emit_move_insn (blocks, temp);

      emit_cmp_and_jump_insns (blocks, const0_rtx,
                               EQ, NULL_RTX, mode, 1, loop_end_label);

      emit_label (loop_start_label);

      if (TARGET_Z10
          && (GET_CODE (len) != CONST_INT || INTVAL (len) > 768))
        {
          rtx prefetch;

          /* Issue a read prefetch for the +3 cache line.  */
          prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, src_addr, GEN_INT (768)),
                                   const0_rtx, const0_rtx);
          PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
          emit_insn (prefetch);

          /* Issue a write prefetch for the +3 cache line.  */
          prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (768)),
                                   const1_rtx, const0_rtx);
          PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
          emit_insn (prefetch);
        }

      emit_insn (gen_movmem_short (dst, src, GEN_INT (255)));
      s390_load_address (dst_addr,
                         gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
      s390_load_address (src_addr,
                         gen_rtx_PLUS (Pmode, src_addr, GEN_INT (256)));

      temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
                           OPTAB_DIRECT);
      if (temp != blocks)
        emit_move_insn (blocks, temp);

      emit_cmp_and_jump_insns (blocks, const0_rtx,
                               EQ, NULL_RTX, mode, 1, loop_end_label);

      emit_jump (loop_start_label);
      emit_label (loop_end_label);

      emit_insn (gen_movmem_short (dst, src,
                                   convert_to_mode (Pmode, count, 1)));
      emit_label (end_label);
    }
}

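/* Standalone model (ours) of the loop bookkeeping above: MVC moves at most
   256 bytes and encodes the length as LEN-1, so the expander computes
   count = len - 1, runs count >> 8 full 256-byte blocks, and handles the
   remaining (count & 0xff) + 1 bytes with one final variable-length move.  */
#if 0
#include <assert.h>

static void
mvc_plan (unsigned long len, unsigned long *blocks, unsigned long *tail)
{
  unsigned long count = len - 1;  /* len > 0 */
  *blocks = count >> 8;           /* full 256-byte MVCs */
  *tail = (count & 0xff) + 1;     /* bytes left for the last MVC */
  assert (*blocks * 256 + *tail == len);
}
#endif
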
/* Emit code to set LEN bytes at DST to VAL.
   Make use of clrmem if VAL is zero.  */

void
s390_expand_setmem (rtx dst, rtx len, rtx val)
{
  if (GET_CODE (len) == CONST_INT && INTVAL (len) == 0)
    return;

  gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);

  if (GET_CODE (len) == CONST_INT && INTVAL (len) > 0 && INTVAL (len) <= 257)
    {
      if (val == const0_rtx && INTVAL (len) <= 256)
        emit_insn (gen_clrmem_short (dst, GEN_INT (INTVAL (len) - 1)));
      else
        {
          /* Initialize memory by storing the first byte.  */
          emit_move_insn (adjust_address (dst, QImode, 0), val);

          if (INTVAL (len) > 1)
            {
              /* Initiate 1 byte overlap move.
                 The first byte of DST is propagated through DSTP1.
                 Prepare a movmem for:  DST+1 = DST (length = LEN - 1).
                 DST is set to size 1 so the rest of the memory location
                 does not count as source operand.  */
              rtx dstp1 = adjust_address (dst, VOIDmode, 1);
              set_mem_size (dst, 1);

              emit_insn (gen_movmem_short (dstp1, dst,
                                           GEN_INT (INTVAL (len) - 2)));
            }
        }
    }

  else if (TARGET_MVCLE)
    {
      val = force_not_mem (convert_modes (Pmode, QImode, val, 1));
      emit_insn (gen_setmem_long (dst, convert_to_mode (Pmode, len, 1), val));
    }

  else
    {
      rtx dst_addr, count, blocks, temp, dstp1 = NULL_RTX;
      rtx loop_start_label = gen_label_rtx ();
      rtx loop_end_label = gen_label_rtx ();
      rtx end_label = gen_label_rtx ();
      enum machine_mode mode;

      mode = GET_MODE (len);
      if (mode == VOIDmode)
        mode = Pmode;

      dst_addr = gen_reg_rtx (Pmode);
      count = gen_reg_rtx (mode);
      blocks = gen_reg_rtx (mode);

      convert_move (count, len, 1);
      emit_cmp_and_jump_insns (count, const0_rtx,
                               EQ, NULL_RTX, mode, 1, end_label);

      emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
      dst = change_address (dst, VOIDmode, dst_addr);

      if (val == const0_rtx)
        temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
                             OPTAB_DIRECT);
      else
        {
          dstp1 = adjust_address (dst, VOIDmode, 1);
          set_mem_size (dst, 1);

          /* Initialize memory by storing the first byte.  */
          emit_move_insn (adjust_address (dst, QImode, 0), val);

          /* If count is 1 we are done.  */
          emit_cmp_and_jump_insns (count, const1_rtx,
                                   EQ, NULL_RTX, mode, 1, end_label);

          temp = expand_binop (mode, add_optab, count, GEN_INT (-2), count, 1,
                               OPTAB_DIRECT);
        }
      if (temp != count)
        emit_move_insn (count, temp);

      temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
                           OPTAB_DIRECT);
      if (temp != blocks)
        emit_move_insn (blocks, temp);

      emit_cmp_and_jump_insns (blocks, const0_rtx,
                               EQ, NULL_RTX, mode, 1, loop_end_label);

      emit_label (loop_start_label);

      if (TARGET_Z10
          && (GET_CODE (len) != CONST_INT || INTVAL (len) > 1024))
        {
          /* Issue a write prefetch for the +4 cache line.  */
          rtx prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr,
                                                     GEN_INT (1024)),
                                       const1_rtx, const0_rtx);
          emit_insn (prefetch);
          PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
        }

      if (val == const0_rtx)
        emit_insn (gen_clrmem_short (dst, GEN_INT (255)));
      else
        emit_insn (gen_movmem_short (dstp1, dst, GEN_INT (255)));
      s390_load_address (dst_addr,
                         gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));

      temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
                           OPTAB_DIRECT);
      if (temp != blocks)
        emit_move_insn (blocks, temp);

      emit_cmp_and_jump_insns (blocks, const0_rtx,
                               EQ, NULL_RTX, mode, 1, loop_end_label);

      emit_jump (loop_start_label);
      emit_label (loop_end_label);

      if (val == const0_rtx)
        emit_insn (gen_clrmem_short (dst, convert_to_mode (Pmode, count, 1)));
      else
        emit_insn (gen_movmem_short (dstp1, dst,
                                     convert_to_mode (Pmode, count, 1)));
      emit_label (end_label);
    }
}

/* Emit code to compare LEN bytes at OP0 with those at OP1,
   and return the result in TARGET.  */

void
s390_expand_cmpmem (rtx target, rtx op0, rtx op1, rtx len)
{
  rtx ccreg = gen_rtx_REG (CCUmode, CC_REGNUM);
  rtx tmp;

  /* When tuning for z10 or higher we rely on the Glibc functions to
     do the right thing.  Only for constant lengths below 64k we will
     generate inline code.  */
  if (s390_tune >= PROCESSOR_2097_Z10
      && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
    return;

  /* As the result of CMPINT is inverted compared to what we need,
     we have to swap the operands.  */
  tmp = op0; op0 = op1; op1 = tmp;

  if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
    {
      if (INTVAL (len) > 0)
        {
          emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (INTVAL (len) - 1)));
          emit_insn (gen_cmpint (target, ccreg));
        }
      else
        emit_move_insn (target, const0_rtx);
    }
  else if (TARGET_MVCLE)
    {
      emit_insn (gen_cmpmem_long (op0, op1, convert_to_mode (Pmode, len, 1)));
      emit_insn (gen_cmpint (target, ccreg));
    }
  else
    {
      rtx addr0, addr1, count, blocks, temp;
      rtx loop_start_label = gen_label_rtx ();
      rtx loop_end_label = gen_label_rtx ();
      rtx end_label = gen_label_rtx ();
      enum machine_mode mode;

      mode = GET_MODE (len);
      if (mode == VOIDmode)
        mode = Pmode;

      addr0 = gen_reg_rtx (Pmode);
      addr1 = gen_reg_rtx (Pmode);
      count = gen_reg_rtx (mode);
      blocks = gen_reg_rtx (mode);

      convert_move (count, len, 1);
      emit_cmp_and_jump_insns (count, const0_rtx,
                               EQ, NULL_RTX, mode, 1, end_label);

      emit_move_insn (addr0, force_operand (XEXP (op0, 0), NULL_RTX));
      emit_move_insn (addr1, force_operand (XEXP (op1, 0), NULL_RTX));
      op0 = change_address (op0, VOIDmode, addr0);
      op1 = change_address (op1, VOIDmode, addr1);

      temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
                           OPTAB_DIRECT);
      if (temp != count)
        emit_move_insn (count, temp);

      temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
                           OPTAB_DIRECT);
      if (temp != blocks)
        emit_move_insn (blocks, temp);

      emit_cmp_and_jump_insns (blocks, const0_rtx,
                               EQ, NULL_RTX, mode, 1, loop_end_label);

      emit_label (loop_start_label);

      if (TARGET_Z10
          && (GET_CODE (len) != CONST_INT || INTVAL (len) > 512))
        {
          rtx prefetch;

          /* Issue a read prefetch for the +2 cache line of operand 1.  */
          prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr0, GEN_INT (512)),
                                   const0_rtx, const0_rtx);
          emit_insn (prefetch);
          PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;

          /* Issue a read prefetch for the +2 cache line of operand 2.  */
          prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr1, GEN_INT (512)),
                                   const0_rtx, const0_rtx);
          emit_insn (prefetch);
          PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
        }

      emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (255)));
      temp = gen_rtx_NE (VOIDmode, ccreg, const0_rtx);
      temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
                        gen_rtx_LABEL_REF (VOIDmode, end_label), pc_rtx);
      temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
      emit_jump_insn (temp);

      s390_load_address (addr0,
                         gen_rtx_PLUS (Pmode, addr0, GEN_INT (256)));
      s390_load_address (addr1,
                         gen_rtx_PLUS (Pmode, addr1, GEN_INT (256)));

      temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
                           OPTAB_DIRECT);
      if (temp != blocks)
        emit_move_insn (blocks, temp);

      emit_cmp_and_jump_insns (blocks, const0_rtx,
                               EQ, NULL_RTX, mode, 1, loop_end_label);

      emit_jump (loop_start_label);
      emit_label (loop_end_label);

      emit_insn (gen_cmpmem_short (op0, op1,
                                   convert_to_mode (Pmode, count, 1)));
      emit_label (end_label);

      emit_insn (gen_cmpint (target, ccreg));
    }
}

/* Expand conditional increment or decrement using alc/slb instructions.
   Should generate code setting DST to either SRC or SRC + INCREMENT,
   depending on the result of the comparison CMP_OP0 CMP_CODE CMP_OP1.
   Returns true if successful, false otherwise.

   That makes it possible to implement some if-constructs without jumps e.g.:
   (borrow = CC0 | CC1 and carry = CC2 | CC3)
   unsigned int a, b, c;
   if (a < b)  c++; -> CCU  b > a  -> CC2;    c += carry;
   if (a < b)  c--; -> CCL3 a - b  -> borrow; c -= borrow;
   if (a <= b) c++; -> CCL3 b - a  -> borrow; c += carry;
   if (a <= b) c--; -> CCU  a <= b -> borrow; c -= borrow;

   Checks for EQ and NE with a nonzero value need an additional xor e.g.:
   if (a == b) c++; -> CCL3 a ^= b; 0 - a  -> borrow;    c += carry;
   if (a == b) c--; -> CCU  a ^= b; a <= 0 -> CC0 | CC1; c -= borrow;
   if (a != b) c++; -> CCU  a ^= b; a > 0  -> CC2;       c += carry;
   if (a != b) c--; -> CCL3 a ^= b; 0 - a  -> borrow;    c -= borrow; */

bool
s390_expand_addcc (enum rtx_code cmp_code, rtx cmp_op0, rtx cmp_op1,
                   rtx dst, rtx src, rtx increment)
{
  enum machine_mode cmp_mode;
  enum machine_mode cc_mode;
  rtx op_res;
  rtx insn;
  rtvec p;
  int ret;

  if ((GET_MODE (cmp_op0) == SImode || GET_MODE (cmp_op0) == VOIDmode)
      && (GET_MODE (cmp_op1) == SImode || GET_MODE (cmp_op1) == VOIDmode))
    cmp_mode = SImode;
  else if ((GET_MODE (cmp_op0) == DImode || GET_MODE (cmp_op0) == VOIDmode)
           && (GET_MODE (cmp_op1) == DImode || GET_MODE (cmp_op1) == VOIDmode))
    cmp_mode = DImode;
  else
    return false;

  /* Try ADD LOGICAL WITH CARRY.  */
  if (increment == const1_rtx)
    {
      /* Determine CC mode to use.  */
      if (cmp_code == EQ || cmp_code == NE)
        {
          if (cmp_op1 != const0_rtx)
            {
              cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
                                             NULL_RTX, 0, OPTAB_WIDEN);
              cmp_op1 = const0_rtx;
            }

          cmp_code = cmp_code == EQ ? LEU : GTU;
        }

      if (cmp_code == LTU || cmp_code == LEU)
        {
          rtx tem = cmp_op0;
          cmp_op0 = cmp_op1;
          cmp_op1 = tem;
          cmp_code = swap_condition (cmp_code);
        }

      switch (cmp_code)
        {
          case GTU:
            cc_mode = CCUmode;
            break;

          case GEU:
            cc_mode = CCL3mode;
            break;

          default:
            return false;
        }

      /* Emit comparison instruction pattern. */
      if (!register_operand (cmp_op0, cmp_mode))
        cmp_op0 = force_reg (cmp_mode, cmp_op0);

      insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
                          gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
      /* We use insn_invalid_p here to add clobbers if required.  */
      ret = insn_invalid_p (emit_insn (insn), false);
      gcc_assert (!ret);

      /* Emit ALC instruction pattern.  */
      op_res = gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
                               gen_rtx_REG (cc_mode, CC_REGNUM),
                               const0_rtx);

      if (src != const0_rtx)
        {
          if (!register_operand (src, GET_MODE (dst)))
            src = force_reg (GET_MODE (dst), src);

          op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, src);
          op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, const0_rtx);
        }

      p = rtvec_alloc (2);
      RTVEC_ELT (p, 0) =
        gen_rtx_SET (VOIDmode, dst, op_res);
      RTVEC_ELT (p, 1) =
        gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
      emit_insn (gen_rtx_PARALLEL (VOIDmode, p));

      return true;
    }

  /* Try SUBTRACT LOGICAL WITH BORROW.  */
  if (increment == constm1_rtx)
    {
      /* Determine CC mode to use.  */
      if (cmp_code == EQ || cmp_code == NE)
        {
          if (cmp_op1 != const0_rtx)
            {
              cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
                                             NULL_RTX, 0, OPTAB_WIDEN);
              cmp_op1 = const0_rtx;
            }

          cmp_code = cmp_code == EQ ? LEU : GTU;
        }

      if (cmp_code == GTU || cmp_code == GEU)
        {
          rtx tem = cmp_op0;
          cmp_op0 = cmp_op1;
          cmp_op1 = tem;
          cmp_code = swap_condition (cmp_code);
        }

      switch (cmp_code)
        {
          case LEU:
            cc_mode = CCUmode;
            break;

          case LTU:
            cc_mode = CCL3mode;
            break;

          default:
            return false;
        }

      /* Emit comparison instruction pattern. */
      if (!register_operand (cmp_op0, cmp_mode))
        cmp_op0 = force_reg (cmp_mode, cmp_op0);

      insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
                          gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
      /* We use insn_invalid_p here to add clobbers if required.  */
      ret = insn_invalid_p (emit_insn (insn), false);
      gcc_assert (!ret);

      /* Emit SLB instruction pattern.  */
      if (!register_operand (src, GET_MODE (dst)))
        src = force_reg (GET_MODE (dst), src);

      op_res = gen_rtx_MINUS (GET_MODE (dst),
                              gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
                              gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
                                              gen_rtx_REG (cc_mode, CC_REGNUM),
                                              const0_rtx));
      p = rtvec_alloc (2);
      RTVEC_ELT (p, 0) =
        gen_rtx_SET (VOIDmode, dst, op_res);
      RTVEC_ELT (p, 1) =
        gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
      emit_insn (gen_rtx_PARALLEL (VOIDmode, p));

      return true;
    }

  return false;
}

/* Expand code for the insv template. Return true if successful.  */

bool
s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
{
  int bitsize = INTVAL (op1);
  int bitpos = INTVAL (op2);
  enum machine_mode mode = GET_MODE (dest);
  enum machine_mode smode;
  int smode_bsize, mode_bsize;
  rtx op, clobber;

  if (bitsize + bitpos > GET_MODE_SIZE (mode))
    return false;

  /* Generate INSERT IMMEDIATE (IILL et al).  */
  /* (set (ze (reg)) (const_int)).  */
  if (TARGET_ZARCH
      && register_operand (dest, word_mode)
      && (bitpos % 16) == 0
      && (bitsize % 16) == 0
      && const_int_operand (src, VOIDmode))
    {
      HOST_WIDE_INT val = INTVAL (src);
      int regpos = bitpos + bitsize;

      while (regpos > bitpos)
        {
          enum machine_mode putmode;
          int putsize;

          if (TARGET_EXTIMM && (regpos % 32 == 0) && (regpos >= bitpos + 32))
            putmode = SImode;
          else
            putmode = HImode;

          putsize = GET_MODE_BITSIZE (putmode);
          regpos -= putsize;
          emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
                                                GEN_INT (putsize),
                                                GEN_INT (regpos)),
                          gen_int_mode (val, putmode));
          val >>= putsize;
        }
      gcc_assert (regpos == bitpos);
      return true;
    }

  smode = smallest_mode_for_size (bitsize, MODE_INT);
  smode_bsize = GET_MODE_BITSIZE (smode);
  mode_bsize = GET_MODE_BITSIZE (mode);

  /* Generate STORE CHARACTERS UNDER MASK (STCM et al).  */
  if (bitpos == 0
      && (bitsize % BITS_PER_UNIT) == 0
      && MEM_P (dest)
      && (register_operand (src, word_mode)
          || const_int_operand (src, VOIDmode)))
    {
      /* Emit standard pattern if possible.  */
      if (smode_bsize == bitsize)
        {
          emit_move_insn (adjust_address (dest, smode, 0),
                          gen_lowpart (smode, src));
          return true;
        }

      /* (set (ze (mem)) (const_int)).  */
      else if (const_int_operand (src, VOIDmode))
        {
          int size = bitsize / BITS_PER_UNIT;
          rtx src_mem = adjust_address (force_const_mem (word_mode, src),
                                        BLKmode,
                                        UNITS_PER_WORD - size);

          dest = adjust_address (dest, BLKmode, 0);
          set_mem_size (dest, size);
          s390_expand_movmem (dest, src_mem, GEN_INT (size));
          return true;
        }

      /* (set (ze (mem)) (reg)).  */
      else if (register_operand (src, word_mode))
        {
          if (bitsize <= 32)
            emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, op1,
                                                  const0_rtx), src);
          else
            {
              /* Emit st,stcmh sequence.  */
              int stcmh_width = bitsize - 32;
              int size = stcmh_width / BITS_PER_UNIT;

              emit_move_insn (adjust_address (dest, SImode, size),
                              gen_lowpart (SImode, src));
              set_mem_size (dest, size);
              emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
                                                    GEN_INT (stcmh_width),
                                                    const0_rtx),
                              gen_rtx_LSHIFTRT (word_mode, src, GEN_INT (32)));
            }
          return true;
        }

      return false;
    }

  /* Generate INSERT CHARACTERS UNDER MASK (IC, ICM et al).  */
  if ((bitpos % BITS_PER_UNIT) == 0
      && (bitsize % BITS_PER_UNIT) == 0
      && (bitpos & 32) == ((bitpos + bitsize - 1) & 32)
      && MEM_P (src)
      && (mode == DImode || mode == SImode)
      && register_operand (dest, mode))
    {
      /* Emit a strict_low_part pattern if possible.  */
      if (smode_bsize == bitsize && bitpos == mode_bsize - smode_bsize)
        {
          op = gen_rtx_STRICT_LOW_PART (VOIDmode, gen_lowpart (smode, dest));
          op = gen_rtx_SET (VOIDmode, op, gen_lowpart (smode, src));
          clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
          emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
          return true;
        }

      /* ??? There are more powerful versions of ICM that are not
         completely represented in the md file.  */
    }

  /* For z10, generate ROTATE THEN INSERT SELECTED BITS (RISBG et al).  */
  if (TARGET_Z10 && (mode == DImode || mode == SImode))
    {
      enum machine_mode mode_s = GET_MODE (src);

      if (mode_s == VOIDmode)
        {
          /* Assume const_int etc already in the proper mode.  */
          src = force_reg (mode, src);
        }
      else if (mode_s != mode)
        {
          gcc_assert (GET_MODE_BITSIZE (mode_s) >= bitsize);
          src = force_reg (mode_s, src);
          src = gen_lowpart (mode, src);
        }

      op = gen_rtx_ZERO_EXTRACT (mode, dest, op1, op2),
      op = gen_rtx_SET (VOIDmode, op, src);

      if (!TARGET_ZEC12)
        {
          clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
          op = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber));
        }
      emit_insn (op);

      return true;
    }

  return false;
}
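/* Illustration only (not from the original sources): whichever instruction
   the expander above picks, the caller-visible effect on a register DEST
   is the usual read-modify-write bitfield update, roughly

     shift = mode_bsize - bitpos - bitsize;        -- bitpos counts from msb
     mask  = ((1 << bitsize) - 1) << shift;
     dest  = (dest & ~mask) | ((src << shift) & mask);

   matching the (zero_extract ...) RTL used above.  */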
/* A subroutine of s390_expand_cs_hqi and s390_expand_atomic which returns a
   register that holds VAL of mode MODE shifted by COUNT bits.  */

static inline rtx
s390_expand_mask_and_shift (rtx val, enum machine_mode mode, rtx count)
{
  val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
                             NULL_RTX, 1, OPTAB_DIRECT);
  return expand_simple_binop (SImode, ASHIFT, val, count,
                              NULL_RTX, 1, OPTAB_DIRECT);
}
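/* Illustration only: for a QImode VAL and a bit count COUNT, the two insns
   emitted above compute, in C terms,

     (val & 0xff) << count

   leaving the mode-sized value positioned inside an SImode word.  */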
/* Structure to hold the initial parameters for a compare_and_swap operation
   in HImode and QImode.  */

struct alignment_context
{
  rtx memsi;      /* SI aligned memory location.  */
  rtx shift;      /* Bit offset with regard to lsb.  */
  rtx modemask;   /* Mask of the HQImode shifted by SHIFT bits.  */
  rtx modemaski;  /* ~modemask */
  bool aligned;   /* True if memory is aligned, false else.  */
};
/* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize
   structure AC for transparent simplifying, if the memory alignment is known
   to be at least 32bit.  MEM is the memory location for the actual operation
   and MODE its mode.  */

static void
init_alignment_context (struct alignment_context *ac, rtx mem,
                        enum machine_mode mode)
{
  ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
  ac->aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));

  if (ac->aligned)
    ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned.  */
  else
    {
      /* Alignment is unknown.  */
      rtx byteoffset, addr, align;

      /* Force the address into a register.  */
      addr = force_reg (Pmode, XEXP (mem, 0));

      /* Align it to SImode.  */
      align = expand_simple_binop (Pmode, AND, addr,
                                   GEN_INT (-GET_MODE_SIZE (SImode)),
                                   NULL_RTX, 1, OPTAB_DIRECT);
      /* Generate MEM.  */
      ac->memsi = gen_rtx_MEM (SImode, align);
      MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
      set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
      set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));

      /* Calculate shiftcount.  */
      byteoffset = expand_simple_binop (Pmode, AND, addr,
                                        GEN_INT (GET_MODE_SIZE (SImode) - 1),
                                        NULL_RTX, 1, OPTAB_DIRECT);
      /* As we already have some offset, evaluate the remaining distance.  */
      ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
                                       NULL_RTX, 1, OPTAB_DIRECT);
    }

  /* Shift is the byte count, but we need the bitcount.  */
  ac->shift = expand_simple_binop (SImode, ASHIFT, ac->shift, GEN_INT (3),
                                   NULL_RTX, 1, OPTAB_DIRECT);

  /* Calculate masks.  */
  ac->modemask = expand_simple_binop (SImode, ASHIFT,
                                      GEN_INT (GET_MODE_MASK (mode)),
                                      ac->shift, NULL_RTX, 1, OPTAB_DIRECT);
  ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask,
                                      NULL_RTX, 1);
}
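/* Illustration only (hypothetical address A): for a QImode operand with
   unknown alignment the code above computes

     memsi    = MEM:SI (A & -4)          -- the containing aligned word
     shift    = (3 - (A & 3)) * 8        -- bit offset of the byte from the lsb
     modemask = 0xff << shift, modemaski = ~modemask

   e.g. A % 4 == 1 yields shift == 16 on this big-endian target.  */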
/* A subroutine of s390_expand_cs_hqi.  Insert INS into VAL.  If possible,
   use a single insv insn into SEQ2.  Otherwise, put prep insns in SEQ1 and
   perform the merge in SEQ2.  */

static rtx
s390_two_part_insv (struct alignment_context *ac, rtx *seq1, rtx *seq2,
                    enum machine_mode mode, rtx val, rtx ins)
{
  rtx tmp;

  if (ac->aligned)
    {
      start_sequence ();
      tmp = copy_to_mode_reg (SImode, val);
      if (s390_expand_insv (tmp, GEN_INT (GET_MODE_BITSIZE (mode)),
                            const0_rtx, ins))
        {
          *seq1 = NULL;
          *seq2 = get_insns ();
          end_sequence ();
          return tmp;
        }
      end_sequence ();
    }

  /* Failed to use insv.  Generate a two part shift and mask.  */
  start_sequence ();
  tmp = s390_expand_mask_and_shift (ins, mode, ac->shift);
  *seq1 = get_insns ();
  end_sequence ();

  start_sequence ();
  tmp = expand_simple_binop (SImode, IOR, tmp, val, NULL_RTX, 1, OPTAB_DIRECT);
  *seq2 = get_insns ();
  end_sequence ();

  return tmp;
}
/* Expand an atomic compare and swap operation for HImode and QImode.  MEM is
   the memory location, CMP the old value to compare MEM with and NEW_RTX the
   value to set if CMP == MEM.  */

void
s390_expand_cs_hqi (enum machine_mode mode, rtx btarget, rtx vtarget, rtx mem,
                    rtx cmp, rtx new_rtx, bool is_weak)
{
  struct alignment_context ac;
  rtx cmpv, newv, val, cc, seq0, seq1, seq2, seq3;
  rtx res = gen_reg_rtx (SImode);
  rtx csloop = NULL, csend = NULL;

  gcc_assert (MEM_P (mem));

  init_alignment_context (&ac, mem, mode);

  /* Load full word.  Subsequent loads are performed by CS.  */
  val = expand_simple_binop (SImode, AND, ac.memsi, ac.modemaski,
                             NULL_RTX, 1, OPTAB_DIRECT);

  /* Prepare insertions of cmp and new_rtx into the loaded value.  When
     possible, we try to use insv to make this happen efficiently.  If
     that fails we'll generate code both inside and outside the loop.  */
  cmpv = s390_two_part_insv (&ac, &seq0, &seq2, mode, val, cmp);
  newv = s390_two_part_insv (&ac, &seq1, &seq3, mode, val, new_rtx);

  if (seq0)
    emit_insn (seq0);
  if (seq1)
    emit_insn (seq1);

  /* Start CS loop.  */
  if (!is_weak)
    {
      /* Begin assuming success.  */
      emit_move_insn (btarget, const1_rtx);

      csloop = gen_label_rtx ();
      csend = gen_label_rtx ();
      emit_label (csloop);
    }

  /* val = "<mem>00..0<mem>"
   * cmp = "00..0<cmp>00..0"
   * new = "00..0<new>00..0"
   */

  emit_insn (seq2);
  emit_insn (seq3);

  cc = s390_emit_compare_and_swap (EQ, res, ac.memsi, cmpv, newv);
  if (is_weak)
    emit_insn (gen_cstorecc4 (btarget, cc, XEXP (cc, 0), XEXP (cc, 1)));
  else
    {
      rtx tmp;

      /* Jump to end if we're done (likely?).  */
      s390_emit_jump (csend, cc);

      /* Check for changes outside mode, and loop internal if so.
         Arrange the moves so that the compare is adjacent to the
         branch so that we can generate CRJ.  */
      tmp = copy_to_reg (val);
      force_expand_binop (SImode, and_optab, res, ac.modemaski, val,
                          1, OPTAB_DIRECT);
      cc = s390_emit_compare (NE, val, tmp);
      s390_emit_jump (csloop, cc);

      /* Failed.  */
      emit_move_insn (btarget, const0_rtx);
      emit_label (csend);
    }

  /* Return the correct part of the bitfield.  */
  convert_move (vtarget, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
                                              NULL_RTX, 1, OPTAB_DIRECT), 1);
}
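/* Illustration only: the loop above emulates a HImode/QImode compare-and-swap
   with the word-sized CS instruction, roughly

     word = *memsi & modemaski;                -- bytes outside the field
     old  = word | (cmp << shift);             -- cmpv
     new  = word | (new_rtx << shift);         -- newv
     res  = CS (memsi, old, new);

   and, for the strong variant, if CS fails only because bytes outside the
   field changed, the merged values are rebuilt from the freshly returned
   word and the loop retries; otherwise BTARGET is set to 0.  */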
/* Expand an atomic operation CODE of mode MODE.  MEM is the memory location
   and VAL the value to play with.  If AFTER is true then store the value MEM
   holds after the operation, if AFTER is false then store the value MEM
   holds before the operation.  If TARGET is zero then discard that value, else
   store it to TARGET.  */

void
s390_expand_atomic (enum machine_mode mode, enum rtx_code code,
                    rtx target, rtx mem, rtx val, bool after)
{
  struct alignment_context ac;
  rtx cmp;
  rtx new_rtx = gen_reg_rtx (SImode);
  rtx orig = gen_reg_rtx (SImode);
  rtx csloop = gen_label_rtx ();

  gcc_assert (!target || register_operand (target, VOIDmode));
  gcc_assert (MEM_P (mem));

  init_alignment_context (&ac, mem, mode);

  /* Shift val to the correct bit positions.
     Preserve "icm", but prevent "ex icm".  */
  if (!(ac.aligned && code == SET && MEM_P (val)))
    val = s390_expand_mask_and_shift (val, mode, ac.shift);

  /* Further preparation insns.  */
  if (code == PLUS || code == MINUS)
    emit_move_insn (orig, val);
  else if (code == MULT || code == AND) /* val = "11..1<val>11..1" */
    val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
                               NULL_RTX, 1, OPTAB_DIRECT);

  /* Load full word.  Subsequent loads are performed by CS.  */
  cmp = force_reg (SImode, ac.memsi);

  /* Start CS loop.  */
  emit_label (csloop);
  emit_move_insn (new_rtx, cmp);

  /* Patch new with val at correct position.  */
  switch (code)
    {
    case PLUS:
    case MINUS:
      val = expand_simple_binop (SImode, code, new_rtx, orig,
                                 NULL_RTX, 1, OPTAB_DIRECT);
      val = expand_simple_binop (SImode, AND, val, ac.modemask,
                                 NULL_RTX, 1, OPTAB_DIRECT);
      /* FALLTHRU */
    case SET:
      if (ac.aligned && MEM_P (val))
        store_bit_field (new_rtx, GET_MODE_BITSIZE (mode), 0,
                         0, 0, SImode, val);
      else
        {
          new_rtx = expand_simple_binop (SImode, AND, new_rtx, ac.modemaski,
                                         NULL_RTX, 1, OPTAB_DIRECT);
          new_rtx = expand_simple_binop (SImode, IOR, new_rtx, val,
                                         NULL_RTX, 1, OPTAB_DIRECT);
        }
      break;
    case AND:
    case IOR:
    case XOR:
      new_rtx = expand_simple_binop (SImode, code, new_rtx, val,
                                     NULL_RTX, 1, OPTAB_DIRECT);
      break;
    case MULT: /* NAND */
      new_rtx = expand_simple_binop (SImode, AND, new_rtx, val,
                                     NULL_RTX, 1, OPTAB_DIRECT);
      new_rtx = expand_simple_binop (SImode, XOR, new_rtx, ac.modemask,
                                     NULL_RTX, 1, OPTAB_DIRECT);
      break;
    default:
      gcc_unreachable ();
    }

  s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, cmp,
                                                      ac.memsi, cmp, new_rtx));

  /* Return the correct part of the bitfield.  */
  if (target)
    convert_move (target, expand_simple_binop (SImode, LSHIFTRT,
                                               after ? new_rtx : cmp, ac.shift,
                                               NULL_RTX, 1, OPTAB_DIRECT), 1);
}
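/* Illustration only: e.g. for a QImode NAND (CODE == MULT above), VAL is
   first widened to "11..1<val>11..1"; inside the loop the new word is then

     new_rtx = (cmp & val) ^ modemask;

   which complements exactly the bits of the field while leaving the other
   bytes of the word unchanged for the CS retry test.  */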
/* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
   We need to emit DTP-relative relocations.  */

static void s390_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;

static void
s390_output_dwarf_dtprel (FILE *file, int size, rtx x)
{
  switch (size)
    {
    case 4:
      fputs ("\t.long\t", file);
      break;
    case 8:
      fputs ("\t.quad\t", file);
      break;
    default:
      gcc_unreachable ();
    }
  output_addr_const (file, x);
  fputs ("@DTPOFF", file);
}
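/* Illustration only: for a TLS symbol "foo" and SIZE == 8 this emits

     .quad   foo@DTPOFF

   into the assembly stream.  */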
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
/* Implement TARGET_MANGLE_TYPE.  */

static const char *
s390_mangle_type (const_tree type)
{
  if (TYPE_MAIN_VARIANT (type) == long_double_type_node
      && TARGET_LONG_DOUBLE_128)
    return "g";

  /* For all other types, use normal C++ mangling.  */
  return NULL;
}
#endif
/* In the name of slightly smaller debug output, and to cater to
   general assembler lossage, recognize various UNSPEC sequences
   and turn them back into a direct symbol reference.  */

static rtx
s390_delegitimize_address (rtx orig_x)
{
  rtx x, y;

  orig_x = delegitimize_mem_from_attrs (orig_x);
  x = orig_x;

  /* Extract the symbol ref from:
     (plus:SI (reg:SI 12 %r12)
              (const:SI (unspec:SI [(symbol_ref/f:SI ("*.LC0"))]
                                    UNSPEC_GOTOFF/PLTOFF)))
     and
     (plus:SI (reg:SI 12 %r12)
              (const:SI (plus:SI (unspec:SI [(symbol_ref:SI ("L"))]
                                             UNSPEC_GOTOFF/PLTOFF)
                                 (const_int 4 [0x4]))))  */
  if (GET_CODE (x) == PLUS
      && REG_P (XEXP (x, 0))
      && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
      && GET_CODE (XEXP (x, 1)) == CONST)
    {
      HOST_WIDE_INT offset = 0;

      /* The const operand.  */
      y = XEXP (XEXP (x, 1), 0);

      if (GET_CODE (y) == PLUS
          && GET_CODE (XEXP (y, 1)) == CONST_INT)
        {
          offset = INTVAL (XEXP (y, 1));
          y = XEXP (y, 0);
        }

      if (GET_CODE (y) == UNSPEC
          && (XINT (y, 1) == UNSPEC_GOTOFF
              || XINT (y, 1) == UNSPEC_PLTOFF))
        return plus_constant (Pmode, XVECEXP (y, 0, 0), offset);
    }

  if (GET_CODE (x) != MEM)
    return orig_x;

  x = XEXP (x, 0);
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST
      && GET_CODE (XEXP (x, 0)) == REG
      && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
    {
      y = XEXP (XEXP (x, 1), 0);
      if (GET_CODE (y) == UNSPEC
          && XINT (y, 1) == UNSPEC_GOT)
        y = XVECEXP (y, 0, 0);
      else
        return orig_x;
    }
  else if (GET_CODE (x) == CONST)
    {
      /* Extract the symbol ref from:
         (mem:QI (const:DI (unspec:DI [(symbol_ref:DI ("foo"))]
                                       UNSPEC_PLT/GOTENT)))  */

      y = XEXP (x, 0);
      if (GET_CODE (y) == UNSPEC
          && (XINT (y, 1) == UNSPEC_GOTENT
              || XINT (y, 1) == UNSPEC_PLT))
        y = XVECEXP (y, 0, 0);
      else
        return orig_x;
    }
  else
    return orig_x;

  if (GET_MODE (orig_x) != Pmode)
    {
      if (GET_MODE (orig_x) == BLKmode)
        return orig_x;
      y = lowpart_subreg (GET_MODE (orig_x), y, Pmode);
      if (y == NULL_RTX)
        return orig_x;
    }
  return y;
}
/* Output operand OP to stdio stream FILE.
   OP is an address (register + offset) which is not used to address data;
   instead the rightmost bits are interpreted as the value.  */

static void
print_shift_count_operand (FILE *file, rtx op)
{
  HOST_WIDE_INT offset;
  rtx base;

  /* Extract base register and offset.  */
  if (!s390_decompose_shift_count (op, &base, &offset))
    gcc_unreachable ();

  /* Sanity check.  */
  if (base)
    {
      gcc_assert (GET_CODE (base) == REG);
      gcc_assert (REGNO (base) < FIRST_PSEUDO_REGISTER);
      gcc_assert (REGNO_REG_CLASS (REGNO (base)) == ADDR_REGS);
    }

  /* Offsets are restricted to twelve bits.  */
  fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset & ((1 << 12) - 1));
  if (base)
    fprintf (file, "(%s)", reg_names[REGNO (base)]);
}
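/* Illustration only: for OP == (plus (reg %r1) (const_int 46)) this prints
   "46(%r1)"; a plain (const_int 64) has no base register and prints just
   the offset masked to twelve bits, i.e. "64".  */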
/* See 'get_some_local_dynamic_name'.  */

static int
get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *px;

  if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
    {
      x = get_pool_constant (x);
      return for_each_rtx (&x, get_some_local_dynamic_name_1, 0);
    }

  if (GET_CODE (x) == SYMBOL_REF
      && tls_symbolic_operand (x) == TLS_MODEL_LOCAL_DYNAMIC)
    {
      cfun->machine->some_ld_name = XSTR (x, 0);
      return 1;
    }

  return 0;
}

/* Locate some local-dynamic symbol still in use by this function
   so that we can print its name in local-dynamic base patterns.  */

static const char *
get_some_local_dynamic_name (void)
{
  rtx insn;

  if (cfun->machine->some_ld_name)
    return cfun->machine->some_ld_name;

  for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
    if (INSN_P (insn)
        && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
      return cfun->machine->some_ld_name;

  gcc_unreachable ();
}
/* Output machine-dependent UNSPECs occurring in address constant X
   in assembler syntax to stdio stream FILE.  Returns true if the
   constant X could be recognized, false otherwise.  */

static bool
s390_output_addr_const_extra (FILE *file, rtx x)
{
  if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
    switch (XINT (x, 1))
      {
      case UNSPEC_GOTENT:
        output_addr_const (file, XVECEXP (x, 0, 0));
        fprintf (file, "@GOTENT");
        return true;
      case UNSPEC_GOT:
        output_addr_const (file, XVECEXP (x, 0, 0));
        fprintf (file, "@GOT");
        return true;
      case UNSPEC_GOTOFF:
        output_addr_const (file, XVECEXP (x, 0, 0));
        fprintf (file, "@GOTOFF");
        return true;
      case UNSPEC_PLT:
        output_addr_const (file, XVECEXP (x, 0, 0));
        fprintf (file, "@PLT");
        return true;
      case UNSPEC_PLTOFF:
        output_addr_const (file, XVECEXP (x, 0, 0));
        fprintf (file, "@PLTOFF");
        return true;
      case UNSPEC_TLSGD:
        output_addr_const (file, XVECEXP (x, 0, 0));
        fprintf (file, "@TLSGD");
        return true;
      case UNSPEC_TLSLDM:
        assemble_name (file, get_some_local_dynamic_name ());
        fprintf (file, "@TLSLDM");
        return true;
      case UNSPEC_DTPOFF:
        output_addr_const (file, XVECEXP (x, 0, 0));
        fprintf (file, "@DTPOFF");
        return true;
      case UNSPEC_NTPOFF:
        output_addr_const (file, XVECEXP (x, 0, 0));
        fprintf (file, "@NTPOFF");
        return true;
      case UNSPEC_GOTNTPOFF:
        output_addr_const (file, XVECEXP (x, 0, 0));
        fprintf (file, "@GOTNTPOFF");
        return true;
      case UNSPEC_INDNTPOFF:
        output_addr_const (file, XVECEXP (x, 0, 0));
        fprintf (file, "@INDNTPOFF");
        return true;
      }

  if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 2)
    switch (XINT (x, 1))
      {
      case UNSPEC_POOL_OFFSET:
        x = gen_rtx_MINUS (GET_MODE (x), XVECEXP (x, 0, 0), XVECEXP (x, 0, 1));
        output_addr_const (file, x);
        return true;
      }

  return false;
}
/* Output address operand ADDR in assembler syntax to
   stdio stream FILE.  */

void
print_operand_address (FILE *file, rtx addr)
{
  struct s390_address ad;

  if (s390_loadrelative_operand_p (addr, NULL, NULL))
    {
      if (!TARGET_Z10)
        {
          output_operand_lossage ("symbolic memory references are "
                                  "only supported on z10 or later");
          return;
        }
      output_addr_const (file, addr);
      return;
    }

  if (!s390_decompose_address (addr, &ad)
      || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
      || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
    output_operand_lossage ("cannot decompose address");

  if (ad.disp)
    output_addr_const (file, ad.disp);
  else
    fprintf (file, "0");

  if (ad.base && ad.indx)
    fprintf (file, "(%s,%s)", reg_names[REGNO (ad.indx)],
             reg_names[REGNO (ad.base)]);
  else if (ad.base)
    fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
}
/* Output operand X in assembler syntax to stdio stream FILE.
   CODE specifies the format flag.  The following format flags
   are recognized:

   'C': print opcode suffix for branch condition.
   'D': print opcode suffix for inverse branch condition.
   'E': print opcode suffix for branch on index instruction.
   'G': print the size of the operand in bytes.
   'J': print tls_load/tls_gdcall/tls_ldcall suffix
   'M': print the second word of a TImode operand.
   'N': print the second word of a DImode operand.
   'O': print only the displacement of a memory reference.
   'R': print only the base register of a memory reference.
   'S': print S-type memory reference (base+displacement).
   'Y': print shift count operand.

   'b': print integer X as if it's an unsigned byte.
   'c': print integer X as if it's a signed byte.
   'e': "end" of DImode contiguous bitmask X.
   'f': "end" of SImode contiguous bitmask X.
   'h': print integer X as if it's a signed halfword.
   'i': print the first nonzero HImode part of X.
   'j': print the first HImode part unequal to -1 of X.
   'k': print the first nonzero SImode part of X.
   'm': print the first SImode part unequal to -1 of X.
   'o': print integer X as if it's an unsigned 32bit word.
   's': "start" of DImode contiguous bitmask X.
   't': "start" of SImode contiguous bitmask X.
   'x': print integer X as if it's an unsigned halfword.
*/
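/* Illustration only: for X == 0xff00 the contiguous bitmask runs from bit 8
   to bit 15 counting from the lsb, so s390_contiguous_bitmask_p (see below)
   yields pos == 8, len == 8, and the modifiers print the MSB-relative bit
   positions used by RISBG et al.: 's'/'t' print 64 - 8 - 8 == 48 (start)
   and 'e'/'f' print 64 - 1 - 8 == 55 (end).  */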
void
print_operand (FILE *file, rtx x, int code)
{
  HOST_WIDE_INT ival;

  switch (code)
    {
    case 'C':
      fprintf (file, s390_branch_condition_mnemonic (x, FALSE));
      return;

    case 'D':
      fprintf (file, s390_branch_condition_mnemonic (x, TRUE));
      return;

    case 'E':
      if (GET_CODE (x) == LE)
        fprintf (file, "l");
      else if (GET_CODE (x) == GT)
        fprintf (file, "h");
      else
        output_operand_lossage ("invalid comparison operator "
                                "for 'E' output modifier");
      return;

    case 'J':
      if (GET_CODE (x) == SYMBOL_REF)
        {
          fprintf (file, "%s", ":tls_load:");
          output_addr_const (file, x);
        }
      else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
        {
          fprintf (file, "%s", ":tls_gdcall:");
          output_addr_const (file, XVECEXP (x, 0, 0));
        }
      else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM)
        {
          fprintf (file, "%s", ":tls_ldcall:");
          assemble_name (file, get_some_local_dynamic_name ());
        }
      else
        output_operand_lossage ("invalid reference for 'J' output modifier");
      return;

    case 'G':
      fprintf (file, "%u", GET_MODE_SIZE (GET_MODE (x)));
      return;

    case 'O':
      {
        struct s390_address ad;
        int ret;

        if (!MEM_P (x))
          {
            output_operand_lossage ("memory reference expected for "
                                    "'O' output modifier");
            return;
          }

        ret = s390_decompose_address (XEXP (x, 0), &ad);

        if (!ret
            || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
            || ad.indx)
          {
            output_operand_lossage ("invalid address for 'O' output modifier");
            return;
          }

        if (ad.disp)
          output_addr_const (file, ad.disp);
        else
          fprintf (file, "0");
      }
      return;

    case 'R':
      {
        struct s390_address ad;
        int ret;

        if (!MEM_P (x))
          {
            output_operand_lossage ("memory reference expected for "
                                    "'R' output modifier");
            return;
          }

        ret = s390_decompose_address (XEXP (x, 0), &ad);

        if (!ret
            || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
            || ad.indx)
          {
            output_operand_lossage ("invalid address for 'R' output modifier");
            return;
          }

        if (ad.base)
          fprintf (file, "%s", reg_names[REGNO (ad.base)]);
        else
          fprintf (file, "0");
      }
      return;

    case 'S':
      {
        struct s390_address ad;
        int ret;

        if (!MEM_P (x))
          {
            output_operand_lossage ("memory reference expected for "
                                    "'S' output modifier");
            return;
          }

        ret = s390_decompose_address (XEXP (x, 0), &ad);

        if (!ret
            || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
            || ad.indx)
          {
            output_operand_lossage ("invalid address for 'S' output modifier");
            return;
          }

        if (ad.disp)
          output_addr_const (file, ad.disp);
        else
          fprintf (file, "0");

        if (ad.base)
          fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
      }
      return;

    case 'N':
      if (GET_CODE (x) == REG)
        x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
      else if (GET_CODE (x) == MEM)
        x = change_address (x, VOIDmode,
                            plus_constant (Pmode, XEXP (x, 0), 4));
      else
        output_operand_lossage ("register or memory expression expected "
                                "for 'N' output modifier");
      break;

    case 'M':
      if (GET_CODE (x) == REG)
        x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
      else if (GET_CODE (x) == MEM)
        x = change_address (x, VOIDmode,
                            plus_constant (Pmode, XEXP (x, 0), 8));
      else
        output_operand_lossage ("register or memory expression expected "
                                "for 'M' output modifier");
      break;

    case 'Y':
      print_shift_count_operand (file, x);
      return;
    }

  switch (GET_CODE (x))
    {
    case REG:
      fprintf (file, "%s", reg_names[REGNO (x)]);
      break;

    case MEM:
      output_address (XEXP (x, 0));
      break;

    case CONST:
    case CODE_LABEL:
    case LABEL_REF:
    case SYMBOL_REF:
      output_addr_const (file, x);
      break;

    case CONST_INT:
      ival = INTVAL (x);
      switch (code)
        {
        case 0:
          break;
        case 'b':
          ival &= 0xff;
          break;
        case 'c':
          ival = ((ival & 0xff) ^ 0x80) - 0x80;
          break;
        case 'x':
          ival &= 0xffff;
          break;
        case 'h':
          ival = ((ival & 0xffff) ^ 0x8000) - 0x8000;
          break;
        case 'i':
          ival = s390_extract_part (x, HImode, 0);
          break;
        case 'j':
          ival = s390_extract_part (x, HImode, -1);
          break;
        case 'k':
          ival = s390_extract_part (x, SImode, 0);
          break;
        case 'm':
          ival = s390_extract_part (x, SImode, -1);
          break;
        case 'e': case 'f':
        case 's': case 't':
          {
            int pos, len;
            bool ok;

            len = (code == 's' || code == 'e' ? 64 : 32);
            ok = s390_contiguous_bitmask_p (ival, len, &pos, &len);
            gcc_assert (ok);
            if (code == 's' || code == 't')
              ival = 64 - pos - len;
            else
              ival = 64 - 1 - pos;
          }
          break;
        case 'o':
          ival &= 0xffffffff;
          break;
        default:
          output_operand_lossage ("invalid constant for output modifier '%c'",
                                  code);
        }
      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
      break;

    case CONST_DOUBLE:
      gcc_assert (GET_MODE (x) == VOIDmode);
      if (code == 'b')
        fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xff);
      else if (code == 'x')
        fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xffff);
      else if (code == 'h')
        fprintf (file, HOST_WIDE_INT_PRINT_DEC,
                 ((CONST_DOUBLE_LOW (x) & 0xffff) ^ 0x8000) - 0x8000);
      else
        {
          if (code == 0)
            output_operand_lossage ("invalid constant - try using "
                                    "an output modifier");
          else
            output_operand_lossage ("invalid constant for output modifier '%c'",
                                    code);
        }
      break;

    default:
      if (code == 0)
        output_operand_lossage ("invalid expression - try using "
                                "an output modifier");
      else
        output_operand_lossage ("invalid expression for output "
                                "modifier '%c'", code);
      break;
    }
}
/* Target hook for assembling integer objects.  We need to define it
   here to work around a bug in some versions of GAS, which couldn't
   handle values smaller than INT_MIN when printed in decimal.  */

static bool
s390_assemble_integer (rtx x, unsigned int size, int aligned_p)
{
  if (size == 8 && aligned_p
      && GET_CODE (x) == CONST_INT && INTVAL (x) < INT_MIN)
    {
      fprintf (asm_out_file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n",
               INTVAL (x));
      return true;
    }
  return default_assemble_integer (x, size, aligned_p);
}
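/* Illustration only: e.g. the value -4294967296, which is smaller than
   INT_MIN, is emitted as

     .quad   0xffffffff00000000

   instead of its decimal spelling, which affected GAS versions mis-parsed.  */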
/* Returns true if register REGNO is used for forming
   a memory address in expression X.  */

static bool
reg_used_in_mem_p (int regno, rtx x)
{
  enum rtx_code code = GET_CODE (x);
  int i, j;
  const char *fmt;

  if (code == MEM)
    {
      if (refers_to_regno_p (regno, regno+1,
                             XEXP (x, 0), 0))
        return true;
    }
  else if (code == SET
           && GET_CODE (SET_DEST (x)) == PC)
    {
      if (refers_to_regno_p (regno, regno+1,
                             SET_SRC (x), 0))
        return true;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e'
          && reg_used_in_mem_p (regno, XEXP (x, i)))
        return true;

      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          if (reg_used_in_mem_p (regno, XVECEXP (x, i, j)))
            return true;
    }
  return false;
}
/* Returns true if expression DEP_RTX sets an address register
   used by instruction INSN to address memory.  */

static bool
addr_generation_dependency_p (rtx dep_rtx, rtx insn)
{
  rtx target, pat;

  if (NONJUMP_INSN_P (dep_rtx))
    dep_rtx = PATTERN (dep_rtx);

  if (GET_CODE (dep_rtx) == SET)
    {
      target = SET_DEST (dep_rtx);
      if (GET_CODE (target) == STRICT_LOW_PART)
        target = XEXP (target, 0);
      while (GET_CODE (target) == SUBREG)
        target = SUBREG_REG (target);

      if (GET_CODE (target) == REG)
        {
          int regno = REGNO (target);

          if (s390_safe_attr_type (insn) == TYPE_LA)
            {
              pat = PATTERN (insn);
              if (GET_CODE (pat) == PARALLEL)
                {
                  gcc_assert (XVECLEN (pat, 0) == 2);
                  pat = XVECEXP (pat, 0, 0);
                }
              gcc_assert (GET_CODE (pat) == SET);
              return refers_to_regno_p (regno, regno+1, SET_SRC (pat), 0);
            }
          else if (get_attr_atype (insn) == ATYPE_AGEN)
            return reg_used_in_mem_p (regno, PATTERN (insn));
        }
    }
  return false;
}
/* Return 1, if dep_insn sets register used in insn in the agen unit.  */

int
s390_agen_dep_p (rtx dep_insn, rtx insn)
{
  rtx dep_rtx = PATTERN (dep_insn);
  int i;

  if (GET_CODE (dep_rtx) == SET
      && addr_generation_dependency_p (dep_rtx, insn))
    return 1;
  else if (GET_CODE (dep_rtx) == PARALLEL)
    {
      for (i = 0; i < XVECLEN (dep_rtx, 0); i++)
        {
          if (addr_generation_dependency_p (XVECEXP (dep_rtx, 0, i), insn))
            return 1;
        }
    }
  return 0;
}
/* A C statement (sans semicolon) to update the integer scheduling priority
   INSN_PRIORITY (INSN).  Increase the priority to execute the INSN earlier,
   reduce the priority to execute INSN later.  Do not define this macro if
   you do not need to adjust the scheduling priorities of insns.

   A STD instruction should be scheduled earlier,
   in order to use the bypass.  */
static int
s390_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
{
  if (! INSN_P (insn))
    return priority;

  if (s390_tune != PROCESSOR_2084_Z990
      && s390_tune != PROCESSOR_2094_Z9_109
      && s390_tune != PROCESSOR_2097_Z10
      && s390_tune != PROCESSOR_2817_Z196
      && s390_tune != PROCESSOR_2827_ZEC12)
    return priority;

  switch (s390_safe_attr_type (insn))
    {
      case TYPE_FSTOREDF:
      case TYPE_FSTORESF:
        priority = priority << 3;
        break;
      case TYPE_STORE:
      case TYPE_STM:
        priority = priority << 1;
        break;
      default:
        break;
    }
  return priority;
}
/* The number of instructions that can be issued per cycle.  */

static int
s390_issue_rate (void)
{
  switch (s390_tune)
    {
    case PROCESSOR_2084_Z990:
    case PROCESSOR_2094_Z9_109:
    case PROCESSOR_2817_Z196:
      return 3;
    case PROCESSOR_2097_Z10:
    case PROCESSOR_2827_ZEC12:
      return 2;
    default:
      return 1;
    }
}

static int
s390_first_cycle_multipass_dfa_lookahead (void)
{
  return 4;
}
/* Annotate every literal pool reference in X by an UNSPEC_LTREF expression.
   Fix up MEMs as required.  */

static void
annotate_constant_pool_refs (rtx *x)
{
  int i, j;
  const char *fmt;

  gcc_assert (GET_CODE (*x) != SYMBOL_REF
              || !CONSTANT_POOL_ADDRESS_P (*x));

  /* Literal pool references can only occur inside a MEM ...  */
  if (GET_CODE (*x) == MEM)
    {
      rtx memref = XEXP (*x, 0);

      if (GET_CODE (memref) == SYMBOL_REF
          && CONSTANT_POOL_ADDRESS_P (memref))
        {
          rtx base = cfun->machine->base_reg;
          rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, memref, base),
                                     UNSPEC_LTREF);

          *x = replace_equiv_address (*x, addr);
          return;
        }

      if (GET_CODE (memref) == CONST
          && GET_CODE (XEXP (memref, 0)) == PLUS
          && GET_CODE (XEXP (XEXP (memref, 0), 1)) == CONST_INT
          && GET_CODE (XEXP (XEXP (memref, 0), 0)) == SYMBOL_REF
          && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (memref, 0), 0)))
        {
          HOST_WIDE_INT off = INTVAL (XEXP (XEXP (memref, 0), 1));
          rtx sym = XEXP (XEXP (memref, 0), 0);
          rtx base = cfun->machine->base_reg;
          rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
                                     UNSPEC_LTREF);

          *x = replace_equiv_address (*x, plus_constant (Pmode, addr, off));
          return;
        }
    }

  /* ... or a load-address type pattern.  */
  if (GET_CODE (*x) == SET)
    {
      rtx addrref = SET_SRC (*x);

      if (GET_CODE (addrref) == SYMBOL_REF
          && CONSTANT_POOL_ADDRESS_P (addrref))
        {
          rtx base = cfun->machine->base_reg;
          rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addrref, base),
                                     UNSPEC_LTREF);

          SET_SRC (*x) = addr;
          return;
        }

      if (GET_CODE (addrref) == CONST
          && GET_CODE (XEXP (addrref, 0)) == PLUS
          && GET_CODE (XEXP (XEXP (addrref, 0), 1)) == CONST_INT
          && GET_CODE (XEXP (XEXP (addrref, 0), 0)) == SYMBOL_REF
          && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (addrref, 0), 0)))
        {
          HOST_WIDE_INT off = INTVAL (XEXP (XEXP (addrref, 0), 1));
          rtx sym = XEXP (XEXP (addrref, 0), 0);
          rtx base = cfun->machine->base_reg;
          rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
                                     UNSPEC_LTREF);

          SET_SRC (*x) = plus_constant (Pmode, addr, off);
          return;
        }
    }

  /* Annotate LTREL_BASE as well.  */
  if (GET_CODE (*x) == UNSPEC
      && XINT (*x, 1) == UNSPEC_LTREL_BASE)
    {
      rtx base = cfun->machine->base_reg;
      *x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XVECEXP (*x, 0, 0), base),
                           UNSPEC_LTREL_BASE);
      return;
    }

  fmt = GET_RTX_FORMAT (GET_CODE (*x));
  for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          annotate_constant_pool_refs (&XEXP (*x, i));
        }
      else if (fmt[i] == 'E')
        {
          for (j = 0; j < XVECLEN (*x, i); j++)
            annotate_constant_pool_refs (&XVECEXP (*x, i, j));
        }
    }
}
/* Split all branches that exceed the maximum distance.
   Returns true if this created a new literal pool entry.  */

static int
s390_split_branches (void)
{
  rtx temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
  int new_literal = 0, ret;
  rtx insn, pat, tmp, target;
  rtx *label;

  /* We need correct insn addresses.  */

  shorten_branches (get_insns ());

  /* Find all branches that exceed 64KB, and split them.  */

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    {
      if (! JUMP_P (insn))
        continue;

      pat = PATTERN (insn);
      if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 2)
        pat = XVECEXP (pat, 0, 0);
      if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
        continue;

      if (GET_CODE (SET_SRC (pat)) == LABEL_REF)
        {
          label = &SET_SRC (pat);
        }
      else if (GET_CODE (SET_SRC (pat)) == IF_THEN_ELSE)
        {
          if (GET_CODE (XEXP (SET_SRC (pat), 1)) == LABEL_REF)
            label = &XEXP (SET_SRC (pat), 1);
          else if (GET_CODE (XEXP (SET_SRC (pat), 2)) == LABEL_REF)
            label = &XEXP (SET_SRC (pat), 2);
          else
            continue;
        }
      else
        continue;

      if (get_attr_length (insn) <= 4)
        continue;

      /* We are going to use the return register as scratch register,
         make sure it will be saved/restored by the prologue/epilogue.  */
      cfun_frame_layout.save_return_addr_p = 1;

      if (!flag_pic)
        {
          new_literal = 1;
          tmp = force_const_mem (Pmode, *label);
          tmp = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, tmp), insn);
          INSN_ADDRESSES_NEW (tmp, -1);
          annotate_constant_pool_refs (&PATTERN (tmp));

          target = temp_reg;
        }
      else
        {
          new_literal = 1;
          target = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, *label),
                                   UNSPEC_LTREL_OFFSET);
          target = gen_rtx_CONST (Pmode, target);
          target = force_const_mem (Pmode, target);
          tmp = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, target), insn);
          INSN_ADDRESSES_NEW (tmp, -1);
          annotate_constant_pool_refs (&PATTERN (tmp));

          target = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XEXP (target, 0),
                                                     cfun->machine->base_reg),
                                   UNSPEC_LTREL_BASE);
          target = gen_rtx_PLUS (Pmode, temp_reg, target);
        }

      ret = validate_change (insn, label, target, 0);
      gcc_assert (ret);
    }

  return new_literal;
}
/* Find an annotated literal pool symbol referenced in RTX X,
   and store it at REF.  Will abort if X contains references to
   more than one such pool symbol; multiple references to the same
   symbol are allowed, however.

   The rtx pointed to by REF must be initialized to NULL_RTX
   by the caller before calling this routine.  */

static void
find_constant_pool_ref (rtx x, rtx *ref)
{
  int i, j;
  const char *fmt;

  /* Ignore LTREL_BASE references.  */
  if (GET_CODE (x) == UNSPEC
      && XINT (x, 1) == UNSPEC_LTREL_BASE)
    return;
  /* Likewise POOL_ENTRY insns.  */
  if (GET_CODE (x) == UNSPEC_VOLATILE
      && XINT (x, 1) == UNSPECV_POOL_ENTRY)
    return;

  gcc_assert (GET_CODE (x) != SYMBOL_REF
              || !CONSTANT_POOL_ADDRESS_P (x));

  if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_LTREF)
    {
      rtx sym = XVECEXP (x, 0, 0);
      gcc_assert (GET_CODE (sym) == SYMBOL_REF
                  && CONSTANT_POOL_ADDRESS_P (sym));

      if (*ref == NULL_RTX)
        *ref = sym;
      else
        gcc_assert (*ref == sym);

      return;
    }

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          find_constant_pool_ref (XEXP (x, i), ref);
        }
      else if (fmt[i] == 'E')
        {
          for (j = 0; j < XVECLEN (x, i); j++)
            find_constant_pool_ref (XVECEXP (x, i, j), ref);
        }
    }
}
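/* Typical use, as in the pool scanning passes further below (sketched here
   for reference):

     rtx pool_ref = NULL_RTX;
     find_constant_pool_ref (PATTERN (insn), &pool_ref);
     if (pool_ref)
       {
         rtx constant = get_pool_constant (pool_ref);
         enum machine_mode mode = get_pool_mode (pool_ref);
         ...
       }
*/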
/* Replace every reference to the annotated literal pool
   symbol REF in X by its base plus OFFSET.  */

static void
replace_constant_pool_ref (rtx *x, rtx ref, rtx offset)
{
  int i, j;
  const char *fmt;

  gcc_assert (*x != ref);

  if (GET_CODE (*x) == UNSPEC
      && XINT (*x, 1) == UNSPEC_LTREF
      && XVECEXP (*x, 0, 0) == ref)
    {
      *x = gen_rtx_PLUS (Pmode, XVECEXP (*x, 0, 1), offset);
      return;
    }

  if (GET_CODE (*x) == PLUS
      && GET_CODE (XEXP (*x, 1)) == CONST_INT
      && GET_CODE (XEXP (*x, 0)) == UNSPEC
      && XINT (XEXP (*x, 0), 1) == UNSPEC_LTREF
      && XVECEXP (XEXP (*x, 0), 0, 0) == ref)
    {
      rtx addr = gen_rtx_PLUS (Pmode, XVECEXP (XEXP (*x, 0), 0, 1), offset);
      *x = plus_constant (Pmode, addr, INTVAL (XEXP (*x, 1)));
      return;
    }

  fmt = GET_RTX_FORMAT (GET_CODE (*x));
  for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          replace_constant_pool_ref (&XEXP (*x, i), ref, offset);
        }
      else if (fmt[i] == 'E')
        {
          for (j = 0; j < XVECLEN (*x, i); j++)
            replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, offset);
        }
    }
}
/* Check whether X contains an UNSPEC_LTREL_BASE.
   Return its constant pool symbol if found, NULL_RTX otherwise.  */

static rtx
find_ltrel_base (rtx x)
{
  int i, j;
  const char *fmt;

  if (GET_CODE (x) == UNSPEC
      && XINT (x, 1) == UNSPEC_LTREL_BASE)
    return XVECEXP (x, 0, 0);

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          rtx fnd = find_ltrel_base (XEXP (x, i));
          if (fnd)
            return fnd;
        }
      else if (fmt[i] == 'E')
        {
          for (j = 0; j < XVECLEN (x, i); j++)
            {
              rtx fnd = find_ltrel_base (XVECEXP (x, i, j));
              if (fnd)
                return fnd;
            }
        }
    }

  return NULL_RTX;
}
/* Replace any occurrence of UNSPEC_LTREL_BASE in X with its base.  */

static void
replace_ltrel_base (rtx *x)
{
  int i, j;
  const char *fmt;

  if (GET_CODE (*x) == UNSPEC
      && XINT (*x, 1) == UNSPEC_LTREL_BASE)
    {
      *x = XVECEXP (*x, 0, 1);
      return;
    }

  fmt = GET_RTX_FORMAT (GET_CODE (*x));
  for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          replace_ltrel_base (&XEXP (*x, i));
        }
      else if (fmt[i] == 'E')
        {
          for (j = 0; j < XVECLEN (*x, i); j++)
            replace_ltrel_base (&XVECEXP (*x, i, j));
        }
    }
}
/* We keep a list of constants which we have to add to internal
   constant tables in the middle of large functions.  */

#define NR_C_MODES 11
enum machine_mode constant_modes[NR_C_MODES] =
{
  TFmode, TImode, TDmode,
  DFmode, DImode, DDmode,
  SFmode, SImode, SDmode,
  HImode,
  QImode
};

struct constant
{
  struct constant *next;
  rtx value;
  rtx label;
};

struct constant_pool
{
  struct constant_pool *next;
  rtx first_insn;
  rtx pool_insn;
  bitmap insns;
  rtx emit_pool_after;

  struct constant *constants[NR_C_MODES];
  struct constant *execute;
  rtx label;
  int size;
};
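/* Note: constant_modes is ordered by decreasing size (16-, 8-, 4-, 2- and
   1-byte entries); s390_dump_pool below relies on this so that emitting
   the constants in array order automatically keeps each entry naturally
   aligned without extra padding.  */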
/* Allocate new constant_pool structure.  */

static struct constant_pool *
s390_alloc_pool (void)
{
  struct constant_pool *pool;
  int i;

  pool = (struct constant_pool *) xmalloc (sizeof *pool);
  pool->next = NULL;
  for (i = 0; i < NR_C_MODES; i++)
    pool->constants[i] = NULL;

  pool->execute = NULL;
  pool->label = gen_label_rtx ();
  pool->first_insn = NULL_RTX;
  pool->pool_insn = NULL_RTX;
  pool->insns = BITMAP_ALLOC (NULL);
  pool->size = 0;
  pool->emit_pool_after = NULL_RTX;

  return pool;
}
/* Create new constant pool covering instructions starting at INSN
   and chain it to the end of POOL_LIST.  */

static struct constant_pool *
s390_start_pool (struct constant_pool **pool_list, rtx insn)
{
  struct constant_pool *pool, **prev;

  pool = s390_alloc_pool ();
  pool->first_insn = insn;

  for (prev = pool_list; *prev; prev = &(*prev)->next)
    ;
  *prev = pool;

  return pool;
}
/* End range of instructions covered by POOL at INSN and emit
   placeholder insn representing the pool.  */

static void
s390_end_pool (struct constant_pool *pool, rtx insn)
{
  rtx pool_size = GEN_INT (pool->size + 8 /* alignment slop */);

  if (!insn)
    insn = get_last_insn ();

  pool->pool_insn = emit_insn_after (gen_pool (pool_size), insn);
  INSN_ADDRESSES_NEW (pool->pool_insn, -1);
}
/* Add INSN to the list of insns covered by POOL.  */

static void
s390_add_pool_insn (struct constant_pool *pool, rtx insn)
{
  bitmap_set_bit (pool->insns, INSN_UID (insn));
}
/* Return pool out of POOL_LIST that covers INSN.  */

static struct constant_pool *
s390_find_pool (struct constant_pool *pool_list, rtx insn)
{
  struct constant_pool *pool;

  for (pool = pool_list; pool; pool = pool->next)
    if (bitmap_bit_p (pool->insns, INSN_UID (insn)))
      break;

  return pool;
}
/* Add constant VAL of mode MODE to the constant pool POOL.  */

static void
s390_add_constant (struct constant_pool *pool, rtx val, enum machine_mode mode)
{
  struct constant *c;
  int i;

  for (i = 0; i < NR_C_MODES; i++)
    if (constant_modes[i] == mode)
      break;
  gcc_assert (i != NR_C_MODES);

  for (c = pool->constants[i]; c != NULL; c = c->next)
    if (rtx_equal_p (val, c->value))
      break;

  if (c == NULL)
    {
      c = (struct constant *) xmalloc (sizeof *c);
      c->value = val;
      c->label = gen_label_rtx ();
      c->next = pool->constants[i];
      pool->constants[i] = c;
      pool->size += GET_MODE_SIZE (mode);
    }
}
/* Return an rtx that represents the offset of X from the start of
   pool POOL.  */

static rtx
s390_pool_offset (struct constant_pool *pool, rtx x)
{
  rtx label;

  label = gen_rtx_LABEL_REF (GET_MODE (x), pool->label);
  x = gen_rtx_UNSPEC (GET_MODE (x), gen_rtvec (2, x, label),
                      UNSPEC_POOL_OFFSET);
  return gen_rtx_CONST (GET_MODE (x), x);
}
/* Find constant VAL of mode MODE in the constant pool POOL.
   Return an RTX describing the distance from the start of
   the pool to the location of the new constant.  */

static rtx
s390_find_constant (struct constant_pool *pool, rtx val,
                    enum machine_mode mode)
{
  struct constant *c;
  int i;

  for (i = 0; i < NR_C_MODES; i++)
    if (constant_modes[i] == mode)
      break;
  gcc_assert (i != NR_C_MODES);

  for (c = pool->constants[i]; c != NULL; c = c->next)
    if (rtx_equal_p (val, c->value))
      break;

  gcc_assert (c);

  return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
}
/* Check whether INSN is an execute.  Return the label_ref to its
   execute target template if so, NULL_RTX otherwise.  */

static rtx
s390_execute_label (rtx insn)
{
  if (NONJUMP_INSN_P (insn)
      && GET_CODE (PATTERN (insn)) == PARALLEL
      && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == UNSPEC
      && XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_EXECUTE)
    return XVECEXP (XVECEXP (PATTERN (insn), 0, 0), 0, 2);

  return NULL_RTX;
}
/* Add execute target for INSN to the constant pool POOL.  */

static void
s390_add_execute (struct constant_pool *pool, rtx insn)
{
  struct constant *c;

  for (c = pool->execute; c != NULL; c = c->next)
    if (INSN_UID (insn) == INSN_UID (c->value))
      break;

  if (c == NULL)
    {
      c = (struct constant *) xmalloc (sizeof *c);
      c->value = insn;
      c->label = gen_label_rtx ();
      c->next = pool->execute;
      pool->execute = c;
      pool->size += 6;
    }
}
/* Find execute target for INSN in the constant pool POOL.
   Return an RTX describing the distance from the start of
   the pool to the location of the execute target.  */

static rtx
s390_find_execute (struct constant_pool *pool, rtx insn)
{
  struct constant *c;

  for (c = pool->execute; c != NULL; c = c->next)
    if (INSN_UID (insn) == INSN_UID (c->value))
      break;

  gcc_assert (c);

  return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
}
/* For an execute INSN, extract the execute target template.  */

static rtx
s390_execute_target (rtx insn)
{
  rtx pattern = PATTERN (insn);
  gcc_assert (s390_execute_label (insn));

  if (XVECLEN (pattern, 0) == 2)
    {
      pattern = copy_rtx (XVECEXP (pattern, 0, 1));
    }
  else
    {
      rtvec vec = rtvec_alloc (XVECLEN (pattern, 0) - 1);
      int i;

      for (i = 0; i < XVECLEN (pattern, 0) - 1; i++)
        RTVEC_ELT (vec, i) = copy_rtx (XVECEXP (pattern, 0, i + 1));

      pattern = gen_rtx_PARALLEL (VOIDmode, vec);
    }

  return pattern;
}
/* Indicate that INSN cannot be duplicated.  This is the case for
   execute insns that carry a unique label.  */

static bool
s390_cannot_copy_insn_p (rtx insn)
{
  rtx label = s390_execute_label (insn);
  return label && label != const0_rtx;
}
/* Dump out the constants in POOL.  If REMOTE_LABEL is true,
   do not emit the pool base label.  */

static void
s390_dump_pool (struct constant_pool *pool, bool remote_label)
{
  struct constant *c;
  rtx insn = pool->pool_insn;
  int i;

  /* Switch to rodata section.  */
  if (TARGET_CPU_ZARCH)
    {
      insn = emit_insn_after (gen_pool_section_start (), insn);
      INSN_ADDRESSES_NEW (insn, -1);
    }

  /* Ensure minimum pool alignment.  */
  if (TARGET_CPU_ZARCH)
    insn = emit_insn_after (gen_pool_align (GEN_INT (8)), insn);
  else
    insn = emit_insn_after (gen_pool_align (GEN_INT (4)), insn);
  INSN_ADDRESSES_NEW (insn, -1);

  /* Emit pool base label.  */
  if (!remote_label)
    {
      insn = emit_label_after (pool->label, insn);
      INSN_ADDRESSES_NEW (insn, -1);
    }

  /* Dump constants in descending alignment requirement order,
     ensuring proper alignment for every constant.  */
  for (i = 0; i < NR_C_MODES; i++)
    for (c = pool->constants[i]; c; c = c->next)
      {
        /* Convert UNSPEC_LTREL_OFFSET unspecs to pool-relative references.  */
        rtx value = copy_rtx (c->value);
        if (GET_CODE (value) == CONST
            && GET_CODE (XEXP (value, 0)) == UNSPEC
            && XINT (XEXP (value, 0), 1) == UNSPEC_LTREL_OFFSET
            && XVECLEN (XEXP (value, 0), 0) == 1)
          value = s390_pool_offset (pool, XVECEXP (XEXP (value, 0), 0, 0));

        insn = emit_label_after (c->label, insn);
        INSN_ADDRESSES_NEW (insn, -1);

        value = gen_rtx_UNSPEC_VOLATILE (constant_modes[i],
                                         gen_rtvec (1, value),
                                         UNSPECV_POOL_ENTRY);
        insn = emit_insn_after (value, insn);
        INSN_ADDRESSES_NEW (insn, -1);
      }

  /* Ensure minimum alignment for instructions.  */
  insn = emit_insn_after (gen_pool_align (GEN_INT (2)), insn);
  INSN_ADDRESSES_NEW (insn, -1);

  /* Output in-pool execute template insns.  */
  for (c = pool->execute; c; c = c->next)
    {
      insn = emit_label_after (c->label, insn);
      INSN_ADDRESSES_NEW (insn, -1);

      insn = emit_insn_after (s390_execute_target (c->value), insn);
      INSN_ADDRESSES_NEW (insn, -1);
    }

  /* Switch back to previous section.  */
  if (TARGET_CPU_ZARCH)
    {
      insn = emit_insn_after (gen_pool_section_end (), insn);
      INSN_ADDRESSES_NEW (insn, -1);
    }

  insn = emit_barrier_after (insn);
  INSN_ADDRESSES_NEW (insn, -1);

  /* Remove placeholder insn.  */
  remove_insn (pool->pool_insn);
}
/* Free all memory used by POOL.  */

static void
s390_free_pool (struct constant_pool *pool)
{
  struct constant *c, *next;
  int i;

  for (i = 0; i < NR_C_MODES; i++)
    for (c = pool->constants[i]; c; c = next)
      {
        next = c->next;
        free (c);
      }

  for (c = pool->execute; c; c = next)
    {
      next = c->next;
      free (c);
    }

  BITMAP_FREE (pool->insns);
  free (pool);
}
/* Collect main literal pool.  Return NULL on overflow.  */

static struct constant_pool *
s390_mainpool_start (void)
{
  struct constant_pool *pool;
  rtx insn;

  pool = s390_alloc_pool ();

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    {
      if (NONJUMP_INSN_P (insn)
          && GET_CODE (PATTERN (insn)) == SET
          && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC_VOLATILE
          && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPECV_MAIN_POOL)
        {
          gcc_assert (!pool->pool_insn);
          pool->pool_insn = insn;
        }

      if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
        {
          s390_add_execute (pool, insn);
        }
      else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
        {
          rtx pool_ref = NULL_RTX;
          find_constant_pool_ref (PATTERN (insn), &pool_ref);
          if (pool_ref)
            {
              rtx constant = get_pool_constant (pool_ref);
              enum machine_mode mode = get_pool_mode (pool_ref);
              s390_add_constant (pool, constant, mode);
            }
        }

      /* If hot/cold partitioning is enabled we have to make sure that
         the literal pool is emitted in the same section where the
         initialization of the literal pool base pointer takes place.
         emit_pool_after is only used in the non-overflow case on
         non-z/Architecture CPUs, where we can emit the literal pool
         at the end of the function body within the text section.  */
      if (NOTE_P (insn)
          && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS
          && !pool->emit_pool_after)
        pool->emit_pool_after = PREV_INSN (insn);
    }

  gcc_assert (pool->pool_insn || pool->size == 0);

  if (pool->size >= 4096)
    {
      /* We're going to chunkify the pool, so remove the main
         pool placeholder insn.  */
      remove_insn (pool->pool_insn);

      s390_free_pool (pool);
      pool = NULL;
    }

  /* If the function ends with the section where the literal pool
     should be emitted, set the marker to its end.  */
  if (pool && !pool->emit_pool_after)
    pool->emit_pool_after = get_last_insn ();

  return pool;
}
/* POOL holds the main literal pool as collected by s390_mainpool_start.
   Modify the current function to output the pool constants as well as
   the pool register setup instruction.  */

static void
s390_mainpool_finish (struct constant_pool *pool)
{
  rtx base_reg = cfun->machine->base_reg;
  rtx insn;

  /* If the pool is empty, we're done.  */
  if (pool->size == 0)
    {
      /* We don't actually need a base register after all.  */
      cfun->machine->base_reg = NULL_RTX;

      if (pool->pool_insn)
        remove_insn (pool->pool_insn);
      s390_free_pool (pool);
      return;
    }

  /* We need correct insn addresses.  */
  shorten_branches (get_insns ());

  /* On zSeries, we use a LARL to load the pool register.  The pool is
     located in the .rodata section, so we emit it after the function.  */
  if (TARGET_CPU_ZARCH)
    {
      insn = gen_main_base_64 (base_reg, pool->label);
      insn = emit_insn_after (insn, pool->pool_insn);
      INSN_ADDRESSES_NEW (insn, -1);
      remove_insn (pool->pool_insn);

      insn = get_last_insn ();
      pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
      INSN_ADDRESSES_NEW (pool->pool_insn, -1);

      s390_dump_pool (pool, 0);
    }

  /* On S/390, if the total size of the function's code plus literal pool
     does not exceed 4096 bytes, we use BASR to set up a function base
     pointer, and emit the literal pool at the end of the function.  */
  else if (INSN_ADDRESSES (INSN_UID (pool->emit_pool_after))
           + pool->size + 8 /* alignment slop */ < 4096)
    {
      insn = gen_main_base_31_small (base_reg, pool->label);
      insn = emit_insn_after (insn, pool->pool_insn);
      INSN_ADDRESSES_NEW (insn, -1);
      remove_insn (pool->pool_insn);

      insn = emit_label_after (pool->label, insn);
      INSN_ADDRESSES_NEW (insn, -1);

      /* emit_pool_after will be set by s390_mainpool_start to the
         last insn of the section where the literal pool should be
         emitted.  */
      insn = pool->emit_pool_after;

      pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
      INSN_ADDRESSES_NEW (pool->pool_insn, -1);

      s390_dump_pool (pool, 1);
    }

  /* Otherwise, we emit an inline literal pool and use BASR to branch
     over it, setting up the pool register at the same time.  */
  else
    {
      rtx pool_end = gen_label_rtx ();

      insn = gen_main_base_31_large (base_reg, pool->label, pool_end);
      insn = emit_jump_insn_after (insn, pool->pool_insn);
      JUMP_LABEL (insn) = pool_end;
      INSN_ADDRESSES_NEW (insn, -1);
      remove_insn (pool->pool_insn);

      insn = emit_label_after (pool->label, insn);
      INSN_ADDRESSES_NEW (insn, -1);

      pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
      INSN_ADDRESSES_NEW (pool->pool_insn, -1);

      insn = emit_label_after (pool_end, pool->pool_insn);
      INSN_ADDRESSES_NEW (insn, -1);

      s390_dump_pool (pool, 1);
    }

  /* Replace all literal pool references.  */

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
        replace_ltrel_base (&PATTERN (insn));

      if (NONJUMP_INSN_P (insn) || CALL_P (insn))
        {
          rtx addr, pool_ref = NULL_RTX;
          find_constant_pool_ref (PATTERN (insn), &pool_ref);
          if (pool_ref)
            {
              if (s390_execute_label (insn))
                addr = s390_find_execute (pool, insn);
              else
                addr = s390_find_constant (pool, get_pool_constant (pool_ref),
                                           get_pool_mode (pool_ref));

              replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
              INSN_CODE (insn) = -1;
            }
        }
    }

  /* Free the pool.  */
  s390_free_pool (pool);
}
/* POOL holds the main literal pool as collected by s390_mainpool_start.
   We have decided we cannot use this pool, so revert all changes
   to the current function that were done by s390_mainpool_start.  */

static void
s390_mainpool_cancel (struct constant_pool *pool)
{
  /* We didn't actually change the instruction stream, so simply
     free the pool memory.  */
  s390_free_pool (pool);
}
6807 /* Chunkify the literal pool. */
6809 #define S390_POOL_CHUNK_MIN 0xc00
6810 #define S390_POOL_CHUNK_MAX 0xe00
static struct constant_pool *
s390_chunkify_start (void)
{
  struct constant_pool *curr_pool = NULL, *pool_list = NULL;
  int extra_size = 0;
  bitmap far_labels;
  rtx pending_ltrel = NULL_RTX;
  rtx insn;

  rtx (*gen_reload_base) (rtx, rtx) =
    TARGET_CPU_ZARCH ? gen_reload_base_64 : gen_reload_base_31;


  /* We need correct insn addresses.  */

  shorten_branches (get_insns ());

  /* Scan all insns and move literals to pool chunks.  */

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    {
      bool section_switch_p = false;

      /* Check for pending LTREL_BASE.  */
      if (INSN_P (insn))
        {
          rtx ltrel_base = find_ltrel_base (PATTERN (insn));
          if (ltrel_base)
            {
              gcc_assert (ltrel_base == pending_ltrel);
              pending_ltrel = NULL_RTX;
            }
        }

      if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
        {
          if (!curr_pool)
            curr_pool = s390_start_pool (&pool_list, insn);

          s390_add_execute (curr_pool, insn);
          s390_add_pool_insn (curr_pool, insn);
        }
      else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
        {
          rtx pool_ref = NULL_RTX;
          find_constant_pool_ref (PATTERN (insn), &pool_ref);
          if (pool_ref)
            {
              rtx constant = get_pool_constant (pool_ref);
              enum machine_mode mode = get_pool_mode (pool_ref);

              if (!curr_pool)
                curr_pool = s390_start_pool (&pool_list, insn);

              s390_add_constant (curr_pool, constant, mode);
              s390_add_pool_insn (curr_pool, insn);

              /* Don't split the pool chunk between a LTREL_OFFSET load
                 and the corresponding LTREL_BASE.  */
              if (GET_CODE (constant) == CONST
                  && GET_CODE (XEXP (constant, 0)) == UNSPEC
                  && XINT (XEXP (constant, 0), 1) == UNSPEC_LTREL_OFFSET)
                {
                  gcc_assert (!pending_ltrel);
                  pending_ltrel = pool_ref;
                }
            }
        }

      if (JUMP_P (insn) || JUMP_TABLE_DATA_P (insn) || LABEL_P (insn))
        {
          if (curr_pool)
            s390_add_pool_insn (curr_pool, insn);
          /* An LTREL_BASE must follow within the same basic block.  */
          gcc_assert (!pending_ltrel);
        }

      if (NOTE_P (insn))
        switch (NOTE_KIND (insn))
          {
          case NOTE_INSN_SWITCH_TEXT_SECTIONS:
            section_switch_p = true;
            break;
          case NOTE_INSN_VAR_LOCATION:
          case NOTE_INSN_CALL_ARG_LOCATION:
            continue;
          default:
            break;
          }

      if (!curr_pool
          || INSN_ADDRESSES_SIZE () <= (size_t) INSN_UID (insn)
          || INSN_ADDRESSES (INSN_UID (insn)) == -1)
        continue;

      if (TARGET_CPU_ZARCH)
        {
          if (curr_pool->size < S390_POOL_CHUNK_MAX)
            continue;

          s390_end_pool (curr_pool, NULL_RTX);
          curr_pool = NULL;
        }
      else
        {
          int chunk_size = INSN_ADDRESSES (INSN_UID (insn))
                           - INSN_ADDRESSES (INSN_UID (curr_pool->first_insn))
                           + extra_size;

          /* We will later have to insert base register reload insns.
             Those will have an effect on code size, which we need to
             consider here.  This calculation makes rather pessimistic
             worst-case assumptions.  */
          if (LABEL_P (insn))
            extra_size += 6;

          if (chunk_size < S390_POOL_CHUNK_MIN
              && curr_pool->size < S390_POOL_CHUNK_MIN
              && !section_switch_p)
            continue;

          /* Pool chunks can only be inserted after BARRIERs ...  */
          if (BARRIER_P (insn))
            {
              s390_end_pool (curr_pool, insn);
              curr_pool = NULL;
              extra_size = 0;
            }

          /* ... so if we don't find one in time, create one.  */
          else if (chunk_size > S390_POOL_CHUNK_MAX
                   || curr_pool->size > S390_POOL_CHUNK_MAX
                   || section_switch_p)
            {
              rtx label, jump, barrier, next, prev;

              if (!section_switch_p)
                {
                  /* We can insert the barrier only after a 'real' insn.  */
                  if (! NONJUMP_INSN_P (insn) && ! CALL_P (insn))
                    continue;
                  if (get_attr_length (insn) == 0)
                    continue;
                  /* Don't separate LTREL_BASE from the corresponding
                     LTREL_OFFSET load.  */
                  if (pending_ltrel)
                    continue;
                  next = insn;
                  do
                    {
                      insn = next;
                      next = NEXT_INSN (insn);
                    }
                  while (next
                         && NOTE_P (next)
                         && (NOTE_KIND (next) == NOTE_INSN_VAR_LOCATION
                             || NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION));
                }
              else
                {
                  gcc_assert (!pending_ltrel);

                  /* The old pool has to end before the section switch
                     note in order to make it part of the current
                     section.  */
                  insn = PREV_INSN (insn);
                }

              label = gen_label_rtx ();
              prev = insn;
              if (prev && NOTE_P (prev))
                prev = prev_nonnote_insn (prev);
              if (prev)
                jump = emit_jump_insn_after_setloc (gen_jump (label), insn,
                                                    INSN_LOCATION (prev));
              else
                jump = emit_jump_insn_after_noloc (gen_jump (label), insn);
              barrier = emit_barrier_after (jump);
              insn = emit_label_after (label, barrier);
              JUMP_LABEL (jump) = label;
              LABEL_NUSES (label) = 1;

              INSN_ADDRESSES_NEW (jump, -1);
              INSN_ADDRESSES_NEW (barrier, -1);
              INSN_ADDRESSES_NEW (insn, -1);

              s390_end_pool (curr_pool, barrier);
              curr_pool = NULL;
              extra_size = 0;
            }
        }
    }

  if (curr_pool)
    s390_end_pool (curr_pool, NULL_RTX);
  gcc_assert (!pending_ltrel);

  /* Find all labels that are branched into
     from an insn belonging to a different chunk.  */

  far_labels = BITMAP_ALLOC (NULL);

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    {
      /* Labels marked with LABEL_PRESERVE_P can be target
         of non-local jumps, so we have to mark them.
         The same holds for named labels.

         Don't do that, however, if it is the label before
         a jump table.  */

      if (LABEL_P (insn)
          && (LABEL_PRESERVE_P (insn) || LABEL_NAME (insn)))
        {
          rtx vec_insn = next_active_insn (insn);
          if (! vec_insn || ! JUMP_TABLE_DATA_P (vec_insn))
            bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (insn));
        }
      /* If we have a direct jump (conditional or unconditional)
         or a casesi jump, check all potential targets.  */
      else if (JUMP_P (insn))
        {
          rtx pat = PATTERN (insn);

          if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 2)
            pat = XVECEXP (pat, 0, 0);

          if (GET_CODE (pat) == SET)
            {
              rtx label = JUMP_LABEL (insn);
              if (label && !ANY_RETURN_P (label))
                {
                  if (s390_find_pool (pool_list, label)
                      != s390_find_pool (pool_list, insn))
                    bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
                }
            }
          else if (GET_CODE (pat) == PARALLEL
                   && XVECLEN (pat, 0) == 2
                   && GET_CODE (XVECEXP (pat, 0, 0)) == SET
                   && GET_CODE (XVECEXP (pat, 0, 1)) == USE
                   && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == LABEL_REF)
            {
              /* Find the jump table used by this casesi jump.  */
              rtx vec_label = XEXP (XEXP (XVECEXP (pat, 0, 1), 0), 0);
              rtx vec_insn = next_active_insn (vec_label);
              if (vec_insn && JUMP_TABLE_DATA_P (vec_insn))
                {
                  rtx vec_pat = PATTERN (vec_insn);
                  int i, diff_p = GET_CODE (vec_pat) == ADDR_DIFF_VEC;

                  for (i = 0; i < XVECLEN (vec_pat, diff_p); i++)
                    {
                      rtx label = XEXP (XVECEXP (vec_pat, diff_p, i), 0);

                      if (s390_find_pool (pool_list, label)
                          != s390_find_pool (pool_list, insn))
                        bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
                    }
                }
            }
        }
    }

  /* Insert base register reload insns before every pool.  */

  for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
    {
      rtx new_insn = gen_reload_base (cfun->machine->base_reg,
                                      curr_pool->label);
      rtx insn = curr_pool->first_insn;
      INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
    }

  /* Insert base register reload insns at every far label.  */

  if (dump_file)
    {
      fprintf (dump_file, "Function: %s\n", current_function_name ());
      fprintf (dump_file, "far labels:\n");
    }

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    if (LABEL_P (insn)
        && bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
      {
        struct constant_pool *pool = s390_find_pool (pool_list, insn);
        if (pool)
          {
            rtx new_insn = gen_reload_base (cfun->machine->base_reg,
                                            pool->label);
            INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
          }
      }


  BITMAP_FREE (far_labels);


  /* Recompute insn addresses.  */

  init_insn_lengths ();
  shorten_branches (get_insns ());

  return pool_list;
}
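/* Usage sketch (illustrative; everything_fits is a placeholder for the
   condition the caller actually checks): the machine dependent reorg
   pass drives the chunkify routines roughly as

     struct constant_pool *pool_list = s390_chunkify_start ();
     if (everything_fits)
       s390_chunkify_finish (pool_list);
     else
       s390_chunkify_cancel (pool_list);

   The real decision logic lives in s390_reorg.  */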
/* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
   After we have decided to use this list, finish implementing
   all changes to the current function as required.  */

static void
s390_chunkify_finish (struct constant_pool *pool_list)
{
  struct constant_pool *curr_pool = NULL;
  rtx insn;


  /* Replace all literal pool references.  */

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
        replace_ltrel_base (&PATTERN (insn));

      curr_pool = s390_find_pool (pool_list, insn);
      if (!curr_pool)
        continue;

      if (NONJUMP_INSN_P (insn) || CALL_P (insn))
        {
          rtx addr, pool_ref = NULL_RTX;
          find_constant_pool_ref (PATTERN (insn), &pool_ref);
          if (pool_ref)
            {
              if (s390_execute_label (insn))
                addr = s390_find_execute (curr_pool, insn);
              else
                addr = s390_find_constant (curr_pool,
                                           get_pool_constant (pool_ref),
                                           get_pool_mode (pool_ref));

              replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
              INSN_CODE (insn) = -1;
            }
        }
    }

  /* Dump out all literal pools.  */

  for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
    s390_dump_pool (curr_pool, 0);

  /* Free pool list.  */

  while (pool_list)
    {
      struct constant_pool *next = pool_list->next;
      s390_free_pool (pool_list);
      pool_list = next;
    }
}
/* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
   We have decided we cannot use this list, so revert all changes
   to the current function that were done by s390_chunkify_start.  */

static void
s390_chunkify_cancel (struct constant_pool *pool_list)
{
  struct constant_pool *curr_pool = NULL;
  rtx insn;

  /* Remove all pool placeholder insns.  */

  for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
    {
      /* Did we insert an extra barrier?  Remove it.  */
      rtx barrier = PREV_INSN (curr_pool->pool_insn);
      rtx jump = barrier ? PREV_INSN (barrier) : NULL_RTX;
      rtx label = NEXT_INSN (curr_pool->pool_insn);

      if (jump && JUMP_P (jump)
          && barrier && BARRIER_P (barrier)
          && label && LABEL_P (label)
          && GET_CODE (PATTERN (jump)) == SET
          && SET_DEST (PATTERN (jump)) == pc_rtx
          && GET_CODE (SET_SRC (PATTERN (jump))) == LABEL_REF
          && XEXP (SET_SRC (PATTERN (jump)), 0) == label)
        {
          remove_insn (jump);
          remove_insn (barrier);
          remove_insn (label);
        }

      remove_insn (curr_pool->pool_insn);
    }

  /* Remove all base register reload insns.  */

  for (insn = get_insns (); insn; )
    {
      rtx next_insn = NEXT_INSN (insn);

      if (NONJUMP_INSN_P (insn)
          && GET_CODE (PATTERN (insn)) == SET
          && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
          && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_RELOAD_BASE)
        remove_insn (insn);

      insn = next_insn;
    }

  /* Free pool list.  */

  while (pool_list)
    {
      struct constant_pool *next = pool_list->next;
      s390_free_pool (pool_list);
      pool_list = next;
    }
}
/* Output the constant pool entry EXP in mode MODE with alignment ALIGN.  */

void
s390_output_pool_entry (rtx exp, enum machine_mode mode, unsigned int align)
{
  REAL_VALUE_TYPE r;

  switch (GET_MODE_CLASS (mode))
    {
    case MODE_FLOAT:
    case MODE_DECIMAL_FLOAT:
      gcc_assert (GET_CODE (exp) == CONST_DOUBLE);

      REAL_VALUE_FROM_CONST_DOUBLE (r, exp);
      assemble_real (r, mode, align);
      break;

    case MODE_INT:
      assemble_integer (exp, GET_MODE_SIZE (mode), align, 1);
      mark_symbol_refs_as_used (exp);
      break;

    default:
      gcc_unreachable ();
    }
}
/* Return an RTL expression representing the value of the return address
   for the frame COUNT steps up from the current frame.  FRAME is the
   frame pointer of that frame.  */

rtx
s390_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
{
  int offset;
  rtx addr;

  /* Without backchain, we fail for all but the current frame.  */

  if (!TARGET_BACKCHAIN && count > 0)
    return NULL_RTX;

  /* For the current frame, we need to make sure the initial
     value of RETURN_REGNUM is actually saved.  */

  if (count == 0)
    {
      /* On non-z architectures branch splitting could overwrite r14.  */
      if (TARGET_CPU_ZARCH)
        return get_hard_reg_initial_val (Pmode, RETURN_REGNUM);
      else
        {
          cfun_frame_layout.save_return_addr_p = true;
          return gen_rtx_MEM (Pmode, return_address_pointer_rtx);
        }
    }

  if (TARGET_PACKED_STACK)
    offset = -2 * UNITS_PER_LONG;
  else
    offset = RETURN_REGNUM * UNITS_PER_LONG;

  addr = plus_constant (Pmode, frame, offset);
  addr = memory_address (Pmode, addr);
  return gen_rtx_MEM (Pmode, addr);
}
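/* Worked example (illustrative): with the standard 64-bit layout,
   UNITS_PER_LONG is 8 and RETURN_REGNUM is 14, so for COUNT > 0 the
   return address is loaded from FRAME + 14 * 8 = FRAME + 112; with
   -mpacked-stack it sits at FRAME - 2 * 8 = FRAME - 16 instead.  */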
/* Return an RTL expression representing the back chain stored in
   the current stack frame.  */

rtx
s390_back_chain_rtx (void)
{
  rtx chain;

  gcc_assert (TARGET_BACKCHAIN);

  if (TARGET_PACKED_STACK)
    chain = plus_constant (Pmode, stack_pointer_rtx,
                           STACK_POINTER_OFFSET - UNITS_PER_LONG);
  else
    chain = stack_pointer_rtx;

  chain = gen_rtx_MEM (Pmode, chain);
  return chain;
}
/* Find first call clobbered register unused in a function.
   This could be used as base register in a leaf function
   or for holding the return address before epilogue.  */

static int
find_unused_clobbered_reg (void)
{
  int i;
  for (i = 0; i < 6; i++)
    if (!df_regs_ever_live_p (i))
      return i;
  return 0;
}
/* Helper function for s390_regs_ever_clobbered.  Sets the fields in DATA
   for all clobbered hard regs in SETREG.  */

static void
s390_reg_clobbered_rtx (rtx setreg, const_rtx set_insn ATTRIBUTE_UNUSED,
                        void *data)
{
  int *regs_ever_clobbered = (int *)data;
  unsigned int i, regno;
  enum machine_mode mode = GET_MODE (setreg);

  if (GET_CODE (setreg) == SUBREG)
    {
      rtx inner = SUBREG_REG (setreg);
      if (!GENERAL_REG_P (inner))
        return;
      regno = subreg_regno (setreg);
    }
  else if (GENERAL_REG_P (setreg))
    regno = REGNO (setreg);
  else
    return;

  for (i = regno;
       i < regno + HARD_REGNO_NREGS (regno, mode);
       i++)
    regs_ever_clobbered[i] = 1;
}
/* Walks through all basic blocks of the current function looking
   for clobbered hard regs using s390_reg_clobbered_rtx.  The fields
   of the passed integer array REGS_EVER_CLOBBERED are set to one for
   each of those regs.  */

static void
s390_regs_ever_clobbered (int *regs_ever_clobbered)
{
  basic_block cur_bb;
  rtx cur_insn;
  unsigned int i;

  memset (regs_ever_clobbered, 0, 16 * sizeof (int));

  /* For non-leaf functions we have to consider all call clobbered regs to be
     clobbered.  */
  if (!crtl->is_leaf)
    {
      for (i = 0; i < 16; i++)
        regs_ever_clobbered[i] = call_really_used_regs[i];
    }

  /* Make the "magic" eh_return registers live if necessary.  For regs_ever_live
     this work is done by liveness analysis (mark_regs_live_at_end).
     Special care is needed for functions containing landing pads.  Landing pads
     may use the eh registers, but the code which sets these registers is not
     contained in that function.  Hence s390_regs_ever_clobbered is not able to
     deal with this automatically.  */
  if (crtl->calls_eh_return || cfun->machine->has_landing_pad_p)
    for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM ; i++)
      if (crtl->calls_eh_return
          || (cfun->machine->has_landing_pad_p
              && df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
        regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;

  /* For nonlocal gotos all call-saved registers have to be saved.
     This flag is also set for the unwinding code in libgcc.
     See expand_builtin_unwind_init.  For regs_ever_live this is done by
     reload.  */
  if (cfun->has_nonlocal_label)
    for (i = 0; i < 16; i++)
      if (!call_really_used_regs[i])
        regs_ever_clobbered[i] = 1;

  FOR_EACH_BB (cur_bb)
    {
      FOR_BB_INSNS (cur_bb, cur_insn)
        {
          if (INSN_P (cur_insn))
            note_stores (PATTERN (cur_insn),
                         s390_reg_clobbered_rtx,
                         regs_ever_clobbered);
        }
    }
}
/* Determine the frame area which actually has to be accessed
   in the function epilogue.  The values are stored at the
   given pointers AREA_BOTTOM (address of the lowest used stack
   address) and AREA_TOP (address of the first item which does
   not belong to the stack frame).  */

static void
s390_frame_area (int *area_bottom, int *area_top)
{
  int b, t;
  int i;

  b = INT_MAX;
  t = INT_MIN;

  if (cfun_frame_layout.first_restore_gpr != -1)
    {
      b = (cfun_frame_layout.gprs_offset
           + cfun_frame_layout.first_restore_gpr * UNITS_PER_LONG);
      t = b + (cfun_frame_layout.last_restore_gpr
               - cfun_frame_layout.first_restore_gpr + 1) * UNITS_PER_LONG;
    }

  if (TARGET_64BIT && cfun_save_high_fprs_p)
    {
      b = MIN (b, cfun_frame_layout.f8_offset);
      t = MAX (t, (cfun_frame_layout.f8_offset
                   + cfun_frame_layout.high_fprs * 8));
    }

  if (!TARGET_64BIT)
    for (i = 2; i < 4; i++)
      if (cfun_fpr_bit_p (i))
        {
          b = MIN (b, cfun_frame_layout.f4_offset + (i - 2) * 8);
          t = MAX (t, cfun_frame_layout.f4_offset + (i - 1) * 8);
        }

  *area_bottom = b;
  *area_top = t;
}
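/* Worked example (illustrative): with UNITS_PER_LONG == 8, a gprs_offset
   of 48 and gprs 6..15 restored, the formulas above give
   b = 48 + 6 * 8 = 96 and t = 96 + 10 * 8 = 176, so the epilogue must be
   able to address bytes [96, 176) of the frame, possibly widened further
   by the fpr checks.  */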
/* Fill cfun->machine with info about register usage of current function.
   Return in CLOBBERED_REGS which GPRs are currently considered set.  */

static void
s390_register_info (int clobbered_regs[])
{
  int i, j;

  /* fprs 8 - 15 are call saved for 64 Bit ABI.  */
  cfun_frame_layout.fpr_bitmap = 0;
  cfun_frame_layout.high_fprs = 0;
  if (TARGET_64BIT)
    for (i = 24; i < 32; i++)
      if (df_regs_ever_live_p (i) && !global_regs[i])
        {
          cfun_set_fpr_bit (i - 16);
          cfun_frame_layout.high_fprs++;
        }

  /* Find first and last gpr to be saved.  We trust regs_ever_live
     data, except that we don't save and restore global registers.

     Also, all registers with special meaning to the compiler need
     to be handled extra.  */

  s390_regs_ever_clobbered (clobbered_regs);

  for (i = 0; i < 16; i++)
    clobbered_regs[i] = clobbered_regs[i] && !global_regs[i] && !fixed_regs[i];

  if (frame_pointer_needed)
    clobbered_regs[HARD_FRAME_POINTER_REGNUM] = 1;

  if (flag_pic)
    clobbered_regs[PIC_OFFSET_TABLE_REGNUM]
      |= df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM);

  clobbered_regs[BASE_REGNUM]
    |= (cfun->machine->base_reg
        && REGNO (cfun->machine->base_reg) == BASE_REGNUM);

  clobbered_regs[RETURN_REGNUM]
    |= (!crtl->is_leaf
        || TARGET_TPF_PROFILING
        || cfun->machine->split_branches_pending_p
        || cfun_frame_layout.save_return_addr_p
        || crtl->calls_eh_return
        || cfun->stdarg);

  clobbered_regs[STACK_POINTER_REGNUM]
    |= (!crtl->is_leaf
        || TARGET_TPF_PROFILING
        || cfun_save_high_fprs_p
        || get_frame_size () > 0
        || cfun->calls_alloca
        || cfun->stdarg);

  for (i = 6; i < 16; i++)
    if (df_regs_ever_live_p (i) || clobbered_regs[i])
      break;
  for (j = 15; j > i; j--)
    if (df_regs_ever_live_p (j) || clobbered_regs[j])
      break;

  if (i == 16)
    {
      /* Nothing to save/restore.  */
      cfun_frame_layout.first_save_gpr_slot = -1;
      cfun_frame_layout.last_save_gpr_slot = -1;
      cfun_frame_layout.first_save_gpr = -1;
      cfun_frame_layout.first_restore_gpr = -1;
      cfun_frame_layout.last_save_gpr = -1;
      cfun_frame_layout.last_restore_gpr = -1;
    }
  else
    {
      /* Save slots for gprs from i to j.  */
      cfun_frame_layout.first_save_gpr_slot = i;
      cfun_frame_layout.last_save_gpr_slot = j;

      for (i = cfun_frame_layout.first_save_gpr_slot;
           i < cfun_frame_layout.last_save_gpr_slot + 1;
           i++)
        if (clobbered_regs[i])
          break;

      for (j = cfun_frame_layout.last_save_gpr_slot; j > i; j--)
        if (clobbered_regs[j])
          break;

      if (i == cfun_frame_layout.last_save_gpr_slot + 1)
        {
          /* Nothing to save/restore.  */
          cfun_frame_layout.first_save_gpr = -1;
          cfun_frame_layout.first_restore_gpr = -1;
          cfun_frame_layout.last_save_gpr = -1;
          cfun_frame_layout.last_restore_gpr = -1;
        }
      else
        {
          /* Save / Restore from gpr i to j.  */
          cfun_frame_layout.first_save_gpr = i;
          cfun_frame_layout.first_restore_gpr = i;
          cfun_frame_layout.last_save_gpr = j;
          cfun_frame_layout.last_restore_gpr = j;
        }
    }

  if (cfun->stdarg)
    {
      /* Varargs functions need to save gprs 2 to 6.  */
      if (cfun->va_list_gpr_size
          && crtl->args.info.gprs < GP_ARG_NUM_REG)
        {
          int min_gpr = crtl->args.info.gprs;
          int max_gpr = min_gpr + cfun->va_list_gpr_size;
          if (max_gpr > GP_ARG_NUM_REG)
            max_gpr = GP_ARG_NUM_REG;

          if (cfun_frame_layout.first_save_gpr == -1
              || cfun_frame_layout.first_save_gpr > 2 + min_gpr)
            {
              cfun_frame_layout.first_save_gpr = 2 + min_gpr;
              cfun_frame_layout.first_save_gpr_slot = 2 + min_gpr;
            }

          if (cfun_frame_layout.last_save_gpr == -1
              || cfun_frame_layout.last_save_gpr < 2 + max_gpr - 1)
            {
              cfun_frame_layout.last_save_gpr = 2 + max_gpr - 1;
              cfun_frame_layout.last_save_gpr_slot = 2 + max_gpr - 1;
            }
        }

      /* Mark f0, f2 for 31 bit and f0-f4 for 64 bit to be saved.  */
      if (TARGET_HARD_FLOAT && cfun->va_list_fpr_size
          && crtl->args.info.fprs < FP_ARG_NUM_REG)
        {
          int min_fpr = crtl->args.info.fprs;
          int max_fpr = min_fpr + cfun->va_list_fpr_size;
          if (max_fpr > FP_ARG_NUM_REG)
            max_fpr = FP_ARG_NUM_REG;

          /* ??? This is currently required to ensure proper location
             of the fpr save slots within the va_list save area.  */
          if (TARGET_PACKED_STACK)
            min_fpr = 0;

          for (i = min_fpr; i < max_fpr; i++)
            cfun_set_fpr_bit (i);
        }
    }

  if (!TARGET_64BIT)
    for (i = 2; i < 4; i++)
      if (df_regs_ever_live_p (i + 16) && !global_regs[i + 16])
        cfun_set_fpr_bit (i);
}
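/* Worked example (illustrative): in a stdarg function with one named gpr
   argument (crtl->args.info.gprs == 1) and cfun->va_list_gpr_size == 4,
   min_gpr = 1 and max_gpr = 5 (== GP_ARG_NUM_REG), so the code above
   widens the save range to cover gprs 3 through 6 for the va_list save
   area.  */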
/* Fill cfun->machine with info about frame of current function.  */

static void
s390_frame_info (void)
{
  int i;

  cfun_frame_layout.frame_size = get_frame_size ();
  if (!TARGET_64BIT && cfun_frame_layout.frame_size > 0x7fff0000)
    fatal_error ("total size of local variables exceeds architecture limit");

  if (!TARGET_PACKED_STACK)
    {
      cfun_frame_layout.backchain_offset = 0;
      cfun_frame_layout.f0_offset = 16 * UNITS_PER_LONG;
      cfun_frame_layout.f4_offset = cfun_frame_layout.f0_offset + 2 * 8;
      cfun_frame_layout.f8_offset = -cfun_frame_layout.high_fprs * 8;
      cfun_frame_layout.gprs_offset = (cfun_frame_layout.first_save_gpr_slot
                                       * UNITS_PER_LONG);
    }
  else if (TARGET_BACKCHAIN) /* kernel stack layout */
    {
      cfun_frame_layout.backchain_offset = (STACK_POINTER_OFFSET
                                            - UNITS_PER_LONG);
      cfun_frame_layout.gprs_offset
        = (cfun_frame_layout.backchain_offset
           - (STACK_POINTER_REGNUM - cfun_frame_layout.first_save_gpr_slot + 1)
           * UNITS_PER_LONG);

      if (TARGET_64BIT)
        {
          cfun_frame_layout.f4_offset
            = (cfun_frame_layout.gprs_offset
               - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));

          cfun_frame_layout.f0_offset
            = (cfun_frame_layout.f4_offset
               - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
        }
      else
        {
          /* On 31 bit we have to care about alignment of the
             floating point regs to provide fastest access.  */
          cfun_frame_layout.f0_offset
            = ((cfun_frame_layout.gprs_offset
                & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1))
               - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));

          cfun_frame_layout.f4_offset
            = (cfun_frame_layout.f0_offset
               - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
        }
    }
  else /* no backchain */
    {
      cfun_frame_layout.f4_offset
        = (STACK_POINTER_OFFSET
           - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));

      cfun_frame_layout.f0_offset
        = (cfun_frame_layout.f4_offset
           - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));

      cfun_frame_layout.gprs_offset
        = cfun_frame_layout.f0_offset - cfun_gprs_save_area_size;
    }

  if (crtl->is_leaf
      && !TARGET_TPF_PROFILING
      && cfun_frame_layout.frame_size == 0
      && !cfun_save_high_fprs_p
      && !cfun->calls_alloca
      && !cfun->stdarg)
    return;

  if (!TARGET_PACKED_STACK)
    cfun_frame_layout.frame_size += (STACK_POINTER_OFFSET
                                     + crtl->outgoing_args_size
                                     + cfun_frame_layout.high_fprs * 8);
  else
    {
      if (TARGET_BACKCHAIN)
        cfun_frame_layout.frame_size += UNITS_PER_LONG;

      /* No alignment trouble here because f8-f15 are only saved under
         64 bit.  */
      cfun_frame_layout.f8_offset = (MIN (MIN (cfun_frame_layout.f0_offset,
                                               cfun_frame_layout.f4_offset),
                                          cfun_frame_layout.gprs_offset)
                                     - cfun_frame_layout.high_fprs * 8);

      cfun_frame_layout.frame_size += cfun_frame_layout.high_fprs * 8;

      for (i = 0; i < 8; i++)
        if (cfun_fpr_bit_p (i))
          cfun_frame_layout.frame_size += 8;

      cfun_frame_layout.frame_size += cfun_gprs_save_area_size;

      /* If under 31 bit an odd number of gprs has to be saved we have to adjust
         the frame size to sustain 8 byte alignment of stack frames.  */
      cfun_frame_layout.frame_size = ((cfun_frame_layout.frame_size +
                                       STACK_BOUNDARY / BITS_PER_UNIT - 1)
                                      & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1));

      cfun_frame_layout.frame_size += crtl->outgoing_args_size;
    }
}
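/* Worked example (illustrative): the rounding above is the usual
   power-of-two alignment idiom.  With STACK_BOUNDARY / BITS_PER_UNIT == 8,
   a raw frame size of 92 bytes becomes (92 + 7) & ~7 == 96, keeping stack
   frames 8-byte aligned as the comment requires.  */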
/* Generate frame layout.  Fills in register and frame data for the current
   function in cfun->machine.  This routine can be called multiple times;
   it will re-do the complete frame layout every time.  */

static void
s390_init_frame_layout (void)
{
  HOST_WIDE_INT frame_size;
  int base_used;
  int clobbered_regs[16];

  /* On S/390 machines, we may need to perform branch splitting, which
     will require both base and return address register.  We have no
     choice but to assume we're going to need them until right at the
     end of the machine dependent reorg phase.  */
  if (!TARGET_CPU_ZARCH)
    cfun->machine->split_branches_pending_p = true;

  do
    {
      frame_size = cfun_frame_layout.frame_size;

      /* Try to predict whether we'll need the base register.  */
      base_used = cfun->machine->split_branches_pending_p
                  || crtl->uses_const_pool
                  || (!DISP_IN_RANGE (frame_size)
                      && !CONST_OK_FOR_K (frame_size));

      /* Decide which register to use as literal pool base.  In small
         leaf functions, try to use an unused call-clobbered register
         as base register to avoid save/restore overhead.  */
      if (!base_used)
        cfun->machine->base_reg = NULL_RTX;
      else if (crtl->is_leaf && !df_regs_ever_live_p (5))
        cfun->machine->base_reg = gen_rtx_REG (Pmode, 5);
      else
        cfun->machine->base_reg = gen_rtx_REG (Pmode, BASE_REGNUM);

      s390_register_info (clobbered_regs);
      s390_frame_info ();
    }
  while (frame_size != cfun_frame_layout.frame_size);
}
/* Update frame layout.  Recompute actual register save data based on
   current info and update regs_ever_live for the special registers.
   May be called multiple times, but may never cause *more* registers
   to be saved than s390_init_frame_layout allocated room for.  */

static void
s390_update_frame_layout (void)
{
  int clobbered_regs[16];

  s390_register_info (clobbered_regs);

  df_set_regs_ever_live (BASE_REGNUM,
                         clobbered_regs[BASE_REGNUM] ? true : false);
  df_set_regs_ever_live (RETURN_REGNUM,
                         clobbered_regs[RETURN_REGNUM] ? true : false);
  df_set_regs_ever_live (STACK_POINTER_REGNUM,
                         clobbered_regs[STACK_POINTER_REGNUM] ? true : false);

  if (cfun->machine->base_reg)
    df_set_regs_ever_live (REGNO (cfun->machine->base_reg), true);
}
/* Return true if it is legal to put a value with MODE into REGNO.  */

bool
s390_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
{
  switch (REGNO_REG_CLASS (regno))
    {
    case FP_REGS:
      if (REGNO_PAIR_OK (regno, mode))
        {
          if (mode == SImode || mode == DImode)
            return true;

          if (FLOAT_MODE_P (mode) && GET_MODE_CLASS (mode) != MODE_VECTOR_FLOAT)
            return true;
        }
      break;
    case ADDR_REGS:
      if (FRAME_REGNO_P (regno) && mode == Pmode)
        return true;

      /* fallthrough */
    case GENERAL_REGS:
      if (REGNO_PAIR_OK (regno, mode))
        {
          if (TARGET_ZARCH
              || (mode != TFmode && mode != TCmode && mode != TDmode))
            return true;
        }
      break;
    case CC_REGS:
      if (GET_MODE_CLASS (mode) == MODE_CC)
        return true;
      break;
    case ACCESS_REGS:
      if (REGNO_PAIR_OK (regno, mode))
        {
          if (mode == SImode || mode == Pmode)
            return true;
        }
      break;
    default:
      return false;
    }

  return false;
}
/* Return nonzero if register OLD_REG can be renamed to register NEW_REG.  */

bool
s390_hard_regno_rename_ok (unsigned int old_reg, unsigned int new_reg)
{
  /* Once we've decided upon a register to use as base register, it must
     no longer be used for any other purpose.  */
  if (cfun->machine->base_reg)
    if (REGNO (cfun->machine->base_reg) == old_reg
        || REGNO (cfun->machine->base_reg) == new_reg)
      return false;

  return true;
}
/* Maximum number of registers to represent a value of mode MODE
   in a register of class RCLASS.  */

int
s390_class_max_nregs (enum reg_class rclass, enum machine_mode mode)
{
  switch (rclass)
    {
    case FP_REGS:
      if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
        return 2 * ((GET_MODE_SIZE (mode) / 2 + 8 - 1) / 8);
      else
        return (GET_MODE_SIZE (mode) + 8 - 1) / 8;
    case ACCESS_REGS:
      return (GET_MODE_SIZE (mode) + 4 - 1) / 4;
    default:
      break;
    }
  return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
}
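/* Worked examples (illustrative): a TFmode value (16 bytes) needs
   (16 + 7) / 8 == 2 FP_REGS; a complex DFmode value (2 * 8 bytes) needs
   2 * ((16 / 2 + 7) / 8) == 2 as well; and a DImode value on a 31-bit
   target (UNITS_PER_WORD == 4) needs (8 + 3) / 4 == 2 general regs.  */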
/* Return true if we use LRA instead of reload pass.  */
static bool
s390_lra_p (void)
{
  return s390_lra_flag;
}
/* Return true if register FROM can be eliminated via register TO.  */

static bool
s390_can_eliminate (const int from, const int to)
{
  /* On zSeries machines, we have not marked the base register as fixed.
     Instead, we have an elimination rule BASE_REGNUM -> BASE_REGNUM.
     If a function requires the base register, we say here that this
     elimination cannot be performed.  This will cause reload to free
     up the base register (as if it were fixed).  On the other hand,
     if the current function does *not* require the base register, we
     say here the elimination succeeds, which in turn allows reload
     to allocate the base register for any other purpose.  */
  if (from == BASE_REGNUM && to == BASE_REGNUM)
    {
      if (TARGET_CPU_ZARCH)
        {
          s390_init_frame_layout ();
          return cfun->machine->base_reg == NULL_RTX;
        }

      return false;
    }

  /* Everything else must point into the stack frame.  */
  gcc_assert (to == STACK_POINTER_REGNUM
              || to == HARD_FRAME_POINTER_REGNUM);

  gcc_assert (from == FRAME_POINTER_REGNUM
              || from == ARG_POINTER_REGNUM
              || from == RETURN_ADDRESS_POINTER_REGNUM);

  /* Make sure we actually saved the return address.  */
  if (from == RETURN_ADDRESS_POINTER_REGNUM)
    if (!crtl->calls_eh_return
        && !cfun->stdarg
        && !cfun_frame_layout.save_return_addr_p)
      return false;

  return true;
}
/* Return offset between register FROM and TO initially after prolog.  */

HOST_WIDE_INT
s390_initial_elimination_offset (int from, int to)
{
  HOST_WIDE_INT offset;
  int index;

  /* ??? Why are we called for non-eliminable pairs?  */
  if (!s390_can_eliminate (from, to))
    return 0;

  switch (from)
    {
    case FRAME_POINTER_REGNUM:
      offset = (get_frame_size()
                + STACK_POINTER_OFFSET
                + crtl->outgoing_args_size);
      break;

    case ARG_POINTER_REGNUM:
      s390_init_frame_layout ();
      offset = cfun_frame_layout.frame_size + STACK_POINTER_OFFSET;
      break;

    case RETURN_ADDRESS_POINTER_REGNUM:
      s390_init_frame_layout ();
      index = RETURN_REGNUM - cfun_frame_layout.first_save_gpr_slot;
      gcc_assert (index >= 0);
      offset = cfun_frame_layout.frame_size + cfun_frame_layout.gprs_offset;
      offset += index * UNITS_PER_LONG;
      break;

    case BASE_REGNUM:
      offset = 0;
      break;

    default:
      gcc_unreachable ();
    }

  return offset;
}
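/* Worked example (illustrative): eliminating ARG_POINTER_REGNUM into the
   stack pointer on a 64-bit target with a 160-byte frame yields
   offset = 160 + STACK_POINTER_OFFSET (160 for the z/Architecture ABI),
   i.e. incoming arguments are addressed 320 bytes above the new stack
   pointer.  */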
/* Emit insn to save fpr REGNUM at offset OFFSET relative
   to register BASE.  Return generated insn.  */

static rtx
save_fpr (rtx base, int offset, int regnum)
{
  rtx addr;
  addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));

  if (regnum >= 16 && regnum <= (16 + FP_ARG_NUM_REG))
    set_mem_alias_set (addr, get_varargs_alias_set ());
  else
    set_mem_alias_set (addr, get_frame_alias_set ());

  return emit_move_insn (addr, gen_rtx_REG (DFmode, regnum));
}
/* Emit insn to restore fpr REGNUM from offset OFFSET relative
   to register BASE.  Return generated insn.  */

static rtx
restore_fpr (rtx base, int offset, int regnum)
{
  rtx addr;
  addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
  set_mem_alias_set (addr, get_frame_alias_set ());

  return emit_move_insn (gen_rtx_REG (DFmode, regnum), addr);
}
/* Return true if REGNO is a global register, but not one
   of the special ones that need to be saved/restored in anyway.  */

static inline bool
global_not_special_regno_p (int regno)
{
  return (global_regs[regno]
          /* These registers are special and need to be
             restored in any case.  */
          && !(regno == STACK_POINTER_REGNUM
               || regno == RETURN_REGNUM
               || regno == BASE_REGNUM
               || (flag_pic && regno == (int)PIC_OFFSET_TABLE_REGNUM)));
}
/* Generate insn to save registers FIRST to LAST into
   the register save area located at offset OFFSET
   relative to register BASE.  */

static rtx
save_gprs (rtx base, int offset, int first, int last)
{
  rtx addr, insn, note;
  int i;

  addr = plus_constant (Pmode, base, offset);
  addr = gen_rtx_MEM (Pmode, addr);

  set_mem_alias_set (addr, get_frame_alias_set ());

  /* Special-case single register.  */
  if (first == last)
    {
      if (TARGET_64BIT)
        insn = gen_movdi (addr, gen_rtx_REG (Pmode, first));
      else
        insn = gen_movsi (addr, gen_rtx_REG (Pmode, first));

      if (!global_not_special_regno_p (first))
        RTX_FRAME_RELATED_P (insn) = 1;
      return insn;
    }


  insn = gen_store_multiple (addr,
                             gen_rtx_REG (Pmode, first),
                             GEN_INT (last - first + 1));

  if (first <= 6 && cfun->stdarg)
    for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
      {
        rtx mem = XEXP (XVECEXP (PATTERN (insn), 0, i), 0);

        if (first + i <= 6)
          set_mem_alias_set (mem, get_varargs_alias_set ());
      }

  /* We need to set the FRAME_RELATED flag on all SETs
     inside the store-multiple pattern.

     However, we must not emit DWARF records for registers 2..5
     if they are stored for use by variable arguments ...

     ??? Unfortunately, it is not enough to simply not set the
     FRAME_RELATED flags for those SETs, because the first SET
     of the PARALLEL is always treated as if it had the flag
     set, even if it does not.  Therefore we emit a new pattern
     without those registers as REG_FRAME_RELATED_EXPR note.  */

  if (first >= 6 && !global_not_special_regno_p (first))
    {
      rtx pat = PATTERN (insn);

      for (i = 0; i < XVECLEN (pat, 0); i++)
        if (GET_CODE (XVECEXP (pat, 0, i)) == SET
            && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (pat,
                                                                     0, i)))))
          RTX_FRAME_RELATED_P (XVECEXP (pat, 0, i)) = 1;

      RTX_FRAME_RELATED_P (insn) = 1;
    }
  else if (last >= 6)
    {
      int start;

      for (start = first >= 6 ? first : 6; start <= last; start++)
        if (!global_not_special_regno_p (start))
          break;

      if (start > last)
        return insn;

      addr = plus_constant (Pmode, base,
                            offset + (start - first) * UNITS_PER_LONG);
      note = gen_store_multiple (gen_rtx_MEM (Pmode, addr),
                                 gen_rtx_REG (Pmode, start),
                                 GEN_INT (last - start + 1));
      note = PATTERN (note);

      add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);

      for (i = 0; i < XVECLEN (note, 0); i++)
        if (GET_CODE (XVECEXP (note, 0, i)) == SET
            && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (note,
                                                                     0, i)))))
          RTX_FRAME_RELATED_P (XVECEXP (note, 0, i)) = 1;

      RTX_FRAME_RELATED_P (insn) = 1;
    }

  return insn;
}
/* Generate insn to restore registers FIRST to LAST from
   the register save area located at offset OFFSET
   relative to register BASE.  */

static rtx
restore_gprs (rtx base, int offset, int first, int last)
{
  rtx addr, insn;

  addr = plus_constant (Pmode, base, offset);
  addr = gen_rtx_MEM (Pmode, addr);
  set_mem_alias_set (addr, get_frame_alias_set ());

  /* Special-case single register.  */
  if (first == last)
    {
      if (TARGET_64BIT)
        insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
      else
        insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);

      return insn;
    }

  insn = gen_load_multiple (gen_rtx_REG (Pmode, first),
                            addr,
                            GEN_INT (last - first + 1));
  return insn;
}
/* Return insn sequence to load the GOT register.  */

static GTY(()) rtx got_symbol;

rtx
s390_load_got (void)
{
  rtx insns;

  /* We cannot use pic_offset_table_rtx here since we use this
     function also for non-pic if __tls_get_offset is called and in
     that case PIC_OFFSET_TABLE_REGNUM as well as pic_offset_table_rtx
     aren't usable.  */
  rtx got_rtx = gen_rtx_REG (Pmode, 12);

  if (!got_symbol)
    {
      got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
      SYMBOL_REF_FLAGS (got_symbol) = SYMBOL_FLAG_LOCAL;
    }

  start_sequence ();

  if (TARGET_CPU_ZARCH)
    {
      emit_move_insn (got_rtx, got_symbol);
    }
  else
    {
      rtx offset;

      offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got_symbol),
                               UNSPEC_LTREL_OFFSET);
      offset = gen_rtx_CONST (Pmode, offset);
      offset = force_const_mem (Pmode, offset);

      emit_move_insn (got_rtx, offset);

      offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (offset, 0)),
                               UNSPEC_LTREL_BASE);
      offset = gen_rtx_PLUS (Pmode, got_rtx, offset);

      emit_move_insn (got_rtx, offset);
    }

  insns = get_insns ();
  end_sequence ();
  return insns;
}
/* This ties together stack memory (MEM with an alias set of frame_alias_set)
   and the change to the stack pointer.  */

static void
s390_emit_stack_tie (void)
{
  rtx mem = gen_frame_mem (BLKmode,
                           gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));

  emit_insn (gen_stack_tie (mem));
}
/* Expand the prologue into a bunch of separate insns.  */

void
s390_emit_prologue (void)
{
  rtx insn, addr;
  rtx temp_reg;
  int i;
  int offset;
  int next_fpr = 0;

  /* Complete frame layout.  */

  s390_update_frame_layout ();

  /* Annotate all constant pool references to let the scheduler know
     they implicitly use the base register.  */

  push_topmost_sequence ();

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      {
        annotate_constant_pool_refs (&PATTERN (insn));
        df_insn_rescan (insn);
      }

  pop_topmost_sequence ();

  /* Choose best register to use for temp use within prologue.
     See below for why TPF must use the register 1.  */

  if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
      && !crtl->is_leaf
      && !TARGET_TPF_PROFILING)
    temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
  else
    temp_reg = gen_rtx_REG (Pmode, 1);

  /* Save call saved gprs.  */
  if (cfun_frame_layout.first_save_gpr != -1)
    {
      insn = save_gprs (stack_pointer_rtx,
                        cfun_frame_layout.gprs_offset +
                        UNITS_PER_LONG * (cfun_frame_layout.first_save_gpr
                                          - cfun_frame_layout.first_save_gpr_slot),
                        cfun_frame_layout.first_save_gpr,
                        cfun_frame_layout.last_save_gpr);
      emit_insn (insn);
    }

  /* Dummy insn to mark literal pool slot.  */

  if (cfun->machine->base_reg)
    emit_insn (gen_main_pool (cfun->machine->base_reg));

  offset = cfun_frame_layout.f0_offset;

  /* Save f0 and f2.  */
  for (i = 0; i < 2; i++)
    {
      if (cfun_fpr_bit_p (i))
        {
          save_fpr (stack_pointer_rtx, offset, i + 16);
          offset += 8;
        }
      else if (!TARGET_PACKED_STACK)
        offset += 8;
    }

  /* Save f4 and f6.  */
  offset = cfun_frame_layout.f4_offset;
  for (i = 2; i < 4; i++)
    {
      if (cfun_fpr_bit_p (i))
        {
          insn = save_fpr (stack_pointer_rtx, offset, i + 16);
          offset += 8;

          /* If f4 and f6 are call clobbered they are saved due to stdargs and
             therefore are not frame related.  */
          if (!call_really_used_regs[i + 16])
            RTX_FRAME_RELATED_P (insn) = 1;
        }
      else if (!TARGET_PACKED_STACK)
        offset += 8;
    }

  if (TARGET_PACKED_STACK
      && cfun_save_high_fprs_p
      && cfun_frame_layout.f8_offset + cfun_frame_layout.high_fprs * 8 > 0)
    {
      offset = (cfun_frame_layout.f8_offset
                + (cfun_frame_layout.high_fprs - 1) * 8);

      for (i = 15; i > 7 && offset >= 0; i--)
        if (cfun_fpr_bit_p (i))
          {
            insn = save_fpr (stack_pointer_rtx, offset, i + 16);

            RTX_FRAME_RELATED_P (insn) = 1;
            offset -= 8;
          }
      if (offset >= cfun_frame_layout.f8_offset)
        next_fpr = i + 16;
    }

  if (!TARGET_PACKED_STACK)
    next_fpr = cfun_save_high_fprs_p ? 31 : 0;

  if (flag_stack_usage_info)
    current_function_static_stack_size = cfun_frame_layout.frame_size;

  /* Decrement stack pointer.  */

  if (cfun_frame_layout.frame_size > 0)
    {
      rtx frame_off = GEN_INT (-cfun_frame_layout.frame_size);
      rtx real_frame_off;

      if (s390_stack_size)
        {
          HOST_WIDE_INT stack_guard;

          if (s390_stack_guard)
            stack_guard = s390_stack_guard;
          else
            {
              /* If no value for stack guard is provided the smallest power of 2
                 larger than the current frame size is chosen.  */
              stack_guard = 1;
              while (stack_guard < cfun_frame_layout.frame_size)
                stack_guard <<= 1;
            }
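          /* Worked example (illustrative): for a frame size of 6000 bytes
             the loop above leaves stack_guard == 8192, the smallest power
             of 2 not below the frame size.  */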
          if (cfun_frame_layout.frame_size >= s390_stack_size)
            {
              warning (0, "frame size of function %qs is %wd"
                       " bytes exceeding user provided stack limit of "
                       "%d bytes.  "
                       "An unconditional trap is added.",
                       current_function_name(), cfun_frame_layout.frame_size,
                       s390_stack_size);
              emit_insn (gen_trap ());
            }
          else
            {
              /* stack_guard has to be smaller than s390_stack_size.
                 Otherwise we would emit an AND with zero which would
                 not match the test under mask pattern.  */
              if (stack_guard >= s390_stack_size)
                {
                  warning (0, "frame size of function %qs is %wd"
                           " bytes which is more than half the stack size. "
                           "The dynamic check would not be reliable. "
                           "No check emitted for this function.",
                           current_function_name(),
                           cfun_frame_layout.frame_size);
                }
              else
                {
                  HOST_WIDE_INT stack_check_mask = ((s390_stack_size - 1)
                                                    & ~(stack_guard - 1));

                  rtx t = gen_rtx_AND (Pmode, stack_pointer_rtx,
                                       GEN_INT (stack_check_mask));
                  if (TARGET_64BIT)
                    emit_insn (gen_ctrapdi4 (gen_rtx_EQ (VOIDmode,
                                                         t, const0_rtx),
                                             t, const0_rtx, const0_rtx));
                  else
                    emit_insn (gen_ctrapsi4 (gen_rtx_EQ (VOIDmode,
                                                         t, const0_rtx),
                                             t, const0_rtx, const0_rtx));
                }
            }
        }

      if (s390_warn_framesize > 0
          && cfun_frame_layout.frame_size >= s390_warn_framesize)
        warning (0, "frame size of %qs is %wd bytes",
                 current_function_name (), cfun_frame_layout.frame_size);

      if (s390_warn_dynamicstack_p && cfun->calls_alloca)
        warning (0, "%qs uses dynamic stack allocation", current_function_name ());

      /* Save incoming stack pointer into temp reg.  */
      if (TARGET_BACKCHAIN || next_fpr)
        insn = emit_insn (gen_move_insn (temp_reg, stack_pointer_rtx));

      /* Subtract frame size from stack pointer.  */

      if (DISP_IN_RANGE (INTVAL (frame_off)))
        {
          insn = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
                              gen_rtx_PLUS (Pmode, stack_pointer_rtx,
                                            frame_off));
          insn = emit_insn (insn);
        }
      else
        {
          if (!CONST_OK_FOR_K (INTVAL (frame_off)))
            frame_off = force_const_mem (Pmode, frame_off);

          insn = emit_insn (gen_add2_insn (stack_pointer_rtx, frame_off));
          annotate_constant_pool_refs (&PATTERN (insn));
        }

      RTX_FRAME_RELATED_P (insn) = 1;
      real_frame_off = GEN_INT (-cfun_frame_layout.frame_size);
      add_reg_note (insn, REG_FRAME_RELATED_EXPR,
                    gen_rtx_SET (VOIDmode, stack_pointer_rtx,
                                 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
                                               real_frame_off)));

      /* Set backchain.  */

      if (TARGET_BACKCHAIN)
        {
          if (cfun_frame_layout.backchain_offset)
            addr = gen_rtx_MEM (Pmode,
                                plus_constant (Pmode, stack_pointer_rtx,
                                               cfun_frame_layout.backchain_offset));
          else
            addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
          set_mem_alias_set (addr, get_frame_alias_set ());
          insn = emit_insn (gen_move_insn (addr, temp_reg));
        }

      /* If we support non-call exceptions (e.g. for Java),
         we need to make sure the backchain pointer is set up
         before any possibly trapping memory access.  */
      if (TARGET_BACKCHAIN && cfun->can_throw_non_call_exceptions)
        {
          addr = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode));
          emit_clobber (addr);
        }
    }

  /* Save fprs 8 - 15 (64 bit ABI).  */

  if (cfun_save_high_fprs_p && next_fpr)
    {
      /* If the stack might be accessed through a different register
         we have to make sure that the stack pointer decrement is not
         moved below the use of the stack slots.  */
      s390_emit_stack_tie ();

      insn = emit_insn (gen_add2_insn (temp_reg,
                                       GEN_INT (cfun_frame_layout.f8_offset)));

      offset = 0;

      for (i = 24; i <= next_fpr; i++)
        if (cfun_fpr_bit_p (i - 16))
          {
            rtx addr = plus_constant (Pmode, stack_pointer_rtx,
                                      cfun_frame_layout.frame_size
                                      + cfun_frame_layout.f8_offset
                                      + offset);

            insn = save_fpr (temp_reg, offset, i);
            offset += 8;
            RTX_FRAME_RELATED_P (insn) = 1;
            add_reg_note (insn, REG_FRAME_RELATED_EXPR,
                          gen_rtx_SET (VOIDmode,
                                       gen_rtx_MEM (DFmode, addr),
                                       gen_rtx_REG (DFmode, i)));
          }
    }

  /* Set frame pointer, if needed.  */

  if (frame_pointer_needed)
    {
      insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
      RTX_FRAME_RELATED_P (insn) = 1;
    }

  /* Set up got pointer, if needed.  */

  if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
    {
      rtx insns = s390_load_got ();

      for (insn = insns; insn; insn = NEXT_INSN (insn))
        annotate_constant_pool_refs (&PATTERN (insn));

      emit_insn (insns);
    }

  if (TARGET_TPF_PROFILING)
    {
      /* Generate a BAS instruction to serve as a function
         entry intercept to facilitate the use of tracing
         algorithms located at the branch target.  */
      emit_insn (gen_prologue_tpf ());

      /* Emit a blockage here so that all code
         lies between the profiling mechanisms.  */
      emit_insn (gen_blockage ());
    }
}
/* Expand the epilogue into a bunch of separate insns.  */

void
s390_emit_epilogue (bool sibcall)
{
  rtx frame_pointer, return_reg, cfa_restores = NULL_RTX;
  int area_bottom, area_top, offset = 0;
  int next_offset;
  rtvec p;
  int i;

  if (TARGET_TPF_PROFILING)
    {

      /* Generate a BAS instruction to serve as a function
         entry intercept to facilitate the use of tracing
         algorithms located at the branch target.  */

      /* Emit a blockage here so that all code
         lies between the profiling mechanisms.  */
      emit_insn (gen_blockage ());

      emit_insn (gen_epilogue_tpf ());
    }

  /* Check whether to use frame or stack pointer for restore.  */

  frame_pointer = (frame_pointer_needed
                   ? hard_frame_pointer_rtx : stack_pointer_rtx);

  s390_frame_area (&area_bottom, &area_top);

  /* Check whether we can access the register save area.
     If not, increment the frame pointer as required.  */

  if (area_top <= area_bottom)
    {
      /* Nothing to restore.  */
    }
  else if (DISP_IN_RANGE (cfun_frame_layout.frame_size + area_bottom)
           && DISP_IN_RANGE (cfun_frame_layout.frame_size + area_top - 1))
    {
      /* Area is in range.  */
      offset = cfun_frame_layout.frame_size;
    }
  else
    {
      rtx insn, frame_off, cfa;

      offset = area_bottom < 0 ? -area_bottom : 0;
      frame_off = GEN_INT (cfun_frame_layout.frame_size - offset);

      cfa = gen_rtx_SET (VOIDmode, frame_pointer,
                         gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
      if (DISP_IN_RANGE (INTVAL (frame_off)))
        {
          insn = gen_rtx_SET (VOIDmode, frame_pointer,
                              gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
          insn = emit_insn (insn);
        }
      else
        {
          if (!CONST_OK_FOR_K (INTVAL (frame_off)))
            frame_off = force_const_mem (Pmode, frame_off);

          insn = emit_insn (gen_add2_insn (frame_pointer, frame_off));
          annotate_constant_pool_refs (&PATTERN (insn));
        }
      add_reg_note (insn, REG_CFA_ADJUST_CFA, cfa);
      RTX_FRAME_RELATED_P (insn) = 1;
    }

  /* Restore call saved fprs.  */

  if (TARGET_64BIT)
    {
      if (cfun_save_high_fprs_p)
        {
          next_offset = cfun_frame_layout.f8_offset;
          for (i = 24; i < 32; i++)
            {
              if (cfun_fpr_bit_p (i - 16))
                {
                  restore_fpr (frame_pointer,
                               offset + next_offset, i);
                  cfa_restores
                    = alloc_reg_note (REG_CFA_RESTORE,
                                      gen_rtx_REG (DFmode, i), cfa_restores);
                  next_offset += 8;
                }
            }
        }

    }
  else
    {
      next_offset = cfun_frame_layout.f4_offset;
      for (i = 18; i < 20; i++)
        {
          if (cfun_fpr_bit_p (i - 16))
            {
              restore_fpr (frame_pointer,
                           offset + next_offset, i);
              cfa_restores
                = alloc_reg_note (REG_CFA_RESTORE,
                                  gen_rtx_REG (DFmode, i), cfa_restores);
              next_offset += 8;
            }
          else if (!TARGET_PACKED_STACK)
            next_offset += 8;
        }

    }

  /* Return register.  */

  return_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);

  /* Restore call saved gprs.  */

  if (cfun_frame_layout.first_restore_gpr != -1)
    {
      rtx insn, addr;

      /* Check for global register and save them
         to stack location from where they get restored.  */

      for (i = cfun_frame_layout.first_restore_gpr;
           i <= cfun_frame_layout.last_restore_gpr;
           i++)
        {
          if (global_not_special_regno_p (i))
            {
              addr = plus_constant (Pmode, frame_pointer,
                                    offset + cfun_frame_layout.gprs_offset
                                    + (i - cfun_frame_layout.first_save_gpr_slot)
                                    * UNITS_PER_LONG);
              addr = gen_rtx_MEM (Pmode, addr);
              set_mem_alias_set (addr, get_frame_alias_set ());
              emit_move_insn (addr, gen_rtx_REG (Pmode, i));
            }
          else
            cfa_restores
              = alloc_reg_note (REG_CFA_RESTORE,
                                gen_rtx_REG (Pmode, i), cfa_restores);
        }

      if (! sibcall)
        {
          /* Fetch return address from stack before load multiple,
             this will do good for scheduling.  */

          if (cfun_frame_layout.save_return_addr_p
              || (cfun_frame_layout.first_restore_gpr < BASE_REGNUM
                  && cfun_frame_layout.last_restore_gpr > RETURN_REGNUM))
            {
              int return_regnum = find_unused_clobbered_reg();
              if (!return_regnum)
                return_regnum = 4;
              return_reg = gen_rtx_REG (Pmode, return_regnum);

              addr = plus_constant (Pmode, frame_pointer,
                                    offset + cfun_frame_layout.gprs_offset
                                    + (RETURN_REGNUM
                                       - cfun_frame_layout.first_save_gpr_slot)
                                    * UNITS_PER_LONG);
              addr = gen_rtx_MEM (Pmode, addr);
              set_mem_alias_set (addr, get_frame_alias_set ());
              emit_move_insn (return_reg, addr);
            }
        }

      insn = restore_gprs (frame_pointer,
                           offset + cfun_frame_layout.gprs_offset
                           + (cfun_frame_layout.first_restore_gpr
                              - cfun_frame_layout.first_save_gpr_slot)
                           * UNITS_PER_LONG,
                           cfun_frame_layout.first_restore_gpr,
                           cfun_frame_layout.last_restore_gpr);
      insn = emit_insn (insn);
      REG_NOTES (insn) = cfa_restores;
      add_reg_note (insn, REG_CFA_DEF_CFA,
                    plus_constant (Pmode, stack_pointer_rtx,
                                   STACK_POINTER_OFFSET));
      RTX_FRAME_RELATED_P (insn) = 1;
    }

  if (! sibcall)
    {

      /* Return to caller.  */

      p = rtvec_alloc (2);

      RTVEC_ELT (p, 0) = ret_rtx;
      RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode, return_reg);
      emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
    }
}
/* Return the size in bytes of a function argument of
   type TYPE and/or mode MODE.  At least one of TYPE or
   MODE must be specified.  */

static int
s390_function_arg_size (enum machine_mode mode, const_tree type)
{
  if (type)
    return int_size_in_bytes (type);

  /* No type info available for some library calls ...  */
  if (mode != BLKmode)
    return GET_MODE_SIZE (mode);

  /* If we have neither type nor mode, abort */
  gcc_unreachable ();
}
/* Return true if a function argument of type TYPE and mode MODE
   is to be passed in a floating-point register, if available.  */

static bool
s390_function_arg_float (enum machine_mode mode, const_tree type)
{
  int size = s390_function_arg_size (mode, type);
  if (size > 8)
    return false;

  /* Soft-float changes the ABI: no floating-point registers are used.  */
  if (TARGET_SOFT_FLOAT)
    return false;

  /* No type info available for some library calls ...  */
  if (!type)
    return mode == SFmode || mode == DFmode || mode == SDmode || mode == DDmode;

  /* The ABI says that record types with a single member are treated
     just like that member would be.  */
  while (TREE_CODE (type) == RECORD_TYPE)
    {
      tree field, single = NULL_TREE;

      for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
        {
          if (TREE_CODE (field) != FIELD_DECL)
            continue;

          if (single == NULL_TREE)
            single = TREE_TYPE (field);
          else
            return false;
        }

      if (single == NULL_TREE)
        return false;
      else
        type = single;
    }

  return TREE_CODE (type) == REAL_TYPE;
}
/* Return true if a function argument of type TYPE and mode MODE
   is to be passed in an integer register, or a pair of integer
   registers, if available.  */

static bool
s390_function_arg_integer (enum machine_mode mode, const_tree type)
{
  int size = s390_function_arg_size (mode, type);
  if (size > 8)
    return false;

  /* No type info available for some library calls ...  */
  if (!type)
    return GET_MODE_CLASS (mode) == MODE_INT
           || (TARGET_SOFT_FLOAT && SCALAR_FLOAT_MODE_P (mode));

  /* We accept small integral (and similar) types.  */
  if (INTEGRAL_TYPE_P (type)
      || POINTER_TYPE_P (type)
      || TREE_CODE (type) == NULLPTR_TYPE
      || TREE_CODE (type) == OFFSET_TYPE
      || (TARGET_SOFT_FLOAT && TREE_CODE (type) == REAL_TYPE))
    return true;

  /* We also accept structs of size 1, 2, 4, 8 that are not
     passed in floating-point registers.  */
  if (AGGREGATE_TYPE_P (type)
      && exact_log2 (size) >= 0
      && !s390_function_arg_float (mode, type))
    return true;

  return false;
}
/* Return 1 if a function argument of type TYPE and mode MODE
   is to be passed by reference.  The ABI specifies that only
   structures of size 1, 2, 4, or 8 bytes are passed by value,
   all other structures (and complex numbers) are passed by
   reference.  */

static bool
s390_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
                        enum machine_mode mode, const_tree type,
                        bool named ATTRIBUTE_UNUSED)
{
  int size = s390_function_arg_size (mode, type);
  if (size > 8)
    return true;

  if (type)
    {
      if (AGGREGATE_TYPE_P (type) && exact_log2 (size) < 0)
        return 1;

      if (TREE_CODE (type) == COMPLEX_TYPE
          || TREE_CODE (type) == VECTOR_TYPE)
        return 1;
    }

  return 0;
}
/* Update the data in CUM to advance over an argument of mode MODE and
   data type TYPE.  (TYPE is null for libcalls where that information
   may not be available.).  The boolean NAMED specifies whether the
   argument is a named argument (as opposed to an unnamed argument
   matching an ellipsis).  */

static void
s390_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
                           const_tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);

  if (s390_function_arg_float (mode, type))
    {
      cum->fprs += 1;
    }
  else if (s390_function_arg_integer (mode, type))
    {
      int size = s390_function_arg_size (mode, type);
      cum->gprs += ((size + UNITS_PER_LONG - 1) / UNITS_PER_LONG);
    }
  else
    gcc_unreachable ();
}
/* Define where to put the arguments to a function.
   Value is zero to push the argument on the stack,
   or a hard register in which to store the argument.

   MODE is the argument's machine mode.
   TYPE is the data type of the argument (as a tree).
    This is null for libcalls where that information may
    not be available.
   CUM is a variable of type CUMULATIVE_ARGS which gives info about
    the preceding args and about the function being called.
   NAMED is nonzero if this argument is a named parameter
    (otherwise it is an extra parameter matching an ellipsis).

   On S/390, we use general purpose registers 2 through 6 to
   pass integer, pointer, and certain structure arguments, and
   floating point registers 0 and 2 (0, 2, 4, and 6 on 64-bit)
   to pass floating point arguments.  All remaining arguments
   are pushed to the stack.  */
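/* Example (illustrative, not from the original sources): for a call
   f (int a, double b, long long c) on a 31-bit target, a is passed in
   gpr 2, b in fpr 0, and c, needing two gprs, as a register pair built
   from gprs 3 and 4 by the PARALLEL case below, leaving gprs 5 and 6
   for further integer arguments.  */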
static rtx
s390_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
                   const_tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);

  if (s390_function_arg_float (mode, type))
    {
      if (cum->fprs + 1 > FP_ARG_NUM_REG)
        return 0;
      else
        return gen_rtx_REG (mode, cum->fprs + 16);
    }
  else if (s390_function_arg_integer (mode, type))
    {
      int size = s390_function_arg_size (mode, type);
      int n_gprs = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;

      if (cum->gprs + n_gprs > GP_ARG_NUM_REG)
        return 0;
      else if (n_gprs == 1 || UNITS_PER_WORD == UNITS_PER_LONG)
        return gen_rtx_REG (mode, cum->gprs + 2);
      else if (n_gprs == 2)
        {
          rtvec p = rtvec_alloc (2);

          RTVEC_ELT (p, 0)
            = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 2),
                                 const0_rtx);
          RTVEC_ELT (p, 1)
            = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 3),
                                 GEN_INT (4));

          return gen_rtx_PARALLEL (mode, p);
        }
    }

  /* After the real arguments, expand_call calls us once again
     with a void_type_node type.  Whatever we return here is
     passed as operand 2 to the call expanders.

     We don't need this feature ...  */
  else if (type == void_type_node)
    return const0_rtx;

  gcc_unreachable ();
}
/* Return true if return values of type TYPE should be returned
   in a memory buffer whose address is passed by the caller as
   hidden first argument.  */

static bool
s390_return_in_memory (const_tree type, const_tree fundecl ATTRIBUTE_UNUSED)
{
  /* We accept small integral (and similar) types.  */
  if (INTEGRAL_TYPE_P (type)
      || POINTER_TYPE_P (type)
      || TREE_CODE (type) == OFFSET_TYPE
      || TREE_CODE (type) == REAL_TYPE)
    return int_size_in_bytes (type) > 8;

  /* Aggregates and similar constructs are always returned
     in memory.  */
  if (AGGREGATE_TYPE_P (type)
      || TREE_CODE (type) == COMPLEX_TYPE
      || TREE_CODE (type) == VECTOR_TYPE)
    return true;

  /* ??? We get called on all sorts of random stuff from
     aggregate_value_p.  We can't abort, but it's not clear
     what's safe to return.  Pretend it's a struct I guess.  */
  return true;
}
/* Function arguments and return values are promoted to word size.  */

static enum machine_mode
s390_promote_function_mode (const_tree type, enum machine_mode mode,
                            int *punsignedp,
                            const_tree fntype ATTRIBUTE_UNUSED,
                            int for_return ATTRIBUTE_UNUSED)
{
  if (INTEGRAL_MODE_P (mode)
      && GET_MODE_SIZE (mode) < UNITS_PER_LONG)
    {
      if (type != NULL_TREE && POINTER_TYPE_P (type))
        *punsignedp = POINTERS_EXTEND_UNSIGNED;
      return Pmode;
    }

  return mode;
}
/* Define where to return a (scalar) value of type RET_TYPE.
   If RET_TYPE is null, define where to return a (scalar)
   value of mode MODE from a libcall.  */

static rtx
s390_function_and_libcall_value (enum machine_mode mode,
                                 const_tree ret_type,
                                 const_tree fntype_or_decl,
                                 bool outgoing ATTRIBUTE_UNUSED)
{
  /* For normal functions perform the promotion as
     promote_function_mode would do.  */
  if (ret_type)
    {
      int unsignedp = TYPE_UNSIGNED (ret_type);
      mode = promote_function_mode (ret_type, mode, &unsignedp,
                                    fntype_or_decl, 1);
    }

  gcc_assert (GET_MODE_CLASS (mode) == MODE_INT || SCALAR_FLOAT_MODE_P (mode));
  gcc_assert (GET_MODE_SIZE (mode) <= 8);

  if (TARGET_HARD_FLOAT && SCALAR_FLOAT_MODE_P (mode))
    return gen_rtx_REG (mode, 16);
  else if (GET_MODE_SIZE (mode) <= UNITS_PER_LONG
           || UNITS_PER_LONG == UNITS_PER_WORD)
    return gen_rtx_REG (mode, 2);
  else if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_LONG)
    {
      /* This case is triggered when returning a 64 bit value with
         -m31 -mzarch.  Although the value would fit into a single
         register it has to be forced into a 32 bit register pair in
         order to match the ABI.  */
      rtvec p = rtvec_alloc (2);

      RTVEC_ELT (p, 0)
        = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 2), const0_rtx);
      RTVEC_ELT (p, 1)
        = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 3), GEN_INT (4));

      return gen_rtx_PARALLEL (mode, p);
    }

  gcc_unreachable ();
}
/* Define where to return a scalar return value of type RET_TYPE.  */

static rtx
s390_function_value (const_tree ret_type, const_tree fn_decl_or_type,
                     bool outgoing)
{
  return s390_function_and_libcall_value (TYPE_MODE (ret_type), ret_type,
                                          fn_decl_or_type, outgoing);
}
/* Define where to return a scalar libcall return value of mode
   MODE.  */

static rtx
s390_libcall_value (enum machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
{
  return s390_function_and_libcall_value (mode, NULL_TREE,
                                          NULL_TREE, true);
}
/* Create and return the va_list datatype.

   On S/390, va_list is an array type equivalent to

      typedef struct __va_list_tag
        {
            long __gpr;
            long __fpr;
            void *__overflow_arg_area;
            void *__reg_save_area;
        } va_list[1];

   where __gpr and __fpr hold the number of general purpose
   or floating point arguments used up to now, respectively,
   __overflow_arg_area points to the stack location of the
   next argument passed on the stack, and __reg_save_area
   always points to the start of the register area in the
   call frame of the current function.  The function prologue
   saves all registers used for argument passing into this
   area if the function uses variable arguments.  */

static tree
s390_build_builtin_va_list (void)
{
  tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;

  record = lang_hooks.types.make_type (RECORD_TYPE);

  type_decl =
    build_decl (BUILTINS_LOCATION,
                TYPE_DECL, get_identifier ("__va_list_tag"), record);

  f_gpr = build_decl (BUILTINS_LOCATION,
                      FIELD_DECL, get_identifier ("__gpr"),
                      long_integer_type_node);
  f_fpr = build_decl (BUILTINS_LOCATION,
                      FIELD_DECL, get_identifier ("__fpr"),
                      long_integer_type_node);
  f_ovf = build_decl (BUILTINS_LOCATION,
                      FIELD_DECL, get_identifier ("__overflow_arg_area"),
                      ptr_type_node);
  f_sav = build_decl (BUILTINS_LOCATION,
                      FIELD_DECL, get_identifier ("__reg_save_area"),
                      ptr_type_node);

  va_list_gpr_counter_field = f_gpr;
  va_list_fpr_counter_field = f_fpr;

  DECL_FIELD_CONTEXT (f_gpr) = record;
  DECL_FIELD_CONTEXT (f_fpr) = record;
  DECL_FIELD_CONTEXT (f_ovf) = record;
  DECL_FIELD_CONTEXT (f_sav) = record;

  TYPE_STUB_DECL (record) = type_decl;
  TYPE_NAME (record) = type_decl;
  TYPE_FIELDS (record) = f_gpr;
  DECL_CHAIN (f_gpr) = f_fpr;
  DECL_CHAIN (f_fpr) = f_ovf;
  DECL_CHAIN (f_ovf) = f_sav;

  layout_type (record);

  /* The correct type is an array type of one element.  */
  return build_array_type (record, build_index_type (size_zero_node));
}
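/* Illustrative layout note (an inference from the record built above): on
   a 64-bit target the record is 32 bytes (two longs plus two pointers),
   and because va_list is an array of one element it decays to a pointer
   to that record, which is why valist is dereferenced with
   build_simple_mem_ref / build_va_arg_indirect_ref in the routines
   below.  */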
/* Implement va_start by filling the va_list structure VALIST.
   STDARG_P is always true, and ignored.
   NEXTARG points to the first anonymous stack argument.

   The following global variables are used to initialize
   the va_list structure:

     crtl->args.info:
       holds number of gprs and fprs used for named arguments.
     crtl->args.arg_offset_rtx:
       holds the offset of the first anonymous stack argument
       (relative to the virtual arg pointer).  */

static void
s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
{
  HOST_WIDE_INT n_gpr, n_fpr;
  int off;
  tree f_gpr, f_fpr, f_ovf, f_sav;
  tree gpr, fpr, ovf, sav, t;

  f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
  f_fpr = DECL_CHAIN (f_gpr);
  f_ovf = DECL_CHAIN (f_fpr);
  f_sav = DECL_CHAIN (f_ovf);

  valist = build_simple_mem_ref (valist);
  gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
  fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
  ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
  sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);

  /* Count number of gp and fp argument registers used.  */

  n_gpr = crtl->args.info.gprs;
  n_fpr = crtl->args.info.fprs;

  if (cfun->va_list_gpr_size)
    {
      t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
                  build_int_cst (NULL_TREE, n_gpr));
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }

  if (cfun->va_list_fpr_size)
    {
      t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
                  build_int_cst (NULL_TREE, n_fpr));
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }

  /* Find the overflow area.  */
  if (n_gpr + cfun->va_list_gpr_size > GP_ARG_NUM_REG
      || n_fpr + cfun->va_list_fpr_size > FP_ARG_NUM_REG)
    {
      t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);

      off = INTVAL (crtl->args.arg_offset_rtx);
      off = off < 0 ? 0 : off;
      if (TARGET_DEBUG_ARG)
        fprintf (stderr, "va_start: n_gpr = %d, n_fpr = %d off %d\n",
                 (int)n_gpr, (int)n_fpr, off);

      t = fold_build_pointer_plus_hwi (t, off);

      t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }

  /* Find the register save area.  */
  if ((cfun->va_list_gpr_size && n_gpr < GP_ARG_NUM_REG)
      || (cfun->va_list_fpr_size && n_fpr < FP_ARG_NUM_REG))
    {
      t = make_tree (TREE_TYPE (sav), return_address_pointer_rtx);
      t = fold_build_pointer_plus_hwi (t, -RETURN_REGNUM * UNITS_PER_LONG);

      t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }
}
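
/* Illustration only, not from the GCC sources: in effect s390_va_start
   performs, with ap denoting the va_list object:

     ap->__gpr = n_gpr;
     ap->__fpr = n_fpr;
     ap->__overflow_arg_area = virtual_incoming_args + off;
     ap->__reg_save_area = return_address_pointer
                           - RETURN_REGNUM * UNITS_PER_LONG;

   where the guards above skip any store whose field the function never
   reads (cfun->va_list_gpr_size / va_list_fpr_size being zero).  */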
/* Implement va_arg by updating the va_list structure
   VALIST as required to retrieve an argument of type
   TYPE, and returning that argument.

   Generates code equivalent to:

   if (integral value) {
     if (size  <= 4 && args.gpr < 5 ||
         size  > 4 && args.gpr < 4 )
       ret = args.reg_save_area[args.gpr+8]
     else
       ret = *args.overflow_arg_area++;
   } else if (float value) {
     if (args.fgpr < 2)
       ret = args.reg_save_area[args.fpr+64]
     else
       ret = *args.overflow_arg_area++;
   } else if (aggregate value) {
     if (args.gpr < 5)
       ret = *args.reg_save_area[args.gpr]
     else
       ret = **args.overflow_arg_area++;
   } */

static tree
s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
                      gimple_seq *post_p ATTRIBUTE_UNUSED)
{
  tree f_gpr, f_fpr, f_ovf, f_sav;
  tree gpr, fpr, ovf, sav, reg, t, u;
  int indirect_p, size, n_reg, sav_ofs, sav_scale, max_reg;
  tree lab_false, lab_over, addr;

  f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
  f_fpr = DECL_CHAIN (f_gpr);
  f_ovf = DECL_CHAIN (f_fpr);
  f_sav = DECL_CHAIN (f_ovf);

  valist = build_va_arg_indirect_ref (valist);
  gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
  fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
  sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);

  /* The tree for args* cannot be shared between gpr/fpr and ovf since
     both appear on a lhs.  */
  valist = unshare_expr (valist);
  ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);

  size = int_size_in_bytes (type);

  if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
    {
      if (TARGET_DEBUG_ARG)
        {
          fprintf (stderr, "va_arg: aggregate type");
          debug_tree (type);
        }

      /* Aggregates are passed by reference.  */
      indirect_p = 1;
      reg = gpr;
      n_reg = 1;

      /* kernel stack layout on 31 bit: It is assumed here that no padding
         will be added by s390_frame_info because for va_args always an even
         number of gprs has to be saved r15-r2 = 14 regs.  */
      sav_ofs = 2 * UNITS_PER_LONG;
      sav_scale = UNITS_PER_LONG;
      size = UNITS_PER_LONG;
      max_reg = GP_ARG_NUM_REG - n_reg;
    }
  else if (s390_function_arg_float (TYPE_MODE (type), type))
    {
      if (TARGET_DEBUG_ARG)
        {
          fprintf (stderr, "va_arg: float type");
          debug_tree (type);
        }

      /* FP args go in FP registers, if present.  */
      indirect_p = 0;
      reg = fpr;
      n_reg = 1;
      sav_ofs = 16 * UNITS_PER_LONG;
      sav_scale = 8;
      max_reg = FP_ARG_NUM_REG - n_reg;
    }
  else
    {
      if (TARGET_DEBUG_ARG)
        {
          fprintf (stderr, "va_arg: other type");
          debug_tree (type);
        }

      /* Otherwise into GP registers.  */
      indirect_p = 0;
      reg = gpr;
      n_reg = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;

      /* kernel stack layout on 31 bit: It is assumed here that no padding
         will be added by s390_frame_info because for va_args always an even
         number of gprs has to be saved r15-r2 = 14 regs.  */
      sav_ofs = 2 * UNITS_PER_LONG;

      if (size < UNITS_PER_LONG)
        sav_ofs += UNITS_PER_LONG - size;

      sav_scale = UNITS_PER_LONG;
      max_reg = GP_ARG_NUM_REG - n_reg;
    }

  /* Pull the value out of the saved registers ...  */

  lab_false = create_artificial_label (UNKNOWN_LOCATION);
  lab_over = create_artificial_label (UNKNOWN_LOCATION);
  addr = create_tmp_var (ptr_type_node, "addr");

  t = fold_convert (TREE_TYPE (reg), size_int (max_reg));
  t = build2 (GT_EXPR, boolean_type_node, reg, t);
  u = build1 (GOTO_EXPR, void_type_node, lab_false);
  t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
  gimplify_and_add (t, pre_p);

  t = fold_build_pointer_plus_hwi (sav, sav_ofs);
  u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
              fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
  t = fold_build_pointer_plus (t, u);

  gimplify_assign (addr, t, pre_p);

  gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));

  gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));

  /* ... Otherwise out of the overflow area.  */

  t = ovf;
  if (size < UNITS_PER_LONG)
    t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG - size);

  gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);

  gimplify_assign (addr, t, pre_p);

  t = fold_build_pointer_plus_hwi (t, size);
  gimplify_assign (ovf, t, pre_p);

  gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));

  /* Increment register save count.  */

  u = build2 (PREINCREMENT_EXPR, TREE_TYPE (reg), reg,
              fold_convert (TREE_TYPE (reg), size_int (n_reg)));
  gimplify_and_add (u, pre_p);

  if (indirect_p)
    {
      t = build_pointer_type_for_mode (build_pointer_type (type),
                                       ptr_mode, true);
      addr = fold_convert (t, addr);
      addr = build_va_arg_indirect_ref (addr);
    }
  else
    {
      t = build_pointer_type_for_mode (type, ptr_mode, true);
      addr = fold_convert (t, addr);
    }

  return build_va_arg_indirect_ref (addr);
}
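
/* Illustration only, not from the GCC sources: on a 64-bit target
   (UNITS_PER_LONG == 8), va_arg (ap, int) has size 4, so the register
   path above computes

     sav_ofs = 2 * 8 + (8 - 4) = 20
     addr    = ap->__reg_save_area + 20 + ap->__gpr * 8

   i.e. the low 4 bytes of the next 8-byte gpr slot, while the overflow
   path still advances __overflow_arg_area by a full 8 bytes.  */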
/* Output assembly code for the trampoline template to
   stdio stream FILE.

   On S/390, we use gpr 1 internally in the trampoline code;
   gpr 0 is used to hold the static chain.  */

static void
s390_asm_trampoline_template (FILE *file)
{
  rtx op[2];
  op[0] = gen_rtx_REG (Pmode, 0);
  op[1] = gen_rtx_REG (Pmode, 1);

  if (TARGET_64BIT)
    {
      output_asm_insn ("basr\t%1,0", op);         /* 2 byte */
      output_asm_insn ("lmg\t%0,%1,14(%1)", op);  /* 6 byte */
      output_asm_insn ("br\t%1", op);             /* 2 byte */
      ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 10));
    }
  else
    {
      output_asm_insn ("basr\t%1,0", op);         /* 2 byte */
      output_asm_insn ("lm\t%0,%1,6(%1)", op);    /* 4 byte */
      output_asm_insn ("br\t%1", op);             /* 2 byte */
      ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 8));
    }
}

/* Emit RTL insns to initialize the variable parts of a trampoline.
   FNADDR is an RTX for the address of the function's pure code.
   CXT is an RTX for the static chain value for the function.  */

static void
s390_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
{
  rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
  rtx mem;

  emit_block_move (m_tramp, assemble_trampoline_template (),
                   GEN_INT (2 * UNITS_PER_LONG), BLOCK_OP_NORMAL);

  mem = adjust_address (m_tramp, Pmode, 2 * UNITS_PER_LONG);
  emit_move_insn (mem, cxt);
  mem = adjust_address (m_tramp, Pmode, 3 * UNITS_PER_LONG);
  emit_move_insn (mem, fnaddr);
}
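
/* Illustration only, not from the GCC sources: the initialized 64-bit
   trampoline looks like

     offset  0:  basr %r1,0            -- r1 := address of the lmg
     offset  2:  lmg  %r0,%r1,14(%r1)  -- load chain and target address
     offset  8:  br   %r1              -- jump to the target
     offset 16:  static chain value    -- stored by s390_trampoline_init
     offset 24:  function address      -- stored by s390_trampoline_init

   since 14(%r1), with r1 pointing at offset 2, addresses offset 16.  */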
/* Output assembler code to FILE to increment profiler label # LABELNO
   for profiling a function entry.  */

void
s390_function_profiler (FILE *file, int labelno)
{
  rtx op[7];

  char label[128];
  ASM_GENERATE_INTERNAL_LABEL (label, "LP", labelno);

  fprintf (file, "# function profiler \n");

  op[0] = gen_rtx_REG (Pmode, RETURN_REGNUM);
  op[1] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
  op[1] = gen_rtx_MEM (Pmode, plus_constant (Pmode, op[1], UNITS_PER_LONG));

  op[2] = gen_rtx_REG (Pmode, 1);
  op[3] = gen_rtx_SYMBOL_REF (Pmode, label);
  SYMBOL_REF_FLAGS (op[3]) = SYMBOL_FLAG_LOCAL;

  op[4] = gen_rtx_SYMBOL_REF (Pmode, "_mcount");
  if (flag_pic)
    {
      op[4] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[4]), UNSPEC_PLT);
      op[4] = gen_rtx_CONST (Pmode, op[4]);
    }

  if (TARGET_64BIT)
    {
      output_asm_insn ("stg\t%0,%1", op);
      output_asm_insn ("larl\t%2,%3", op);
      output_asm_insn ("brasl\t%0,%4", op);
      output_asm_insn ("lg\t%0,%1", op);
    }
  else if (!flag_pic)
    {
      op[6] = gen_label_rtx ();

      output_asm_insn ("st\t%0,%1", op);
      output_asm_insn ("bras\t%2,%l6", op);
      output_asm_insn (".long\t%4", op);
      output_asm_insn (".long\t%3", op);
      targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
      output_asm_insn ("l\t%0,0(%2)", op);
      output_asm_insn ("l\t%2,4(%2)", op);
      output_asm_insn ("basr\t%0,%0", op);
      output_asm_insn ("l\t%0,%1", op);
    }
  else
    {
      op[5] = gen_label_rtx ();
      op[6] = gen_label_rtx ();

      output_asm_insn ("st\t%0,%1", op);
      output_asm_insn ("bras\t%2,%l6", op);
      targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[5]));
      output_asm_insn (".long\t%4-%l5", op);
      output_asm_insn (".long\t%3-%l5", op);
      targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
      output_asm_insn ("lr\t%0,%2", op);
      output_asm_insn ("a\t%0,0(%2)", op);
      output_asm_insn ("a\t%2,4(%2)", op);
      output_asm_insn ("basr\t%0,%0", op);
      output_asm_insn ("l\t%0,%1", op);
    }
}
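
/* Illustration only, not from the GCC sources: on a 64-bit target the
   sequence above emits, for label number 0:

     stg   %r14,8(%r15)    -- save the return address
     larl  %r1,.LP0        -- address of the counter label
     brasl %r14,_mcount    -- call the profiler runtime
     lg    %r14,8(%r15)    -- restore the return address

   so _mcount receives the caller's return address in %r14 and the
   counter label address in %r1.  */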
/* Encode symbol attributes (local vs. global, tls model) of a SYMBOL_REF
   into its SYMBOL_REF_FLAGS.  */

static void
s390_encode_section_info (tree decl, rtx rtl, int first)
{
  default_encode_section_info (decl, rtl, first);

  if (TREE_CODE (decl) == VAR_DECL)
    {
      /* If a variable has a forced alignment to < 2 bytes, mark it
         with SYMBOL_FLAG_ALIGN1 to prevent it from being used as LARL
         operand.  */
      if (DECL_USER_ALIGN (decl) && DECL_ALIGN (decl) < 16)
        SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_ALIGN1;
      if (!DECL_SIZE (decl)
          || !DECL_ALIGN (decl)
          || !host_integerp (DECL_SIZE (decl), 0)
          || (DECL_ALIGN (decl) <= 64
              && DECL_ALIGN (decl) != tree_low_cst (DECL_SIZE (decl), 0)))
        SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
    }

  /* Literal pool references don't have a decl so they are handled
     differently here.  We rely on the information in the MEM_ALIGN
     entry to decide upon natural alignment.  */
  if (MEM_P (rtl)
      && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
      && TREE_CONSTANT_POOL_ADDRESS_P (XEXP (rtl, 0))
      && (MEM_ALIGN (rtl) == 0
          || GET_MODE_BITSIZE (GET_MODE (rtl)) == 0
          || MEM_ALIGN (rtl) < GET_MODE_BITSIZE (GET_MODE (rtl))))
    SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
}
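
/* Illustration only, not from the GCC sources: a declaration such as

     char c __attribute__ ((aligned (1)));

   has DECL_USER_ALIGN set with a 1-byte (8-bit) alignment, so the check
   above marks its symbol with SYMBOL_FLAG_ALIGN1 and the back end will
   not form the address of c with LARL, which can only produce even
   addresses.  */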
/* Output thunk to FILE that implements a C++ virtual function call (with
   multiple inheritance) to FUNCTION.  The thunk adjusts the this pointer
   by DELTA, and unless VCALL_OFFSET is zero, applies an additional adjustment
   stored at VCALL_OFFSET in the vtable whose address is located at offset 0
   relative to the resulting this pointer.  */

static void
s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
                      HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
                      tree function)
{
  rtx op[10];
  int nonlocal = 0;

  /* Make sure unwind info is emitted for the thunk if needed.  */
  final_start_function (emit_barrier (), file, 1);

  /* Operand 0 is the target function.  */
  op[0] = XEXP (DECL_RTL (function), 0);
  if (flag_pic && !SYMBOL_REF_LOCAL_P (op[0]))
    {
      nonlocal = 1;
      op[0] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[0]),
                              TARGET_64BIT ? UNSPEC_PLT : UNSPEC_GOT);
      op[0] = gen_rtx_CONST (Pmode, op[0]);
    }

  /* Operand 1 is the 'this' pointer.  */
  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    op[1] = gen_rtx_REG (Pmode, 3);
  else
    op[1] = gen_rtx_REG (Pmode, 2);

  /* Operand 2 is the delta.  */
  op[2] = GEN_INT (delta);

  /* Operand 3 is the vcall_offset.  */
  op[3] = GEN_INT (vcall_offset);

  /* Operand 4 is the temporary register.  */
  op[4] = gen_rtx_REG (Pmode, 1);

  /* Operands 5 to 8 can be used as labels.  */
  op[5] = NULL_RTX;
  op[6] = NULL_RTX;
  op[7] = NULL_RTX;
  op[8] = NULL_RTX;

  /* Operand 9 can be used for temporary register.  */
  op[9] = NULL_RTX;

  /* Generate code.  */
  if (TARGET_64BIT)
    {
      /* Setup literal pool pointer if required.  */
      if ((!DISP_IN_RANGE (delta)
           && !CONST_OK_FOR_K (delta)
           && !CONST_OK_FOR_Os (delta))
          || (!DISP_IN_RANGE (vcall_offset)
              && !CONST_OK_FOR_K (vcall_offset)
              && !CONST_OK_FOR_Os (vcall_offset)))
        {
          op[5] = gen_label_rtx ();
          output_asm_insn ("larl\t%4,%5", op);
        }

      /* Add DELTA to this pointer.  */
      if (delta)
        {
          if (CONST_OK_FOR_J (delta))
            output_asm_insn ("la\t%1,%2(%1)", op);
          else if (DISP_IN_RANGE (delta))
            output_asm_insn ("lay\t%1,%2(%1)", op);
          else if (CONST_OK_FOR_K (delta))
            output_asm_insn ("aghi\t%1,%2", op);
          else if (CONST_OK_FOR_Os (delta))
            output_asm_insn ("agfi\t%1,%2", op);
          else
            {
              op[6] = gen_label_rtx ();
              output_asm_insn ("agf\t%1,%6-%5(%4)", op);
            }
        }

      /* Perform vcall adjustment.  */
      if (vcall_offset)
        {
          if (DISP_IN_RANGE (vcall_offset))
            {
              output_asm_insn ("lg\t%4,0(%1)", op);
              output_asm_insn ("ag\t%1,%3(%4)", op);
            }
          else if (CONST_OK_FOR_K (vcall_offset))
            {
              output_asm_insn ("lghi\t%4,%3", op);
              output_asm_insn ("ag\t%4,0(%1)", op);
              output_asm_insn ("ag\t%1,0(%4)", op);
            }
          else if (CONST_OK_FOR_Os (vcall_offset))
            {
              output_asm_insn ("lgfi\t%4,%3", op);
              output_asm_insn ("ag\t%4,0(%1)", op);
              output_asm_insn ("ag\t%1,0(%4)", op);
            }
          else
            {
              op[7] = gen_label_rtx ();
              output_asm_insn ("llgf\t%4,%7-%5(%4)", op);
              output_asm_insn ("ag\t%4,0(%1)", op);
              output_asm_insn ("ag\t%1,0(%4)", op);
            }
        }

      /* Jump to target.  */
      output_asm_insn ("jg\t%0", op);

      /* Output literal pool if required.  */
      if (op[5])
        {
          output_asm_insn (".align\t4", op);
          targetm.asm_out.internal_label (file, "L",
                                          CODE_LABEL_NUMBER (op[5]));
        }
      if (op[6])
        {
          targetm.asm_out.internal_label (file, "L",
                                          CODE_LABEL_NUMBER (op[6]));
          output_asm_insn (".long\t%2", op);
        }
      if (op[7])
        {
          targetm.asm_out.internal_label (file, "L",
                                          CODE_LABEL_NUMBER (op[7]));
          output_asm_insn (".long\t%3", op);
        }
    }
  else
    {
      /* Setup base pointer if required.  */
      if (!vcall_offset
          || (!DISP_IN_RANGE (delta)
              && !CONST_OK_FOR_K (delta)
              && !CONST_OK_FOR_Os (delta))
          || (!DISP_IN_RANGE (vcall_offset)
              && !CONST_OK_FOR_K (vcall_offset)
              && !CONST_OK_FOR_Os (vcall_offset)))
        {
          op[5] = gen_label_rtx ();
          output_asm_insn ("basr\t%4,0", op);
          targetm.asm_out.internal_label (file, "L",
                                          CODE_LABEL_NUMBER (op[5]));
        }

      /* Add DELTA to this pointer.  */
      if (delta)
        {
          if (CONST_OK_FOR_J (delta))
            output_asm_insn ("la\t%1,%2(%1)", op);
          else if (DISP_IN_RANGE (delta))
            output_asm_insn ("lay\t%1,%2(%1)", op);
          else if (CONST_OK_FOR_K (delta))
            output_asm_insn ("ahi\t%1,%2", op);
          else if (CONST_OK_FOR_Os (delta))
            output_asm_insn ("afi\t%1,%2", op);
          else
            {
              op[6] = gen_label_rtx ();
              output_asm_insn ("a\t%1,%6-%5(%4)", op);
            }
        }

      /* Perform vcall adjustment.  */
      if (vcall_offset)
        {
          if (CONST_OK_FOR_J (vcall_offset))
            {
              output_asm_insn ("l\t%4,0(%1)", op);
              output_asm_insn ("a\t%1,%3(%4)", op);
            }
          else if (DISP_IN_RANGE (vcall_offset))
            {
              output_asm_insn ("l\t%4,0(%1)", op);
              output_asm_insn ("ay\t%1,%3(%4)", op);
            }
          else if (CONST_OK_FOR_K (vcall_offset))
            {
              output_asm_insn ("lhi\t%4,%3", op);
              output_asm_insn ("a\t%4,0(%1)", op);
              output_asm_insn ("a\t%1,0(%4)", op);
            }
          else if (CONST_OK_FOR_Os (vcall_offset))
            {
              output_asm_insn ("iilf\t%4,%3", op);
              output_asm_insn ("a\t%4,0(%1)", op);
              output_asm_insn ("a\t%1,0(%4)", op);
            }
          else
            {
              op[7] = gen_label_rtx ();
              output_asm_insn ("l\t%4,%7-%5(%4)", op);
              output_asm_insn ("a\t%4,0(%1)", op);
              output_asm_insn ("a\t%1,0(%4)", op);
            }

          /* We had to clobber the base pointer register.
             Re-setup the base pointer (with a different base).  */
          op[5] = gen_label_rtx ();
          output_asm_insn ("basr\t%4,0", op);
          targetm.asm_out.internal_label (file, "L",
                                          CODE_LABEL_NUMBER (op[5]));
        }

      /* Jump to target.  */
      op[8] = gen_label_rtx ();

      if (!flag_pic)
        output_asm_insn ("l\t%4,%8-%5(%4)", op);
      else if (!nonlocal)
        output_asm_insn ("a\t%4,%8-%5(%4)", op);
      /* We cannot call through .plt, since .plt requires %r12 loaded.  */
      else if (flag_pic == 1)
        {
          output_asm_insn ("a\t%4,%8-%5(%4)", op);
          output_asm_insn ("l\t%4,%0(%4)", op);
        }
      else if (flag_pic == 2)
        {
          op[9] = gen_rtx_REG (Pmode, 0);
          output_asm_insn ("l\t%9,%8-4-%5(%4)", op);
          output_asm_insn ("a\t%4,%8-%5(%4)", op);
          output_asm_insn ("ar\t%4,%9", op);
          output_asm_insn ("l\t%4,0(%4)", op);
        }

      output_asm_insn ("br\t%4", op);

      /* Output literal pool.  */
      output_asm_insn (".align\t4", op);

      if (nonlocal && flag_pic == 2)
        output_asm_insn (".long\t%0", op);
      if (nonlocal)
        {
          op[0] = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
          SYMBOL_REF_FLAGS (op[0]) = SYMBOL_FLAG_LOCAL;
        }

      targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[8]));
      if (!flag_pic)
        output_asm_insn (".long\t%0", op);
      else
        output_asm_insn (".long\t%0-%5", op);

      if (op[6])
        {
          targetm.asm_out.internal_label (file, "L",
                                          CODE_LABEL_NUMBER (op[6]));
          output_asm_insn (".long\t%2", op);
        }
      if (op[7])
        {
          targetm.asm_out.internal_label (file, "L",
                                          CODE_LABEL_NUMBER (op[7]));
          output_asm_insn (".long\t%3", op);
        }
    }
  final_end_function ();
}
static bool
s390_valid_pointer_mode (enum machine_mode mode)
{
  return (mode == SImode || (TARGET_64BIT && mode == DImode));
}

/* Checks whether the given CALL_EXPR would use a caller
   saved register.  This is used to decide whether sibling call
   optimization could be performed on the respective function
   call.  */

static bool
s390_call_saved_register_used (tree call_expr)
{
  CUMULATIVE_ARGS cum_v;
  cumulative_args_t cum;
  tree parameter;
  enum machine_mode mode;
  tree type;
  rtx parm_rtx;
  int reg, i;

  INIT_CUMULATIVE_ARGS (cum_v, NULL, NULL, 0, 0);
  cum = pack_cumulative_args (&cum_v);

  for (i = 0; i < call_expr_nargs (call_expr); i++)
    {
      parameter = CALL_EXPR_ARG (call_expr, i);
      gcc_assert (parameter);

      /* For an undeclared variable passed as parameter we will get
         an ERROR_MARK node here.  */
      if (TREE_CODE (parameter) == ERROR_MARK)
        return true;

      type = TREE_TYPE (parameter);
      gcc_assert (type);

      mode = TYPE_MODE (type);
      gcc_assert (mode);

      if (pass_by_reference (&cum_v, mode, type, true))
        {
          mode = Pmode;
          type = build_pointer_type (type);
        }

      parm_rtx = s390_function_arg (cum, mode, type, 0);

      s390_function_arg_advance (cum, mode, type, 0);

      if (!parm_rtx)
        continue;

      if (REG_P (parm_rtx))
        {
          for (reg = 0;
               reg < HARD_REGNO_NREGS (REGNO (parm_rtx), GET_MODE (parm_rtx));
               reg++)
            if (!call_used_regs[reg + REGNO (parm_rtx)])
              return true;
        }

      if (GET_CODE (parm_rtx) == PARALLEL)
        {
          int i;

          for (i = 0; i < XVECLEN (parm_rtx, 0); i++)
            {
              rtx r = XEXP (XVECEXP (parm_rtx, 0, i), 0);

              gcc_assert (REG_P (r));

              for (reg = 0;
                   reg < HARD_REGNO_NREGS (REGNO (r), GET_MODE (r));
                   reg++)
                if (!call_used_regs[reg + REGNO (r)])
                  return true;
            }
        }
    }
  return false;
}

/* Return true if the given call expression can be
   turned into a sibling call.
   DECL holds the declaration of the function to be called whereas
   EXP is the call expression itself.  */

static bool
s390_function_ok_for_sibcall (tree decl, tree exp)
{
  /* The TPF epilogue uses register 1.  */
  if (TARGET_TPF_PROFILING)
    return false;

  /* The 31 bit PLT code uses register 12 (GOT pointer - caller saved)
     which would have to be restored before the sibcall.  */
  if (!TARGET_64BIT && flag_pic && decl && !targetm.binds_local_p (decl))
    return false;

  /* Register 6 on s390 is available as an argument register but unfortunately
     "caller saved". This makes functions needing this register for arguments
     not suitable for sibcalls.  */
  return !s390_call_saved_register_used (exp);
}
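
/* Illustration only, not from the GCC sources: a call such as

     f (a, b, c, d, e);

   with five single-register arguments needs gprs r2..r6 for argument
   passing; r6 is call-saved on S/390, so s390_call_saved_register_used
   returns true and the call is not turned into a sibcall.  */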
/* Return the fixed registers used for condition codes.  */

static bool
s390_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
{
  *p1 = CC_REGNUM;
  *p2 = INVALID_REGNUM;

  return true;
}
/* This function is used by the call expanders of the machine description.
   It emits the call insn itself together with the necessary operations
   to adjust the target address and returns the emitted insn.
   ADDR_LOCATION is the target address rtx
   TLS_CALL the location of the thread-local symbol
   RESULT_REG the register where the result of the call should be stored
   RETADDR_REG the register where the return address should be stored
               If this parameter is NULL_RTX the call is considered
               to be a sibling call.  */

rtx
s390_emit_call (rtx addr_location, rtx tls_call, rtx result_reg,
                rtx retaddr_reg)
{
  bool plt_call = false;
  rtx insn;
  rtx call;
  rtx clobber;
  rtvec vec;

  /* Direct function calls need special treatment.  */
  if (GET_CODE (addr_location) == SYMBOL_REF)
    {
      /* When calling a global routine in PIC mode, we must
         replace the symbol itself with the PLT stub.  */
      if (flag_pic && !SYMBOL_REF_LOCAL_P (addr_location))
        {
          if (retaddr_reg != NULL_RTX)
            {
              addr_location = gen_rtx_UNSPEC (Pmode,
                                              gen_rtvec (1, addr_location),
                                              UNSPEC_PLT);
              addr_location = gen_rtx_CONST (Pmode, addr_location);
              plt_call = true;
            }
          else
            /* For -fpic code the PLT entries might use r12 which is
               call-saved.  Therefore we cannot do a sibcall when
               calling directly using a symbol ref.  When reaching
               this point we decided (in s390_function_ok_for_sibcall)
               to do a sibcall for a function pointer but one of the
               optimizers was able to get rid of the function pointer
               by propagating the symbol ref into the call.  This
               optimization is illegal for S/390 so we turn the direct
               call into an indirect call again.  */
            addr_location = force_reg (Pmode, addr_location);
        }

      /* Unless we can use the bras(l) insn, force the
         routine address into a register.  */
      if (!TARGET_SMALL_EXEC && !TARGET_CPU_ZARCH)
        {
          if (flag_pic)
            addr_location = legitimize_pic_address (addr_location, 0);
          else
            addr_location = force_reg (Pmode, addr_location);
        }
    }

  /* If it is already an indirect call or the code above moved the
     SYMBOL_REF to somewhere else make sure the address can be found in
     register 1.  */
  if (retaddr_reg == NULL_RTX
      && GET_CODE (addr_location) != SYMBOL_REF
      && !plt_call)
    {
      emit_move_insn (gen_rtx_REG (Pmode, SIBCALL_REGNUM), addr_location);
      addr_location = gen_rtx_REG (Pmode, SIBCALL_REGNUM);
    }

  addr_location = gen_rtx_MEM (QImode, addr_location);
  call = gen_rtx_CALL (VOIDmode, addr_location, const0_rtx);

  if (result_reg != NULL_RTX)
    call = gen_rtx_SET (VOIDmode, result_reg, call);

  if (retaddr_reg != NULL_RTX)
    {
      clobber = gen_rtx_CLOBBER (VOIDmode, retaddr_reg);

      if (tls_call != NULL_RTX)
        vec = gen_rtvec (3, call, clobber,
                         gen_rtx_USE (VOIDmode, tls_call));
      else
        vec = gen_rtvec (2, call, clobber);

      call = gen_rtx_PARALLEL (VOIDmode, vec);
    }

  insn = emit_call_insn (call);

  /* 31-bit PLT stubs and tls calls use the GOT register implicitly.  */
  if ((!TARGET_64BIT && plt_call) || tls_call != NULL_RTX)
    {
      /* s390_function_ok_for_sibcall should
         have denied sibcalls in this case.  */
      gcc_assert (retaddr_reg != NULL_RTX);
      use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, 12));
    }

  return insn;
}
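
/* Illustration only, not from the GCC sources: for a normal external
   call returning a value in r2, the emitted insn is a PARALLEL of the
   form

     (parallel [(set (reg:DI 2)
                     (call (mem:QI (symbol_ref "foo")) (const_int 0)))
                (clobber (reg:DI 14))])

   where the clobber of r14 models the return-address store done by
   bras/brasl; for a sibling call RETADDR_REG, and hence the clobber,
   is absent.  */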
/* Implement TARGET_CONDITIONAL_REGISTER_USAGE.  */

static void
s390_conditional_register_usage (void)
{
  int i;

  if (flag_pic)
    {
      fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
      call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
    }
  if (TARGET_CPU_ZARCH)
    {
      fixed_regs[BASE_REGNUM] = 0;
      call_used_regs[BASE_REGNUM] = 0;
      fixed_regs[RETURN_REGNUM] = 0;
      call_used_regs[RETURN_REGNUM] = 0;
    }
  if (TARGET_64BIT)
    {
      for (i = 24; i < 32; i++)
        call_used_regs[i] = call_really_used_regs[i] = 0;
    }
  else
    {
      for (i = 18; i < 20; i++)
        call_used_regs[i] = call_really_used_regs[i] = 0;
    }

  if (TARGET_SOFT_FLOAT)
    {
      for (i = 16; i < 32; i++)
        call_used_regs[i] = fixed_regs[i] = 1;
    }
}
/* Corresponding function to eh_return expander.  */

static GTY(()) rtx s390_tpf_eh_return_symbol;

void
s390_emit_tpf_eh_return (rtx target)
{
  rtx insn, reg;

  if (!s390_tpf_eh_return_symbol)
    s390_tpf_eh_return_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tpf_eh_return");

  reg = gen_rtx_REG (Pmode, 2);

  emit_move_insn (reg, target);
  insn = s390_emit_call (s390_tpf_eh_return_symbol, NULL_RTX, reg,
                         gen_rtx_REG (Pmode, RETURN_REGNUM));
  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);

  emit_move_insn (EH_RETURN_HANDLER_RTX, reg);
}
/* Rework the prologue/epilogue to avoid saving/restoring
   registers unnecessarily.  */

static void
s390_optimize_prologue (void)
{
  rtx insn, new_insn, next_insn;

  /* Do a final recompute of the frame-related data.  */

  s390_update_frame_layout ();

  /* If all special registers are in fact used, there's nothing we
     can do, so no point in walking the insn list.  */

  if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
      && cfun_frame_layout.last_save_gpr >= BASE_REGNUM
      && (TARGET_CPU_ZARCH
          || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
              && cfun_frame_layout.last_save_gpr >= RETURN_REGNUM)))
    return;

  /* Search for prologue/epilogue insns and replace them.  */

  for (insn = get_insns (); insn; insn = next_insn)
    {
      int first, last, off;
      rtx set, base, offset;

      next_insn = NEXT_INSN (insn);

      if (! NONJUMP_INSN_P (insn))
        continue;

      if (GET_CODE (PATTERN (insn)) == PARALLEL
          && store_multiple_operation (PATTERN (insn), VOIDmode))
        {
          set = XVECEXP (PATTERN (insn), 0, 0);
          first = REGNO (SET_SRC (set));
          last = first + XVECLEN (PATTERN (insn), 0) - 1;
          offset = const0_rtx;
          base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
          off = INTVAL (offset);

          if (GET_CODE (base) != REG || off < 0)
            continue;
          if (cfun_frame_layout.first_save_gpr != -1
              && (cfun_frame_layout.first_save_gpr < first
                  || cfun_frame_layout.last_save_gpr > last))
            continue;
          if (REGNO (base) != STACK_POINTER_REGNUM
              && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
            continue;
          if (first > BASE_REGNUM || last < BASE_REGNUM)
            continue;

          if (cfun_frame_layout.first_save_gpr != -1)
            {
              new_insn = save_gprs (base,
                                    off + (cfun_frame_layout.first_save_gpr
                                           - first) * UNITS_PER_LONG,
                                    cfun_frame_layout.first_save_gpr,
                                    cfun_frame_layout.last_save_gpr);
              new_insn = emit_insn_before (new_insn, insn);
              INSN_ADDRESSES_NEW (new_insn, -1);
            }

          remove_insn (insn);
          continue;
        }

      if (cfun_frame_layout.first_save_gpr == -1
          && GET_CODE (PATTERN (insn)) == SET
          && GET_CODE (SET_SRC (PATTERN (insn))) == REG
          && (REGNO (SET_SRC (PATTERN (insn))) == BASE_REGNUM
              || (!TARGET_CPU_ZARCH
                  && REGNO (SET_SRC (PATTERN (insn))) == RETURN_REGNUM))
          && GET_CODE (SET_DEST (PATTERN (insn))) == MEM)
        {
          set = PATTERN (insn);
          first = REGNO (SET_SRC (set));
          offset = const0_rtx;
          base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
          off = INTVAL (offset);

          if (GET_CODE (base) != REG || off < 0)
            continue;
          if (REGNO (base) != STACK_POINTER_REGNUM
              && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
            continue;

          remove_insn (insn);
          continue;
        }

      if (GET_CODE (PATTERN (insn)) == PARALLEL
          && load_multiple_operation (PATTERN (insn), VOIDmode))
        {
          set = XVECEXP (PATTERN (insn), 0, 0);
          first = REGNO (SET_DEST (set));
          last = first + XVECLEN (PATTERN (insn), 0) - 1;
          offset = const0_rtx;
          base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
          off = INTVAL (offset);

          if (GET_CODE (base) != REG || off < 0)
            continue;
          if (cfun_frame_layout.first_restore_gpr != -1
              && (cfun_frame_layout.first_restore_gpr < first
                  || cfun_frame_layout.last_restore_gpr > last))
            continue;
          if (REGNO (base) != STACK_POINTER_REGNUM
              && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
            continue;
          if (first > BASE_REGNUM || last < BASE_REGNUM)
            continue;

          if (cfun_frame_layout.first_restore_gpr != -1)
            {
              new_insn = restore_gprs (base,
                                       off + (cfun_frame_layout.first_restore_gpr
                                              - first) * UNITS_PER_LONG,
                                       cfun_frame_layout.first_restore_gpr,
                                       cfun_frame_layout.last_restore_gpr);
              new_insn = emit_insn_before (new_insn, insn);
              INSN_ADDRESSES_NEW (new_insn, -1);
            }

          remove_insn (insn);
          continue;
        }

      if (cfun_frame_layout.first_restore_gpr == -1
          && GET_CODE (PATTERN (insn)) == SET
          && GET_CODE (SET_DEST (PATTERN (insn))) == REG
          && (REGNO (SET_DEST (PATTERN (insn))) == BASE_REGNUM
              || (!TARGET_CPU_ZARCH
                  && REGNO (SET_DEST (PATTERN (insn))) == RETURN_REGNUM))
          && GET_CODE (SET_SRC (PATTERN (insn))) == MEM)
        {
          set = PATTERN (insn);
          first = REGNO (SET_DEST (set));
          offset = const0_rtx;
          base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
          off = INTVAL (offset);

          if (GET_CODE (base) != REG || off < 0)
            continue;
          if (REGNO (base) != STACK_POINTER_REGNUM
              && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
            continue;

          remove_insn (insn);
          continue;
        }
    }
}
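
/* Illustration only, not from the GCC sources: if the generic prologue
   saved r6..r15 with

     stmg  %r6,%r15,48(%r15)

   but the final frame layout shows only r14/r15 live across the
   function, the pass above replaces the store multiple by

     stmg  %r14,%r15,112(%r15)

   reusing the same base register and the same per-register slots
   (8 bytes each, so r14's slot is 48 + (14 - 6) * 8 = 112).  */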
/* On z10 and later the dynamic branch prediction must see the
   backward jump within a certain window.  If not it falls back to
   the static prediction.  This function rearranges the loop backward
   branch in a way which makes the static prediction always correct.
   The function returns true if it added an instruction.  */
static bool
s390_fix_long_loop_prediction (rtx insn)
{
  rtx set = single_set (insn);
  rtx code_label, label_ref, new_label;
  rtx uncond_jump;
  rtx cur_insn;
  rtx tmp;
  int distance;

  /* This will exclude branch on count and branch on index patterns
     since these are correctly statically predicted.  */
  if (!set
      || SET_DEST (set) != pc_rtx
      || GET_CODE (SET_SRC(set)) != IF_THEN_ELSE)
    return false;

  label_ref = (GET_CODE (XEXP (SET_SRC (set), 1)) == LABEL_REF ?
               XEXP (SET_SRC (set), 1) : XEXP (SET_SRC (set), 2));

  gcc_assert (GET_CODE (label_ref) == LABEL_REF);

  code_label = XEXP (label_ref, 0);

  if (INSN_ADDRESSES (INSN_UID (code_label)) == -1
      || INSN_ADDRESSES (INSN_UID (insn)) == -1
      || (INSN_ADDRESSES (INSN_UID (insn))
          - INSN_ADDRESSES (INSN_UID (code_label)) < PREDICT_DISTANCE))
    return false;

  for (distance = 0, cur_insn = PREV_INSN (insn);
       distance < PREDICT_DISTANCE - 6;
       distance += get_attr_length (cur_insn), cur_insn = PREV_INSN (cur_insn))
    if (!cur_insn || JUMP_P (cur_insn) || LABEL_P (cur_insn))
      return false;

  new_label = gen_label_rtx ();
  uncond_jump = emit_jump_insn_after (
                  gen_rtx_SET (VOIDmode, pc_rtx,
                               gen_rtx_LABEL_REF (VOIDmode, code_label)),
                  insn);
  emit_label_after (new_label, uncond_jump);

  tmp = XEXP (SET_SRC (set), 1);
  XEXP (SET_SRC (set), 1) = XEXP (SET_SRC (set), 2);
  XEXP (SET_SRC (set), 2) = tmp;
  INSN_CODE (insn) = -1;

  XEXP (label_ref, 0) = new_label;
  JUMP_LABEL (insn) = new_label;
  JUMP_LABEL (uncond_jump) = code_label;

  return true;
}
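
/* Illustration only, not from the GCC sources: a too-distant backward
   branch

     loop:  ...
            jne  loop

   is rewritten as

     loop:  ...
            je   skip
            j    loop
     skip:

   the short forward branch defaults to "not taken" and the
   unconditional jump needs no prediction, so the static prediction
   is always correct.  */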
/* Returns 1 if INSN reads the value of REG for purposes not related
   to addressing of memory, and 0 otherwise.  */
static int
s390_non_addr_reg_read_p (rtx reg, rtx insn)
{
  return reg_referenced_p (reg, PATTERN (insn))
         && !reg_used_in_mem_p (REGNO (reg), PATTERN (insn));
}

/* Starting from INSN find_cond_jump looks downwards in the insn
   stream for a single jump insn which is the last user of the
   condition code set in INSN.  */
static rtx
find_cond_jump (rtx insn)
{
  for (; insn; insn = NEXT_INSN (insn))
    {
      rtx ite, cc;

      if (LABEL_P (insn))
        break;

      if (!JUMP_P (insn))
        {
          if (reg_mentioned_p (gen_rtx_REG (CCmode, CC_REGNUM), insn))
            break;
          continue;
        }

      /* This will be triggered by a return.  */
      if (GET_CODE (PATTERN (insn)) != SET)
        break;

      gcc_assert (SET_DEST (PATTERN (insn)) == pc_rtx);
      ite = SET_SRC (PATTERN (insn));

      if (GET_CODE (ite) != IF_THEN_ELSE)
        break;

      cc = XEXP (XEXP (ite, 0), 0);
      if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc)))
        break;

      if (find_reg_note (insn, REG_DEAD, cc))
        return insn;
      break;
    }

  return NULL_RTX;
}

/* Swap the condition in COND and the operands in OP0 and OP1 so that
   the semantics does not change.  If NULL_RTX is passed as COND the
   function tries to find the conditional jump starting with INSN.  */
static void
s390_swap_cmp (rtx cond, rtx *op0, rtx *op1, rtx insn)
{
  rtx tmp;

  if (cond == NULL_RTX)
    {
      rtx jump = find_cond_jump (NEXT_INSN (insn));
      jump = jump ? single_set (jump) : NULL_RTX;

      if (jump == NULL_RTX)
        return;

      cond = XEXP (XEXP (jump, 1), 0);
    }

  tmp = *op0;
  *op0 = *op1;
  *op1 = tmp;
  PUT_CODE (cond, swap_condition (GET_CODE (cond)));
}
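
/* Illustration only, not from the GCC sources: swapping

     cr   %r1,%r2          ->    cr   %r2,%r1
     jh   target           ->    jl   target

   leaves the branch semantics unchanged (swap_condition turns GT into
   LT) while making %r1 rather than %r2 the second operand, i.e. the
   one z10 accesses with complemented bits.  */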
/* On z10, instructions of the compare-and-branch family have the
   property to access the register occurring as second operand with
   its bits complemented.  If such a compare is grouped with a second
   instruction that accesses the same register non-complemented, and
   if that register's value is delivered via a bypass, then the
   pipeline recycles, thereby causing significant performance decline.
   This function locates such situations and exchanges the two
   operands of the compare.  The function returns true whenever it
   added an insn.  */
static bool
s390_z10_optimize_cmp (rtx insn)
{
  rtx prev_insn, next_insn;
  bool insn_added_p = false;
  rtx cond, *op0, *op1;

  if (GET_CODE (PATTERN (insn)) == PARALLEL)
    {
      /* Handle compare and branch and branch on count
         instructions.  */
      rtx pattern = single_set (insn);

      if (!pattern
          || SET_DEST (pattern) != pc_rtx
          || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE)
        return false;

      cond = XEXP (SET_SRC (pattern), 0);
      op0 = &XEXP (cond, 0);
      op1 = &XEXP (cond, 1);
    }
  else if (GET_CODE (PATTERN (insn)) == SET)
    {
      rtx src, dest;

      /* Handle normal compare instructions.  */
      src = SET_SRC (PATTERN (insn));
      dest = SET_DEST (PATTERN (insn));

      if (!REG_P (dest)
          || !CC_REGNO_P (REGNO (dest))
          || GET_CODE (src) != COMPARE)
        return false;

      /* s390_swap_cmp will try to find the conditional
         jump when passing NULL_RTX as condition.  */
      cond = NULL_RTX;
      op0 = &XEXP (src, 0);
      op1 = &XEXP (src, 1);
    }
  else
    return false;

  if (!REG_P (*op0) || !REG_P (*op1))
    return false;

  if (GET_MODE_CLASS (GET_MODE (*op0)) != MODE_INT)
    return false;

  /* Swap the COMPARE arguments and its mask if there is a
     conflicting access in the previous insn.  */
  prev_insn = prev_active_insn (insn);
  if (prev_insn != NULL_RTX && INSN_P (prev_insn)
      && reg_referenced_p (*op1, PATTERN (prev_insn)))
    s390_swap_cmp (cond, op0, op1, insn);

  /* Check if there is a conflict with the next insn. If there
     was no conflict with the previous insn, then swap the
     COMPARE arguments and its mask.  If we already swapped
     the operands, or if swapping them would cause a conflict
     with the previous insn, issue a NOP after the COMPARE in
     order to separate the two instructions.  */
  next_insn = next_active_insn (insn);
  if (next_insn != NULL_RTX && INSN_P (next_insn)
      && s390_non_addr_reg_read_p (*op1, next_insn))
    {
      if (prev_insn != NULL_RTX && INSN_P (prev_insn)
          && s390_non_addr_reg_read_p (*op0, prev_insn))
        {
          if (REGNO (*op1) == 0)
            emit_insn_after (gen_nop1 (), insn);
          else
            emit_insn_after (gen_nop (), insn);
          insn_added_p = true;
        }
      else
        s390_swap_cmp (cond, op0, op1, insn);
    }
  return insn_added_p;
}
/* Perform machine-dependent processing.  */

static void
s390_reorg (void)
{
  bool pool_overflow = false;

  /* Make sure all splits have been performed; splits after
     machine_dependent_reorg might confuse insn length counts.  */
  split_all_insns_noflow ();

  /* Install the main literal pool and the associated base
     register load insns.

     In addition, there are two problematic situations we need
     to correct:

     - the literal pool might be > 4096 bytes in size, so that
       some of its elements cannot be directly accessed

     - a branch target might be > 64K away from the branch, so that
       it is not possible to use a PC-relative instruction.

     To fix those, we split the single literal pool into multiple
     pool chunks, reloading the pool base register at various
     points throughout the function to ensure it always points to
     the pool chunk the following code expects, and / or replace
     PC-relative branches by absolute branches.

     However, the two problems are interdependent: splitting the
     literal pool can move a branch further away from its target,
     causing the 64K limit to overflow, and on the other hand,
     replacing a PC-relative branch by an absolute branch means
     we need to put the branch target address into the literal
     pool, possibly causing it to overflow.

     So, we loop trying to fix up both problems until we manage
     to satisfy both conditions at the same time.  Note that the
     loop is guaranteed to terminate as every pass of the loop
     strictly decreases the total number of PC-relative branches
     in the function.  (This is not completely true as there
     might be branch-over-pool insns introduced by chunkify_start.
     Those never need to be split however.)  */

  for (;;)
    {
      struct constant_pool *pool = NULL;

      /* Collect the literal pool.  */
      if (!pool_overflow)
        {
          pool = s390_mainpool_start ();
          if (!pool)
            pool_overflow = true;
        }

      /* If literal pool overflowed, start to chunkify it.  */
      if (pool_overflow)
        pool = s390_chunkify_start ();

      /* Split out-of-range branches.  If this has created new
         literal pool entries, cancel current chunk list and
         recompute it.  zSeries machines have large branch
         instructions, so we never need to split a branch.  */
      if (!TARGET_CPU_ZARCH && s390_split_branches ())
        {
          if (pool_overflow)
            s390_chunkify_cancel (pool);
          else
            s390_mainpool_cancel (pool);

          continue;
        }

      /* If we made it up to here, both conditions are satisfied.
         Finish up literal pool related changes.  */
      if (pool_overflow)
        s390_chunkify_finish (pool);
      else
        s390_mainpool_finish (pool);

      /* We're done splitting branches.  */
      cfun->machine->split_branches_pending_p = false;
      break;
    }

  /* Generate out-of-pool execute target insns.  */
  if (TARGET_CPU_ZARCH)
    {
      rtx insn, label, target;

      for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
        {
          label = s390_execute_label (insn);
          if (!label)
            continue;

          gcc_assert (label != const0_rtx);

          target = emit_label (XEXP (label, 0));
          INSN_ADDRESSES_NEW (target, -1);

          target = emit_insn (s390_execute_target (insn));
          INSN_ADDRESSES_NEW (target, -1);
        }
    }

  /* Try to optimize prologue and epilogue further.  */
  s390_optimize_prologue ();

  /* Walk over the insns and do some >=z10 specific changes.  */
  if (s390_tune == PROCESSOR_2097_Z10
      || s390_tune == PROCESSOR_2817_Z196
      || s390_tune == PROCESSOR_2827_ZEC12)
    {
      rtx insn;
      bool insn_added_p = false;

      /* The insn lengths and addresses have to be up to date for the
         following manipulations.  */
      shorten_branches (get_insns ());

      for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
        {
          if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
            continue;

          if (JUMP_P (insn))
            insn_added_p |= s390_fix_long_loop_prediction (insn);

          if ((GET_CODE (PATTERN (insn)) == PARALLEL
               || GET_CODE (PATTERN (insn)) == SET)
              && s390_tune == PROCESSOR_2097_Z10)
            insn_added_p |= s390_z10_optimize_cmp (insn);
        }

      /* Adjust branches if we added new instructions.  */
      if (insn_added_p)
        shorten_branches (get_insns ());
    }
}
/* Return true if INSN is a fp load insn writing register REGNO.  */
static inline bool
s390_fpload_toreg (rtx insn, unsigned int regno)
{
  rtx set;
  enum attr_type flag = s390_safe_attr_type (insn);

  if (flag != TYPE_FLOADSF && flag != TYPE_FLOADDF)
    return false;

  set = single_set (insn);

  if (set == NULL_RTX)
    return false;

  if (!REG_P (SET_DEST (set)) || !MEM_P (SET_SRC (set)))
    return false;

  if (REGNO (SET_DEST (set)) != regno)
    return false;

  return true;
}

/* This value describes the distance to be avoided between an
   arithmetic fp instruction and an fp load writing the same register.
   Z10_EARLYLOAD_DISTANCE - 1 as well as Z10_EARLYLOAD_DISTANCE + 1 is
   fine but the exact value has to be avoided. Otherwise the FP
   pipeline will throw an exception causing a major penalty.  */
#define Z10_EARLYLOAD_DISTANCE 7

/* Rearrange the ready list in order to avoid the situation described
   for Z10_EARLYLOAD_DISTANCE.  A problematic load instruction is
   moved to the very end of the ready list.  */
static void
s390_z10_prevent_earlyload_conflicts (rtx *ready, int *nready_p)
{
  unsigned int regno;
  int nready = *nready_p;
  rtx tmp;
  int i;
  rtx insn;
  rtx set;
  enum attr_type flag;
  int distance;

  /* Skip DISTANCE - 1 active insns.  */
  for (insn = last_scheduled_insn, distance = Z10_EARLYLOAD_DISTANCE - 1;
       distance > 0 && insn != NULL_RTX;
       distance--, insn = prev_active_insn (insn))
    if (CALL_P (insn) || JUMP_P (insn))
      return;

  if (insn == NULL_RTX)
    return;

  set = single_set (insn);

  if (set == NULL_RTX || !REG_P (SET_DEST (set))
      || GET_MODE_CLASS (GET_MODE (SET_DEST (set))) != MODE_FLOAT)
    return;

  flag = s390_safe_attr_type (insn);

  if (flag == TYPE_FLOADSF || flag == TYPE_FLOADDF)
    return;

  regno = REGNO (SET_DEST (set));
  i = nready - 1;

  while (!s390_fpload_toreg (ready[i], regno) && i > 0)
    i--;

  if (!i)
    return;

  tmp = ready[i];
  memmove (&ready[1], &ready[0], sizeof (rtx) * i);
  ready[0] = tmp;
}
/* The s390_sched_state variable tracks the state of the current or
   the last instruction group.

   0,1,2 number of instructions scheduled in the current group
   3     the last group is complete - normal insns
   4     the last group was a cracked/expanded insn */

static int s390_sched_state;

#define S390_OOO_SCHED_STATE_NORMAL  3
#define S390_OOO_SCHED_STATE_CRACKED 4

#define S390_OOO_SCHED_ATTR_MASK_CRACKED    0x1
#define S390_OOO_SCHED_ATTR_MASK_EXPANDED   0x2
#define S390_OOO_SCHED_ATTR_MASK_ENDGROUP   0x4
#define S390_OOO_SCHED_ATTR_MASK_GROUPALONE 0x8
static unsigned int
s390_get_sched_attrmask (rtx insn)
{
  unsigned int mask = 0;

  if (get_attr_ooo_cracked (insn))
    mask |= S390_OOO_SCHED_ATTR_MASK_CRACKED;
  if (get_attr_ooo_expanded (insn))
    mask |= S390_OOO_SCHED_ATTR_MASK_EXPANDED;
  if (get_attr_ooo_endgroup (insn))
    mask |= S390_OOO_SCHED_ATTR_MASK_ENDGROUP;
  if (get_attr_ooo_groupalone (insn))
    mask |= S390_OOO_SCHED_ATTR_MASK_GROUPALONE;

  return mask;
}
/* Return the scheduling score for INSN.  The higher the score the
   better.  The score is calculated from the OOO scheduling attributes
   of INSN and the scheduling state s390_sched_state.  */
static int
s390_sched_score (rtx insn)
{
  unsigned int mask = s390_get_sched_attrmask (insn);
  int score = 0;

  switch (s390_sched_state)
    {
    case 0:
      /* Try to put insns into the first slot which would otherwise
         break a group.  */
      if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) != 0
          || (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) != 0)
        score += 5;
      if ((mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) != 0)
        score += 10;
    case 1:
      /* Prefer not cracked insns while trying to put together a
         group.  */
      if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) == 0
          && (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) == 0
          && (mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) == 0)
        score += 10;
      if ((mask & S390_OOO_SCHED_ATTR_MASK_ENDGROUP) == 0)
        score += 5;
      break;
    case 2:
      /* Prefer not cracked insns while trying to put together a
         group.  */
      if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) == 0
          && (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) == 0
          && (mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) == 0)
        score += 10;
      /* Prefer endgroup insns in the last slot.  */
      if ((mask & S390_OOO_SCHED_ATTR_MASK_ENDGROUP) != 0)
        score += 10;
      break;
    case S390_OOO_SCHED_STATE_NORMAL:
      /* Prefer not cracked insns if the last was not cracked.  */
      if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) == 0
          && (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) == 0)
        score += 5;
      if ((mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) != 0)
        score += 10;
      break;
    case S390_OOO_SCHED_STATE_CRACKED:
      /* Try to keep cracked insns together to prevent them from
         interrupting groups.  */
      if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) != 0
          || (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) != 0)
        score += 5;
      break;
    }
  return score;
}
/* This function is called via hook TARGET_SCHED_REORDER before
   issuing one insn from list READY which contains *NREADYP entries.
   For target z10 it reorders load instructions to avoid early load
   conflicts in the floating point pipeline.  */
static int
s390_sched_reorder (FILE *file, int verbose,
                    rtx *ready, int *nreadyp, int clock ATTRIBUTE_UNUSED)
{
  if (s390_tune == PROCESSOR_2097_Z10)
    if (reload_completed && *nreadyp > 1)
      s390_z10_prevent_earlyload_conflicts (ready, nreadyp);

  if (s390_tune == PROCESSOR_2827_ZEC12
      && reload_completed
      && *nreadyp > 1)
    {
      int i;
      int last_index = *nreadyp - 1;
      int max_index = -1;
      int max_score = -1;
      rtx tmp;

      /* Just move the insn with the highest score to the top (the
         end) of the list.  A full sort is not needed since a conflict
         in the hazard recognition cannot happen.  So the top insn in
         the ready list will always be taken.  */
      for (i = last_index; i >= 0; i--)
        {
          int score;

          if (recog_memoized (ready[i]) < 0)
            continue;

          score = s390_sched_score (ready[i]);
          if (score > max_score)
            {
              max_score = score;
              max_index = i;
            }
        }

      if (max_index != -1)
        {
          if (max_index != last_index)
            {
              tmp = ready[max_index];
              ready[max_index] = ready[last_index];
              ready[last_index] = tmp;

              if (verbose > 5)
                fprintf (file,
                         "move insn %d to the top of list\n",
                         INSN_UID (ready[last_index]));
            }
          else if (verbose > 5)
            fprintf (file,
                     "best insn %d already on top\n",
                     INSN_UID (ready[last_index]));
        }

      if (verbose > 5)
        {
          fprintf (file, "ready list ooo attributes - sched state: %d\n",
                   s390_sched_state);

          for (i = last_index; i >= 0; i--)
            {
              if (recog_memoized (ready[i]) < 0)
                continue;
              fprintf (file, "insn %d score: %d: ", INSN_UID (ready[i]),
                       s390_sched_score (ready[i]));
#define PRINT_OOO_ATTR(ATTR) fprintf (file, "%s ", get_attr_##ATTR (ready[i]) ? #ATTR : "!" #ATTR);
              PRINT_OOO_ATTR (ooo_cracked);
              PRINT_OOO_ATTR (ooo_expanded);
              PRINT_OOO_ATTR (ooo_endgroup);
              PRINT_OOO_ATTR (ooo_groupalone);
#undef PRINT_OOO_ATTR
              fprintf (file, "\n");
            }
        }
    }

  return s390_issue_rate ();
}
/* This function is called via hook TARGET_SCHED_VARIABLE_ISSUE after
   the scheduler has issued INSN.  It stores the last issued insn into
   last_scheduled_insn in order to make it available for
   s390_sched_reorder.  */
static int
s390_sched_variable_issue (FILE *file, int verbose, rtx insn, int more)
{
  last_scheduled_insn = insn;

  if (s390_tune == PROCESSOR_2827_ZEC12
      && reload_completed
      && recog_memoized (insn) >= 0)
    {
      unsigned int mask = s390_get_sched_attrmask (insn);

      if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) != 0
          || (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) != 0)
        s390_sched_state = S390_OOO_SCHED_STATE_CRACKED;
      else if ((mask & S390_OOO_SCHED_ATTR_MASK_ENDGROUP) != 0
               || (mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) != 0)
        s390_sched_state = S390_OOO_SCHED_STATE_NORMAL;
      else
        {
          /* Only normal insns are left (mask == 0).  */
          switch (s390_sched_state)
            {
            case 0:
            case 1:
            case 2:
            case S390_OOO_SCHED_STATE_NORMAL:
              if (s390_sched_state == S390_OOO_SCHED_STATE_NORMAL)
                s390_sched_state = 1;
              else
                s390_sched_state++;

              break;
            case S390_OOO_SCHED_STATE_CRACKED:
              s390_sched_state = S390_OOO_SCHED_STATE_NORMAL;
              break;
            }
        }
      if (verbose > 5)
        {
          fprintf (file, "insn %d: ", INSN_UID (insn));
#define PRINT_OOO_ATTR(ATTR)                                            \
          fprintf (file, "%s ", get_attr_##ATTR (insn) ? #ATTR : "");
          PRINT_OOO_ATTR (ooo_cracked);
          PRINT_OOO_ATTR (ooo_expanded);
          PRINT_OOO_ATTR (ooo_endgroup);
          PRINT_OOO_ATTR (ooo_groupalone);
#undef PRINT_OOO_ATTR
          fprintf (file, "\n");
          fprintf (file, "sched state: %d\n", s390_sched_state);
        }
    }

  if (GET_CODE (PATTERN (insn)) != USE
      && GET_CODE (PATTERN (insn)) != CLOBBER)
    return more - 1;
  else
    return more;
}
static void
s390_sched_init (FILE *file ATTRIBUTE_UNUSED,
                 int verbose ATTRIBUTE_UNUSED,
                 int max_ready ATTRIBUTE_UNUSED)
{
  last_scheduled_insn = NULL_RTX;
  s390_sched_state = 0;
}

/* This function checks the whole of insn X for memory references. The
   function always returns zero because the framework it is called
   from would stop recursively analyzing the insn upon a return value
   other than zero. The real result of this function is updating
   counter variable MEM_COUNT.  */
static int
check_dpu (rtx *x, unsigned *mem_count)
{
  if (*x != NULL_RTX && MEM_P (*x))
    (*mem_count)++;
  return 0;
}

/* This target hook implementation for TARGET_LOOP_UNROLL_ADJUST
   calculates a new unroll count for LOOP when tuning for cpus with
   a built-in stride prefetcher.
   The loop is analyzed for memory accesses by calling check_dpu for
   each rtx of the loop. Depending on the loop_depth and the amount of
   memory accesses a new number <= nunroll is returned to improve the
   behaviour of the hardware prefetch unit.  */
static unsigned
s390_loop_unroll_adjust (unsigned nunroll, struct loop *loop)
{
  basic_block *bbs;
  rtx insn;
  unsigned i;
  unsigned mem_count = 0;

  if (s390_tune != PROCESSOR_2097_Z10
      && s390_tune != PROCESSOR_2817_Z196
      && s390_tune != PROCESSOR_2827_ZEC12)
    return nunroll;

  /* Count the number of memory references within the loop body.  */
  bbs = get_loop_body (loop);
  for (i = 0; i < loop->num_nodes; i++)
    {
      for (insn = BB_HEAD (bbs[i]); insn != BB_END (bbs[i]); insn = NEXT_INSN (insn))
        if (INSN_P (insn) && INSN_CODE (insn) != -1)
          for_each_rtx (&insn, (rtx_function) check_dpu, &mem_count);
    }
  free (bbs);

  /* Prevent division by zero, and we do not need to adjust nunroll in this case.  */
  if (mem_count == 0)
    return nunroll;

  switch (loop_depth(loop))
    {
    case 1:
      return MIN (nunroll, 28 / mem_count);
    case 2:
      return MIN (nunroll, 22 / mem_count);
    default:
      return MIN (nunroll, 16 / mem_count);
    }
}
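
/* Illustration only, not from the GCC sources: a depth-1 loop whose
   body contains 7 memory references gets

     MIN (nunroll, 28 / 7) = MIN (nunroll, 4)

   so at most 4 copies of the body are made, keeping the number of
   concurrent memory streams within reach of the stride prefetcher.  */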
/* Initialize GCC target structure.  */

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER s390_assemble_integer

#undef TARGET_ASM_OPEN_PAREN
#define TARGET_ASM_OPEN_PAREN ""

#undef TARGET_ASM_CLOSE_PAREN
#define TARGET_ASM_CLOSE_PAREN ""

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE s390_option_override

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO s390_encode_section_info

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM s390_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS s390_delegitimize_address

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS s390_legitimize_address

#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY s390_return_in_memory

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA s390_output_addr_const_extra

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK s390_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY s390_adjust_priority
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE s390_issue_rate
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD s390_first_cycle_multipass_dfa_lookahead

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE s390_sched_variable_issue
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER s390_sched_reorder
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT s390_sched_init

#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P s390_cannot_copy_insn_p
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS s390_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST s390_address_cost
#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST s390_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST s390_memory_move_cost

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG s390_reorg

#undef TARGET_VALID_POINTER_MODE
#define TARGET_VALID_POINTER_MODE s390_valid_pointer_mode

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST s390_build_builtin_va_list
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START s390_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR s390_gimplify_va_arg

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE s390_promote_function_mode
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE s390_pass_by_reference

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL s390_function_ok_for_sibcall
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG s390_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE s390_function_arg_advance
#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE s390_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE s390_libcall_value

#undef TARGET_FIXED_CONDITION_CODE_REGS
#define TARGET_FIXED_CONDITION_CODE_REGS s390_fixed_condition_code_regs

#undef TARGET_CC_MODES_COMPATIBLE
#define TARGET_CC_MODES_COMPATIBLE s390_cc_modes_compatible

#undef TARGET_INVALID_WITHIN_DOLOOP
#define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_null

#ifdef HAVE_AS_TLS
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL s390_output_dwarf_dtprel
#endif

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE s390_mangle_type
#endif

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p

#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS s390_preferred_reload_class

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD s390_secondary_reload

#undef TARGET_LIBGCC_CMP_RETURN_MODE
#define TARGET_LIBGCC_CMP_RETURN_MODE s390_libgcc_cmp_return_mode

#undef TARGET_LIBGCC_SHIFT_COUNT_MODE
#define TARGET_LIBGCC_SHIFT_COUNT_MODE s390_libgcc_shift_count_mode

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P s390_legitimate_address_p

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P s390_legitimate_constant_p

#undef TARGET_LRA_P
#define TARGET_LRA_P s390_lra_p

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE s390_can_eliminate

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE s390_conditional_register_usage

#undef TARGET_LOOP_UNROLL_ADJUST
#define TARGET_LOOP_UNROLL_ADJUST s390_loop_unroll_adjust

#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
#define TARGET_ASM_TRAMPOLINE_TEMPLATE s390_asm_trampoline_template
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT s390_trampoline_init

#undef TARGET_UNWIND_WORD_MODE
#define TARGET_UNWIND_WORD_MODE s390_unwind_word_mode

#undef TARGET_CANONICALIZE_COMPARISON
#define TARGET_CANONICALIZE_COMPARISON s390_canonicalize_comparison

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-s390.h"