s390.c (s390_chunkify_start): Prevent literal pool splitting between a call and its...
gcc/config/s390/s390.c
1 /* Subroutines used for code generation on IBM S/390 and zSeries
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006,
3 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
4 Contributed by Hartmut Penner (hpenner@de.ibm.com) and
5 Ulrich Weigand (uweigand@de.ibm.com) and
6 Andreas Krebbel (Andreas.Krebbel@de.ibm.com).
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "tm_p.h"
31 #include "regs.h"
32 #include "hard-reg-set.h"
33 #include "insn-config.h"
34 #include "conditions.h"
35 #include "output.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "except.h"
39 #include "function.h"
40 #include "recog.h"
41 #include "expr.h"
42 #include "reload.h"
43 #include "diagnostic-core.h"
44 #include "basic-block.h"
45 #include "integrate.h"
46 #include "ggc.h"
47 #include "target.h"
48 #include "target-def.h"
49 #include "debug.h"
50 #include "langhooks.h"
51 #include "optabs.h"
52 #include "gimple.h"
53 #include "df.h"
54 #include "params.h"
55 #include "cfgloop.h"
56
57
58 /* Define the specific costs for a given cpu. */
59
60 struct processor_costs
61 {
62 /* multiplication */
63 const int m; /* cost of an M instruction. */
64 const int mghi; /* cost of an MGHI instruction. */
65 const int mh; /* cost of an MH instruction. */
66 const int mhi; /* cost of an MHI instruction. */
67 const int ml; /* cost of an ML instruction. */
68 const int mr; /* cost of an MR instruction. */
69 const int ms; /* cost of an MS instruction. */
70 const int msg; /* cost of an MSG instruction. */
71 const int msgf; /* cost of an MSGF instruction. */
72 const int msgfr; /* cost of an MSGFR instruction. */
73 const int msgr; /* cost of an MSGR instruction. */
74 const int msr; /* cost of an MSR instruction. */
75 const int mult_df; /* cost of multiplication in DFmode. */
 76 const int mxbr; /* cost of an MXBR instruction. */
77 /* square root */
78 const int sqxbr; /* cost of square root in TFmode. */
79 const int sqdbr; /* cost of square root in DFmode. */
80 const int sqebr; /* cost of square root in SFmode. */
81 /* multiply and add */
82 const int madbr; /* cost of multiply and add in DFmode. */
83 const int maebr; /* cost of multiply and add in SFmode. */
84 /* division */
 85 const int dxbr; /* cost of a DXBR instruction. */
 86 const int ddbr; /* cost of a DDBR instruction. */
 87 const int debr; /* cost of a DEBR instruction. */
 88 const int dlgr; /* cost of a DLGR instruction. */
 89 const int dlr; /* cost of a DLR instruction. */
 90 const int dr; /* cost of a DR instruction. */
 91 const int dsgfr; /* cost of a DSGFR instruction. */
 92 const int dsgr; /* cost of a DSGR instruction. */
93 };
94
95 const struct processor_costs *s390_cost;
96
97 static const
98 struct processor_costs z900_cost =
99 {
100 COSTS_N_INSNS (5), /* M */
101 COSTS_N_INSNS (10), /* MGHI */
102 COSTS_N_INSNS (5), /* MH */
103 COSTS_N_INSNS (4), /* MHI */
104 COSTS_N_INSNS (5), /* ML */
105 COSTS_N_INSNS (5), /* MR */
106 COSTS_N_INSNS (4), /* MS */
107 COSTS_N_INSNS (15), /* MSG */
108 COSTS_N_INSNS (7), /* MSGF */
109 COSTS_N_INSNS (7), /* MSGFR */
110 COSTS_N_INSNS (10), /* MSGR */
111 COSTS_N_INSNS (4), /* MSR */
112 COSTS_N_INSNS (7), /* multiplication in DFmode */
113 COSTS_N_INSNS (13), /* MXBR */
114 COSTS_N_INSNS (136), /* SQXBR */
115 COSTS_N_INSNS (44), /* SQDBR */
116 COSTS_N_INSNS (35), /* SQEBR */
117 COSTS_N_INSNS (18), /* MADBR */
118 COSTS_N_INSNS (13), /* MAEBR */
119 COSTS_N_INSNS (134), /* DXBR */
120 COSTS_N_INSNS (30), /* DDBR */
121 COSTS_N_INSNS (27), /* DEBR */
122 COSTS_N_INSNS (220), /* DLGR */
123 COSTS_N_INSNS (34), /* DLR */
124 COSTS_N_INSNS (34), /* DR */
125 COSTS_N_INSNS (32), /* DSGFR */
126 COSTS_N_INSNS (32), /* DSGR */
127 };
128
129 static const
130 struct processor_costs z990_cost =
131 {
132 COSTS_N_INSNS (4), /* M */
133 COSTS_N_INSNS (2), /* MGHI */
134 COSTS_N_INSNS (2), /* MH */
135 COSTS_N_INSNS (2), /* MHI */
136 COSTS_N_INSNS (4), /* ML */
137 COSTS_N_INSNS (4), /* MR */
138 COSTS_N_INSNS (5), /* MS */
139 COSTS_N_INSNS (6), /* MSG */
140 COSTS_N_INSNS (4), /* MSGF */
141 COSTS_N_INSNS (4), /* MSGFR */
142 COSTS_N_INSNS (4), /* MSGR */
143 COSTS_N_INSNS (4), /* MSR */
144 COSTS_N_INSNS (1), /* multiplication in DFmode */
145 COSTS_N_INSNS (28), /* MXBR */
146 COSTS_N_INSNS (130), /* SQXBR */
147 COSTS_N_INSNS (66), /* SQDBR */
148 COSTS_N_INSNS (38), /* SQEBR */
149 COSTS_N_INSNS (1), /* MADBR */
150 COSTS_N_INSNS (1), /* MAEBR */
151 COSTS_N_INSNS (60), /* DXBR */
152 COSTS_N_INSNS (40), /* DDBR */
153 COSTS_N_INSNS (26), /* DEBR */
154 COSTS_N_INSNS (176), /* DLGR */
155 COSTS_N_INSNS (31), /* DLR */
156 COSTS_N_INSNS (31), /* DR */
157 COSTS_N_INSNS (31), /* DSGFR */
158 COSTS_N_INSNS (31), /* DSGR */
159 };
160
161 static const
162 struct processor_costs z9_109_cost =
163 {
164 COSTS_N_INSNS (4), /* M */
165 COSTS_N_INSNS (2), /* MGHI */
166 COSTS_N_INSNS (2), /* MH */
167 COSTS_N_INSNS (2), /* MHI */
168 COSTS_N_INSNS (4), /* ML */
169 COSTS_N_INSNS (4), /* MR */
170 COSTS_N_INSNS (5), /* MS */
171 COSTS_N_INSNS (6), /* MSG */
172 COSTS_N_INSNS (4), /* MSGF */
173 COSTS_N_INSNS (4), /* MSGFR */
174 COSTS_N_INSNS (4), /* MSGR */
175 COSTS_N_INSNS (4), /* MSR */
176 COSTS_N_INSNS (1), /* multiplication in DFmode */
177 COSTS_N_INSNS (28), /* MXBR */
178 COSTS_N_INSNS (130), /* SQXBR */
179 COSTS_N_INSNS (66), /* SQDBR */
180 COSTS_N_INSNS (38), /* SQEBR */
181 COSTS_N_INSNS (1), /* MADBR */
182 COSTS_N_INSNS (1), /* MAEBR */
183 COSTS_N_INSNS (60), /* DXBR */
184 COSTS_N_INSNS (40), /* DDBR */
185 COSTS_N_INSNS (26), /* DEBR */
186 COSTS_N_INSNS (30), /* DLGR */
187 COSTS_N_INSNS (23), /* DLR */
188 COSTS_N_INSNS (23), /* DR */
189 COSTS_N_INSNS (24), /* DSGFR */
190 COSTS_N_INSNS (24), /* DSGR */
191 };
192
193 static const
194 struct processor_costs z10_cost =
195 {
196 COSTS_N_INSNS (10), /* M */
197 COSTS_N_INSNS (10), /* MGHI */
198 COSTS_N_INSNS (10), /* MH */
199 COSTS_N_INSNS (10), /* MHI */
200 COSTS_N_INSNS (10), /* ML */
201 COSTS_N_INSNS (10), /* MR */
202 COSTS_N_INSNS (10), /* MS */
203 COSTS_N_INSNS (10), /* MSG */
204 COSTS_N_INSNS (10), /* MSGF */
205 COSTS_N_INSNS (10), /* MSGFR */
206 COSTS_N_INSNS (10), /* MSGR */
207 COSTS_N_INSNS (10), /* MSR */
208 COSTS_N_INSNS (1) , /* multiplication in DFmode */
209 COSTS_N_INSNS (50), /* MXBR */
210 COSTS_N_INSNS (120), /* SQXBR */
211 COSTS_N_INSNS (52), /* SQDBR */
212 COSTS_N_INSNS (38), /* SQEBR */
213 COSTS_N_INSNS (1), /* MADBR */
214 COSTS_N_INSNS (1), /* MAEBR */
215 COSTS_N_INSNS (111), /* DXBR */
216 COSTS_N_INSNS (39), /* DDBR */
217 COSTS_N_INSNS (32), /* DEBR */
218 COSTS_N_INSNS (160), /* DLGR */
219 COSTS_N_INSNS (71), /* DLR */
220 COSTS_N_INSNS (71), /* DR */
221 COSTS_N_INSNS (71), /* DSGFR */
222 COSTS_N_INSNS (71), /* DSGR */
223 };
224
225 static const
226 struct processor_costs z196_cost =
227 {
228 COSTS_N_INSNS (7), /* M */
229 COSTS_N_INSNS (5), /* MGHI */
230 COSTS_N_INSNS (5), /* MH */
231 COSTS_N_INSNS (5), /* MHI */
232 COSTS_N_INSNS (7), /* ML */
233 COSTS_N_INSNS (7), /* MR */
234 COSTS_N_INSNS (6), /* MS */
235 COSTS_N_INSNS (8), /* MSG */
236 COSTS_N_INSNS (6), /* MSGF */
237 COSTS_N_INSNS (6), /* MSGFR */
238 COSTS_N_INSNS (8), /* MSGR */
239 COSTS_N_INSNS (6), /* MSR */
240 COSTS_N_INSNS (1) , /* multiplication in DFmode */
241 COSTS_N_INSNS (40), /* MXBR B+40 */
242 COSTS_N_INSNS (100), /* SQXBR B+100 */
243 COSTS_N_INSNS (42), /* SQDBR B+42 */
244 COSTS_N_INSNS (28), /* SQEBR B+28 */
245 COSTS_N_INSNS (1), /* MADBR B */
246 COSTS_N_INSNS (1), /* MAEBR B */
247 COSTS_N_INSNS (101), /* DXBR B+101 */
248 COSTS_N_INSNS (29), /* DDBR */
249 COSTS_N_INSNS (22), /* DEBR */
250 COSTS_N_INSNS (160), /* DLGR cracked */
251 COSTS_N_INSNS (160), /* DLR cracked */
252 COSTS_N_INSNS (160), /* DR expanded */
253 COSTS_N_INSNS (160), /* DSGFR cracked */
254 COSTS_N_INSNS (160), /* DSGR cracked */
255 };
256
257 extern int reload_completed;
258
259 /* Kept up to date using the SCHED_VARIABLE_ISSUE hook. */
260 static rtx last_scheduled_insn;
261
262 /* Structure used to hold the components of a S/390 memory
263 address. A legitimate address on S/390 is of the general
264 form
265 base + index + displacement
266 where any of the components is optional.
267
268 base and index are registers of the class ADDR_REGS,
269 displacement is an unsigned 12-bit immediate constant. */
270
271 struct s390_address
272 {
273 rtx base;
274 rtx indx;
275 rtx disp;
276 bool pointer;
277 bool literal_pool;
278 };
279
280 /* Which cpu are we tuning for. */
281 enum processor_type s390_tune = PROCESSOR_max;
282 int s390_tune_flags;
283 /* Which instruction set architecture to use. */
284 enum processor_type s390_arch;
285 int s390_arch_flags;
286
287 HOST_WIDE_INT s390_warn_framesize = 0;
288 HOST_WIDE_INT s390_stack_size = 0;
289 HOST_WIDE_INT s390_stack_guard = 0;
290
291 /* The following structure is embedded in the machine
292 specific part of struct function. */
293
294 struct GTY (()) s390_frame_layout
295 {
296 /* Offset within stack frame. */
297 HOST_WIDE_INT gprs_offset;
298 HOST_WIDE_INT f0_offset;
299 HOST_WIDE_INT f4_offset;
300 HOST_WIDE_INT f8_offset;
301 HOST_WIDE_INT backchain_offset;
302
 303 /* Numbers of the first and last GPRs for which slots in the
 304 register save area are reserved. */
305 int first_save_gpr_slot;
306 int last_save_gpr_slot;
307
308 /* Number of first and last gpr to be saved, restored. */
309 int first_save_gpr;
310 int first_restore_gpr;
311 int last_save_gpr;
312 int last_restore_gpr;
313
314 /* Bits standing for floating point registers. Set, if the
315 respective register has to be saved. Starting with reg 16 (f0)
316 at the rightmost bit.
317 Bit 15 - 8 7 6 5 4 3 2 1 0
318 fpr 15 - 8 7 5 3 1 6 4 2 0
319 reg 31 - 24 23 22 21 20 19 18 17 16 */
320 unsigned int fpr_bitmap;
321
322 /* Number of floating point registers f8-f15 which must be saved. */
323 int high_fprs;
324
325 /* Set if return address needs to be saved.
326 This flag is set by s390_return_addr_rtx if it could not use
327 the initial value of r14 and therefore depends on r14 saved
328 to the stack. */
329 bool save_return_addr_p;
330
331 /* Size of stack frame. */
332 HOST_WIDE_INT frame_size;
333 };
334
335 /* Define the structure for the machine field in struct function. */
336
337 struct GTY(()) machine_function
338 {
339 struct s390_frame_layout frame_layout;
340
341 /* Literal pool base register. */
342 rtx base_reg;
343
344 /* True if we may need to perform branch splitting. */
345 bool split_branches_pending_p;
346
347 /* Some local-dynamic TLS symbol name. */
348 const char *some_ld_name;
349
350 bool has_landing_pad_p;
351 };
352
 353 /* A few accessor macros for struct cfun->machine->s390_frame_layout. */
354
355 #define cfun_frame_layout (cfun->machine->frame_layout)
356 #define cfun_save_high_fprs_p (!!cfun_frame_layout.high_fprs)
357 #define cfun_gprs_save_area_size ((cfun_frame_layout.last_save_gpr_slot - \
358 cfun_frame_layout.first_save_gpr_slot + 1) * UNITS_PER_LONG)
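/* BITNUM follows the fpr_bitmap numbering documented in struct
   s390_frame_layout above (bit 0 = f0, counting from the rightmost bit). */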
359 #define cfun_set_fpr_bit(BITNUM) (cfun->machine->frame_layout.fpr_bitmap |= \
360 (1 << (BITNUM)))
361 #define cfun_fpr_bit_p(BITNUM) (!!(cfun->machine->frame_layout.fpr_bitmap & \
362 (1 << (BITNUM))))
363
364 /* Number of GPRs and FPRs used for argument passing. */
365 #define GP_ARG_NUM_REG 5
366 #define FP_ARG_NUM_REG (TARGET_64BIT? 4 : 2)
367
368 /* A couple of shortcuts. */
369 #define CONST_OK_FOR_J(x) \
370 CONST_OK_FOR_CONSTRAINT_P((x), 'J', "J")
371 #define CONST_OK_FOR_K(x) \
372 CONST_OK_FOR_CONSTRAINT_P((x), 'K', "K")
373 #define CONST_OK_FOR_Os(x) \
374 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
375 #define CONST_OK_FOR_Op(x) \
376 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
377 #define CONST_OK_FOR_On(x) \
378 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")
379
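/* Accept REGNO for MODE if the value occupies a single register, or
   otherwise only if REGNO is even, so that a legal even/odd register
   pair can be used. */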
380 #define REGNO_PAIR_OK(REGNO, MODE) \
381 (HARD_REGNO_NREGS ((REGNO), (MODE)) == 1 || !((REGNO) & 1))
382
383 /* That's the read ahead of the dynamic branch prediction unit in
384 bytes on a z10 (or higher) CPU. */
385 #define PREDICT_DISTANCE (TARGET_Z10 ? 384 : 2048)
386
387 /* Return the alignment for LABEL. We default to the -falign-labels
388 value except for the literal pool base label. */
389 int
390 s390_label_align (rtx label)
391 {
392 rtx prev_insn = prev_active_insn (label);
393
394 if (prev_insn == NULL_RTX)
395 goto old;
396
397 prev_insn = single_set (prev_insn);
398
399 if (prev_insn == NULL_RTX)
400 goto old;
401
402 prev_insn = SET_SRC (prev_insn);
403
404 /* Don't align literal pool base labels. */
405 if (GET_CODE (prev_insn) == UNSPEC
406 && XINT (prev_insn, 1) == UNSPEC_MAIN_BASE)
407 return 0;
408
409 old:
410 return align_labels_log;
411 }
412
413 static enum machine_mode
414 s390_libgcc_cmp_return_mode (void)
415 {
416 return TARGET_64BIT ? DImode : SImode;
417 }
418
419 static enum machine_mode
420 s390_libgcc_shift_count_mode (void)
421 {
422 return TARGET_64BIT ? DImode : SImode;
423 }
424
425 static enum machine_mode
426 s390_unwind_word_mode (void)
427 {
428 return TARGET_64BIT ? DImode : SImode;
429 }
430
431 /* Return true if the back end supports mode MODE. */
432 static bool
433 s390_scalar_mode_supported_p (enum machine_mode mode)
434 {
435 /* In contrast to the default implementation reject TImode constants on 31bit
436 TARGET_ZARCH for ABI compliance. */
437 if (!TARGET_64BIT && TARGET_ZARCH && mode == TImode)
438 return false;
439
440 if (DECIMAL_FLOAT_MODE_P (mode))
441 return default_decimal_float_supported_p ();
442
443 return default_scalar_mode_supported_p (mode);
444 }
445
446 /* Set the has_landing_pad_p flag in struct machine_function to VALUE. */
447
448 void
449 s390_set_has_landing_pad_p (bool value)
450 {
451 cfun->machine->has_landing_pad_p = value;
452 }
453
454 /* If two condition code modes are compatible, return a condition code
455 mode which is compatible with both. Otherwise, return
456 VOIDmode. */
457
458 static enum machine_mode
459 s390_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
460 {
461 if (m1 == m2)
462 return m1;
463
464 switch (m1)
465 {
466 case CCZmode:
467 if (m2 == CCUmode || m2 == CCTmode || m2 == CCZ1mode
468 || m2 == CCSmode || m2 == CCSRmode || m2 == CCURmode)
469 return m2;
470 return VOIDmode;
471
472 case CCSmode:
473 case CCUmode:
474 case CCTmode:
475 case CCSRmode:
476 case CCURmode:
477 case CCZ1mode:
478 if (m2 == CCZmode)
479 return m1;
480
481 return VOIDmode;
482
483 default:
484 return VOIDmode;
485 }
486 return VOIDmode;
487 }
488
489 /* Return true if SET either doesn't set the CC register, or else
490 the source and destination have matching CC modes and that
491 CC mode is at least as constrained as REQ_MODE. */
492
493 static bool
494 s390_match_ccmode_set (rtx set, enum machine_mode req_mode)
495 {
496 enum machine_mode set_mode;
497
498 gcc_assert (GET_CODE (set) == SET);
499
500 if (GET_CODE (SET_DEST (set)) != REG || !CC_REGNO_P (REGNO (SET_DEST (set))))
501 return 1;
502
503 set_mode = GET_MODE (SET_DEST (set));
504 switch (set_mode)
505 {
506 case CCSmode:
507 case CCSRmode:
508 case CCUmode:
509 case CCURmode:
510 case CCLmode:
511 case CCL1mode:
512 case CCL2mode:
513 case CCL3mode:
514 case CCT1mode:
515 case CCT2mode:
516 case CCT3mode:
517 if (req_mode != set_mode)
518 return 0;
519 break;
520
521 case CCZmode:
522 if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode
523 && req_mode != CCSRmode && req_mode != CCURmode)
524 return 0;
525 break;
526
527 case CCAPmode:
528 case CCANmode:
529 if (req_mode != CCAmode)
530 return 0;
531 break;
532
533 default:
534 gcc_unreachable ();
535 }
536
537 return (GET_MODE (SET_SRC (set)) == set_mode);
538 }
539
540 /* Return true if every SET in INSN that sets the CC register
541 has source and destination with matching CC modes and that
542 CC mode is at least as constrained as REQ_MODE.
543 If REQ_MODE is VOIDmode, always return false. */
544
545 bool
546 s390_match_ccmode (rtx insn, enum machine_mode req_mode)
547 {
548 int i;
549
550 /* s390_tm_ccmode returns VOIDmode to indicate failure. */
551 if (req_mode == VOIDmode)
552 return false;
553
554 if (GET_CODE (PATTERN (insn)) == SET)
555 return s390_match_ccmode_set (PATTERN (insn), req_mode);
556
557 if (GET_CODE (PATTERN (insn)) == PARALLEL)
558 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
559 {
560 rtx set = XVECEXP (PATTERN (insn), 0, i);
561 if (GET_CODE (set) == SET)
562 if (!s390_match_ccmode_set (set, req_mode))
563 return false;
564 }
565
566 return true;
567 }
568
569 /* If a test-under-mask instruction can be used to implement
570 (compare (and ... OP1) OP2), return the CC mode required
571 to do that. Otherwise, return VOIDmode.
572 MIXED is true if the instruction can distinguish between
573 CC1 and CC2 for mixed selected bits (TMxx), it is false
574 if the instruction cannot (TM). */
575
576 enum machine_mode
577 s390_tm_ccmode (rtx op1, rtx op2, bool mixed)
578 {
579 int bit0, bit1;
580
581 /* ??? Fixme: should work on CONST_DOUBLE as well. */
582 if (GET_CODE (op1) != CONST_INT || GET_CODE (op2) != CONST_INT)
583 return VOIDmode;
584
585 /* Selected bits all zero: CC0.
586 e.g.: int a; if ((a & (16 + 128)) == 0) */
587 if (INTVAL (op2) == 0)
588 return CCTmode;
589
590 /* Selected bits all one: CC3.
591 e.g.: int a; if ((a & (16 + 128)) == 16 + 128) */
592 if (INTVAL (op2) == INTVAL (op1))
593 return CCT3mode;
594
595 /* Exactly two bits selected, mixed zeroes and ones: CC1 or CC2. e.g.:
596 int a;
597 if ((a & (16 + 128)) == 16) -> CCT1
598 if ((a & (16 + 128)) == 128) -> CCT2 */
599 if (mixed)
600 {
601 bit1 = exact_log2 (INTVAL (op2));
602 bit0 = exact_log2 (INTVAL (op1) ^ INTVAL (op2));
603 if (bit0 != -1 && bit1 != -1)
604 return bit0 > bit1 ? CCT1mode : CCT2mode;
605 }
606
607 return VOIDmode;
608 }
609
610 /* Given a comparison code OP (EQ, NE, etc.) and the operands
611 OP0 and OP1 of a COMPARE, return the mode to be used for the
612 comparison. */
613
614 enum machine_mode
615 s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
616 {
617 switch (code)
618 {
619 case EQ:
620 case NE:
621 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
622 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
623 return CCAPmode;
624 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
625 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
626 return CCAPmode;
627 if ((GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
628 || GET_CODE (op1) == NEG)
629 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
630 return CCLmode;
631
632 if (GET_CODE (op0) == AND)
633 {
634 /* Check whether we can potentially do it via TM. */
635 enum machine_mode ccmode;
636 ccmode = s390_tm_ccmode (XEXP (op0, 1), op1, 1);
637 if (ccmode != VOIDmode)
638 {
639 /* Relax CCTmode to CCZmode to allow fall-back to AND
640 if that turns out to be beneficial. */
641 return ccmode == CCTmode ? CCZmode : ccmode;
642 }
643 }
644
645 if (register_operand (op0, HImode)
646 && GET_CODE (op1) == CONST_INT
647 && (INTVAL (op1) == -1 || INTVAL (op1) == 65535))
648 return CCT3mode;
649 if (register_operand (op0, QImode)
650 && GET_CODE (op1) == CONST_INT
651 && (INTVAL (op1) == -1 || INTVAL (op1) == 255))
652 return CCT3mode;
653
654 return CCZmode;
655
656 case LE:
657 case LT:
658 case GE:
659 case GT:
660 /* The only overflow condition of NEG and ABS happens when
661 -INT_MAX is used as parameter, which stays negative. So
662 we have an overflow from a positive value to a negative.
663 Using CCAP mode the resulting cc can be used for comparisons. */
664 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
665 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
666 return CCAPmode;
667
668 /* If constants are involved in an add instruction it is possible to use
669 the resulting cc for comparisons with zero. Knowing the sign of the
670 constant the overflow behavior gets predictable. e.g.:
671 int a, b; if ((b = a + c) > 0)
672 with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP */
673 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
674 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
675 {
676 if (INTVAL (XEXP((op0), 1)) < 0)
677 return CCANmode;
678 else
679 return CCAPmode;
680 }
681 /* Fall through. */
682 case UNORDERED:
683 case ORDERED:
684 case UNEQ:
685 case UNLE:
686 case UNLT:
687 case UNGE:
688 case UNGT:
689 case LTGT:
690 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
691 && GET_CODE (op1) != CONST_INT)
692 return CCSRmode;
693 return CCSmode;
694
695 case LTU:
696 case GEU:
697 if (GET_CODE (op0) == PLUS
698 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
699 return CCL1mode;
700
701 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
702 && GET_CODE (op1) != CONST_INT)
703 return CCURmode;
704 return CCUmode;
705
706 case LEU:
707 case GTU:
708 if (GET_CODE (op0) == MINUS
709 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
710 return CCL2mode;
711
712 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
713 && GET_CODE (op1) != CONST_INT)
714 return CCURmode;
715 return CCUmode;
716
717 default:
718 gcc_unreachable ();
719 }
720 }
721
722 /* Replace the comparison OP0 CODE OP1 by a semantically equivalent one
723 that we can implement more efficiently. */
724
725 void
726 s390_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1)
727 {
728 /* Convert ZERO_EXTRACT back to AND to enable TM patterns. */
729 if ((*code == EQ || *code == NE)
730 && *op1 == const0_rtx
731 && GET_CODE (*op0) == ZERO_EXTRACT
732 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
733 && GET_CODE (XEXP (*op0, 2)) == CONST_INT
734 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
735 {
736 rtx inner = XEXP (*op0, 0);
737 HOST_WIDE_INT modesize = GET_MODE_BITSIZE (GET_MODE (inner));
738 HOST_WIDE_INT len = INTVAL (XEXP (*op0, 1));
739 HOST_WIDE_INT pos = INTVAL (XEXP (*op0, 2));
740
741 if (len > 0 && len < modesize
742 && pos >= 0 && pos + len <= modesize
743 && modesize <= HOST_BITS_PER_WIDE_INT)
744 {
745 unsigned HOST_WIDE_INT block;
746 block = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
747 block <<= modesize - pos - len;
748
749 *op0 = gen_rtx_AND (GET_MODE (inner), inner,
750 gen_int_mode (block, GET_MODE (inner)));
751 }
752 }
753
754 /* Narrow AND of memory against immediate to enable TM. */
755 if ((*code == EQ || *code == NE)
756 && *op1 == const0_rtx
757 && GET_CODE (*op0) == AND
758 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
759 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
760 {
761 rtx inner = XEXP (*op0, 0);
762 rtx mask = XEXP (*op0, 1);
763
764 /* Ignore paradoxical SUBREGs if all extra bits are masked out. */
765 if (GET_CODE (inner) == SUBREG
766 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (inner)))
767 && (GET_MODE_SIZE (GET_MODE (inner))
768 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
769 && ((INTVAL (mask)
770 & GET_MODE_MASK (GET_MODE (inner))
771 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
772 == 0))
773 inner = SUBREG_REG (inner);
774
775 /* Do not change volatile MEMs. */
776 if (MEM_P (inner) && !MEM_VOLATILE_P (inner))
777 {
778 int part = s390_single_part (XEXP (*op0, 1),
779 GET_MODE (inner), QImode, 0);
780 if (part >= 0)
781 {
782 mask = gen_int_mode (s390_extract_part (mask, QImode, 0), QImode);
783 inner = adjust_address_nv (inner, QImode, part);
784 *op0 = gen_rtx_AND (QImode, inner, mask);
785 }
786 }
787 }
788
789 /* Narrow comparisons against 0xffff to HImode if possible. */
790 if ((*code == EQ || *code == NE)
791 && GET_CODE (*op1) == CONST_INT
792 && INTVAL (*op1) == 0xffff
793 && SCALAR_INT_MODE_P (GET_MODE (*op0))
794 && (nonzero_bits (*op0, GET_MODE (*op0))
795 & ~(unsigned HOST_WIDE_INT) 0xffff) == 0)
796 {
797 *op0 = gen_lowpart (HImode, *op0);
798 *op1 = constm1_rtx;
799 }
800
801 /* Remove redundant UNSPEC_CCU_TO_INT conversions if possible. */
802 if (GET_CODE (*op0) == UNSPEC
803 && XINT (*op0, 1) == UNSPEC_CCU_TO_INT
804 && XVECLEN (*op0, 0) == 1
805 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCUmode
806 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
807 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
808 && *op1 == const0_rtx)
809 {
810 enum rtx_code new_code = UNKNOWN;
811 switch (*code)
812 {
813 case EQ: new_code = EQ; break;
814 case NE: new_code = NE; break;
815 case LT: new_code = GTU; break;
816 case GT: new_code = LTU; break;
817 case LE: new_code = GEU; break;
818 case GE: new_code = LEU; break;
819 default: break;
820 }
821
822 if (new_code != UNKNOWN)
823 {
824 *op0 = XVECEXP (*op0, 0, 0);
825 *code = new_code;
826 }
827 }
828
829 /* Remove redundant UNSPEC_CCZ_TO_INT conversions if possible. */
830 if (GET_CODE (*op0) == UNSPEC
831 && XINT (*op0, 1) == UNSPEC_CCZ_TO_INT
832 && XVECLEN (*op0, 0) == 1
833 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCZmode
834 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
835 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
836 && *op1 == const0_rtx)
837 {
838 enum rtx_code new_code = UNKNOWN;
839 switch (*code)
840 {
841 case EQ: new_code = EQ; break;
842 case NE: new_code = NE; break;
843 default: break;
844 }
845
846 if (new_code != UNKNOWN)
847 {
848 *op0 = XVECEXP (*op0, 0, 0);
849 *code = new_code;
850 }
851 }
852
853 /* Simplify cascaded EQ, NE with const0_rtx. */
854 if ((*code == NE || *code == EQ)
855 && (GET_CODE (*op0) == EQ || GET_CODE (*op0) == NE)
856 && GET_MODE (*op0) == SImode
857 && GET_MODE (XEXP (*op0, 0)) == CCZ1mode
858 && REG_P (XEXP (*op0, 0))
859 && XEXP (*op0, 1) == const0_rtx
860 && *op1 == const0_rtx)
861 {
862 if ((*code == EQ && GET_CODE (*op0) == NE)
863 || (*code == NE && GET_CODE (*op0) == EQ))
864 *code = EQ;
865 else
866 *code = NE;
867 *op0 = XEXP (*op0, 0);
868 }
869
870 /* Prefer register over memory as first operand. */
871 if (MEM_P (*op0) && REG_P (*op1))
872 {
873 rtx tem = *op0; *op0 = *op1; *op1 = tem;
874 *code = swap_condition (*code);
875 }
876 }
877
878 /* Emit a compare instruction suitable to implement the comparison
879 OP0 CODE OP1. Return the correct condition RTL to be placed in
880 the IF_THEN_ELSE of the conditional branch testing the result. */
881
882 rtx
883 s390_emit_compare (enum rtx_code code, rtx op0, rtx op1)
884 {
885 enum machine_mode mode = s390_select_ccmode (code, op0, op1);
886 rtx cc;
887
888 /* Do not output a redundant compare instruction if a compare_and_swap
889 pattern already computed the result and the machine modes are compatible. */
890 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
891 {
892 gcc_assert (s390_cc_modes_compatible (GET_MODE (op0), mode)
893 == GET_MODE (op0));
894 cc = op0;
895 }
896 else
897 {
898 cc = gen_rtx_REG (mode, CC_REGNUM);
899 emit_insn (gen_rtx_SET (VOIDmode, cc, gen_rtx_COMPARE (mode, op0, op1)));
900 }
901
902 return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
903 }
904
905 /* Emit a SImode compare and swap instruction setting MEM to NEW_RTX if OLD
906 matches CMP.
907 Return the correct condition RTL to be placed in the IF_THEN_ELSE of the
908 conditional branch testing the result. */
909
910 static rtx
911 s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem, rtx cmp, rtx new_rtx)
912 {
913 emit_insn (gen_sync_compare_and_swapsi (old, mem, cmp, new_rtx));
914 return s390_emit_compare (code, gen_rtx_REG (CCZ1mode, CC_REGNUM), const0_rtx);
915 }
916
917 /* Emit a jump instruction to TARGET. If COND is NULL_RTX, emit an
918 unconditional jump, else a conditional jump under condition COND. */
919
920 void
921 s390_emit_jump (rtx target, rtx cond)
922 {
923 rtx insn;
924
925 target = gen_rtx_LABEL_REF (VOIDmode, target);
926 if (cond)
927 target = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, target, pc_rtx);
928
929 insn = gen_rtx_SET (VOIDmode, pc_rtx, target);
930 emit_jump_insn (insn);
931 }
932
933 /* Return branch condition mask to implement a branch
934 specified by CODE. Return -1 for invalid comparisons. */
935
936 int
937 s390_branch_condition_mask (rtx code)
938 {
939 const int CC0 = 1 << 3;
940 const int CC1 = 1 << 2;
941 const int CC2 = 1 << 1;
942 const int CC3 = 1 << 0;
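 /* These bits form the 4-bit condition mask of a branch-on-condition
    instruction, with CC0 in the leftmost (most significant) position. */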
943
944 gcc_assert (GET_CODE (XEXP (code, 0)) == REG);
945 gcc_assert (REGNO (XEXP (code, 0)) == CC_REGNUM);
946 gcc_assert (XEXP (code, 1) == const0_rtx);
947
948 switch (GET_MODE (XEXP (code, 0)))
949 {
950 case CCZmode:
951 case CCZ1mode:
952 switch (GET_CODE (code))
953 {
954 case EQ: return CC0;
955 case NE: return CC1 | CC2 | CC3;
956 default: return -1;
957 }
958 break;
959
960 case CCT1mode:
961 switch (GET_CODE (code))
962 {
963 case EQ: return CC1;
964 case NE: return CC0 | CC2 | CC3;
965 default: return -1;
966 }
967 break;
968
969 case CCT2mode:
970 switch (GET_CODE (code))
971 {
972 case EQ: return CC2;
973 case NE: return CC0 | CC1 | CC3;
974 default: return -1;
975 }
976 break;
977
978 case CCT3mode:
979 switch (GET_CODE (code))
980 {
981 case EQ: return CC3;
982 case NE: return CC0 | CC1 | CC2;
983 default: return -1;
984 }
985 break;
986
987 case CCLmode:
988 switch (GET_CODE (code))
989 {
990 case EQ: return CC0 | CC2;
991 case NE: return CC1 | CC3;
992 default: return -1;
993 }
994 break;
995
996 case CCL1mode:
997 switch (GET_CODE (code))
998 {
999 case LTU: return CC2 | CC3; /* carry */
1000 case GEU: return CC0 | CC1; /* no carry */
1001 default: return -1;
1002 }
1003 break;
1004
1005 case CCL2mode:
1006 switch (GET_CODE (code))
1007 {
1008 case GTU: return CC0 | CC1; /* borrow */
1009 case LEU: return CC2 | CC3; /* no borrow */
1010 default: return -1;
1011 }
1012 break;
1013
1014 case CCL3mode:
1015 switch (GET_CODE (code))
1016 {
1017 case EQ: return CC0 | CC2;
1018 case NE: return CC1 | CC3;
1019 case LTU: return CC1;
1020 case GTU: return CC3;
1021 case LEU: return CC1 | CC2;
1022 case GEU: return CC2 | CC3;
1023 default: return -1;
1024 }
1025
1026 case CCUmode:
1027 switch (GET_CODE (code))
1028 {
1029 case EQ: return CC0;
1030 case NE: return CC1 | CC2 | CC3;
1031 case LTU: return CC1;
1032 case GTU: return CC2;
1033 case LEU: return CC0 | CC1;
1034 case GEU: return CC0 | CC2;
1035 default: return -1;
1036 }
1037 break;
1038
1039 case CCURmode:
1040 switch (GET_CODE (code))
1041 {
1042 case EQ: return CC0;
1043 case NE: return CC2 | CC1 | CC3;
1044 case LTU: return CC2;
1045 case GTU: return CC1;
1046 case LEU: return CC0 | CC2;
1047 case GEU: return CC0 | CC1;
1048 default: return -1;
1049 }
1050 break;
1051
1052 case CCAPmode:
1053 switch (GET_CODE (code))
1054 {
1055 case EQ: return CC0;
1056 case NE: return CC1 | CC2 | CC3;
1057 case LT: return CC1 | CC3;
1058 case GT: return CC2;
1059 case LE: return CC0 | CC1 | CC3;
1060 case GE: return CC0 | CC2;
1061 default: return -1;
1062 }
1063 break;
1064
1065 case CCANmode:
1066 switch (GET_CODE (code))
1067 {
1068 case EQ: return CC0;
1069 case NE: return CC1 | CC2 | CC3;
1070 case LT: return CC1;
1071 case GT: return CC2 | CC3;
1072 case LE: return CC0 | CC1;
1073 case GE: return CC0 | CC2 | CC3;
1074 default: return -1;
1075 }
1076 break;
1077
1078 case CCSmode:
1079 switch (GET_CODE (code))
1080 {
1081 case EQ: return CC0;
1082 case NE: return CC1 | CC2 | CC3;
1083 case LT: return CC1;
1084 case GT: return CC2;
1085 case LE: return CC0 | CC1;
1086 case GE: return CC0 | CC2;
1087 case UNORDERED: return CC3;
1088 case ORDERED: return CC0 | CC1 | CC2;
1089 case UNEQ: return CC0 | CC3;
1090 case UNLT: return CC1 | CC3;
1091 case UNGT: return CC2 | CC3;
1092 case UNLE: return CC0 | CC1 | CC3;
1093 case UNGE: return CC0 | CC2 | CC3;
1094 case LTGT: return CC1 | CC2;
1095 default: return -1;
1096 }
1097 break;
1098
1099 case CCSRmode:
1100 switch (GET_CODE (code))
1101 {
1102 case EQ: return CC0;
1103 case NE: return CC2 | CC1 | CC3;
1104 case LT: return CC2;
1105 case GT: return CC1;
1106 case LE: return CC0 | CC2;
1107 case GE: return CC0 | CC1;
1108 case UNORDERED: return CC3;
1109 case ORDERED: return CC0 | CC2 | CC1;
1110 case UNEQ: return CC0 | CC3;
1111 case UNLT: return CC2 | CC3;
1112 case UNGT: return CC1 | CC3;
1113 case UNLE: return CC0 | CC2 | CC3;
1114 case UNGE: return CC0 | CC1 | CC3;
1115 case LTGT: return CC2 | CC1;
1116 default: return -1;
1117 }
1118 break;
1119
1120 default:
1121 return -1;
1122 }
1123 }
1124
1125
1126 /* Return branch condition mask to implement a compare and branch
1127 specified by CODE. Return -1 for invalid comparisons. */
1128
1129 int
1130 s390_compare_and_branch_condition_mask (rtx code)
1131 {
1132 const int CC0 = 1 << 3;
1133 const int CC1 = 1 << 2;
1134 const int CC2 = 1 << 1;
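 /* Compare-and-branch instructions distinguish only the equal, low and
    high outcomes, so no CC3 bit is needed here. */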
1135
1136 switch (GET_CODE (code))
1137 {
1138 case EQ:
1139 return CC0;
1140 case NE:
1141 return CC1 | CC2;
1142 case LT:
1143 case LTU:
1144 return CC1;
1145 case GT:
1146 case GTU:
1147 return CC2;
1148 case LE:
1149 case LEU:
1150 return CC0 | CC1;
1151 case GE:
1152 case GEU:
1153 return CC0 | CC2;
1154 default:
1155 gcc_unreachable ();
1156 }
1157 return -1;
1158 }
1159
1160 /* If INV is false, return assembler mnemonic string to implement
1161 a branch specified by CODE. If INV is true, return mnemonic
1162 for the corresponding inverted branch. */
1163
1164 static const char *
1165 s390_branch_condition_mnemonic (rtx code, int inv)
1166 {
1167 int mask;
1168
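 /* Branch mnemonics indexed by the 4-bit condition mask; masks 0
    (branch never) and 15 (branch always) are not handled here, hence
    the NULL entries. */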
1169 static const char *const mnemonic[16] =
1170 {
1171 NULL, "o", "h", "nle",
1172 "l", "nhe", "lh", "ne",
1173 "e", "nlh", "he", "nl",
1174 "le", "nh", "no", NULL
1175 };
1176
1177 if (GET_CODE (XEXP (code, 0)) == REG
1178 && REGNO (XEXP (code, 0)) == CC_REGNUM
1179 && XEXP (code, 1) == const0_rtx)
1180 mask = s390_branch_condition_mask (code);
1181 else
1182 mask = s390_compare_and_branch_condition_mask (code);
1183
1184 gcc_assert (mask >= 0);
1185
1186 if (inv)
1187 mask ^= 15;
1188
1189 gcc_assert (mask >= 1 && mask <= 14);
1190
1191 return mnemonic[mask];
1192 }
1193
1194 /* Return the part of op which has a value different from def.
1195 The size of the part is determined by mode.
1196 Use this function only if you already know that op really
1197 contains such a part. */
1198
1199 unsigned HOST_WIDE_INT
1200 s390_extract_part (rtx op, enum machine_mode mode, int def)
1201 {
1202 unsigned HOST_WIDE_INT value = 0;
1203 int max_parts = HOST_BITS_PER_WIDE_INT / GET_MODE_BITSIZE (mode);
1204 int part_bits = GET_MODE_BITSIZE (mode);
1205 unsigned HOST_WIDE_INT part_mask
1206 = ((unsigned HOST_WIDE_INT)1 << part_bits) - 1;
1207 int i;
1208
1209 for (i = 0; i < max_parts; i++)
1210 {
1211 if (i == 0)
1212 value = (unsigned HOST_WIDE_INT) INTVAL (op);
1213 else
1214 value >>= part_bits;
1215
1216 if ((value & part_mask) != (def & part_mask))
1217 return value & part_mask;
1218 }
1219
1220 gcc_unreachable ();
1221 }
1222
1223 /* If OP is an integer constant of mode MODE with exactly one
1224 part of mode PART_MODE unequal to DEF, return the number of that
1225 part. Otherwise, return -1. */
1226
1227 int
1228 s390_single_part (rtx op,
1229 enum machine_mode mode,
1230 enum machine_mode part_mode,
1231 int def)
1232 {
1233 unsigned HOST_WIDE_INT value = 0;
1234 int n_parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (part_mode);
1235 unsigned HOST_WIDE_INT part_mask
1236 = ((unsigned HOST_WIDE_INT)1 << GET_MODE_BITSIZE (part_mode)) - 1;
1237 int i, part = -1;
1238
1239 if (GET_CODE (op) != CONST_INT)
1240 return -1;
1241
1242 for (i = 0; i < n_parts; i++)
1243 {
1244 if (i == 0)
1245 value = (unsigned HOST_WIDE_INT) INTVAL (op);
1246 else
1247 value >>= GET_MODE_BITSIZE (part_mode);
1248
1249 if ((value & part_mask) != (def & part_mask))
1250 {
1251 if (part != -1)
1252 return -1;
1253 else
1254 part = i;
1255 }
1256 }
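 /* The loop numbers parts starting from the least significant one;
    convert so that part 0 refers to the most significant part. */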
1257 return part == -1 ? -1 : n_parts - 1 - part;
1258 }
1259
1260 /* Return true if IN contains a contiguous bitfield in the lower SIZE
1261 bits and no other bits are set in IN. POS and LENGTH can be used
1262 to obtain the start position and the length of the bitfield.
1263
1264 POS gives the position of the first bit of the bitfield counting
1265 from the lowest order bit starting with zero. In order to use this
1266 value for S/390 instructions this has to be converted to "bits big
1267 endian" style. */
1268
1269 bool
1270 s390_contiguous_bitmask_p (unsigned HOST_WIDE_INT in, int size,
1271 int *pos, int *length)
1272 {
1273 int tmp_pos = 0;
1274 int tmp_length = 0;
1275 int i;
1276 unsigned HOST_WIDE_INT mask = 1ULL;
1277 bool contiguous = false;
1278
1279 for (i = 0; i < size; mask <<= 1, i++)
1280 {
1281 if (contiguous)
1282 {
1283 if (mask & in)
1284 tmp_length++;
1285 else
1286 break;
1287 }
1288 else
1289 {
1290 if (mask & in)
1291 {
1292 contiguous = true;
1293 tmp_length++;
1294 }
1295 else
1296 tmp_pos++;
1297 }
1298 }
1299
1300 if (!tmp_length)
1301 return false;
1302
1303 /* Calculate a mask for all bits beyond the contiguous bits. */
1304 mask = (-1LL & ~(((1ULL << (tmp_length + tmp_pos - 1)) << 1) - 1));
1305
1306 if (mask & in)
1307 return false;
1308
1309 if (tmp_length + tmp_pos - 1 > size)
1310 return false;
1311
1312 if (length)
1313 *length = tmp_length;
1314
1315 if (pos)
1316 *pos = tmp_pos;
1317
1318 return true;
1319 }
1320
1321 /* Check whether we can (and want to) split a double-word
1322 move in mode MODE from SRC to DST into two single-word
1323 moves, moving the subword FIRST_SUBWORD first. */
1324
1325 bool
1326 s390_split_ok_p (rtx dst, rtx src, enum machine_mode mode, int first_subword)
1327 {
1328 /* Floating point registers cannot be split. */
1329 if (FP_REG_P (src) || FP_REG_P (dst))
1330 return false;
1331
1332 /* We don't need to split if operands are directly accessible. */
1333 if (s_operand (src, mode) || s_operand (dst, mode))
1334 return false;
1335
1336 /* Non-offsettable memory references cannot be split. */
1337 if ((GET_CODE (src) == MEM && !offsettable_memref_p (src))
1338 || (GET_CODE (dst) == MEM && !offsettable_memref_p (dst)))
1339 return false;
1340
1341 /* Moving the first subword must not clobber a register
1342 needed to move the second subword. */
1343 if (register_operand (dst, mode))
1344 {
1345 rtx subreg = operand_subword (dst, first_subword, 0, mode);
1346 if (reg_overlap_mentioned_p (subreg, src))
1347 return false;
1348 }
1349
1350 return true;
1351 }
1352
1353 /* Return true if it can be proven that [MEM1, MEM1 + SIZE]
1354 and [MEM2, MEM2 + SIZE] do overlap and false
1355 otherwise. */
1356
1357 bool
1358 s390_overlap_p (rtx mem1, rtx mem2, HOST_WIDE_INT size)
1359 {
1360 rtx addr1, addr2, addr_delta;
1361 HOST_WIDE_INT delta;
1362
1363 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
1364 return true;
1365
1366 if (size == 0)
1367 return false;
1368
1369 addr1 = XEXP (mem1, 0);
1370 addr2 = XEXP (mem2, 0);
1371
1372 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
1373
1374 /* This overlapping check is used by peepholes merging memory block operations.
1375 Overlapping operations would otherwise be recognized by the S/390 hardware
1376 and would fall back to a slower implementation. Allowing overlapping
1377 operations would lead to slow code but not to wrong code. Therefore we are
1378 somewhat optimistic if we cannot prove that the memory blocks are
1379 overlapping.
1380 That's why we return false here although this may accept operations on
1381 overlapping memory areas. */
1382 if (!addr_delta || GET_CODE (addr_delta) != CONST_INT)
1383 return false;
1384
1385 delta = INTVAL (addr_delta);
1386
1387 if (delta == 0
1388 || (delta > 0 && delta < size)
1389 || (delta < 0 && -delta < size))
1390 return true;
1391
1392 return false;
1393 }
1394
1395 /* Check whether the address of memory reference MEM2 equals exactly
1396 the address of memory reference MEM1 plus DELTA. Return true if
1397 we can prove this to be the case, false otherwise. */
1398
1399 bool
1400 s390_offset_p (rtx mem1, rtx mem2, rtx delta)
1401 {
1402 rtx addr1, addr2, addr_delta;
1403
1404 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
1405 return false;
1406
1407 addr1 = XEXP (mem1, 0);
1408 addr2 = XEXP (mem2, 0);
1409
1410 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
1411 if (!addr_delta || !rtx_equal_p (addr_delta, delta))
1412 return false;
1413
1414 return true;
1415 }
1416
1417 /* Expand logical operator CODE in mode MODE with operands OPERANDS. */
1418
1419 void
1420 s390_expand_logical_operator (enum rtx_code code, enum machine_mode mode,
1421 rtx *operands)
1422 {
1423 enum machine_mode wmode = mode;
1424 rtx dst = operands[0];
1425 rtx src1 = operands[1];
1426 rtx src2 = operands[2];
1427 rtx op, clob, tem;
1428
1429 /* If we cannot handle the operation directly, use a temp register. */
1430 if (!s390_logical_operator_ok_p (operands))
1431 dst = gen_reg_rtx (mode);
1432
1433 /* QImode and HImode patterns make sense only if we have a destination
1434 in memory. Otherwise perform the operation in SImode. */
1435 if ((mode == QImode || mode == HImode) && GET_CODE (dst) != MEM)
1436 wmode = SImode;
1437
1438 /* Widen operands if required. */
1439 if (mode != wmode)
1440 {
1441 if (GET_CODE (dst) == SUBREG
1442 && (tem = simplify_subreg (wmode, dst, mode, 0)) != 0)
1443 dst = tem;
1444 else if (REG_P (dst))
1445 dst = gen_rtx_SUBREG (wmode, dst, 0);
1446 else
1447 dst = gen_reg_rtx (wmode);
1448
1449 if (GET_CODE (src1) == SUBREG
1450 && (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)
1451 src1 = tem;
1452 else if (GET_MODE (src1) != VOIDmode)
1453 src1 = gen_rtx_SUBREG (wmode, force_reg (mode, src1), 0);
1454
1455 if (GET_CODE (src2) == SUBREG
1456 && (tem = simplify_subreg (wmode, src2, mode, 0)) != 0)
1457 src2 = tem;
1458 else if (GET_MODE (src2) != VOIDmode)
1459 src2 = gen_rtx_SUBREG (wmode, force_reg (mode, src2), 0);
1460 }
1461
1462 /* Emit the instruction. */
1463 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, wmode, src1, src2));
1464 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
1465 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
1466
1467 /* Fix up the destination if needed. */
1468 if (dst != operands[0])
1469 emit_move_insn (operands[0], gen_lowpart (mode, dst));
1470 }
1471
1472 /* Check whether OPERANDS are OK for a logical operation (AND, IOR, XOR). */
1473
1474 bool
1475 s390_logical_operator_ok_p (rtx *operands)
1476 {
1477 /* If the destination operand is in memory, it needs to coincide
1478 with one of the source operands. After reload, it has to be
1479 the first source operand. */
1480 if (GET_CODE (operands[0]) == MEM)
1481 return rtx_equal_p (operands[0], operands[1])
1482 || (!reload_completed && rtx_equal_p (operands[0], operands[2]));
1483
1484 return true;
1485 }
1486
1487 /* Narrow logical operation CODE of memory operand MEMOP with immediate
1488 operand IMMOP to switch from SS to SI type instructions. */
1489
1490 void
1491 s390_narrow_logical_operator (enum rtx_code code, rtx *memop, rtx *immop)
1492 {
1493 int def = code == AND ? -1 : 0;
1494 HOST_WIDE_INT mask;
1495 int part;
1496
1497 gcc_assert (GET_CODE (*memop) == MEM);
1498 gcc_assert (!MEM_VOLATILE_P (*memop));
1499
1500 mask = s390_extract_part (*immop, QImode, def);
1501 part = s390_single_part (*immop, GET_MODE (*memop), QImode, def);
1502 gcc_assert (part >= 0);
1503
1504 *memop = adjust_address (*memop, QImode, part);
1505 *immop = gen_int_mode (mask, QImode);
1506 }
1507
1508
1509 /* How to allocate a 'struct machine_function'. */
1510
1511 static struct machine_function *
1512 s390_init_machine_status (void)
1513 {
1514 return ggc_alloc_cleared_machine_function ();
1515 }
1516
1517 /* Change optimizations to be performed, depending on the
1518 optimization level. */
1519
1520 static const struct default_options s390_option_optimization_table[] =
1521 {
1522 { OPT_LEVELS_1_PLUS, OPT_fomit_frame_pointer, NULL, 1 },
1523
1524 /* ??? There are apparently still problems with -fcaller-saves. */
1525 { OPT_LEVELS_ALL, OPT_fcaller_saves, NULL, 0 },
1526
1527 /* Use MVCLE instructions to decrease code size if requested. */
1528 { OPT_LEVELS_SIZE, OPT_mmvcle, NULL, 1 },
1529
1530 { OPT_LEVELS_NONE, 0, NULL, 0 }
1531 };
1532
1533 /* Implement TARGET_OPTION_INIT_STRUCT. */
1534
1535 static void
1536 s390_option_init_struct (struct gcc_options *opts)
1537 {
1538 /* By default, always emit DWARF-2 unwind info. This allows debugging
1539 without maintaining a stack frame back-chain. */
1540 opts->x_flag_asynchronous_unwind_tables = 1;
1541 }
1542
1543 /* Return true if ARG is the name of a processor. Set *TYPE and *FLAGS
1544 to the associated processor_type and processor_flags if so. */
1545
1546 static bool
1547 s390_handle_arch_option (const char *arg,
1548 enum processor_type *type,
1549 int *flags)
1550 {
1551 static struct pta
1552 {
1553 const char *const name; /* processor name or nickname. */
1554 const enum processor_type processor;
1555 const int flags; /* From enum processor_flags. */
1556 }
1557 const processor_alias_table[] =
1558 {
1559 {"g5", PROCESSOR_9672_G5, PF_IEEE_FLOAT},
1560 {"g6", PROCESSOR_9672_G6, PF_IEEE_FLOAT},
1561 {"z900", PROCESSOR_2064_Z900, PF_IEEE_FLOAT | PF_ZARCH},
1562 {"z990", PROCESSOR_2084_Z990, PF_IEEE_FLOAT | PF_ZARCH
1563 | PF_LONG_DISPLACEMENT},
1564 {"z9-109", PROCESSOR_2094_Z9_109, PF_IEEE_FLOAT | PF_ZARCH
1565 | PF_LONG_DISPLACEMENT | PF_EXTIMM},
1566 {"z9-ec", PROCESSOR_2094_Z9_109, PF_IEEE_FLOAT | PF_ZARCH
1567 | PF_LONG_DISPLACEMENT | PF_EXTIMM | PF_DFP },
1568 {"z10", PROCESSOR_2097_Z10, PF_IEEE_FLOAT | PF_ZARCH
1569 | PF_LONG_DISPLACEMENT | PF_EXTIMM | PF_DFP | PF_Z10},
1570 {"z196", PROCESSOR_2817_Z196, PF_IEEE_FLOAT | PF_ZARCH
1571 | PF_LONG_DISPLACEMENT | PF_EXTIMM | PF_DFP | PF_Z10 | PF_Z196 },
1572 };
1573 size_t i;
1574
1575 for (i = 0; i < ARRAY_SIZE (processor_alias_table); i++)
1576 if (strcmp (arg, processor_alias_table[i].name) == 0)
1577 {
1578 *type = processor_alias_table[i].processor;
1579 *flags = processor_alias_table[i].flags;
1580 return true;
1581 }
1582
1583 *type = PROCESSOR_max;
1584 *flags = 0;
1585 return false;
1586 }
1587
1588 /* Implement TARGET_HANDLE_OPTION. */
1589
1590 static bool
1591 s390_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
1592 {
1593 switch (code)
1594 {
1595 case OPT_march_:
1596 return s390_handle_arch_option (arg, &s390_arch, &s390_arch_flags);
1597
1598 case OPT_mstack_guard_:
1599 if (sscanf (arg, HOST_WIDE_INT_PRINT_DEC, &s390_stack_guard) != 1)
1600 return false;
1601 if (exact_log2 (s390_stack_guard) == -1)
1602 error ("stack guard value must be an exact power of 2");
1603 return true;
1604
1605 case OPT_mstack_size_:
1606 if (sscanf (arg, HOST_WIDE_INT_PRINT_DEC, &s390_stack_size) != 1)
1607 return false;
1608 if (exact_log2 (s390_stack_size) == -1)
1609 error ("stack size must be an exact power of 2");
1610 return true;
1611
1612 case OPT_mtune_:
1613 return s390_handle_arch_option (arg, &s390_tune, &s390_tune_flags);
1614
1615 case OPT_mwarn_framesize_:
1616 return sscanf (arg, HOST_WIDE_INT_PRINT_DEC, &s390_warn_framesize) == 1;
1617
1618 default:
1619 return true;
1620 }
1621 }
1622
1623 static void
1624 s390_option_override (void)
1625 {
1626 /* Set up function hooks. */
1627 init_machine_status = s390_init_machine_status;
1628
1629 /* Architecture mode defaults according to ABI. */
1630 if (!(target_flags_explicit & MASK_ZARCH))
1631 {
1632 if (TARGET_64BIT)
1633 target_flags |= MASK_ZARCH;
1634 else
1635 target_flags &= ~MASK_ZARCH;
1636 }
1637
1638 /* Determine processor architectural level. */
1639 if (!s390_arch_string)
1640 {
1641 s390_arch_string = TARGET_ZARCH? "z900" : "g5";
1642 s390_handle_arch_option (s390_arch_string, &s390_arch, &s390_arch_flags);
1643 }
1644
1645 /* This check is triggered when the user specified a wrong -march=
1646 string and prevents subsequent error messages from being
1647 issued. */
1648 if (s390_arch == PROCESSOR_max)
1649 return;
1650
1651 /* Determine processor to tune for. */
1652 if (s390_tune == PROCESSOR_max)
1653 {
1654 s390_tune = s390_arch;
1655 s390_tune_flags = s390_arch_flags;
1656 }
1657
1658 /* Sanity checks. */
1659 if (TARGET_ZARCH && !TARGET_CPU_ZARCH)
1660 error ("z/Architecture mode not supported on %s", s390_arch_string);
1661 if (TARGET_64BIT && !TARGET_ZARCH)
1662 error ("64-bit ABI not supported in ESA/390 mode");
1663
1664 if (TARGET_HARD_DFP && !TARGET_DFP)
1665 {
1666 if (target_flags_explicit & MASK_HARD_DFP)
1667 {
1668 if (!TARGET_CPU_DFP)
1669 error ("hardware decimal floating point instructions"
1670 " not available on %s", s390_arch_string);
1671 if (!TARGET_ZARCH)
1672 error ("hardware decimal floating point instructions"
1673 " not available in ESA/390 mode");
1674 }
1675 else
1676 target_flags &= ~MASK_HARD_DFP;
1677 }
1678
1679 if ((target_flags_explicit & MASK_SOFT_FLOAT) && TARGET_SOFT_FLOAT)
1680 {
1681 if ((target_flags_explicit & MASK_HARD_DFP) && TARGET_HARD_DFP)
1682 error ("-mhard-dfp can%'t be used in conjunction with -msoft-float");
1683
1684 target_flags &= ~MASK_HARD_DFP;
1685 }
1686
1687 /* Set processor cost function. */
1688 switch (s390_tune)
1689 {
1690 case PROCESSOR_2084_Z990:
1691 s390_cost = &z990_cost;
1692 break;
1693 case PROCESSOR_2094_Z9_109:
1694 s390_cost = &z9_109_cost;
1695 break;
1696 case PROCESSOR_2097_Z10:
 1697 s390_cost = &z10_cost;
 break;
1698 case PROCESSOR_2817_Z196:
1699 s390_cost = &z196_cost;
1700 break;
1701 default:
1702 s390_cost = &z900_cost;
1703 }
1704
1705 if (TARGET_BACKCHAIN && TARGET_PACKED_STACK && TARGET_HARD_FLOAT)
1706 error ("-mbackchain -mpacked-stack -mhard-float are not supported "
1707 "in combination");
1708
1709 if (s390_stack_size)
1710 {
1711 if (s390_stack_guard >= s390_stack_size)
1712 error ("stack size must be greater than the stack guard value");
1713 else if (s390_stack_size > 1 << 16)
1714 error ("stack size must not be greater than 64k");
1715 }
1716 else if (s390_stack_guard)
1717 error ("-mstack-guard implies use of -mstack-size");
1718
1719 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
1720 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
1721 target_flags |= MASK_LONG_DOUBLE_128;
1722 #endif
1723
1724 if (s390_tune == PROCESSOR_2097_Z10
1725 || s390_tune == PROCESSOR_2817_Z196)
1726 {
1727 maybe_set_param_value (PARAM_MAX_UNROLLED_INSNS, 100,
1728 global_options.x_param_values,
1729 global_options_set.x_param_values);
1730 maybe_set_param_value (PARAM_MAX_UNROLL_TIMES, 32,
1731 global_options.x_param_values,
1732 global_options_set.x_param_values);
1733 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 2000,
1734 global_options.x_param_values,
1735 global_options_set.x_param_values);
1736 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEEL_TIMES, 64,
1737 global_options.x_param_values,
1738 global_options_set.x_param_values);
1739 }
1740
1741 maybe_set_param_value (PARAM_MAX_PENDING_LIST_LENGTH, 256,
1742 global_options.x_param_values,
1743 global_options_set.x_param_values);
1744 /* values for loop prefetching */
1745 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, 256,
1746 global_options.x_param_values,
1747 global_options_set.x_param_values);
1748 maybe_set_param_value (PARAM_L1_CACHE_SIZE, 128,
1749 global_options.x_param_values,
1750 global_options_set.x_param_values);
 1751 /* s390 has more than 2 cache levels and their size is much larger.
 1752 Since we are always running virtualized, assume that we only get a
 1753 small part of the caches above L1. */
1754 maybe_set_param_value (PARAM_L2_CACHE_SIZE, 1500,
1755 global_options.x_param_values,
1756 global_options_set.x_param_values);
1757 maybe_set_param_value (PARAM_PREFETCH_MIN_INSN_TO_MEM_RATIO, 2,
1758 global_options.x_param_values,
1759 global_options_set.x_param_values);
1760 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6,
1761 global_options.x_param_values,
1762 global_options_set.x_param_values);
1763
1764 /* This cannot reside in s390_option_optimization_table since HAVE_prefetch
1765 requires the arch flags to be evaluated already. Since prefetching
1766 is beneficial on s390, we enable it if available. */
1767 if (flag_prefetch_loop_arrays < 0 && HAVE_prefetch && optimize >= 3)
1768 flag_prefetch_loop_arrays = 1;
1769 }
1770
1771 /* Map for smallest class containing reg regno. */
1772
1773 const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
1774 { GENERAL_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1775 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1776 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1777 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1778 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1779 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1780 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1781 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1782 ADDR_REGS, CC_REGS, ADDR_REGS, ADDR_REGS,
1783 ACCESS_REGS, ACCESS_REGS
1784 };
1785
1786 /* Return attribute type of insn. */
1787
1788 static enum attr_type
1789 s390_safe_attr_type (rtx insn)
1790 {
1791 if (recog_memoized (insn) >= 0)
1792 return get_attr_type (insn);
1793 else
1794 return TYPE_NONE;
1795 }
1796
1797 /* Return true if DISP is a valid short displacement. */
1798
1799 static bool
1800 s390_short_displacement (rtx disp)
1801 {
1802 /* No displacement is OK. */
1803 if (!disp)
1804 return true;
1805
1806 /* Without the long displacement facility we don't need to
 1807 distinguish between long and short displacements. */
1808 if (!TARGET_LONG_DISPLACEMENT)
1809 return true;
1810
1811 /* Integer displacement in range. */
1812 if (GET_CODE (disp) == CONST_INT)
1813 return INTVAL (disp) >= 0 && INTVAL (disp) < 4096;
1814
1815 /* GOT offset is not OK, the GOT can be large. */
1816 if (GET_CODE (disp) == CONST
1817 && GET_CODE (XEXP (disp, 0)) == UNSPEC
1818 && (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
1819 || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
1820 return false;
1821
1822 /* All other symbolic constants are literal pool references,
1823 which are OK as the literal pool must be small. */
1824 if (GET_CODE (disp) == CONST)
1825 return true;
1826
1827 return false;
1828 }
1829
1830 /* Decompose a RTL expression ADDR for a memory address into
1831 its components, returned in OUT.
1832
1833 Returns false if ADDR is not a valid memory address, true
1834 otherwise. If OUT is NULL, don't return the components,
1835 but check for validity only.
1836
1837 Note: Only addresses in canonical form are recognized.
1838 LEGITIMIZE_ADDRESS should convert non-canonical forms to the
1839 canonical form so that they will be recognized. */
1840
1841 static int
1842 s390_decompose_address (rtx addr, struct s390_address *out)
1843 {
1844 HOST_WIDE_INT offset = 0;
1845 rtx base = NULL_RTX;
1846 rtx indx = NULL_RTX;
1847 rtx disp = NULL_RTX;
1848 rtx orig_disp;
1849 bool pointer = false;
1850 bool base_ptr = false;
1851 bool indx_ptr = false;
1852 bool literal_pool = false;
1853
1854 /* We may need to substitute the literal pool base register into the address
1855 below. However, at this point we do not know which register is going to
1856 be used as base, so we substitute the arg pointer register. This is going
1857 to be treated as holding a pointer below -- it shouldn't be used for any
1858 other purpose. */
1859 rtx fake_pool_base = gen_rtx_REG (Pmode, ARG_POINTER_REGNUM);
1860
1861 /* Decompose address into base + index + displacement. */
1862
1863 if (GET_CODE (addr) == REG || GET_CODE (addr) == UNSPEC)
1864 base = addr;
1865
1866 else if (GET_CODE (addr) == PLUS)
1867 {
1868 rtx op0 = XEXP (addr, 0);
1869 rtx op1 = XEXP (addr, 1);
1870 enum rtx_code code0 = GET_CODE (op0);
1871 enum rtx_code code1 = GET_CODE (op1);
1872
1873 if (code0 == REG || code0 == UNSPEC)
1874 {
1875 if (code1 == REG || code1 == UNSPEC)
1876 {
1877 indx = op0; /* index + base */
1878 base = op1;
1879 }
1880
1881 else
1882 {
1883 base = op0; /* base + displacement */
1884 disp = op1;
1885 }
1886 }
1887
1888 else if (code0 == PLUS)
1889 {
1890 indx = XEXP (op0, 0); /* index + base + disp */
1891 base = XEXP (op0, 1);
1892 disp = op1;
1893 }
1894
1895 else
1896 {
1897 return false;
1898 }
1899 }
1900
1901 else
1902 disp = addr; /* displacement */
1903
1904 /* Extract integer part of displacement. */
1905 orig_disp = disp;
1906 if (disp)
1907 {
1908 if (GET_CODE (disp) == CONST_INT)
1909 {
1910 offset = INTVAL (disp);
1911 disp = NULL_RTX;
1912 }
1913 else if (GET_CODE (disp) == CONST
1914 && GET_CODE (XEXP (disp, 0)) == PLUS
1915 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
1916 {
1917 offset = INTVAL (XEXP (XEXP (disp, 0), 1));
1918 disp = XEXP (XEXP (disp, 0), 0);
1919 }
1920 }
1921
1922 /* Strip off CONST here to avoid special case tests later. */
1923 if (disp && GET_CODE (disp) == CONST)
1924 disp = XEXP (disp, 0);
1925
1926 /* We can convert literal pool addresses to
1927 displacements by basing them off the base register. */
1928 if (disp && GET_CODE (disp) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (disp))
1929 {
1930 /* Either base or index must be free to hold the base register. */
1931 if (!base)
1932 base = fake_pool_base, literal_pool = true;
1933 else if (!indx)
1934 indx = fake_pool_base, literal_pool = true;
1935 else
1936 return false;
1937
1938 /* Mark up the displacement. */
1939 disp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, disp),
1940 UNSPEC_LTREL_OFFSET);
1941 }
1942
1943 /* Validate base register. */
1944 if (base)
1945 {
1946 if (GET_CODE (base) == UNSPEC)
1947 switch (XINT (base, 1))
1948 {
1949 case UNSPEC_LTREF:
1950 if (!disp)
1951 disp = gen_rtx_UNSPEC (Pmode,
1952 gen_rtvec (1, XVECEXP (base, 0, 0)),
1953 UNSPEC_LTREL_OFFSET);
1954 else
1955 return false;
1956
1957 base = XVECEXP (base, 0, 1);
1958 break;
1959
1960 case UNSPEC_LTREL_BASE:
1961 if (XVECLEN (base, 0) == 1)
1962 base = fake_pool_base, literal_pool = true;
1963 else
1964 base = XVECEXP (base, 0, 1);
1965 break;
1966
1967 default:
1968 return false;
1969 }
1970
1971 if (!REG_P (base)
1972 || (GET_MODE (base) != SImode
1973 && GET_MODE (base) != Pmode))
1974 return false;
1975
1976 if (REGNO (base) == STACK_POINTER_REGNUM
1977 || REGNO (base) == FRAME_POINTER_REGNUM
1978 || ((reload_completed || reload_in_progress)
1979 && frame_pointer_needed
1980 && REGNO (base) == HARD_FRAME_POINTER_REGNUM)
1981 || REGNO (base) == ARG_POINTER_REGNUM
1982 || (flag_pic
1983 && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
1984 pointer = base_ptr = true;
1985
1986 if ((reload_completed || reload_in_progress)
1987 && base == cfun->machine->base_reg)
1988 pointer = base_ptr = literal_pool = true;
1989 }
1990
1991 /* Validate index register. */
1992 if (indx)
1993 {
1994 if (GET_CODE (indx) == UNSPEC)
1995 switch (XINT (indx, 1))
1996 {
1997 case UNSPEC_LTREF:
1998 if (!disp)
1999 disp = gen_rtx_UNSPEC (Pmode,
2000 gen_rtvec (1, XVECEXP (indx, 0, 0)),
2001 UNSPEC_LTREL_OFFSET);
2002 else
2003 return false;
2004
2005 indx = XVECEXP (indx, 0, 1);
2006 break;
2007
2008 case UNSPEC_LTREL_BASE:
2009 if (XVECLEN (indx, 0) == 1)
2010 indx = fake_pool_base, literal_pool = true;
2011 else
2012 indx = XVECEXP (indx, 0, 1);
2013 break;
2014
2015 default:
2016 return false;
2017 }
2018
2019 if (!REG_P (indx)
2020 || (GET_MODE (indx) != SImode
2021 && GET_MODE (indx) != Pmode))
2022 return false;
2023
2024 if (REGNO (indx) == STACK_POINTER_REGNUM
2025 || REGNO (indx) == FRAME_POINTER_REGNUM
2026 || ((reload_completed || reload_in_progress)
2027 && frame_pointer_needed
2028 && REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
2029 || REGNO (indx) == ARG_POINTER_REGNUM
2030 || (flag_pic
2031 && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
2032 pointer = indx_ptr = true;
2033
2034 if ((reload_completed || reload_in_progress)
2035 && indx == cfun->machine->base_reg)
2036 pointer = indx_ptr = literal_pool = true;
2037 }
2038
2039 /* Prefer to use pointer as base, not index. */
2040 if (base && indx && !base_ptr
2041 && (indx_ptr || (!REG_POINTER (base) && REG_POINTER (indx))))
2042 {
2043 rtx tmp = base;
2044 base = indx;
2045 indx = tmp;
2046 }
2047
2048 /* Validate displacement. */
2049 if (!disp)
2050 {
2051 /* If virtual registers are involved, the displacement will change later
2052 anyway as the virtual registers get eliminated. This could make a
2053 valid displacement invalid, but it is more likely to make an invalid
2054 displacement valid, because we sometimes access the register save area
2055 via negative offsets to one of those registers.
2056 Thus we don't check the displacement for validity here. If after
2057 elimination the displacement turns out to be invalid after all,
2058 this is fixed up by reload in any case. */
2059 if (base != arg_pointer_rtx
2060 && indx != arg_pointer_rtx
2061 && base != return_address_pointer_rtx
2062 && indx != return_address_pointer_rtx
2063 && base != frame_pointer_rtx
2064 && indx != frame_pointer_rtx
2065 && base != virtual_stack_vars_rtx
2066 && indx != virtual_stack_vars_rtx)
2067 if (!DISP_IN_RANGE (offset))
2068 return false;
2069 }
2070 else
2071 {
2072 /* All the special cases are pointers. */
2073 pointer = true;
2074
2075 /* In the small-PIC case, the linker converts @GOT
2076 and @GOTNTPOFF offsets to possible displacements. */
2077 if (GET_CODE (disp) == UNSPEC
2078 && (XINT (disp, 1) == UNSPEC_GOT
2079 || XINT (disp, 1) == UNSPEC_GOTNTPOFF)
2080 && flag_pic == 1)
2081 {
2082 ;
2083 }
2084
2085 /* Accept pool label offsets. */
2086 else if (GET_CODE (disp) == UNSPEC
2087 && XINT (disp, 1) == UNSPEC_POOL_OFFSET)
2088 ;
2089
2090 /* Accept literal pool references. */
2091 else if (GET_CODE (disp) == UNSPEC
2092 && XINT (disp, 1) == UNSPEC_LTREL_OFFSET)
2093 {
2094 /* In case CSE pulled a non-literal-pool reference out of
2095 the pool we have to reject the address. This is
2096 especially important when loading the GOT pointer on non-
2097 zarch CPUs. In this case the literal pool contains an lt-
2098 relative offset to the _GLOBAL_OFFSET_TABLE_ label which
2099 will most likely exceed the displacement. */
2100 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
2101 || !CONSTANT_POOL_ADDRESS_P (XVECEXP (disp, 0, 0)))
2102 return false;
2103
2104 orig_disp = gen_rtx_CONST (Pmode, disp);
2105 if (offset)
2106 {
2107 /* If we have an offset, make sure it does not
2108 exceed the size of the constant pool entry. */
2109 rtx sym = XVECEXP (disp, 0, 0);
2110 if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))
2111 return false;
2112
2113 orig_disp = plus_constant (orig_disp, offset);
2114 }
2115 }
2116
2117 else
2118 return false;
2119 }
2120
2121 if (!base && !indx)
2122 pointer = true;
2123
2124 if (out)
2125 {
2126 out->base = base;
2127 out->indx = indx;
2128 out->disp = orig_disp;
2129 out->pointer = pointer;
2130 out->literal_pool = literal_pool;
2131 }
2132
2133 return true;
2134 }
2135
2136 /* Decompose a RTL expression OP for a shift count into its components,
2137 and return the base register in BASE and the offset in OFFSET.
2138
2139 Return true if OP is a valid shift count, false if not. */
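/* Examples of shift counts accepted below (register numbers are only
   illustrative):

     (const_int 3)                       -> *base = NULL,  *offset = 3
     (reg %r2)                           -> *base = %r2,   *offset = 0
     (plus (reg %r2) (const_int 3))      -> *base = %r2,   *offset = 3
     (subreg:SI (reg:DI %r2) 4)          -> *base = %r2,   *offset = 0

   A sum of two registers, for example, is rejected.  */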
2140
2141 bool
2142 s390_decompose_shift_count (rtx op, rtx *base, HOST_WIDE_INT *offset)
2143 {
2144 HOST_WIDE_INT off = 0;
2145
2146 /* We can have an integer constant, an address register,
2147 or a sum of the two. */
2148 if (GET_CODE (op) == CONST_INT)
2149 {
2150 off = INTVAL (op);
2151 op = NULL_RTX;
2152 }
2153 if (op && GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT)
2154 {
2155 off = INTVAL (XEXP (op, 1));
2156 op = XEXP (op, 0);
2157 }
2158 while (op && GET_CODE (op) == SUBREG)
2159 op = SUBREG_REG (op);
2160
2161 if (op && GET_CODE (op) != REG)
2162 return false;
2163
2164 if (offset)
2165 *offset = off;
2166 if (base)
2167 *base = op;
2168
2169 return true;
2170 }
2171
2172
2173 /* Return true if OP is a memory operand whose address does not use an index register. */
2174
2175 bool
2176 s390_legitimate_address_without_index_p (rtx op)
2177 {
2178 struct s390_address addr;
2179
2180 if (!s390_decompose_address (XEXP (op, 0), &addr))
2181 return false;
2182 if (addr.indx)
2183 return false;
2184
2185 return true;
2186 }
2187
2188
2189 /* Return true if ADDR is of kind symbol_ref or symbol_ref + const_int
2190 and return these parts in SYMREF and ADDEND. You can pass NULL in
2191 SYMREF and/or ADDEND if you are not interested in these values.
2192 Literal pool references are *not* considered symbol references. */
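/* For example:

     (symbol_ref "foo")                                -> *symref = foo, *addend = 0
     (const (plus (symbol_ref "foo") (const_int 12)))  -> *symref = foo, *addend = 12

   A SYMBOL_REF with CONSTANT_POOL_ADDRESS_P set, or any other shape,
   makes the function return false.  */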
2193
2194 static bool
2195 s390_symref_operand_p (rtx addr, rtx *symref, HOST_WIDE_INT *addend)
2196 {
2197 HOST_WIDE_INT tmpaddend = 0;
2198
2199 if (GET_CODE (addr) == CONST)
2200 addr = XEXP (addr, 0);
2201
2202 if (GET_CODE (addr) == PLUS)
2203 {
2204 if (GET_CODE (XEXP (addr, 0)) == SYMBOL_REF
2205 && !CONSTANT_POOL_ADDRESS_P (XEXP (addr, 0))
2206 && CONST_INT_P (XEXP (addr, 1)))
2207 {
2208 tmpaddend = INTVAL (XEXP (addr, 1));
2209 addr = XEXP (addr, 0);
2210 }
2211 else
2212 return false;
2213 }
2214 else
2215 if (GET_CODE (addr) != SYMBOL_REF || CONSTANT_POOL_ADDRESS_P (addr))
2216 return false;
2217
2218 if (symref)
2219 *symref = addr;
2220 if (addend)
2221 *addend = tmpaddend;
2222
2223 return true;
2224 }
2225
2226
2227 /* Return true if the address in OP is valid for constraint letter C
2228 if wrapped in a MEM rtx. Set LIT_POOL_OK to true if literal
2229 pool MEMs should be accepted. Only the Q, R, S, T constraint
2230 letters are allowed for C. */
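/* In short, the cases below implement:

     'Q'  base + short displacement, no index register
     'R'  base + index + short displacement
     'S'  base + long displacement, no index register
     'T'  base + index + long displacement

   where a short displacement fits the unsigned 12-bit field and a long
   displacement requires the long-displacement facility.  */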
2231
2232 static int
2233 s390_check_qrst_address (char c, rtx op, bool lit_pool_ok)
2234 {
2235 struct s390_address addr;
2236 bool decomposed = false;
2237
2238 /* This check makes sure that no symbolic address (except literal
2239 pool references) are accepted by the R or T constraints. */
2240 if (s390_symref_operand_p (op, NULL, NULL))
2241 return 0;
2242
2243 /* Ensure literal pool references are only accepted if LIT_POOL_OK. */
2244 if (!lit_pool_ok)
2245 {
2246 if (!s390_decompose_address (op, &addr))
2247 return 0;
2248 if (addr.literal_pool)
2249 return 0;
2250 decomposed = true;
2251 }
2252
2253 switch (c)
2254 {
2255 case 'Q': /* no index short displacement */
2256 if (!decomposed && !s390_decompose_address (op, &addr))
2257 return 0;
2258 if (addr.indx)
2259 return 0;
2260 if (!s390_short_displacement (addr.disp))
2261 return 0;
2262 break;
2263
2264 case 'R': /* with index short displacement */
2265 if (TARGET_LONG_DISPLACEMENT)
2266 {
2267 if (!decomposed && !s390_decompose_address (op, &addr))
2268 return 0;
2269 if (!s390_short_displacement (addr.disp))
2270 return 0;
2271 }
2272 /* Any invalid address here will be fixed up by reload,
2273 so accept it for the most generic constraint. */
2274 break;
2275
2276 case 'S': /* no index long displacement */
2277 if (!TARGET_LONG_DISPLACEMENT)
2278 return 0;
2279 if (!decomposed && !s390_decompose_address (op, &addr))
2280 return 0;
2281 if (addr.indx)
2282 return 0;
2283 if (s390_short_displacement (addr.disp))
2284 return 0;
2285 break;
2286
2287 case 'T': /* with index long displacement */
2288 if (!TARGET_LONG_DISPLACEMENT)
2289 return 0;
2290 /* Any invalid address here will be fixed up by reload,
2291 so accept it for the most generic constraint. */
2292 if ((decomposed || s390_decompose_address (op, &addr))
2293 && s390_short_displacement (addr.disp))
2294 return 0;
2295 break;
2296 default:
2297 return 0;
2298 }
2299 return 1;
2300 }
2301
2302
2303 /* Evaluates constraint strings described by the regular expression
2304 ([A|B|Z](Q|R|S|T))|U|W|Y and returns 1 if OP is a valid operand for
2305 the constraint given in STR, and 0 otherwise. */
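/* For example, "AQ" accepts an offsettable MEM whose address satisfies
   'Q', "BR" accepts an 'R' address that does not reference the literal
   pool, and "ZT" checks an address rtx (rather than a MEM) against 'T'.  */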
2306
2307 int
2308 s390_mem_constraint (const char *str, rtx op)
2309 {
2310 char c = str[0];
2311
2312 switch (c)
2313 {
2314 case 'A':
2315 /* Check for offsettable variants of memory constraints. */
2316 if (!MEM_P (op) || MEM_VOLATILE_P (op))
2317 return 0;
2318 if ((reload_completed || reload_in_progress)
2319 ? !offsettable_memref_p (op) : !offsettable_nonstrict_memref_p (op))
2320 return 0;
2321 return s390_check_qrst_address (str[1], XEXP (op, 0), true);
2322 case 'B':
2323 /* Check for non-literal-pool variants of memory constraints. */
2324 if (!MEM_P (op))
2325 return 0;
2326 return s390_check_qrst_address (str[1], XEXP (op, 0), false);
2327 case 'Q':
2328 case 'R':
2329 case 'S':
2330 case 'T':
2331 if (GET_CODE (op) != MEM)
2332 return 0;
2333 return s390_check_qrst_address (c, XEXP (op, 0), true);
2334 case 'U':
2335 return (s390_check_qrst_address ('Q', op, true)
2336 || s390_check_qrst_address ('R', op, true));
2337 case 'W':
2338 return (s390_check_qrst_address ('S', op, true)
2339 || s390_check_qrst_address ('T', op, true));
2340 case 'Y':
2341 /* Simply check for the basic form of a shift count. Reload will
2342 take care of making sure we have a proper base register. */
2343 if (!s390_decompose_shift_count (op, NULL, NULL))
2344 return 0;
2345 break;
2346 case 'Z':
2347 return s390_check_qrst_address (str[1], op, true);
2348 default:
2349 return 0;
2350 }
2351 return 1;
2352 }
2353
2354
2355 /* Evaluates constraint strings starting with letter O. Input
2356 parameter C is the letter following the "O" in the constraint
2357 string. Returns 1 if VALUE meets the respective constraint and 0
2358 otherwise. */
2359
2360 int
2361 s390_O_constraint_str (const char c, HOST_WIDE_INT value)
2362 {
2363 if (!TARGET_EXTIMM)
2364 return 0;
2365
2366 switch (c)
2367 {
2368 case 's':
2369 return trunc_int_for_mode (value, SImode) == value;
2370
2371 case 'p':
2372 return value == 0
2373 || s390_single_part (GEN_INT (value), DImode, SImode, 0) == 1;
2374
2375 case 'n':
2376 return s390_single_part (GEN_INT (value - 1), DImode, SImode, -1) == 1;
2377
2378 default:
2379 gcc_unreachable ();
2380 }
2381 }
2382
2383
2384 /* Evaluates constraint strings starting with letter N. Parameter STR
2385 contains the letters following letter "N" in the constraint string.
2386 Returns true if VALUE matches the constraint. */
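/* The letters following "N" encode, in order: the number of the part
   that may differ ('x' for any part), the mode of that part (Q/H/S),
   the mode of the whole value (H/S/D), and the value of the remaining
   parts ('0' or 'F' for all ones).  For instance, "NxHD0" matches a
   DImode constant with exactly one nonzero halfword, all other
   halfwords being zero.  */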
2387
2388 int
2389 s390_N_constraint_str (const char *str, HOST_WIDE_INT value)
2390 {
2391 enum machine_mode mode, part_mode;
2392 int def;
2393 int part, part_goal;
2394
2395
2396 if (str[0] == 'x')
2397 part_goal = -1;
2398 else
2399 part_goal = str[0] - '0';
2400
2401 switch (str[1])
2402 {
2403 case 'Q':
2404 part_mode = QImode;
2405 break;
2406 case 'H':
2407 part_mode = HImode;
2408 break;
2409 case 'S':
2410 part_mode = SImode;
2411 break;
2412 default:
2413 return 0;
2414 }
2415
2416 switch (str[2])
2417 {
2418 case 'H':
2419 mode = HImode;
2420 break;
2421 case 'S':
2422 mode = SImode;
2423 break;
2424 case 'D':
2425 mode = DImode;
2426 break;
2427 default:
2428 return 0;
2429 }
2430
2431 switch (str[3])
2432 {
2433 case '0':
2434 def = 0;
2435 break;
2436 case 'F':
2437 def = -1;
2438 break;
2439 default:
2440 return 0;
2441 }
2442
2443 if (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (part_mode))
2444 return 0;
2445
2446 part = s390_single_part (GEN_INT (value), mode, part_mode, def);
2447 if (part < 0)
2448 return 0;
2449 if (part_goal != -1 && part_goal != part)
2450 return 0;
2451
2452 return 1;
2453 }
2454
2455
2456 /* Returns true if the input parameter VALUE is a float zero. */
2457
2458 int
2459 s390_float_const_zero_p (rtx value)
2460 {
2461 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
2462 && value == CONST0_RTX (GET_MODE (value)));
2463 }
2464
2465 /* Implement TARGET_REGISTER_MOVE_COST. */
2466
2467 static int
2468 s390_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2469 reg_class_t from, reg_class_t to)
2470 {
2471 /* On s390, copy between fprs and gprs is expensive. */
2472 if ((reg_classes_intersect_p (from, GENERAL_REGS)
2473 && reg_classes_intersect_p (to, FP_REGS))
2474 || (reg_classes_intersect_p (from, FP_REGS)
2475 && reg_classes_intersect_p (to, GENERAL_REGS)))
2476 return 10;
2477
2478 return 1;
2479 }
2480
2481 /* Implement TARGET_MEMORY_MOVE_COST. */
2482
2483 static int
2484 s390_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2485 reg_class_t rclass ATTRIBUTE_UNUSED,
2486 bool in ATTRIBUTE_UNUSED)
2487 {
2488 return 1;
2489 }
2490
2491 /* Compute a (partial) cost for rtx X. Return true if the complete
2492 cost has been computed, and false if subexpressions should be
2493 scanned. In either case, *TOTAL contains the cost result.
2494 CODE contains GET_CODE (x), OUTER_CODE contains the code
2495 of the superexpression of x. */
2496
2497 static bool
2498 s390_rtx_costs (rtx x, int code, int outer_code, int *total,
2499 bool speed ATTRIBUTE_UNUSED)
2500 {
2501 switch (code)
2502 {
2503 case CONST:
2504 case CONST_INT:
2505 case LABEL_REF:
2506 case SYMBOL_REF:
2507 case CONST_DOUBLE:
2508 case MEM:
2509 *total = 0;
2510 return true;
2511
2512 case ASHIFT:
2513 case ASHIFTRT:
2514 case LSHIFTRT:
2515 case ROTATE:
2516 case ROTATERT:
2517 case AND:
2518 case IOR:
2519 case XOR:
2520 case NEG:
2521 case NOT:
2522 *total = COSTS_N_INSNS (1);
2523 return false;
2524
2525 case PLUS:
2526 case MINUS:
2527 *total = COSTS_N_INSNS (1);
2528 return false;
2529
2530 case MULT:
2531 switch (GET_MODE (x))
2532 {
2533 case SImode:
2534 {
2535 rtx left = XEXP (x, 0);
2536 rtx right = XEXP (x, 1);
2537 if (GET_CODE (right) == CONST_INT
2538 && CONST_OK_FOR_K (INTVAL (right)))
2539 *total = s390_cost->mhi;
2540 else if (GET_CODE (left) == SIGN_EXTEND)
2541 *total = s390_cost->mh;
2542 else
2543 *total = s390_cost->ms; /* msr, ms, msy */
2544 break;
2545 }
2546 case DImode:
2547 {
2548 rtx left = XEXP (x, 0);
2549 rtx right = XEXP (x, 1);
2550 if (TARGET_ZARCH)
2551 {
2552 if (GET_CODE (right) == CONST_INT
2553 && CONST_OK_FOR_K (INTVAL (right)))
2554 *total = s390_cost->mghi;
2555 else if (GET_CODE (left) == SIGN_EXTEND)
2556 *total = s390_cost->msgf;
2557 else
2558 *total = s390_cost->msg; /* msgr, msg */
2559 }
2560 else /* TARGET_31BIT */
2561 {
2562 if (GET_CODE (left) == SIGN_EXTEND
2563 && GET_CODE (right) == SIGN_EXTEND)
2564 /* mulsidi case: mr, m */
2565 *total = s390_cost->m;
2566 else if (GET_CODE (left) == ZERO_EXTEND
2567 && GET_CODE (right) == ZERO_EXTEND
2568 && TARGET_CPU_ZARCH)
2569 /* umulsidi case: ml, mlr */
2570 *total = s390_cost->ml;
2571 else
2572 /* Complex calculation is required. */
2573 *total = COSTS_N_INSNS (40);
2574 }
2575 break;
2576 }
2577 case SFmode:
2578 case DFmode:
2579 *total = s390_cost->mult_df;
2580 break;
2581 case TFmode:
2582 *total = s390_cost->mxbr;
2583 break;
2584 default:
2585 return false;
2586 }
2587 return false;
2588
2589 case FMA:
2590 switch (GET_MODE (x))
2591 {
2592 case DFmode:
2593 *total = s390_cost->madbr;
2594 break;
2595 case SFmode:
2596 *total = s390_cost->maebr;
2597 break;
2598 default:
2599 return false;
2600 }
2601 /* Negate in the third argument is free: FMSUB. */
2602 if (GET_CODE (XEXP (x, 2)) == NEG)
2603 {
2604 *total += (rtx_cost (XEXP (x, 0), FMA, speed)
2605 + rtx_cost (XEXP (x, 1), FMA, speed)
2606 + rtx_cost (XEXP (XEXP (x, 2), 0), FMA, speed));
2607 return true;
2608 }
2609 return false;
2610
2611 case UDIV:
2612 case UMOD:
2613 if (GET_MODE (x) == TImode) /* 128 bit division */
2614 *total = s390_cost->dlgr;
2615 else if (GET_MODE (x) == DImode)
2616 {
2617 rtx right = XEXP (x, 1);
2618 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
2619 *total = s390_cost->dlr;
2620 else /* 64 by 64 bit division */
2621 *total = s390_cost->dlgr;
2622 }
2623 else if (GET_MODE (x) == SImode) /* 32 bit division */
2624 *total = s390_cost->dlr;
2625 return false;
2626
2627 case DIV:
2628 case MOD:
2629 if (GET_MODE (x) == DImode)
2630 {
2631 rtx right = XEXP (x, 1);
2632 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
2633 if (TARGET_ZARCH)
2634 *total = s390_cost->dsgfr;
2635 else
2636 *total = s390_cost->dr;
2637 else /* 64 by 64 bit division */
2638 *total = s390_cost->dsgr;
2639 }
2640 else if (GET_MODE (x) == SImode) /* 32 bit division */
2641 *total = s390_cost->dlr;
2642 else if (GET_MODE (x) == SFmode)
2643 {
2644 *total = s390_cost->debr;
2645 }
2646 else if (GET_MODE (x) == DFmode)
2647 {
2648 *total = s390_cost->ddbr;
2649 }
2650 else if (GET_MODE (x) == TFmode)
2651 {
2652 *total = s390_cost->dxbr;
2653 }
2654 return false;
2655
2656 case SQRT:
2657 if (GET_MODE (x) == SFmode)
2658 *total = s390_cost->sqebr;
2659 else if (GET_MODE (x) == DFmode)
2660 *total = s390_cost->sqdbr;
2661 else /* TFmode */
2662 *total = s390_cost->sqxbr;
2663 return false;
2664
2665 case SIGN_EXTEND:
2666 case ZERO_EXTEND:
2667 if (outer_code == MULT || outer_code == DIV || outer_code == MOD
2668 || outer_code == PLUS || outer_code == MINUS
2669 || outer_code == COMPARE)
2670 *total = 0;
2671 return false;
2672
2673 case COMPARE:
2674 *total = COSTS_N_INSNS (1);
2675 if (GET_CODE (XEXP (x, 0)) == AND
2676 && GET_CODE (XEXP (x, 1)) == CONST_INT
2677 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
2678 {
2679 rtx op0 = XEXP (XEXP (x, 0), 0);
2680 rtx op1 = XEXP (XEXP (x, 0), 1);
2681 rtx op2 = XEXP (x, 1);
2682
2683 if (memory_operand (op0, GET_MODE (op0))
2684 && s390_tm_ccmode (op1, op2, 0) != VOIDmode)
2685 return true;
2686 if (register_operand (op0, GET_MODE (op0))
2687 && s390_tm_ccmode (op1, op2, 1) != VOIDmode)
2688 return true;
2689 }
2690 return false;
2691
2692 default:
2693 return false;
2694 }
2695 }
2696
2697 /* Return the cost of an address rtx ADDR. */
2698
2699 static int
2700 s390_address_cost (rtx addr, bool speed ATTRIBUTE_UNUSED)
2701 {
2702 struct s390_address ad;
2703 if (!s390_decompose_address (addr, &ad))
2704 return 1000;
2705
2706 return ad.indx? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (1);
2707 }
2708
2709 /* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
2710 otherwise return 0. */
2711
2712 int
2713 tls_symbolic_operand (rtx op)
2714 {
2715 if (GET_CODE (op) != SYMBOL_REF)
2716 return 0;
2717 return SYMBOL_REF_TLS_MODEL (op);
2718 }
2719 \f
2720 /* Split DImode access register reference REG (on 64-bit) into its constituent
2721 low and high parts, and store them into LO and HI. Note that gen_lowpart/
2722 gen_highpart cannot be used as they assume all registers are word-sized,
2723 while our access registers have only half that size. */
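/* E.g. for a DImode value held in the access-register pair starting at
   %a4 this yields *hi = (reg:SI %a4) and *lo = (reg:SI %a5).  */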
2724
2725 void
2726 s390_split_access_reg (rtx reg, rtx *lo, rtx *hi)
2727 {
2728 gcc_assert (TARGET_64BIT);
2729 gcc_assert (ACCESS_REG_P (reg));
2730 gcc_assert (GET_MODE (reg) == DImode);
2731 gcc_assert (!(REGNO (reg) & 1));
2732
2733 *lo = gen_rtx_REG (SImode, REGNO (reg) + 1);
2734 *hi = gen_rtx_REG (SImode, REGNO (reg));
2735 }
2736
2737 /* Return true if OP contains a symbol reference. */
2738
2739 bool
2740 symbolic_reference_mentioned_p (rtx op)
2741 {
2742 const char *fmt;
2743 int i;
2744
2745 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
2746 return 1;
2747
2748 fmt = GET_RTX_FORMAT (GET_CODE (op));
2749 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2750 {
2751 if (fmt[i] == 'E')
2752 {
2753 int j;
2754
2755 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2756 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2757 return 1;
2758 }
2759
2760 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
2761 return 1;
2762 }
2763
2764 return 0;
2765 }
2766
2767 /* Return true if OP contains a reference to a thread-local symbol. */
2768
2769 bool
2770 tls_symbolic_reference_mentioned_p (rtx op)
2771 {
2772 const char *fmt;
2773 int i;
2774
2775 if (GET_CODE (op) == SYMBOL_REF)
2776 return tls_symbolic_operand (op);
2777
2778 fmt = GET_RTX_FORMAT (GET_CODE (op));
2779 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2780 {
2781 if (fmt[i] == 'E')
2782 {
2783 int j;
2784
2785 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2786 if (tls_symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2787 return true;
2788 }
2789
2790 else if (fmt[i] == 'e' && tls_symbolic_reference_mentioned_p (XEXP (op, i)))
2791 return true;
2792 }
2793
2794 return false;
2795 }
2796
2797
2798 /* Return true if OP is a legitimate general operand when
2799 generating PIC code. It is given that flag_pic is on
2800 and that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2801
2802 int
2803 legitimate_pic_operand_p (rtx op)
2804 {
2805 /* Accept all non-symbolic constants. */
2806 if (!SYMBOLIC_CONST (op))
2807 return 1;
2808
2809 /* Reject everything else; must be handled
2810 via emit_symbolic_move. */
2811 return 0;
2812 }
2813
2814 /* Returns true if the constant value OP is a legitimate general operand.
2815 It is given that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2816
2817 int
2818 legitimate_constant_p (rtx op)
2819 {
2820 /* Accept all non-symbolic constants. */
2821 if (!SYMBOLIC_CONST (op))
2822 return 1;
2823
2824 /* Accept immediate LARL operands. */
2825 if (TARGET_CPU_ZARCH && larl_operand (op, VOIDmode))
2826 return 1;
2827
2828 /* Thread-local symbols are never legal constants. This is
2829 so that emit_call knows that computing such addresses
2830 might require a function call. */
2831 if (TLS_SYMBOLIC_CONST (op))
2832 return 0;
2833
2834 /* In the PIC case, symbolic constants must *not* be
2835 forced into the literal pool. We accept them here,
2836 so that they will be handled by emit_symbolic_move. */
2837 if (flag_pic)
2838 return 1;
2839
2840 /* All remaining non-PIC symbolic constants are
2841 forced into the literal pool. */
2842 return 0;
2843 }
2844
2845 /* Determine if it's legal to put X into the constant pool. This
2846 is not possible if X contains the address of a symbol that is
2847 not constant (TLS) or not known at final link time (PIC). */
2848
2849 static bool
2850 s390_cannot_force_const_mem (rtx x)
2851 {
2852 switch (GET_CODE (x))
2853 {
2854 case CONST_INT:
2855 case CONST_DOUBLE:
2856 /* Accept all non-symbolic constants. */
2857 return false;
2858
2859 case LABEL_REF:
2860 /* Labels are OK iff we are non-PIC. */
2861 return flag_pic != 0;
2862
2863 case SYMBOL_REF:
2864 /* 'Naked' TLS symbol references are never OK,
2865 non-TLS symbols are OK iff we are non-PIC. */
2866 if (tls_symbolic_operand (x))
2867 return true;
2868 else
2869 return flag_pic != 0;
2870
2871 case CONST:
2872 return s390_cannot_force_const_mem (XEXP (x, 0));
2873 case PLUS:
2874 case MINUS:
2875 return s390_cannot_force_const_mem (XEXP (x, 0))
2876 || s390_cannot_force_const_mem (XEXP (x, 1));
2877
2878 case UNSPEC:
2879 switch (XINT (x, 1))
2880 {
2881 /* Only lt-relative or GOT-relative UNSPECs are OK. */
2882 case UNSPEC_LTREL_OFFSET:
2883 case UNSPEC_GOT:
2884 case UNSPEC_GOTOFF:
2885 case UNSPEC_PLTOFF:
2886 case UNSPEC_TLSGD:
2887 case UNSPEC_TLSLDM:
2888 case UNSPEC_NTPOFF:
2889 case UNSPEC_DTPOFF:
2890 case UNSPEC_GOTNTPOFF:
2891 case UNSPEC_INDNTPOFF:
2892 return false;
2893
2894 /* If the literal pool shares the code section, execute
2895 template placeholders may be put into the pool as well. */
2896 case UNSPEC_INSN:
2897 return TARGET_CPU_ZARCH;
2898
2899 default:
2900 return true;
2901 }
2902 break;
2903
2904 default:
2905 gcc_unreachable ();
2906 }
2907 }
2908
2909 /* Returns true if the constant value OP is a legitimate general
2910 operand during and after reload. The difference to
2911 legitimate_constant_p is that this function will not accept
2912 a constant that would need to be forced to the literal pool
2913 before it can be used as operand.
2914 This function accepts all constants which can be loaded directly
2915 into a GPR. */
2916
2917 bool
2918 legitimate_reload_constant_p (rtx op)
2919 {
2920 /* Accept la(y) operands. */
2921 if (GET_CODE (op) == CONST_INT
2922 && DISP_IN_RANGE (INTVAL (op)))
2923 return true;
2924
2925 /* Accept l(g)hi/l(g)fi operands. */
2926 if (GET_CODE (op) == CONST_INT
2927 && (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_Os (INTVAL (op))))
2928 return true;
2929
2930 /* Accept lliXX operands. */
2931 if (TARGET_ZARCH
2932 && GET_CODE (op) == CONST_INT
2933 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2934 && s390_single_part (op, word_mode, HImode, 0) >= 0)
2935 return true;
2936
2937 if (TARGET_EXTIMM
2938 && GET_CODE (op) == CONST_INT
2939 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2940 && s390_single_part (op, word_mode, SImode, 0) >= 0)
2941 return true;
2942
2943 /* Accept larl operands. */
2944 if (TARGET_CPU_ZARCH
2945 && larl_operand (op, VOIDmode))
2946 return true;
2947
2948 /* Accept floating-point zero operands that fit into a single GPR. */
2949 if (GET_CODE (op) == CONST_DOUBLE
2950 && s390_float_const_zero_p (op)
2951 && GET_MODE_SIZE (GET_MODE (op)) <= UNITS_PER_WORD)
2952 return true;
2953
2954 /* Accept double-word operands that can be split. */
2955 if (GET_CODE (op) == CONST_INT
2956 && trunc_int_for_mode (INTVAL (op), word_mode) != INTVAL (op))
2957 {
2958 enum machine_mode dword_mode = word_mode == SImode ? DImode : TImode;
2959 rtx hi = operand_subword (op, 0, 0, dword_mode);
2960 rtx lo = operand_subword (op, 1, 0, dword_mode);
2961 return legitimate_reload_constant_p (hi)
2962 && legitimate_reload_constant_p (lo);
2963 }
2964
2965 /* Everything else cannot be handled without reload. */
2966 return false;
2967 }
2968
2969 /* Returns true if the constant value OP is a legitimate fp operand
2970 during and after reload.
2971 This function accepts all constants which can be loaded directly
2972 into an FPR. */
2973
2974 static bool
2975 legitimate_reload_fp_constant_p (rtx op)
2976 {
2977 /* Accept floating-point zero operands if the load zero instruction
2978 can be used. */
2979 if (TARGET_Z196
2980 && GET_CODE (op) == CONST_DOUBLE
2981 && s390_float_const_zero_p (op))
2982 return true;
2983
2984 return false;
2985 }
2986
2987 /* Given an rtx OP being reloaded into a reg required to be in class RCLASS,
2988 return the class of reg to actually use. */
2989
2990 static reg_class_t
2991 s390_preferred_reload_class (rtx op, reg_class_t rclass)
2992 {
2993 switch (GET_CODE (op))
2994 {
2995 /* Constants we cannot reload into general registers
2996 must be forced into the literal pool. */
2997 case CONST_DOUBLE:
2998 case CONST_INT:
2999 if (reg_class_subset_p (GENERAL_REGS, rclass)
3000 && legitimate_reload_constant_p (op))
3001 return GENERAL_REGS;
3002 else if (reg_class_subset_p (ADDR_REGS, rclass)
3003 && legitimate_reload_constant_p (op))
3004 return ADDR_REGS;
3005 else if (reg_class_subset_p (FP_REGS, rclass)
3006 && legitimate_reload_fp_constant_p (op))
3007 return FP_REGS;
3008 return NO_REGS;
3009
3010 /* If a symbolic constant or a PLUS is reloaded,
3011 it is most likely being used as an address, so
3012 prefer ADDR_REGS. If 'class' is not a superset
3013 of ADDR_REGS, e.g. FP_REGS, reject this reload. */
3014 case PLUS:
3015 case LABEL_REF:
3016 case SYMBOL_REF:
3017 case CONST:
3018 if (reg_class_subset_p (ADDR_REGS, rclass))
3019 return ADDR_REGS;
3020 else
3021 return NO_REGS;
3022
3023 default:
3024 break;
3025 }
3026
3027 return rclass;
3028 }
3029
3030 /* Return true if ADDR is SYMBOL_REF + addend with addend being a
3031 multiple of ALIGNMENT and the SYMBOL_REF being naturally
3032 aligned. */
3033
3034 bool
3035 s390_check_symref_alignment (rtx addr, HOST_WIDE_INT alignment)
3036 {
3037 HOST_WIDE_INT addend;
3038 rtx symref;
3039
3040 if (!s390_symref_operand_p (addr, &symref, &addend))
3041 return false;
3042
3043 return (!SYMBOL_REF_NOT_NATURALLY_ALIGNED_P (symref)
3044 && !(addend & (alignment - 1)));
3045 }
3046
3047 /* ADDR is moved into REG using larl. If ADDR isn't a valid larl
3048 operand, SCRATCH is used to reload the even part of the address, and
3049 one is then added. */
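/* For instance, to load "sym + 5" (an odd addend that larl cannot
   encode) the code below emits roughly

       larl    %scratch, sym+4
       la      %reg, 1(%scratch)

   where %scratch and %reg are placeholders; la is used so that the
   condition code is not clobbered.  */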
3050
3051 void
3052 s390_reload_larl_operand (rtx reg, rtx addr, rtx scratch)
3053 {
3054 HOST_WIDE_INT addend;
3055 rtx symref;
3056
3057 if (!s390_symref_operand_p (addr, &symref, &addend))
3058 gcc_unreachable ();
3059
3060 if (!(addend & 1))
3061 /* Easy case. The addend is even so larl will do fine. */
3062 emit_move_insn (reg, addr);
3063 else
3064 {
3065 /* We can leave the scratch register untouched if the target
3066 register is a valid base register. */
3067 if (REGNO (reg) < FIRST_PSEUDO_REGISTER
3068 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS)
3069 scratch = reg;
3070
3071 gcc_assert (REGNO (scratch) < FIRST_PSEUDO_REGISTER);
3072 gcc_assert (REGNO_REG_CLASS (REGNO (scratch)) == ADDR_REGS);
3073
3074 if (addend != 1)
3075 emit_move_insn (scratch,
3076 gen_rtx_CONST (Pmode,
3077 gen_rtx_PLUS (Pmode, symref,
3078 GEN_INT (addend - 1))));
3079 else
3080 emit_move_insn (scratch, symref);
3081
3082 /* Increment the address using la in order to avoid clobbering cc. */
3083 emit_move_insn (reg, gen_rtx_PLUS (Pmode, scratch, const1_rtx));
3084 }
3085 }
3086
3087 /* Generate what is necessary to move between REG and MEM using
3088 SCRATCH. The direction is given by TOMEM. */
3089
3090 void
3091 s390_reload_symref_address (rtx reg, rtx mem, rtx scratch, bool tomem)
3092 {
3093 /* Reload might have pulled a constant out of the literal pool.
3094 Force it back in. */
3095 if (CONST_INT_P (mem) || GET_CODE (mem) == CONST_DOUBLE
3096 || GET_CODE (mem) == CONST)
3097 mem = force_const_mem (GET_MODE (reg), mem);
3098
3099 gcc_assert (MEM_P (mem));
3100
3101 /* For a load from memory we can leave the scratch register
3102 untouched if the target register is a valid base register. */
3103 if (!tomem
3104 && REGNO (reg) < FIRST_PSEUDO_REGISTER
3105 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS
3106 && GET_MODE (reg) == GET_MODE (scratch))
3107 scratch = reg;
3108
3109 /* Load address into scratch register. Since we can't have a
3110 secondary reload for a secondary reload we have to cover the case
3111 where larl would need a secondary reload here as well. */
3112 s390_reload_larl_operand (scratch, XEXP (mem, 0), scratch);
3113
3114 /* Now we can use a standard load/store to do the move. */
3115 if (tomem)
3116 emit_move_insn (replace_equiv_address (mem, scratch), reg);
3117 else
3118 emit_move_insn (reg, replace_equiv_address (mem, scratch));
3119 }
3120
3121 /* Inform reload about cases where moving X with a mode MODE to a register in
3122 RCLASS requires an extra scratch or immediate register. Return the class
3123 needed for the immediate register. */
3124
3125 static reg_class_t
3126 s390_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
3127 enum machine_mode mode, secondary_reload_info *sri)
3128 {
3129 enum reg_class rclass = (enum reg_class) rclass_i;
3130
3131 /* Intermediate register needed. */
3132 if (reg_classes_intersect_p (CC_REGS, rclass))
3133 return GENERAL_REGS;
3134
3135 if (TARGET_Z10)
3136 {
3137 /* On z10 several optimizer steps may generate larl operands with
3138 an odd addend. */
3139 if (in_p
3140 && s390_symref_operand_p (x, NULL, NULL)
3141 && mode == Pmode
3142 && !s390_check_symref_alignment (x, 2))
3143 sri->icode = ((mode == DImode) ? CODE_FOR_reloaddi_larl_odd_addend_z10
3144 : CODE_FOR_reloadsi_larl_odd_addend_z10);
3145
3146 /* On z10 we need a scratch register when moving QI, TI or floating
3147 point mode values from or to a memory location with a SYMBOL_REF
3148 or if the symref addend of a SI or DI move is not aligned to the
3149 width of the access. */
3150 if (MEM_P (x)
3151 && s390_symref_operand_p (XEXP (x, 0), NULL, NULL)
3152 && (mode == QImode || mode == TImode || FLOAT_MODE_P (mode)
3153 || (!TARGET_ZARCH && mode == DImode)
3154 || ((mode == HImode || mode == SImode || mode == DImode)
3155 && (!s390_check_symref_alignment (XEXP (x, 0),
3156 GET_MODE_SIZE (mode))))))
3157 {
3158 #define __SECONDARY_RELOAD_CASE(M,m) \
3159 case M##mode: \
3160 if (TARGET_64BIT) \
3161 sri->icode = in_p ? CODE_FOR_reload##m##di_toreg_z10 : \
3162 CODE_FOR_reload##m##di_tomem_z10; \
3163 else \
3164 sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 : \
3165 CODE_FOR_reload##m##si_tomem_z10; \
3166 break;
3167
3168 switch (GET_MODE (x))
3169 {
3170 __SECONDARY_RELOAD_CASE (QI, qi);
3171 __SECONDARY_RELOAD_CASE (HI, hi);
3172 __SECONDARY_RELOAD_CASE (SI, si);
3173 __SECONDARY_RELOAD_CASE (DI, di);
3174 __SECONDARY_RELOAD_CASE (TI, ti);
3175 __SECONDARY_RELOAD_CASE (SF, sf);
3176 __SECONDARY_RELOAD_CASE (DF, df);
3177 __SECONDARY_RELOAD_CASE (TF, tf);
3178 __SECONDARY_RELOAD_CASE (SD, sd);
3179 __SECONDARY_RELOAD_CASE (DD, dd);
3180 __SECONDARY_RELOAD_CASE (TD, td);
3181
3182 default:
3183 gcc_unreachable ();
3184 }
3185 #undef __SECONDARY_RELOAD_CASE
3186 }
3187 }
3188
3189 /* We need a scratch register when loading a PLUS expression which
3190 is not a legitimate operand of the LOAD ADDRESS instruction. */
3191 if (in_p && s390_plus_operand (x, mode))
3192 sri->icode = (TARGET_64BIT ?
3193 CODE_FOR_reloaddi_plus : CODE_FOR_reloadsi_plus);
3194
3195 /* Performing a multiword move from or to memory we have to make sure the
3196 second chunk in memory is addressable without causing a displacement
3197 overflow. If that would be the case we calculate the address in
3198 a scratch register. */
3199 if (MEM_P (x)
3200 && GET_CODE (XEXP (x, 0)) == PLUS
3201 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3202 && !DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1))
3203 + GET_MODE_SIZE (mode) - 1))
3204 {
3205 /* For GENERAL_REGS a displacement overflow is no problem if occurring
3206 in an s_operand address since we may fall back to lm/stm. So we only
3207 have to care about overflows in the b+i+d case. */
3208 if ((reg_classes_intersect_p (GENERAL_REGS, rclass)
3209 && s390_class_max_nregs (GENERAL_REGS, mode) > 1
3210 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
3211 /* For FP_REGS no lm/stm is available so this check is triggered
3212 for displacement overflows in b+i+d and b+d like addresses. */
3213 || (reg_classes_intersect_p (FP_REGS, rclass)
3214 && s390_class_max_nregs (FP_REGS, mode) > 1))
3215 {
3216 if (in_p)
3217 sri->icode = (TARGET_64BIT ?
3218 CODE_FOR_reloaddi_nonoffmem_in :
3219 CODE_FOR_reloadsi_nonoffmem_in);
3220 else
3221 sri->icode = (TARGET_64BIT ?
3222 CODE_FOR_reloaddi_nonoffmem_out :
3223 CODE_FOR_reloadsi_nonoffmem_out);
3224 }
3225 }
3226
3227 /* A scratch address register is needed when a symbolic constant is
3228 copied to r0 when compiling with -fPIC. In other cases the target
3229 register might be used as temporary (see legitimize_pic_address). */
3230 if (in_p && SYMBOLIC_CONST (x) && flag_pic == 2 && rclass != ADDR_REGS)
3231 sri->icode = (TARGET_64BIT ?
3232 CODE_FOR_reloaddi_PIC_addr :
3233 CODE_FOR_reloadsi_PIC_addr);
3234
3235 /* Either scratch or no register needed. */
3236 return NO_REGS;
3237 }
3238
3239 /* Generate code to load SRC, which is PLUS that is not a
3240 legitimate operand for the LA instruction, into TARGET.
3241 SCRATCH may be used as scratch register. */
3242
3243 void
3244 s390_expand_plus_operand (rtx target, rtx src,
3245 rtx scratch)
3246 {
3247 rtx sum1, sum2;
3248 struct s390_address ad;
3249
3250 /* src must be a PLUS; get its two operands. */
3251 gcc_assert (GET_CODE (src) == PLUS);
3252 gcc_assert (GET_MODE (src) == Pmode);
3253
3254 /* Check if any of the two operands is already scheduled
3255 for replacement by reload. This can happen e.g. when
3256 float registers occur in an address. */
3257 sum1 = find_replacement (&XEXP (src, 0));
3258 sum2 = find_replacement (&XEXP (src, 1));
3259 src = gen_rtx_PLUS (Pmode, sum1, sum2);
3260
3261 /* If the address is already strictly valid, there's nothing to do. */
3262 if (!s390_decompose_address (src, &ad)
3263 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
3264 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
3265 {
3266 /* Otherwise, one of the operands cannot be an address register;
3267 we reload its value into the scratch register. */
3268 if (true_regnum (sum1) < 1 || true_regnum (sum1) > 15)
3269 {
3270 emit_move_insn (scratch, sum1);
3271 sum1 = scratch;
3272 }
3273 if (true_regnum (sum2) < 1 || true_regnum (sum2) > 15)
3274 {
3275 emit_move_insn (scratch, sum2);
3276 sum2 = scratch;
3277 }
3278
3279 /* According to the way these invalid addresses are generated
3280 in reload.c, it should never happen (at least on s390) that
3281 *neither* of the PLUS components, after find_replacements
3282 was applied, is an address register. */
3283 if (sum1 == scratch && sum2 == scratch)
3284 {
3285 debug_rtx (src);
3286 gcc_unreachable ();
3287 }
3288
3289 src = gen_rtx_PLUS (Pmode, sum1, sum2);
3290 }
3291
3292 /* Emit the LOAD ADDRESS pattern. Note that reload of PLUS
3293 is only ever performed on addresses, so we can mark the
3294 sum as legitimate for LA in any case. */
3295 s390_load_address (target, src);
3296 }
3297
3298
3299 /* Return true if ADDR is a valid memory address.
3300 STRICT specifies whether strict register checking applies. */
3301
3302 static bool
3303 s390_legitimate_address_p (enum machine_mode mode, rtx addr, bool strict)
3304 {
3305 struct s390_address ad;
3306
3307 if (TARGET_Z10
3308 && larl_operand (addr, VOIDmode)
3309 && (mode == VOIDmode
3310 || s390_check_symref_alignment (addr, GET_MODE_SIZE (mode))))
3311 return true;
3312
3313 if (!s390_decompose_address (addr, &ad))
3314 return false;
3315
3316 if (strict)
3317 {
3318 if (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
3319 return false;
3320
3321 if (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx)))
3322 return false;
3323 }
3324 else
3325 {
3326 if (ad.base
3327 && !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
3328 || REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
3329 return false;
3330
3331 if (ad.indx
3332 && !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
3333 || REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
3334 return false;
3335 }
3336 return true;
3337 }
3338
3339 /* Return true if OP is a valid operand for the LA instruction.
3340 In 31-bit, we need to prove that the result is used as an
3341 address, as LA performs only a 31-bit addition. */
3342
3343 bool
3344 legitimate_la_operand_p (rtx op)
3345 {
3346 struct s390_address addr;
3347 if (!s390_decompose_address (op, &addr))
3348 return false;
3349
3350 return (TARGET_64BIT || addr.pointer);
3351 }
3352
3353 /* Return true if it is valid *and* preferable to use LA to
3354 compute the sum of OP1 and OP2. */
3355
3356 bool
3357 preferred_la_operand_p (rtx op1, rtx op2)
3358 {
3359 struct s390_address addr;
3360
3361 if (op2 != const0_rtx)
3362 op1 = gen_rtx_PLUS (Pmode, op1, op2);
3363
3364 if (!s390_decompose_address (op1, &addr))
3365 return false;
3366 if (addr.base && !REGNO_OK_FOR_BASE_P (REGNO (addr.base)))
3367 return false;
3368 if (addr.indx && !REGNO_OK_FOR_INDEX_P (REGNO (addr.indx)))
3369 return false;
3370
3371 /* Avoid LA instructions with index register on z196; it is
3372 preferable to use regular add instructions when possible. */
3373 if (addr.indx && s390_tune == PROCESSOR_2817_Z196)
3374 return false;
3375
3376 if (!TARGET_64BIT && !addr.pointer)
3377 return false;
3378
3379 if (addr.pointer)
3380 return true;
3381
3382 if ((addr.base && REG_P (addr.base) && REG_POINTER (addr.base))
3383 || (addr.indx && REG_P (addr.indx) && REG_POINTER (addr.indx)))
3384 return true;
3385
3386 return false;
3387 }
3388
3389 /* Emit a forced load-address operation to load SRC into DST.
3390 This will use the LOAD ADDRESS instruction even in situations
3391 where legitimate_la_operand_p (SRC) returns false. */
3392
3393 void
3394 s390_load_address (rtx dst, rtx src)
3395 {
3396 if (TARGET_64BIT)
3397 emit_move_insn (dst, src);
3398 else
3399 emit_insn (gen_force_la_31 (dst, src));
3400 }
3401
3402 /* Return a legitimate reference for ORIG (an address) using the
3403 register REG. If REG is 0, a new pseudo is generated.
3404
3405 There are two types of references that must be handled:
3406
3407 1. Global data references must load the address from the GOT, via
3408 the PIC reg. An insn is emitted to do this load, and the reg is
3409 returned.
3410
3411 2. Static data references, constant pool addresses, and code labels
3412 compute the address as an offset from the GOT, whose base is in
3413 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
3414 differentiate them from global data objects. The returned
3415 address is the PIC reg + an unspec constant.
3416
3417 TARGET_LEGITIMATE_ADDRESS_P rejects symbolic references unless the PIC
3418 reg also appears in the address. */
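/* As an example, with flag_pic == 1 a global symbol SYM is accessed via

     (mem (plus (reg %r12) (const (unspec [SYM] UNSPEC_GOT))))

   while a local symbol that larl cannot reach directly becomes

     (plus (reg %r12) (reg TEMP))

   with TEMP holding the UNSPEC_GOTOFF constant loaded from the literal
   pool.  */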
3419
3420 rtx
3421 legitimize_pic_address (rtx orig, rtx reg)
3422 {
3423 rtx addr = orig;
3424 rtx new_rtx = orig;
3425 rtx base;
3426
3427 gcc_assert (!TLS_SYMBOLIC_CONST (addr));
3428
3429 if (GET_CODE (addr) == LABEL_REF
3430 || (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (addr)))
3431 {
3432 /* This is a local symbol. */
3433 if (TARGET_CPU_ZARCH && larl_operand (addr, VOIDmode))
3434 {
3435 /* Access local symbols PC-relative via LARL.
3436 This is the same as in the non-PIC case, so it is
3437 handled automatically ... */
3438 }
3439 else
3440 {
3441 /* Access local symbols relative to the GOT. */
3442
3443 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3444
3445 if (reload_in_progress || reload_completed)
3446 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3447
3448 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
3449 addr = gen_rtx_CONST (Pmode, addr);
3450 addr = force_const_mem (Pmode, addr);
3451 emit_move_insn (temp, addr);
3452
3453 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3454 if (reg != 0)
3455 {
3456 s390_load_address (reg, new_rtx);
3457 new_rtx = reg;
3458 }
3459 }
3460 }
3461 else if (GET_CODE (addr) == SYMBOL_REF)
3462 {
3463 if (reg == 0)
3464 reg = gen_reg_rtx (Pmode);
3465
3466 if (flag_pic == 1)
3467 {
3468 /* Assume GOT offset < 4k. This is handled the same way
3469 in both 31- and 64-bit code (@GOT). */
3470
3471 if (reload_in_progress || reload_completed)
3472 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3473
3474 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3475 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3476 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
3477 new_rtx = gen_const_mem (Pmode, new_rtx);
3478 emit_move_insn (reg, new_rtx);
3479 new_rtx = reg;
3480 }
3481 else if (TARGET_CPU_ZARCH)
3482 {
3483 /* If the GOT offset might be >= 4k, we determine the position
3484 of the GOT entry via a PC-relative LARL (@GOTENT). */
3485
3486 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3487
3488 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
3489 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
3490
3491 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
3492 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3493 emit_move_insn (temp, new_rtx);
3494
3495 new_rtx = gen_const_mem (Pmode, temp);
3496 emit_move_insn (reg, new_rtx);
3497 new_rtx = reg;
3498 }
3499 else
3500 {
3501 /* If the GOT offset might be >= 4k, we have to load it
3502 from the literal pool (@GOT). */
3503
3504 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3505
3506 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
3507 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
3508
3509 if (reload_in_progress || reload_completed)
3510 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3511
3512 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3513 addr = gen_rtx_CONST (Pmode, addr);
3514 addr = force_const_mem (Pmode, addr);
3515 emit_move_insn (temp, addr);
3516
3517 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3518 new_rtx = gen_const_mem (Pmode, new_rtx);
3519 emit_move_insn (reg, new_rtx);
3520 new_rtx = reg;
3521 }
3522 }
3523 else
3524 {
3525 if (GET_CODE (addr) == CONST)
3526 {
3527 addr = XEXP (addr, 0);
3528 if (GET_CODE (addr) == UNSPEC)
3529 {
3530 gcc_assert (XVECLEN (addr, 0) == 1);
3531 switch (XINT (addr, 1))
3532 {
3533 /* If someone moved a GOT-relative UNSPEC
3534 out of the literal pool, force them back in. */
3535 case UNSPEC_GOTOFF:
3536 case UNSPEC_PLTOFF:
3537 new_rtx = force_const_mem (Pmode, orig);
3538 break;
3539
3540 /* @GOT is OK as is if small. */
3541 case UNSPEC_GOT:
3542 if (flag_pic == 2)
3543 new_rtx = force_const_mem (Pmode, orig);
3544 break;
3545
3546 /* @GOTENT is OK as is. */
3547 case UNSPEC_GOTENT:
3548 break;
3549
3550 /* @PLT is OK as is on 64-bit, must be converted to
3551 GOT-relative @PLTOFF on 31-bit. */
3552 case UNSPEC_PLT:
3553 if (!TARGET_CPU_ZARCH)
3554 {
3555 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3556
3557 if (reload_in_progress || reload_completed)
3558 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3559
3560 addr = XVECEXP (addr, 0, 0);
3561 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr),
3562 UNSPEC_PLTOFF);
3563 addr = gen_rtx_CONST (Pmode, addr);
3564 addr = force_const_mem (Pmode, addr);
3565 emit_move_insn (temp, addr);
3566
3567 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3568 if (reg != 0)
3569 {
3570 s390_load_address (reg, new_rtx);
3571 new_rtx = reg;
3572 }
3573 }
3574 break;
3575
3576 /* Everything else cannot happen. */
3577 default:
3578 gcc_unreachable ();
3579 }
3580 }
3581 else
3582 gcc_assert (GET_CODE (addr) == PLUS);
3583 }
3584 if (GET_CODE (addr) == PLUS)
3585 {
3586 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
3587
3588 gcc_assert (!TLS_SYMBOLIC_CONST (op0));
3589 gcc_assert (!TLS_SYMBOLIC_CONST (op1));
3590
3591 /* Check first to see if this is a constant offset
3592 from a local symbol reference. */
3593 if ((GET_CODE (op0) == LABEL_REF
3594 || (GET_CODE (op0) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (op0)))
3595 && GET_CODE (op1) == CONST_INT)
3596 {
3597 if (TARGET_CPU_ZARCH
3598 && larl_operand (op0, VOIDmode)
3599 && INTVAL (op1) < (HOST_WIDE_INT)1 << 31
3600 && INTVAL (op1) >= -((HOST_WIDE_INT)1 << 31))
3601 {
3602 if (INTVAL (op1) & 1)
3603 {
3604 /* LARL can't handle odd offsets, so emit a
3605 pair of LARL and LA. */
3606 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3607
3608 if (!DISP_IN_RANGE (INTVAL (op1)))
3609 {
3610 HOST_WIDE_INT even = INTVAL (op1) - 1;
3611 op0 = gen_rtx_PLUS (Pmode, op0, GEN_INT (even));
3612 op0 = gen_rtx_CONST (Pmode, op0);
3613 op1 = const1_rtx;
3614 }
3615
3616 emit_move_insn (temp, op0);
3617 new_rtx = gen_rtx_PLUS (Pmode, temp, op1);
3618
3619 if (reg != 0)
3620 {
3621 s390_load_address (reg, new_rtx);
3622 new_rtx = reg;
3623 }
3624 }
3625 else
3626 {
3627 /* If the offset is even, we can just use LARL.
3628 This will happen automatically. */
3629 }
3630 }
3631 else
3632 {
3633 /* Access local symbols relative to the GOT. */
3634
3635 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3636
3637 if (reload_in_progress || reload_completed)
3638 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3639
3640 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
3641 UNSPEC_GOTOFF);
3642 addr = gen_rtx_PLUS (Pmode, addr, op1);
3643 addr = gen_rtx_CONST (Pmode, addr);
3644 addr = force_const_mem (Pmode, addr);
3645 emit_move_insn (temp, addr);
3646
3647 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3648 if (reg != 0)
3649 {
3650 s390_load_address (reg, new_rtx);
3651 new_rtx = reg;
3652 }
3653 }
3654 }
3655
3656 /* Now, check whether it is a GOT relative symbol plus offset
3657 that was pulled out of the literal pool. Force it back in. */
3658
3659 else if (GET_CODE (op0) == UNSPEC
3660 && GET_CODE (op1) == CONST_INT
3661 && XINT (op0, 1) == UNSPEC_GOTOFF)
3662 {
3663 gcc_assert (XVECLEN (op0, 0) == 1);
3664
3665 new_rtx = force_const_mem (Pmode, orig);
3666 }
3667
3668 /* Otherwise, compute the sum. */
3669 else
3670 {
3671 base = legitimize_pic_address (XEXP (addr, 0), reg);
3672 new_rtx = legitimize_pic_address (XEXP (addr, 1),
3673 base == reg ? NULL_RTX : reg);
3674 if (GET_CODE (new_rtx) == CONST_INT)
3675 new_rtx = plus_constant (base, INTVAL (new_rtx));
3676 else
3677 {
3678 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
3679 {
3680 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
3681 new_rtx = XEXP (new_rtx, 1);
3682 }
3683 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
3684 }
3685
3686 if (GET_CODE (new_rtx) == CONST)
3687 new_rtx = XEXP (new_rtx, 0);
3688 new_rtx = force_operand (new_rtx, 0);
3689 }
3690 }
3691 }
3692 return new_rtx;
3693 }
3694
3695 /* Load the thread pointer into a register. */
3696
3697 rtx
3698 s390_get_thread_pointer (void)
3699 {
3700 rtx tp = gen_reg_rtx (Pmode);
3701
3702 emit_move_insn (tp, gen_rtx_REG (Pmode, TP_REGNUM));
3703 mark_reg_pointer (tp, BITS_PER_WORD);
3704
3705 return tp;
3706 }
3707
3708 /* Emit a tls call insn. The call target is the SYMBOL_REF stored
3709 in s390_tls_symbol which always refers to __tls_get_offset.
3710 The returned offset is written to RESULT_REG and an USE rtx is
3711 generated for TLS_CALL. */
3712
3713 static GTY(()) rtx s390_tls_symbol;
3714
3715 static void
3716 s390_emit_tls_call_insn (rtx result_reg, rtx tls_call)
3717 {
3718 rtx insn;
3719
3720 gcc_assert (flag_pic);
3721
3722 if (!s390_tls_symbol)
3723 s390_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_offset");
3724
3725 insn = s390_emit_call (s390_tls_symbol, tls_call, result_reg,
3726 gen_rtx_REG (Pmode, RETURN_REGNUM));
3727
3728 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), result_reg);
3729 RTL_CONST_CALL_P (insn) = 1;
3730 }
3731
3732 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3733 this (thread-local) address. REG may be used as temporary. */
3734
3735 static rtx
3736 legitimize_tls_address (rtx addr, rtx reg)
3737 {
3738 rtx new_rtx, tls_call, temp, base, r2, insn;
3739
3740 if (GET_CODE (addr) == SYMBOL_REF)
3741 switch (tls_symbolic_operand (addr))
3742 {
3743 case TLS_MODEL_GLOBAL_DYNAMIC:
3744 start_sequence ();
3745 r2 = gen_rtx_REG (Pmode, 2);
3746 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_TLSGD);
3747 new_rtx = gen_rtx_CONST (Pmode, tls_call);
3748 new_rtx = force_const_mem (Pmode, new_rtx);
3749 emit_move_insn (r2, new_rtx);
3750 s390_emit_tls_call_insn (r2, tls_call);
3751 insn = get_insns ();
3752 end_sequence ();
3753
3754 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3755 temp = gen_reg_rtx (Pmode);
3756 emit_libcall_block (insn, temp, r2, new_rtx);
3757
3758 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3759 if (reg != 0)
3760 {
3761 s390_load_address (reg, new_rtx);
3762 new_rtx = reg;
3763 }
3764 break;
3765
3766 case TLS_MODEL_LOCAL_DYNAMIC:
3767 start_sequence ();
3768 r2 = gen_rtx_REG (Pmode, 2);
3769 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM);
3770 new_rtx = gen_rtx_CONST (Pmode, tls_call);
3771 new_rtx = force_const_mem (Pmode, new_rtx);
3772 emit_move_insn (r2, new_rtx);
3773 s390_emit_tls_call_insn (r2, tls_call);
3774 insn = get_insns ();
3775 end_sequence ();
3776
3777 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM_NTPOFF);
3778 temp = gen_reg_rtx (Pmode);
3779 emit_libcall_block (insn, temp, r2, new_rtx);
3780
3781 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3782 base = gen_reg_rtx (Pmode);
3783 s390_load_address (base, new_rtx);
3784
3785 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_DTPOFF);
3786 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3787 new_rtx = force_const_mem (Pmode, new_rtx);
3788 temp = gen_reg_rtx (Pmode);
3789 emit_move_insn (temp, new_rtx);
3790
3791 new_rtx = gen_rtx_PLUS (Pmode, base, temp);
3792 if (reg != 0)
3793 {
3794 s390_load_address (reg, new_rtx);
3795 new_rtx = reg;
3796 }
3797 break;
3798
3799 case TLS_MODEL_INITIAL_EXEC:
3800 if (flag_pic == 1)
3801 {
3802 /* Assume GOT offset < 4k. This is handled the same way
3803 in both 31- and 64-bit code. */
3804
3805 if (reload_in_progress || reload_completed)
3806 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3807
3808 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3809 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3810 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
3811 new_rtx = gen_const_mem (Pmode, new_rtx);
3812 temp = gen_reg_rtx (Pmode);
3813 emit_move_insn (temp, new_rtx);
3814 }
3815 else if (TARGET_CPU_ZARCH)
3816 {
3817 /* If the GOT offset might be >= 4k, we determine the position
3818 of the GOT entry via a PC-relative LARL. */
3819
3820 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3821 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3822 temp = gen_reg_rtx (Pmode);
3823 emit_move_insn (temp, new_rtx);
3824
3825 new_rtx = gen_const_mem (Pmode, temp);
3826 temp = gen_reg_rtx (Pmode);
3827 emit_move_insn (temp, new_rtx);
3828 }
3829 else if (flag_pic)
3830 {
3831 /* If the GOT offset might be >= 4k, we have to load it
3832 from the literal pool. */
3833
3834 if (reload_in_progress || reload_completed)
3835 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3836
3837 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3838 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3839 new_rtx = force_const_mem (Pmode, new_rtx);
3840 temp = gen_reg_rtx (Pmode);
3841 emit_move_insn (temp, new_rtx);
3842
3843 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3844 new_rtx = gen_const_mem (Pmode, new_rtx);
3845
3846 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
3847 temp = gen_reg_rtx (Pmode);
3848 emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
3849 }
3850 else
3851 {
3852 /* In position-dependent code, load the absolute address of
3853 the GOT entry from the literal pool. */
3854
3855 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3856 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3857 new_rtx = force_const_mem (Pmode, new_rtx);
3858 temp = gen_reg_rtx (Pmode);
3859 emit_move_insn (temp, new_rtx);
3860
3861 new_rtx = temp;
3862 new_rtx = gen_const_mem (Pmode, new_rtx);
3863 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
3864 temp = gen_reg_rtx (Pmode);
3865 emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
3866 }
3867
3868 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3869 if (reg != 0)
3870 {
3871 s390_load_address (reg, new_rtx);
3872 new_rtx = reg;
3873 }
3874 break;
3875
3876 case TLS_MODEL_LOCAL_EXEC:
3877 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3878 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3879 new_rtx = force_const_mem (Pmode, new_rtx);
3880 temp = gen_reg_rtx (Pmode);
3881 emit_move_insn (temp, new_rtx);
3882
3883 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3884 if (reg != 0)
3885 {
3886 s390_load_address (reg, new_rtx);
3887 new_rtx = reg;
3888 }
3889 break;
3890
3891 default:
3892 gcc_unreachable ();
3893 }
3894
3895 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == UNSPEC)
3896 {
3897 switch (XINT (XEXP (addr, 0), 1))
3898 {
3899 case UNSPEC_INDNTPOFF:
3900 gcc_assert (TARGET_CPU_ZARCH);
3901 new_rtx = addr;
3902 break;
3903
3904 default:
3905 gcc_unreachable ();
3906 }
3907 }
3908
3909 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
3910 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
3911 {
3912 new_rtx = XEXP (XEXP (addr, 0), 0);
3913 if (GET_CODE (new_rtx) != SYMBOL_REF)
3914 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3915
3916 new_rtx = legitimize_tls_address (new_rtx, reg);
3917 new_rtx = plus_constant (new_rtx, INTVAL (XEXP (XEXP (addr, 0), 1)));
3918 new_rtx = force_operand (new_rtx, 0);
3919 }
3920
3921 else
3922 gcc_unreachable (); /* for now ... */
3923
3924 return new_rtx;
3925 }
3926
3927 /* Emit insns making the address in operands[1] valid for a standard
3928 move to operands[0]. operands[1] is replaced by an address which
3929 should be used instead of the former RTX to emit the move
3930 pattern. */
3931
3932 void
3933 emit_symbolic_move (rtx *operands)
3934 {
3935 rtx temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
3936
3937 if (GET_CODE (operands[0]) == MEM)
3938 operands[1] = force_reg (Pmode, operands[1]);
3939 else if (TLS_SYMBOLIC_CONST (operands[1]))
3940 operands[1] = legitimize_tls_address (operands[1], temp);
3941 else if (flag_pic)
3942 operands[1] = legitimize_pic_address (operands[1], temp);
3943 }
3944
3945 /* Try machine-dependent ways of modifying an illegitimate address X
3946 to be legitimate. If we find one, return the new, valid address.
3947
3948 OLDX is the address as it was before break_out_memory_refs was called.
3949 In some cases it is useful to look at this to decide what needs to be done.
3950
3951 MODE is the mode of the operand pointed to by X.
3952
3953 When -fpic is used, special handling is needed for symbolic references.
3954 See comments by legitimize_pic_address for details. */
3955
3956 static rtx
3957 s390_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
3958 enum machine_mode mode ATTRIBUTE_UNUSED)
3959 {
3960 rtx constant_term = const0_rtx;
3961
3962 if (TLS_SYMBOLIC_CONST (x))
3963 {
3964 x = legitimize_tls_address (x, 0);
3965
3966 if (s390_legitimate_address_p (mode, x, FALSE))
3967 return x;
3968 }
3969 else if (GET_CODE (x) == PLUS
3970 && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
3971 || TLS_SYMBOLIC_CONST (XEXP (x, 1))))
3972 {
3973 return x;
3974 }
3975 else if (flag_pic)
3976 {
3977 if (SYMBOLIC_CONST (x)
3978 || (GET_CODE (x) == PLUS
3979 && (SYMBOLIC_CONST (XEXP (x, 0))
3980 || SYMBOLIC_CONST (XEXP (x, 1)))))
3981 x = legitimize_pic_address (x, 0);
3982
3983 if (s390_legitimate_address_p (mode, x, FALSE))
3984 return x;
3985 }
3986
3987 x = eliminate_constant_term (x, &constant_term);
3988
3989 /* Optimize loading of large displacements by splitting them
3990 into the multiple of 4K and the rest; this allows the
3991 former to be CSE'd if possible.
3992
3993 Don't do this if the displacement is added to a register
3994 pointing into the stack frame, as the offsets will
3995 change later anyway. */
3996
3997 if (GET_CODE (constant_term) == CONST_INT
3998 && !TARGET_LONG_DISPLACEMENT
3999 && !DISP_IN_RANGE (INTVAL (constant_term))
4000 && !(REG_P (x) && REGNO_PTR_FRAME_P (REGNO (x))))
4001 {
4002 HOST_WIDE_INT lower = INTVAL (constant_term) & 0xfff;
4003 HOST_WIDE_INT upper = INTVAL (constant_term) ^ lower;
4004
4005 rtx temp = gen_reg_rtx (Pmode);
4006 rtx val = force_operand (GEN_INT (upper), temp);
4007 if (val != temp)
4008 emit_move_insn (temp, val);
4009
4010 x = gen_rtx_PLUS (Pmode, x, temp);
4011 constant_term = GEN_INT (lower);
4012 }
4013
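/* Worked example (editor's sketch) for the split above: a hypothetical
   displacement of 0x12345 is decomposed as
     lower = 0x12345 & 0xfff = 0x345
     upper = 0x12345 ^ 0x345 = 0x12000
   UPPER is forced into a register (and can be CSE'd across references),
   while LOWER stays within the 12-bit displacement field.  */
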
4014 if (GET_CODE (x) == PLUS)
4015 {
4016 if (GET_CODE (XEXP (x, 0)) == REG)
4017 {
4018 rtx temp = gen_reg_rtx (Pmode);
4019 rtx val = force_operand (XEXP (x, 1), temp);
4020 if (val != temp)
4021 emit_move_insn (temp, val);
4022
4023 x = gen_rtx_PLUS (Pmode, XEXP (x, 0), temp);
4024 }
4025
4026 else if (GET_CODE (XEXP (x, 1)) == REG)
4027 {
4028 rtx temp = gen_reg_rtx (Pmode);
4029 rtx val = force_operand (XEXP (x, 0), temp);
4030 if (val != temp)
4031 emit_move_insn (temp, val);
4032
4033 x = gen_rtx_PLUS (Pmode, temp, XEXP (x, 1));
4034 }
4035 }
4036
4037 if (constant_term != const0_rtx)
4038 x = gen_rtx_PLUS (Pmode, x, constant_term);
4039
4040 return x;
4041 }
4042
4043 /* Try a machine-dependent way of reloading an illegitimate address AD
4044 operand. If we find one, push the reload and return the new address.
4045
4046 MODE is the mode of the enclosing MEM. OPNUM is the operand number
4047 and TYPE is the reload type of the current reload. */
4048
4049 rtx
4050 legitimize_reload_address (rtx ad, enum machine_mode mode ATTRIBUTE_UNUSED,
4051 int opnum, int type)
4052 {
4053 if (!optimize || TARGET_LONG_DISPLACEMENT)
4054 return NULL_RTX;
4055
4056 if (GET_CODE (ad) == PLUS)
4057 {
4058 rtx tem = simplify_binary_operation (PLUS, Pmode,
4059 XEXP (ad, 0), XEXP (ad, 1));
4060 if (tem)
4061 ad = tem;
4062 }
4063
4064 if (GET_CODE (ad) == PLUS
4065 && GET_CODE (XEXP (ad, 0)) == REG
4066 && GET_CODE (XEXP (ad, 1)) == CONST_INT
4067 && !DISP_IN_RANGE (INTVAL (XEXP (ad, 1))))
4068 {
4069 HOST_WIDE_INT lower = INTVAL (XEXP (ad, 1)) & 0xfff;
4070 HOST_WIDE_INT upper = INTVAL (XEXP (ad, 1)) ^ lower;
4071 rtx cst, tem, new_rtx;
4072
4073 cst = GEN_INT (upper);
4074 if (!legitimate_reload_constant_p (cst))
4075 cst = force_const_mem (Pmode, cst);
4076
4077 tem = gen_rtx_PLUS (Pmode, XEXP (ad, 0), cst);
4078 new_rtx = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));
4079
4080 push_reload (XEXP (tem, 1), 0, &XEXP (tem, 1), 0,
4081 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
4082 opnum, (enum reload_type) type);
4083 return new_rtx;
4084 }
4085
4086 return NULL_RTX;
4087 }
4088
4089 /* Emit code to copy LEN bytes from SRC to DST. */
4090
4091 void
4092 s390_expand_movmem (rtx dst, rtx src, rtx len)
4093 {
4094 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
4095 {
4096 if (INTVAL (len) > 0)
4097 emit_insn (gen_movmem_short (dst, src, GEN_INT (INTVAL (len) - 1)));
4098 }
4099
4100 else if (TARGET_MVCLE)
4101 {
4102 emit_insn (gen_movmem_long (dst, src, convert_to_mode (Pmode, len, 1)));
4103 }
4104
4105 else
4106 {
4107 rtx dst_addr, src_addr, count, blocks, temp;
4108 rtx loop_start_label = gen_label_rtx ();
4109 rtx loop_end_label = gen_label_rtx ();
4110 rtx end_label = gen_label_rtx ();
4111 enum machine_mode mode;
4112
4113 mode = GET_MODE (len);
4114 if (mode == VOIDmode)
4115 mode = Pmode;
4116
4117 dst_addr = gen_reg_rtx (Pmode);
4118 src_addr = gen_reg_rtx (Pmode);
4119 count = gen_reg_rtx (mode);
4120 blocks = gen_reg_rtx (mode);
4121
4122 convert_move (count, len, 1);
4123 emit_cmp_and_jump_insns (count, const0_rtx,
4124 EQ, NULL_RTX, mode, 1, end_label);
4125
4126 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
4127 emit_move_insn (src_addr, force_operand (XEXP (src, 0), NULL_RTX));
4128 dst = change_address (dst, VOIDmode, dst_addr);
4129 src = change_address (src, VOIDmode, src_addr);
4130
4131 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4132 OPTAB_DIRECT);
4133 if (temp != count)
4134 emit_move_insn (count, temp);
4135
4136 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4137 OPTAB_DIRECT);
4138 if (temp != blocks)
4139 emit_move_insn (blocks, temp);
4140
4141 emit_cmp_and_jump_insns (blocks, const0_rtx,
4142 EQ, NULL_RTX, mode, 1, loop_end_label);
4143
4144 emit_label (loop_start_label);
4145
4146 if (TARGET_Z10
4147 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 768))
4148 {
4149 rtx prefetch;
4150
4151 /* Issue a read prefetch for the +3 cache line. */
4152 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, src_addr, GEN_INT (768)),
4153 const0_rtx, const0_rtx);
4154 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4155 emit_insn (prefetch);
4156
4157 /* Issue a write prefetch for the +3 cache line. */
4158 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (768)),
4159 const1_rtx, const0_rtx);
4160 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4161 emit_insn (prefetch);
4162 }
4163
4164 emit_insn (gen_movmem_short (dst, src, GEN_INT (255)));
4165 s390_load_address (dst_addr,
4166 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
4167 s390_load_address (src_addr,
4168 gen_rtx_PLUS (Pmode, src_addr, GEN_INT (256)));
4169
4170 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4171 OPTAB_DIRECT);
4172 if (temp != blocks)
4173 emit_move_insn (blocks, temp);
4174
4175 emit_cmp_and_jump_insns (blocks, const0_rtx,
4176 EQ, NULL_RTX, mode, 1, loop_end_label);
4177
4178 emit_jump (loop_start_label);
4179 emit_label (loop_end_label);
4180
4181 emit_insn (gen_movmem_short (dst, src,
4182 convert_to_mode (Pmode, count, 1)));
4183 emit_label (end_label);
4184 }
4185 }
4186
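/* Worked example (editor's sketch) of the decomposition above for a
   hypothetical LEN of 700 bytes:
     count  = 700 - 1   = 699
     blocks = 699 >> 8  = 2      -> two full 256-byte MVCs (512 bytes)
     final MVC length   = (699 & 0xff) + 1 = 188 bytes
   512 + 188 = 700 bytes moved in total.  */
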
4187 /* Emit code to set LEN bytes at DST to VAL.
4188 Make use of clrmem if VAL is zero. */
4189
4190 void
4191 s390_expand_setmem (rtx dst, rtx len, rtx val)
4192 {
4193 if (GET_CODE (len) == CONST_INT && INTVAL (len) == 0)
4194 return;
4195
4196 gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);
4197
4198 if (GET_CODE (len) == CONST_INT && INTVAL (len) > 0 && INTVAL (len) <= 257)
4199 {
4200 if (val == const0_rtx && INTVAL (len) <= 256)
4201 emit_insn (gen_clrmem_short (dst, GEN_INT (INTVAL (len) - 1)));
4202 else
4203 {
4204 /* Initialize memory by storing the first byte. */
4205 emit_move_insn (adjust_address (dst, QImode, 0), val);
4206
4207 if (INTVAL (len) > 1)
4208 {
4209 /* Initiate a 1-byte overlap move.
4210 The first byte of DST is propagated through DSTP1.
4211 Prepare a movmem for: DST+1 = DST (length = LEN - 1).
4212 DST is set to size 1 so the rest of the memory location
4213 does not count as a source operand. */
4214 rtx dstp1 = adjust_address (dst, VOIDmode, 1);
4215 set_mem_size (dst, const1_rtx);
4216
4217 emit_insn (gen_movmem_short (dstp1, dst,
4218 GEN_INT (INTVAL (len) - 2)));
4219 }
4220 }
4221 }
4222
4223 else if (TARGET_MVCLE)
4224 {
4225 val = force_not_mem (convert_modes (Pmode, QImode, val, 1));
4226 emit_insn (gen_setmem_long (dst, convert_to_mode (Pmode, len, 1), val));
4227 }
4228
4229 else
4230 {
4231 rtx dst_addr, count, blocks, temp, dstp1 = NULL_RTX;
4232 rtx loop_start_label = gen_label_rtx ();
4233 rtx loop_end_label = gen_label_rtx ();
4234 rtx end_label = gen_label_rtx ();
4235 enum machine_mode mode;
4236
4237 mode = GET_MODE (len);
4238 if (mode == VOIDmode)
4239 mode = Pmode;
4240
4241 dst_addr = gen_reg_rtx (Pmode);
4242 count = gen_reg_rtx (mode);
4243 blocks = gen_reg_rtx (mode);
4244
4245 convert_move (count, len, 1);
4246 emit_cmp_and_jump_insns (count, const0_rtx,
4247 EQ, NULL_RTX, mode, 1, end_label);
4248
4249 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
4250 dst = change_address (dst, VOIDmode, dst_addr);
4251
4252 if (val == const0_rtx)
4253 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4254 OPTAB_DIRECT);
4255 else
4256 {
4257 dstp1 = adjust_address (dst, VOIDmode, 1);
4258 set_mem_size (dst, const1_rtx);
4259
4260 /* Initialize memory by storing the first byte. */
4261 emit_move_insn (adjust_address (dst, QImode, 0), val);
4262
4263 /* If count is 1 we are done. */
4264 emit_cmp_and_jump_insns (count, const1_rtx,
4265 EQ, NULL_RTX, mode, 1, end_label);
4266
4267 temp = expand_binop (mode, add_optab, count, GEN_INT (-2), count, 1,
4268 OPTAB_DIRECT);
4269 }
4270 if (temp != count)
4271 emit_move_insn (count, temp);
4272
4273 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4274 OPTAB_DIRECT);
4275 if (temp != blocks)
4276 emit_move_insn (blocks, temp);
4277
4278 emit_cmp_and_jump_insns (blocks, const0_rtx,
4279 EQ, NULL_RTX, mode, 1, loop_end_label);
4280
4281 emit_label (loop_start_label);
4282
4283 if (TARGET_Z10
4284 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 1024))
4285 {
4286 /* Issue a write prefetch for the +4 cache line. */
4287 rtx prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr,
4288 GEN_INT (1024)),
4289 const1_rtx, const0_rtx);
4290 emit_insn (prefetch);
4291 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4292 }
4293
4294 if (val == const0_rtx)
4295 emit_insn (gen_clrmem_short (dst, GEN_INT (255)));
4296 else
4297 emit_insn (gen_movmem_short (dstp1, dst, GEN_INT (255)));
4298 s390_load_address (dst_addr,
4299 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
4300
4301 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4302 OPTAB_DIRECT);
4303 if (temp != blocks)
4304 emit_move_insn (blocks, temp);
4305
4306 emit_cmp_and_jump_insns (blocks, const0_rtx,
4307 EQ, NULL_RTX, mode, 1, loop_end_label);
4308
4309 emit_jump (loop_start_label);
4310 emit_label (loop_end_label);
4311
4312 if (val == const0_rtx)
4313 emit_insn (gen_clrmem_short (dst, convert_to_mode (Pmode, count, 1)));
4314 else
4315 emit_insn (gen_movmem_short (dstp1, dst, convert_to_mode (Pmode, count, 1)));
4316 emit_label (end_label);
4317 }
4318 }
4319
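#if 0
/* Editor's illustration (not part of the build) of the 1-byte-overlap trick
   used by s390_expand_setmem: MVC copies strictly left to right, one byte at
   a time, so after storing VAL at dst[0] an overlapping copy of
   dst[0 .. len-2] to dst[1 .. len-1] propagates VAL through the buffer.
   Plain C equivalent of that behaviour: */
static void
overlap_memset_sketch (unsigned char *dst, unsigned char val, unsigned long len)
{
  unsigned long i;

  if (len == 0)
    return;
  dst[0] = val;
  for (i = 1; i < len; i++)   /* byte-by-byte forward copy, like MVC */
    dst[i] = dst[i - 1];
}
#endif
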
4320 /* Emit code to compare LEN bytes at OP0 with those at OP1,
4321 and return the result in TARGET. */
4322
4323 void
4324 s390_expand_cmpmem (rtx target, rtx op0, rtx op1, rtx len)
4325 {
4326 rtx ccreg = gen_rtx_REG (CCUmode, CC_REGNUM);
4327 rtx tmp;
4328
4329 /* As the result of CMPINT is inverted compared to what we need,
4330 we have to swap the operands. */
4331 tmp = op0; op0 = op1; op1 = tmp;
4332
4333 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
4334 {
4335 if (INTVAL (len) > 0)
4336 {
4337 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (INTVAL (len) - 1)));
4338 emit_insn (gen_cmpint (target, ccreg));
4339 }
4340 else
4341 emit_move_insn (target, const0_rtx);
4342 }
4343 else if (TARGET_MVCLE)
4344 {
4345 emit_insn (gen_cmpmem_long (op0, op1, convert_to_mode (Pmode, len, 1)));
4346 emit_insn (gen_cmpint (target, ccreg));
4347 }
4348 else
4349 {
4350 rtx addr0, addr1, count, blocks, temp;
4351 rtx loop_start_label = gen_label_rtx ();
4352 rtx loop_end_label = gen_label_rtx ();
4353 rtx end_label = gen_label_rtx ();
4354 enum machine_mode mode;
4355
4356 mode = GET_MODE (len);
4357 if (mode == VOIDmode)
4358 mode = Pmode;
4359
4360 addr0 = gen_reg_rtx (Pmode);
4361 addr1 = gen_reg_rtx (Pmode);
4362 count = gen_reg_rtx (mode);
4363 blocks = gen_reg_rtx (mode);
4364
4365 convert_move (count, len, 1);
4366 emit_cmp_and_jump_insns (count, const0_rtx,
4367 EQ, NULL_RTX, mode, 1, end_label);
4368
4369 emit_move_insn (addr0, force_operand (XEXP (op0, 0), NULL_RTX));
4370 emit_move_insn (addr1, force_operand (XEXP (op1, 0), NULL_RTX));
4371 op0 = change_address (op0, VOIDmode, addr0);
4372 op1 = change_address (op1, VOIDmode, addr1);
4373
4374 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4375 OPTAB_DIRECT);
4376 if (temp != count)
4377 emit_move_insn (count, temp);
4378
4379 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4380 OPTAB_DIRECT);
4381 if (temp != blocks)
4382 emit_move_insn (blocks, temp);
4383
4384 emit_cmp_and_jump_insns (blocks, const0_rtx,
4385 EQ, NULL_RTX, mode, 1, loop_end_label);
4386
4387 emit_label (loop_start_label);
4388
4389 if (TARGET_Z10
4390 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 512))
4391 {
4392 rtx prefetch;
4393
4394 /* Issue a read prefetch for the +2 cache line of operand 1. */
4395 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr0, GEN_INT (512)),
4396 const0_rtx, const0_rtx);
4397 emit_insn (prefetch);
4398 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4399
4400 /* Issue a read prefetch for the +2 cache line of operand 2. */
4401 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr1, GEN_INT (512)),
4402 const0_rtx, const0_rtx);
4403 emit_insn (prefetch);
4404 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4405 }
4406
4407 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (255)));
4408 temp = gen_rtx_NE (VOIDmode, ccreg, const0_rtx);
4409 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
4410 gen_rtx_LABEL_REF (VOIDmode, end_label), pc_rtx);
4411 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
4412 emit_jump_insn (temp);
4413
4414 s390_load_address (addr0,
4415 gen_rtx_PLUS (Pmode, addr0, GEN_INT (256)));
4416 s390_load_address (addr1,
4417 gen_rtx_PLUS (Pmode, addr1, GEN_INT (256)));
4418
4419 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4420 OPTAB_DIRECT);
4421 if (temp != blocks)
4422 emit_move_insn (blocks, temp);
4423
4424 emit_cmp_and_jump_insns (blocks, const0_rtx,
4425 EQ, NULL_RTX, mode, 1, loop_end_label);
4426
4427 emit_jump (loop_start_label);
4428 emit_label (loop_end_label);
4429
4430 emit_insn (gen_cmpmem_short (op0, op1,
4431 convert_to_mode (Pmode, count, 1)));
4432 emit_label (end_label);
4433
4434 emit_insn (gen_cmpint (target, ccreg));
4435 }
4436 }
4437
4438
4439 /* Expand conditional increment or decrement using alc/slb instructions.
4440 Should generate code setting DST to either SRC or SRC + INCREMENT,
4441 depending on the result of the comparison CMP_OP0 CMP_CODE CMP_OP1.
4442 Returns true if successful, false otherwise.
4443
4444 That makes it possible to implement some if-constructs without jumps e.g.:
4445 (borrow = CC0 | CC1 and carry = CC2 | CC3)
4446 unsigned int a, b, c;
4447 if (a < b) c++; -> CCU b > a -> CC2; c += carry;
4448 if (a < b) c--; -> CCL3 a - b -> borrow; c -= borrow;
4449 if (a <= b) c++; -> CCL3 b - a -> borrow; c += carry;
4450 if (a <= b) c--; -> CCU a <= b -> borrow; c -= borrow;
4451
4452 Checks for EQ and NE with a nonzero value need an additional xor, e.g.:
4453 if (a == b) c++; -> CCL3 a ^= b; 0 - a -> borrow; c += carry;
4454 if (a == b) c--; -> CCU a ^= b; a <= 0 -> CC0 | CC1; c -= borrow;
4455 if (a != b) c++; -> CCU a ^= b; a > 0 -> CC2; c += carry;
4456 if (a != b) c--; -> CCL3 a ^= b; 0 - a -> borrow; c -= borrow; */
4457
4458 bool
4459 s390_expand_addcc (enum rtx_code cmp_code, rtx cmp_op0, rtx cmp_op1,
4460 rtx dst, rtx src, rtx increment)
4461 {
4462 enum machine_mode cmp_mode;
4463 enum machine_mode cc_mode;
4464 rtx op_res;
4465 rtx insn;
4466 rtvec p;
4467 int ret;
4468
4469 if ((GET_MODE (cmp_op0) == SImode || GET_MODE (cmp_op0) == VOIDmode)
4470 && (GET_MODE (cmp_op1) == SImode || GET_MODE (cmp_op1) == VOIDmode))
4471 cmp_mode = SImode;
4472 else if ((GET_MODE (cmp_op0) == DImode || GET_MODE (cmp_op0) == VOIDmode)
4473 && (GET_MODE (cmp_op1) == DImode || GET_MODE (cmp_op1) == VOIDmode))
4474 cmp_mode = DImode;
4475 else
4476 return false;
4477
4478 /* Try ADD LOGICAL WITH CARRY. */
4479 if (increment == const1_rtx)
4480 {
4481 /* Determine CC mode to use. */
4482 if (cmp_code == EQ || cmp_code == NE)
4483 {
4484 if (cmp_op1 != const0_rtx)
4485 {
4486 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
4487 NULL_RTX, 0, OPTAB_WIDEN);
4488 cmp_op1 = const0_rtx;
4489 }
4490
4491 cmp_code = cmp_code == EQ ? LEU : GTU;
4492 }
4493
4494 if (cmp_code == LTU || cmp_code == LEU)
4495 {
4496 rtx tem = cmp_op0;
4497 cmp_op0 = cmp_op1;
4498 cmp_op1 = tem;
4499 cmp_code = swap_condition (cmp_code);
4500 }
4501
4502 switch (cmp_code)
4503 {
4504 case GTU:
4505 cc_mode = CCUmode;
4506 break;
4507
4508 case GEU:
4509 cc_mode = CCL3mode;
4510 break;
4511
4512 default:
4513 return false;
4514 }
4515
4516 /* Emit comparison instruction pattern. */
4517 if (!register_operand (cmp_op0, cmp_mode))
4518 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4519
4520 insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
4521 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4522 /* We use insn_invalid_p here to add clobbers if required. */
4523 ret = insn_invalid_p (emit_insn (insn));
4524 gcc_assert (!ret);
4525
4526 /* Emit ALC instruction pattern. */
4527 op_res = gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4528 gen_rtx_REG (cc_mode, CC_REGNUM),
4529 const0_rtx);
4530
4531 if (src != const0_rtx)
4532 {
4533 if (!register_operand (src, GET_MODE (dst)))
4534 src = force_reg (GET_MODE (dst), src);
4535
4536 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, src);
4537 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, const0_rtx);
4538 }
4539
4540 p = rtvec_alloc (2);
4541 RTVEC_ELT (p, 0) =
4542 gen_rtx_SET (VOIDmode, dst, op_res);
4543 RTVEC_ELT (p, 1) =
4544 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4545 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
4546
4547 return true;
4548 }
4549
4550 /* Try SUBTRACT LOGICAL WITH BORROW. */
4551 if (increment == constm1_rtx)
4552 {
4553 /* Determine CC mode to use. */
4554 if (cmp_code == EQ || cmp_code == NE)
4555 {
4556 if (cmp_op1 != const0_rtx)
4557 {
4558 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
4559 NULL_RTX, 0, OPTAB_WIDEN);
4560 cmp_op1 = const0_rtx;
4561 }
4562
4563 cmp_code = cmp_code == EQ ? LEU : GTU;
4564 }
4565
4566 if (cmp_code == GTU || cmp_code == GEU)
4567 {
4568 rtx tem = cmp_op0;
4569 cmp_op0 = cmp_op1;
4570 cmp_op1 = tem;
4571 cmp_code = swap_condition (cmp_code);
4572 }
4573
4574 switch (cmp_code)
4575 {
4576 case LEU:
4577 cc_mode = CCUmode;
4578 break;
4579
4580 case LTU:
4581 cc_mode = CCL3mode;
4582 break;
4583
4584 default:
4585 return false;
4586 }
4587
4588 /* Emit comparison instruction pattern. */
4589 if (!register_operand (cmp_op0, cmp_mode))
4590 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4591
4592 insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
4593 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4594 /* We use insn_invalid_p here to add clobbers if required. */
4595 ret = insn_invalid_p (emit_insn (insn));
4596 gcc_assert (!ret);
4597
4598 /* Emit SLB instruction pattern. */
4599 if (!register_operand (src, GET_MODE (dst)))
4600 src = force_reg (GET_MODE (dst), src);
4601
4602 op_res = gen_rtx_MINUS (GET_MODE (dst),
4603 gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
4604 gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4605 gen_rtx_REG (cc_mode, CC_REGNUM),
4606 const0_rtx));
4607 p = rtvec_alloc (2);
4608 RTVEC_ELT (p, 0) =
4609 gen_rtx_SET (VOIDmode, dst, op_res);
4610 RTVEC_ELT (p, 1) =
4611 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4612 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
4613
4614 return true;
4615 }
4616
4617 return false;
4618 }
4619
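#if 0
/* Editor's illustration (not part of the build) of what the ALC-based
   transformation above computes for "if (a < b) c++;": the unsigned compare
   leaves the carry in the CC, and ADD LOGICAL WITH CARRY adds it to C
   without a branch.  Plain C equivalent: */
static unsigned int
addcc_sketch (unsigned int a, unsigned int b, unsigned int c)
{
  return c + (a < b ? 1U : 0U);
}
#endif
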
4620 /* Expand code for the insv template. Return true if successful. */
4621
4622 bool
4623 s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
4624 {
4625 int bitsize = INTVAL (op1);
4626 int bitpos = INTVAL (op2);
4627
4628 /* On z10 we can use the risbg instruction to implement insv. */
4629 if (TARGET_Z10
4630 && ((GET_MODE (dest) == DImode && GET_MODE (src) == DImode)
4631 || (GET_MODE (dest) == SImode && GET_MODE (src) == SImode)))
4632 {
4633 rtx op;
4634 rtx clobber;
4635
4636 op = gen_rtx_SET (GET_MODE(src),
4637 gen_rtx_ZERO_EXTRACT (GET_MODE (dest), dest, op1, op2),
4638 src);
4639 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4640 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
4641
4642 return true;
4643 }
4644
4645 /* We need byte alignment. */
4646 if (bitsize % BITS_PER_UNIT)
4647 return false;
4648
4649 if (bitpos == 0
4650 && memory_operand (dest, VOIDmode)
4651 && (register_operand (src, word_mode)
4652 || const_int_operand (src, VOIDmode)))
4653 {
4654 /* Emit standard pattern if possible. */
4655 enum machine_mode mode = smallest_mode_for_size (bitsize, MODE_INT);
4656 if (GET_MODE_BITSIZE (mode) == bitsize)
4657 emit_move_insn (adjust_address (dest, mode, 0), gen_lowpart (mode, src));
4658
4659 /* (set (ze (mem)) (const_int)). */
4660 else if (const_int_operand (src, VOIDmode))
4661 {
4662 int size = bitsize / BITS_PER_UNIT;
4663 rtx src_mem = adjust_address (force_const_mem (word_mode, src), BLKmode,
4664 GET_MODE_SIZE (word_mode) - size);
4665
4666 dest = adjust_address (dest, BLKmode, 0);
4667 set_mem_size (dest, GEN_INT (size));
4668 s390_expand_movmem (dest, src_mem, GEN_INT (size));
4669 }
4670
4671 /* (set (ze (mem)) (reg)). */
4672 else if (register_operand (src, word_mode))
4673 {
4674 if (bitsize <= GET_MODE_BITSIZE (SImode))
4675 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, op1,
4676 const0_rtx), src);
4677 else
4678 {
4679 /* Emit st,stcmh sequence. */
4680 int stcmh_width = bitsize - GET_MODE_BITSIZE (SImode);
4681 int size = stcmh_width / BITS_PER_UNIT;
4682
4683 emit_move_insn (adjust_address (dest, SImode, size),
4684 gen_lowpart (SImode, src));
4685 set_mem_size (dest, GEN_INT (size));
4686 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, GEN_INT
4687 (stcmh_width), const0_rtx),
4688 gen_rtx_LSHIFTRT (word_mode, src, GEN_INT
4689 (GET_MODE_BITSIZE (SImode))));
4690 }
4691 }
4692 else
4693 return false;
4694
4695 return true;
4696 }
4697
4698 /* (set (ze (reg)) (const_int)). */
4699 if (TARGET_ZARCH
4700 && register_operand (dest, word_mode)
4701 && (bitpos % 16) == 0
4702 && (bitsize % 16) == 0
4703 && const_int_operand (src, VOIDmode))
4704 {
4705 HOST_WIDE_INT val = INTVAL (src);
4706 int regpos = bitpos + bitsize;
4707
4708 while (regpos > bitpos)
4709 {
4710 enum machine_mode putmode;
4711 int putsize;
4712
4713 if (TARGET_EXTIMM && (regpos % 32 == 0) && (regpos >= bitpos + 32))
4714 putmode = SImode;
4715 else
4716 putmode = HImode;
4717
4718 putsize = GET_MODE_BITSIZE (putmode);
4719 regpos -= putsize;
4720 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
4721 GEN_INT (putsize),
4722 GEN_INT (regpos)),
4723 gen_int_mode (val, putmode));
4724 val >>= putsize;
4725 }
4726 gcc_assert (regpos == bitpos);
4727 return true;
4728 }
4729
4730 return false;
4731 }
4732
4733 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic which returns a
4734 register that holds VAL of mode MODE shifted by COUNT bits. */
4735
4736 static inline rtx
4737 s390_expand_mask_and_shift (rtx val, enum machine_mode mode, rtx count)
4738 {
4739 val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
4740 NULL_RTX, 1, OPTAB_DIRECT);
4741 return expand_simple_binop (SImode, ASHIFT, val, count,
4742 NULL_RTX, 1, OPTAB_DIRECT);
4743 }
4744
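/* Editor's sketch: for QImode VAL = 0xab and a hypothetical COUNT of 16,
   the helper above yields (0xab & 0xff) << 16 = 0x00ab0000, i.e. VAL placed
   at its byte position within the containing SImode word.  */
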
4745 /* Structure to hold the initial parameters for a compare_and_swap operation
4746 in HImode and QImode. */
4747
4748 struct alignment_context
4749 {
4750 rtx memsi; /* SI aligned memory location. */
4751 rtx shift; /* Bit offset with regard to lsb. */
4752 rtx modemask; /* Mask of the HQImode shifted by SHIFT bits. */
4753 rtx modemaski; /* ~modemask */
4754 bool aligned; /* True if memory is aligned, false otherwise. */
4755 };
4756
4757 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize
4758 structure AC for transparent simplification if the memory alignment is known
4759 to be at least 32 bits. MEM is the memory location for the actual operation
4760 and MODE its mode. */
4761
4762 static void
4763 init_alignment_context (struct alignment_context *ac, rtx mem,
4764 enum machine_mode mode)
4765 {
4766 ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
4767 ac->aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));
4768
4769 if (ac->aligned)
4770 ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned. */
4771 else
4772 {
4773 /* Alignment is unknown. */
4774 rtx byteoffset, addr, align;
4775
4776 /* Force the address into a register. */
4777 addr = force_reg (Pmode, XEXP (mem, 0));
4778
4779 /* Align it to SImode. */
4780 align = expand_simple_binop (Pmode, AND, addr,
4781 GEN_INT (-GET_MODE_SIZE (SImode)),
4782 NULL_RTX, 1, OPTAB_DIRECT);
4783 /* Generate MEM. */
4784 ac->memsi = gen_rtx_MEM (SImode, align);
4785 MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
4786 set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
4787 set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));
4788
4789 /* Calculate shiftcount. */
4790 byteoffset = expand_simple_binop (Pmode, AND, addr,
4791 GEN_INT (GET_MODE_SIZE (SImode) - 1),
4792 NULL_RTX, 1, OPTAB_DIRECT);
4793 /* As we already have some offset, evaluate the remaining distance. */
4794 ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
4795 NULL_RTX, 1, OPTAB_DIRECT);
4796
4797 }
4798 /* Shift is the byte count, but we need the bit count. */
4799 ac->shift = expand_simple_binop (SImode, MULT, ac->shift, GEN_INT (BITS_PER_UNIT),
4800 NULL_RTX, 1, OPTAB_DIRECT);
4801 /* Calculate masks. */
4802 ac->modemask = expand_simple_binop (SImode, ASHIFT,
4803 GEN_INT (GET_MODE_MASK (mode)), ac->shift,
4804 NULL_RTX, 1, OPTAB_DIRECT);
4805 ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask, NULL_RTX, 1);
4806 }
4807
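/* Worked example (editor's sketch) for the unaligned case above, assuming a
   hypothetical QImode access at address 0x1001:
     initial shift = 4 - 1       = 3   (bytes)
     align         = 0x1001 & -4 = 0x1000
     byteoffset    = 0x1001 & 3  = 1
     shift         = (3 - 1) * 8 = 16  (bits)
     modemask      = 0xff << 16  = 0x00ff0000
   i.e. on this big-endian target the byte at offset 1 occupies bits 16..23
   (counting from the LSB) of the aligned SImode word.  */
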
4808 /* Expand an atomic compare and swap operation for HImode and QImode. MEM is
4809 the memory location, CMP the old value to compare MEM with and NEW_RTX the value
4810 to set if CMP == MEM.
4811 CMP is never in memory for compare_and_swap_cc because
4812 expand_bool_compare_and_swap puts it into a register for later compare. */
4813
4814 void
4815 s390_expand_cs_hqi (enum machine_mode mode, rtx target, rtx mem, rtx cmp, rtx new_rtx)
4816 {
4817 struct alignment_context ac;
4818 rtx cmpv, newv, val, resv, cc;
4819 rtx res = gen_reg_rtx (SImode);
4820 rtx csloop = gen_label_rtx ();
4821 rtx csend = gen_label_rtx ();
4822
4823 gcc_assert (register_operand (target, VOIDmode));
4824 gcc_assert (MEM_P (mem));
4825
4826 init_alignment_context (&ac, mem, mode);
4827
4828 /* Shift the values to the correct bit positions. */
4829 if (!(ac.aligned && MEM_P (cmp)))
4830 cmp = s390_expand_mask_and_shift (cmp, mode, ac.shift);
4831 if (!(ac.aligned && MEM_P (new_rtx)))
4832 new_rtx = s390_expand_mask_and_shift (new_rtx, mode, ac.shift);
4833
4834 /* Load full word. Subsequent loads are performed by CS. */
4835 val = expand_simple_binop (SImode, AND, ac.memsi, ac.modemaski,
4836 NULL_RTX, 1, OPTAB_DIRECT);
4837
4838 /* Start CS loop. */
4839 emit_label (csloop);
4840 /* val = "<mem>00..0<mem>"
4841 * cmp = "00..0<cmp>00..0"
4842 * new = "00..0<new>00..0"
4843 */
4844
4845 /* Patch cmp and new with val at correct position. */
4846 if (ac.aligned && MEM_P (cmp))
4847 {
4848 cmpv = force_reg (SImode, val);
4849 store_bit_field (cmpv, GET_MODE_BITSIZE (mode), 0, SImode, cmp);
4850 }
4851 else
4852 cmpv = force_reg (SImode, expand_simple_binop (SImode, IOR, cmp, val,
4853 NULL_RTX, 1, OPTAB_DIRECT));
4854 if (ac.aligned && MEM_P (new_rtx))
4855 {
4856 newv = force_reg (SImode, val);
4857 store_bit_field (newv, GET_MODE_BITSIZE (mode), 0, SImode, new_rtx);
4858 }
4859 else
4860 newv = force_reg (SImode, expand_simple_binop (SImode, IOR, new_rtx, val,
4861 NULL_RTX, 1, OPTAB_DIRECT));
4862
4863 /* Jump to end if we're done (likely?). */
4864 s390_emit_jump (csend, s390_emit_compare_and_swap (EQ, res, ac.memsi,
4865 cmpv, newv));
4866
4867 /* Check for changes outside mode. */
4868 resv = expand_simple_binop (SImode, AND, res, ac.modemaski,
4869 NULL_RTX, 1, OPTAB_DIRECT);
4870 cc = s390_emit_compare (NE, resv, val);
4871 emit_move_insn (val, resv);
4872 /* If so, loop again. */
4873 s390_emit_jump (csloop, cc);
4874
4875 emit_label (csend);
4876
4877 /* Return the correct part of the bitfield. */
4878 convert_move (target, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
4879 NULL_RTX, 1, OPTAB_DIRECT), 1);
4880 }
4881
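#if 0
/* Editor's illustration (not part of the build) of the CS retry loop emitted
   above, written with the __sync_val_compare_and_swap builtin standing in
   for the COMPARE AND SWAP instruction.  MODEMASK marks the HImode/QImode
   field within the aligned SImode word; CMP_FIELD and NEW_FIELD are already
   shifted into position.  */
static unsigned int
cs_hqi_sketch (unsigned int *memsi, unsigned int cmp_field,
               unsigned int new_field, unsigned int modemask)
{
  unsigned int val = *memsi & ~modemask;     /* surroundings, field zeroed */
  unsigned int res;

  for (;;)
    {
      unsigned int cmpv = cmp_field | val;   /* expected full word */
      unsigned int newv = new_field | val;   /* replacement full word */

      res = __sync_val_compare_and_swap (memsi, cmpv, newv);
      if (res == cmpv)
        break;                               /* CS succeeded */
      if ((res & ~modemask) == val)
        break;                               /* the field itself differed */
      val = res & ~modemask;                 /* surroundings changed: retry */
    }
  return res;                                /* caller extracts the field */
}
#endif
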
4882 /* Expand an atomic operation CODE of mode MODE. MEM is the memory location
4883 and VAL the value to play with. If AFTER is true then store the value
4884 MEM holds after the operation; if AFTER is false then store the value MEM
4885 holds before the operation. If TARGET is zero then discard that value, else
4886 store it to TARGET. */
4887
4888 void
4889 s390_expand_atomic (enum machine_mode mode, enum rtx_code code,
4890 rtx target, rtx mem, rtx val, bool after)
4891 {
4892 struct alignment_context ac;
4893 rtx cmp;
4894 rtx new_rtx = gen_reg_rtx (SImode);
4895 rtx orig = gen_reg_rtx (SImode);
4896 rtx csloop = gen_label_rtx ();
4897
4898 gcc_assert (!target || register_operand (target, VOIDmode));
4899 gcc_assert (MEM_P (mem));
4900
4901 init_alignment_context (&ac, mem, mode);
4902
4903 /* Shift val to the correct bit positions.
4904 Preserve "icm", but prevent "ex icm". */
4905 if (!(ac.aligned && code == SET && MEM_P (val)))
4906 val = s390_expand_mask_and_shift (val, mode, ac.shift);
4907
4908 /* Further preparation insns. */
4909 if (code == PLUS || code == MINUS)
4910 emit_move_insn (orig, val);
4911 else if (code == MULT || code == AND) /* val = "11..1<val>11..1" */
4912 val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
4913 NULL_RTX, 1, OPTAB_DIRECT);
4914
4915 /* Load full word. Subsequent loads are performed by CS. */
4916 cmp = force_reg (SImode, ac.memsi);
4917
4918 /* Start CS loop. */
4919 emit_label (csloop);
4920 emit_move_insn (new_rtx, cmp);
4921
4922 /* Patch new with val at correct position. */
4923 switch (code)
4924 {
4925 case PLUS:
4926 case MINUS:
4927 val = expand_simple_binop (SImode, code, new_rtx, orig,
4928 NULL_RTX, 1, OPTAB_DIRECT);
4929 val = expand_simple_binop (SImode, AND, val, ac.modemask,
4930 NULL_RTX, 1, OPTAB_DIRECT);
4931 /* FALLTHRU */
4932 case SET:
4933 if (ac.aligned && MEM_P (val))
4934 store_bit_field (new_rtx, GET_MODE_BITSIZE (mode), 0, SImode, val);
4935 else
4936 {
4937 new_rtx = expand_simple_binop (SImode, AND, new_rtx, ac.modemaski,
4938 NULL_RTX, 1, OPTAB_DIRECT);
4939 new_rtx = expand_simple_binop (SImode, IOR, new_rtx, val,
4940 NULL_RTX, 1, OPTAB_DIRECT);
4941 }
4942 break;
4943 case AND:
4944 case IOR:
4945 case XOR:
4946 new_rtx = expand_simple_binop (SImode, code, new_rtx, val,
4947 NULL_RTX, 1, OPTAB_DIRECT);
4948 break;
4949 case MULT: /* NAND */
4950 new_rtx = expand_simple_binop (SImode, AND, new_rtx, val,
4951 NULL_RTX, 1, OPTAB_DIRECT);
4952 new_rtx = expand_simple_binop (SImode, XOR, new_rtx, ac.modemask,
4953 NULL_RTX, 1, OPTAB_DIRECT);
4954 break;
4955 default:
4956 gcc_unreachable ();
4957 }
4958
4959 s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, cmp,
4960 ac.memsi, cmp, new_rtx));
4961
4962 /* Return the correct part of the bitfield. */
4963 if (target)
4964 convert_move (target, expand_simple_binop (SImode, LSHIFTRT,
4965 after ? new_rtx : cmp, ac.shift,
4966 NULL_RTX, 1, OPTAB_DIRECT), 1);
4967 }
4968
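/* Editor's sketch of the AND/NAND preparation in s390_expand_atomic above,
   assuming a hypothetical QImode operand at shift 8 within the SImode word:
     modemask = 0x0000ff00      modemaski = 0xffff00ff
     val      = 0x0000vv00  ->  val ^ modemaski = 0xffffvvff
   ANDing that pattern into the full word touches only the target byte and
   leaves the surrounding bytes unchanged.  */
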
4969 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
4970 We need to emit DTP-relative relocations. */
4971
4972 static void s390_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
4973
4974 static void
4975 s390_output_dwarf_dtprel (FILE *file, int size, rtx x)
4976 {
4977 switch (size)
4978 {
4979 case 4:
4980 fputs ("\t.long\t", file);
4981 break;
4982 case 8:
4983 fputs ("\t.quad\t", file);
4984 break;
4985 default:
4986 gcc_unreachable ();
4987 }
4988 output_addr_const (file, x);
4989 fputs ("@DTPOFF", file);
4990 }
4991
4992 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
4993 /* Implement TARGET_MANGLE_TYPE. */
4994
4995 static const char *
4996 s390_mangle_type (const_tree type)
4997 {
4998 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
4999 && TARGET_LONG_DOUBLE_128)
5000 return "g";
5001
5002 /* For all other types, use normal C++ mangling. */
5003 return NULL;
5004 }
5005 #endif
5006
5007 /* In the name of slightly smaller debug output, and to cater to
5008 general assembler lossage, recognize various UNSPEC sequences
5009 and turn them back into a direct symbol reference. */
5010
5011 static rtx
5012 s390_delegitimize_address (rtx orig_x)
5013 {
5014 rtx x, y;
5015
5016 orig_x = delegitimize_mem_from_attrs (orig_x);
5017 x = orig_x;
5018 if (GET_CODE (x) != MEM)
5019 return orig_x;
5020
5021 x = XEXP (x, 0);
5022 if (GET_CODE (x) == PLUS
5023 && GET_CODE (XEXP (x, 1)) == CONST
5024 && GET_CODE (XEXP (x, 0)) == REG
5025 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
5026 {
5027 y = XEXP (XEXP (x, 1), 0);
5028 if (GET_CODE (y) == UNSPEC
5029 && XINT (y, 1) == UNSPEC_GOT)
5030 y = XVECEXP (y, 0, 0);
5031 else
5032 return orig_x;
5033 }
5034 else if (GET_CODE (x) == CONST)
5035 {
5036 y = XEXP (x, 0);
5037 if (GET_CODE (y) == UNSPEC
5038 && XINT (y, 1) == UNSPEC_GOTENT)
5039 y = XVECEXP (y, 0, 0);
5040 else
5041 return orig_x;
5042 }
5043 else
5044 return orig_x;
5045
5046 if (GET_MODE (orig_x) != Pmode)
5047 {
5048 y = lowpart_subreg (GET_MODE (orig_x), y, Pmode);
5049 if (y == NULL_RTX)
5050 return orig_x;
5051 }
5052 return y;
5053 }
5054
5055 /* Output operand OP to stdio stream FILE.
5056 OP is an address (register + offset) which is not used to address data;
5057 instead the rightmost bits are interpreted as the value. */
5058
5059 static void
5060 print_shift_count_operand (FILE *file, rtx op)
5061 {
5062 HOST_WIDE_INT offset;
5063 rtx base;
5064
5065 /* Extract base register and offset. */
5066 if (!s390_decompose_shift_count (op, &base, &offset))
5067 gcc_unreachable ();
5068
5069 /* Sanity check. */
5070 if (base)
5071 {
5072 gcc_assert (GET_CODE (base) == REG);
5073 gcc_assert (REGNO (base) < FIRST_PSEUDO_REGISTER);
5074 gcc_assert (REGNO_REG_CLASS (REGNO (base)) == ADDR_REGS);
5075 }
5076
5077 /* Offsets are restricted to twelve bits. */
5078 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset & ((1 << 12) - 1));
5079 if (base)
5080 fprintf (file, "(%s)", reg_names[REGNO (base)]);
5081 }
5082
5083 /* See 'get_some_local_dynamic_name'. */
5084
5085 static int
5086 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
5087 {
5088 rtx x = *px;
5089
5090 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
5091 {
5092 x = get_pool_constant (x);
5093 return for_each_rtx (&x, get_some_local_dynamic_name_1, 0);
5094 }
5095
5096 if (GET_CODE (x) == SYMBOL_REF
5097 && tls_symbolic_operand (x) == TLS_MODEL_LOCAL_DYNAMIC)
5098 {
5099 cfun->machine->some_ld_name = XSTR (x, 0);
5100 return 1;
5101 }
5102
5103 return 0;
5104 }
5105
5106 /* Locate some local-dynamic symbol still in use by this function
5107 so that we can print its name in local-dynamic base patterns. */
5108
5109 static const char *
5110 get_some_local_dynamic_name (void)
5111 {
5112 rtx insn;
5113
5114 if (cfun->machine->some_ld_name)
5115 return cfun->machine->some_ld_name;
5116
5117 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
5118 if (INSN_P (insn)
5119 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
5120 return cfun->machine->some_ld_name;
5121
5122 gcc_unreachable ();
5123 }
5124
5125 /* Output machine-dependent UNSPECs occurring in address constant X
5126 in assembler syntax to stdio stream FILE. Returns true if the
5127 constant X could be recognized, false otherwise. */
5128
5129 static bool
5130 s390_output_addr_const_extra (FILE *file, rtx x)
5131 {
5132 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
5133 switch (XINT (x, 1))
5134 {
5135 case UNSPEC_GOTENT:
5136 output_addr_const (file, XVECEXP (x, 0, 0));
5137 fprintf (file, "@GOTENT");
5138 return true;
5139 case UNSPEC_GOT:
5140 output_addr_const (file, XVECEXP (x, 0, 0));
5141 fprintf (file, "@GOT");
5142 return true;
5143 case UNSPEC_GOTOFF:
5144 output_addr_const (file, XVECEXP (x, 0, 0));
5145 fprintf (file, "@GOTOFF");
5146 return true;
5147 case UNSPEC_PLT:
5148 output_addr_const (file, XVECEXP (x, 0, 0));
5149 fprintf (file, "@PLT");
5150 return true;
5151 case UNSPEC_PLTOFF:
5152 output_addr_const (file, XVECEXP (x, 0, 0));
5153 fprintf (file, "@PLTOFF");
5154 return true;
5155 case UNSPEC_TLSGD:
5156 output_addr_const (file, XVECEXP (x, 0, 0));
5157 fprintf (file, "@TLSGD");
5158 return true;
5159 case UNSPEC_TLSLDM:
5160 assemble_name (file, get_some_local_dynamic_name ());
5161 fprintf (file, "@TLSLDM");
5162 return true;
5163 case UNSPEC_DTPOFF:
5164 output_addr_const (file, XVECEXP (x, 0, 0));
5165 fprintf (file, "@DTPOFF");
5166 return true;
5167 case UNSPEC_NTPOFF:
5168 output_addr_const (file, XVECEXP (x, 0, 0));
5169 fprintf (file, "@NTPOFF");
5170 return true;
5171 case UNSPEC_GOTNTPOFF:
5172 output_addr_const (file, XVECEXP (x, 0, 0));
5173 fprintf (file, "@GOTNTPOFF");
5174 return true;
5175 case UNSPEC_INDNTPOFF:
5176 output_addr_const (file, XVECEXP (x, 0, 0));
5177 fprintf (file, "@INDNTPOFF");
5178 return true;
5179 }
5180
5181 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 2)
5182 switch (XINT (x, 1))
5183 {
5184 case UNSPEC_POOL_OFFSET:
5185 x = gen_rtx_MINUS (GET_MODE (x), XVECEXP (x, 0, 0), XVECEXP (x, 0, 1));
5186 output_addr_const (file, x);
5187 return true;
5188 }
5189 return false;
5190 }
5191
5192 /* Output address operand ADDR in assembler syntax to
5193 stdio stream FILE. */
5194
5195 void
5196 print_operand_address (FILE *file, rtx addr)
5197 {
5198 struct s390_address ad;
5199
5200 if (s390_symref_operand_p (addr, NULL, NULL))
5201 {
5202 if (!TARGET_Z10)
5203 {
5204 output_operand_lossage ("symbolic memory references are "
5205 "only supported on z10 or later");
5206 return;
5207 }
5208 output_addr_const (file, addr);
5209 return;
5210 }
5211
5212 if (!s390_decompose_address (addr, &ad)
5213 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5214 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
5215 output_operand_lossage ("cannot decompose address");
5216
5217 if (ad.disp)
5218 output_addr_const (file, ad.disp);
5219 else
5220 fprintf (file, "0");
5221
5222 if (ad.base && ad.indx)
5223 fprintf (file, "(%s,%s)", reg_names[REGNO (ad.indx)],
5224 reg_names[REGNO (ad.base)]);
5225 else if (ad.base)
5226 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
5227 }
5228
5229 /* Output operand X in assembler syntax to stdio stream FILE.
5230 CODE specified the format flag. The following format flags
5231 are recognized:
5232
5233 'C': print opcode suffix for branch condition.
5234 'D': print opcode suffix for inverse branch condition.
5235 'E': print opcode suffix for branch on index instruction.
5236 'J': print tls_load/tls_gdcall/tls_ldcall suffix.
5237 'G': print the size of the operand in bytes.
5238 'O': print only the displacement of a memory reference.
5239 'R': print only the base register of a memory reference.
5240 'S': print S-type memory reference (base+displacement).
5241 'N': print the second word of a DImode operand.
5242 'M': print the second word of a TImode operand.
5243 'Y': print shift count operand.
5244
5245 'b': print integer X as if it's an unsigned byte.
5246 'c': print integer X as if it's a signed byte.
5247 'x': print integer X as if it's an unsigned halfword.
5248 'h': print integer X as if it's a signed halfword.
5249 'i': print the first nonzero HImode part of X.
5250 'j': print the first HImode part unequal to -1 of X.
5251 'k': print the first nonzero SImode part of X.
5252 'm': print the first SImode part unequal to -1 of X.
5253 'o': print integer X as if it's an unsigned 32-bit word. */
5254
5255 void
5256 print_operand (FILE *file, rtx x, int code)
5257 {
5258 switch (code)
5259 {
5260 case 'C':
5261 fprintf (file, s390_branch_condition_mnemonic (x, FALSE));
5262 return;
5263
5264 case 'D':
5265 fprintf (file, s390_branch_condition_mnemonic (x, TRUE));
5266 return;
5267
5268 case 'E':
5269 if (GET_CODE (x) == LE)
5270 fprintf (file, "l");
5271 else if (GET_CODE (x) == GT)
5272 fprintf (file, "h");
5273 else
5274 output_operand_lossage ("invalid comparison operator "
5275 "for 'E' output modifier");
5276 return;
5277
5278 case 'J':
5279 if (GET_CODE (x) == SYMBOL_REF)
5280 {
5281 fprintf (file, "%s", ":tls_load:");
5282 output_addr_const (file, x);
5283 }
5284 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
5285 {
5286 fprintf (file, "%s", ":tls_gdcall:");
5287 output_addr_const (file, XVECEXP (x, 0, 0));
5288 }
5289 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM)
5290 {
5291 fprintf (file, "%s", ":tls_ldcall:");
5292 assemble_name (file, get_some_local_dynamic_name ());
5293 }
5294 else
5295 output_operand_lossage ("invalid reference for 'J' output modifier");
5296 return;
5297
5298 case 'G':
5299 fprintf (file, "%u", GET_MODE_SIZE (GET_MODE (x)));
5300 return;
5301
5302 case 'O':
5303 {
5304 struct s390_address ad;
5305 int ret;
5306
5307 if (!MEM_P (x))
5308 {
5309 output_operand_lossage ("memory reference expected for "
5310 "'O' output modifier");
5311 return;
5312 }
5313
5314 ret = s390_decompose_address (XEXP (x, 0), &ad);
5315
5316 if (!ret
5317 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5318 || ad.indx)
5319 {
5320 output_operand_lossage ("invalid address for 'O' output modifier");
5321 return;
5322 }
5323
5324 if (ad.disp)
5325 output_addr_const (file, ad.disp);
5326 else
5327 fprintf (file, "0");
5328 }
5329 return;
5330
5331 case 'R':
5332 {
5333 struct s390_address ad;
5334 int ret;
5335
5336 if (!MEM_P (x))
5337 {
5338 output_operand_lossage ("memory reference expected for "
5339 "'R' output modifier");
5340 return;
5341 }
5342
5343 ret = s390_decompose_address (XEXP (x, 0), &ad);
5344
5345 if (!ret
5346 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5347 || ad.indx)
5348 {
5349 output_operand_lossage ("invalid address for 'R' output modifier");
5350 return;
5351 }
5352
5353 if (ad.base)
5354 fprintf (file, "%s", reg_names[REGNO (ad.base)]);
5355 else
5356 fprintf (file, "0");
5357 }
5358 return;
5359
5360 case 'S':
5361 {
5362 struct s390_address ad;
5363 int ret;
5364
5365 if (!MEM_P (x))
5366 {
5367 output_operand_lossage ("memory reference expected for "
5368 "'S' output modifier");
5369 return;
5370 }
5371 ret = s390_decompose_address (XEXP (x, 0), &ad);
5372
5373 if (!ret
5374 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5375 || ad.indx)
5376 {
5377 output_operand_lossage ("invalid address for 'S' output modifier");
5378 return;
5379 }
5380
5381 if (ad.disp)
5382 output_addr_const (file, ad.disp);
5383 else
5384 fprintf (file, "0");
5385
5386 if (ad.base)
5387 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
5388 }
5389 return;
5390
5391 case 'N':
5392 if (GET_CODE (x) == REG)
5393 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
5394 else if (GET_CODE (x) == MEM)
5395 x = change_address (x, VOIDmode, plus_constant (XEXP (x, 0), 4));
5396 else
5397 output_operand_lossage ("register or memory expression expected "
5398 "for 'N' output modifier");
5399 break;
5400
5401 case 'M':
5402 if (GET_CODE (x) == REG)
5403 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
5404 else if (GET_CODE (x) == MEM)
5405 x = change_address (x, VOIDmode, plus_constant (XEXP (x, 0), 8));
5406 else
5407 output_operand_lossage ("register or memory expression expected "
5408 "for 'M' output modifier");
5409 break;
5410
5411 case 'Y':
5412 print_shift_count_operand (file, x);
5413 return;
5414 }
5415
5416 switch (GET_CODE (x))
5417 {
5418 case REG:
5419 fprintf (file, "%s", reg_names[REGNO (x)]);
5420 break;
5421
5422 case MEM:
5423 output_address (XEXP (x, 0));
5424 break;
5425
5426 case CONST:
5427 case CODE_LABEL:
5428 case LABEL_REF:
5429 case SYMBOL_REF:
5430 output_addr_const (file, x);
5431 break;
5432
5433 case CONST_INT:
5434 if (code == 'b')
5435 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xff);
5436 else if (code == 'c')
5437 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ((INTVAL (x) & 0xff) ^ 0x80) - 0x80);
5438 else if (code == 'x')
5439 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xffff);
5440 else if (code == 'h')
5441 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
5442 else if (code == 'i')
5443 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5444 s390_extract_part (x, HImode, 0));
5445 else if (code == 'j')
5446 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5447 s390_extract_part (x, HImode, -1));
5448 else if (code == 'k')
5449 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5450 s390_extract_part (x, SImode, 0));
5451 else if (code == 'm')
5452 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5453 s390_extract_part (x, SImode, -1));
5454 else if (code == 'o')
5455 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xffffffff);
5456 else
5457 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
5458 break;
5459
5460 case CONST_DOUBLE:
5461 gcc_assert (GET_MODE (x) == VOIDmode);
5462 if (code == 'b')
5463 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xff);
5464 else if (code == 'x')
5465 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xffff);
5466 else if (code == 'h')
5467 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5468 ((CONST_DOUBLE_LOW (x) & 0xffff) ^ 0x8000) - 0x8000);
5469 else
5470 {
5471 if (code == 0)
5472 output_operand_lossage ("invalid constant - try using "
5473 "an output modifier");
5474 else
5475 output_operand_lossage ("invalid constant for output modifier '%c'",
5476 code);
5477 }
5478 break;
5479
5480 default:
5481 if (code == 0)
5482 output_operand_lossage ("invalid expression - try using "
5483 "an output modifier");
5484 else
5485 output_operand_lossage ("invalid expression for output "
5486 "modifier '%c'", code);
5487 break;
5488 }
5489 }
5490
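/* Editor's sketch of the sign-extension idiom used for 'h' above:
   ((v & 0xffff) ^ 0x8000) - 0x8000 sign-extends the low halfword, e.g.
     v = 0xfffe  ->  (0xfffe ^ 0x8000) - 0x8000 = 0x7ffe - 0x8000 = -2
     v = 0x1234  ->  0x9234 - 0x8000 = 0x1234
   The 'c' modifier uses the analogous byte-sized constants.  */
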
5491 /* Target hook for assembling integer objects. We need to define it
5492 here to work around a bug in some versions of GAS, which couldn't
5493 handle values smaller than INT_MIN when printed in decimal. */
5494
5495 static bool
5496 s390_assemble_integer (rtx x, unsigned int size, int aligned_p)
5497 {
5498 if (size == 8 && aligned_p
5499 && GET_CODE (x) == CONST_INT && INTVAL (x) < INT_MIN)
5500 {
5501 fprintf (asm_out_file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n",
5502 INTVAL (x));
5503 return true;
5504 }
5505 return default_assemble_integer (x, size, aligned_p);
5506 }
5507
5508 /* Returns true if register REGNO is used for forming
5509 a memory address in expression X. */
5510
5511 static bool
5512 reg_used_in_mem_p (int regno, rtx x)
5513 {
5514 enum rtx_code code = GET_CODE (x);
5515 int i, j;
5516 const char *fmt;
5517
5518 if (code == MEM)
5519 {
5520 if (refers_to_regno_p (regno, regno+1,
5521 XEXP (x, 0), 0))
5522 return true;
5523 }
5524 else if (code == SET
5525 && GET_CODE (SET_DEST (x)) == PC)
5526 {
5527 if (refers_to_regno_p (regno, regno+1,
5528 SET_SRC (x), 0))
5529 return true;
5530 }
5531
5532 fmt = GET_RTX_FORMAT (code);
5533 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5534 {
5535 if (fmt[i] == 'e'
5536 && reg_used_in_mem_p (regno, XEXP (x, i)))
5537 return true;
5538
5539 else if (fmt[i] == 'E')
5540 for (j = 0; j < XVECLEN (x, i); j++)
5541 if (reg_used_in_mem_p (regno, XVECEXP (x, i, j)))
5542 return true;
5543 }
5544 return false;
5545 }
5546
5547 /* Returns true if expression DEP_RTX sets an address register
5548 used by instruction INSN to address memory. */
5549
5550 static bool
5551 addr_generation_dependency_p (rtx dep_rtx, rtx insn)
5552 {
5553 rtx target, pat;
5554
5555 if (GET_CODE (dep_rtx) == INSN)
5556 dep_rtx = PATTERN (dep_rtx);
5557
5558 if (GET_CODE (dep_rtx) == SET)
5559 {
5560 target = SET_DEST (dep_rtx);
5561 if (GET_CODE (target) == STRICT_LOW_PART)
5562 target = XEXP (target, 0);
5563 while (GET_CODE (target) == SUBREG)
5564 target = SUBREG_REG (target);
5565
5566 if (GET_CODE (target) == REG)
5567 {
5568 int regno = REGNO (target);
5569
5570 if (s390_safe_attr_type (insn) == TYPE_LA)
5571 {
5572 pat = PATTERN (insn);
5573 if (GET_CODE (pat) == PARALLEL)
5574 {
5575 gcc_assert (XVECLEN (pat, 0) == 2);
5576 pat = XVECEXP (pat, 0, 0);
5577 }
5578 gcc_assert (GET_CODE (pat) == SET);
5579 return refers_to_regno_p (regno, regno+1, SET_SRC (pat), 0);
5580 }
5581 else if (get_attr_atype (insn) == ATYPE_AGEN)
5582 return reg_used_in_mem_p (regno, PATTERN (insn));
5583 }
5584 }
5585 return false;
5586 }
5587
5588 /* Return 1 if dep_insn sets a register used by insn in the agen unit. */
5589
5590 int
5591 s390_agen_dep_p (rtx dep_insn, rtx insn)
5592 {
5593 rtx dep_rtx = PATTERN (dep_insn);
5594 int i;
5595
5596 if (GET_CODE (dep_rtx) == SET
5597 && addr_generation_dependency_p (dep_rtx, insn))
5598 return 1;
5599 else if (GET_CODE (dep_rtx) == PARALLEL)
5600 {
5601 for (i = 0; i < XVECLEN (dep_rtx, 0); i++)
5602 {
5603 if (addr_generation_dependency_p (XVECEXP (dep_rtx, 0, i), insn))
5604 return 1;
5605 }
5606 }
5607 return 0;
5608 }
5609
5610
5611 /* A C statement (sans semicolon) to update the integer scheduling priority
5612 INSN_PRIORITY (INSN). Increase the priority to execute the INSN earlier,
5613 reduce the priority to execute INSN later. Do not define this macro if
5614 you do not need to adjust the scheduling priorities of insns.
5615
5616 A STD instruction should be scheduled earlier,
5617 in order to use the bypass. */
5618 static int
5619 s390_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
5620 {
5621 if (! INSN_P (insn))
5622 return priority;
5623
5624 if (s390_tune != PROCESSOR_2084_Z990
5625 && s390_tune != PROCESSOR_2094_Z9_109
5626 && s390_tune != PROCESSOR_2097_Z10
5627 && s390_tune != PROCESSOR_2817_Z196)
5628 return priority;
5629
5630 switch (s390_safe_attr_type (insn))
5631 {
5632 case TYPE_FSTOREDF:
5633 case TYPE_FSTORESF:
5634 priority = priority << 3;
5635 break;
5636 case TYPE_STORE:
5637 case TYPE_STM:
5638 priority = priority << 1;
5639 break;
5640 default:
5641 break;
5642 }
5643 return priority;
5644 }
5645
5646
5647 /* The number of instructions that can be issued per cycle. */
5648
5649 static int
5650 s390_issue_rate (void)
5651 {
5652 switch (s390_tune)
5653 {
5654 case PROCESSOR_2084_Z990:
5655 case PROCESSOR_2094_Z9_109:
5656 case PROCESSOR_2817_Z196:
5657 return 3;
5658 case PROCESSOR_2097_Z10:
5659 return 2;
5660 default:
5661 return 1;
5662 }
5663 }
5664
5665 static int
5666 s390_first_cycle_multipass_dfa_lookahead (void)
5667 {
5668 return 4;
5669 }
5670
5671 /* Annotate every literal pool reference in X by an UNSPEC_LTREF expression.
5672 Fix up MEMs as required. */
5673
5674 static void
5675 annotate_constant_pool_refs (rtx *x)
5676 {
5677 int i, j;
5678 const char *fmt;
5679
5680 gcc_assert (GET_CODE (*x) != SYMBOL_REF
5681 || !CONSTANT_POOL_ADDRESS_P (*x));
5682
5683 /* Literal pool references can only occur inside a MEM ... */
5684 if (GET_CODE (*x) == MEM)
5685 {
5686 rtx memref = XEXP (*x, 0);
5687
5688 if (GET_CODE (memref) == SYMBOL_REF
5689 && CONSTANT_POOL_ADDRESS_P (memref))
5690 {
5691 rtx base = cfun->machine->base_reg;
5692 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, memref, base),
5693 UNSPEC_LTREF);
5694
5695 *x = replace_equiv_address (*x, addr);
5696 return;
5697 }
5698
5699 if (GET_CODE (memref) == CONST
5700 && GET_CODE (XEXP (memref, 0)) == PLUS
5701 && GET_CODE (XEXP (XEXP (memref, 0), 1)) == CONST_INT
5702 && GET_CODE (XEXP (XEXP (memref, 0), 0)) == SYMBOL_REF
5703 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (memref, 0), 0)))
5704 {
5705 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (memref, 0), 1));
5706 rtx sym = XEXP (XEXP (memref, 0), 0);
5707 rtx base = cfun->machine->base_reg;
5708 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5709 UNSPEC_LTREF);
5710
5711 *x = replace_equiv_address (*x, plus_constant (addr, off));
5712 return;
5713 }
5714 }
5715
5716 /* ... or a load-address type pattern. */
5717 if (GET_CODE (*x) == SET)
5718 {
5719 rtx addrref = SET_SRC (*x);
5720
5721 if (GET_CODE (addrref) == SYMBOL_REF
5722 && CONSTANT_POOL_ADDRESS_P (addrref))
5723 {
5724 rtx base = cfun->machine->base_reg;
5725 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addrref, base),
5726 UNSPEC_LTREF);
5727
5728 SET_SRC (*x) = addr;
5729 return;
5730 }
5731
5732 if (GET_CODE (addrref) == CONST
5733 && GET_CODE (XEXP (addrref, 0)) == PLUS
5734 && GET_CODE (XEXP (XEXP (addrref, 0), 1)) == CONST_INT
5735 && GET_CODE (XEXP (XEXP (addrref, 0), 0)) == SYMBOL_REF
5736 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (addrref, 0), 0)))
5737 {
5738 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (addrref, 0), 1));
5739 rtx sym = XEXP (XEXP (addrref, 0), 0);
5740 rtx base = cfun->machine->base_reg;
5741 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5742 UNSPEC_LTREF);
5743
5744 SET_SRC (*x) = plus_constant (addr, off);
5745 return;
5746 }
5747 }
5748
5749 /* Annotate LTREL_BASE as well. */
5750 if (GET_CODE (*x) == UNSPEC
5751 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
5752 {
5753 rtx base = cfun->machine->base_reg;
5754 *x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XVECEXP (*x, 0, 0), base),
5755 UNSPEC_LTREL_BASE);
5756 return;
5757 }
5758
5759 fmt = GET_RTX_FORMAT (GET_CODE (*x));
5760 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
5761 {
5762 if (fmt[i] == 'e')
5763 {
5764 annotate_constant_pool_refs (&XEXP (*x, i));
5765 }
5766 else if (fmt[i] == 'E')
5767 {
5768 for (j = 0; j < XVECLEN (*x, i); j++)
5769 annotate_constant_pool_refs (&XVECEXP (*x, i, j));
5770 }
5771 }
5772 }
5773
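/* Editor's sketch of the annotation performed above: a literal pool
   reference such as
     (mem (symbol_ref [constant pool]))
   is rewritten into
     (mem (unspec [(symbol_ref [constant pool]) (reg <base>)] UNSPEC_LTREF))
   making the dependency on the literal pool base register explicit.  */
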
5774 /* Split all branches that exceed the maximum distance.
5775 Returns true if this created a new literal pool entry. */
5776
5777 static int
5778 s390_split_branches (void)
5779 {
5780 rtx temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
5781 int new_literal = 0, ret;
5782 rtx insn, pat, tmp, target;
5783 rtx *label;
5784
5785 /* We need correct insn addresses. */
5786
5787 shorten_branches (get_insns ());
5788
5789 /* Find all branches that exceed 64KB, and split them. */
5790
5791 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5792 {
5793 if (GET_CODE (insn) != JUMP_INSN)
5794 continue;
5795
5796 pat = PATTERN (insn);
5797 if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 2)
5798 pat = XVECEXP (pat, 0, 0);
5799 if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
5800 continue;
5801
5802 if (GET_CODE (SET_SRC (pat)) == LABEL_REF)
5803 {
5804 label = &SET_SRC (pat);
5805 }
5806 else if (GET_CODE (SET_SRC (pat)) == IF_THEN_ELSE)
5807 {
5808 if (GET_CODE (XEXP (SET_SRC (pat), 1)) == LABEL_REF)
5809 label = &XEXP (SET_SRC (pat), 1);
5810 else if (GET_CODE (XEXP (SET_SRC (pat), 2)) == LABEL_REF)
5811 label = &XEXP (SET_SRC (pat), 2);
5812 else
5813 continue;
5814 }
5815 else
5816 continue;
5817
5818 if (get_attr_length (insn) <= 4)
5819 continue;
5820
5821 /* We are going to use the return register as a scratch register;
5822 make sure it will be saved/restored by the prologue/epilogue. */
5823 cfun_frame_layout.save_return_addr_p = 1;
5824
5825 if (!flag_pic)
5826 {
5827 new_literal = 1;
5828 tmp = force_const_mem (Pmode, *label);
5829 tmp = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, tmp), insn);
5830 INSN_ADDRESSES_NEW (tmp, -1);
5831 annotate_constant_pool_refs (&PATTERN (tmp));
5832
5833 target = temp_reg;
5834 }
5835 else
5836 {
5837 new_literal = 1;
5838 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, *label),
5839 UNSPEC_LTREL_OFFSET);
5840 target = gen_rtx_CONST (Pmode, target);
5841 target = force_const_mem (Pmode, target);
5842 tmp = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, target), insn);
5843 INSN_ADDRESSES_NEW (tmp, -1);
5844 annotate_constant_pool_refs (&PATTERN (tmp));
5845
5846 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XEXP (target, 0),
5847 cfun->machine->base_reg),
5848 UNSPEC_LTREL_BASE);
5849 target = gen_rtx_PLUS (Pmode, temp_reg, target);
5850 }
5851
5852 ret = validate_change (insn, label, target, 0);
5853 gcc_assert (ret);
5854 }
5855
5856 return new_literal;
5857 }
5858
5859
5860 /* Find an annotated literal pool symbol referenced in RTX X,
5861 and store it at REF. Will abort if X contains references to
5862 more than one such pool symbol; multiple references to the same
5863 symbol are allowed, however.
5864
5865 The rtx pointed to by REF must be initialized to NULL_RTX
5866 by the caller before calling this routine. */
5867
5868 static void
5869 find_constant_pool_ref (rtx x, rtx *ref)
5870 {
5871 int i, j;
5872 const char *fmt;
5873
5874 /* Ignore LTREL_BASE references. */
5875 if (GET_CODE (x) == UNSPEC
5876 && XINT (x, 1) == UNSPEC_LTREL_BASE)
5877 return;
5878 /* Likewise POOL_ENTRY insns. */
5879 if (GET_CODE (x) == UNSPEC_VOLATILE
5880 && XINT (x, 1) == UNSPECV_POOL_ENTRY)
5881 return;
5882
5883 gcc_assert (GET_CODE (x) != SYMBOL_REF
5884 || !CONSTANT_POOL_ADDRESS_P (x));
5885
5886 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_LTREF)
5887 {
5888 rtx sym = XVECEXP (x, 0, 0);
5889 gcc_assert (GET_CODE (sym) == SYMBOL_REF
5890 && CONSTANT_POOL_ADDRESS_P (sym));
5891
5892 if (*ref == NULL_RTX)
5893 *ref = sym;
5894 else
5895 gcc_assert (*ref == sym);
5896
5897 return;
5898 }
5899
5900 fmt = GET_RTX_FORMAT (GET_CODE (x));
5901 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5902 {
5903 if (fmt[i] == 'e')
5904 {
5905 find_constant_pool_ref (XEXP (x, i), ref);
5906 }
5907 else if (fmt[i] == 'E')
5908 {
5909 for (j = 0; j < XVECLEN (x, i); j++)
5910 find_constant_pool_ref (XVECEXP (x, i, j), ref);
5911 }
5912 }
5913 }
5914
5915 /* Replace every reference to the annotated literal pool
5916 symbol REF in X by its base plus OFFSET. */
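/* For illustration, an annotated reference
     (unspec [(symbol_ref <pool constant>) (reg <base>)] UNSPEC_LTREF)
   becomes
     (plus (reg <base>) <offset>)
   once the final position of the constant within its pool is known. */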
5917
5918 static void
5919 replace_constant_pool_ref (rtx *x, rtx ref, rtx offset)
5920 {
5921 int i, j;
5922 const char *fmt;
5923
5924 gcc_assert (*x != ref);
5925
5926 if (GET_CODE (*x) == UNSPEC
5927 && XINT (*x, 1) == UNSPEC_LTREF
5928 && XVECEXP (*x, 0, 0) == ref)
5929 {
5930 *x = gen_rtx_PLUS (Pmode, XVECEXP (*x, 0, 1), offset);
5931 return;
5932 }
5933
5934 if (GET_CODE (*x) == PLUS
5935 && GET_CODE (XEXP (*x, 1)) == CONST_INT
5936 && GET_CODE (XEXP (*x, 0)) == UNSPEC
5937 && XINT (XEXP (*x, 0), 1) == UNSPEC_LTREF
5938 && XVECEXP (XEXP (*x, 0), 0, 0) == ref)
5939 {
5940 rtx addr = gen_rtx_PLUS (Pmode, XVECEXP (XEXP (*x, 0), 0, 1), offset);
5941 *x = plus_constant (addr, INTVAL (XEXP (*x, 1)));
5942 return;
5943 }
5944
5945 fmt = GET_RTX_FORMAT (GET_CODE (*x));
5946 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
5947 {
5948 if (fmt[i] == 'e')
5949 {
5950 replace_constant_pool_ref (&XEXP (*x, i), ref, offset);
5951 }
5952 else if (fmt[i] == 'E')
5953 {
5954 for (j = 0; j < XVECLEN (*x, i); j++)
5955 replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, offset);
5956 }
5957 }
5958 }
5959
5960 /* Check whether X contains an UNSPEC_LTREL_BASE.
5961 Return its constant pool symbol if found, NULL_RTX otherwise. */
5962
5963 static rtx
5964 find_ltrel_base (rtx x)
5965 {
5966 int i, j;
5967 const char *fmt;
5968
5969 if (GET_CODE (x) == UNSPEC
5970 && XINT (x, 1) == UNSPEC_LTREL_BASE)
5971 return XVECEXP (x, 0, 0);
5972
5973 fmt = GET_RTX_FORMAT (GET_CODE (x));
5974 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5975 {
5976 if (fmt[i] == 'e')
5977 {
5978 rtx fnd = find_ltrel_base (XEXP (x, i));
5979 if (fnd)
5980 return fnd;
5981 }
5982 else if (fmt[i] == 'E')
5983 {
5984 for (j = 0; j < XVECLEN (x, i); j++)
5985 {
5986 rtx fnd = find_ltrel_base (XVECEXP (x, i, j));
5987 if (fnd)
5988 return fnd;
5989 }
5990 }
5991 }
5992
5993 return NULL_RTX;
5994 }
5995
5996 /* Replace any occurrence of UNSPEC_LTREL_BASE in X with its base. */
5997
5998 static void
5999 replace_ltrel_base (rtx *x)
6000 {
6001 int i, j;
6002 const char *fmt;
6003
6004 if (GET_CODE (*x) == UNSPEC
6005 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
6006 {
6007 *x = XVECEXP (*x, 0, 1);
6008 return;
6009 }
6010
6011 fmt = GET_RTX_FORMAT (GET_CODE (*x));
6012 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
6013 {
6014 if (fmt[i] == 'e')
6015 {
6016 replace_ltrel_base (&XEXP (*x, i));
6017 }
6018 else if (fmt[i] == 'E')
6019 {
6020 for (j = 0; j < XVECLEN (*x, i); j++)
6021 replace_ltrel_base (&XVECEXP (*x, i, j));
6022 }
6023 }
6024 }
6025
6026
6027 /* We keep a list of constants which we have to add to internal
6028 constant tables in the middle of large functions. */
6029
6030 #define NR_C_MODES 11
6031 enum machine_mode constant_modes[NR_C_MODES] =
6032 {
6033 TFmode, TImode, TDmode,
6034 DFmode, DImode, DDmode,
6035 SFmode, SImode, SDmode,
6036 HImode,
6037 QImode
6038 };
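/* Note that constant_modes is ordered by decreasing size and alignment
   requirement; s390_dump_pool walks it front to back so that the most
   strictly aligned constants are emitted first. */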
6039
6040 struct constant
6041 {
6042 struct constant *next;
6043 rtx value;
6044 rtx label;
6045 };
6046
6047 struct constant_pool
6048 {
6049 struct constant_pool *next;
6050 rtx first_insn;
6051 rtx pool_insn;
6052 bitmap insns;
6053 rtx emit_pool_after;
6054
6055 struct constant *constants[NR_C_MODES];
6056 struct constant *execute;
6057 rtx label;
6058 int size;
6059 };
6060
6061 /* Allocate new constant_pool structure. */
6062
6063 static struct constant_pool *
6064 s390_alloc_pool (void)
6065 {
6066 struct constant_pool *pool;
6067 int i;
6068
6069 pool = (struct constant_pool *) xmalloc (sizeof *pool);
6070 pool->next = NULL;
6071 for (i = 0; i < NR_C_MODES; i++)
6072 pool->constants[i] = NULL;
6073
6074 pool->execute = NULL;
6075 pool->label = gen_label_rtx ();
6076 pool->first_insn = NULL_RTX;
6077 pool->pool_insn = NULL_RTX;
6078 pool->insns = BITMAP_ALLOC (NULL);
6079 pool->size = 0;
6080 pool->emit_pool_after = NULL_RTX;
6081
6082 return pool;
6083 }
6084
6085 /* Create new constant pool covering instructions starting at INSN
6086 and chain it to the end of POOL_LIST. */
6087
6088 static struct constant_pool *
6089 s390_start_pool (struct constant_pool **pool_list, rtx insn)
6090 {
6091 struct constant_pool *pool, **prev;
6092
6093 pool = s390_alloc_pool ();
6094 pool->first_insn = insn;
6095
6096 for (prev = pool_list; *prev; prev = &(*prev)->next)
6097 ;
6098 *prev = pool;
6099
6100 return pool;
6101 }
6102
6103 /* End range of instructions covered by POOL at INSN and emit
6104 placeholder insn representing the pool. */
6105
6106 static void
6107 s390_end_pool (struct constant_pool *pool, rtx insn)
6108 {
6109 rtx pool_size = GEN_INT (pool->size + 8 /* alignment slop */);
6110
6111 if (!insn)
6112 insn = get_last_insn ();
6113
6114 pool->pool_insn = emit_insn_after (gen_pool (pool_size), insn);
6115 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6116 }
6117
6118 /* Add INSN to the list of insns covered by POOL. */
6119
6120 static void
6121 s390_add_pool_insn (struct constant_pool *pool, rtx insn)
6122 {
6123 bitmap_set_bit (pool->insns, INSN_UID (insn));
6124 }
6125
6126 /* Return pool out of POOL_LIST that covers INSN. */
6127
6128 static struct constant_pool *
6129 s390_find_pool (struct constant_pool *pool_list, rtx insn)
6130 {
6131 struct constant_pool *pool;
6132
6133 for (pool = pool_list; pool; pool = pool->next)
6134 if (bitmap_bit_p (pool->insns, INSN_UID (insn)))
6135 break;
6136
6137 return pool;
6138 }
6139
6140 /* Add constant VAL of mode MODE to the constant pool POOL. */
6141
6142 static void
6143 s390_add_constant (struct constant_pool *pool, rtx val, enum machine_mode mode)
6144 {
6145 struct constant *c;
6146 int i;
6147
6148 for (i = 0; i < NR_C_MODES; i++)
6149 if (constant_modes[i] == mode)
6150 break;
6151 gcc_assert (i != NR_C_MODES);
6152
6153 for (c = pool->constants[i]; c != NULL; c = c->next)
6154 if (rtx_equal_p (val, c->value))
6155 break;
6156
6157 if (c == NULL)
6158 {
6159 c = (struct constant *) xmalloc (sizeof *c);
6160 c->value = val;
6161 c->label = gen_label_rtx ();
6162 c->next = pool->constants[i];
6163 pool->constants[i] = c;
6164 pool->size += GET_MODE_SIZE (mode);
6165 }
6166 }
6167
6168 /* Return an rtx that represents the offset of X from the start of
6169 pool POOL. */
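/* The result has the shape
     (const (unspec [X (label_ref <pool base label>)] UNSPEC_POOL_OFFSET))
   which later output code resolves to the distance of X from the pool
   base label. */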
6170
6171 static rtx
6172 s390_pool_offset (struct constant_pool *pool, rtx x)
6173 {
6174 rtx label;
6175
6176 label = gen_rtx_LABEL_REF (GET_MODE (x), pool->label);
6177 x = gen_rtx_UNSPEC (GET_MODE (x), gen_rtvec (2, x, label),
6178 UNSPEC_POOL_OFFSET);
6179 return gen_rtx_CONST (GET_MODE (x), x);
6180 }
6181
6182 /* Find constant VAL of mode MODE in the constant pool POOL.
6183 Return an RTX describing the distance from the start of
6184 the pool to the location of the new constant. */
6185
6186 static rtx
6187 s390_find_constant (struct constant_pool *pool, rtx val,
6188 enum machine_mode mode)
6189 {
6190 struct constant *c;
6191 int i;
6192
6193 for (i = 0; i < NR_C_MODES; i++)
6194 if (constant_modes[i] == mode)
6195 break;
6196 gcc_assert (i != NR_C_MODES);
6197
6198 for (c = pool->constants[i]; c != NULL; c = c->next)
6199 if (rtx_equal_p (val, c->value))
6200 break;
6201
6202 gcc_assert (c);
6203
6204 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
6205 }
6206
6207 /* Check whether INSN is an execute. Return the label_ref to its
6208 execute target template if so, NULL_RTX otherwise. */
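/* Roughly: "execute" insns model the S/390 EX instruction, whose target
   template has to live at an addressable location; when !TARGET_CPU_ZARCH
   that template is placed into the literal pool (see s390_add_execute and
   s390_find_execute below). */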
6209
6210 static rtx
6211 s390_execute_label (rtx insn)
6212 {
6213 if (GET_CODE (insn) == INSN
6214 && GET_CODE (PATTERN (insn)) == PARALLEL
6215 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == UNSPEC
6216 && XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_EXECUTE)
6217 return XVECEXP (XVECEXP (PATTERN (insn), 0, 0), 0, 2);
6218
6219 return NULL_RTX;
6220 }
6221
6222 /* Add execute target for INSN to the constant pool POOL. */
6223
6224 static void
6225 s390_add_execute (struct constant_pool *pool, rtx insn)
6226 {
6227 struct constant *c;
6228
6229 for (c = pool->execute; c != NULL; c = c->next)
6230 if (INSN_UID (insn) == INSN_UID (c->value))
6231 break;
6232
6233 if (c == NULL)
6234 {
6235 c = (struct constant *) xmalloc (sizeof *c);
6236 c->value = insn;
6237 c->label = gen_label_rtx ();
6238 c->next = pool->execute;
6239 pool->execute = c;
6240 pool->size += 6;
6241 }
6242 }
6243
6244 /* Find execute target for INSN in the constant pool POOL.
6245 Return an RTX describing the distance from the start of
6246 the pool to the location of the execute target. */
6247
6248 static rtx
6249 s390_find_execute (struct constant_pool *pool, rtx insn)
6250 {
6251 struct constant *c;
6252
6253 for (c = pool->execute; c != NULL; c = c->next)
6254 if (INSN_UID (insn) == INSN_UID (c->value))
6255 break;
6256
6257 gcc_assert (c);
6258
6259 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
6260 }
6261
6262 /* For an execute INSN, extract the execute target template. */
6263
6264 static rtx
6265 s390_execute_target (rtx insn)
6266 {
6267 rtx pattern = PATTERN (insn);
6268 gcc_assert (s390_execute_label (insn));
6269
6270 if (XVECLEN (pattern, 0) == 2)
6271 {
6272 pattern = copy_rtx (XVECEXP (pattern, 0, 1));
6273 }
6274 else
6275 {
6276 rtvec vec = rtvec_alloc (XVECLEN (pattern, 0) - 1);
6277 int i;
6278
6279 for (i = 0; i < XVECLEN (pattern, 0) - 1; i++)
6280 RTVEC_ELT (vec, i) = copy_rtx (XVECEXP (pattern, 0, i + 1));
6281
6282 pattern = gen_rtx_PARALLEL (VOIDmode, vec);
6283 }
6284
6285 return pattern;
6286 }
6287
6288 /* Indicate that INSN cannot be duplicated. This is the case for
6289 execute insns that carry a unique label. */
6290
6291 static bool
6292 s390_cannot_copy_insn_p (rtx insn)
6293 {
6294 rtx label = s390_execute_label (insn);
6295 return label && label != const0_rtx;
6296 }
6297
6298 /* Dump out the constants in POOL. If REMOTE_LABEL is true,
6299 do not emit the pool base label. */
6300
6301 static void
6302 s390_dump_pool (struct constant_pool *pool, bool remote_label)
6303 {
6304 struct constant *c;
6305 rtx insn = pool->pool_insn;
6306 int i;
6307
6308 /* Switch to rodata section. */
6309 if (TARGET_CPU_ZARCH)
6310 {
6311 insn = emit_insn_after (gen_pool_section_start (), insn);
6312 INSN_ADDRESSES_NEW (insn, -1);
6313 }
6314
6315 /* Ensure minimum pool alignment. */
6316 if (TARGET_CPU_ZARCH)
6317 insn = emit_insn_after (gen_pool_align (GEN_INT (8)), insn);
6318 else
6319 insn = emit_insn_after (gen_pool_align (GEN_INT (4)), insn);
6320 INSN_ADDRESSES_NEW (insn, -1);
6321
6322 /* Emit pool base label. */
6323 if (!remote_label)
6324 {
6325 insn = emit_label_after (pool->label, insn);
6326 INSN_ADDRESSES_NEW (insn, -1);
6327 }
6328
6329 /* Dump constants in descending alignment requirement order,
6330 ensuring proper alignment for every constant. */
6331 for (i = 0; i < NR_C_MODES; i++)
6332 for (c = pool->constants[i]; c; c = c->next)
6333 {
6334 /* Convert UNSPEC_LTREL_OFFSET unspecs to pool-relative references. */
6335 rtx value = copy_rtx (c->value);
6336 if (GET_CODE (value) == CONST
6337 && GET_CODE (XEXP (value, 0)) == UNSPEC
6338 && XINT (XEXP (value, 0), 1) == UNSPEC_LTREL_OFFSET
6339 && XVECLEN (XEXP (value, 0), 0) == 1)
6340 value = s390_pool_offset (pool, XVECEXP (XEXP (value, 0), 0, 0));
6341
6342 insn = emit_label_after (c->label, insn);
6343 INSN_ADDRESSES_NEW (insn, -1);
6344
6345 value = gen_rtx_UNSPEC_VOLATILE (constant_modes[i],
6346 gen_rtvec (1, value),
6347 UNSPECV_POOL_ENTRY);
6348 insn = emit_insn_after (value, insn);
6349 INSN_ADDRESSES_NEW (insn, -1);
6350 }
6351
6352 /* Ensure minimum alignment for instructions. */
6353 insn = emit_insn_after (gen_pool_align (GEN_INT (2)), insn);
6354 INSN_ADDRESSES_NEW (insn, -1);
6355
6356 /* Output in-pool execute template insns. */
6357 for (c = pool->execute; c; c = c->next)
6358 {
6359 insn = emit_label_after (c->label, insn);
6360 INSN_ADDRESSES_NEW (insn, -1);
6361
6362 insn = emit_insn_after (s390_execute_target (c->value), insn);
6363 INSN_ADDRESSES_NEW (insn, -1);
6364 }
6365
6366 /* Switch back to previous section. */
6367 if (TARGET_CPU_ZARCH)
6368 {
6369 insn = emit_insn_after (gen_pool_section_end (), insn);
6370 INSN_ADDRESSES_NEW (insn, -1);
6371 }
6372
6373 insn = emit_barrier_after (insn);
6374 INSN_ADDRESSES_NEW (insn, -1);
6375
6376 /* Remove placeholder insn. */
6377 remove_insn (pool->pool_insn);
6378 }
6379
6380 /* Free all memory used by POOL. */
6381
6382 static void
6383 s390_free_pool (struct constant_pool *pool)
6384 {
6385 struct constant *c, *next;
6386 int i;
6387
6388 for (i = 0; i < NR_C_MODES; i++)
6389 for (c = pool->constants[i]; c; c = next)
6390 {
6391 next = c->next;
6392 free (c);
6393 }
6394
6395 for (c = pool->execute; c; c = next)
6396 {
6397 next = c->next;
6398 free (c);
6399 }
6400
6401 BITMAP_FREE (pool->insns);
6402 free (pool);
6403 }
6404
6405
6406 /* Collect main literal pool. Return NULL on overflow. */
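/* A single main pool is usable only as long as every constant stays within
   the displacement range reachable from one base register; once the pool
   reaches 4096 bytes (see the check below), the caller has to fall back to
   the chunkified scheme implemented by s390_chunkify_start. */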
6407
6408 static struct constant_pool *
6409 s390_mainpool_start (void)
6410 {
6411 struct constant_pool *pool;
6412 rtx insn;
6413
6414 pool = s390_alloc_pool ();
6415
6416 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6417 {
6418 if (GET_CODE (insn) == INSN
6419 && GET_CODE (PATTERN (insn)) == SET
6420 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC_VOLATILE
6421 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPECV_MAIN_POOL)
6422 {
6423 gcc_assert (!pool->pool_insn);
6424 pool->pool_insn = insn;
6425 }
6426
6427 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
6428 {
6429 s390_add_execute (pool, insn);
6430 }
6431 else if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6432 {
6433 rtx pool_ref = NULL_RTX;
6434 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6435 if (pool_ref)
6436 {
6437 rtx constant = get_pool_constant (pool_ref);
6438 enum machine_mode mode = get_pool_mode (pool_ref);
6439 s390_add_constant (pool, constant, mode);
6440 }
6441 }
6442
6443 /* If hot/cold partitioning is enabled, we have to make sure that
6444 the literal pool is emitted in the same section where the
6445 initialization of the literal pool base pointer takes place.
6446 emit_pool_after is only used in the non-overflow case on
6447 non-Z CPUs, where we can emit the literal pool at the end of the
6448 function body within the text section. */
6449 if (NOTE_P (insn)
6450 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS
6451 && !pool->emit_pool_after)
6452 pool->emit_pool_after = PREV_INSN (insn);
6453 }
6454
6455 gcc_assert (pool->pool_insn || pool->size == 0);
6456
6457 if (pool->size >= 4096)
6458 {
6459 /* We're going to chunkify the pool, so remove the main
6460 pool placeholder insn. */
6461 remove_insn (pool->pool_insn);
6462
6463 s390_free_pool (pool);
6464 pool = NULL;
6465 }
6466
6467 /* If the function ends with the section where the literal pool
6468 should be emitted, set the marker to its end. */
6469 if (pool && !pool->emit_pool_after)
6470 pool->emit_pool_after = get_last_insn ();
6471
6472 return pool;
6473 }
6474
6475 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6476 Modify the current function to output the pool constants as well as
6477 the pool register setup instruction. */
6478
6479 static void
6480 s390_mainpool_finish (struct constant_pool *pool)
6481 {
6482 rtx base_reg = cfun->machine->base_reg;
6483 rtx insn;
6484
6485 /* If the pool is empty, we're done. */
6486 if (pool->size == 0)
6487 {
6488 /* We don't actually need a base register after all. */
6489 cfun->machine->base_reg = NULL_RTX;
6490
6491 if (pool->pool_insn)
6492 remove_insn (pool->pool_insn);
6493 s390_free_pool (pool);
6494 return;
6495 }
6496
6497 /* We need correct insn addresses. */
6498 shorten_branches (get_insns ());
6499
6500 /* On zSeries, we use a LARL to load the pool register. The pool is
6501 located in the .rodata section, so we emit it after the function. */
6502 if (TARGET_CPU_ZARCH)
6503 {
6504 insn = gen_main_base_64 (base_reg, pool->label);
6505 insn = emit_insn_after (insn, pool->pool_insn);
6506 INSN_ADDRESSES_NEW (insn, -1);
6507 remove_insn (pool->pool_insn);
6508
6509 insn = get_last_insn ();
6510 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6511 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6512
6513 s390_dump_pool (pool, 0);
6514 }
6515
6516 /* On S/390, if the total size of the function's code plus literal pool
6517 does not exceed 4096 bytes, we use BASR to set up a function base
6518 pointer, and emit the literal pool at the end of the function. */
6519 else if (INSN_ADDRESSES (INSN_UID (pool->emit_pool_after))
6520 + pool->size + 8 /* alignment slop */ < 4096)
6521 {
6522 insn = gen_main_base_31_small (base_reg, pool->label);
6523 insn = emit_insn_after (insn, pool->pool_insn);
6524 INSN_ADDRESSES_NEW (insn, -1);
6525 remove_insn (pool->pool_insn);
6526
6527 insn = emit_label_after (pool->label, insn);
6528 INSN_ADDRESSES_NEW (insn, -1);
6529
6530 /* emit_pool_after will be set by s390_mainpool_start to the
6531 last insn of the section where the literal pool should be
6532 emitted. */
6533 insn = pool->emit_pool_after;
6534
6535 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6536 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6537
6538 s390_dump_pool (pool, 1);
6539 }
6540
6541 /* Otherwise, we emit an inline literal pool and use BASR to branch
6542 over it, setting up the pool register at the same time. */
6543 else
6544 {
6545 rtx pool_end = gen_label_rtx ();
6546
6547 insn = gen_main_base_31_large (base_reg, pool->label, pool_end);
6548 insn = emit_insn_after (insn, pool->pool_insn);
6549 INSN_ADDRESSES_NEW (insn, -1);
6550 remove_insn (pool->pool_insn);
6551
6552 insn = emit_label_after (pool->label, insn);
6553 INSN_ADDRESSES_NEW (insn, -1);
6554
6555 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6556 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6557
6558 insn = emit_label_after (pool_end, pool->pool_insn);
6559 INSN_ADDRESSES_NEW (insn, -1);
6560
6561 s390_dump_pool (pool, 1);
6562 }
6563
6564
6565 /* Replace all literal pool references. */
6566
6567 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6568 {
6569 if (INSN_P (insn))
6570 replace_ltrel_base (&PATTERN (insn));
6571
6572 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6573 {
6574 rtx addr, pool_ref = NULL_RTX;
6575 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6576 if (pool_ref)
6577 {
6578 if (s390_execute_label (insn))
6579 addr = s390_find_execute (pool, insn);
6580 else
6581 addr = s390_find_constant (pool, get_pool_constant (pool_ref),
6582 get_pool_mode (pool_ref));
6583
6584 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
6585 INSN_CODE (insn) = -1;
6586 }
6587 }
6588 }
6589
6590
6591 /* Free the pool. */
6592 s390_free_pool (pool);
6593 }
6594
6595 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6596 We have decided we cannot use this pool, so revert all changes
6597 to the current function that were done by s390_mainpool_start. */
6598 static void
6599 s390_mainpool_cancel (struct constant_pool *pool)
6600 {
6601 /* We didn't actually change the instruction stream, so simply
6602 free the pool memory. */
6603 s390_free_pool (pool);
6604 }
6605
6606
6607 /* Chunkify the literal pool. */
6608
6609 #define S390_POOL_CHUNK_MIN 0xc00
6610 #define S390_POOL_CHUNK_MAX 0xe00
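/* Base + displacement addresses on S/390 provide only a 12-bit unsigned
   displacement (0 .. 4095 bytes), so every chunk must remain addressable
   from a single pool base register. The bounds above (0xc00 = 3072 and
   0xe00 = 3584 bytes) deliberately stay below that limit to leave a safety
   margin, e.g. for alignment padding. */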
6611
6612 static struct constant_pool *
6613 s390_chunkify_start (void)
6614 {
6615 struct constant_pool *curr_pool = NULL, *pool_list = NULL;
6616 int extra_size = 0;
6617 bitmap far_labels;
6618 rtx pending_ltrel = NULL_RTX;
6619 rtx insn;
6620
6621 rtx (*gen_reload_base) (rtx, rtx) =
6622 TARGET_CPU_ZARCH? gen_reload_base_64 : gen_reload_base_31;
6623
6624
6625 /* We need correct insn addresses. */
6626
6627 shorten_branches (get_insns ());
6628
6629 /* Scan all insns and move literals to pool chunks. */
6630
6631 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6632 {
6633 bool section_switch_p = false;
6634
6635 /* Check for pending LTREL_BASE. */
6636 if (INSN_P (insn))
6637 {
6638 rtx ltrel_base = find_ltrel_base (PATTERN (insn));
6639 if (ltrel_base)
6640 {
6641 gcc_assert (ltrel_base == pending_ltrel);
6642 pending_ltrel = NULL_RTX;
6643 }
6644 }
6645
6646 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
6647 {
6648 if (!curr_pool)
6649 curr_pool = s390_start_pool (&pool_list, insn);
6650
6651 s390_add_execute (curr_pool, insn);
6652 s390_add_pool_insn (curr_pool, insn);
6653 }
6654 else if (GET_CODE (insn) == INSN || CALL_P (insn))
6655 {
6656 rtx pool_ref = NULL_RTX;
6657 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6658 if (pool_ref)
6659 {
6660 rtx constant = get_pool_constant (pool_ref);
6661 enum machine_mode mode = get_pool_mode (pool_ref);
6662
6663 if (!curr_pool)
6664 curr_pool = s390_start_pool (&pool_list, insn);
6665
6666 s390_add_constant (curr_pool, constant, mode);
6667 s390_add_pool_insn (curr_pool, insn);
6668
6669 /* Don't split the pool chunk between a LTREL_OFFSET load
6670 and the corresponding LTREL_BASE. */
6671 if (GET_CODE (constant) == CONST
6672 && GET_CODE (XEXP (constant, 0)) == UNSPEC
6673 && XINT (XEXP (constant, 0), 1) == UNSPEC_LTREL_OFFSET)
6674 {
6675 gcc_assert (!pending_ltrel);
6676 pending_ltrel = pool_ref;
6677 }
6678 }
6679 /* Make sure we do not split between a call and its
6680 corresponding CALL_ARG_LOCATION note. */
6681 if (CALL_P (insn))
6682 {
6683 rtx next = NEXT_INSN (insn);
6684 if (next && NOTE_P (next)
6685 && NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION)
6686 continue;
6687 }
6688 }
6689
6690 if (GET_CODE (insn) == JUMP_INSN || GET_CODE (insn) == CODE_LABEL)
6691 {
6692 if (curr_pool)
6693 s390_add_pool_insn (curr_pool, insn);
6694 /* An LTREL_BASE must follow within the same basic block. */
6695 gcc_assert (!pending_ltrel);
6696 }
6697
6698 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
6699 section_switch_p = true;
6700
6701 if (!curr_pool
6702 || INSN_ADDRESSES_SIZE () <= (size_t) INSN_UID (insn)
6703 || INSN_ADDRESSES (INSN_UID (insn)) == -1)
6704 continue;
6705
6706 if (TARGET_CPU_ZARCH)
6707 {
6708 if (curr_pool->size < S390_POOL_CHUNK_MAX)
6709 continue;
6710
6711 s390_end_pool (curr_pool, NULL_RTX);
6712 curr_pool = NULL;
6713 }
6714 else
6715 {
6716 int chunk_size = INSN_ADDRESSES (INSN_UID (insn))
6717 - INSN_ADDRESSES (INSN_UID (curr_pool->first_insn))
6718 + extra_size;
6719
6720 /* We will later have to insert base register reload insns.
6721 Those will have an effect on code size, which we need to
6722 consider here. This calculation makes rather pessimistic
6723 worst-case assumptions. */
6724 if (GET_CODE (insn) == CODE_LABEL)
6725 extra_size += 6;
6726
6727 if (chunk_size < S390_POOL_CHUNK_MIN
6728 && curr_pool->size < S390_POOL_CHUNK_MIN
6729 && !section_switch_p)
6730 continue;
6731
6732 /* Pool chunks can only be inserted after BARRIERs ... */
6733 if (GET_CODE (insn) == BARRIER)
6734 {
6735 s390_end_pool (curr_pool, insn);
6736 curr_pool = NULL;
6737 extra_size = 0;
6738 }
6739
6740 /* ... so if we don't find one in time, create one. */
6741 else if (chunk_size > S390_POOL_CHUNK_MAX
6742 || curr_pool->size > S390_POOL_CHUNK_MAX
6743 || section_switch_p)
6744 {
6745 rtx label, jump, barrier;
6746
6747 if (!section_switch_p)
6748 {
6749 /* We can insert the barrier only after a 'real' insn. */
6750 if (GET_CODE (insn) != INSN && GET_CODE (insn) != CALL_INSN)
6751 continue;
6752 if (get_attr_length (insn) == 0)
6753 continue;
6754 /* Don't separate LTREL_BASE from the corresponding
6755 LTREL_OFFSET load. */
6756 if (pending_ltrel)
6757 continue;
6758 }
6759 else
6760 {
6761 gcc_assert (!pending_ltrel);
6762
6763 /* The old pool has to end before the section switch
6764 note in order to make it part of the current
6765 section. */
6766 insn = PREV_INSN (insn);
6767 }
6768
6769 label = gen_label_rtx ();
6770 jump = emit_jump_insn_after (gen_jump (label), insn);
6771 barrier = emit_barrier_after (jump);
6772 insn = emit_label_after (label, barrier);
6773 JUMP_LABEL (jump) = label;
6774 LABEL_NUSES (label) = 1;
6775
6776 INSN_ADDRESSES_NEW (jump, -1);
6777 INSN_ADDRESSES_NEW (barrier, -1);
6778 INSN_ADDRESSES_NEW (insn, -1);
6779
6780 s390_end_pool (curr_pool, barrier);
6781 curr_pool = NULL;
6782 extra_size = 0;
6783 }
6784 }
6785 }
6786
6787 if (curr_pool)
6788 s390_end_pool (curr_pool, NULL_RTX);
6789 gcc_assert (!pending_ltrel);
6790
6791 /* Find all labels that are branched into
6792 from an insn belonging to a different chunk. */
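/* Such labels need a base register reload (emitted further down): control
   may arrive there from a chunk whose pool base value differs, so the base
   register has to be re-pointed at the pool of the chunk containing the
   label before any of its literals can be addressed. */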
6793
6794 far_labels = BITMAP_ALLOC (NULL);
6795
6796 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6797 {
6798 /* Labels marked with LABEL_PRESERVE_P can be target
6799 of non-local jumps, so we have to mark them.
6800 The same holds for named labels.
6801
6802 Don't do that, however, if it is the label before
6803 a jump table. */
6804
6805 if (GET_CODE (insn) == CODE_LABEL
6806 && (LABEL_PRESERVE_P (insn) || LABEL_NAME (insn)))
6807 {
6808 rtx vec_insn = next_real_insn (insn);
6809 rtx vec_pat = vec_insn && GET_CODE (vec_insn) == JUMP_INSN ?
6810 PATTERN (vec_insn) : NULL_RTX;
6811 if (!vec_pat
6812 || !(GET_CODE (vec_pat) == ADDR_VEC
6813 || GET_CODE (vec_pat) == ADDR_DIFF_VEC))
6814 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (insn));
6815 }
6816
6817 /* If we have a direct jump (conditional or unconditional)
6818 or a casesi jump, check all potential targets. */
6819 else if (GET_CODE (insn) == JUMP_INSN)
6820 {
6821 rtx pat = PATTERN (insn);
6822 if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 2)
6823 pat = XVECEXP (pat, 0, 0);
6824
6825 if (GET_CODE (pat) == SET)
6826 {
6827 rtx label = JUMP_LABEL (insn);
6828 if (label)
6829 {
6830 if (s390_find_pool (pool_list, label)
6831 != s390_find_pool (pool_list, insn))
6832 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
6833 }
6834 }
6835 else if (GET_CODE (pat) == PARALLEL
6836 && XVECLEN (pat, 0) == 2
6837 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
6838 && GET_CODE (XVECEXP (pat, 0, 1)) == USE
6839 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == LABEL_REF)
6840 {
6841 /* Find the jump table used by this casesi jump. */
6842 rtx vec_label = XEXP (XEXP (XVECEXP (pat, 0, 1), 0), 0);
6843 rtx vec_insn = next_real_insn (vec_label);
6844 rtx vec_pat = vec_insn && GET_CODE (vec_insn) == JUMP_INSN ?
6845 PATTERN (vec_insn) : NULL_RTX;
6846 if (vec_pat
6847 && (GET_CODE (vec_pat) == ADDR_VEC
6848 || GET_CODE (vec_pat) == ADDR_DIFF_VEC))
6849 {
6850 int i, diff_p = GET_CODE (vec_pat) == ADDR_DIFF_VEC;
6851
6852 for (i = 0; i < XVECLEN (vec_pat, diff_p); i++)
6853 {
6854 rtx label = XEXP (XVECEXP (vec_pat, diff_p, i), 0);
6855
6856 if (s390_find_pool (pool_list, label)
6857 != s390_find_pool (pool_list, insn))
6858 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
6859 }
6860 }
6861 }
6862 }
6863 }
6864
6865 /* Insert base register reload insns before every pool. */
6866
6867 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
6868 {
6869 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
6870 curr_pool->label);
6871 rtx insn = curr_pool->first_insn;
6872 INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
6873 }
6874
6875 /* Insert base register reload insns at every far label. */
6876
6877 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6878 if (GET_CODE (insn) == CODE_LABEL
6879 && bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
6880 {
6881 struct constant_pool *pool = s390_find_pool (pool_list, insn);
6882 if (pool)
6883 {
6884 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
6885 pool->label);
6886 INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
6887 }
6888 }
6889
6890
6891 BITMAP_FREE (far_labels);
6892
6893
6894 /* Recompute insn addresses. */
6895
6896 init_insn_lengths ();
6897 shorten_branches (get_insns ());
6898
6899 return pool_list;
6900 }
6901
6902 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
6903 After we have decided to use this list, finish implementing
6904 all changes to the current function as required. */
6905
6906 static void
6907 s390_chunkify_finish (struct constant_pool *pool_list)
6908 {
6909 struct constant_pool *curr_pool = NULL;
6910 rtx insn;
6911
6912
6913 /* Replace all literal pool references. */
6914
6915 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6916 {
6917 if (INSN_P (insn))
6918 replace_ltrel_base (&PATTERN (insn));
6919
6920 curr_pool = s390_find_pool (pool_list, insn);
6921 if (!curr_pool)
6922 continue;
6923
6924 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6925 {
6926 rtx addr, pool_ref = NULL_RTX;
6927 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6928 if (pool_ref)
6929 {
6930 if (s390_execute_label (insn))
6931 addr = s390_find_execute (curr_pool, insn);
6932 else
6933 addr = s390_find_constant (curr_pool,
6934 get_pool_constant (pool_ref),
6935 get_pool_mode (pool_ref));
6936
6937 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
6938 INSN_CODE (insn) = -1;
6939 }
6940 }
6941 }
6942
6943 /* Dump out all literal pools. */
6944
6945 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
6946 s390_dump_pool (curr_pool, 0);
6947
6948 /* Free pool list. */
6949
6950 while (pool_list)
6951 {
6952 struct constant_pool *next = pool_list->next;
6953 s390_free_pool (pool_list);
6954 pool_list = next;
6955 }
6956 }
6957
6958 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
6959 We have decided we cannot use this list, so revert all changes
6960 to the current function that were done by s390_chunkify_start. */
6961
6962 static void
6963 s390_chunkify_cancel (struct constant_pool *pool_list)
6964 {
6965 struct constant_pool *curr_pool = NULL;
6966 rtx insn;
6967
6968 /* Remove all pool placeholder insns. */
6969
6970 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
6971 {
6972 /* Did we insert an extra barrier? Remove it. */
6973 rtx barrier = PREV_INSN (curr_pool->pool_insn);
6974 rtx jump = barrier? PREV_INSN (barrier) : NULL_RTX;
6975 rtx label = NEXT_INSN (curr_pool->pool_insn);
6976
6977 if (jump && GET_CODE (jump) == JUMP_INSN
6978 && barrier && GET_CODE (barrier) == BARRIER
6979 && label && GET_CODE (label) == CODE_LABEL
6980 && GET_CODE (PATTERN (jump)) == SET
6981 && SET_DEST (PATTERN (jump)) == pc_rtx
6982 && GET_CODE (SET_SRC (PATTERN (jump))) == LABEL_REF
6983 && XEXP (SET_SRC (PATTERN (jump)), 0) == label)
6984 {
6985 remove_insn (jump);
6986 remove_insn (barrier);
6987 remove_insn (label);
6988 }
6989
6990 remove_insn (curr_pool->pool_insn);
6991 }
6992
6993 /* Remove all base register reload insns. */
6994
6995 for (insn = get_insns (); insn; )
6996 {
6997 rtx next_insn = NEXT_INSN (insn);
6998
6999 if (GET_CODE (insn) == INSN
7000 && GET_CODE (PATTERN (insn)) == SET
7001 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
7002 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_RELOAD_BASE)
7003 remove_insn (insn);
7004
7005 insn = next_insn;
7006 }
7007
7008 /* Free pool list. */
7009
7010 while (pool_list)
7011 {
7012 struct constant_pool *next = pool_list->next;
7013 s390_free_pool (pool_list);
7014 pool_list = next;
7015 }
7016 }
7017
7018 /* Output the constant pool entry EXP in mode MODE with alignment ALIGN. */
7019
7020 void
7021 s390_output_pool_entry (rtx exp, enum machine_mode mode, unsigned int align)
7022 {
7023 REAL_VALUE_TYPE r;
7024
7025 switch (GET_MODE_CLASS (mode))
7026 {
7027 case MODE_FLOAT:
7028 case MODE_DECIMAL_FLOAT:
7029 gcc_assert (GET_CODE (exp) == CONST_DOUBLE);
7030
7031 REAL_VALUE_FROM_CONST_DOUBLE (r, exp);
7032 assemble_real (r, mode, align);
7033 break;
7034
7035 case MODE_INT:
7036 assemble_integer (exp, GET_MODE_SIZE (mode), align, 1);
7037 mark_symbol_refs_as_used (exp);
7038 break;
7039
7040 default:
7041 gcc_unreachable ();
7042 }
7043 }
7044
7045
7046 /* Return an RTL expression representing the value of the return address
7047 for the frame COUNT steps up from the current frame. FRAME is the
7048 frame pointer of that frame. */
7049
7050 rtx
7051 s390_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
7052 {
7053 int offset;
7054 rtx addr;
7055
7056 /* Without backchain, we fail for all but the current frame. */
7057
7058 if (!TARGET_BACKCHAIN && count > 0)
7059 return NULL_RTX;
7060
7061 /* For the current frame, we need to make sure the initial
7062 value of RETURN_REGNUM is actually saved. */
7063
7064 if (count == 0)
7065 {
7066 /* On non-z architectures branch splitting could overwrite r14. */
7067 if (TARGET_CPU_ZARCH)
7068 return get_hard_reg_initial_val (Pmode, RETURN_REGNUM);
7069 else
7070 {
7071 cfun_frame_layout.save_return_addr_p = true;
7072 return gen_rtx_MEM (Pmode, return_address_pointer_rtx);
7073 }
7074 }
7075
7076 if (TARGET_PACKED_STACK)
7077 offset = -2 * UNITS_PER_LONG;
7078 else
7079 offset = RETURN_REGNUM * UNITS_PER_LONG;
7080
7081 addr = plus_constant (frame, offset);
7082 addr = memory_address (Pmode, addr);
7083 return gen_rtx_MEM (Pmode, addr);
7084 }
7085
7086 /* Return an RTL expression representing the back chain stored in
7087 the current stack frame. */
7088
7089 rtx
7090 s390_back_chain_rtx (void)
7091 {
7092 rtx chain;
7093
7094 gcc_assert (TARGET_BACKCHAIN);
7095
7096 if (TARGET_PACKED_STACK)
7097 chain = plus_constant (stack_pointer_rtx,
7098 STACK_POINTER_OFFSET - UNITS_PER_LONG);
7099 else
7100 chain = stack_pointer_rtx;
7101
7102 chain = gen_rtx_MEM (Pmode, chain);
7103 return chain;
7104 }
7105
7106 /* Find the first call-clobbered register unused in a function.
7107 This could be used as a base register in a leaf function
7108 or for holding the return address before the epilogue. */
7109
7110 static int
7111 find_unused_clobbered_reg (void)
7112 {
7113 int i;
7114 for (i = 0; i < 6; i++)
7115 if (!df_regs_ever_live_p (i))
7116 return i;
7117 return 0;
7118 }
7119
7120
7121 /* Helper function for s390_regs_ever_clobbered. Sets the fields in DATA for all
7122 clobbered hard regs in SETREG. */
7123
7124 static void
7125 s390_reg_clobbered_rtx (rtx setreg, const_rtx set_insn ATTRIBUTE_UNUSED, void *data)
7126 {
7127 int *regs_ever_clobbered = (int *)data;
7128 unsigned int i, regno;
7129 enum machine_mode mode = GET_MODE (setreg);
7130
7131 if (GET_CODE (setreg) == SUBREG)
7132 {
7133 rtx inner = SUBREG_REG (setreg);
7134 if (!GENERAL_REG_P (inner))
7135 return;
7136 regno = subreg_regno (setreg);
7137 }
7138 else if (GENERAL_REG_P (setreg))
7139 regno = REGNO (setreg);
7140 else
7141 return;
7142
7143 for (i = regno;
7144 i < regno + HARD_REGNO_NREGS (regno, mode);
7145 i++)
7146 regs_ever_clobbered[i] = 1;
7147 }
7148
7149 /* Walks through all basic blocks of the current function looking
7150 for clobbered hard regs using s390_reg_clobbered_rtx. The fields
7151 of the passed integer array REGS_EVER_CLOBBERED are set to one for
7152 each of those regs. */
7153
7154 static void
7155 s390_regs_ever_clobbered (int *regs_ever_clobbered)
7156 {
7157 basic_block cur_bb;
7158 rtx cur_insn;
7159 unsigned int i;
7160
7161 memset (regs_ever_clobbered, 0, 16 * sizeof (int));
7162
7163 /* For non-leaf functions we have to consider all call clobbered regs to be
7164 clobbered. */
7165 if (!current_function_is_leaf)
7166 {
7167 for (i = 0; i < 16; i++)
7168 regs_ever_clobbered[i] = call_really_used_regs[i];
7169 }
7170
7171 /* Make the "magic" eh_return registers live if necessary. For regs_ever_live
7172 this work is done by liveness analysis (mark_regs_live_at_end).
7173 Special care is needed for functions containing landing pads. Landing pads
7174 may use the eh registers, but the code which sets these registers is not
7175 contained in that function. Hence s390_regs_ever_clobbered is not able to
7176 deal with this automatically. */
7177 if (crtl->calls_eh_return || cfun->machine->has_landing_pad_p)
7178 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM ; i++)
7179 if (crtl->calls_eh_return
7180 || (cfun->machine->has_landing_pad_p
7181 && df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
7182 regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;
7183
7184 /* For nonlocal gotos all call-saved registers have to be saved.
7185 This flag is also set for the unwinding code in libgcc.
7186 See expand_builtin_unwind_init. For regs_ever_live this is done by
7187 reload. */
7188 if (cfun->has_nonlocal_label)
7189 for (i = 0; i < 16; i++)
7190 if (!call_really_used_regs[i])
7191 regs_ever_clobbered[i] = 1;
7192
7193 FOR_EACH_BB (cur_bb)
7194 {
7195 FOR_BB_INSNS (cur_bb, cur_insn)
7196 {
7197 if (INSN_P (cur_insn))
7198 note_stores (PATTERN (cur_insn),
7199 s390_reg_clobbered_rtx,
7200 regs_ever_clobbered);
7201 }
7202 }
7203 }
7204
7205 /* Determine the frame area which actually has to be accessed
7206 in the function epilogue. The values are stored at the
7207 given pointers AREA_BOTTOM (address of the lowest used stack
7208 address) and AREA_TOP (address of the first item which does
7209 not belong to the stack frame). */
7210
7211 static void
7212 s390_frame_area (int *area_bottom, int *area_top)
7213 {
7214 int b, t;
7215 int i;
7216
7217 b = INT_MAX;
7218 t = INT_MIN;
7219
7220 if (cfun_frame_layout.first_restore_gpr != -1)
7221 {
7222 b = (cfun_frame_layout.gprs_offset
7223 + cfun_frame_layout.first_restore_gpr * UNITS_PER_LONG);
7224 t = b + (cfun_frame_layout.last_restore_gpr
7225 - cfun_frame_layout.first_restore_gpr + 1) * UNITS_PER_LONG;
7226 }
7227
7228 if (TARGET_64BIT && cfun_save_high_fprs_p)
7229 {
7230 b = MIN (b, cfun_frame_layout.f8_offset);
7231 t = MAX (t, (cfun_frame_layout.f8_offset
7232 + cfun_frame_layout.high_fprs * 8));
7233 }
7234
7235 if (!TARGET_64BIT)
7236 for (i = 2; i < 4; i++)
7237 if (cfun_fpr_bit_p (i))
7238 {
7239 b = MIN (b, cfun_frame_layout.f4_offset + (i - 2) * 8);
7240 t = MAX (t, cfun_frame_layout.f4_offset + (i - 1) * 8);
7241 }
7242
7243 *area_bottom = b;
7244 *area_top = t;
7245 }
7246
7247 /* Fill cfun->machine with info about register usage of current function.
7248 Return in CLOBBERED_REGS which GPRs are currently considered set. */
7249
7250 static void
7251 s390_register_info (int clobbered_regs[])
7252 {
7253 int i, j;
7254
7255 /* fprs 8 - 15 are call saved for 64 Bit ABI. */
7256 cfun_frame_layout.fpr_bitmap = 0;
7257 cfun_frame_layout.high_fprs = 0;
7258 if (TARGET_64BIT)
7259 for (i = 24; i < 32; i++)
7260 if (df_regs_ever_live_p (i) && !global_regs[i])
7261 {
7262 cfun_set_fpr_bit (i - 16);
7263 cfun_frame_layout.high_fprs++;
7264 }
7265
7266 /* Find first and last gpr to be saved. We trust regs_ever_live
7267 data, except that we don't save and restore global registers.
7268
7269 Also, all registers with special meaning to the compiler need
7270 to be handled specially. */
7271
7272 s390_regs_ever_clobbered (clobbered_regs);
7273
7274 for (i = 0; i < 16; i++)
7275 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i] && !fixed_regs[i];
7276
7277 if (frame_pointer_needed)
7278 clobbered_regs[HARD_FRAME_POINTER_REGNUM] = 1;
7279
7280 if (flag_pic)
7281 clobbered_regs[PIC_OFFSET_TABLE_REGNUM]
7282 |= df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM);
7283
7284 clobbered_regs[BASE_REGNUM]
7285 |= (cfun->machine->base_reg
7286 && REGNO (cfun->machine->base_reg) == BASE_REGNUM);
7287
7288 clobbered_regs[RETURN_REGNUM]
7289 |= (!current_function_is_leaf
7290 || TARGET_TPF_PROFILING
7291 || cfun->machine->split_branches_pending_p
7292 || cfun_frame_layout.save_return_addr_p
7293 || crtl->calls_eh_return
7294 || cfun->stdarg);
7295
7296 clobbered_regs[STACK_POINTER_REGNUM]
7297 |= (!current_function_is_leaf
7298 || TARGET_TPF_PROFILING
7299 || cfun_save_high_fprs_p
7300 || get_frame_size () > 0
7301 || cfun->calls_alloca
7302 || cfun->stdarg);
7303
7304 for (i = 6; i < 16; i++)
7305 if (df_regs_ever_live_p (i) || clobbered_regs[i])
7306 break;
7307 for (j = 15; j > i; j--)
7308 if (df_regs_ever_live_p (j) || clobbered_regs[j])
7309 break;
7310
7311 if (i == 16)
7312 {
7313 /* Nothing to save/restore. */
7314 cfun_frame_layout.first_save_gpr_slot = -1;
7315 cfun_frame_layout.last_save_gpr_slot = -1;
7316 cfun_frame_layout.first_save_gpr = -1;
7317 cfun_frame_layout.first_restore_gpr = -1;
7318 cfun_frame_layout.last_save_gpr = -1;
7319 cfun_frame_layout.last_restore_gpr = -1;
7320 }
7321 else
7322 {
7323 /* Save slots for gprs from i to j. */
7324 cfun_frame_layout.first_save_gpr_slot = i;
7325 cfun_frame_layout.last_save_gpr_slot = j;
7326
7327 for (i = cfun_frame_layout.first_save_gpr_slot;
7328 i < cfun_frame_layout.last_save_gpr_slot + 1;
7329 i++)
7330 if (clobbered_regs[i])
7331 break;
7332
7333 for (j = cfun_frame_layout.last_save_gpr_slot; j > i; j--)
7334 if (clobbered_regs[j])
7335 break;
7336
7337 if (i == cfun_frame_layout.last_save_gpr_slot + 1)
7338 {
7339 /* Nothing to save/restore. */
7340 cfun_frame_layout.first_save_gpr = -1;
7341 cfun_frame_layout.first_restore_gpr = -1;
7342 cfun_frame_layout.last_save_gpr = -1;
7343 cfun_frame_layout.last_restore_gpr = -1;
7344 }
7345 else
7346 {
7347 /* Save / Restore from gpr i to j. */
7348 cfun_frame_layout.first_save_gpr = i;
7349 cfun_frame_layout.first_restore_gpr = i;
7350 cfun_frame_layout.last_save_gpr = j;
7351 cfun_frame_layout.last_restore_gpr = j;
7352 }
7353 }
7354
7355 if (cfun->stdarg)
7356 {
7357 /* Varargs functions need to save gprs 2 to 6. */
7358 if (cfun->va_list_gpr_size
7359 && crtl->args.info.gprs < GP_ARG_NUM_REG)
7360 {
7361 int min_gpr = crtl->args.info.gprs;
7362 int max_gpr = min_gpr + cfun->va_list_gpr_size;
7363 if (max_gpr > GP_ARG_NUM_REG)
7364 max_gpr = GP_ARG_NUM_REG;
7365
7366 if (cfun_frame_layout.first_save_gpr == -1
7367 || cfun_frame_layout.first_save_gpr > 2 + min_gpr)
7368 {
7369 cfun_frame_layout.first_save_gpr = 2 + min_gpr;
7370 cfun_frame_layout.first_save_gpr_slot = 2 + min_gpr;
7371 }
7372
7373 if (cfun_frame_layout.last_save_gpr == -1
7374 || cfun_frame_layout.last_save_gpr < 2 + max_gpr - 1)
7375 {
7376 cfun_frame_layout.last_save_gpr = 2 + max_gpr - 1;
7377 cfun_frame_layout.last_save_gpr_slot = 2 + max_gpr - 1;
7378 }
7379 }
7380
7381 /* Mark f0, f2 for 31 bit and f0-f4 for 64 bit to be saved. */
7382 if (TARGET_HARD_FLOAT && cfun->va_list_fpr_size
7383 && crtl->args.info.fprs < FP_ARG_NUM_REG)
7384 {
7385 int min_fpr = crtl->args.info.fprs;
7386 int max_fpr = min_fpr + cfun->va_list_fpr_size;
7387 if (max_fpr > FP_ARG_NUM_REG)
7388 max_fpr = FP_ARG_NUM_REG;
7389
7390 /* ??? This is currently required to ensure proper location
7391 of the fpr save slots within the va_list save area. */
7392 if (TARGET_PACKED_STACK)
7393 min_fpr = 0;
7394
7395 for (i = min_fpr; i < max_fpr; i++)
7396 cfun_set_fpr_bit (i);
7397 }
7398 }
7399
7400 if (!TARGET_64BIT)
7401 for (i = 2; i < 4; i++)
7402 if (df_regs_ever_live_p (i + 16) && !global_regs[i + 16])
7403 cfun_set_fpr_bit (i);
7404 }
7405
7406 /* Fill cfun->machine with info about frame of current function. */
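/* Rough sketch of the default (non-packed) layout computed below: the
   backchain sits at offset 0, GPR save slots at their register number
   times UNITS_PER_LONG, and the FPR save slots for f0/f2 (and f4/f6)
   directly after the 16 GPR slots at 16 * UNITS_PER_LONG. */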
7407
7408 static void
7409 s390_frame_info (void)
7410 {
7411 int i;
7412
7413 cfun_frame_layout.frame_size = get_frame_size ();
7414 if (!TARGET_64BIT && cfun_frame_layout.frame_size > 0x7fff0000)
7415 fatal_error ("total size of local variables exceeds architecture limit");
7416
7417 if (!TARGET_PACKED_STACK)
7418 {
7419 cfun_frame_layout.backchain_offset = 0;
7420 cfun_frame_layout.f0_offset = 16 * UNITS_PER_LONG;
7421 cfun_frame_layout.f4_offset = cfun_frame_layout.f0_offset + 2 * 8;
7422 cfun_frame_layout.f8_offset = -cfun_frame_layout.high_fprs * 8;
7423 cfun_frame_layout.gprs_offset = (cfun_frame_layout.first_save_gpr_slot
7424 * UNITS_PER_LONG);
7425 }
7426 else if (TARGET_BACKCHAIN) /* kernel stack layout */
7427 {
7428 cfun_frame_layout.backchain_offset = (STACK_POINTER_OFFSET
7429 - UNITS_PER_LONG);
7430 cfun_frame_layout.gprs_offset
7431 = (cfun_frame_layout.backchain_offset
7432 - (STACK_POINTER_REGNUM - cfun_frame_layout.first_save_gpr_slot + 1)
7433 * UNITS_PER_LONG);
7434
7435 if (TARGET_64BIT)
7436 {
7437 cfun_frame_layout.f4_offset
7438 = (cfun_frame_layout.gprs_offset
7439 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7440
7441 cfun_frame_layout.f0_offset
7442 = (cfun_frame_layout.f4_offset
7443 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7444 }
7445 else
7446 {
7447 /* On 31 bit we have to take care of the alignment of the
7448 floating point regs to provide the fastest access. */
7449 cfun_frame_layout.f0_offset
7450 = ((cfun_frame_layout.gprs_offset
7451 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1))
7452 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7453
7454 cfun_frame_layout.f4_offset
7455 = (cfun_frame_layout.f0_offset
7456 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7457 }
7458 }
7459 else /* no backchain */
7460 {
7461 cfun_frame_layout.f4_offset
7462 = (STACK_POINTER_OFFSET
7463 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7464
7465 cfun_frame_layout.f0_offset
7466 = (cfun_frame_layout.f4_offset
7467 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7468
7469 cfun_frame_layout.gprs_offset
7470 = cfun_frame_layout.f0_offset - cfun_gprs_save_area_size;
7471 }
7472
7473 if (current_function_is_leaf
7474 && !TARGET_TPF_PROFILING
7475 && cfun_frame_layout.frame_size == 0
7476 && !cfun_save_high_fprs_p
7477 && !cfun->calls_alloca
7478 && !cfun->stdarg)
7479 return;
7480
7481 if (!TARGET_PACKED_STACK)
7482 cfun_frame_layout.frame_size += (STACK_POINTER_OFFSET
7483 + crtl->outgoing_args_size
7484 + cfun_frame_layout.high_fprs * 8);
7485 else
7486 {
7487 if (TARGET_BACKCHAIN)
7488 cfun_frame_layout.frame_size += UNITS_PER_LONG;
7489
7490 /* No alignment trouble here because f8-f15 are only saved under
7491 64 bit. */
7492 cfun_frame_layout.f8_offset = (MIN (MIN (cfun_frame_layout.f0_offset,
7493 cfun_frame_layout.f4_offset),
7494 cfun_frame_layout.gprs_offset)
7495 - cfun_frame_layout.high_fprs * 8);
7496
7497 cfun_frame_layout.frame_size += cfun_frame_layout.high_fprs * 8;
7498
7499 for (i = 0; i < 8; i++)
7500 if (cfun_fpr_bit_p (i))
7501 cfun_frame_layout.frame_size += 8;
7502
7503 cfun_frame_layout.frame_size += cfun_gprs_save_area_size;
7504
7505 /* If, under 31 bit, an odd number of GPRs has to be saved, we have to
7506 adjust the frame size to maintain 8-byte alignment of stack frames. */
7507 cfun_frame_layout.frame_size = ((cfun_frame_layout.frame_size +
7508 STACK_BOUNDARY / BITS_PER_UNIT - 1)
7509 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1));
7510
7511 cfun_frame_layout.frame_size += crtl->outgoing_args_size;
7512 }
7513 }
7514
7515 /* Generate frame layout. Fills in register and frame data for the current
7516 function in cfun->machine. This routine can be called multiple times;
7517 it will re-do the complete frame layout every time. */
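/* The do/while loop below iterates to a fixed point: whether the literal
   pool base register is needed depends on the frame size, and the frame
   size in turn depends on which registers have to be saved, so the layout
   is recomputed until frame_size no longer changes. */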
7518
7519 static void
7520 s390_init_frame_layout (void)
7521 {
7522 HOST_WIDE_INT frame_size;
7523 int base_used;
7524 int clobbered_regs[16];
7525
7526 /* On S/390 machines, we may need to perform branch splitting, which
7527 will require both the base and the return address register. We have
7528 no choice but to assume we're going to need them until right at the
7529 end of the machine dependent reorg phase. */
7530 if (!TARGET_CPU_ZARCH)
7531 cfun->machine->split_branches_pending_p = true;
7532
7533 do
7534 {
7535 frame_size = cfun_frame_layout.frame_size;
7536
7537 /* Try to predict whether we'll need the base register. */
7538 base_used = cfun->machine->split_branches_pending_p
7539 || crtl->uses_const_pool
7540 || (!DISP_IN_RANGE (frame_size)
7541 && !CONST_OK_FOR_K (frame_size));
7542
7543 /* Decide which register to use as literal pool base. In small
7544 leaf functions, try to use an unused call-clobbered register
7545 as base register to avoid save/restore overhead. */
7546 if (!base_used)
7547 cfun->machine->base_reg = NULL_RTX;
7548 else if (current_function_is_leaf && !df_regs_ever_live_p (5))
7549 cfun->machine->base_reg = gen_rtx_REG (Pmode, 5);
7550 else
7551 cfun->machine->base_reg = gen_rtx_REG (Pmode, BASE_REGNUM);
7552
7553 s390_register_info (clobbered_regs);
7554 s390_frame_info ();
7555 }
7556 while (frame_size != cfun_frame_layout.frame_size);
7557 }
7558
7559 /* Update frame layout. Recompute actual register save data based on
7560 current info and update regs_ever_live for the special registers.
7561 May be called multiple times, but may never cause *more* registers
7562 to be saved than s390_init_frame_layout allocated room for. */
7563
7564 static void
7565 s390_update_frame_layout (void)
7566 {
7567 int clobbered_regs[16];
7568
7569 s390_register_info (clobbered_regs);
7570
7571 df_set_regs_ever_live (BASE_REGNUM,
7572 clobbered_regs[BASE_REGNUM] ? true : false);
7573 df_set_regs_ever_live (RETURN_REGNUM,
7574 clobbered_regs[RETURN_REGNUM] ? true : false);
7575 df_set_regs_ever_live (STACK_POINTER_REGNUM,
7576 clobbered_regs[STACK_POINTER_REGNUM] ? true : false);
7577
7578 if (cfun->machine->base_reg)
7579 df_set_regs_ever_live (REGNO (cfun->machine->base_reg), true);
7580 }
7581
7582 /* Return true if it is legal to put a value with MODE into REGNO. */
7583
7584 bool
7585 s390_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
7586 {
7587 switch (REGNO_REG_CLASS (regno))
7588 {
7589 case FP_REGS:
7590 if (REGNO_PAIR_OK (regno, mode))
7591 {
7592 if (mode == SImode || mode == DImode)
7593 return true;
7594
7595 if (FLOAT_MODE_P (mode) && GET_MODE_CLASS (mode) != MODE_VECTOR_FLOAT)
7596 return true;
7597 }
7598 break;
7599 case ADDR_REGS:
7600 if (FRAME_REGNO_P (regno) && mode == Pmode)
7601 return true;
7602
7603 /* fallthrough */
7604 case GENERAL_REGS:
7605 if (REGNO_PAIR_OK (regno, mode))
7606 {
7607 if (TARGET_ZARCH
7608 || (mode != TFmode && mode != TCmode && mode != TDmode))
7609 return true;
7610 }
7611 break;
7612 case CC_REGS:
7613 if (GET_MODE_CLASS (mode) == MODE_CC)
7614 return true;
7615 break;
7616 case ACCESS_REGS:
7617 if (REGNO_PAIR_OK (regno, mode))
7618 {
7619 if (mode == SImode || mode == Pmode)
7620 return true;
7621 }
7622 break;
7623 default:
7624 return false;
7625 }
7626
7627 return false;
7628 }
7629
7630 /* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */
7631
7632 bool
7633 s390_hard_regno_rename_ok (unsigned int old_reg, unsigned int new_reg)
7634 {
7635 /* Once we've decided upon a register to use as base register, it must
7636 no longer be used for any other purpose. */
7637 if (cfun->machine->base_reg)
7638 if (REGNO (cfun->machine->base_reg) == old_reg
7639 || REGNO (cfun->machine->base_reg) == new_reg)
7640 return false;
7641
7642 return true;
7643 }
7644
7645 /* Maximum number of registers to represent a value of mode MODE
7646 in a register of class RCLASS. */
7647
7648 int
7649 s390_class_max_nregs (enum reg_class rclass, enum machine_mode mode)
7650 {
7651 switch (rclass)
7652 {
7653 case FP_REGS:
7654 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
7655 return 2 * ((GET_MODE_SIZE (mode) / 2 + 8 - 1) / 8);
7656 else
7657 return (GET_MODE_SIZE (mode) + 8 - 1) / 8;
7658 case ACCESS_REGS:
7659 return (GET_MODE_SIZE (mode) + 4 - 1) / 4;
7660 default:
7661 break;
7662 }
7663 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
7664 }
7665
7666 /* Return true if register FROM can be eliminated via register TO. */
7667
7668 static bool
7669 s390_can_eliminate (const int from, const int to)
7670 {
7671 /* On zSeries machines, we have not marked the base register as fixed.
7672 Instead, we have an elimination rule BASE_REGNUM -> BASE_REGNUM.
7673 If a function requires the base register, we say here that this
7674 elimination cannot be performed. This will cause reload to free
7675 up the base register (as if it were fixed). On the other hand,
7676 if the current function does *not* require the base register, we
7677 say here the elimination succeeds, which in turn allows reload
7678 to allocate the base register for any other purpose. */
7679 if (from == BASE_REGNUM && to == BASE_REGNUM)
7680 {
7681 if (TARGET_CPU_ZARCH)
7682 {
7683 s390_init_frame_layout ();
7684 return cfun->machine->base_reg == NULL_RTX;
7685 }
7686
7687 return false;
7688 }
7689
7690 /* Everything else must point into the stack frame. */
7691 gcc_assert (to == STACK_POINTER_REGNUM
7692 || to == HARD_FRAME_POINTER_REGNUM);
7693
7694 gcc_assert (from == FRAME_POINTER_REGNUM
7695 || from == ARG_POINTER_REGNUM
7696 || from == RETURN_ADDRESS_POINTER_REGNUM);
7697
7698 /* Make sure we actually saved the return address. */
7699 if (from == RETURN_ADDRESS_POINTER_REGNUM)
7700 if (!crtl->calls_eh_return
7701 && !cfun->stdarg
7702 && !cfun_frame_layout.save_return_addr_p)
7703 return false;
7704
7705 return true;
7706 }
7707
7708 /* Return the offset between registers FROM and TO initially after the prologue. */
7709
7710 HOST_WIDE_INT
7711 s390_initial_elimination_offset (int from, int to)
7712 {
7713 HOST_WIDE_INT offset;
7714 int index;
7715
7716 /* ??? Why are we called for non-eliminable pairs? */
7717 if (!s390_can_eliminate (from, to))
7718 return 0;
7719
7720 switch (from)
7721 {
7722 case FRAME_POINTER_REGNUM:
7723 offset = (get_frame_size()
7724 + STACK_POINTER_OFFSET
7725 + crtl->outgoing_args_size);
7726 break;
7727
7728 case ARG_POINTER_REGNUM:
7729 s390_init_frame_layout ();
7730 offset = cfun_frame_layout.frame_size + STACK_POINTER_OFFSET;
7731 break;
7732
7733 case RETURN_ADDRESS_POINTER_REGNUM:
7734 s390_init_frame_layout ();
7735 index = RETURN_REGNUM - cfun_frame_layout.first_save_gpr_slot;
7736 gcc_assert (index >= 0);
7737 offset = cfun_frame_layout.frame_size + cfun_frame_layout.gprs_offset;
7738 offset += index * UNITS_PER_LONG;
7739 break;
7740
7741 case BASE_REGNUM:
7742 offset = 0;
7743 break;
7744
7745 default:
7746 gcc_unreachable ();
7747 }
7748
7749 return offset;
7750 }
7751
7752 /* Emit insn to save fpr REGNUM at offset OFFSET relative
7753 to register BASE. Return generated insn. */
7754
7755 static rtx
7756 save_fpr (rtx base, int offset, int regnum)
7757 {
7758 rtx addr;
7759 addr = gen_rtx_MEM (DFmode, plus_constant (base, offset));
7760
7761 if (regnum >= 16 && regnum <= (16 + FP_ARG_NUM_REG))
7762 set_mem_alias_set (addr, get_varargs_alias_set ());
7763 else
7764 set_mem_alias_set (addr, get_frame_alias_set ());
7765
7766 return emit_move_insn (addr, gen_rtx_REG (DFmode, regnum));
7767 }
7768
7769 /* Emit insn to restore fpr REGNUM from offset OFFSET relative
7770 to register BASE. Return generated insn. */
7771
7772 static rtx
7773 restore_fpr (rtx base, int offset, int regnum)
7774 {
7775 rtx addr;
7776 addr = gen_rtx_MEM (DFmode, plus_constant (base, offset));
7777 set_mem_alias_set (addr, get_frame_alias_set ());
7778
7779 return emit_move_insn (gen_rtx_REG (DFmode, regnum), addr);
7780 }
7781
7782 /* Return true if REGNO is a global register, but not one
7783 of the special ones that need to be saved/restored in any case. */
7784
7785 static inline bool
7786 global_not_special_regno_p (int regno)
7787 {
7788 return (global_regs[regno]
7789 /* These registers are special and need to be
7790 restored in any case. */
7791 && !(regno == STACK_POINTER_REGNUM
7792 || regno == RETURN_REGNUM
7793 || regno == BASE_REGNUM
7794 || (flag_pic && regno == (int)PIC_OFFSET_TABLE_REGNUM)));
7795 }
7796
7797 /* Generate insn to save registers FIRST to LAST into
7798 the register save area located at offset OFFSET
7799 relative to register BASE. */
7800
7801 static rtx
7802 save_gprs (rtx base, int offset, int first, int last)
7803 {
7804 rtx addr, insn, note;
7805 int i;
7806
7807 addr = plus_constant (base, offset);
7808 addr = gen_rtx_MEM (Pmode, addr);
7809
7810 set_mem_alias_set (addr, get_frame_alias_set ());
7811
7812 /* Special-case single register. */
7813 if (first == last)
7814 {
7815 if (TARGET_64BIT)
7816 insn = gen_movdi (addr, gen_rtx_REG (Pmode, first));
7817 else
7818 insn = gen_movsi (addr, gen_rtx_REG (Pmode, first));
7819
7820 if (!global_not_special_regno_p (first))
7821 RTX_FRAME_RELATED_P (insn) = 1;
7822 return insn;
7823 }
7824
7825
7826 insn = gen_store_multiple (addr,
7827 gen_rtx_REG (Pmode, first),
7828 GEN_INT (last - first + 1));
7829
7830 if (first <= 6 && cfun->stdarg)
7831 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
7832 {
7833 rtx mem = XEXP (XVECEXP (PATTERN (insn), 0, i), 0);
7834
7835 if (first + i <= 6)
7836 set_mem_alias_set (mem, get_varargs_alias_set ());
7837 }
7838
7839 /* We need to set the FRAME_RELATED flag on all SETs
7840 inside the store-multiple pattern.
7841
7842 However, we must not emit DWARF records for registers 2..5
7843 if they are stored for use by variable arguments ...
7844
7845 ??? Unfortunately, it is not enough to simply not set the
7846 FRAME_RELATED flags for those SETs, because the first SET
7847 of the PARALLEL is always treated as if it had the flag
7848 set, even if it does not. Therefore we emit a new pattern
7849 without those registers as REG_FRAME_RELATED_EXPR note. */
7850
7851 if (first >= 6 && !global_not_special_regno_p (first))
7852 {
7853 rtx pat = PATTERN (insn);
7854
7855 for (i = 0; i < XVECLEN (pat, 0); i++)
7856 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
7857 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (pat,
7858 0, i)))))
7859 RTX_FRAME_RELATED_P (XVECEXP (pat, 0, i)) = 1;
7860
7861 RTX_FRAME_RELATED_P (insn) = 1;
7862 }
7863 else if (last >= 6)
7864 {
7865 int start;
7866
7867 for (start = first >= 6 ? first : 6; start <= last; start++)
7868 if (!global_not_special_regno_p (start))
7869 break;
7870
7871 if (start > last)
7872 return insn;
7873
7874 addr = plus_constant (base, offset + (start - first) * UNITS_PER_LONG);
7875 note = gen_store_multiple (gen_rtx_MEM (Pmode, addr),
7876 gen_rtx_REG (Pmode, start),
7877 GEN_INT (last - start + 1));
7878 note = PATTERN (note);
7879
7880 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
7881
7882 for (i = 0; i < XVECLEN (note, 0); i++)
7883 if (GET_CODE (XVECEXP (note, 0, i)) == SET
7884 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (note,
7885 0, i)))))
7886 RTX_FRAME_RELATED_P (XVECEXP (note, 0, i)) = 1;
7887
7888 RTX_FRAME_RELATED_P (insn) = 1;
7889 }
7890
7891 return insn;
7892 }
7893
7894 /* Generate insn to restore registers FIRST to LAST from
7895 the register save area located at offset OFFSET
7896 relative to register BASE. */
7897
7898 static rtx
7899 restore_gprs (rtx base, int offset, int first, int last)
7900 {
7901 rtx addr, insn;
7902
7903 addr = plus_constant (base, offset);
7904 addr = gen_rtx_MEM (Pmode, addr);
7905 set_mem_alias_set (addr, get_frame_alias_set ());
7906
7907 /* Special-case single register. */
7908 if (first == last)
7909 {
7910 if (TARGET_64BIT)
7911 insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
7912 else
7913 insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);
7914
7915 return insn;
7916 }
7917
7918 insn = gen_load_multiple (gen_rtx_REG (Pmode, first),
7919 addr,
7920 GEN_INT (last - first + 1));
7921 return insn;
7922 }
7923
7924 /* Return insn sequence to load the GOT register. */
7925
7926 static GTY(()) rtx got_symbol;
7927 rtx
7928 s390_load_got (void)
7929 {
7930 rtx insns;
7931
7932 if (!got_symbol)
7933 {
7934 got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
7935 SYMBOL_REF_FLAGS (got_symbol) = SYMBOL_FLAG_LOCAL;
7936 }
7937
7938 start_sequence ();
7939
7940 if (TARGET_CPU_ZARCH)
7941 {
7942 emit_move_insn (pic_offset_table_rtx, got_symbol);
7943 }
7944 else
7945 {
7946 rtx offset;
7947
7948 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got_symbol),
7949 UNSPEC_LTREL_OFFSET);
7950 offset = gen_rtx_CONST (Pmode, offset);
7951 offset = force_const_mem (Pmode, offset);
7952
7953 emit_move_insn (pic_offset_table_rtx, offset);
7954
7955 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (offset, 0)),
7956 UNSPEC_LTREL_BASE);
7957 offset = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, offset);
7958
7959 emit_move_insn (pic_offset_table_rtx, offset);
7960 }
7961
7962 insns = get_insns ();
7963 end_sequence ();
7964 return insns;
7965 }
7966
7967 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
7968 and the change to the stack pointer. */
7969
7970 static void
7971 s390_emit_stack_tie (void)
7972 {
7973 rtx mem = gen_frame_mem (BLKmode,
7974 gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
7975
7976 emit_insn (gen_stack_tie (mem));
7977 }
7978
7979 /* Expand the prologue into a bunch of separate insns. */
7980
7981 void
7982 s390_emit_prologue (void)
7983 {
7984 rtx insn, addr;
7985 rtx temp_reg;
7986 int i;
7987 int offset;
7988 int next_fpr = 0;
7989
7990 /* Complete frame layout. */
7991
7992 s390_update_frame_layout ();
7993
7994 /* Annotate all constant pool references to let the scheduler know
7995 they implicitly use the base register. */
7996
7997 push_topmost_sequence ();
7998
7999 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8000 if (INSN_P (insn))
8001 {
8002 annotate_constant_pool_refs (&PATTERN (insn));
8003 df_insn_rescan (insn);
8004 }
8005
8006 pop_topmost_sequence ();
8007
8008 /* Choose best register to use for temp use within prologue.
8009 See below for why TPF must use the register 1. */
8010
8011 if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
8012 && !current_function_is_leaf
8013 && !TARGET_TPF_PROFILING)
8014 temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
8015 else
8016 temp_reg = gen_rtx_REG (Pmode, 1);
8017
8018 /* Save call saved gprs. */
8019 if (cfun_frame_layout.first_save_gpr != -1)
8020 {
8021 insn = save_gprs (stack_pointer_rtx,
8022 cfun_frame_layout.gprs_offset +
8023 UNITS_PER_LONG * (cfun_frame_layout.first_save_gpr
8024 - cfun_frame_layout.first_save_gpr_slot),
8025 cfun_frame_layout.first_save_gpr,
8026 cfun_frame_layout.last_save_gpr);
8027 emit_insn (insn);
8028 }
8029
8030 /* Dummy insn to mark literal pool slot. */
8031
8032 if (cfun->machine->base_reg)
8033 emit_insn (gen_main_pool (cfun->machine->base_reg));
8034
8035 offset = cfun_frame_layout.f0_offset;
8036
8037 /* Save f0 and f2. */
8038 for (i = 0; i < 2; i++)
8039 {
8040 if (cfun_fpr_bit_p (i))
8041 {
8042 save_fpr (stack_pointer_rtx, offset, i + 16);
8043 offset += 8;
8044 }
8045 else if (!TARGET_PACKED_STACK)
8046 offset += 8;
8047 }
8048
8049 /* Save f4 and f6. */
8050 offset = cfun_frame_layout.f4_offset;
8051 for (i = 2; i < 4; i++)
8052 {
8053 if (cfun_fpr_bit_p (i))
8054 {
8055 insn = save_fpr (stack_pointer_rtx, offset, i + 16);
8056 offset += 8;
8057
8058 /* If f4 and f6 are call clobbered they are saved due to stdargs and
8059 therefore are not frame related. */
8060 if (!call_really_used_regs[i + 16])
8061 RTX_FRAME_RELATED_P (insn) = 1;
8062 }
8063 else if (!TARGET_PACKED_STACK)
8064 offset += 8;
8065 }
8066
8067 if (TARGET_PACKED_STACK
8068 && cfun_save_high_fprs_p
8069 && cfun_frame_layout.f8_offset + cfun_frame_layout.high_fprs * 8 > 0)
8070 {
8071 offset = (cfun_frame_layout.f8_offset
8072 + (cfun_frame_layout.high_fprs - 1) * 8);
8073
8074 for (i = 15; i > 7 && offset >= 0; i--)
8075 if (cfun_fpr_bit_p (i))
8076 {
8077 insn = save_fpr (stack_pointer_rtx, offset, i + 16);
8078
8079 RTX_FRAME_RELATED_P (insn) = 1;
8080 offset -= 8;
8081 }
8082 if (offset >= cfun_frame_layout.f8_offset)
8083 next_fpr = i + 16;
8084 }
8085
8086 if (!TARGET_PACKED_STACK)
8087 next_fpr = cfun_save_high_fprs_p ? 31 : 0;
8088
8089 if (flag_stack_usage)
8090 current_function_static_stack_size = cfun_frame_layout.frame_size;
8091
8092 /* Decrement stack pointer. */
8093
8094 if (cfun_frame_layout.frame_size > 0)
8095 {
8096 rtx frame_off = GEN_INT (-cfun_frame_layout.frame_size);
8097 rtx real_frame_off;
8098
8099 if (s390_stack_size)
8100 {
8101 HOST_WIDE_INT stack_guard;
8102
8103 if (s390_stack_guard)
8104 stack_guard = s390_stack_guard;
8105 else
8106 {
8107 /* If no value for the stack guard is provided, the smallest power of 2
8108 that is at least as large as the current frame size is chosen. */
8109 stack_guard = 1;
8110 while (stack_guard < cfun_frame_layout.frame_size)
8111 stack_guard <<= 1;
8112 }
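/* E.g. a frame size of 3000 bytes yields a stack_guard of 4096 here,
   the smallest power of 2 not below the frame size.  */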
8113
8114 if (cfun_frame_layout.frame_size >= s390_stack_size)
8115 {
8116 warning (0, "frame size of function %qs is "
8117 HOST_WIDE_INT_PRINT_DEC
8118 " bytes exceeding user provided stack limit of "
8119 HOST_WIDE_INT_PRINT_DEC " bytes. "
8120 "An unconditional trap is added.",
8121 current_function_name(), cfun_frame_layout.frame_size,
8122 s390_stack_size);
8123 emit_insn (gen_trap ());
8124 }
8125 else
8126 {
8127 /* stack_guard has to be smaller than s390_stack_size.
8128 Otherwise we would emit an AND with zero which would
8129 not match the test under mask pattern. */
8130 if (stack_guard >= s390_stack_size)
8131 {
8132 warning (0, "frame size of function %qs is "
8133 HOST_WIDE_INT_PRINT_DEC
8134 " bytes which is more than half the stack size. "
8135 "The dynamic check would not be reliable. "
8136 "No check emitted for this function.",
8137 current_function_name(),
8138 cfun_frame_layout.frame_size);
8139 }
8140 else
8141 {
8142 HOST_WIDE_INT stack_check_mask = ((s390_stack_size - 1)
8143 & ~(stack_guard - 1));
8144
8145 rtx t = gen_rtx_AND (Pmode, stack_pointer_rtx,
8146 GEN_INT (stack_check_mask));
8147 if (TARGET_64BIT)
8148 emit_insn (gen_ctrapdi4 (gen_rtx_EQ (VOIDmode,
8149 t, const0_rtx),
8150 t, const0_rtx, const0_rtx));
8151 else
8152 emit_insn (gen_ctrapsi4 (gen_rtx_EQ (VOIDmode,
8153 t, const0_rtx),
8154 t, const0_rtx, const0_rtx));
8155 }
8156 }
8157 }
8158
8159 if (s390_warn_framesize > 0
8160 && cfun_frame_layout.frame_size >= s390_warn_framesize)
8161 warning (0, "frame size of %qs is " HOST_WIDE_INT_PRINT_DEC " bytes",
8162 current_function_name (), cfun_frame_layout.frame_size);
8163
8164 if (s390_warn_dynamicstack_p && cfun->calls_alloca)
8165 warning (0, "%qs uses dynamic stack allocation", current_function_name ());
8166
8167 /* Save incoming stack pointer into temp reg. */
8168 if (TARGET_BACKCHAIN || next_fpr)
8169 insn = emit_insn (gen_move_insn (temp_reg, stack_pointer_rtx));
8170
8171 /* Subtract frame size from stack pointer. */
8172
8173 if (DISP_IN_RANGE (INTVAL (frame_off)))
8174 {
8175 insn = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8176 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8177 frame_off));
8178 insn = emit_insn (insn);
8179 }
8180 else
8181 {
8182 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
8183 frame_off = force_const_mem (Pmode, frame_off);
8184
8185 insn = emit_insn (gen_add2_insn (stack_pointer_rtx, frame_off));
8186 annotate_constant_pool_refs (&PATTERN (insn));
8187 }
8188
8189 RTX_FRAME_RELATED_P (insn) = 1;
8190 real_frame_off = GEN_INT (-cfun_frame_layout.frame_size);
8191 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
8192 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8193 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8194 real_frame_off)));
8195
8196 /* Set backchain. */
8197
8198 if (TARGET_BACKCHAIN)
8199 {
8200 if (cfun_frame_layout.backchain_offset)
8201 addr = gen_rtx_MEM (Pmode,
8202 plus_constant (stack_pointer_rtx,
8203 cfun_frame_layout.backchain_offset));
8204 else
8205 addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
8206 set_mem_alias_set (addr, get_frame_alias_set ());
8207 insn = emit_insn (gen_move_insn (addr, temp_reg));
8208 }
8209
8210 /* If we support non-call exceptions (e.g. for Java),
8211 we need to make sure the backchain pointer is set up
8212 before any possibly trapping memory access. */
8213 if (TARGET_BACKCHAIN && cfun->can_throw_non_call_exceptions)
8214 {
8215 addr = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode));
8216 emit_clobber (addr);
8217 }
8218 }
8219
8220 /* Save fprs 8 - 15 (64 bit ABI). */
8221
8222 if (cfun_save_high_fprs_p && next_fpr)
8223 {
8224 /* If the stack might be accessed through a different register
8225 we have to make sure that the stack pointer decrement is not
8226 moved below the use of the stack slots. */
8227 s390_emit_stack_tie ();
8228
8229 insn = emit_insn (gen_add2_insn (temp_reg,
8230 GEN_INT (cfun_frame_layout.f8_offset)));
8231
8232 offset = 0;
8233
8234 for (i = 24; i <= next_fpr; i++)
8235 if (cfun_fpr_bit_p (i - 16))
8236 {
8237 rtx addr = plus_constant (stack_pointer_rtx,
8238 cfun_frame_layout.frame_size
8239 + cfun_frame_layout.f8_offset
8240 + offset);
8241
8242 insn = save_fpr (temp_reg, offset, i);
8243 offset += 8;
8244 RTX_FRAME_RELATED_P (insn) = 1;
8245 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
8246 gen_rtx_SET (VOIDmode,
8247 gen_rtx_MEM (DFmode, addr),
8248 gen_rtx_REG (DFmode, i)));
8249 }
8250 }
8251
8252 /* Set frame pointer, if needed. */
8253
8254 if (frame_pointer_needed)
8255 {
8256 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
8257 RTX_FRAME_RELATED_P (insn) = 1;
8258 }
8259
8260 /* Set up got pointer, if needed. */
8261
8262 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
8263 {
8264 rtx insns = s390_load_got ();
8265
8266 for (insn = insns; insn; insn = NEXT_INSN (insn))
8267 annotate_constant_pool_refs (&PATTERN (insn));
8268
8269 emit_insn (insns);
8270 }
8271
8272 if (TARGET_TPF_PROFILING)
8273 {
8274 /* Generate a BAS instruction to serve as a function
8275 entry intercept to facilitate the use of tracing
8276 algorithms located at the branch target. */
8277 emit_insn (gen_prologue_tpf ());
8278
8279 /* Emit a blockage here so that all code
8280 lies between the profiling mechanisms. */
8281 emit_insn (gen_blockage ());
8282 }
8283 }
8284
8285 /* Expand the epilogue into a bunch of separate insns. */
8286
8287 void
8288 s390_emit_epilogue (bool sibcall)
8289 {
8290 rtx frame_pointer, return_reg, cfa_restores = NULL_RTX;
8291 int area_bottom, area_top, offset = 0;
8292 int next_offset;
8293 rtvec p;
8294 int i;
8295
8296 if (TARGET_TPF_PROFILING)
8297 {
8298
8299 /* Generate a BAS instruction to serve as a function
8300 entry intercept to facilitate the use of tracing
8301 algorithms located at the branch target. */
8302
8303 /* Emit a blockage here so that all code
8304 lies between the profiling mechanisms. */
8305 emit_insn (gen_blockage ());
8306
8307 emit_insn (gen_epilogue_tpf ());
8308 }
8309
8310 /* Check whether to use frame or stack pointer for restore. */
8311
8312 frame_pointer = (frame_pointer_needed
8313 ? hard_frame_pointer_rtx : stack_pointer_rtx);
8314
8315 s390_frame_area (&area_bottom, &area_top);
8316
8317 /* Check whether we can access the register save area.
8318 If not, increment the frame pointer as required. */
8319
8320 if (area_top <= area_bottom)
8321 {
8322 /* Nothing to restore. */
8323 }
8324 else if (DISP_IN_RANGE (cfun_frame_layout.frame_size + area_bottom)
8325 && DISP_IN_RANGE (cfun_frame_layout.frame_size + area_top - 1))
8326 {
8327 /* Area is in range. */
8328 offset = cfun_frame_layout.frame_size;
8329 }
8330 else
8331 {
8332 rtx insn, frame_off, cfa;
8333
8334 offset = area_bottom < 0 ? -area_bottom : 0;
8335 frame_off = GEN_INT (cfun_frame_layout.frame_size - offset);
8336
8337 cfa = gen_rtx_SET (VOIDmode, frame_pointer,
8338 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
8339 if (DISP_IN_RANGE (INTVAL (frame_off)))
8340 {
8341 insn = gen_rtx_SET (VOIDmode, frame_pointer,
8342 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
8343 insn = emit_insn (insn);
8344 }
8345 else
8346 {
8347 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
8348 frame_off = force_const_mem (Pmode, frame_off);
8349
8350 insn = emit_insn (gen_add2_insn (frame_pointer, frame_off));
8351 annotate_constant_pool_refs (&PATTERN (insn));
8352 }
8353 add_reg_note (insn, REG_CFA_ADJUST_CFA, cfa);
8354 RTX_FRAME_RELATED_P (insn) = 1;
8355 }
8356
8357 /* Restore call saved fprs. */
8358
8359 if (TARGET_64BIT)
8360 {
8361 if (cfun_save_high_fprs_p)
8362 {
8363 next_offset = cfun_frame_layout.f8_offset;
8364 for (i = 24; i < 32; i++)
8365 {
8366 if (cfun_fpr_bit_p (i - 16))
8367 {
8368 restore_fpr (frame_pointer,
8369 offset + next_offset, i);
8370 cfa_restores
8371 = alloc_reg_note (REG_CFA_RESTORE,
8372 gen_rtx_REG (DFmode, i), cfa_restores);
8373 next_offset += 8;
8374 }
8375 }
8376 }
8377
8378 }
8379 else
8380 {
8381 next_offset = cfun_frame_layout.f4_offset;
8382 for (i = 18; i < 20; i++)
8383 {
8384 if (cfun_fpr_bit_p (i - 16))
8385 {
8386 restore_fpr (frame_pointer,
8387 offset + next_offset, i);
8388 cfa_restores
8389 = alloc_reg_note (REG_CFA_RESTORE,
8390 gen_rtx_REG (DFmode, i), cfa_restores);
8391 next_offset += 8;
8392 }
8393 else if (!TARGET_PACKED_STACK)
8394 next_offset += 8;
8395 }
8396
8397 }
8398
8399 /* Return register. */
8400
8401 return_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
8402
8403 /* Restore call saved gprs. */
8404
8405 if (cfun_frame_layout.first_restore_gpr != -1)
8406 {
8407 rtx insn, addr;
8408 int i;
8409
8410 /* Check for global registers and save them
8411 to the stack locations from which they get restored. */
8412
8413 for (i = cfun_frame_layout.first_restore_gpr;
8414 i <= cfun_frame_layout.last_restore_gpr;
8415 i++)
8416 {
8417 if (global_not_special_regno_p (i))
8418 {
8419 addr = plus_constant (frame_pointer,
8420 offset + cfun_frame_layout.gprs_offset
8421 + (i - cfun_frame_layout.first_save_gpr_slot)
8422 * UNITS_PER_LONG);
8423 addr = gen_rtx_MEM (Pmode, addr);
8424 set_mem_alias_set (addr, get_frame_alias_set ());
8425 emit_move_insn (addr, gen_rtx_REG (Pmode, i));
8426 }
8427 else
8428 cfa_restores
8429 = alloc_reg_note (REG_CFA_RESTORE,
8430 gen_rtx_REG (Pmode, i), cfa_restores);
8431 }
8432
8433 if (! sibcall)
8434 {
8435 /* Fetch return address from stack before load multiple;
8436 this is beneficial for scheduling. */
8437
8438 if (cfun_frame_layout.save_return_addr_p
8439 || (cfun_frame_layout.first_restore_gpr < BASE_REGNUM
8440 && cfun_frame_layout.last_restore_gpr > RETURN_REGNUM))
8441 {
8442 int return_regnum = find_unused_clobbered_reg();
8443 if (!return_regnum)
8444 return_regnum = 4;
8445 return_reg = gen_rtx_REG (Pmode, return_regnum);
8446
8447 addr = plus_constant (frame_pointer,
8448 offset + cfun_frame_layout.gprs_offset
8449 + (RETURN_REGNUM
8450 - cfun_frame_layout.first_save_gpr_slot)
8451 * UNITS_PER_LONG);
8452 addr = gen_rtx_MEM (Pmode, addr);
8453 set_mem_alias_set (addr, get_frame_alias_set ());
8454 emit_move_insn (return_reg, addr);
8455 }
8456 }
8457
8458 insn = restore_gprs (frame_pointer,
8459 offset + cfun_frame_layout.gprs_offset
8460 + (cfun_frame_layout.first_restore_gpr
8461 - cfun_frame_layout.first_save_gpr_slot)
8462 * UNITS_PER_LONG,
8463 cfun_frame_layout.first_restore_gpr,
8464 cfun_frame_layout.last_restore_gpr);
8465 insn = emit_insn (insn);
8466 REG_NOTES (insn) = cfa_restores;
8467 add_reg_note (insn, REG_CFA_DEF_CFA,
8468 plus_constant (stack_pointer_rtx, STACK_POINTER_OFFSET));
8469 RTX_FRAME_RELATED_P (insn) = 1;
8470 }
8471
8472 if (! sibcall)
8473 {
8474
8475 /* Return to caller. */
8476
8477 p = rtvec_alloc (2);
8478
8479 RTVEC_ELT (p, 0) = gen_rtx_RETURN (VOIDmode);
8480 RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode, return_reg);
8481 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
8482 }
8483 }
8484
8485
8486 /* Return the size in bytes of a function argument of
8487 type TYPE and/or mode MODE. At least one of TYPE or
8488 MODE must be specified. */
8489
8490 static int
8491 s390_function_arg_size (enum machine_mode mode, const_tree type)
8492 {
8493 if (type)
8494 return int_size_in_bytes (type);
8495
8496 /* No type info available for some library calls ... */
8497 if (mode != BLKmode)
8498 return GET_MODE_SIZE (mode);
8499
8500 /* If we have neither type nor mode, abort. */
8501 gcc_unreachable ();
8502 }
8503
8504 /* Return true if a function argument of type TYPE and mode MODE
8505 is to be passed in a floating-point register, if available. */
8506
8507 static bool
8508 s390_function_arg_float (enum machine_mode mode, const_tree type)
8509 {
8510 int size = s390_function_arg_size (mode, type);
8511 if (size > 8)
8512 return false;
8513
8514 /* Soft-float changes the ABI: no floating-point registers are used. */
8515 if (TARGET_SOFT_FLOAT)
8516 return false;
8517
8518 /* No type info available for some library calls ... */
8519 if (!type)
8520 return mode == SFmode || mode == DFmode || mode == SDmode || mode == DDmode;
8521
8522 /* The ABI says that record types with a single member are treated
8523 just like that member would be. */
8524 while (TREE_CODE (type) == RECORD_TYPE)
8525 {
8526 tree field, single = NULL_TREE;
8527
8528 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
8529 {
8530 if (TREE_CODE (field) != FIELD_DECL)
8531 continue;
8532
8533 if (single == NULL_TREE)
8534 single = TREE_TYPE (field);
8535 else
8536 return false;
8537 }
8538
8539 if (single == NULL_TREE)
8540 return false;
8541 else
8542 type = single;
8543 }
8544
8545 return TREE_CODE (type) == REAL_TYPE;
8546 }
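/* For example, by the single-member rule above an argument of type
     struct { double d; }
   is passed just like a plain double, i.e. in an FP register when one is
   available, whereas a struct with two float members is not (and with
   -msoft-float no FP registers are used at all).  */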
8547
8548 /* Return true if a function argument of type TYPE and mode MODE
8549 is to be passed in an integer register, or a pair of integer
8550 registers, if available. */
8551
8552 static bool
8553 s390_function_arg_integer (enum machine_mode mode, const_tree type)
8554 {
8555 int size = s390_function_arg_size (mode, type);
8556 if (size > 8)
8557 return false;
8558
8559 /* No type info available for some library calls ... */
8560 if (!type)
8561 return GET_MODE_CLASS (mode) == MODE_INT
8562 || (TARGET_SOFT_FLOAT && SCALAR_FLOAT_MODE_P (mode));
8563
8564 /* We accept small integral (and similar) types. */
8565 if (INTEGRAL_TYPE_P (type)
8566 || POINTER_TYPE_P (type)
8567 || TREE_CODE (type) == NULLPTR_TYPE
8568 || TREE_CODE (type) == OFFSET_TYPE
8569 || (TARGET_SOFT_FLOAT && TREE_CODE (type) == REAL_TYPE))
8570 return true;
8571
8572 /* We also accept structs of size 1, 2, 4, 8 that are not
8573 passed in floating-point registers. */
8574 if (AGGREGATE_TYPE_P (type)
8575 && exact_log2 (size) >= 0
8576 && !s390_function_arg_float (mode, type))
8577 return true;
8578
8579 return false;
8580 }
8581
8582 /* Return 1 if a function argument of type TYPE and mode MODE
8583 is to be passed by reference. The ABI specifies that only
8584 structures of size 1, 2, 4, or 8 bytes are passed by value;
8585 all other structures (and complex numbers) are passed by
8586 reference. */
8587
8588 static bool
8589 s390_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
8590 enum machine_mode mode, const_tree type,
8591 bool named ATTRIBUTE_UNUSED)
8592 {
8593 int size = s390_function_arg_size (mode, type);
8594 if (size > 8)
8595 return true;
8596
8597 if (type)
8598 {
8599 if (AGGREGATE_TYPE_P (type) && exact_log2 (size) < 0)
8600 return 1;
8601
8602 if (TREE_CODE (type) == COMPLEX_TYPE
8603 || TREE_CODE (type) == VECTOR_TYPE)
8604 return 1;
8605 }
8606
8607 return 0;
8608 }
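/* Example: an 8-byte struct is passed by value (its size is a power of
   two not larger than 8), while a 12-byte struct, any COMPLEX_TYPE and
   any VECTOR_TYPE argument is passed by reference.  */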
8609
8610 /* Update the data in CUM to advance over an argument of mode MODE and
8611 data type TYPE. (TYPE is null for libcalls where that information
8612 may not be available.). The boolean NAMED specifies whether the
8613 argument is a named argument (as opposed to an unnamed argument
8614 matching an ellipsis). */
8615
8616 static void
8617 s390_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
8618 const_tree type, bool named ATTRIBUTE_UNUSED)
8619 {
8620 if (s390_function_arg_float (mode, type))
8621 {
8622 cum->fprs += 1;
8623 }
8624 else if (s390_function_arg_integer (mode, type))
8625 {
8626 int size = s390_function_arg_size (mode, type);
8627 cum->gprs += ((size + UNITS_PER_LONG - 1) / UNITS_PER_LONG);
8628 }
8629 else
8630 gcc_unreachable ();
8631 }
8632
8633 /* Define where to put the arguments to a function.
8634 Value is zero to push the argument on the stack,
8635 or a hard register in which to store the argument.
8636
8637 MODE is the argument's machine mode.
8638 TYPE is the data type of the argument (as a tree).
8639 This is null for libcalls where that information may
8640 not be available.
8641 CUM is a variable of type CUMULATIVE_ARGS which gives info about
8642 the preceding args and about the function being called.
8643 NAMED is nonzero if this argument is a named parameter
8644 (otherwise it is an extra parameter matching an ellipsis).
8645
8646 On S/390, we use general purpose registers 2 through 6 to
8647 pass integer, pointer, and certain structure arguments, and
8648 floating point registers 0 and 2 (0, 2, 4, and 6 on 64-bit)
8649 to pass floating point arguments. All remaining arguments
8650 are pushed to the stack. */
8651
8652 static rtx
8653 s390_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
8654 const_tree type, bool named ATTRIBUTE_UNUSED)
8655 {
8656 if (s390_function_arg_float (mode, type))
8657 {
8658 if (cum->fprs + 1 > FP_ARG_NUM_REG)
8659 return 0;
8660 else
8661 return gen_rtx_REG (mode, cum->fprs + 16);
8662 }
8663 else if (s390_function_arg_integer (mode, type))
8664 {
8665 int size = s390_function_arg_size (mode, type);
8666 int n_gprs = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
8667
8668 if (cum->gprs + n_gprs > GP_ARG_NUM_REG)
8669 return 0;
8670 else if (n_gprs == 1 || UNITS_PER_WORD == UNITS_PER_LONG)
8671 return gen_rtx_REG (mode, cum->gprs + 2);
8672 else if (n_gprs == 2)
8673 {
8674 rtvec p = rtvec_alloc (2);
8675
8676 RTVEC_ELT (p, 0)
8677 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 2),
8678 const0_rtx);
8679 RTVEC_ELT (p, 1)
8680 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 3),
8681 GEN_INT (4));
8682
8683 return gen_rtx_PARALLEL (mode, p);
8684 }
8685 }
8686
8687 /* After the real arguments, expand_call calls us once again
8688 with a void_type_node type. Whatever we return here is
8689 passed as operand 2 to the call expanders.
8690
8691 We don't need this feature ... */
8692 else if (type == void_type_node)
8693 return const0_rtx;
8694
8695 gcc_unreachable ();
8696 }
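/* For example, with -m31 -mzarch a DImode argument that still fits into
   the GPR area comes back as a PARALLEL naming two consecutive SImode
   argument registers (compare the analogous return-value case in
   s390_function_and_libcall_value below); otherwise it is passed as a
   single REG.  */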
8697
8698 /* Return true if return values of type TYPE should be returned
8699 in a memory buffer whose address is passed by the caller as
8700 hidden first argument. */
8701
8702 static bool
8703 s390_return_in_memory (const_tree type, const_tree fundecl ATTRIBUTE_UNUSED)
8704 {
8705 /* We accept small integral (and similar) types. */
8706 if (INTEGRAL_TYPE_P (type)
8707 || POINTER_TYPE_P (type)
8708 || TREE_CODE (type) == OFFSET_TYPE
8709 || TREE_CODE (type) == REAL_TYPE)
8710 return int_size_in_bytes (type) > 8;
8711
8712 /* Aggregates and similar constructs are always returned
8713 in memory. */
8714 if (AGGREGATE_TYPE_P (type)
8715 || TREE_CODE (type) == COMPLEX_TYPE
8716 || TREE_CODE (type) == VECTOR_TYPE)
8717 return true;
8718
8719 /* ??? We get called on all sorts of random stuff from
8720 aggregate_value_p. We can't abort, but it's not clear
8721 what's safe to return. Pretend it's a struct I guess. */
8722 return true;
8723 }
8724
8725 /* Function arguments and return values are promoted to word size. */
8726
8727 static enum machine_mode
8728 s390_promote_function_mode (const_tree type, enum machine_mode mode,
8729 int *punsignedp,
8730 const_tree fntype ATTRIBUTE_UNUSED,
8731 int for_return ATTRIBUTE_UNUSED)
8732 {
8733 if (INTEGRAL_MODE_P (mode)
8734 && GET_MODE_SIZE (mode) < UNITS_PER_LONG)
8735 {
8736 if (POINTER_TYPE_P (type))
8737 *punsignedp = POINTERS_EXTEND_UNSIGNED;
8738 return Pmode;
8739 }
8740
8741 return mode;
8742 }
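/* Example: a 'short int' argument or return value is promoted to Pmode
   here (SImode with -m31, DImode with -m64), while anything at least as
   wide as a 'long' keeps its original mode.  */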
8743
8744 /* Define where to return a (scalar) value of type RET_TYPE.
8745 If RET_TYPE is null, define where to return a (scalar)
8746 value of mode MODE from a libcall. */
8747
8748 static rtx
8749 s390_function_and_libcall_value (enum machine_mode mode,
8750 const_tree ret_type,
8751 const_tree fntype_or_decl,
8752 bool outgoing ATTRIBUTE_UNUSED)
8753 {
8754 /* For normal functions perform the promotion as
8755 promote_function_mode would do. */
8756 if (ret_type)
8757 {
8758 int unsignedp = TYPE_UNSIGNED (ret_type);
8759 mode = promote_function_mode (ret_type, mode, &unsignedp,
8760 fntype_or_decl, 1);
8761 }
8762
8763 gcc_assert (GET_MODE_CLASS (mode) == MODE_INT || SCALAR_FLOAT_MODE_P (mode));
8764 gcc_assert (GET_MODE_SIZE (mode) <= 8);
8765
8766 if (TARGET_HARD_FLOAT && SCALAR_FLOAT_MODE_P (mode))
8767 return gen_rtx_REG (mode, 16);
8768 else if (GET_MODE_SIZE (mode) <= UNITS_PER_LONG
8769 || UNITS_PER_LONG == UNITS_PER_WORD)
8770 return gen_rtx_REG (mode, 2);
8771 else if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_LONG)
8772 {
8773 /* This case is triggered when returning a 64 bit value with
8774 -m31 -mzarch. Although the value would fit into a single
8775 register it has to be forced into a 32 bit register pair in
8776 order to match the ABI. */
8777 rtvec p = rtvec_alloc (2);
8778
8779 RTVEC_ELT (p, 0)
8780 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 2), const0_rtx);
8781 RTVEC_ELT (p, 1)
8782 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 3), GEN_INT (4));
8783
8784 return gen_rtx_PARALLEL (mode, p);
8785 }
8786
8787 gcc_unreachable ();
8788 }
8789
8790 /* Define where to return a scalar return value of type RET_TYPE. */
8791
8792 static rtx
8793 s390_function_value (const_tree ret_type, const_tree fn_decl_or_type,
8794 bool outgoing)
8795 {
8796 return s390_function_and_libcall_value (TYPE_MODE (ret_type), ret_type,
8797 fn_decl_or_type, outgoing);
8798 }
8799
8800 /* Define where to return a scalar libcall return value of mode
8801 MODE. */
8802
8803 static rtx
8804 s390_libcall_value (enum machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
8805 {
8806 return s390_function_and_libcall_value (mode, NULL_TREE,
8807 NULL_TREE, true);
8808 }
8809
8810
8811 /* Create and return the va_list datatype.
8812
8813 On S/390, va_list is an array type equivalent to
8814
8815 typedef struct __va_list_tag
8816 {
8817 long __gpr;
8818 long __fpr;
8819 void *__overflow_arg_area;
8820 void *__reg_save_area;
8821 } va_list[1];
8822
8823 where __gpr and __fpr hold the number of general purpose
8824 or floating point arguments used up to now, respectively,
8825 __overflow_arg_area points to the stack location of the
8826 next argument passed on the stack, and __reg_save_area
8827 always points to the start of the register area in the
8828 call frame of the current function. The function prologue
8829 saves all registers used for argument passing into this
8830 area if the function uses variable arguments. */
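/* For example, for "void f (int a, double b, ...)" va_start initializes
   __gpr to 1 and __fpr to 1, since one GPR and one FPR were already
   consumed by the named arguments before the ellipsis.  */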
8831
8832 static tree
8833 s390_build_builtin_va_list (void)
8834 {
8835 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
8836
8837 record = lang_hooks.types.make_type (RECORD_TYPE);
8838
8839 type_decl =
8840 build_decl (BUILTINS_LOCATION,
8841 TYPE_DECL, get_identifier ("__va_list_tag"), record);
8842
8843 f_gpr = build_decl (BUILTINS_LOCATION,
8844 FIELD_DECL, get_identifier ("__gpr"),
8845 long_integer_type_node);
8846 f_fpr = build_decl (BUILTINS_LOCATION,
8847 FIELD_DECL, get_identifier ("__fpr"),
8848 long_integer_type_node);
8849 f_ovf = build_decl (BUILTINS_LOCATION,
8850 FIELD_DECL, get_identifier ("__overflow_arg_area"),
8851 ptr_type_node);
8852 f_sav = build_decl (BUILTINS_LOCATION,
8853 FIELD_DECL, get_identifier ("__reg_save_area"),
8854 ptr_type_node);
8855
8856 va_list_gpr_counter_field = f_gpr;
8857 va_list_fpr_counter_field = f_fpr;
8858
8859 DECL_FIELD_CONTEXT (f_gpr) = record;
8860 DECL_FIELD_CONTEXT (f_fpr) = record;
8861 DECL_FIELD_CONTEXT (f_ovf) = record;
8862 DECL_FIELD_CONTEXT (f_sav) = record;
8863
8864 TYPE_STUB_DECL (record) = type_decl;
8865 TYPE_NAME (record) = type_decl;
8866 TYPE_FIELDS (record) = f_gpr;
8867 DECL_CHAIN (f_gpr) = f_fpr;
8868 DECL_CHAIN (f_fpr) = f_ovf;
8869 DECL_CHAIN (f_ovf) = f_sav;
8870
8871 layout_type (record);
8872
8873 /* The correct type is an array type of one element. */
8874 return build_array_type (record, build_index_type (size_zero_node));
8875 }
8876
8877 /* Implement va_start by filling the va_list structure VALIST.
8878 STDARG_P is always true, and ignored.
8879 NEXTARG points to the first anonymous stack argument.
8880
8881 The following global variables are used to initialize
8882 the va_list structure:
8883
8884 crtl->args.info:
8885 holds number of gprs and fprs used for named arguments.
8886 crtl->args.arg_offset_rtx:
8887 holds the offset of the first anonymous stack argument
8888 (relative to the virtual arg pointer). */
8889
8890 static void
8891 s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
8892 {
8893 HOST_WIDE_INT n_gpr, n_fpr;
8894 int off;
8895 tree f_gpr, f_fpr, f_ovf, f_sav;
8896 tree gpr, fpr, ovf, sav, t;
8897
8898 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
8899 f_fpr = DECL_CHAIN (f_gpr);
8900 f_ovf = DECL_CHAIN (f_fpr);
8901 f_sav = DECL_CHAIN (f_ovf);
8902
8903 valist = build_simple_mem_ref (valist);
8904 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
8905 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
8906 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
8907 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
8908
8909 /* Count number of gp and fp argument registers used. */
8910
8911 n_gpr = crtl->args.info.gprs;
8912 n_fpr = crtl->args.info.fprs;
8913
8914 if (cfun->va_list_gpr_size)
8915 {
8916 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
8917 build_int_cst (NULL_TREE, n_gpr));
8918 TREE_SIDE_EFFECTS (t) = 1;
8919 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8920 }
8921
8922 if (cfun->va_list_fpr_size)
8923 {
8924 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
8925 build_int_cst (NULL_TREE, n_fpr));
8926 TREE_SIDE_EFFECTS (t) = 1;
8927 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8928 }
8929
8930 /* Find the overflow area. */
8931 if (n_gpr + cfun->va_list_gpr_size > GP_ARG_NUM_REG
8932 || n_fpr + cfun->va_list_fpr_size > FP_ARG_NUM_REG)
8933 {
8934 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
8935
8936 off = INTVAL (crtl->args.arg_offset_rtx);
8937 off = off < 0 ? 0 : off;
8938 if (TARGET_DEBUG_ARG)
8939 fprintf (stderr, "va_start: n_gpr = %d, n_fpr = %d off %d\n",
8940 (int)n_gpr, (int)n_fpr, off);
8941
8942 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovf), t, size_int (off));
8943
8944 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
8945 TREE_SIDE_EFFECTS (t) = 1;
8946 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8947 }
8948
8949 /* Find the register save area. */
8950 if ((cfun->va_list_gpr_size && n_gpr < GP_ARG_NUM_REG)
8951 || (cfun->va_list_fpr_size && n_fpr < FP_ARG_NUM_REG))
8952 {
8953 t = make_tree (TREE_TYPE (sav), return_address_pointer_rtx);
8954 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (sav), t,
8955 size_int (-RETURN_REGNUM * UNITS_PER_LONG));
8956
8957 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
8958 TREE_SIDE_EFFECTS (t) = 1;
8959 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8960 }
8961 }
8962
8963 /* Implement va_arg by updating the va_list structure
8964 VALIST as required to retrieve an argument of type
8965 TYPE, and returning that argument.
8966
8967 Generates code equivalent to:
8968
8969 if (integral value) {
8970 if (size <= 4 && args.gpr < 5 ||
8971 size > 4 && args.gpr < 4 )
8972 ret = args.reg_save_area[args.gpr+8]
8973 else
8974 ret = *args.overflow_arg_area++;
8975 } else if (float value) {
8976 if (args.fpr < 2)
8977 ret = args.reg_save_area[args.fpr+64]
8978 else
8979 ret = *args.overflow_arg_area++;
8980 } else if (aggregate value) {
8981 if (args.gpr < 5)
8982 ret = *args.reg_save_area[args.gpr]
8983 else
8984 ret = **args.overflow_arg_area++;
8985 } */
8986
8987 static tree
8988 s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
8989 gimple_seq *post_p ATTRIBUTE_UNUSED)
8990 {
8991 tree f_gpr, f_fpr, f_ovf, f_sav;
8992 tree gpr, fpr, ovf, sav, reg, t, u;
8993 int indirect_p, size, n_reg, sav_ofs, sav_scale, max_reg;
8994 tree lab_false, lab_over, addr;
8995
8996 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
8997 f_fpr = DECL_CHAIN (f_gpr);
8998 f_ovf = DECL_CHAIN (f_fpr);
8999 f_sav = DECL_CHAIN (f_ovf);
9000
9001 valist = build_va_arg_indirect_ref (valist);
9002 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
9003 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
9004 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
9005
9006 /* The tree for args* cannot be shared between gpr/fpr and ovf since
9007 both appear on a lhs. */
9008 valist = unshare_expr (valist);
9009 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
9010
9011 size = int_size_in_bytes (type);
9012
9013 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
9014 {
9015 if (TARGET_DEBUG_ARG)
9016 {
9017 fprintf (stderr, "va_arg: aggregate type");
9018 debug_tree (type);
9019 }
9020
9021 /* Aggregates are passed by reference. */
9022 indirect_p = 1;
9023 reg = gpr;
9024 n_reg = 1;
9025
9026 /* Kernel stack layout on 31 bit: it is assumed here that no padding
9027 will be added by s390_frame_info because for va_args an even number
9028 of GPRs always has to be saved (r15-r2 = 14 regs). */
9029 sav_ofs = 2 * UNITS_PER_LONG;
9030 sav_scale = UNITS_PER_LONG;
9031 size = UNITS_PER_LONG;
9032 max_reg = GP_ARG_NUM_REG - n_reg;
9033 }
9034 else if (s390_function_arg_float (TYPE_MODE (type), type))
9035 {
9036 if (TARGET_DEBUG_ARG)
9037 {
9038 fprintf (stderr, "va_arg: float type");
9039 debug_tree (type);
9040 }
9041
9042 /* FP args go in FP registers, if present. */
9043 indirect_p = 0;
9044 reg = fpr;
9045 n_reg = 1;
9046 sav_ofs = 16 * UNITS_PER_LONG;
9047 sav_scale = 8;
9048 max_reg = FP_ARG_NUM_REG - n_reg;
9049 }
9050 else
9051 {
9052 if (TARGET_DEBUG_ARG)
9053 {
9054 fprintf (stderr, "va_arg: other type");
9055 debug_tree (type);
9056 }
9057
9058 /* Otherwise into GP registers. */
9059 indirect_p = 0;
9060 reg = gpr;
9061 n_reg = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
9062
9063 /* Kernel stack layout on 31 bit: it is assumed here that no padding
9064 will be added by s390_frame_info because for va_args an even number
9065 of GPRs always has to be saved (r15-r2 = 14 regs). */
9066 sav_ofs = 2 * UNITS_PER_LONG;
9067
9068 if (size < UNITS_PER_LONG)
9069 sav_ofs += UNITS_PER_LONG - size;
9070
9071 sav_scale = UNITS_PER_LONG;
9072 max_reg = GP_ARG_NUM_REG - n_reg;
9073 }
9074
9075 /* Pull the value out of the saved registers ... */
9076
9077 lab_false = create_artificial_label (UNKNOWN_LOCATION);
9078 lab_over = create_artificial_label (UNKNOWN_LOCATION);
9079 addr = create_tmp_var (ptr_type_node, "addr");
9080
9081 t = fold_convert (TREE_TYPE (reg), size_int (max_reg));
9082 t = build2 (GT_EXPR, boolean_type_node, reg, t);
9083 u = build1 (GOTO_EXPR, void_type_node, lab_false);
9084 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
9085 gimplify_and_add (t, pre_p);
9086
9087 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav,
9088 size_int (sav_ofs));
9089 u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
9090 fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
9091 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t, fold_convert (sizetype, u));
9092
9093 gimplify_assign (addr, t, pre_p);
9094
9095 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
9096
9097 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
9098
9099
9100 /* ... Otherwise out of the overflow area. */
9101
9102 t = ovf;
9103 if (size < UNITS_PER_LONG)
9104 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
9105 size_int (UNITS_PER_LONG - size));
9106
9107 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
9108
9109 gimplify_assign (addr, t, pre_p);
9110
9111 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
9112 size_int (size));
9113 gimplify_assign (ovf, t, pre_p);
9114
9115 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
9116
9117
9118 /* Increment register save count. */
9119
9120 u = build2 (PREINCREMENT_EXPR, TREE_TYPE (reg), reg,
9121 fold_convert (TREE_TYPE (reg), size_int (n_reg)));
9122 gimplify_and_add (u, pre_p);
9123
9124 if (indirect_p)
9125 {
9126 t = build_pointer_type_for_mode (build_pointer_type (type),
9127 ptr_mode, true);
9128 addr = fold_convert (t, addr);
9129 addr = build_va_arg_indirect_ref (addr);
9130 }
9131 else
9132 {
9133 t = build_pointer_type_for_mode (type, ptr_mode, true);
9134 addr = fold_convert (t, addr);
9135 }
9136
9137 return build_va_arg_indirect_ref (addr);
9138 }
9139
9140
9141 /* Builtins. */
9142
9143 enum s390_builtin
9144 {
9145 S390_BUILTIN_THREAD_POINTER,
9146 S390_BUILTIN_SET_THREAD_POINTER,
9147
9148 S390_BUILTIN_max
9149 };
9150
9151 static enum insn_code const code_for_builtin_64[S390_BUILTIN_max] = {
9152 CODE_FOR_get_tp_64,
9153 CODE_FOR_set_tp_64
9154 };
9155
9156 static enum insn_code const code_for_builtin_31[S390_BUILTIN_max] = {
9157 CODE_FOR_get_tp_31,
9158 CODE_FOR_set_tp_31
9159 };
9160
9161 static void
9162 s390_init_builtins (void)
9163 {
9164 tree ftype;
9165
9166 ftype = build_function_type (ptr_type_node, void_list_node);
9167 add_builtin_function ("__builtin_thread_pointer", ftype,
9168 S390_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
9169 NULL, NULL_TREE);
9170
9171 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
9172 add_builtin_function ("__builtin_set_thread_pointer", ftype,
9173 S390_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
9174 NULL, NULL_TREE);
9175 }
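/* With these definitions, user code such as

     void *tp = __builtin_thread_pointer ();
     __builtin_set_thread_pointer (tp);

   is expanded through the get_tp_64/set_tp_64 (or _31) insn patterns
   selected in s390_expand_builtin below.  */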
9176
9177 /* Expand an expression EXP that calls a built-in function,
9178 with result going to TARGET if that's convenient
9179 (and in mode MODE if that's convenient).
9180 SUBTARGET may be used as the target for computing one of EXP's operands.
9181 IGNORE is nonzero if the value is to be ignored. */
9182
9183 static rtx
9184 s390_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
9185 enum machine_mode mode ATTRIBUTE_UNUSED,
9186 int ignore ATTRIBUTE_UNUSED)
9187 {
9188 #define MAX_ARGS 2
9189
9190 enum insn_code const *code_for_builtin =
9191 TARGET_64BIT ? code_for_builtin_64 : code_for_builtin_31;
9192
9193 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
9194 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
9195 enum insn_code icode;
9196 rtx op[MAX_ARGS], pat;
9197 int arity;
9198 bool nonvoid;
9199 tree arg;
9200 call_expr_arg_iterator iter;
9201
9202 if (fcode >= S390_BUILTIN_max)
9203 internal_error ("bad builtin fcode");
9204 icode = code_for_builtin[fcode];
9205 if (icode == 0)
9206 internal_error ("bad builtin fcode");
9207
9208 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
9209
9210 arity = 0;
9211 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
9212 {
9213 const struct insn_operand_data *insn_op;
9214
9215 if (arg == error_mark_node)
9216 return NULL_RTX;
9217 if (arity > MAX_ARGS)
9218 return NULL_RTX;
9219
9220 insn_op = &insn_data[icode].operand[arity + nonvoid];
9221
9222 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
9223
9224 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
9225 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
9226 arity++;
9227 }
9228
9229 if (nonvoid)
9230 {
9231 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9232 if (!target
9233 || GET_MODE (target) != tmode
9234 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
9235 target = gen_reg_rtx (tmode);
9236 }
9237
9238 switch (arity)
9239 {
9240 case 0:
9241 pat = GEN_FCN (icode) (target);
9242 break;
9243 case 1:
9244 if (nonvoid)
9245 pat = GEN_FCN (icode) (target, op[0]);
9246 else
9247 pat = GEN_FCN (icode) (op[0]);
9248 break;
9249 case 2:
9250 pat = GEN_FCN (icode) (target, op[0], op[1]);
9251 break;
9252 default:
9253 gcc_unreachable ();
9254 }
9255 if (!pat)
9256 return NULL_RTX;
9257 emit_insn (pat);
9258
9259 if (nonvoid)
9260 return target;
9261 else
9262 return const0_rtx;
9263 }
9264
9265
9266 /* Output assembly code for the trampoline template to
9267 stdio stream FILE.
9268
9269 On S/390, we use gpr 1 internally in the trampoline code;
9270 gpr 0 is used to hold the static chain. */
9271
9272 static void
9273 s390_asm_trampoline_template (FILE *file)
9274 {
9275 rtx op[2];
9276 op[0] = gen_rtx_REG (Pmode, 0);
9277 op[1] = gen_rtx_REG (Pmode, 1);
9278
9279 if (TARGET_64BIT)
9280 {
9281 output_asm_insn ("basr\t%1,0", op);
9282 output_asm_insn ("lmg\t%0,%1,14(%1)", op);
9283 output_asm_insn ("br\t%1", op);
9284 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 10));
9285 }
9286 else
9287 {
9288 output_asm_insn ("basr\t%1,0", op);
9289 output_asm_insn ("lm\t%0,%1,6(%1)", op);
9290 output_asm_insn ("br\t%1", op);
9291 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 8));
9292 }
9293 }
9294
9295 /* Emit RTL insns to initialize the variable parts of a trampoline.
9296 FNADDR is an RTX for the address of the function's pure code.
9297 CXT is an RTX for the static chain value for the function. */
9298
9299 static void
9300 s390_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
9301 {
9302 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
9303 rtx mem;
9304
9305 emit_block_move (m_tramp, assemble_trampoline_template (),
9306 GEN_INT (2*UNITS_PER_WORD), BLOCK_OP_NORMAL);
9307
9308 mem = adjust_address (m_tramp, Pmode, 2*UNITS_PER_WORD);
9309 emit_move_insn (mem, cxt);
9310 mem = adjust_address (m_tramp, Pmode, 3*UNITS_PER_WORD);
9311 emit_move_insn (mem, fnaddr);
9312 }
9313
9314 /* Output assembler code to FILE to increment profiler label # LABELNO
9315 for profiling a function entry. */
9316
9317 void
9318 s390_function_profiler (FILE *file, int labelno)
9319 {
9320 rtx op[7];
9321
9322 char label[128];
9323 ASM_GENERATE_INTERNAL_LABEL (label, "LP", labelno);
9324
9325 fprintf (file, "# function profiler \n");
9326
9327 op[0] = gen_rtx_REG (Pmode, RETURN_REGNUM);
9328 op[1] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
9329 op[1] = gen_rtx_MEM (Pmode, plus_constant (op[1], UNITS_PER_LONG));
9330
9331 op[2] = gen_rtx_REG (Pmode, 1);
9332 op[3] = gen_rtx_SYMBOL_REF (Pmode, label);
9333 SYMBOL_REF_FLAGS (op[3]) = SYMBOL_FLAG_LOCAL;
9334
9335 op[4] = gen_rtx_SYMBOL_REF (Pmode, "_mcount");
9336 if (flag_pic)
9337 {
9338 op[4] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[4]), UNSPEC_PLT);
9339 op[4] = gen_rtx_CONST (Pmode, op[4]);
9340 }
9341
9342 if (TARGET_64BIT)
9343 {
9344 output_asm_insn ("stg\t%0,%1", op);
9345 output_asm_insn ("larl\t%2,%3", op);
9346 output_asm_insn ("brasl\t%0,%4", op);
9347 output_asm_insn ("lg\t%0,%1", op);
9348 }
9349 else if (!flag_pic)
9350 {
9351 op[6] = gen_label_rtx ();
9352
9353 output_asm_insn ("st\t%0,%1", op);
9354 output_asm_insn ("bras\t%2,%l6", op);
9355 output_asm_insn (".long\t%4", op);
9356 output_asm_insn (".long\t%3", op);
9357 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
9358 output_asm_insn ("l\t%0,0(%2)", op);
9359 output_asm_insn ("l\t%2,4(%2)", op);
9360 output_asm_insn ("basr\t%0,%0", op);
9361 output_asm_insn ("l\t%0,%1", op);
9362 }
9363 else
9364 {
9365 op[5] = gen_label_rtx ();
9366 op[6] = gen_label_rtx ();
9367
9368 output_asm_insn ("st\t%0,%1", op);
9369 output_asm_insn ("bras\t%2,%l6", op);
9370 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[5]));
9371 output_asm_insn (".long\t%4-%l5", op);
9372 output_asm_insn (".long\t%3-%l5", op);
9373 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
9374 output_asm_insn ("lr\t%0,%2", op);
9375 output_asm_insn ("a\t%0,0(%2)", op);
9376 output_asm_insn ("a\t%2,4(%2)", op);
9377 output_asm_insn ("basr\t%0,%0", op);
9378 output_asm_insn ("l\t%0,%1", op);
9379 }
9380 }
9381
9382 /* Encode symbol attributes (local vs. global, tls model) of a SYMBOL_REF
9383 into its SYMBOL_REF_FLAGS. */
9384
9385 static void
9386 s390_encode_section_info (tree decl, rtx rtl, int first)
9387 {
9388 default_encode_section_info (decl, rtl, first);
9389
9390 if (TREE_CODE (decl) == VAR_DECL)
9391 {
9392 /* If a variable has a forced alignment to < 2 bytes, mark it
9393 with SYMBOL_FLAG_ALIGN1 to prevent it from being used as LARL
9394 operand. */
9395 if (DECL_USER_ALIGN (decl) && DECL_ALIGN (decl) < 16)
9396 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_ALIGN1;
9397 if (!DECL_SIZE (decl)
9398 || !DECL_ALIGN (decl)
9399 || !host_integerp (DECL_SIZE (decl), 0)
9400 || (DECL_ALIGN (decl) <= 64
9401 && DECL_ALIGN (decl) != tree_low_cst (DECL_SIZE (decl), 0)))
9402 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
9403 }
9404
9405 /* Literal pool references don't have a decl so they are handled
9406 differently here. We rely on the information in the MEM_ALIGN
9407 entry to decide upon natural alignment. */
9408 if (MEM_P (rtl)
9409 && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
9410 && TREE_CONSTANT_POOL_ADDRESS_P (XEXP (rtl, 0))
9411 && (MEM_ALIGN (rtl) == 0
9412 || GET_MODE_BITSIZE (GET_MODE (rtl)) == 0
9413 || MEM_ALIGN (rtl) < GET_MODE_BITSIZE (GET_MODE (rtl))))
9414 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
9415 }
9416
9417 /* Output thunk to FILE that implements a C++ virtual function call (with
9418 multiple inheritance) to FUNCTION. The thunk adjusts the this pointer
9419 by DELTA, and unless VCALL_OFFSET is zero, applies an additional adjustment
9420 stored at VCALL_OFFSET in the vtable whose address is located at offset 0
9421 relative to the resulting this pointer. */
9422
9423 static void
9424 s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
9425 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
9426 tree function)
9427 {
9428 rtx op[10];
9429 int nonlocal = 0;
9430
9431 /* Make sure unwind info is emitted for the thunk if needed. */
9432 final_start_function (emit_barrier (), file, 1);
9433
9434 /* Operand 0 is the target function. */
9435 op[0] = XEXP (DECL_RTL (function), 0);
9436 if (flag_pic && !SYMBOL_REF_LOCAL_P (op[0]))
9437 {
9438 nonlocal = 1;
9439 op[0] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[0]),
9440 TARGET_64BIT ? UNSPEC_PLT : UNSPEC_GOT);
9441 op[0] = gen_rtx_CONST (Pmode, op[0]);
9442 }
9443
9444 /* Operand 1 is the 'this' pointer. */
9445 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
9446 op[1] = gen_rtx_REG (Pmode, 3);
9447 else
9448 op[1] = gen_rtx_REG (Pmode, 2);
9449
9450 /* Operand 2 is the delta. */
9451 op[2] = GEN_INT (delta);
9452
9453 /* Operand 3 is the vcall_offset. */
9454 op[3] = GEN_INT (vcall_offset);
9455
9456 /* Operand 4 is the temporary register. */
9457 op[4] = gen_rtx_REG (Pmode, 1);
9458
9459 /* Operands 5 to 8 can be used as labels. */
9460 op[5] = NULL_RTX;
9461 op[6] = NULL_RTX;
9462 op[7] = NULL_RTX;
9463 op[8] = NULL_RTX;
9464
9465 /* Operand 9 can be used for temporary register. */
9466 op[9] = NULL_RTX;
9467
9468 /* Generate code. */
9469 if (TARGET_64BIT)
9470 {
9471 /* Setup literal pool pointer if required. */
9472 if ((!DISP_IN_RANGE (delta)
9473 && !CONST_OK_FOR_K (delta)
9474 && !CONST_OK_FOR_Os (delta))
9475 || (!DISP_IN_RANGE (vcall_offset)
9476 && !CONST_OK_FOR_K (vcall_offset)
9477 && !CONST_OK_FOR_Os (vcall_offset)))
9478 {
9479 op[5] = gen_label_rtx ();
9480 output_asm_insn ("larl\t%4,%5", op);
9481 }
9482
9483 /* Add DELTA to this pointer. */
9484 if (delta)
9485 {
9486 if (CONST_OK_FOR_J (delta))
9487 output_asm_insn ("la\t%1,%2(%1)", op);
9488 else if (DISP_IN_RANGE (delta))
9489 output_asm_insn ("lay\t%1,%2(%1)", op);
9490 else if (CONST_OK_FOR_K (delta))
9491 output_asm_insn ("aghi\t%1,%2", op);
9492 else if (CONST_OK_FOR_Os (delta))
9493 output_asm_insn ("agfi\t%1,%2", op);
9494 else
9495 {
9496 op[6] = gen_label_rtx ();
9497 output_asm_insn ("agf\t%1,%6-%5(%4)", op);
9498 }
9499 }
9500
9501 /* Perform vcall adjustment. */
9502 if (vcall_offset)
9503 {
9504 if (DISP_IN_RANGE (vcall_offset))
9505 {
9506 output_asm_insn ("lg\t%4,0(%1)", op);
9507 output_asm_insn ("ag\t%1,%3(%4)", op);
9508 }
9509 else if (CONST_OK_FOR_K (vcall_offset))
9510 {
9511 output_asm_insn ("lghi\t%4,%3", op);
9512 output_asm_insn ("ag\t%4,0(%1)", op);
9513 output_asm_insn ("ag\t%1,0(%4)", op);
9514 }
9515 else if (CONST_OK_FOR_Os (vcall_offset))
9516 {
9517 output_asm_insn ("lgfi\t%4,%3", op);
9518 output_asm_insn ("ag\t%4,0(%1)", op);
9519 output_asm_insn ("ag\t%1,0(%4)", op);
9520 }
9521 else
9522 {
9523 op[7] = gen_label_rtx ();
9524 output_asm_insn ("llgf\t%4,%7-%5(%4)", op);
9525 output_asm_insn ("ag\t%4,0(%1)", op);
9526 output_asm_insn ("ag\t%1,0(%4)", op);
9527 }
9528 }
9529
9530 /* Jump to target. */
9531 output_asm_insn ("jg\t%0", op);
9532
9533 /* Output literal pool if required. */
9534 if (op[5])
9535 {
9536 output_asm_insn (".align\t4", op);
9537 targetm.asm_out.internal_label (file, "L",
9538 CODE_LABEL_NUMBER (op[5]));
9539 }
9540 if (op[6])
9541 {
9542 targetm.asm_out.internal_label (file, "L",
9543 CODE_LABEL_NUMBER (op[6]));
9544 output_asm_insn (".long\t%2", op);
9545 }
9546 if (op[7])
9547 {
9548 targetm.asm_out.internal_label (file, "L",
9549 CODE_LABEL_NUMBER (op[7]));
9550 output_asm_insn (".long\t%3", op);
9551 }
9552 }
9553 else
9554 {
9555 /* Setup base pointer if required. */
9556 if (!vcall_offset
9557 || (!DISP_IN_RANGE (delta)
9558 && !CONST_OK_FOR_K (delta)
9559 && !CONST_OK_FOR_Os (delta))
9560 || (!DISP_IN_RANGE (delta)
9561 && !CONST_OK_FOR_K (vcall_offset)
9562 && !CONST_OK_FOR_Os (vcall_offset)))
9563 {
9564 op[5] = gen_label_rtx ();
9565 output_asm_insn ("basr\t%4,0", op);
9566 targetm.asm_out.internal_label (file, "L",
9567 CODE_LABEL_NUMBER (op[5]));
9568 }
9569
9570 /* Add DELTA to this pointer. */
9571 if (delta)
9572 {
9573 if (CONST_OK_FOR_J (delta))
9574 output_asm_insn ("la\t%1,%2(%1)", op);
9575 else if (DISP_IN_RANGE (delta))
9576 output_asm_insn ("lay\t%1,%2(%1)", op);
9577 else if (CONST_OK_FOR_K (delta))
9578 output_asm_insn ("ahi\t%1,%2", op);
9579 else if (CONST_OK_FOR_Os (delta))
9580 output_asm_insn ("afi\t%1,%2", op);
9581 else
9582 {
9583 op[6] = gen_label_rtx ();
9584 output_asm_insn ("a\t%1,%6-%5(%4)", op);
9585 }
9586 }
9587
9588 /* Perform vcall adjustment. */
9589 if (vcall_offset)
9590 {
9591 if (CONST_OK_FOR_J (vcall_offset))
9592 {
9593 output_asm_insn ("l\t%4,0(%1)", op);
9594 output_asm_insn ("a\t%1,%3(%4)", op);
9595 }
9596 else if (DISP_IN_RANGE (vcall_offset))
9597 {
9598 output_asm_insn ("l\t%4,0(%1)", op);
9599 output_asm_insn ("ay\t%1,%3(%4)", op);
9600 }
9601 else if (CONST_OK_FOR_K (vcall_offset))
9602 {
9603 output_asm_insn ("lhi\t%4,%3", op);
9604 output_asm_insn ("a\t%4,0(%1)", op);
9605 output_asm_insn ("a\t%1,0(%4)", op);
9606 }
9607 else if (CONST_OK_FOR_Os (vcall_offset))
9608 {
9609 output_asm_insn ("iilf\t%4,%3", op);
9610 output_asm_insn ("a\t%4,0(%1)", op);
9611 output_asm_insn ("a\t%1,0(%4)", op);
9612 }
9613 else
9614 {
9615 op[7] = gen_label_rtx ();
9616 output_asm_insn ("l\t%4,%7-%5(%4)", op);
9617 output_asm_insn ("a\t%4,0(%1)", op);
9618 output_asm_insn ("a\t%1,0(%4)", op);
9619 }
9620
9621 /* We had to clobber the base pointer register.
9622 Re-setup the base pointer (with a different base). */
9623 op[5] = gen_label_rtx ();
9624 output_asm_insn ("basr\t%4,0", op);
9625 targetm.asm_out.internal_label (file, "L",
9626 CODE_LABEL_NUMBER (op[5]));
9627 }
9628
9629 /* Jump to target. */
9630 op[8] = gen_label_rtx ();
9631
9632 if (!flag_pic)
9633 output_asm_insn ("l\t%4,%8-%5(%4)", op);
9634 else if (!nonlocal)
9635 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9636 /* We cannot call through .plt, since .plt requires %r12 loaded. */
9637 else if (flag_pic == 1)
9638 {
9639 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9640 output_asm_insn ("l\t%4,%0(%4)", op);
9641 }
9642 else if (flag_pic == 2)
9643 {
9644 op[9] = gen_rtx_REG (Pmode, 0);
9645 output_asm_insn ("l\t%9,%8-4-%5(%4)", op);
9646 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9647 output_asm_insn ("ar\t%4,%9", op);
9648 output_asm_insn ("l\t%4,0(%4)", op);
9649 }
9650
9651 output_asm_insn ("br\t%4", op);
9652
9653 /* Output literal pool. */
9654 output_asm_insn (".align\t4", op);
9655
9656 if (nonlocal && flag_pic == 2)
9657 output_asm_insn (".long\t%0", op);
9658 if (nonlocal)
9659 {
9660 op[0] = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
9661 SYMBOL_REF_FLAGS (op[0]) = SYMBOL_FLAG_LOCAL;
9662 }
9663
9664 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[8]));
9665 if (!flag_pic)
9666 output_asm_insn (".long\t%0", op);
9667 else
9668 output_asm_insn (".long\t%0-%5", op);
9669
9670 if (op[6])
9671 {
9672 targetm.asm_out.internal_label (file, "L",
9673 CODE_LABEL_NUMBER (op[6]));
9674 output_asm_insn (".long\t%2", op);
9675 }
9676 if (op[7])
9677 {
9678 targetm.asm_out.internal_label (file, "L",
9679 CODE_LABEL_NUMBER (op[7]));
9680 output_asm_insn (".long\t%3", op);
9681 }
9682 }
9683 final_end_function ();
9684 }
9685
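/* A hedged illustration of the 31-bit DELTA handling in the thunk output
   above (example wording only; the real limits are whatever CONST_OK_FOR_J,
   CONST_OK_FOR_K, CONST_OK_FOR_Os and DISP_IN_RANGE accept).  Operand %1 is
   the this-pointer register, %2 the delta:

     delta in the short displacement range   ->  "la   %1,%2(%1)"
     delta in the long displacement range    ->  "lay  %1,%2(%1)"
     delta fitting a 16-bit immediate        ->  "ahi  %1,%2"
     delta fitting a 32-bit immediate        ->  "afi  %1,%2"
     anything else                           ->  ".long %2" pool entry at
                                                 label op[6], added via "a"  */
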
9686 static bool
9687 s390_valid_pointer_mode (enum machine_mode mode)
9688 {
9689 return (mode == SImode || (TARGET_64BIT && mode == DImode));
9690 }
9691
9692 /* Checks whether the given CALL_EXPR would use a call-saved
9693    register to pass an argument.  This is used to decide whether sibling
9694    call optimization could be performed on the respective function
9695    call.  */
9696
9697 static bool
9698 s390_call_saved_register_used (tree call_expr)
9699 {
9700 CUMULATIVE_ARGS cum;
9701 tree parameter;
9702 enum machine_mode mode;
9703 tree type;
9704 rtx parm_rtx;
9705 int reg, i;
9706
9707 INIT_CUMULATIVE_ARGS (cum, NULL, NULL, 0, 0);
9708
9709 for (i = 0; i < call_expr_nargs (call_expr); i++)
9710 {
9711 parameter = CALL_EXPR_ARG (call_expr, i);
9712 gcc_assert (parameter);
9713
9714 	 /* For an undeclared variable passed as a parameter we will get
9715 an ERROR_MARK node here. */
9716 if (TREE_CODE (parameter) == ERROR_MARK)
9717 return true;
9718
9719 type = TREE_TYPE (parameter);
9720 gcc_assert (type);
9721
9722 mode = TYPE_MODE (type);
9723 gcc_assert (mode);
9724
9725 if (pass_by_reference (&cum, mode, type, true))
9726 {
9727 mode = Pmode;
9728 type = build_pointer_type (type);
9729 }
9730
9731 parm_rtx = s390_function_arg (&cum, mode, type, 0);
9732
9733 s390_function_arg_advance (&cum, mode, type, 0);
9734
9735 if (!parm_rtx)
9736 continue;
9737
9738 if (REG_P (parm_rtx))
9739 {
9740 for (reg = 0;
9741 reg < HARD_REGNO_NREGS (REGNO (parm_rtx), GET_MODE (parm_rtx));
9742 reg++)
9743 if (!call_used_regs[reg + REGNO (parm_rtx)])
9744 return true;
9745 }
9746
9747 if (GET_CODE (parm_rtx) == PARALLEL)
9748 {
9749 int i;
9750
9751 for (i = 0; i < XVECLEN (parm_rtx, 0); i++)
9752 {
9753 rtx r = XEXP (XVECEXP (parm_rtx, 0, i), 0);
9754
9755 gcc_assert (REG_P (r));
9756
9757 for (reg = 0;
9758 reg < HARD_REGNO_NREGS (REGNO (r), GET_MODE (r));
9759 reg++)
9760 if (!call_used_regs[reg + REGNO (r)])
9761 return true;
9762 }
9763 }
9764
9765 }
9766 return false;
9767 }
9768
9769 /* Return true if the given call expression can be
9770 turned into a sibling call.
9771 DECL holds the declaration of the function to be called whereas
9772 EXP is the call expression itself. */
9773
9774 static bool
9775 s390_function_ok_for_sibcall (tree decl, tree exp)
9776 {
9777 /* The TPF epilogue uses register 1. */
9778 if (TARGET_TPF_PROFILING)
9779 return false;
9780
9781 /* The 31 bit PLT code uses register 12 (GOT pointer - caller saved)
9782 which would have to be restored before the sibcall. */
9783 if (!TARGET_64BIT && flag_pic && decl && !targetm.binds_local_p (decl))
9784 return false;
9785
9786 /* Register 6 on s390 is available as an argument register but is
9787    call-saved rather than call-clobbered.  This makes functions needing
9788    this register for arguments not suitable for sibcalls.  */
9789 return !s390_call_saved_register_used (exp);
9790 }
9791
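/* A minimal, hypothetical illustration of the sibcall restriction above
   (function names are made up).  The first five integer arguments are
   passed in %r2-%r6, and %r6 is preserved across calls, so a call that
   needs %r6 for an argument cannot become a sibling call, while a
   four-argument call still can.  */
#if 0
extern int callee4 (int, int, int, int);
extern int callee5 (int, int, int, int, int);

int
caller4 (int a, int b, int c, int d)
{
  return callee4 (a, b, c, d);     /* may be emitted as a sibling call */
}

int
caller5 (int a, int b, int c, int d, int e)
{
  return callee5 (a, b, c, d, e);  /* %r6 is needed: ordinary call */
}
#endif
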
9792 /* Return the fixed registers used for condition codes. */
9793
9794 static bool
9795 s390_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
9796 {
9797 *p1 = CC_REGNUM;
9798 *p2 = INVALID_REGNUM;
9799
9800 return true;
9801 }
9802
9803 /* This function is used by the call expanders of the machine description.
9804 It emits the call insn itself together with the necessary operations
9805 to adjust the target address and returns the emitted insn.
9806 ADDR_LOCATION is the target address rtx
9807 TLS_CALL the location of the thread-local symbol
9808 RESULT_REG the register where the result of the call should be stored
9809 RETADDR_REG the register where the return address should be stored
9810 If this parameter is NULL_RTX the call is considered
9811 to be a sibling call. */
9812
9813 rtx
9814 s390_emit_call (rtx addr_location, rtx tls_call, rtx result_reg,
9815 rtx retaddr_reg)
9816 {
9817 bool plt_call = false;
9818 rtx insn;
9819 rtx call;
9820 rtx clobber;
9821 rtvec vec;
9822
9823 /* Direct function calls need special treatment. */
9824 if (GET_CODE (addr_location) == SYMBOL_REF)
9825 {
9826 /* When calling a global routine in PIC mode, we must
9827 replace the symbol itself with the PLT stub. */
9828 if (flag_pic && !SYMBOL_REF_LOCAL_P (addr_location))
9829 {
9830 if (retaddr_reg != NULL_RTX)
9831 {
9832 addr_location = gen_rtx_UNSPEC (Pmode,
9833 gen_rtvec (1, addr_location),
9834 UNSPEC_PLT);
9835 addr_location = gen_rtx_CONST (Pmode, addr_location);
9836 plt_call = true;
9837 }
9838 else
9839 /* For -fpic code the PLT entries might use r12 which is
9840 call-saved. Therefore we cannot do a sibcall when
9841 calling directly using a symbol ref. When reaching
9842 this point we decided (in s390_function_ok_for_sibcall)
9843 to do a sibcall for a function pointer but one of the
9844 optimizers was able to get rid of the function pointer
9845 by propagating the symbol ref into the call. This
9846 optimization is illegal for S/390 so we turn the direct
9847 	       call into an indirect call again.  */
9848 addr_location = force_reg (Pmode, addr_location);
9849 }
9850
9851 /* Unless we can use the bras(l) insn, force the
9852 routine address into a register. */
9853 if (!TARGET_SMALL_EXEC && !TARGET_CPU_ZARCH)
9854 {
9855 if (flag_pic)
9856 addr_location = legitimize_pic_address (addr_location, 0);
9857 else
9858 addr_location = force_reg (Pmode, addr_location);
9859 }
9860 }
9861
9862 /* If it is already an indirect call or the code above moved the
9863      SYMBOL_REF to somewhere else, make sure the address can be found in
9864 register 1. */
9865 if (retaddr_reg == NULL_RTX
9866 && GET_CODE (addr_location) != SYMBOL_REF
9867 && !plt_call)
9868 {
9869 emit_move_insn (gen_rtx_REG (Pmode, SIBCALL_REGNUM), addr_location);
9870 addr_location = gen_rtx_REG (Pmode, SIBCALL_REGNUM);
9871 }
9872
9873 addr_location = gen_rtx_MEM (QImode, addr_location);
9874 call = gen_rtx_CALL (VOIDmode, addr_location, const0_rtx);
9875
9876 if (result_reg != NULL_RTX)
9877 call = gen_rtx_SET (VOIDmode, result_reg, call);
9878
9879 if (retaddr_reg != NULL_RTX)
9880 {
9881 clobber = gen_rtx_CLOBBER (VOIDmode, retaddr_reg);
9882
9883 if (tls_call != NULL_RTX)
9884 vec = gen_rtvec (3, call, clobber,
9885 gen_rtx_USE (VOIDmode, tls_call));
9886 else
9887 vec = gen_rtvec (2, call, clobber);
9888
9889 call = gen_rtx_PARALLEL (VOIDmode, vec);
9890 }
9891
9892 insn = emit_call_insn (call);
9893
9894 /* 31-bit PLT stubs and tls calls use the GOT register implicitly. */
9895 if ((!TARGET_64BIT && plt_call) || tls_call != NULL_RTX)
9896 {
9897 /* s390_function_ok_for_sibcall should
9898 have denied sibcalls in this case. */
9899 gcc_assert (retaddr_reg != NULL_RTX);
9900
9901 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
9902 }
9903 return insn;
9904 }
9905
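/* A hedged sketch (not taken from s390.md) of how a call expander might
   invoke s390_emit_call; the function name and parameters below are
   illustrative only.  */
#if 0
static rtx
example_expand_call (rtx addr, rtx result_reg, bool sibcall_p)
{
  if (sibcall_p)
    /* Passing NULL_RTX as RETADDR_REG marks this as a sibling call;
       s390_emit_call moves ADDR into %r1 if necessary.  */
    return s390_emit_call (addr, NULL_RTX, result_reg, NULL_RTX);

  /* An ordinary call clobbers the return-address register.  */
  return s390_emit_call (addr, NULL_RTX, result_reg,
			 gen_rtx_REG (Pmode, RETURN_REGNUM));
}
#endif
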
9906 /* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
9907
9908 static void
9909 s390_conditional_register_usage (void)
9910 {
9911 int i;
9912
9913 if (flag_pic)
9914 {
9915 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
9916 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
9917 }
9918 if (TARGET_CPU_ZARCH)
9919 {
9920 fixed_regs[BASE_REGNUM] = 0;
9921 call_used_regs[BASE_REGNUM] = 0;
9922 fixed_regs[RETURN_REGNUM] = 0;
9923 call_used_regs[RETURN_REGNUM] = 0;
9924 }
9925 if (TARGET_64BIT)
9926 {
9927 for (i = 24; i < 32; i++)
9928 call_used_regs[i] = call_really_used_regs[i] = 0;
9929 }
9930 else
9931 {
9932 for (i = 18; i < 20; i++)
9933 call_used_regs[i] = call_really_used_regs[i] = 0;
9934 }
9935
9936 if (TARGET_SOFT_FLOAT)
9937 {
9938 for (i = 16; i < 32; i++)
9939 call_used_regs[i] = fixed_regs[i] = 1;
9940 }
9941 }
9942
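/* For orientation, a hedged summary of what the adjustments above
   correspond to (not authoritative ABI text): with -fpic the GOT pointer
   %r12 becomes fixed; on 64-bit targets hard regs 24-31 (%f8-%f15) are
   treated as call-saved, while in 31-bit mode only hard regs 18-19
   (%f4/%f6) are; with -msoft-float all FPRs are made fixed.  */
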
9943 /* Corresponding function to eh_return expander. */
9944
9945 static GTY(()) rtx s390_tpf_eh_return_symbol;
9946 void
9947 s390_emit_tpf_eh_return (rtx target)
9948 {
9949 rtx insn, reg;
9950
9951 if (!s390_tpf_eh_return_symbol)
9952 s390_tpf_eh_return_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tpf_eh_return");
9953
9954 reg = gen_rtx_REG (Pmode, 2);
9955
9956 emit_move_insn (reg, target);
9957 insn = s390_emit_call (s390_tpf_eh_return_symbol, NULL_RTX, reg,
9958 gen_rtx_REG (Pmode, RETURN_REGNUM));
9959 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
9960
9961 emit_move_insn (EH_RETURN_HANDLER_RTX, reg);
9962 }
9963
9964 /* Rework the prologue/epilogue to avoid saving/restoring
9965 registers unnecessarily. */
9966
9967 static void
9968 s390_optimize_prologue (void)
9969 {
9970 rtx insn, new_insn, next_insn;
9971
9972 /* Do a final recompute of the frame-related data. */
9973
9974 s390_update_frame_layout ();
9975
9976 /* If all special registers are in fact used, there's nothing we
9977 can do, so no point in walking the insn list. */
9978
9979 if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
9980 && cfun_frame_layout.last_save_gpr >= BASE_REGNUM
9981 && (TARGET_CPU_ZARCH
9982 || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
9983 && cfun_frame_layout.last_save_gpr >= RETURN_REGNUM)))
9984 return;
9985
9986 /* Search for prologue/epilogue insns and replace them. */
9987
9988 for (insn = get_insns (); insn; insn = next_insn)
9989 {
9990 int first, last, off;
9991 rtx set, base, offset;
9992
9993 next_insn = NEXT_INSN (insn);
9994
9995 if (GET_CODE (insn) != INSN)
9996 continue;
9997
9998 if (GET_CODE (PATTERN (insn)) == PARALLEL
9999 && store_multiple_operation (PATTERN (insn), VOIDmode))
10000 {
10001 set = XVECEXP (PATTERN (insn), 0, 0);
10002 first = REGNO (SET_SRC (set));
10003 last = first + XVECLEN (PATTERN (insn), 0) - 1;
10004 offset = const0_rtx;
10005 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
10006 off = INTVAL (offset);
10007
10008 if (GET_CODE (base) != REG || off < 0)
10009 continue;
10010 if (cfun_frame_layout.first_save_gpr != -1
10011 && (cfun_frame_layout.first_save_gpr < first
10012 || cfun_frame_layout.last_save_gpr > last))
10013 continue;
10014 if (REGNO (base) != STACK_POINTER_REGNUM
10015 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10016 continue;
10017 if (first > BASE_REGNUM || last < BASE_REGNUM)
10018 continue;
10019
10020 if (cfun_frame_layout.first_save_gpr != -1)
10021 {
10022 new_insn = save_gprs (base,
10023 off + (cfun_frame_layout.first_save_gpr
10024 - first) * UNITS_PER_LONG,
10025 cfun_frame_layout.first_save_gpr,
10026 cfun_frame_layout.last_save_gpr);
10027 new_insn = emit_insn_before (new_insn, insn);
10028 INSN_ADDRESSES_NEW (new_insn, -1);
10029 }
10030
10031 remove_insn (insn);
10032 continue;
10033 }
10034
10035 if (cfun_frame_layout.first_save_gpr == -1
10036 && GET_CODE (PATTERN (insn)) == SET
10037 && GET_CODE (SET_SRC (PATTERN (insn))) == REG
10038 && (REGNO (SET_SRC (PATTERN (insn))) == BASE_REGNUM
10039 || (!TARGET_CPU_ZARCH
10040 && REGNO (SET_SRC (PATTERN (insn))) == RETURN_REGNUM))
10041 && GET_CODE (SET_DEST (PATTERN (insn))) == MEM)
10042 {
10043 set = PATTERN (insn);
10044 first = REGNO (SET_SRC (set));
10045 offset = const0_rtx;
10046 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
10047 off = INTVAL (offset);
10048
10049 if (GET_CODE (base) != REG || off < 0)
10050 continue;
10051 if (REGNO (base) != STACK_POINTER_REGNUM
10052 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10053 continue;
10054
10055 remove_insn (insn);
10056 continue;
10057 }
10058
10059 if (GET_CODE (PATTERN (insn)) == PARALLEL
10060 && load_multiple_operation (PATTERN (insn), VOIDmode))
10061 {
10062 set = XVECEXP (PATTERN (insn), 0, 0);
10063 first = REGNO (SET_DEST (set));
10064 last = first + XVECLEN (PATTERN (insn), 0) - 1;
10065 offset = const0_rtx;
10066 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
10067 off = INTVAL (offset);
10068
10069 if (GET_CODE (base) != REG || off < 0)
10070 continue;
10071 if (cfun_frame_layout.first_restore_gpr != -1
10072 && (cfun_frame_layout.first_restore_gpr < first
10073 || cfun_frame_layout.last_restore_gpr > last))
10074 continue;
10075 if (REGNO (base) != STACK_POINTER_REGNUM
10076 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10077 continue;
10078 if (first > BASE_REGNUM || last < BASE_REGNUM)
10079 continue;
10080
10081 if (cfun_frame_layout.first_restore_gpr != -1)
10082 {
10083 new_insn = restore_gprs (base,
10084 off + (cfun_frame_layout.first_restore_gpr
10085 - first) * UNITS_PER_LONG,
10086 cfun_frame_layout.first_restore_gpr,
10087 cfun_frame_layout.last_restore_gpr);
10088 new_insn = emit_insn_before (new_insn, insn);
10089 INSN_ADDRESSES_NEW (new_insn, -1);
10090 }
10091
10092 remove_insn (insn);
10093 continue;
10094 }
10095
10096 if (cfun_frame_layout.first_restore_gpr == -1
10097 && GET_CODE (PATTERN (insn)) == SET
10098 && GET_CODE (SET_DEST (PATTERN (insn))) == REG
10099 && (REGNO (SET_DEST (PATTERN (insn))) == BASE_REGNUM
10100 || (!TARGET_CPU_ZARCH
10101 && REGNO (SET_DEST (PATTERN (insn))) == RETURN_REGNUM))
10102 && GET_CODE (SET_SRC (PATTERN (insn))) == MEM)
10103 {
10104 set = PATTERN (insn);
10105 first = REGNO (SET_DEST (set));
10106 offset = const0_rtx;
10107 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
10108 off = INTVAL (offset);
10109
10110 if (GET_CODE (base) != REG || off < 0)
10111 continue;
10112 if (REGNO (base) != STACK_POINTER_REGNUM
10113 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10114 continue;
10115
10116 remove_insn (insn);
10117 continue;
10118 }
10119 }
10120 }
10121
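/* A hedged numeric illustration of the offset arithmetic used above
   (register numbers and offsets are made up).  If the original prologue
   stored %r6-%r15 at offset 48 from the stack pointer but only %r13-%r15
   actually need saving, the narrowed store-multiple starts at
   48 + (13 - 6) * UNITS_PER_LONG, i.e. at offset 104 on a 64-bit target
   where UNITS_PER_LONG is 8.  */
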
10122 /* On z10 and later the dynamic branch prediction must see the
10123    backward jump within a certain window.  If it does not, it falls back
10124    to the static prediction.  This function rearranges the loop backward
10125    branch in a way which makes the static prediction always correct.
10126    The function returns true if it added an instruction.  */
10127 static bool
10128 s390_fix_long_loop_prediction (rtx insn)
10129 {
10130 rtx set = single_set (insn);
10131 rtx code_label, label_ref, new_label;
10132 rtx uncond_jump;
10133 rtx cur_insn;
10134 rtx tmp;
10135 int distance;
10136
10137 /* This will exclude branch on count and branch on index patterns
10138 since these are correctly statically predicted. */
10139 if (!set
10140 || SET_DEST (set) != pc_rtx
10141 || GET_CODE (SET_SRC(set)) != IF_THEN_ELSE)
10142 return false;
10143
10144 label_ref = (GET_CODE (XEXP (SET_SRC (set), 1)) == LABEL_REF ?
10145 XEXP (SET_SRC (set), 1) : XEXP (SET_SRC (set), 2));
10146
10147 gcc_assert (GET_CODE (label_ref) == LABEL_REF);
10148
10149 code_label = XEXP (label_ref, 0);
10150
10151 if (INSN_ADDRESSES (INSN_UID (code_label)) == -1
10152 || INSN_ADDRESSES (INSN_UID (insn)) == -1
10153 || (INSN_ADDRESSES (INSN_UID (insn))
10154 - INSN_ADDRESSES (INSN_UID (code_label)) < PREDICT_DISTANCE))
10155 return false;
10156
10157 for (distance = 0, cur_insn = PREV_INSN (insn);
10158 distance < PREDICT_DISTANCE - 6;
10159 distance += get_attr_length (cur_insn), cur_insn = PREV_INSN (cur_insn))
10160 if (!cur_insn || JUMP_P (cur_insn) || LABEL_P (cur_insn))
10161 return false;
10162
10163 new_label = gen_label_rtx ();
10164 uncond_jump = emit_jump_insn_after (
10165 gen_rtx_SET (VOIDmode, pc_rtx,
10166 gen_rtx_LABEL_REF (VOIDmode, code_label)),
10167 insn);
10168 emit_label_after (new_label, uncond_jump);
10169
10170 tmp = XEXP (SET_SRC (set), 1);
10171 XEXP (SET_SRC (set), 1) = XEXP (SET_SRC (set), 2);
10172 XEXP (SET_SRC (set), 2) = tmp;
10173 INSN_CODE (insn) = -1;
10174
10175 XEXP (label_ref, 0) = new_label;
10176 JUMP_LABEL (insn) = new_label;
10177 JUMP_LABEL (uncond_jump) = code_label;
10178
10179 return true;
10180 }
10181
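/* A hedged sketch of the transformation performed above, in terms of
   the emitted assembly (mnemonics are illustrative):

     before:                        after:
       .Lloop:                        .Lloop:
         ...                            ...
         jne  .Lloop                    je   .Lnew
                                        j    .Lloop
                                      .Lnew:

   The far backward branch becomes unconditional, so the static
   prediction is always correct for it, and the new short forward
   conditional branch covers the loop-exit case.  */
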
10182 /* Returns 1 if INSN reads the value of REG for purposes not related
10183 to addressing of memory, and 0 otherwise. */
10184 static int
10185 s390_non_addr_reg_read_p (rtx reg, rtx insn)
10186 {
10187 return reg_referenced_p (reg, PATTERN (insn))
10188 && !reg_used_in_mem_p (REGNO (reg), PATTERN (insn));
10189 }
10190
10191 /* Starting from INSN find_cond_jump looks downwards in the insn
10192 stream for a single jump insn which is the last user of the
10193 condition code set in INSN. */
10194 static rtx
10195 find_cond_jump (rtx insn)
10196 {
10197 for (; insn; insn = NEXT_INSN (insn))
10198 {
10199 rtx ite, cc;
10200
10201 if (LABEL_P (insn))
10202 break;
10203
10204 if (!JUMP_P (insn))
10205 {
10206 if (reg_mentioned_p (gen_rtx_REG (CCmode, CC_REGNUM), insn))
10207 break;
10208 continue;
10209 }
10210
10211 /* This will be triggered by a return. */
10212 if (GET_CODE (PATTERN (insn)) != SET)
10213 break;
10214
10215 gcc_assert (SET_DEST (PATTERN (insn)) == pc_rtx);
10216 ite = SET_SRC (PATTERN (insn));
10217
10218 if (GET_CODE (ite) != IF_THEN_ELSE)
10219 break;
10220
10221 cc = XEXP (XEXP (ite, 0), 0);
10222 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc)))
10223 break;
10224
10225 if (find_reg_note (insn, REG_DEAD, cc))
10226 return insn;
10227 break;
10228 }
10229
10230 return NULL_RTX;
10231 }
10232
10233 /* Swap the condition in COND and the operands in OP0 and OP1 so that
10234 the semantics does not change. If NULL_RTX is passed as COND the
10235 function tries to find the conditional jump starting with INSN. */
10236 static void
10237 s390_swap_cmp (rtx cond, rtx *op0, rtx *op1, rtx insn)
10238 {
10239 rtx tmp = *op0;
10240
10241 if (cond == NULL_RTX)
10242 {
10243 rtx jump = find_cond_jump (NEXT_INSN (insn));
10244 jump = jump ? single_set (jump) : NULL_RTX;
10245
10246 if (jump == NULL_RTX)
10247 return;
10248
10249 cond = XEXP (XEXP (jump, 1), 0);
10250 }
10251
10252 *op0 = *op1;
10253 *op1 = tmp;
10254 PUT_CODE (cond, swap_condition (GET_CODE (cond)));
10255 }
10256
10257 /* On z10, instructions of the compare-and-branch family access the
10258    register occurring as their second operand with
10259    its bits complemented.  If such a compare is grouped with a second
10260    instruction that accesses the same register non-complemented, and
10261    if that register's value is delivered via a bypass, then the
10262    pipeline recycles, causing a significant performance penalty.
10263    This function locates such situations and exchanges the two
10264    operands of the compare.  The function returns true whenever it
10265    added an insn.  */
10266 static bool
10267 s390_z10_optimize_cmp (rtx insn)
10268 {
10269 rtx prev_insn, next_insn;
10270 bool insn_added_p = false;
10271 rtx cond, *op0, *op1;
10272
10273 if (GET_CODE (PATTERN (insn)) == PARALLEL)
10274 {
10275 /* Handle compare and branch and branch on count
10276 instructions. */
10277 rtx pattern = single_set (insn);
10278
10279 if (!pattern
10280 || SET_DEST (pattern) != pc_rtx
10281 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE)
10282 return false;
10283
10284 cond = XEXP (SET_SRC (pattern), 0);
10285 op0 = &XEXP (cond, 0);
10286 op1 = &XEXP (cond, 1);
10287 }
10288 else if (GET_CODE (PATTERN (insn)) == SET)
10289 {
10290 rtx src, dest;
10291
10292 /* Handle normal compare instructions. */
10293 src = SET_SRC (PATTERN (insn));
10294 dest = SET_DEST (PATTERN (insn));
10295
10296 if (!REG_P (dest)
10297 || !CC_REGNO_P (REGNO (dest))
10298 || GET_CODE (src) != COMPARE)
10299 return false;
10300
10301 /* s390_swap_cmp will try to find the conditional
10302 jump when passing NULL_RTX as condition. */
10303 cond = NULL_RTX;
10304 op0 = &XEXP (src, 0);
10305 op1 = &XEXP (src, 1);
10306 }
10307 else
10308 return false;
10309
10310 if (!REG_P (*op0) || !REG_P (*op1))
10311 return false;
10312
10313 if (GET_MODE_CLASS (GET_MODE (*op0)) != MODE_INT)
10314 return false;
10315
10316 /* Swap the COMPARE arguments and its mask if there is a
10317 conflicting access in the previous insn. */
10318 prev_insn = prev_active_insn (insn);
10319 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
10320 && reg_referenced_p (*op1, PATTERN (prev_insn)))
10321 s390_swap_cmp (cond, op0, op1, insn);
10322
10323 /* Check if there is a conflict with the next insn. If there
10324 was no conflict with the previous insn, then swap the
10325 COMPARE arguments and its mask. If we already swapped
10326 the operands, or if swapping them would cause a conflict
10327 with the previous insn, issue a NOP after the COMPARE in
10328 	 order to separate the two instructions.  */
10329 next_insn = next_active_insn (insn);
10330 if (next_insn != NULL_RTX && INSN_P (next_insn)
10331 && s390_non_addr_reg_read_p (*op1, next_insn))
10332 {
10333 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
10334 && s390_non_addr_reg_read_p (*op0, prev_insn))
10335 {
10336 if (REGNO (*op1) == 0)
10337 emit_insn_after (gen_nop1 (), insn);
10338 else
10339 emit_insn_after (gen_nop (), insn);
10340 insn_added_p = true;
10341 }
10342 else
10343 s390_swap_cmp (cond, op0, op1, insn);
10344 }
10345 return insn_added_p;
10346 }
10347
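/* A hedged example of the hazard handled above (register numbers and
   mnemonics are illustrative).  In a group such as

       cgrj  %r1,%r2,8,.Ldest    # reads %r2 with its bits complemented
       lgr   %r5,%r2             # reads %r2 normally, value via bypass

   swapping the compare operands (and the condition) so that %r2 becomes
   the first operand removes the conflicting access; where swapping is
   not possible a NOP is emitted after the compare instead.  */
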
10348 /* Perform machine-dependent processing. */
10349
10350 static void
10351 s390_reorg (void)
10352 {
10353 bool pool_overflow = false;
10354
10355 /* Make sure all splits have been performed; splits after
10356 machine_dependent_reorg might confuse insn length counts. */
10357 split_all_insns_noflow ();
10358
10359 /* Install the main literal pool and the associated base
10360 register load insns.
10361
10362 In addition, there are two problematic situations we need
10363 to correct:
10364
10365 - the literal pool might be > 4096 bytes in size, so that
10366 some of its elements cannot be directly accessed
10367
10368 - a branch target might be > 64K away from the branch, so that
10369 it is not possible to use a PC-relative instruction.
10370
10371 To fix those, we split the single literal pool into multiple
10372 pool chunks, reloading the pool base register at various
10373 points throughout the function to ensure it always points to
10374 the pool chunk the following code expects, and / or replace
10375 PC-relative branches by absolute branches.
10376
10377 However, the two problems are interdependent: splitting the
10378 literal pool can move a branch further away from its target,
10379 causing the 64K limit to overflow, and on the other hand,
10380 replacing a PC-relative branch by an absolute branch means
10381 we need to put the branch target address into the literal
10382 pool, possibly causing it to overflow.
10383
10384 So, we loop trying to fix up both problems until we manage
10385 to satisfy both conditions at the same time. Note that the
10386 loop is guaranteed to terminate as every pass of the loop
10387 strictly decreases the total number of PC-relative branches
10388 in the function. (This is not completely true as there
10389 might be branch-over-pool insns introduced by chunkify_start.
10390 Those never need to be split however.) */
10391
10392 for (;;)
10393 {
10394 struct constant_pool *pool = NULL;
10395
10396 /* Collect the literal pool. */
10397 if (!pool_overflow)
10398 {
10399 pool = s390_mainpool_start ();
10400 if (!pool)
10401 pool_overflow = true;
10402 }
10403
10404 /* If literal pool overflowed, start to chunkify it. */
10405 if (pool_overflow)
10406 pool = s390_chunkify_start ();
10407
10408 	 /* Split out-of-range branches.  If this has created new
10409 	    literal pool entries, cancel the current chunk list and
10410 	    recompute it.  zSeries machines have branch instructions with
10411 	    a large displacement range, so we never need to split a branch.  */
10412 if (!TARGET_CPU_ZARCH && s390_split_branches ())
10413 {
10414 if (pool_overflow)
10415 s390_chunkify_cancel (pool);
10416 else
10417 s390_mainpool_cancel (pool);
10418
10419 continue;
10420 }
10421
10422 /* If we made it up to here, both conditions are satisfied.
10423 Finish up literal pool related changes. */
10424 if (pool_overflow)
10425 s390_chunkify_finish (pool);
10426 else
10427 s390_mainpool_finish (pool);
10428
10429 /* We're done splitting branches. */
10430 cfun->machine->split_branches_pending_p = false;
10431 break;
10432 }
10433
10434 /* Generate out-of-pool execute target insns. */
10435 if (TARGET_CPU_ZARCH)
10436 {
10437 rtx insn, label, target;
10438
10439 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10440 {
10441 label = s390_execute_label (insn);
10442 if (!label)
10443 continue;
10444
10445 gcc_assert (label != const0_rtx);
10446
10447 target = emit_label (XEXP (label, 0));
10448 INSN_ADDRESSES_NEW (target, -1);
10449
10450 target = emit_insn (s390_execute_target (insn));
10451 INSN_ADDRESSES_NEW (target, -1);
10452 }
10453 }
10454
10455 /* Try to optimize prologue and epilogue further. */
10456 s390_optimize_prologue ();
10457
10458   /* Walk over the insns and apply some changes specific to z10 and later.  */
10459 if (s390_tune == PROCESSOR_2097_Z10
10460 || s390_tune == PROCESSOR_2817_Z196)
10461 {
10462 rtx insn;
10463 bool insn_added_p = false;
10464
10465 /* The insn lengths and addresses have to be up to date for the
10466 following manipulations. */
10467 shorten_branches (get_insns ());
10468
10469 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10470 {
10471 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
10472 continue;
10473
10474 if (JUMP_P (insn))
10475 insn_added_p |= s390_fix_long_loop_prediction (insn);
10476
10477 if ((GET_CODE (PATTERN (insn)) == PARALLEL
10478 || GET_CODE (PATTERN (insn)) == SET)
10479 && s390_tune == PROCESSOR_2097_Z10)
10480 insn_added_p |= s390_z10_optimize_cmp (insn);
10481 }
10482
10483 /* Adjust branches if we added new instructions. */
10484 if (insn_added_p)
10485 shorten_branches (get_insns ());
10486 }
10487 }
10488
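/* A hedged illustration of the fix-up loop in s390_reorg above (sizes are
   examples).  Suppose a function needs 6000 bytes of literal-pool
   constants on a target without the large zSeries branch instructions:
   s390_mainpool_start fails, the pool is chunkified, and the chunking may
   push a conditional branch beyond its 64K range; s390_split_branches
   then rewrites that branch via a pool entry, the current chunk list is
   cancelled, and the loop repeats until neither problem remains.  */
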
10489 /* Return true if INSN is an fp load insn writing register REGNO.  */
10490 static inline bool
10491 s390_fpload_toreg (rtx insn, unsigned int regno)
10492 {
10493 rtx set;
10494 enum attr_type flag = s390_safe_attr_type (insn);
10495
10496 if (flag != TYPE_FLOADSF && flag != TYPE_FLOADDF)
10497 return false;
10498
10499 set = single_set (insn);
10500
10501 if (set == NULL_RTX)
10502 return false;
10503
10504 if (!REG_P (SET_DEST (set)) || !MEM_P (SET_SRC (set)))
10505 return false;
10506
10507 if (REGNO (SET_DEST (set)) != regno)
10508 return false;
10509
10510 return true;
10511 }
10512
10513 /* This value describes the distance to be avoided between an
10514    arithmetic fp instruction and an fp load writing the same register.
10515    Z10_EARLYLOAD_DISTANCE - 1 as well as Z10_EARLYLOAD_DISTANCE + 1 are
10516    fine, but the exact value has to be avoided.  Otherwise the FP
10517 pipeline will throw an exception causing a major penalty. */
10518 #define Z10_EARLYLOAD_DISTANCE 7
10519
10520 /* Rearrange the ready list in order to avoid the situation described
10521 for Z10_EARLYLOAD_DISTANCE. A problematic load instruction is
10522 moved to the very end of the ready list. */
10523 static void
10524 s390_z10_prevent_earlyload_conflicts (rtx *ready, int *nready_p)
10525 {
10526 unsigned int regno;
10527 int nready = *nready_p;
10528 rtx tmp;
10529 int i;
10530 rtx insn;
10531 rtx set;
10532 enum attr_type flag;
10533 int distance;
10534
10535 /* Skip DISTANCE - 1 active insns. */
10536 for (insn = last_scheduled_insn, distance = Z10_EARLYLOAD_DISTANCE - 1;
10537 distance > 0 && insn != NULL_RTX;
10538 distance--, insn = prev_active_insn (insn))
10539 if (CALL_P (insn) || JUMP_P (insn))
10540 return;
10541
10542 if (insn == NULL_RTX)
10543 return;
10544
10545 set = single_set (insn);
10546
10547 if (set == NULL_RTX || !REG_P (SET_DEST (set))
10548 || GET_MODE_CLASS (GET_MODE (SET_DEST (set))) != MODE_FLOAT)
10549 return;
10550
10551 flag = s390_safe_attr_type (insn);
10552
10553 if (flag == TYPE_FLOADSF || flag == TYPE_FLOADDF)
10554 return;
10555
10556 regno = REGNO (SET_DEST (set));
10557 i = nready - 1;
10558
10559 while (!s390_fpload_toreg (ready[i], regno) && i > 0)
10560 i--;
10561
10562 if (!i)
10563 return;
10564
10565 tmp = ready[i];
10566 memmove (&ready[1], &ready[0], sizeof (rtx) * i);
10567 ready[0] = tmp;
10568 }
10569
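/* A hedged illustration of the rule above (register number made up):
   an fp load into %f2 that would otherwise be issued exactly
   Z10_EARLYLOAD_DISTANCE slots after an arithmetic fp instruction
   writing %f2 is pushed to the back of the issue order, so it ends up
   at a harmless distance instead.  */
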
10570 /* This function is called via hook TARGET_SCHED_REORDER before
10571    issuing one insn from the list READY, which contains *NREADYP entries.
10572    For target z10 it reorders load instructions to avoid early load
10573    conflicts in the floating point pipeline.  */
10574 static int
10575 s390_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
10576 rtx *ready, int *nreadyp, int clock ATTRIBUTE_UNUSED)
10577 {
10578 if (s390_tune == PROCESSOR_2097_Z10)
10579 if (reload_completed && *nreadyp > 1)
10580 s390_z10_prevent_earlyload_conflicts (ready, nreadyp);
10581
10582 return s390_issue_rate ();
10583 }
10584
10585 /* This function is called via hook TARGET_SCHED_VARIABLE_ISSUE after
10586 the scheduler has issued INSN. It stores the last issued insn into
10587 last_scheduled_insn in order to make it available for
10588 s390_sched_reorder. */
10589 static int
10590 s390_sched_variable_issue (FILE *file ATTRIBUTE_UNUSED,
10591 int verbose ATTRIBUTE_UNUSED,
10592 rtx insn, int more)
10593 {
10594 last_scheduled_insn = insn;
10595
10596 if (GET_CODE (PATTERN (insn)) != USE
10597 && GET_CODE (PATTERN (insn)) != CLOBBER)
10598 return more - 1;
10599 else
10600 return more;
10601 }
10602
10603 static void
10604 s390_sched_init (FILE *file ATTRIBUTE_UNUSED,
10605 int verbose ATTRIBUTE_UNUSED,
10606 int max_ready ATTRIBUTE_UNUSED)
10607 {
10608 last_scheduled_insn = NULL_RTX;
10609 }
10610
10611 /* This function checks the whole of insn X for memory references. The
10612 function always returns zero because the framework it is called
10613 from would stop recursively analyzing the insn upon a return value
10614 other than zero. The real result of this function is updating
10615 counter variable MEM_COUNT. */
10616 static int
10617 check_dpu (rtx *x, unsigned *mem_count)
10618 {
10619 if (*x != NULL_RTX && MEM_P (*x))
10620 (*mem_count)++;
10621 return 0;
10622 }
10623
10624 /* This target hook implementation for TARGET_LOOP_UNROLL_ADJUST calculates
10625    a new unroll count for struct loop *loop when tuning for cpus with
10626    a built-in stride prefetcher.
10627    The loop is analyzed for memory accesses by calling check_dpu for
10628    each rtx of the loop.  Depending on the loop depth and the number of
10629    memory accesses a new count <= nunroll is returned to improve the
10630    behaviour of the hardware prefetch unit.  */
10631 static unsigned
10632 s390_loop_unroll_adjust (unsigned nunroll, struct loop *loop)
10633 {
10634 basic_block *bbs;
10635 rtx insn;
10636 unsigned i;
10637 unsigned mem_count = 0;
10638
10639 if (s390_tune != PROCESSOR_2097_Z10 && s390_tune != PROCESSOR_2817_Z196)
10640 return nunroll;
10641
10642 /* Count the number of memory references within the loop body. */
10643 bbs = get_loop_body (loop);
10644 for (i = 0; i < loop->num_nodes; i++)
10645 {
10646 for (insn = BB_HEAD (bbs[i]); insn != BB_END (bbs[i]); insn = NEXT_INSN (insn))
10647 if (INSN_P (insn) && INSN_CODE (insn) != -1)
10648 for_each_rtx (&insn, (rtx_function) check_dpu, &mem_count);
10649 }
10650 free (bbs);
10651
10652   /* Prevent division by zero; we do not need to adjust nunroll in this case.  */
10653 if (mem_count == 0)
10654 return nunroll;
10655
10656 switch (loop_depth(loop))
10657 {
10658 case 1:
10659 return MIN (nunroll, 28 / mem_count);
10660 case 2:
10661 return MIN (nunroll, 22 / mem_count);
10662 default:
10663 return MIN (nunroll, 16 / mem_count);
10664 }
10665 }
10666
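/* A hedged worked example for the unroll adjustment above (the memory
   reference count is made up).  For a depth-1 loop containing 4 memory
   references the cap is 28 / 4 = 7, so a requested nunroll of 8 is
   reduced to 7 while a requested nunroll of 4 is left unchanged.  */
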
10667 /* Initialize GCC target structure. */
10668
10669 #undef TARGET_ASM_ALIGNED_HI_OP
10670 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
10671 #undef TARGET_ASM_ALIGNED_DI_OP
10672 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
10673 #undef TARGET_ASM_INTEGER
10674 #define TARGET_ASM_INTEGER s390_assemble_integer
10675
10676 #undef TARGET_ASM_OPEN_PAREN
10677 #define TARGET_ASM_OPEN_PAREN ""
10678
10679 #undef TARGET_ASM_CLOSE_PAREN
10680 #define TARGET_ASM_CLOSE_PAREN ""
10681
10682 #undef TARGET_DEFAULT_TARGET_FLAGS
10683 #define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT)
10684
10685 #undef TARGET_HANDLE_OPTION
10686 #define TARGET_HANDLE_OPTION s390_handle_option
10687
10688 #undef TARGET_OPTION_OVERRIDE
10689 #define TARGET_OPTION_OVERRIDE s390_option_override
10690
10691 #undef TARGET_OPTION_OPTIMIZATION_TABLE
10692 #define TARGET_OPTION_OPTIMIZATION_TABLE s390_option_optimization_table
10693
10694 #undef TARGET_OPTION_INIT_STRUCT
10695 #define TARGET_OPTION_INIT_STRUCT s390_option_init_struct
10696
10697 #undef TARGET_ENCODE_SECTION_INFO
10698 #define TARGET_ENCODE_SECTION_INFO s390_encode_section_info
10699
10700 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10701 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
10702
10703 #ifdef HAVE_AS_TLS
10704 #undef TARGET_HAVE_TLS
10705 #define TARGET_HAVE_TLS true
10706 #endif
10707 #undef TARGET_CANNOT_FORCE_CONST_MEM
10708 #define TARGET_CANNOT_FORCE_CONST_MEM s390_cannot_force_const_mem
10709
10710 #undef TARGET_DELEGITIMIZE_ADDRESS
10711 #define TARGET_DELEGITIMIZE_ADDRESS s390_delegitimize_address
10712
10713 #undef TARGET_LEGITIMIZE_ADDRESS
10714 #define TARGET_LEGITIMIZE_ADDRESS s390_legitimize_address
10715
10716 #undef TARGET_RETURN_IN_MEMORY
10717 #define TARGET_RETURN_IN_MEMORY s390_return_in_memory
10718
10719 #undef TARGET_INIT_BUILTINS
10720 #define TARGET_INIT_BUILTINS s390_init_builtins
10721 #undef TARGET_EXPAND_BUILTIN
10722 #define TARGET_EXPAND_BUILTIN s390_expand_builtin
10723
10724 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
10725 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA s390_output_addr_const_extra
10726
10727 #undef TARGET_ASM_OUTPUT_MI_THUNK
10728 #define TARGET_ASM_OUTPUT_MI_THUNK s390_output_mi_thunk
10729 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
10730 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
10731
10732 #undef TARGET_SCHED_ADJUST_PRIORITY
10733 #define TARGET_SCHED_ADJUST_PRIORITY s390_adjust_priority
10734 #undef TARGET_SCHED_ISSUE_RATE
10735 #define TARGET_SCHED_ISSUE_RATE s390_issue_rate
10736 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
10737 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD s390_first_cycle_multipass_dfa_lookahead
10738
10739 #undef TARGET_SCHED_VARIABLE_ISSUE
10740 #define TARGET_SCHED_VARIABLE_ISSUE s390_sched_variable_issue
10741 #undef TARGET_SCHED_REORDER
10742 #define TARGET_SCHED_REORDER s390_sched_reorder
10743 #undef TARGET_SCHED_INIT
10744 #define TARGET_SCHED_INIT s390_sched_init
10745
10746 #undef TARGET_CANNOT_COPY_INSN_P
10747 #define TARGET_CANNOT_COPY_INSN_P s390_cannot_copy_insn_p
10748 #undef TARGET_RTX_COSTS
10749 #define TARGET_RTX_COSTS s390_rtx_costs
10750 #undef TARGET_ADDRESS_COST
10751 #define TARGET_ADDRESS_COST s390_address_cost
10752 #undef TARGET_REGISTER_MOVE_COST
10753 #define TARGET_REGISTER_MOVE_COST s390_register_move_cost
10754 #undef TARGET_MEMORY_MOVE_COST
10755 #define TARGET_MEMORY_MOVE_COST s390_memory_move_cost
10756
10757 #undef TARGET_MACHINE_DEPENDENT_REORG
10758 #define TARGET_MACHINE_DEPENDENT_REORG s390_reorg
10759
10760 #undef TARGET_VALID_POINTER_MODE
10761 #define TARGET_VALID_POINTER_MODE s390_valid_pointer_mode
10762
10763 #undef TARGET_BUILD_BUILTIN_VA_LIST
10764 #define TARGET_BUILD_BUILTIN_VA_LIST s390_build_builtin_va_list
10765 #undef TARGET_EXPAND_BUILTIN_VA_START
10766 #define TARGET_EXPAND_BUILTIN_VA_START s390_va_start
10767 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
10768 #define TARGET_GIMPLIFY_VA_ARG_EXPR s390_gimplify_va_arg
10769
10770 #undef TARGET_PROMOTE_FUNCTION_MODE
10771 #define TARGET_PROMOTE_FUNCTION_MODE s390_promote_function_mode
10772 #undef TARGET_PASS_BY_REFERENCE
10773 #define TARGET_PASS_BY_REFERENCE s390_pass_by_reference
10774
10775 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
10776 #define TARGET_FUNCTION_OK_FOR_SIBCALL s390_function_ok_for_sibcall
10777 #undef TARGET_FUNCTION_ARG
10778 #define TARGET_FUNCTION_ARG s390_function_arg
10779 #undef TARGET_FUNCTION_ARG_ADVANCE
10780 #define TARGET_FUNCTION_ARG_ADVANCE s390_function_arg_advance
10781 #undef TARGET_FUNCTION_VALUE
10782 #define TARGET_FUNCTION_VALUE s390_function_value
10783 #undef TARGET_LIBCALL_VALUE
10784 #define TARGET_LIBCALL_VALUE s390_libcall_value
10785
10786 #undef TARGET_FIXED_CONDITION_CODE_REGS
10787 #define TARGET_FIXED_CONDITION_CODE_REGS s390_fixed_condition_code_regs
10788
10789 #undef TARGET_CC_MODES_COMPATIBLE
10790 #define TARGET_CC_MODES_COMPATIBLE s390_cc_modes_compatible
10791
10792 #undef TARGET_INVALID_WITHIN_DOLOOP
10793 #define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_null
10794
10795 #ifdef HAVE_AS_TLS
10796 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
10797 #define TARGET_ASM_OUTPUT_DWARF_DTPREL s390_output_dwarf_dtprel
10798 #endif
10799
10800 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
10801 #undef TARGET_MANGLE_TYPE
10802 #define TARGET_MANGLE_TYPE s390_mangle_type
10803 #endif
10804
10805 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10806 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
10807
10808 #undef TARGET_PREFERRED_RELOAD_CLASS
10809 #define TARGET_PREFERRED_RELOAD_CLASS s390_preferred_reload_class
10810
10811 #undef TARGET_SECONDARY_RELOAD
10812 #define TARGET_SECONDARY_RELOAD s390_secondary_reload
10813
10814 #undef TARGET_LIBGCC_CMP_RETURN_MODE
10815 #define TARGET_LIBGCC_CMP_RETURN_MODE s390_libgcc_cmp_return_mode
10816
10817 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
10818 #define TARGET_LIBGCC_SHIFT_COUNT_MODE s390_libgcc_shift_count_mode
10819
10820 #undef TARGET_LEGITIMATE_ADDRESS_P
10821 #define TARGET_LEGITIMATE_ADDRESS_P s390_legitimate_address_p
10822
10823 #undef TARGET_CAN_ELIMINATE
10824 #define TARGET_CAN_ELIMINATE s390_can_eliminate
10825
10826 #undef TARGET_CONDITIONAL_REGISTER_USAGE
10827 #define TARGET_CONDITIONAL_REGISTER_USAGE s390_conditional_register_usage
10828
10829 #undef TARGET_LOOP_UNROLL_ADJUST
10830 #define TARGET_LOOP_UNROLL_ADJUST s390_loop_unroll_adjust
10831
10832 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
10833 #define TARGET_ASM_TRAMPOLINE_TEMPLATE s390_asm_trampoline_template
10834 #undef TARGET_TRAMPOLINE_INIT
10835 #define TARGET_TRAMPOLINE_INIT s390_trampoline_init
10836
10837 #undef TARGET_UNWIND_WORD_MODE
10838 #define TARGET_UNWIND_WORD_MODE s390_unwind_word_mode
10839
10840 struct gcc_target targetm = TARGET_INITIALIZER;
10841
10842 #include "gt-s390.h"