re PR target/50395 (Infinite loop when bootstrapping java)
[gcc.git] / gcc / config / s390 / s390.c
1 /* Subroutines used for code generation on IBM S/390 and zSeries
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006,
3 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
4 Contributed by Hartmut Penner (hpenner@de.ibm.com) and
5 Ulrich Weigand (uweigand@de.ibm.com) and
6 Andreas Krebbel (Andreas.Krebbel@de.ibm.com).
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "tm_p.h"
31 #include "regs.h"
32 #include "hard-reg-set.h"
33 #include "insn-config.h"
34 #include "conditions.h"
35 #include "output.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "except.h"
39 #include "function.h"
40 #include "recog.h"
41 #include "expr.h"
42 #include "reload.h"
43 #include "diagnostic-core.h"
44 #include "basic-block.h"
45 #include "integrate.h"
46 #include "ggc.h"
47 #include "target.h"
48 #include "target-def.h"
49 #include "debug.h"
50 #include "langhooks.h"
51 #include "optabs.h"
52 #include "gimple.h"
53 #include "df.h"
54 #include "params.h"
55 #include "cfgloop.h"
56 #include "opts.h"
57
58 /* Define the specific costs for a given cpu. */
59
60 struct processor_costs
61 {
62 /* multiplication */
63 const int m; /* cost of an M instruction. */
64 const int mghi; /* cost of an MGHI instruction. */
65 const int mh; /* cost of an MH instruction. */
66 const int mhi; /* cost of an MHI instruction. */
67 const int ml; /* cost of an ML instruction. */
68 const int mr; /* cost of an MR instruction. */
69 const int ms; /* cost of an MS instruction. */
70 const int msg; /* cost of an MSG instruction. */
71 const int msgf; /* cost of an MSGF instruction. */
72 const int msgfr; /* cost of an MSGFR instruction. */
73 const int msgr; /* cost of an MSGR instruction. */
74 const int msr; /* cost of an MSR instruction. */
75 const int mult_df; /* cost of multiplication in DFmode. */
76 const int mxbr;
77 /* square root */
78 const int sqxbr; /* cost of square root in TFmode. */
79 const int sqdbr; /* cost of square root in DFmode. */
80 const int sqebr; /* cost of square root in SFmode. */
81 /* multiply and add */
82 const int madbr; /* cost of multiply and add in DFmode. */
83 const int maebr; /* cost of multiply and add in SFmode. */
84 /* division */
85 const int dxbr;
86 const int ddbr;
87 const int debr;
88 const int dlgr;
89 const int dlr;
90 const int dr;
91 const int dsgfr;
92 const int dsgr;
93 };
94
95 const struct processor_costs *s390_cost;
96
97 static const
98 struct processor_costs z900_cost =
99 {
100 COSTS_N_INSNS (5), /* M */
101 COSTS_N_INSNS (10), /* MGHI */
102 COSTS_N_INSNS (5), /* MH */
103 COSTS_N_INSNS (4), /* MHI */
104 COSTS_N_INSNS (5), /* ML */
105 COSTS_N_INSNS (5), /* MR */
106 COSTS_N_INSNS (4), /* MS */
107 COSTS_N_INSNS (15), /* MSG */
108 COSTS_N_INSNS (7), /* MSGF */
109 COSTS_N_INSNS (7), /* MSGFR */
110 COSTS_N_INSNS (10), /* MSGR */
111 COSTS_N_INSNS (4), /* MSR */
112 COSTS_N_INSNS (7), /* multiplication in DFmode */
113 COSTS_N_INSNS (13), /* MXBR */
114 COSTS_N_INSNS (136), /* SQXBR */
115 COSTS_N_INSNS (44), /* SQDBR */
116 COSTS_N_INSNS (35), /* SQEBR */
117 COSTS_N_INSNS (18), /* MADBR */
118 COSTS_N_INSNS (13), /* MAEBR */
119 COSTS_N_INSNS (134), /* DXBR */
120 COSTS_N_INSNS (30), /* DDBR */
121 COSTS_N_INSNS (27), /* DEBR */
122 COSTS_N_INSNS (220), /* DLGR */
123 COSTS_N_INSNS (34), /* DLR */
124 COSTS_N_INSNS (34), /* DR */
125 COSTS_N_INSNS (32), /* DSGFR */
126 COSTS_N_INSNS (32), /* DSGR */
127 };
128
129 static const
130 struct processor_costs z990_cost =
131 {
132 COSTS_N_INSNS (4), /* M */
133 COSTS_N_INSNS (2), /* MGHI */
134 COSTS_N_INSNS (2), /* MH */
135 COSTS_N_INSNS (2), /* MHI */
136 COSTS_N_INSNS (4), /* ML */
137 COSTS_N_INSNS (4), /* MR */
138 COSTS_N_INSNS (5), /* MS */
139 COSTS_N_INSNS (6), /* MSG */
140 COSTS_N_INSNS (4), /* MSGF */
141 COSTS_N_INSNS (4), /* MSGFR */
142 COSTS_N_INSNS (4), /* MSGR */
143 COSTS_N_INSNS (4), /* MSR */
144 COSTS_N_INSNS (1), /* multiplication in DFmode */
145 COSTS_N_INSNS (28), /* MXBR */
146 COSTS_N_INSNS (130), /* SQXBR */
147 COSTS_N_INSNS (66), /* SQDBR */
148 COSTS_N_INSNS (38), /* SQEBR */
149 COSTS_N_INSNS (1), /* MADBR */
150 COSTS_N_INSNS (1), /* MAEBR */
151 COSTS_N_INSNS (60), /* DXBR */
152 COSTS_N_INSNS (40), /* DDBR */
153 COSTS_N_INSNS (26), /* DEBR */
154 COSTS_N_INSNS (176), /* DLGR */
155 COSTS_N_INSNS (31), /* DLR */
156 COSTS_N_INSNS (31), /* DR */
157 COSTS_N_INSNS (31), /* DSGFR */
158 COSTS_N_INSNS (31), /* DSGR */
159 };
160
161 static const
162 struct processor_costs z9_109_cost =
163 {
164 COSTS_N_INSNS (4), /* M */
165 COSTS_N_INSNS (2), /* MGHI */
166 COSTS_N_INSNS (2), /* MH */
167 COSTS_N_INSNS (2), /* MHI */
168 COSTS_N_INSNS (4), /* ML */
169 COSTS_N_INSNS (4), /* MR */
170 COSTS_N_INSNS (5), /* MS */
171 COSTS_N_INSNS (6), /* MSG */
172 COSTS_N_INSNS (4), /* MSGF */
173 COSTS_N_INSNS (4), /* MSGFR */
174 COSTS_N_INSNS (4), /* MSGR */
175 COSTS_N_INSNS (4), /* MSR */
176 COSTS_N_INSNS (1), /* multiplication in DFmode */
177 COSTS_N_INSNS (28), /* MXBR */
178 COSTS_N_INSNS (130), /* SQXBR */
179 COSTS_N_INSNS (66), /* SQDBR */
180 COSTS_N_INSNS (38), /* SQEBR */
181 COSTS_N_INSNS (1), /* MADBR */
182 COSTS_N_INSNS (1), /* MAEBR */
183 COSTS_N_INSNS (60), /* DXBR */
184 COSTS_N_INSNS (40), /* DDBR */
185 COSTS_N_INSNS (26), /* DEBR */
186 COSTS_N_INSNS (30), /* DLGR */
187 COSTS_N_INSNS (23), /* DLR */
188 COSTS_N_INSNS (23), /* DR */
189 COSTS_N_INSNS (24), /* DSGFR */
190 COSTS_N_INSNS (24), /* DSGR */
191 };
192
193 static const
194 struct processor_costs z10_cost =
195 {
196 COSTS_N_INSNS (10), /* M */
197 COSTS_N_INSNS (10), /* MGHI */
198 COSTS_N_INSNS (10), /* MH */
199 COSTS_N_INSNS (10), /* MHI */
200 COSTS_N_INSNS (10), /* ML */
201 COSTS_N_INSNS (10), /* MR */
202 COSTS_N_INSNS (10), /* MS */
203 COSTS_N_INSNS (10), /* MSG */
204 COSTS_N_INSNS (10), /* MSGF */
205 COSTS_N_INSNS (10), /* MSGFR */
206 COSTS_N_INSNS (10), /* MSGR */
207 COSTS_N_INSNS (10), /* MSR */
208 COSTS_N_INSNS (1), /* multiplication in DFmode */
209 COSTS_N_INSNS (50), /* MXBR */
210 COSTS_N_INSNS (120), /* SQXBR */
211 COSTS_N_INSNS (52), /* SQDBR */
212 COSTS_N_INSNS (38), /* SQEBR */
213 COSTS_N_INSNS (1), /* MADBR */
214 COSTS_N_INSNS (1), /* MAEBR */
215 COSTS_N_INSNS (111), /* DXBR */
216 COSTS_N_INSNS (39), /* DDBR */
217 COSTS_N_INSNS (32), /* DEBR */
218 COSTS_N_INSNS (160), /* DLGR */
219 COSTS_N_INSNS (71), /* DLR */
220 COSTS_N_INSNS (71), /* DR */
221 COSTS_N_INSNS (71), /* DSGFR */
222 COSTS_N_INSNS (71), /* DSGR */
223 };
224
225 static const
226 struct processor_costs z196_cost =
227 {
228 COSTS_N_INSNS (7), /* M */
229 COSTS_N_INSNS (5), /* MGHI */
230 COSTS_N_INSNS (5), /* MH */
231 COSTS_N_INSNS (5), /* MHI */
232 COSTS_N_INSNS (7), /* ML */
233 COSTS_N_INSNS (7), /* MR */
234 COSTS_N_INSNS (6), /* MS */
235 COSTS_N_INSNS (8), /* MSG */
236 COSTS_N_INSNS (6), /* MSGF */
237 COSTS_N_INSNS (6), /* MSGFR */
238 COSTS_N_INSNS (8), /* MSGR */
239 COSTS_N_INSNS (6), /* MSR */
240 COSTS_N_INSNS (1), /* multiplication in DFmode */
241 COSTS_N_INSNS (40), /* MXBR B+40 */
242 COSTS_N_INSNS (100), /* SQXBR B+100 */
243 COSTS_N_INSNS (42), /* SQDBR B+42 */
244 COSTS_N_INSNS (28), /* SQEBR B+28 */
245 COSTS_N_INSNS (1), /* MADBR B */
246 COSTS_N_INSNS (1), /* MAEBR B */
247 COSTS_N_INSNS (101), /* DXBR B+101 */
248 COSTS_N_INSNS (29), /* DDBR */
249 COSTS_N_INSNS (22), /* DEBR */
250 COSTS_N_INSNS (160), /* DLGR cracked */
251 COSTS_N_INSNS (160), /* DLR cracked */
252 COSTS_N_INSNS (160), /* DR expanded */
253 COSTS_N_INSNS (160), /* DSGFR cracked */
254 COSTS_N_INSNS (160), /* DSGR cracked */
255 };
256
257 extern int reload_completed;
258
259 /* Kept up to date using the SCHED_VARIABLE_ISSUE hook. */
260 static rtx last_scheduled_insn;
261
262 /* Structure used to hold the components of a S/390 memory
263 address. A legitimate address on S/390 is of the general
264 form
265 base + index + displacement
266 where any of the components is optional.
267
268 base and index are registers of the class ADDR_REGS,
269 displacement is an unsigned 12-bit immediate constant. */
270
271 struct s390_address
272 {
273 rtx base;
274 rtx indx;
275 rtx disp;
276 bool pointer;
277 bool literal_pool;
278 };
279
280 /* The following structure is embedded in the machine
281 specific part of struct function. */
282
283 struct GTY (()) s390_frame_layout
284 {
285 /* Offset within stack frame. */
286 HOST_WIDE_INT gprs_offset;
287 HOST_WIDE_INT f0_offset;
288 HOST_WIDE_INT f4_offset;
289 HOST_WIDE_INT f8_offset;
290 HOST_WIDE_INT backchain_offset;
291
292 /* Numbers of the first and last gpr for which slots in the register
293 save area are reserved. */
294 int first_save_gpr_slot;
295 int last_save_gpr_slot;
296
297 /* Numbers of the first and last gpr to be saved and restored. */
298 int first_save_gpr;
299 int first_restore_gpr;
300 int last_save_gpr;
301 int last_restore_gpr;
302
303 /* Bits standing for floating point registers. Set, if the
304 respective register has to be saved. Starting with reg 16 (f0)
305 at the rightmost bit.
306 Bit 15 - 8 7 6 5 4 3 2 1 0
307 fpr 15 - 8 7 5 3 1 6 4 2 0
308 reg 31 - 24 23 22 21 20 19 18 17 16 */
309 unsigned int fpr_bitmap;
310
311 /* Number of floating point registers f8-f15 which must be saved. */
312 int high_fprs;
313
314 /* Set if return address needs to be saved.
315 This flag is set by s390_return_addr_rtx if it could not use
316 the initial value of r14 and therefore depends on r14 saved
317 to the stack. */
318 bool save_return_addr_p;
319
320 /* Size of stack frame. */
321 HOST_WIDE_INT frame_size;
322 };
323
324 /* Define the structure for the machine field in struct function. */
325
326 struct GTY(()) machine_function
327 {
328 struct s390_frame_layout frame_layout;
329
330 /* Literal pool base register. */
331 rtx base_reg;
332
333 /* True if we may need to perform branch splitting. */
334 bool split_branches_pending_p;
335
336 /* Some local-dynamic TLS symbol name. */
337 const char *some_ld_name;
338
339 bool has_landing_pad_p;
340 };
341
342 /* A few accessor macros for struct cfun->machine->s390_frame_layout. */
343
344 #define cfun_frame_layout (cfun->machine->frame_layout)
345 #define cfun_save_high_fprs_p (!!cfun_frame_layout.high_fprs)
346 #define cfun_gprs_save_area_size ((cfun_frame_layout.last_save_gpr_slot - \
347 cfun_frame_layout.first_save_gpr_slot + 1) * UNITS_PER_LONG)
348 #define cfun_set_fpr_bit(BITNUM) (cfun->machine->frame_layout.fpr_bitmap |= \
349 (1 << (BITNUM)))
350 #define cfun_fpr_bit_p(BITNUM) (!!(cfun->machine->frame_layout.fpr_bitmap & \
351 (1 << (BITNUM))))
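/* E.g. cfun_set_fpr_bit (0) records that f0 (hard reg 16, the rightmost
bit of fpr_bitmap) has to be saved; cfun_fpr_bit_p (0) then yields true. */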
352
353 /* Number of GPRs and FPRs used for argument passing. */
354 #define GP_ARG_NUM_REG 5
355 #define FP_ARG_NUM_REG (TARGET_64BIT? 4 : 2)
356
357 /* A couple of shortcuts. */
358 #define CONST_OK_FOR_J(x) \
359 CONST_OK_FOR_CONSTRAINT_P((x), 'J', "J")
360 #define CONST_OK_FOR_K(x) \
361 CONST_OK_FOR_CONSTRAINT_P((x), 'K', "K")
362 #define CONST_OK_FOR_Os(x) \
363 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
364 #define CONST_OK_FOR_Op(x) \
365 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
366 #define CONST_OK_FOR_On(x) \
367 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")
368
369 #define REGNO_PAIR_OK(REGNO, MODE) \
370 (HARD_REGNO_NREGS ((REGNO), (MODE)) == 1 || !((REGNO) & 1))
371
372 /* That's the read ahead of the dynamic branch prediction unit in
373 bytes on a z10 (or higher) CPU. */
374 #define PREDICT_DISTANCE (TARGET_Z10 ? 384 : 2048)
375
376 /* Return the alignment for LABEL. We default to the -falign-labels
377 value except for the literal pool base label. */
378 int
379 s390_label_align (rtx label)
380 {
381 rtx prev_insn = prev_active_insn (label);
382
383 if (prev_insn == NULL_RTX)
384 goto old;
385
386 prev_insn = single_set (prev_insn);
387
388 if (prev_insn == NULL_RTX)
389 goto old;
390
391 prev_insn = SET_SRC (prev_insn);
392
393 /* Don't align literal pool base labels. */
394 if (GET_CODE (prev_insn) == UNSPEC
395 && XINT (prev_insn, 1) == UNSPEC_MAIN_BASE)
396 return 0;
397
398 old:
399 return align_labels_log;
400 }
401
402 static enum machine_mode
403 s390_libgcc_cmp_return_mode (void)
404 {
405 return TARGET_64BIT ? DImode : SImode;
406 }
407
408 static enum machine_mode
409 s390_libgcc_shift_count_mode (void)
410 {
411 return TARGET_64BIT ? DImode : SImode;
412 }
413
414 static enum machine_mode
415 s390_unwind_word_mode (void)
416 {
417 return TARGET_64BIT ? DImode : SImode;
418 }
419
420 /* Return true if the back end supports mode MODE. */
421 static bool
422 s390_scalar_mode_supported_p (enum machine_mode mode)
423 {
424 /* In contrast to the default implementation, reject TImode constants on
425 31-bit TARGET_ZARCH for ABI compliance. */
426 if (!TARGET_64BIT && TARGET_ZARCH && mode == TImode)
427 return false;
428
429 if (DECIMAL_FLOAT_MODE_P (mode))
430 return default_decimal_float_supported_p ();
431
432 return default_scalar_mode_supported_p (mode);
433 }
434
435 /* Set the has_landing_pad_p flag in struct machine_function to VALUE. */
436
437 void
438 s390_set_has_landing_pad_p (bool value)
439 {
440 cfun->machine->has_landing_pad_p = value;
441 }
442
443 /* If two condition code modes are compatible, return a condition code
444 mode which is compatible with both. Otherwise, return
445 VOIDmode. */
446
447 static enum machine_mode
448 s390_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
449 {
450 if (m1 == m2)
451 return m1;
452
453 switch (m1)
454 {
455 case CCZmode:
456 if (m2 == CCUmode || m2 == CCTmode || m2 == CCZ1mode
457 || m2 == CCSmode || m2 == CCSRmode || m2 == CCURmode)
458 return m2;
459 return VOIDmode;
460
461 case CCSmode:
462 case CCUmode:
463 case CCTmode:
464 case CCSRmode:
465 case CCURmode:
466 case CCZ1mode:
467 if (m2 == CCZmode)
468 return m1;
469
470 return VOIDmode;
471
472 default:
473 return VOIDmode;
474 }
475 return VOIDmode;
476 }
477
478 /* Return true if SET either doesn't set the CC register, or else
479 the source and destination have matching CC modes and that
480 CC mode is at least as constrained as REQ_MODE. */
481
482 static bool
483 s390_match_ccmode_set (rtx set, enum machine_mode req_mode)
484 {
485 enum machine_mode set_mode;
486
487 gcc_assert (GET_CODE (set) == SET);
488
489 if (GET_CODE (SET_DEST (set)) != REG || !CC_REGNO_P (REGNO (SET_DEST (set))))
490 return 1;
491
492 set_mode = GET_MODE (SET_DEST (set));
493 switch (set_mode)
494 {
495 case CCSmode:
496 case CCSRmode:
497 case CCUmode:
498 case CCURmode:
499 case CCLmode:
500 case CCL1mode:
501 case CCL2mode:
502 case CCL3mode:
503 case CCT1mode:
504 case CCT2mode:
505 case CCT3mode:
506 if (req_mode != set_mode)
507 return 0;
508 break;
509
510 case CCZmode:
511 if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode
512 && req_mode != CCSRmode && req_mode != CCURmode)
513 return 0;
514 break;
515
516 case CCAPmode:
517 case CCANmode:
518 if (req_mode != CCAmode)
519 return 0;
520 break;
521
522 default:
523 gcc_unreachable ();
524 }
525
526 return (GET_MODE (SET_SRC (set)) == set_mode);
527 }
528
529 /* Return true if every SET in INSN that sets the CC register
530 has source and destination with matching CC modes and that
531 CC mode is at least as constrained as REQ_MODE.
532 If REQ_MODE is VOIDmode, always return false. */
533
534 bool
535 s390_match_ccmode (rtx insn, enum machine_mode req_mode)
536 {
537 int i;
538
539 /* s390_tm_ccmode returns VOIDmode to indicate failure. */
540 if (req_mode == VOIDmode)
541 return false;
542
543 if (GET_CODE (PATTERN (insn)) == SET)
544 return s390_match_ccmode_set (PATTERN (insn), req_mode);
545
546 if (GET_CODE (PATTERN (insn)) == PARALLEL)
547 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
548 {
549 rtx set = XVECEXP (PATTERN (insn), 0, i);
550 if (GET_CODE (set) == SET)
551 if (!s390_match_ccmode_set (set, req_mode))
552 return false;
553 }
554
555 return true;
556 }
557
558 /* If a test-under-mask instruction can be used to implement
559 (compare (and ... OP1) OP2), return the CC mode required
560 to do that. Otherwise, return VOIDmode.
561 MIXED is true if the instruction can distinguish between
562 CC1 and CC2 for mixed selected bits (TMxx); it is false
563 if the instruction cannot (TM). */
564
565 enum machine_mode
566 s390_tm_ccmode (rtx op1, rtx op2, bool mixed)
567 {
568 int bit0, bit1;
569
570 /* ??? Fixme: should work on CONST_DOUBLE as well. */
571 if (GET_CODE (op1) != CONST_INT || GET_CODE (op2) != CONST_INT)
572 return VOIDmode;
573
574 /* Selected bits all zero: CC0.
575 e.g.: int a; if ((a & (16 + 128)) == 0) */
576 if (INTVAL (op2) == 0)
577 return CCTmode;
578
579 /* Selected bits all one: CC3.
580 e.g.: int a; if ((a & (16 + 128)) == 16 + 128) */
581 if (INTVAL (op2) == INTVAL (op1))
582 return CCT3mode;
583
584 /* Exactly two bits selected, mixed zeroes and ones: CC1 or CC2. e.g.:
585 int a;
586 if ((a & (16 + 128)) == 16) -> CCT1
587 if ((a & (16 + 128)) == 128) -> CCT2 */
588 if (mixed)
589 {
590 bit1 = exact_log2 (INTVAL (op2));
591 bit0 = exact_log2 (INTVAL (op1) ^ INTVAL (op2));
592 if (bit0 != -1 && bit1 != -1)
593 return bit0 > bit1 ? CCT1mode : CCT2mode;
594 }
595
596 return VOIDmode;
597 }
598
599 /* Given a comparison code OP (EQ, NE, etc.) and the operands
600 OP0 and OP1 of a COMPARE, return the mode to be used for the
601 comparison. */
602
603 enum machine_mode
604 s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
605 {
606 switch (code)
607 {
608 case EQ:
609 case NE:
610 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
611 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
612 return CCAPmode;
613 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
614 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
615 return CCAPmode;
616 if ((GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
617 || GET_CODE (op1) == NEG)
618 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
619 return CCLmode;
620
621 if (GET_CODE (op0) == AND)
622 {
623 /* Check whether we can potentially do it via TM. */
624 enum machine_mode ccmode;
625 ccmode = s390_tm_ccmode (XEXP (op0, 1), op1, 1);
626 if (ccmode != VOIDmode)
627 {
628 /* Relax CCTmode to CCZmode to allow fall-back to AND
629 if that turns out to be beneficial. */
630 return ccmode == CCTmode ? CCZmode : ccmode;
631 }
632 }
633
634 if (register_operand (op0, HImode)
635 && GET_CODE (op1) == CONST_INT
636 && (INTVAL (op1) == -1 || INTVAL (op1) == 65535))
637 return CCT3mode;
638 if (register_operand (op0, QImode)
639 && GET_CODE (op1) == CONST_INT
640 && (INTVAL (op1) == -1 || INTVAL (op1) == 255))
641 return CCT3mode;
642
643 return CCZmode;
644
645 case LE:
646 case LT:
647 case GE:
648 case GT:
649 /* The only overflow condition of NEG and ABS happens when the
650 operand is the most negative value (INT_MIN): the result, which
651 should be positive, wraps around and stays negative.
652 Using CCAP mode the resulting cc can be used for comparisons. */
653 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
654 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
655 return CCAPmode;
656
657 /* If constants are involved in an add instruction it is possible to use
658 the resulting cc for comparisons with zero. Knowing the sign of the
659 constant the overflow behavior gets predictable. e.g.:
660 int a, b; if ((b = a + c) > 0)
661 with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP */
662 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
663 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
664 {
665 if (INTVAL (XEXP((op0), 1)) < 0)
666 return CCANmode;
667 else
668 return CCAPmode;
669 }
670 /* Fall through. */
671 case UNORDERED:
672 case ORDERED:
673 case UNEQ:
674 case UNLE:
675 case UNLT:
676 case UNGE:
677 case UNGT:
678 case LTGT:
679 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
680 && GET_CODE (op1) != CONST_INT)
681 return CCSRmode;
682 return CCSmode;
683
684 case LTU:
685 case GEU:
686 if (GET_CODE (op0) == PLUS
687 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
688 return CCL1mode;
689
690 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
691 && GET_CODE (op1) != CONST_INT)
692 return CCURmode;
693 return CCUmode;
694
695 case LEU:
696 case GTU:
697 if (GET_CODE (op0) == MINUS
698 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
699 return CCL2mode;
700
701 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
702 && GET_CODE (op1) != CONST_INT)
703 return CCURmode;
704 return CCUmode;
705
706 default:
707 gcc_unreachable ();
708 }
709 }
710
711 /* Replace the comparison OP0 CODE OP1 by a semantically equivalent one
712 that we can implement more efficiently. */
713
714 void
715 s390_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1)
716 {
717 /* Convert ZERO_EXTRACT back to AND to enable TM patterns. */
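/* E.g. for a single selected bit with len == 1 and pos == 7 in SImode
the mask computed below is 1 << (32 - 7 - 1) == 0x1000000, so the
comparison becomes (eq (and ... 0x1000000) (const_int 0)), which the
TM patterns can match. */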
718 if ((*code == EQ || *code == NE)
719 && *op1 == const0_rtx
720 && GET_CODE (*op0) == ZERO_EXTRACT
721 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
722 && GET_CODE (XEXP (*op0, 2)) == CONST_INT
723 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
724 {
725 rtx inner = XEXP (*op0, 0);
726 HOST_WIDE_INT modesize = GET_MODE_BITSIZE (GET_MODE (inner));
727 HOST_WIDE_INT len = INTVAL (XEXP (*op0, 1));
728 HOST_WIDE_INT pos = INTVAL (XEXP (*op0, 2));
729
730 if (len > 0 && len < modesize
731 && pos >= 0 && pos + len <= modesize
732 && modesize <= HOST_BITS_PER_WIDE_INT)
733 {
734 unsigned HOST_WIDE_INT block;
735 block = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
736 block <<= modesize - pos - len;
737
738 *op0 = gen_rtx_AND (GET_MODE (inner), inner,
739 gen_int_mode (block, GET_MODE (inner)));
740 }
741 }
742
743 /* Narrow AND of memory against immediate to enable TM. */
744 if ((*code == EQ || *code == NE)
745 && *op1 == const0_rtx
746 && GET_CODE (*op0) == AND
747 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
748 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
749 {
750 rtx inner = XEXP (*op0, 0);
751 rtx mask = XEXP (*op0, 1);
752
753 /* Ignore paradoxical SUBREGs if all extra bits are masked out. */
754 if (GET_CODE (inner) == SUBREG
755 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (inner)))
756 && (GET_MODE_SIZE (GET_MODE (inner))
757 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
758 && ((INTVAL (mask)
759 & GET_MODE_MASK (GET_MODE (inner))
760 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
761 == 0))
762 inner = SUBREG_REG (inner);
763
764 /* Do not change volatile MEMs. */
765 if (MEM_P (inner) && !MEM_VOLATILE_P (inner))
766 {
767 int part = s390_single_part (XEXP (*op0, 1),
768 GET_MODE (inner), QImode, 0);
769 if (part >= 0)
770 {
771 mask = gen_int_mode (s390_extract_part (mask, QImode, 0), QImode);
772 inner = adjust_address_nv (inner, QImode, part);
773 *op0 = gen_rtx_AND (QImode, inner, mask);
774 }
775 }
776 }
777
778 /* Narrow comparisons against 0xffff to HImode if possible. */
779 if ((*code == EQ || *code == NE)
780 && GET_CODE (*op1) == CONST_INT
781 && INTVAL (*op1) == 0xffff
782 && SCALAR_INT_MODE_P (GET_MODE (*op0))
783 && (nonzero_bits (*op0, GET_MODE (*op0))
784 & ~(unsigned HOST_WIDE_INT) 0xffff) == 0)
785 {
786 *op0 = gen_lowpart (HImode, *op0);
787 *op1 = constm1_rtx;
788 }
789
790 /* Remove redundant UNSPEC_CCU_TO_INT conversions if possible. */
791 if (GET_CODE (*op0) == UNSPEC
792 && XINT (*op0, 1) == UNSPEC_CCU_TO_INT
793 && XVECLEN (*op0, 0) == 1
794 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCUmode
795 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
796 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
797 && *op1 == const0_rtx)
798 {
799 enum rtx_code new_code = UNKNOWN;
800 switch (*code)
801 {
802 case EQ: new_code = EQ; break;
803 case NE: new_code = NE; break;
804 case LT: new_code = GTU; break;
805 case GT: new_code = LTU; break;
806 case LE: new_code = GEU; break;
807 case GE: new_code = LEU; break;
808 default: break;
809 }
810
811 if (new_code != UNKNOWN)
812 {
813 *op0 = XVECEXP (*op0, 0, 0);
814 *code = new_code;
815 }
816 }
817
818 /* Remove redundant UNSPEC_CCZ_TO_INT conversions if possible. */
819 if (GET_CODE (*op0) == UNSPEC
820 && XINT (*op0, 1) == UNSPEC_CCZ_TO_INT
821 && XVECLEN (*op0, 0) == 1
822 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCZmode
823 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
824 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
825 && *op1 == const0_rtx)
826 {
827 enum rtx_code new_code = UNKNOWN;
828 switch (*code)
829 {
830 case EQ: new_code = EQ; break;
831 case NE: new_code = NE; break;
832 default: break;
833 }
834
835 if (new_code != UNKNOWN)
836 {
837 *op0 = XVECEXP (*op0, 0, 0);
838 *code = new_code;
839 }
840 }
841
842 /* Simplify cascaded EQ, NE with const0_rtx. */
843 if ((*code == NE || *code == EQ)
844 && (GET_CODE (*op0) == EQ || GET_CODE (*op0) == NE)
845 && GET_MODE (*op0) == SImode
846 && GET_MODE (XEXP (*op0, 0)) == CCZ1mode
847 && REG_P (XEXP (*op0, 0))
848 && XEXP (*op0, 1) == const0_rtx
849 && *op1 == const0_rtx)
850 {
851 if ((*code == EQ && GET_CODE (*op0) == NE)
852 || (*code == NE && GET_CODE (*op0) == EQ))
853 *code = EQ;
854 else
855 *code = NE;
856 *op0 = XEXP (*op0, 0);
857 }
858
859 /* Prefer register over memory as first operand. */
860 if (MEM_P (*op0) && REG_P (*op1))
861 {
862 rtx tem = *op0; *op0 = *op1; *op1 = tem;
863 *code = swap_condition (*code);
864 }
865 }
866
867 /* Emit a compare instruction suitable to implement the comparison
868 OP0 CODE OP1. Return the correct condition RTL to be placed in
869 the IF_THEN_ELSE of the conditional branch testing the result. */
870
871 rtx
872 s390_emit_compare (enum rtx_code code, rtx op0, rtx op1)
873 {
874 enum machine_mode mode = s390_select_ccmode (code, op0, op1);
875 rtx cc;
876
877 /* Do not output a redundant compare instruction if a compare_and_swap
878 pattern already computed the result and the machine modes are compatible. */
879 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
880 {
881 gcc_assert (s390_cc_modes_compatible (GET_MODE (op0), mode)
882 == GET_MODE (op0));
883 cc = op0;
884 }
885 else
886 {
887 cc = gen_rtx_REG (mode, CC_REGNUM);
888 emit_insn (gen_rtx_SET (VOIDmode, cc, gen_rtx_COMPARE (mode, op0, op1)));
889 }
890
891 return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
892 }
893
894 /* Emit a SImode compare and swap instruction setting MEM to NEW_RTX if OLD
895 matches CMP.
896 Return the correct condition RTL to be placed in the IF_THEN_ELSE of the
897 conditional branch testing the result. */
898
899 static rtx
900 s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem, rtx cmp, rtx new_rtx)
901 {
902 emit_insn (gen_sync_compare_and_swapsi (old, mem, cmp, new_rtx));
903 return s390_emit_compare (code, gen_rtx_REG (CCZ1mode, CC_REGNUM), const0_rtx);
904 }
905
906 /* Emit a jump instruction to TARGET. If COND is NULL_RTX, emit an
907 unconditional jump, else a conditional jump under condition COND. */
908
909 void
910 s390_emit_jump (rtx target, rtx cond)
911 {
912 rtx insn;
913
914 target = gen_rtx_LABEL_REF (VOIDmode, target);
915 if (cond)
916 target = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, target, pc_rtx);
917
918 insn = gen_rtx_SET (VOIDmode, pc_rtx, target);
919 emit_jump_insn (insn);
920 }
921
922 /* Return branch condition mask to implement a branch
923 specified by CODE. Return -1 for invalid comparisons. */
924
925 int
926 s390_branch_condition_mask (rtx code)
927 {
928 const int CC0 = 1 << 3;
929 const int CC1 = 1 << 2;
930 const int CC2 = 1 << 1;
931 const int CC3 = 1 << 0;
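/* E.g. an EQ test in CCZmode maps to CC0 (== 8): the branch is taken
only for condition code 0, while NE maps to CC1 | CC2 | CC3 (== 7). */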
932
933 gcc_assert (GET_CODE (XEXP (code, 0)) == REG);
934 gcc_assert (REGNO (XEXP (code, 0)) == CC_REGNUM);
935 gcc_assert (XEXP (code, 1) == const0_rtx);
936
937 switch (GET_MODE (XEXP (code, 0)))
938 {
939 case CCZmode:
940 case CCZ1mode:
941 switch (GET_CODE (code))
942 {
943 case EQ: return CC0;
944 case NE: return CC1 | CC2 | CC3;
945 default: return -1;
946 }
947 break;
948
949 case CCT1mode:
950 switch (GET_CODE (code))
951 {
952 case EQ: return CC1;
953 case NE: return CC0 | CC2 | CC3;
954 default: return -1;
955 }
956 break;
957
958 case CCT2mode:
959 switch (GET_CODE (code))
960 {
961 case EQ: return CC2;
962 case NE: return CC0 | CC1 | CC3;
963 default: return -1;
964 }
965 break;
966
967 case CCT3mode:
968 switch (GET_CODE (code))
969 {
970 case EQ: return CC3;
971 case NE: return CC0 | CC1 | CC2;
972 default: return -1;
973 }
974 break;
975
976 case CCLmode:
977 switch (GET_CODE (code))
978 {
979 case EQ: return CC0 | CC2;
980 case NE: return CC1 | CC3;
981 default: return -1;
982 }
983 break;
984
985 case CCL1mode:
986 switch (GET_CODE (code))
987 {
988 case LTU: return CC2 | CC3; /* carry */
989 case GEU: return CC0 | CC1; /* no carry */
990 default: return -1;
991 }
992 break;
993
994 case CCL2mode:
995 switch (GET_CODE (code))
996 {
997 case GTU: return CC0 | CC1; /* borrow */
998 case LEU: return CC2 | CC3; /* no borrow */
999 default: return -1;
1000 }
1001 break;
1002
1003 case CCL3mode:
1004 switch (GET_CODE (code))
1005 {
1006 case EQ: return CC0 | CC2;
1007 case NE: return CC1 | CC3;
1008 case LTU: return CC1;
1009 case GTU: return CC3;
1010 case LEU: return CC1 | CC2;
1011 case GEU: return CC2 | CC3;
1012 default: return -1;
1013 }
1014
1015 case CCUmode:
1016 switch (GET_CODE (code))
1017 {
1018 case EQ: return CC0;
1019 case NE: return CC1 | CC2 | CC3;
1020 case LTU: return CC1;
1021 case GTU: return CC2;
1022 case LEU: return CC0 | CC1;
1023 case GEU: return CC0 | CC2;
1024 default: return -1;
1025 }
1026 break;
1027
1028 case CCURmode:
1029 switch (GET_CODE (code))
1030 {
1031 case EQ: return CC0;
1032 case NE: return CC2 | CC1 | CC3;
1033 case LTU: return CC2;
1034 case GTU: return CC1;
1035 case LEU: return CC0 | CC2;
1036 case GEU: return CC0 | CC1;
1037 default: return -1;
1038 }
1039 break;
1040
1041 case CCAPmode:
1042 switch (GET_CODE (code))
1043 {
1044 case EQ: return CC0;
1045 case NE: return CC1 | CC2 | CC3;
1046 case LT: return CC1 | CC3;
1047 case GT: return CC2;
1048 case LE: return CC0 | CC1 | CC3;
1049 case GE: return CC0 | CC2;
1050 default: return -1;
1051 }
1052 break;
1053
1054 case CCANmode:
1055 switch (GET_CODE (code))
1056 {
1057 case EQ: return CC0;
1058 case NE: return CC1 | CC2 | CC3;
1059 case LT: return CC1;
1060 case GT: return CC2 | CC3;
1061 case LE: return CC0 | CC1;
1062 case GE: return CC0 | CC2 | CC3;
1063 default: return -1;
1064 }
1065 break;
1066
1067 case CCSmode:
1068 switch (GET_CODE (code))
1069 {
1070 case EQ: return CC0;
1071 case NE: return CC1 | CC2 | CC3;
1072 case LT: return CC1;
1073 case GT: return CC2;
1074 case LE: return CC0 | CC1;
1075 case GE: return CC0 | CC2;
1076 case UNORDERED: return CC3;
1077 case ORDERED: return CC0 | CC1 | CC2;
1078 case UNEQ: return CC0 | CC3;
1079 case UNLT: return CC1 | CC3;
1080 case UNGT: return CC2 | CC3;
1081 case UNLE: return CC0 | CC1 | CC3;
1082 case UNGE: return CC0 | CC2 | CC3;
1083 case LTGT: return CC1 | CC2;
1084 default: return -1;
1085 }
1086 break;
1087
1088 case CCSRmode:
1089 switch (GET_CODE (code))
1090 {
1091 case EQ: return CC0;
1092 case NE: return CC2 | CC1 | CC3;
1093 case LT: return CC2;
1094 case GT: return CC1;
1095 case LE: return CC0 | CC2;
1096 case GE: return CC0 | CC1;
1097 case UNORDERED: return CC3;
1098 case ORDERED: return CC0 | CC2 | CC1;
1099 case UNEQ: return CC0 | CC3;
1100 case UNLT: return CC2 | CC3;
1101 case UNGT: return CC1 | CC3;
1102 case UNLE: return CC0 | CC2 | CC3;
1103 case UNGE: return CC0 | CC1 | CC3;
1104 case LTGT: return CC2 | CC1;
1105 default: return -1;
1106 }
1107 break;
1108
1109 default:
1110 return -1;
1111 }
1112 }
1113
1114
1115 /* Return branch condition mask to implement a compare and branch
1116 specified by CODE. Return -1 for invalid comparisons. */
1117
1118 int
1119 s390_compare_and_branch_condition_mask (rtx code)
1120 {
1121 const int CC0 = 1 << 3;
1122 const int CC1 = 1 << 2;
1123 const int CC2 = 1 << 1;
1124
1125 switch (GET_CODE (code))
1126 {
1127 case EQ:
1128 return CC0;
1129 case NE:
1130 return CC1 | CC2;
1131 case LT:
1132 case LTU:
1133 return CC1;
1134 case GT:
1135 case GTU:
1136 return CC2;
1137 case LE:
1138 case LEU:
1139 return CC0 | CC1;
1140 case GE:
1141 case GEU:
1142 return CC0 | CC2;
1143 default:
1144 gcc_unreachable ();
1145 }
1146 return -1;
1147 }
1148
1149 /* If INV is false, return the assembler mnemonic string to implement
1150 a branch specified by CODE. If INV is true, return the mnemonic
1151 for the corresponding inverted branch. */
1152
1153 static const char *
1154 s390_branch_condition_mnemonic (rtx code, int inv)
1155 {
1156 int mask;
1157
1158 static const char *const mnemonic[16] =
1159 {
1160 NULL, "o", "h", "nle",
1161 "l", "nhe", "lh", "ne",
1162 "e", "nlh", "he", "nl",
1163 "le", "nh", "no", NULL
1164 };
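/* E.g. a mask of CC0 only (8) selects "e" (branch on equal); the
inverted mask (8 ^ 15 == 7) selects "ne". */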
1165
1166 if (GET_CODE (XEXP (code, 0)) == REG
1167 && REGNO (XEXP (code, 0)) == CC_REGNUM
1168 && XEXP (code, 1) == const0_rtx)
1169 mask = s390_branch_condition_mask (code);
1170 else
1171 mask = s390_compare_and_branch_condition_mask (code);
1172
1173 gcc_assert (mask >= 0);
1174
1175 if (inv)
1176 mask ^= 15;
1177
1178 gcc_assert (mask >= 1 && mask <= 14);
1179
1180 return mnemonic[mask];
1181 }
1182
1183 /* Return the part of OP which has a value different from DEF.
1184 The size of the part is determined by MODE.
1185 Use this function only if you already know that OP really
1186 contains such a part. */
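/* E.g. with DEF == 0, s390_extract_part (GEN_INT (0x12340000), HImode, 0)
skips the all-zero low halfword and returns 0x1234. */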
1187
1188 unsigned HOST_WIDE_INT
1189 s390_extract_part (rtx op, enum machine_mode mode, int def)
1190 {
1191 unsigned HOST_WIDE_INT value = 0;
1192 int max_parts = HOST_BITS_PER_WIDE_INT / GET_MODE_BITSIZE (mode);
1193 int part_bits = GET_MODE_BITSIZE (mode);
1194 unsigned HOST_WIDE_INT part_mask
1195 = ((unsigned HOST_WIDE_INT)1 << part_bits) - 1;
1196 int i;
1197
1198 for (i = 0; i < max_parts; i++)
1199 {
1200 if (i == 0)
1201 value = (unsigned HOST_WIDE_INT) INTVAL (op);
1202 else
1203 value >>= part_bits;
1204
1205 if ((value & part_mask) != (def & part_mask))
1206 return value & part_mask;
1207 }
1208
1209 gcc_unreachable ();
1210 }
1211
1212 /* If OP is an integer constant of mode MODE with exactly one
1213 part of mode PART_MODE unequal to DEF, return the number of that
1214 part. Otherwise, return -1. */
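/* E.g. s390_single_part (GEN_INT (0x12340000), SImode, HImode, 0)
returns 0, since only the most significant halfword (part 0) differs
from zero. */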
1215
1216 int
1217 s390_single_part (rtx op,
1218 enum machine_mode mode,
1219 enum machine_mode part_mode,
1220 int def)
1221 {
1222 unsigned HOST_WIDE_INT value = 0;
1223 int n_parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (part_mode);
1224 unsigned HOST_WIDE_INT part_mask
1225 = ((unsigned HOST_WIDE_INT)1 << GET_MODE_BITSIZE (part_mode)) - 1;
1226 int i, part = -1;
1227
1228 if (GET_CODE (op) != CONST_INT)
1229 return -1;
1230
1231 for (i = 0; i < n_parts; i++)
1232 {
1233 if (i == 0)
1234 value = (unsigned HOST_WIDE_INT) INTVAL (op);
1235 else
1236 value >>= GET_MODE_BITSIZE (part_mode);
1237
1238 if ((value & part_mask) != (def & part_mask))
1239 {
1240 if (part != -1)
1241 return -1;
1242 else
1243 part = i;
1244 }
1245 }
1246 return part == -1 ? -1 : n_parts - 1 - part;
1247 }
1248
1249 /* Return true if IN contains a contiguous bitfield in the lower SIZE
1250 bits and no other bits are set in IN. POS and LENGTH can be used
1251 to obtain the start position and the length of the bitfield.
1252
1253 POS gives the position of the first bit of the bitfield counting
1254 from the lowest order bit starting with zero. In order to use this
1255 value for S/390 instructions this has to be converted to "bits big
1256 endian" style. */
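/* E.g. s390_contiguous_bitmask_p (0x0ff0, 32, &pos, &len) returns true
with pos == 4 and len == 8, while 0x0ff1 is rejected because its set
bits are not contiguous. */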
1257
1258 bool
1259 s390_contiguous_bitmask_p (unsigned HOST_WIDE_INT in, int size,
1260 int *pos, int *length)
1261 {
1262 int tmp_pos = 0;
1263 int tmp_length = 0;
1264 int i;
1265 unsigned HOST_WIDE_INT mask = 1ULL;
1266 bool contiguous = false;
1267
1268 for (i = 0; i < size; mask <<= 1, i++)
1269 {
1270 if (contiguous)
1271 {
1272 if (mask & in)
1273 tmp_length++;
1274 else
1275 break;
1276 }
1277 else
1278 {
1279 if (mask & in)
1280 {
1281 contiguous = true;
1282 tmp_length++;
1283 }
1284 else
1285 tmp_pos++;
1286 }
1287 }
1288
1289 if (!tmp_length)
1290 return false;
1291
1292 /* Calculate a mask for all bits beyond the contiguous bits. */
1293 mask = (-1LL & ~(((1ULL << (tmp_length + tmp_pos - 1)) << 1) - 1));
1294
1295 if (mask & in)
1296 return false;
1297
1298 if (tmp_length + tmp_pos - 1 > size)
1299 return false;
1300
1301 if (length)
1302 *length = tmp_length;
1303
1304 if (pos)
1305 *pos = tmp_pos;
1306
1307 return true;
1308 }
1309
1310 /* Check whether we can (and want to) split a double-word
1311 move in mode MODE from SRC to DST into two single-word
1312 moves, moving the subword FIRST_SUBWORD first. */
1313
1314 bool
1315 s390_split_ok_p (rtx dst, rtx src, enum machine_mode mode, int first_subword)
1316 {
1317 /* Floating point registers cannot be split. */
1318 if (FP_REG_P (src) || FP_REG_P (dst))
1319 return false;
1320
1321 /* We don't need to split if operands are directly accessible. */
1322 if (s_operand (src, mode) || s_operand (dst, mode))
1323 return false;
1324
1325 /* Non-offsettable memory references cannot be split. */
1326 if ((GET_CODE (src) == MEM && !offsettable_memref_p (src))
1327 || (GET_CODE (dst) == MEM && !offsettable_memref_p (dst)))
1328 return false;
1329
1330 /* Moving the first subword must not clobber a register
1331 needed to move the second subword. */
1332 if (register_operand (dst, mode))
1333 {
1334 rtx subreg = operand_subword (dst, first_subword, 0, mode);
1335 if (reg_overlap_mentioned_p (subreg, src))
1336 return false;
1337 }
1338
1339 return true;
1340 }
1341
1342 /* Return true if it can be proven that [MEM1, MEM1 + SIZE]
1343 and [MEM2, MEM2 + SIZE] do overlap and false
1344 otherwise. */
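/* E.g. if the address of MEM2 is known to equal the address of MEM1
plus 8 and SIZE is 16, the blocks overlap and true is returned; if the
distance is 16 or more, or cannot be determined as a compile-time
constant, false is returned. */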
1345
1346 bool
1347 s390_overlap_p (rtx mem1, rtx mem2, HOST_WIDE_INT size)
1348 {
1349 rtx addr1, addr2, addr_delta;
1350 HOST_WIDE_INT delta;
1351
1352 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
1353 return true;
1354
1355 if (size == 0)
1356 return false;
1357
1358 addr1 = XEXP (mem1, 0);
1359 addr2 = XEXP (mem2, 0);
1360
1361 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
1362
1363 /* This overlapping check is used by peepholes merging memory block operations.
1364 Overlapping operations would otherwise be recognized by the S/390 hardware
1365 and would fall back to a slower implementation. Allowing overlapping
1366 operations would lead to slow code but not to wrong code. Therefore we are
1367 somewhat optimistic if we cannot prove that the memory blocks are
1368 overlapping.
1369 That's why we return false here although this may accept operations on
1370 overlapping memory areas. */
1371 if (!addr_delta || GET_CODE (addr_delta) != CONST_INT)
1372 return false;
1373
1374 delta = INTVAL (addr_delta);
1375
1376 if (delta == 0
1377 || (delta > 0 && delta < size)
1378 || (delta < 0 && -delta < size))
1379 return true;
1380
1381 return false;
1382 }
1383
1384 /* Check whether the address of memory reference MEM2 equals exactly
1385 the address of memory reference MEM1 plus DELTA. Return true if
1386 we can prove this to be the case, false otherwise. */
1387
1388 bool
1389 s390_offset_p (rtx mem1, rtx mem2, rtx delta)
1390 {
1391 rtx addr1, addr2, addr_delta;
1392
1393 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
1394 return false;
1395
1396 addr1 = XEXP (mem1, 0);
1397 addr2 = XEXP (mem2, 0);
1398
1399 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
1400 if (!addr_delta || !rtx_equal_p (addr_delta, delta))
1401 return false;
1402
1403 return true;
1404 }
1405
1406 /* Expand logical operator CODE in mode MODE with operands OPERANDS. */
1407
1408 void
1409 s390_expand_logical_operator (enum rtx_code code, enum machine_mode mode,
1410 rtx *operands)
1411 {
1412 enum machine_mode wmode = mode;
1413 rtx dst = operands[0];
1414 rtx src1 = operands[1];
1415 rtx src2 = operands[2];
1416 rtx op, clob, tem;
1417
1418 /* If we cannot handle the operation directly, use a temp register. */
1419 if (!s390_logical_operator_ok_p (operands))
1420 dst = gen_reg_rtx (mode);
1421
1422 /* QImode and HImode patterns make sense only if we have a destination
1423 in memory. Otherwise perform the operation in SImode. */
1424 if ((mode == QImode || mode == HImode) && GET_CODE (dst) != MEM)
1425 wmode = SImode;
1426
1427 /* Widen operands if required. */
1428 if (mode != wmode)
1429 {
1430 if (GET_CODE (dst) == SUBREG
1431 && (tem = simplify_subreg (wmode, dst, mode, 0)) != 0)
1432 dst = tem;
1433 else if (REG_P (dst))
1434 dst = gen_rtx_SUBREG (wmode, dst, 0);
1435 else
1436 dst = gen_reg_rtx (wmode);
1437
1438 if (GET_CODE (src1) == SUBREG
1439 && (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)
1440 src1 = tem;
1441 else if (GET_MODE (src1) != VOIDmode)
1442 src1 = gen_rtx_SUBREG (wmode, force_reg (mode, src1), 0);
1443
1444 if (GET_CODE (src2) == SUBREG
1445 && (tem = simplify_subreg (wmode, src2, mode, 0)) != 0)
1446 src2 = tem;
1447 else if (GET_MODE (src2) != VOIDmode)
1448 src2 = gen_rtx_SUBREG (wmode, force_reg (mode, src2), 0);
1449 }
1450
1451 /* Emit the instruction. */
1452 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, wmode, src1, src2));
1453 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
1454 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
1455
1456 /* Fix up the destination if needed. */
1457 if (dst != operands[0])
1458 emit_move_insn (operands[0], gen_lowpart (mode, dst));
1459 }
1460
1461 /* Check whether OPERANDS are OK for a logical operation (AND, IOR, XOR). */
1462
1463 bool
1464 s390_logical_operator_ok_p (rtx *operands)
1465 {
1466 /* If the destination operand is in memory, it needs to coincide
1467 with one of the source operands. After reload, it has to be
1468 the first source operand. */
1469 if (GET_CODE (operands[0]) == MEM)
1470 return rtx_equal_p (operands[0], operands[1])
1471 || (!reload_completed && rtx_equal_p (operands[0], operands[2]));
1472
1473 return true;
1474 }
1475
1476 /* Narrow logical operation CODE of memory operand MEMOP with immediate
1477 operand IMMOP to switch from SS to SI type instructions. */
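/* E.g. an SImode AND with (const_int -3) (mask 0xfffffffd) differs from
all ones only in its lowest byte, so it is rewritten as a QImode AND of
the byte at offset 3 with 0xfd. */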
1478
1479 void
1480 s390_narrow_logical_operator (enum rtx_code code, rtx *memop, rtx *immop)
1481 {
1482 int def = code == AND ? -1 : 0;
1483 HOST_WIDE_INT mask;
1484 int part;
1485
1486 gcc_assert (GET_CODE (*memop) == MEM);
1487 gcc_assert (!MEM_VOLATILE_P (*memop));
1488
1489 mask = s390_extract_part (*immop, QImode, def);
1490 part = s390_single_part (*immop, GET_MODE (*memop), QImode, def);
1491 gcc_assert (part >= 0);
1492
1493 *memop = adjust_address (*memop, QImode, part);
1494 *immop = gen_int_mode (mask, QImode);
1495 }
1496
1497
1498 /* How to allocate a 'struct machine_function'. */
1499
1500 static struct machine_function *
1501 s390_init_machine_status (void)
1502 {
1503 return ggc_alloc_cleared_machine_function ();
1504 }
1505
1506 static void
1507 s390_option_override (void)
1508 {
1509 /* Set up function hooks. */
1510 init_machine_status = s390_init_machine_status;
1511
1512 /* Architecture mode defaults according to ABI. */
1513 if (!(target_flags_explicit & MASK_ZARCH))
1514 {
1515 if (TARGET_64BIT)
1516 target_flags |= MASK_ZARCH;
1517 else
1518 target_flags &= ~MASK_ZARCH;
1519 }
1520
1521 /* Set the march default in case it hasn't been specified on
1522 cmdline. */
1523 if (s390_arch == PROCESSOR_max)
1524 {
1525 s390_arch_string = TARGET_ZARCH? "z900" : "g5";
1526 s390_arch = TARGET_ZARCH ? PROCESSOR_2064_Z900 : PROCESSOR_9672_G5;
1527 s390_arch_flags = processor_flags_table[(int)s390_arch];
1528 }
1529
1530 /* Determine processor to tune for. */
1531 if (s390_tune == PROCESSOR_max)
1532 {
1533 s390_tune = s390_arch;
1534 s390_tune_flags = s390_arch_flags;
1535 }
1536
1537 /* Sanity checks. */
1538 if (TARGET_ZARCH && !TARGET_CPU_ZARCH)
1539 error ("z/Architecture mode not supported on %s", s390_arch_string);
1540 if (TARGET_64BIT && !TARGET_ZARCH)
1541 error ("64-bit ABI not supported in ESA/390 mode");
1542
1543 if (TARGET_HARD_DFP && !TARGET_DFP)
1544 {
1545 if (target_flags_explicit & MASK_HARD_DFP)
1546 {
1547 if (!TARGET_CPU_DFP)
1548 error ("hardware decimal floating point instructions"
1549 " not available on %s", s390_arch_string);
1550 if (!TARGET_ZARCH)
1551 error ("hardware decimal floating point instructions"
1552 " not available in ESA/390 mode");
1553 }
1554 else
1555 target_flags &= ~MASK_HARD_DFP;
1556 }
1557
1558 if ((target_flags_explicit & MASK_SOFT_FLOAT) && TARGET_SOFT_FLOAT)
1559 {
1560 if ((target_flags_explicit & MASK_HARD_DFP) && TARGET_HARD_DFP)
1561 error ("-mhard-dfp can%'t be used in conjunction with -msoft-float");
1562
1563 target_flags &= ~MASK_HARD_DFP;
1564 }
1565
1566 /* Set processor cost function. */
1567 switch (s390_tune)
1568 {
1569 case PROCESSOR_2084_Z990:
1570 s390_cost = &z990_cost;
1571 break;
1572 case PROCESSOR_2094_Z9_109:
1573 s390_cost = &z9_109_cost;
1574 break;
1575 case PROCESSOR_2097_Z10:
1576 s390_cost = &z10_cost;
break;
1577 case PROCESSOR_2817_Z196:
1578 s390_cost = &z196_cost;
1579 break;
1580 default:
1581 s390_cost = &z900_cost;
1582 }
1583
1584 if (TARGET_BACKCHAIN && TARGET_PACKED_STACK && TARGET_HARD_FLOAT)
1585 error ("-mbackchain -mpacked-stack -mhard-float are not supported "
1586 "in combination");
1587
1588 if (s390_stack_size)
1589 {
1590 if (s390_stack_guard >= s390_stack_size)
1591 error ("stack size must be greater than the stack guard value");
1592 else if (s390_stack_size > 1 << 16)
1593 error ("stack size must not be greater than 64k");
1594 }
1595 else if (s390_stack_guard)
1596 error ("-mstack-guard implies use of -mstack-size");
1597
1598 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
1599 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
1600 target_flags |= MASK_LONG_DOUBLE_128;
1601 #endif
1602
1603 if (s390_tune == PROCESSOR_2097_Z10
1604 || s390_tune == PROCESSOR_2817_Z196)
1605 {
1606 maybe_set_param_value (PARAM_MAX_UNROLLED_INSNS, 100,
1607 global_options.x_param_values,
1608 global_options_set.x_param_values);
1609 maybe_set_param_value (PARAM_MAX_UNROLL_TIMES, 32,
1610 global_options.x_param_values,
1611 global_options_set.x_param_values);
1612 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 2000,
1613 global_options.x_param_values,
1614 global_options_set.x_param_values);
1615 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEEL_TIMES, 64,
1616 global_options.x_param_values,
1617 global_options_set.x_param_values);
1618 }
1619
1620 maybe_set_param_value (PARAM_MAX_PENDING_LIST_LENGTH, 256,
1621 global_options.x_param_values,
1622 global_options_set.x_param_values);
1623 /* Values for loop prefetching. */
1624 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, 256,
1625 global_options.x_param_values,
1626 global_options_set.x_param_values);
1627 maybe_set_param_value (PARAM_L1_CACHE_SIZE, 128,
1628 global_options.x_param_values,
1629 global_options_set.x_param_values);
1630 /* s390 has more than 2 cache levels and their sizes are much larger.
1631 Since we are always running virtualized, assume that we only get a
1632 small part of the caches above L1. */
1633 maybe_set_param_value (PARAM_L2_CACHE_SIZE, 1500,
1634 global_options.x_param_values,
1635 global_options_set.x_param_values);
1636 maybe_set_param_value (PARAM_PREFETCH_MIN_INSN_TO_MEM_RATIO, 2,
1637 global_options.x_param_values,
1638 global_options_set.x_param_values);
1639 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6,
1640 global_options.x_param_values,
1641 global_options_set.x_param_values);
1642
1643 /* This cannot reside in s390_option_optimization_table since HAVE_prefetch
1644 requires the arch flags to be evaluated already. Since prefetching
1645 is beneficial on s390, we enable it if available. */
1646 if (flag_prefetch_loop_arrays < 0 && HAVE_prefetch && optimize >= 3)
1647 flag_prefetch_loop_arrays = 1;
1648 }
1649
1650 /* Map for smallest class containing reg regno. */
1651
1652 const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
1653 { GENERAL_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1654 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1655 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1656 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1657 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1658 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1659 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1660 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1661 ADDR_REGS, CC_REGS, ADDR_REGS, ADDR_REGS,
1662 ACCESS_REGS, ACCESS_REGS
1663 };
1664
1665 /* Return attribute type of insn. */
1666
1667 static enum attr_type
1668 s390_safe_attr_type (rtx insn)
1669 {
1670 if (recog_memoized (insn) >= 0)
1671 return get_attr_type (insn);
1672 else
1673 return TYPE_NONE;
1674 }
1675
1676 /* Return true if DISP is a valid short displacement. */
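/* E.g. (const_int 4095) is a short displacement, whereas (const_int 4096)
or a negative value requires a long displacement. */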
1677
1678 static bool
1679 s390_short_displacement (rtx disp)
1680 {
1681 /* No displacement is OK. */
1682 if (!disp)
1683 return true;
1684
1685 /* Without the long displacement facility we don't need to
1686 distinguish between long and short displacement. */
1687 if (!TARGET_LONG_DISPLACEMENT)
1688 return true;
1689
1690 /* Integer displacement in range. */
1691 if (GET_CODE (disp) == CONST_INT)
1692 return INTVAL (disp) >= 0 && INTVAL (disp) < 4096;
1693
1694 /* GOT offset is not OK, the GOT can be large. */
1695 if (GET_CODE (disp) == CONST
1696 && GET_CODE (XEXP (disp, 0)) == UNSPEC
1697 && (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
1698 || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
1699 return false;
1700
1701 /* All other symbolic constants are literal pool references,
1702 which are OK as the literal pool must be small. */
1703 if (GET_CODE (disp) == CONST)
1704 return true;
1705
1706 return false;
1707 }
1708
1709 /* Decompose a RTL expression ADDR for a memory address into
1710 its components, returned in OUT.
1711
1712 Returns false if ADDR is not a valid memory address, true
1713 otherwise. If OUT is NULL, don't return the components,
1714 but check for validity only.
1715
1716 Note: Only addresses in canonical form are recognized.
1717 LEGITIMIZE_ADDRESS should convert non-canonical forms to the
1718 canonical form so that they will be recognized. */
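/* E.g. an address of the form (plus (reg A) (const_int 100)) is
decomposed into base = (reg A), indx = NULL_RTX and
disp = (const_int 100). */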
1719
1720 static int
1721 s390_decompose_address (rtx addr, struct s390_address *out)
1722 {
1723 HOST_WIDE_INT offset = 0;
1724 rtx base = NULL_RTX;
1725 rtx indx = NULL_RTX;
1726 rtx disp = NULL_RTX;
1727 rtx orig_disp;
1728 bool pointer = false;
1729 bool base_ptr = false;
1730 bool indx_ptr = false;
1731 bool literal_pool = false;
1732
1733 /* We may need to substitute the literal pool base register into the address
1734 below. However, at this point we do not know which register is going to
1735 be used as base, so we substitute the arg pointer register. This is going
1736 to be treated as holding a pointer below -- it shouldn't be used for any
1737 other purpose. */
1738 rtx fake_pool_base = gen_rtx_REG (Pmode, ARG_POINTER_REGNUM);
1739
1740 /* Decompose address into base + index + displacement. */
1741
1742 if (GET_CODE (addr) == REG || GET_CODE (addr) == UNSPEC)
1743 base = addr;
1744
1745 else if (GET_CODE (addr) == PLUS)
1746 {
1747 rtx op0 = XEXP (addr, 0);
1748 rtx op1 = XEXP (addr, 1);
1749 enum rtx_code code0 = GET_CODE (op0);
1750 enum rtx_code code1 = GET_CODE (op1);
1751
1752 if (code0 == REG || code0 == UNSPEC)
1753 {
1754 if (code1 == REG || code1 == UNSPEC)
1755 {
1756 indx = op0; /* index + base */
1757 base = op1;
1758 }
1759
1760 else
1761 {
1762 base = op0; /* base + displacement */
1763 disp = op1;
1764 }
1765 }
1766
1767 else if (code0 == PLUS)
1768 {
1769 indx = XEXP (op0, 0); /* index + base + disp */
1770 base = XEXP (op0, 1);
1771 disp = op1;
1772 }
1773
1774 else
1775 {
1776 return false;
1777 }
1778 }
1779
1780 else
1781 disp = addr; /* displacement */
1782
1783 /* Extract integer part of displacement. */
1784 orig_disp = disp;
1785 if (disp)
1786 {
1787 if (GET_CODE (disp) == CONST_INT)
1788 {
1789 offset = INTVAL (disp);
1790 disp = NULL_RTX;
1791 }
1792 else if (GET_CODE (disp) == CONST
1793 && GET_CODE (XEXP (disp, 0)) == PLUS
1794 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
1795 {
1796 offset = INTVAL (XEXP (XEXP (disp, 0), 1));
1797 disp = XEXP (XEXP (disp, 0), 0);
1798 }
1799 }
1800
1801 /* Strip off CONST here to avoid special case tests later. */
1802 if (disp && GET_CODE (disp) == CONST)
1803 disp = XEXP (disp, 0);
1804
1805 /* We can convert literal pool addresses to
1806 displacements by basing them off the base register. */
1807 if (disp && GET_CODE (disp) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (disp))
1808 {
1809 /* Either base or index must be free to hold the base register. */
1810 if (!base)
1811 base = fake_pool_base, literal_pool = true;
1812 else if (!indx)
1813 indx = fake_pool_base, literal_pool = true;
1814 else
1815 return false;
1816
1817 /* Mark up the displacement. */
1818 disp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, disp),
1819 UNSPEC_LTREL_OFFSET);
1820 }
1821
1822 /* Validate base register. */
1823 if (base)
1824 {
1825 if (GET_CODE (base) == UNSPEC)
1826 switch (XINT (base, 1))
1827 {
1828 case UNSPEC_LTREF:
1829 if (!disp)
1830 disp = gen_rtx_UNSPEC (Pmode,
1831 gen_rtvec (1, XVECEXP (base, 0, 0)),
1832 UNSPEC_LTREL_OFFSET);
1833 else
1834 return false;
1835
1836 base = XVECEXP (base, 0, 1);
1837 break;
1838
1839 case UNSPEC_LTREL_BASE:
1840 if (XVECLEN (base, 0) == 1)
1841 base = fake_pool_base, literal_pool = true;
1842 else
1843 base = XVECEXP (base, 0, 1);
1844 break;
1845
1846 default:
1847 return false;
1848 }
1849
1850 if (!REG_P (base)
1851 || (GET_MODE (base) != SImode
1852 && GET_MODE (base) != Pmode))
1853 return false;
1854
1855 if (REGNO (base) == STACK_POINTER_REGNUM
1856 || REGNO (base) == FRAME_POINTER_REGNUM
1857 || ((reload_completed || reload_in_progress)
1858 && frame_pointer_needed
1859 && REGNO (base) == HARD_FRAME_POINTER_REGNUM)
1860 || REGNO (base) == ARG_POINTER_REGNUM
1861 || (flag_pic
1862 && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
1863 pointer = base_ptr = true;
1864
1865 if ((reload_completed || reload_in_progress)
1866 && base == cfun->machine->base_reg)
1867 pointer = base_ptr = literal_pool = true;
1868 }
1869
1870 /* Validate index register. */
1871 if (indx)
1872 {
1873 if (GET_CODE (indx) == UNSPEC)
1874 switch (XINT (indx, 1))
1875 {
1876 case UNSPEC_LTREF:
1877 if (!disp)
1878 disp = gen_rtx_UNSPEC (Pmode,
1879 gen_rtvec (1, XVECEXP (indx, 0, 0)),
1880 UNSPEC_LTREL_OFFSET);
1881 else
1882 return false;
1883
1884 indx = XVECEXP (indx, 0, 1);
1885 break;
1886
1887 case UNSPEC_LTREL_BASE:
1888 if (XVECLEN (indx, 0) == 1)
1889 indx = fake_pool_base, literal_pool = true;
1890 else
1891 indx = XVECEXP (indx, 0, 1);
1892 break;
1893
1894 default:
1895 return false;
1896 }
1897
1898 if (!REG_P (indx)
1899 || (GET_MODE (indx) != SImode
1900 && GET_MODE (indx) != Pmode))
1901 return false;
1902
1903 if (REGNO (indx) == STACK_POINTER_REGNUM
1904 || REGNO (indx) == FRAME_POINTER_REGNUM
1905 || ((reload_completed || reload_in_progress)
1906 && frame_pointer_needed
1907 && REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
1908 || REGNO (indx) == ARG_POINTER_REGNUM
1909 || (flag_pic
1910 && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
1911 pointer = indx_ptr = true;
1912
1913 if ((reload_completed || reload_in_progress)
1914 && indx == cfun->machine->base_reg)
1915 pointer = indx_ptr = literal_pool = true;
1916 }
1917
1918 /* Prefer to use pointer as base, not index. */
1919 if (base && indx && !base_ptr
1920 && (indx_ptr || (!REG_POINTER (base) && REG_POINTER (indx))))
1921 {
1922 rtx tmp = base;
1923 base = indx;
1924 indx = tmp;
1925 }
1926
1927 /* Validate displacement. */
1928 if (!disp)
1929 {
1930 /* If virtual registers are involved, the displacement will change later
1931 anyway as the virtual registers get eliminated. This could make a
1932 valid displacement invalid, but it is more likely to make an invalid
1933 displacement valid, because we sometimes access the register save area
1934 via negative offsets to one of those registers.
1935 Thus we don't check the displacement for validity here. If after
1936 elimination the displacement turns out to be invalid after all,
1937 this is fixed up by reload in any case. */
1938 if (base != arg_pointer_rtx
1939 && indx != arg_pointer_rtx
1940 && base != return_address_pointer_rtx
1941 && indx != return_address_pointer_rtx
1942 && base != frame_pointer_rtx
1943 && indx != frame_pointer_rtx
1944 && base != virtual_stack_vars_rtx
1945 && indx != virtual_stack_vars_rtx)
1946 if (!DISP_IN_RANGE (offset))
1947 return false;
1948 }
1949 else
1950 {
1951 /* All the special cases are pointers. */
1952 pointer = true;
1953
1954 /* In the small-PIC case, the linker converts @GOT
1955 and @GOTNTPOFF offsets to possible displacements. */
1956 if (GET_CODE (disp) == UNSPEC
1957 && (XINT (disp, 1) == UNSPEC_GOT
1958 || XINT (disp, 1) == UNSPEC_GOTNTPOFF)
1959 && flag_pic == 1)
1960 {
1961 ;
1962 }
1963
1964 /* Accept pool label offsets. */
1965 else if (GET_CODE (disp) == UNSPEC
1966 && XINT (disp, 1) == UNSPEC_POOL_OFFSET)
1967 ;
1968
1969 /* Accept literal pool references. */
1970 else if (GET_CODE (disp) == UNSPEC
1971 && XINT (disp, 1) == UNSPEC_LTREL_OFFSET)
1972 {
1973 /* In case CSE pulled a non-literal-pool reference out of
1974 the pool, we have to reject the address. This is
1975 especially important when loading the GOT pointer on
1976 non-zarch CPUs. In this case the literal pool contains an
1977 lt-relative offset to the _GLOBAL_OFFSET_TABLE_ label which
1978 will most likely exceed the displacement range. */
1979 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
1980 || !CONSTANT_POOL_ADDRESS_P (XVECEXP (disp, 0, 0)))
1981 return false;
1982
1983 orig_disp = gen_rtx_CONST (Pmode, disp);
1984 if (offset)
1985 {
1986 /* If we have an offset, make sure it does not
1987 exceed the size of the constant pool entry. */
1988 rtx sym = XVECEXP (disp, 0, 0);
1989 if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))
1990 return false;
1991
1992 orig_disp = plus_constant (orig_disp, offset);
1993 }
1994 }
1995
1996 else
1997 return false;
1998 }
1999
2000 if (!base && !indx)
2001 pointer = true;
2002
2003 if (out)
2004 {
2005 out->base = base;
2006 out->indx = indx;
2007 out->disp = orig_disp;
2008 out->pointer = pointer;
2009 out->literal_pool = literal_pool;
2010 }
2011
2012 return true;
2013 }
2014
2015 /* Decompose an RTL expression OP for a shift count into its components,
2016 and return the base register in BASE and the offset in OFFSET.
2017
2018 Return true if OP is a valid shift count, false if not. */
2019
2020 bool
2021 s390_decompose_shift_count (rtx op, rtx *base, HOST_WIDE_INT *offset)
2022 {
2023 HOST_WIDE_INT off = 0;
2024
2025 /* We can have an integer constant, an address register,
2026 or a sum of the two. */
2027 if (GET_CODE (op) == CONST_INT)
2028 {
2029 off = INTVAL (op);
2030 op = NULL_RTX;
2031 }
2032 if (op && GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT)
2033 {
2034 off = INTVAL (XEXP (op, 1));
2035 op = XEXP (op, 0);
2036 }
2037 while (op && GET_CODE (op) == SUBREG)
2038 op = SUBREG_REG (op);
2039
2040 if (op && GET_CODE (op) != REG)
2041 return false;
2042
2043 if (offset)
2044 *offset = off;
2045 if (base)
2046 *base = op;
2047
2048 return true;
2049 }
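/* A sketch of the decomposition above (illustrative only, not part of the
   original sources): a shift count of the form
   (plus (reg 2) (const_int 15)) yields *BASE == (reg 2) and *OFFSET == 15,
   a plain (const_int 3) yields *BASE == NULL_RTX and *OFFSET == 3, and a
   SUBREG around the register is stripped before the check.  */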
2050
2051
2052 /* Return true if OP is a MEM whose address is valid without an index register. */
2053
2054 bool
2055 s390_legitimate_address_without_index_p (rtx op)
2056 {
2057 struct s390_address addr;
2058
2059 if (!s390_decompose_address (XEXP (op, 0), &addr))
2060 return false;
2061 if (addr.indx)
2062 return false;
2063
2064 return true;
2065 }
2066
2067
2068 /* Return true if ADDR is of kind symbol_ref or symbol_ref + const_int
2069 and return these parts in SYMREF and ADDEND. You can pass NULL in
2070 SYMREF and/or ADDEND if you are not interested in these values.
2071 Literal pool references are *not* considered symbol references. */
2072
2073 static bool
2074 s390_symref_operand_p (rtx addr, rtx *symref, HOST_WIDE_INT *addend)
2075 {
2076 HOST_WIDE_INT tmpaddend = 0;
2077
2078 if (GET_CODE (addr) == CONST)
2079 addr = XEXP (addr, 0);
2080
2081 if (GET_CODE (addr) == PLUS)
2082 {
2083 if (GET_CODE (XEXP (addr, 0)) == SYMBOL_REF
2084 && !CONSTANT_POOL_ADDRESS_P (XEXP (addr, 0))
2085 && CONST_INT_P (XEXP (addr, 1)))
2086 {
2087 tmpaddend = INTVAL (XEXP (addr, 1));
2088 addr = XEXP (addr, 0);
2089 }
2090 else
2091 return false;
2092 }
2093 else
2094 if (GET_CODE (addr) != SYMBOL_REF || CONSTANT_POOL_ADDRESS_P (addr))
2095 return false;
2096
2097 if (symref)
2098 *symref = addr;
2099 if (addend)
2100 *addend = tmpaddend;
2101
2102 return true;
2103 }
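/* Example (illustrative only): for ADDR == (const (plus (symbol_ref "x")
   (const_int 8))) the routine stores (symbol_ref "x") in *SYMREF and 8 in
   *ADDEND; a bare (symbol_ref "x") yields an addend of 0.  Literal pool
   symbols are rejected in both forms.  */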
2104
2105
2106 /* Return true if the address in OP is valid for constraint letter C
2107 if wrapped in a MEM rtx. Set LIT_POOL_OK to true if literal
2108 pool MEMs should be accepted. Only the Q, R, S, T constraint
2109 letters are allowed for C. */
2110
2111 static int
2112 s390_check_qrst_address (char c, rtx op, bool lit_pool_ok)
2113 {
2114 struct s390_address addr;
2115 bool decomposed = false;
2116
2117 /* This check makes sure that no symbolic addresses (except literal
2118 pool references) are accepted by the R or T constraints. */
2119 if (s390_symref_operand_p (op, NULL, NULL))
2120 return 0;
2121
2122 /* Ensure literal pool references are only accepted if LIT_POOL_OK. */
2123 if (!lit_pool_ok)
2124 {
2125 if (!s390_decompose_address (op, &addr))
2126 return 0;
2127 if (addr.literal_pool)
2128 return 0;
2129 decomposed = true;
2130 }
2131
2132 switch (c)
2133 {
2134 case 'Q': /* no index short displacement */
2135 if (!decomposed && !s390_decompose_address (op, &addr))
2136 return 0;
2137 if (addr.indx)
2138 return 0;
2139 if (!s390_short_displacement (addr.disp))
2140 return 0;
2141 break;
2142
2143 case 'R': /* with index short displacement */
2144 if (TARGET_LONG_DISPLACEMENT)
2145 {
2146 if (!decomposed && !s390_decompose_address (op, &addr))
2147 return 0;
2148 if (!s390_short_displacement (addr.disp))
2149 return 0;
2150 }
2151 /* Any invalid address here will be fixed up by reload,
2152 so accept it for the most generic constraint. */
2153 break;
2154
2155 case 'S': /* no index long displacement */
2156 if (!TARGET_LONG_DISPLACEMENT)
2157 return 0;
2158 if (!decomposed && !s390_decompose_address (op, &addr))
2159 return 0;
2160 if (addr.indx)
2161 return 0;
2162 if (s390_short_displacement (addr.disp))
2163 return 0;
2164 break;
2165
2166 case 'T': /* with index long displacement */
2167 if (!TARGET_LONG_DISPLACEMENT)
2168 return 0;
2169 /* Any invalid address here will be fixed up by reload,
2170 so accept it for the most generic constraint. */
2171 if ((decomposed || s390_decompose_address (op, &addr))
2172 && s390_short_displacement (addr.disp))
2173 return 0;
2174 break;
2175 default:
2176 return 0;
2177 }
2178 return 1;
2179 }
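/* Illustration (an assumption about the usual s390 encodings, not part of
   the original sources): "short" displacements are the unsigned 12-bit D
   field of the classic D(X,B) formats (0..4095), while "long"
   displacements are the signed 20-bit field of the *Y formats.  Hence a
   displacement of 4000 passes the short-displacement test used by 'Q' and
   'R', whereas 8192 does not and is only directly encodable in the
   long-displacement forms covered by 'S' and 'T'.  */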
2180
2181
2182 /* Evaluates constraint strings described by the regular expression
2183 ([A|B|Z](Q|R|S|T))|U|W|Y and returns 1 if OP is a valid operand for
2184 the constraint given in STR, and 0 otherwise. */
2185
2186 int
2187 s390_mem_constraint (const char *str, rtx op)
2188 {
2189 char c = str[0];
2190
2191 switch (c)
2192 {
2193 case 'A':
2194 /* Check for offsettable variants of memory constraints. */
2195 if (!MEM_P (op) || MEM_VOLATILE_P (op))
2196 return 0;
2197 if ((reload_completed || reload_in_progress)
2198 ? !offsettable_memref_p (op) : !offsettable_nonstrict_memref_p (op))
2199 return 0;
2200 return s390_check_qrst_address (str[1], XEXP (op, 0), true);
2201 case 'B':
2202 /* Check for non-literal-pool variants of memory constraints. */
2203 if (!MEM_P (op))
2204 return 0;
2205 return s390_check_qrst_address (str[1], XEXP (op, 0), false);
2206 case 'Q':
2207 case 'R':
2208 case 'S':
2209 case 'T':
2210 if (GET_CODE (op) != MEM)
2211 return 0;
2212 return s390_check_qrst_address (c, XEXP (op, 0), true);
2213 case 'U':
2214 return (s390_check_qrst_address ('Q', op, true)
2215 || s390_check_qrst_address ('R', op, true));
2216 case 'W':
2217 return (s390_check_qrst_address ('S', op, true)
2218 || s390_check_qrst_address ('T', op, true));
2219 case 'Y':
2220 /* Simply check for the basic form of a shift count. Reload will
2221 take care of making sure we have a proper base register. */
2222 if (!s390_decompose_shift_count (op, NULL, NULL))
2223 return 0;
2224 break;
2225 case 'Z':
2226 return s390_check_qrst_address (str[1], op, true);
2227 default:
2228 return 0;
2229 }
2230 return 1;
2231 }
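/* Usage sketch (illustrative only): a constraint string such as "AQ"
   accepts an offsettable, non-volatile MEM whose address has no index and
   a short displacement, "BQ" accepts a MEM with such an address but
   rejects literal pool references, and "ZQ" applies the same address
   check directly to an address operand instead of a MEM.  */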
2232
2233
2234 /* Evaluates constraint strings starting with letter O. Input
2235 parameter C is the letter that follows the "O" in the constraint
2236 string. Returns 1 if VALUE meets the respective constraint and 0
2237 otherwise. */
2238
2239 int
2240 s390_O_constraint_str (const char c, HOST_WIDE_INT value)
2241 {
2242 if (!TARGET_EXTIMM)
2243 return 0;
2244
2245 switch (c)
2246 {
2247 case 's':
2248 return trunc_int_for_mode (value, SImode) == value;
2249
2250 case 'p':
2251 return value == 0
2252 || s390_single_part (GEN_INT (value), DImode, SImode, 0) == 1;
2253
2254 case 'n':
2255 return s390_single_part (GEN_INT (value - 1), DImode, SImode, -1) == 1;
2256
2257 default:
2258 gcc_unreachable ();
2259 }
2260 }
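/* Example (illustrative only): with TARGET_EXTIMM, "Os" accepts any VALUE
   that survives truncation to SImode unchanged, e.g. 0x7fffffff or -1,
   but rejects values such as ((HOST_WIDE_INT) 1 << 32) that need more
   than 32 bits.  */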
2261
2262
2263 /* Evaluates constraint strings starting with letter N. Parameter STR
2264 contains the letters following letter "N" in the constraint string.
2265 Returns true if VALUE matches the constraint. */
2266
2267 int
2268 s390_N_constraint_str (const char *str, HOST_WIDE_INT value)
2269 {
2270 enum machine_mode mode, part_mode;
2271 int def;
2272 int part, part_goal;
2273
2274
2275 if (str[0] == 'x')
2276 part_goal = -1;
2277 else
2278 part_goal = str[0] - '0';
2279
2280 switch (str[1])
2281 {
2282 case 'Q':
2283 part_mode = QImode;
2284 break;
2285 case 'H':
2286 part_mode = HImode;
2287 break;
2288 case 'S':
2289 part_mode = SImode;
2290 break;
2291 default:
2292 return 0;
2293 }
2294
2295 switch (str[2])
2296 {
2297 case 'H':
2298 mode = HImode;
2299 break;
2300 case 'S':
2301 mode = SImode;
2302 break;
2303 case 'D':
2304 mode = DImode;
2305 break;
2306 default:
2307 return 0;
2308 }
2309
2310 switch (str[3])
2311 {
2312 case '0':
2313 def = 0;
2314 break;
2315 case 'F':
2316 def = -1;
2317 break;
2318 default:
2319 return 0;
2320 }
2321
2322 if (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (part_mode))
2323 return 0;
2324
2325 part = s390_single_part (GEN_INT (value), mode, part_mode, def);
2326 if (part < 0)
2327 return 0;
2328 if (part_goal != -1 && part_goal != part)
2329 return 0;
2330
2331 return 1;
2332 }
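/* Example (illustrative only): the letters "3HD0" following "N" are parsed
   as part_goal == 3, part_mode == HImode, mode == DImode and def == 0,
   i.e. VALUE must be a DImode constant whose halfwords are all zero except
   for the one s390_single_part numbers as 3.  A leading "x" accepts any
   single deviating part.  */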
2333
2334
2335 /* Returns true if the input parameter VALUE is a float zero. */
2336
2337 int
2338 s390_float_const_zero_p (rtx value)
2339 {
2340 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
2341 && value == CONST0_RTX (GET_MODE (value)));
2342 }
2343
2344 /* Implement TARGET_REGISTER_MOVE_COST. */
2345
2346 static int
2347 s390_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2348 reg_class_t from, reg_class_t to)
2349 {
2350 /* On s390, copy between fprs and gprs is expensive. */
2351 if ((reg_classes_intersect_p (from, GENERAL_REGS)
2352 && reg_classes_intersect_p (to, FP_REGS))
2353 || (reg_classes_intersect_p (from, FP_REGS)
2354 && reg_classes_intersect_p (to, GENERAL_REGS)))
2355 return 10;
2356
2357 return 1;
2358 }
2359
2360 /* Implement TARGET_MEMORY_MOVE_COST. */
2361
2362 static int
2363 s390_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2364 reg_class_t rclass ATTRIBUTE_UNUSED,
2365 bool in ATTRIBUTE_UNUSED)
2366 {
2367 return 1;
2368 }
2369
2370 /* Compute a (partial) cost for rtx X. Return true if the complete
2371 cost has been computed, and false if subexpressions should be
2372 scanned. In either case, *TOTAL contains the cost result.
2373 CODE contains GET_CODE (x), OUTER_CODE contains the code
2374 of the superexpression of x. */
2375
2376 static bool
2377 s390_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
2378 int *total, bool speed ATTRIBUTE_UNUSED)
2379 {
2380 switch (code)
2381 {
2382 case CONST:
2383 case CONST_INT:
2384 case LABEL_REF:
2385 case SYMBOL_REF:
2386 case CONST_DOUBLE:
2387 case MEM:
2388 *total = 0;
2389 return true;
2390
2391 case ASHIFT:
2392 case ASHIFTRT:
2393 case LSHIFTRT:
2394 case ROTATE:
2395 case ROTATERT:
2396 case AND:
2397 case IOR:
2398 case XOR:
2399 case NEG:
2400 case NOT:
2401 *total = COSTS_N_INSNS (1);
2402 return false;
2403
2404 case PLUS:
2405 case MINUS:
2406 *total = COSTS_N_INSNS (1);
2407 return false;
2408
2409 case MULT:
2410 switch (GET_MODE (x))
2411 {
2412 case SImode:
2413 {
2414 rtx left = XEXP (x, 0);
2415 rtx right = XEXP (x, 1);
2416 if (GET_CODE (right) == CONST_INT
2417 && CONST_OK_FOR_K (INTVAL (right)))
2418 *total = s390_cost->mhi;
2419 else if (GET_CODE (left) == SIGN_EXTEND)
2420 *total = s390_cost->mh;
2421 else
2422 *total = s390_cost->ms; /* msr, ms, msy */
2423 break;
2424 }
2425 case DImode:
2426 {
2427 rtx left = XEXP (x, 0);
2428 rtx right = XEXP (x, 1);
2429 if (TARGET_ZARCH)
2430 {
2431 if (GET_CODE (right) == CONST_INT
2432 && CONST_OK_FOR_K (INTVAL (right)))
2433 *total = s390_cost->mghi;
2434 else if (GET_CODE (left) == SIGN_EXTEND)
2435 *total = s390_cost->msgf;
2436 else
2437 *total = s390_cost->msg; /* msgr, msg */
2438 }
2439 else /* TARGET_31BIT */
2440 {
2441 if (GET_CODE (left) == SIGN_EXTEND
2442 && GET_CODE (right) == SIGN_EXTEND)
2443 /* mulsidi case: mr, m */
2444 *total = s390_cost->m;
2445 else if (GET_CODE (left) == ZERO_EXTEND
2446 && GET_CODE (right) == ZERO_EXTEND
2447 && TARGET_CPU_ZARCH)
2448 /* umulsidi case: ml, mlr */
2449 *total = s390_cost->ml;
2450 else
2451 /* Complex calculation is required. */
2452 *total = COSTS_N_INSNS (40);
2453 }
2454 break;
2455 }
2456 case SFmode:
2457 case DFmode:
2458 *total = s390_cost->mult_df;
2459 break;
2460 case TFmode:
2461 *total = s390_cost->mxbr;
2462 break;
2463 default:
2464 return false;
2465 }
2466 return false;
2467
2468 case FMA:
2469 switch (GET_MODE (x))
2470 {
2471 case DFmode:
2472 *total = s390_cost->madbr;
2473 break;
2474 case SFmode:
2475 *total = s390_cost->maebr;
2476 break;
2477 default:
2478 return false;
2479 }
2480 /* Negation of the third argument is free: FMSUB. */
2481 if (GET_CODE (XEXP (x, 2)) == NEG)
2482 {
2483 *total += (rtx_cost (XEXP (x, 0), FMA, 0, speed)
2484 + rtx_cost (XEXP (x, 1), FMA, 1, speed)
2485 + rtx_cost (XEXP (XEXP (x, 2), 0), FMA, 2, speed));
2486 return true;
2487 }
2488 return false;
2489
2490 case UDIV:
2491 case UMOD:
2492 if (GET_MODE (x) == TImode) /* 128 bit division */
2493 *total = s390_cost->dlgr;
2494 else if (GET_MODE (x) == DImode)
2495 {
2496 rtx right = XEXP (x, 1);
2497 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
2498 *total = s390_cost->dlr;
2499 else /* 64 by 64 bit division */
2500 *total = s390_cost->dlgr;
2501 }
2502 else if (GET_MODE (x) == SImode) /* 32 bit division */
2503 *total = s390_cost->dlr;
2504 return false;
2505
2506 case DIV:
2507 case MOD:
2508 if (GET_MODE (x) == DImode)
2509 {
2510 rtx right = XEXP (x, 1);
2511 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
2512 if (TARGET_ZARCH)
2513 *total = s390_cost->dsgfr;
2514 else
2515 *total = s390_cost->dr;
2516 else /* 64 by 64 bit division */
2517 *total = s390_cost->dsgr;
2518 }
2519 else if (GET_MODE (x) == SImode) /* 32 bit division */
2520 *total = s390_cost->dlr;
2521 else if (GET_MODE (x) == SFmode)
2522 {
2523 *total = s390_cost->debr;
2524 }
2525 else if (GET_MODE (x) == DFmode)
2526 {
2527 *total = s390_cost->ddbr;
2528 }
2529 else if (GET_MODE (x) == TFmode)
2530 {
2531 *total = s390_cost->dxbr;
2532 }
2533 return false;
2534
2535 case SQRT:
2536 if (GET_MODE (x) == SFmode)
2537 *total = s390_cost->sqebr;
2538 else if (GET_MODE (x) == DFmode)
2539 *total = s390_cost->sqdbr;
2540 else /* TFmode */
2541 *total = s390_cost->sqxbr;
2542 return false;
2543
2544 case SIGN_EXTEND:
2545 case ZERO_EXTEND:
2546 if (outer_code == MULT || outer_code == DIV || outer_code == MOD
2547 || outer_code == PLUS || outer_code == MINUS
2548 || outer_code == COMPARE)
2549 *total = 0;
2550 return false;
2551
2552 case COMPARE:
2553 *total = COSTS_N_INSNS (1);
2554 if (GET_CODE (XEXP (x, 0)) == AND
2555 && GET_CODE (XEXP (x, 1)) == CONST_INT
2556 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
2557 {
2558 rtx op0 = XEXP (XEXP (x, 0), 0);
2559 rtx op1 = XEXP (XEXP (x, 0), 1);
2560 rtx op2 = XEXP (x, 1);
2561
2562 if (memory_operand (op0, GET_MODE (op0))
2563 && s390_tm_ccmode (op1, op2, 0) != VOIDmode)
2564 return true;
2565 if (register_operand (op0, GET_MODE (op0))
2566 && s390_tm_ccmode (op1, op2, 1) != VOIDmode)
2567 return true;
2568 }
2569 return false;
2570
2571 default:
2572 return false;
2573 }
2574 }
2575
2576 /* Return the cost of an address rtx ADDR. */
2577
2578 static int
2579 s390_address_cost (rtx addr, bool speed ATTRIBUTE_UNUSED)
2580 {
2581 struct s390_address ad;
2582 if (!s390_decompose_address (addr, &ad))
2583 return 1000;
2584
2585 return ad.indx ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (1);
2586 }
2587
2588 /* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
2589 otherwise return 0. */
2590
2591 int
2592 tls_symbolic_operand (rtx op)
2593 {
2594 if (GET_CODE (op) != SYMBOL_REF)
2595 return 0;
2596 return SYMBOL_REF_TLS_MODEL (op);
2597 }
2598 \f
2599 /* Split DImode access register reference REG (on 64-bit) into its constituent
2600 low and high parts, and store them into LO and HI. Note that gen_lowpart/
2601 gen_highpart cannot be used as they assume all registers are word-sized,
2602 while our access registers have only half that size. */
2603
2604 void
2605 s390_split_access_reg (rtx reg, rtx *lo, rtx *hi)
2606 {
2607 gcc_assert (TARGET_64BIT);
2608 gcc_assert (ACCESS_REG_P (reg));
2609 gcc_assert (GET_MODE (reg) == DImode);
2610 gcc_assert (!(REGNO (reg) & 1));
2611
2612 *lo = gen_rtx_REG (SImode, REGNO (reg) + 1);
2613 *hi = gen_rtx_REG (SImode, REGNO (reg));
2614 }
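/* Example (illustrative only): since REGNO (reg) is required to be even,
   the high SImode half always ends up in the first register of the access
   register pair and the low half in the following one, e.g. a DImode value
   in %a0/%a1 is split into *HI == %a0 and *LO == %a1.  */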
2615
2616 /* Return true if OP contains a symbol reference. */
2617
2618 bool
2619 symbolic_reference_mentioned_p (rtx op)
2620 {
2621 const char *fmt;
2622 int i;
2623
2624 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
2625 return 1;
2626
2627 fmt = GET_RTX_FORMAT (GET_CODE (op));
2628 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2629 {
2630 if (fmt[i] == 'E')
2631 {
2632 int j;
2633
2634 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2635 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2636 return 1;
2637 }
2638
2639 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
2640 return 1;
2641 }
2642
2643 return 0;
2644 }
2645
2646 /* Return true if OP contains a reference to a thread-local symbol. */
2647
2648 bool
2649 tls_symbolic_reference_mentioned_p (rtx op)
2650 {
2651 const char *fmt;
2652 int i;
2653
2654 if (GET_CODE (op) == SYMBOL_REF)
2655 return tls_symbolic_operand (op);
2656
2657 fmt = GET_RTX_FORMAT (GET_CODE (op));
2658 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2659 {
2660 if (fmt[i] == 'E')
2661 {
2662 int j;
2663
2664 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2665 if (tls_symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2666 return true;
2667 }
2668
2669 else if (fmt[i] == 'e' && tls_symbolic_reference_mentioned_p (XEXP (op, i)))
2670 return true;
2671 }
2672
2673 return false;
2674 }
2675
2676
2677 /* Return true if OP is a legitimate general operand when
2678 generating PIC code. It is given that flag_pic is on
2679 and that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2680
2681 int
2682 legitimate_pic_operand_p (rtx op)
2683 {
2684 /* Accept all non-symbolic constants. */
2685 if (!SYMBOLIC_CONST (op))
2686 return 1;
2687
2688 /* Reject everything else; must be handled
2689 via emit_symbolic_move. */
2690 return 0;
2691 }
2692
2693 /* Returns true if the constant value OP is a legitimate general operand.
2694 It is given that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2695
2696 static bool
2697 s390_legitimate_constant_p (enum machine_mode mode, rtx op)
2698 {
2699 /* Accept all non-symbolic constants. */
2700 if (!SYMBOLIC_CONST (op))
2701 return 1;
2702
2703 /* Accept immediate LARL operands. */
2704 if (TARGET_CPU_ZARCH && larl_operand (op, mode))
2705 return 1;
2706
2707 /* Thread-local symbols are never legal constants. This is
2708 so that emit_call knows that computing such addresses
2709 might require a function call. */
2710 if (TLS_SYMBOLIC_CONST (op))
2711 return 0;
2712
2713 /* In the PIC case, symbolic constants must *not* be
2714 forced into the literal pool. We accept them here,
2715 so that they will be handled by emit_symbolic_move. */
2716 if (flag_pic)
2717 return 1;
2718
2719 /* All remaining non-PIC symbolic constants are
2720 forced into the literal pool. */
2721 return 0;
2722 }
2723
2724 /* Determine if it's legal to put X into the constant pool. This
2725 is not possible if X contains the address of a symbol that is
2726 not constant (TLS) or not known at final link time (PIC). */
2727
2728 static bool
2729 s390_cannot_force_const_mem (enum machine_mode mode, rtx x)
2730 {
2731 switch (GET_CODE (x))
2732 {
2733 case CONST_INT:
2734 case CONST_DOUBLE:
2735 /* Accept all non-symbolic constants. */
2736 return false;
2737
2738 case LABEL_REF:
2739 /* Labels are OK iff we are non-PIC. */
2740 return flag_pic != 0;
2741
2742 case SYMBOL_REF:
2743 /* 'Naked' TLS symbol references are never OK,
2744 non-TLS symbols are OK iff we are non-PIC. */
2745 if (tls_symbolic_operand (x))
2746 return true;
2747 else
2748 return flag_pic != 0;
2749
2750 case CONST:
2751 return s390_cannot_force_const_mem (mode, XEXP (x, 0));
2752 case PLUS:
2753 case MINUS:
2754 return s390_cannot_force_const_mem (mode, XEXP (x, 0))
2755 || s390_cannot_force_const_mem (mode, XEXP (x, 1));
2756
2757 case UNSPEC:
2758 switch (XINT (x, 1))
2759 {
2760 /* Only lt-relative or GOT-relative UNSPECs are OK. */
2761 case UNSPEC_LTREL_OFFSET:
2762 case UNSPEC_GOT:
2763 case UNSPEC_GOTOFF:
2764 case UNSPEC_PLTOFF:
2765 case UNSPEC_TLSGD:
2766 case UNSPEC_TLSLDM:
2767 case UNSPEC_NTPOFF:
2768 case UNSPEC_DTPOFF:
2769 case UNSPEC_GOTNTPOFF:
2770 case UNSPEC_INDNTPOFF:
2771 return false;
2772
2773 /* If the literal pool shares the code section, execute-template
2774 placeholders may be put into the pool as well. */
2775 case UNSPEC_INSN:
2776 return TARGET_CPU_ZARCH;
2777
2778 default:
2779 return true;
2780 }
2781 break;
2782
2783 default:
2784 gcc_unreachable ();
2785 }
2786 }
2787
2788 /* Returns true if the constant value OP is a legitimate general
2789 operand during and after reload. The difference to
2790 legitimate_constant_p is that this function will not accept
2791 a constant that would need to be forced to the literal pool
2792 before it can be used as operand.
2793 This function accepts all constants which can be loaded directly
2794 into a GPR. */
2795
2796 bool
2797 legitimate_reload_constant_p (rtx op)
2798 {
2799 /* Accept la(y) operands. */
2800 if (GET_CODE (op) == CONST_INT
2801 && DISP_IN_RANGE (INTVAL (op)))
2802 return true;
2803
2804 /* Accept l(g)hi/l(g)fi operands. */
2805 if (GET_CODE (op) == CONST_INT
2806 && (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_Os (INTVAL (op))))
2807 return true;
2808
2809 /* Accept lliXX operands. */
2810 if (TARGET_ZARCH
2811 && GET_CODE (op) == CONST_INT
2812 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2813 && s390_single_part (op, word_mode, HImode, 0) >= 0)
2814 return true;
2815
2816 if (TARGET_EXTIMM
2817 && GET_CODE (op) == CONST_INT
2818 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2819 && s390_single_part (op, word_mode, SImode, 0) >= 0)
2820 return true;
2821
2822 /* Accept larl operands. */
2823 if (TARGET_CPU_ZARCH
2824 && larl_operand (op, VOIDmode))
2825 return true;
2826
2827 /* Accept floating-point zero operands that fit into a single GPR. */
2828 if (GET_CODE (op) == CONST_DOUBLE
2829 && s390_float_const_zero_p (op)
2830 && GET_MODE_SIZE (GET_MODE (op)) <= UNITS_PER_WORD)
2831 return true;
2832
2833 /* Accept double-word operands that can be split. */
2834 if (GET_CODE (op) == CONST_INT
2835 && trunc_int_for_mode (INTVAL (op), word_mode) != INTVAL (op))
2836 {
2837 enum machine_mode dword_mode = word_mode == SImode ? DImode : TImode;
2838 rtx hi = operand_subword (op, 0, 0, dword_mode);
2839 rtx lo = operand_subword (op, 1, 0, dword_mode);
2840 return legitimate_reload_constant_p (hi)
2841 && legitimate_reload_constant_p (lo);
2842 }
2843
2844 /* Everything else cannot be handled without reload. */
2845 return false;
2846 }
2847
2848 /* Returns true if the constant value OP is a legitimate fp operand
2849 during and after reload.
2850 This function accepts all constants which can be loaded directly
2851 into an FPR. */
2852
2853 static bool
2854 legitimate_reload_fp_constant_p (rtx op)
2855 {
2856 /* Accept floating-point zero operands if the load zero instruction
2857 can be used. */
2858 if (TARGET_Z196
2859 && GET_CODE (op) == CONST_DOUBLE
2860 && s390_float_const_zero_p (op))
2861 return true;
2862
2863 return false;
2864 }
2865
2866 /* Given an rtx OP being reloaded into a reg required to be in class RCLASS,
2867 return the class of reg to actually use. */
2868
2869 static reg_class_t
2870 s390_preferred_reload_class (rtx op, reg_class_t rclass)
2871 {
2872 switch (GET_CODE (op))
2873 {
2874 /* Constants we cannot reload into general registers
2875 must be forced into the literal pool. */
2876 case CONST_DOUBLE:
2877 case CONST_INT:
2878 if (reg_class_subset_p (GENERAL_REGS, rclass)
2879 && legitimate_reload_constant_p (op))
2880 return GENERAL_REGS;
2881 else if (reg_class_subset_p (ADDR_REGS, rclass)
2882 && legitimate_reload_constant_p (op))
2883 return ADDR_REGS;
2884 else if (reg_class_subset_p (FP_REGS, rclass)
2885 && legitimate_reload_fp_constant_p (op))
2886 return FP_REGS;
2887 return NO_REGS;
2888
2889 /* If a symbolic constant or a PLUS is reloaded,
2890 it is most likely being used as an address, so
2891 prefer ADDR_REGS. If RCLASS is not a superset
2892 of ADDR_REGS, e.g. FP_REGS, reject this reload. */
2893 case LABEL_REF:
2894 case SYMBOL_REF:
2895 case CONST:
2896 if (!legitimate_reload_constant_p (op))
2897 return NO_REGS;
2898 /* fallthrough */
2899 case PLUS:
2900 /* load address will be used. */
2901 if (reg_class_subset_p (ADDR_REGS, rclass))
2902 return ADDR_REGS;
2903 else
2904 return NO_REGS;
2905
2906 default:
2907 break;
2908 }
2909
2910 return rclass;
2911 }
2912
2913 /* Return true if ADDR is SYMBOL_REF + addend with addend being a
2914 multiple of ALIGNMENT and the SYMBOL_REF being naturally
2915 aligned. */
2916
2917 bool
2918 s390_check_symref_alignment (rtx addr, HOST_WIDE_INT alignment)
2919 {
2920 HOST_WIDE_INT addend;
2921 rtx symref;
2922
2923 if (!s390_symref_operand_p (addr, &symref, &addend))
2924 return false;
2925
2926 return (!SYMBOL_REF_NOT_NATURALLY_ALIGNED_P (symref)
2927 && !(addend & (alignment - 1)));
2928 }
2929
2930 /* ADDR is moved into REG using larl. If ADDR isn't a valid larl
2931 operand, SCRATCH is used to load the even part of the address,
2932 and one is then added. */
2933
2934 void
2935 s390_reload_larl_operand (rtx reg, rtx addr, rtx scratch)
2936 {
2937 HOST_WIDE_INT addend;
2938 rtx symref;
2939
2940 if (!s390_symref_operand_p (addr, &symref, &addend))
2941 gcc_unreachable ();
2942
2943 if (!(addend & 1))
2944 /* Easy case. The addend is even so larl will do fine. */
2945 emit_move_insn (reg, addr);
2946 else
2947 {
2948 /* We can leave the scratch register untouched if the target
2949 register is a valid base register. */
2950 if (REGNO (reg) < FIRST_PSEUDO_REGISTER
2951 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS)
2952 scratch = reg;
2953
2954 gcc_assert (REGNO (scratch) < FIRST_PSEUDO_REGISTER);
2955 gcc_assert (REGNO_REG_CLASS (REGNO (scratch)) == ADDR_REGS);
2956
2957 if (addend != 1)
2958 emit_move_insn (scratch,
2959 gen_rtx_CONST (Pmode,
2960 gen_rtx_PLUS (Pmode, symref,
2961 GEN_INT (addend - 1))));
2962 else
2963 emit_move_insn (scratch, symref);
2964
2965 /* Increment the address using la in order to avoid clobbering cc. */
2966 emit_move_insn (reg, gen_rtx_PLUS (Pmode, scratch, const1_rtx));
2967 }
2968 }
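/* Worked example (illustrative only): reloading SYM + 5, whose addend is
   odd, first loads the even constant SYM + 4 into the scratch (or target)
   register via larl and then adds the remaining 1 with la, leaving the
   condition code untouched.  */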
2969
2970 /* Generate what is necessary to move between REG and MEM using
2971 SCRATCH. The direction is given by TOMEM. */
2972
2973 void
2974 s390_reload_symref_address (rtx reg, rtx mem, rtx scratch, bool tomem)
2975 {
2976 /* Reload might have pulled a constant out of the literal pool.
2977 Force it back in. */
2978 if (CONST_INT_P (mem) || GET_CODE (mem) == CONST_DOUBLE
2979 || GET_CODE (mem) == CONST)
2980 mem = force_const_mem (GET_MODE (reg), mem);
2981
2982 gcc_assert (MEM_P (mem));
2983
2984 /* For a load from memory we can leave the scratch register
2985 untouched if the target register is a valid base register. */
2986 if (!tomem
2987 && REGNO (reg) < FIRST_PSEUDO_REGISTER
2988 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS
2989 && GET_MODE (reg) == GET_MODE (scratch))
2990 scratch = reg;
2991
2992 /* Load address into scratch register. Since we can't have a
2993 secondary reload for a secondary reload we have to cover the case
2994 where larl would need a secondary reload here as well. */
2995 s390_reload_larl_operand (scratch, XEXP (mem, 0), scratch);
2996
2997 /* Now we can use a standard load/store to do the move. */
2998 if (tomem)
2999 emit_move_insn (replace_equiv_address (mem, scratch), reg);
3000 else
3001 emit_move_insn (reg, replace_equiv_address (mem, scratch));
3002 }
3003
3004 /* Inform reload about cases where moving X with a mode MODE to a register in
3005 RCLASS requires an extra scratch or immediate register. Return the class
3006 needed for the immediate register. */
3007
3008 static reg_class_t
3009 s390_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
3010 enum machine_mode mode, secondary_reload_info *sri)
3011 {
3012 enum reg_class rclass = (enum reg_class) rclass_i;
3013
3014 /* Intermediate register needed. */
3015 if (reg_classes_intersect_p (CC_REGS, rclass))
3016 return GENERAL_REGS;
3017
3018 if (TARGET_Z10)
3019 {
3020 HOST_WIDE_INT offset;
3021 rtx symref;
3022
3023 /* On z10 several optimizer steps may generate larl operands with
3024 an odd addend. */
3025 if (in_p
3026 && s390_symref_operand_p (x, &symref, &offset)
3027 && mode == Pmode
3028 && !SYMBOL_REF_ALIGN1_P (symref)
3029 && (offset & 1) == 1)
3030 sri->icode = ((mode == DImode) ? CODE_FOR_reloaddi_larl_odd_addend_z10
3031 : CODE_FOR_reloadsi_larl_odd_addend_z10);
3032
3033 /* On z10 we need a scratch register when moving QI, TI or floating
3034 point mode values from or to a memory location with a SYMBOL_REF
3035 or if the symref addend of a SI or DI move is not aligned to the
3036 width of the access. */
3037 if (MEM_P (x)
3038 && s390_symref_operand_p (XEXP (x, 0), NULL, NULL)
3039 && (mode == QImode || mode == TImode || FLOAT_MODE_P (mode)
3040 || (!TARGET_ZARCH && mode == DImode)
3041 || ((mode == HImode || mode == SImode || mode == DImode)
3042 && (!s390_check_symref_alignment (XEXP (x, 0),
3043 GET_MODE_SIZE (mode))))))
3044 {
3045 #define __SECONDARY_RELOAD_CASE(M,m) \
3046 case M##mode: \
3047 if (TARGET_64BIT) \
3048 sri->icode = in_p ? CODE_FOR_reload##m##di_toreg_z10 : \
3049 CODE_FOR_reload##m##di_tomem_z10; \
3050 else \
3051 sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 : \
3052 CODE_FOR_reload##m##si_tomem_z10; \
3053 break;
3054
3055 switch (GET_MODE (x))
3056 {
3057 __SECONDARY_RELOAD_CASE (QI, qi);
3058 __SECONDARY_RELOAD_CASE (HI, hi);
3059 __SECONDARY_RELOAD_CASE (SI, si);
3060 __SECONDARY_RELOAD_CASE (DI, di);
3061 __SECONDARY_RELOAD_CASE (TI, ti);
3062 __SECONDARY_RELOAD_CASE (SF, sf);
3063 __SECONDARY_RELOAD_CASE (DF, df);
3064 __SECONDARY_RELOAD_CASE (TF, tf);
3065 __SECONDARY_RELOAD_CASE (SD, sd);
3066 __SECONDARY_RELOAD_CASE (DD, dd);
3067 __SECONDARY_RELOAD_CASE (TD, td);
3068
3069 default:
3070 gcc_unreachable ();
3071 }
3072 #undef __SECONDARY_RELOAD_CASE
3073 }
3074 }
3075
3076 /* We need a scratch register when loading a PLUS expression which
3077 is not a legitimate operand of the LOAD ADDRESS instruction. */
3078 if (in_p && s390_plus_operand (x, mode))
3079 sri->icode = (TARGET_64BIT ?
3080 CODE_FOR_reloaddi_plus : CODE_FOR_reloadsi_plus);
3081
3082 /* When performing a multiword move from or to memory, we have to make sure the
3083 second chunk in memory is addressable without causing a displacement
3084 overflow. If that would be the case we calculate the address in
3085 a scratch register. */
3086 if (MEM_P (x)
3087 && GET_CODE (XEXP (x, 0)) == PLUS
3088 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3089 && !DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1))
3090 + GET_MODE_SIZE (mode) - 1))
3091 {
3092 /* For GENERAL_REGS a displacement overflow is no problem if occurring
3093 in an s_operand address since we may fall back to lm/stm. So we only
3094 have to care about overflows in the b+i+d case. */
3095 if ((reg_classes_intersect_p (GENERAL_REGS, rclass)
3096 && s390_class_max_nregs (GENERAL_REGS, mode) > 1
3097 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
3098 /* For FP_REGS no lm/stm is available so this check is triggered
3099 for displacement overflows in b+i+d and b+d like addresses. */
3100 || (reg_classes_intersect_p (FP_REGS, rclass)
3101 && s390_class_max_nregs (FP_REGS, mode) > 1))
3102 {
3103 if (in_p)
3104 sri->icode = (TARGET_64BIT ?
3105 CODE_FOR_reloaddi_nonoffmem_in :
3106 CODE_FOR_reloadsi_nonoffmem_in);
3107 else
3108 sri->icode = (TARGET_64BIT ?
3109 CODE_FOR_reloaddi_nonoffmem_out :
3110 CODE_FOR_reloadsi_nonoffmem_out);
3111 }
3112 }
3113
3114 /* A scratch address register is needed when a symbolic constant is
3115 copied to r0 when compiling with -fPIC. In other cases the target
3116 register might be used as temporary (see legitimize_pic_address). */
3117 if (in_p && SYMBOLIC_CONST (x) && flag_pic == 2 && rclass != ADDR_REGS)
3118 sri->icode = (TARGET_64BIT ?
3119 CODE_FOR_reloaddi_PIC_addr :
3120 CODE_FOR_reloadsi_PIC_addr);
3121
3122 /* Either scratch or no register needed. */
3123 return NO_REGS;
3124 }
3125
3126 /* Generate code to load SRC, which is a PLUS that is not a
3127 legitimate operand for the LA instruction, into TARGET.
3128 SCRATCH may be used as scratch register. */
3129
3130 void
3131 s390_expand_plus_operand (rtx target, rtx src,
3132 rtx scratch)
3133 {
3134 rtx sum1, sum2;
3135 struct s390_address ad;
3136
3137 /* src must be a PLUS; get its two operands. */
3138 gcc_assert (GET_CODE (src) == PLUS);
3139 gcc_assert (GET_MODE (src) == Pmode);
3140
3141 /* Check if any of the two operands is already scheduled
3142 for replacement by reload. This can happen e.g. when
3143 float registers occur in an address. */
3144 sum1 = find_replacement (&XEXP (src, 0));
3145 sum2 = find_replacement (&XEXP (src, 1));
3146 src = gen_rtx_PLUS (Pmode, sum1, sum2);
3147
3148 /* If the address is already strictly valid, there's nothing to do. */
3149 if (!s390_decompose_address (src, &ad)
3150 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
3151 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
3152 {
3153 /* Otherwise, one of the operands cannot be an address register;
3154 we reload its value into the scratch register. */
3155 if (true_regnum (sum1) < 1 || true_regnum (sum1) > 15)
3156 {
3157 emit_move_insn (scratch, sum1);
3158 sum1 = scratch;
3159 }
3160 if (true_regnum (sum2) < 1 || true_regnum (sum2) > 15)
3161 {
3162 emit_move_insn (scratch, sum2);
3163 sum2 = scratch;
3164 }
3165
3166 /* According to the way these invalid addresses are generated
3167 in reload.c, it should never happen (at least on s390) that
3168 *neither* of the PLUS components, after find_replacement
3169 was applied, is an address register. */
3170 if (sum1 == scratch && sum2 == scratch)
3171 {
3172 debug_rtx (src);
3173 gcc_unreachable ();
3174 }
3175
3176 src = gen_rtx_PLUS (Pmode, sum1, sum2);
3177 }
3178
3179 /* Emit the LOAD ADDRESS pattern. Note that reload of PLUS
3180 is only ever performed on addresses, so we can mark the
3181 sum as legitimate for LA in any case. */
3182 s390_load_address (target, src);
3183 }
3184
3185
3186 /* Return true if ADDR is a valid memory address.
3187 STRICT specifies whether strict register checking applies. */
3188
3189 static bool
3190 s390_legitimate_address_p (enum machine_mode mode, rtx addr, bool strict)
3191 {
3192 struct s390_address ad;
3193
3194 if (TARGET_Z10
3195 && larl_operand (addr, VOIDmode)
3196 && (mode == VOIDmode
3197 || s390_check_symref_alignment (addr, GET_MODE_SIZE (mode))))
3198 return true;
3199
3200 if (!s390_decompose_address (addr, &ad))
3201 return false;
3202
3203 if (strict)
3204 {
3205 if (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
3206 return false;
3207
3208 if (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx)))
3209 return false;
3210 }
3211 else
3212 {
3213 if (ad.base
3214 && !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
3215 || REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
3216 return false;
3217
3218 if (ad.indx
3219 && !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
3220 || REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
3221 return false;
3222 }
3223 return true;
3224 }
3225
3226 /* Return true if OP is a valid operand for the LA instruction.
3227 In 31-bit, we need to prove that the result is used as an
3228 address, as LA performs only a 31-bit addition. */
3229
3230 bool
3231 legitimate_la_operand_p (rtx op)
3232 {
3233 struct s390_address addr;
3234 if (!s390_decompose_address (op, &addr))
3235 return false;
3236
3237 return (TARGET_64BIT || addr.pointer);
3238 }
3239
3240 /* Return true if it is valid *and* preferable to use LA to
3241 compute the sum of OP1 and OP2. */
3242
3243 bool
3244 preferred_la_operand_p (rtx op1, rtx op2)
3245 {
3246 struct s390_address addr;
3247
3248 if (op2 != const0_rtx)
3249 op1 = gen_rtx_PLUS (Pmode, op1, op2);
3250
3251 if (!s390_decompose_address (op1, &addr))
3252 return false;
3253 if (addr.base && !REGNO_OK_FOR_BASE_P (REGNO (addr.base)))
3254 return false;
3255 if (addr.indx && !REGNO_OK_FOR_INDEX_P (REGNO (addr.indx)))
3256 return false;
3257
3258 /* Avoid LA instructions with index register on z196; it is
3259 preferable to use regular add instructions when possible. */
3260 if (addr.indx && s390_tune == PROCESSOR_2817_Z196)
3261 return false;
3262
3263 if (!TARGET_64BIT && !addr.pointer)
3264 return false;
3265
3266 if (addr.pointer)
3267 return true;
3268
3269 if ((addr.base && REG_P (addr.base) && REG_POINTER (addr.base))
3270 || (addr.indx && REG_P (addr.indx) && REG_POINTER (addr.indx)))
3271 return true;
3272
3273 return false;
3274 }
3275
3276 /* Emit a forced load-address operation to load SRC into DST.
3277 This will use the LOAD ADDRESS instruction even in situations
3278 where legitimate_la_operand_p (SRC) returns false. */
3279
3280 void
3281 s390_load_address (rtx dst, rtx src)
3282 {
3283 if (TARGET_64BIT)
3284 emit_move_insn (dst, src);
3285 else
3286 emit_insn (gen_force_la_31 (dst, src));
3287 }
3288
3289 /* Return a legitimate reference for ORIG (an address) using the
3290 register REG. If REG is 0, a new pseudo is generated.
3291
3292 There are two types of references that must be handled:
3293
3294 1. Global data references must load the address from the GOT, via
3295 the PIC reg. An insn is emitted to do this load, and the reg is
3296 returned.
3297
3298 2. Static data references, constant pool addresses, and code labels
3299 compute the address as an offset from the GOT, whose base is in
3300 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
3301 differentiate them from global data objects. The returned
3302 address is the PIC reg + an unspec constant.
3303
3304 TARGET_LEGITIMATE_ADDRESS_P rejects symbolic references unless the PIC
3305 reg also appears in the address. */
3306
3307 rtx
3308 legitimize_pic_address (rtx orig, rtx reg)
3309 {
3310 rtx addr = orig;
3311 rtx new_rtx = orig;
3312 rtx base;
3313
3314 gcc_assert (!TLS_SYMBOLIC_CONST (addr));
3315
3316 if (GET_CODE (addr) == LABEL_REF
3317 || (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (addr)))
3318 {
3319 /* This is a local symbol. */
3320 if (TARGET_CPU_ZARCH && larl_operand (addr, VOIDmode))
3321 {
3322 /* Access local symbols PC-relative via LARL.
3323 This is the same as in the non-PIC case, so it is
3324 handled automatically ... */
3325 }
3326 else
3327 {
3328 /* Access local symbols relative to the GOT. */
3329
3330 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3331
3332 if (reload_in_progress || reload_completed)
3333 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3334
3335 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
3336 addr = gen_rtx_CONST (Pmode, addr);
3337 addr = force_const_mem (Pmode, addr);
3338 emit_move_insn (temp, addr);
3339
3340 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3341 if (reg != 0)
3342 {
3343 s390_load_address (reg, new_rtx);
3344 new_rtx = reg;
3345 }
3346 }
3347 }
3348 else if (GET_CODE (addr) == SYMBOL_REF)
3349 {
3350 if (reg == 0)
3351 reg = gen_reg_rtx (Pmode);
3352
3353 if (flag_pic == 1)
3354 {
3355 /* Assume GOT offset < 4k. This is handled the same way
3356 in both 31- and 64-bit code (@GOT). */
3357
3358 if (reload_in_progress || reload_completed)
3359 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3360
3361 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3362 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3363 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
3364 new_rtx = gen_const_mem (Pmode, new_rtx);
3365 emit_move_insn (reg, new_rtx);
3366 new_rtx = reg;
3367 }
3368 else if (TARGET_CPU_ZARCH)
3369 {
3370 /* If the GOT offset might be >= 4k, we determine the position
3371 of the GOT entry via a PC-relative LARL (@GOTENT). */
3372
3373 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3374
3375 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
3376 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
3377
3378 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
3379 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3380 emit_move_insn (temp, new_rtx);
3381
3382 new_rtx = gen_const_mem (Pmode, temp);
3383 emit_move_insn (reg, new_rtx);
3384 new_rtx = reg;
3385 }
3386 else
3387 {
3388 /* If the GOT offset might be >= 4k, we have to load it
3389 from the literal pool (@GOT). */
3390
3391 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3392
3393 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
3394 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
3395
3396 if (reload_in_progress || reload_completed)
3397 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3398
3399 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3400 addr = gen_rtx_CONST (Pmode, addr);
3401 addr = force_const_mem (Pmode, addr);
3402 emit_move_insn (temp, addr);
3403
3404 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3405 new_rtx = gen_const_mem (Pmode, new_rtx);
3406 emit_move_insn (reg, new_rtx);
3407 new_rtx = reg;
3408 }
3409 }
3410 else
3411 {
3412 if (GET_CODE (addr) == CONST)
3413 {
3414 addr = XEXP (addr, 0);
3415 if (GET_CODE (addr) == UNSPEC)
3416 {
3417 gcc_assert (XVECLEN (addr, 0) == 1);
3418 switch (XINT (addr, 1))
3419 {
3420 /* If someone moved a GOT-relative UNSPEC
3421 out of the literal pool, force them back in. */
3422 case UNSPEC_GOTOFF:
3423 case UNSPEC_PLTOFF:
3424 new_rtx = force_const_mem (Pmode, orig);
3425 break;
3426
3427 /* @GOT is OK as is if small. */
3428 case UNSPEC_GOT:
3429 if (flag_pic == 2)
3430 new_rtx = force_const_mem (Pmode, orig);
3431 break;
3432
3433 /* @GOTENT is OK as is. */
3434 case UNSPEC_GOTENT:
3435 break;
3436
3437 /* @PLT is OK as is on 64-bit but must be converted to
3438 GOT-relative @PLTOFF on 31-bit. */
3439 case UNSPEC_PLT:
3440 if (!TARGET_CPU_ZARCH)
3441 {
3442 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3443
3444 if (reload_in_progress || reload_completed)
3445 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3446
3447 addr = XVECEXP (addr, 0, 0);
3448 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr),
3449 UNSPEC_PLTOFF);
3450 addr = gen_rtx_CONST (Pmode, addr);
3451 addr = force_const_mem (Pmode, addr);
3452 emit_move_insn (temp, addr);
3453
3454 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3455 if (reg != 0)
3456 {
3457 s390_load_address (reg, new_rtx);
3458 new_rtx = reg;
3459 }
3460 }
3461 break;
3462
3463 /* Everything else cannot happen. */
3464 default:
3465 gcc_unreachable ();
3466 }
3467 }
3468 else
3469 gcc_assert (GET_CODE (addr) == PLUS);
3470 }
3471 if (GET_CODE (addr) == PLUS)
3472 {
3473 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
3474
3475 gcc_assert (!TLS_SYMBOLIC_CONST (op0));
3476 gcc_assert (!TLS_SYMBOLIC_CONST (op1));
3477
3478 /* Check first to see if this is a constant offset
3479 from a local symbol reference. */
3480 if ((GET_CODE (op0) == LABEL_REF
3481 || (GET_CODE (op0) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (op0)))
3482 && GET_CODE (op1) == CONST_INT)
3483 {
3484 if (TARGET_CPU_ZARCH
3485 && larl_operand (op0, VOIDmode)
3486 && INTVAL (op1) < (HOST_WIDE_INT)1 << 31
3487 && INTVAL (op1) >= -((HOST_WIDE_INT)1 << 31))
3488 {
3489 if (INTVAL (op1) & 1)
3490 {
3491 /* LARL can't handle odd offsets, so emit a
3492 pair of LARL and LA. */
3493 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3494
3495 if (!DISP_IN_RANGE (INTVAL (op1)))
3496 {
3497 HOST_WIDE_INT even = INTVAL (op1) - 1;
3498 op0 = gen_rtx_PLUS (Pmode, op0, GEN_INT (even));
3499 op0 = gen_rtx_CONST (Pmode, op0);
3500 op1 = const1_rtx;
3501 }
3502
3503 emit_move_insn (temp, op0);
3504 new_rtx = gen_rtx_PLUS (Pmode, temp, op1);
3505
3506 if (reg != 0)
3507 {
3508 s390_load_address (reg, new_rtx);
3509 new_rtx = reg;
3510 }
3511 }
3512 else
3513 {
3514 /* If the offset is even, we can just use LARL.
3515 This will happen automatically. */
3516 }
3517 }
3518 else
3519 {
3520 /* Access local symbols relative to the GOT. */
3521
3522 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3523
3524 if (reload_in_progress || reload_completed)
3525 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3526
3527 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
3528 UNSPEC_GOTOFF);
3529 addr = gen_rtx_PLUS (Pmode, addr, op1);
3530 addr = gen_rtx_CONST (Pmode, addr);
3531 addr = force_const_mem (Pmode, addr);
3532 emit_move_insn (temp, addr);
3533
3534 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3535 if (reg != 0)
3536 {
3537 s390_load_address (reg, new_rtx);
3538 new_rtx = reg;
3539 }
3540 }
3541 }
3542
3543 /* Now, check whether it is a GOT relative symbol plus offset
3544 that was pulled out of the literal pool. Force it back in. */
3545
3546 else if (GET_CODE (op0) == UNSPEC
3547 && GET_CODE (op1) == CONST_INT
3548 && XINT (op0, 1) == UNSPEC_GOTOFF)
3549 {
3550 gcc_assert (XVECLEN (op0, 0) == 1);
3551
3552 new_rtx = force_const_mem (Pmode, orig);
3553 }
3554
3555 /* Otherwise, compute the sum. */
3556 else
3557 {
3558 base = legitimize_pic_address (XEXP (addr, 0), reg);
3559 new_rtx = legitimize_pic_address (XEXP (addr, 1),
3560 base == reg ? NULL_RTX : reg);
3561 if (GET_CODE (new_rtx) == CONST_INT)
3562 new_rtx = plus_constant (base, INTVAL (new_rtx));
3563 else
3564 {
3565 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
3566 {
3567 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
3568 new_rtx = XEXP (new_rtx, 1);
3569 }
3570 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
3571 }
3572
3573 if (GET_CODE (new_rtx) == CONST)
3574 new_rtx = XEXP (new_rtx, 0);
3575 new_rtx = force_operand (new_rtx, 0);
3576 }
3577 }
3578 }
3579 return new_rtx;
3580 }
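/* Summary sketch (illustrative only) of the cases handled above: a local
   symbol that is a valid larl operand is left alone on z/Architecture;
   with flag_pic == 1 a global symbol is loaded through a small @GOT slot
   relative to the PIC register; with flag_pic == 2 on z/Architecture the
   GOT slot address is computed PC-relatively via @GOTENT; otherwise the
   @GOT offset is fetched from the literal pool.  */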
3581
3582 /* Load the thread pointer into a register. */
3583
3584 rtx
3585 s390_get_thread_pointer (void)
3586 {
3587 rtx tp = gen_reg_rtx (Pmode);
3588
3589 emit_move_insn (tp, gen_rtx_REG (Pmode, TP_REGNUM));
3590 mark_reg_pointer (tp, BITS_PER_WORD);
3591
3592 return tp;
3593 }
3594
3595 /* Emit a TLS call insn. The call target is the SYMBOL_REF stored
3596 in s390_tls_symbol, which always refers to __tls_get_offset.
3597 The returned offset is written to RESULT_REG and a USE rtx is
3598 generated for TLS_CALL. */
3599
3600 static GTY(()) rtx s390_tls_symbol;
3601
3602 static void
3603 s390_emit_tls_call_insn (rtx result_reg, rtx tls_call)
3604 {
3605 rtx insn;
3606
3607 if (!flag_pic)
3608 emit_insn (s390_load_got ());
3609
3610 if (!s390_tls_symbol)
3611 s390_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_offset");
3612
3613 insn = s390_emit_call (s390_tls_symbol, tls_call, result_reg,
3614 gen_rtx_REG (Pmode, RETURN_REGNUM));
3615
3616 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), result_reg);
3617 RTL_CONST_CALL_P (insn) = 1;
3618 }
3619
3620 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3621 this (thread-local) address. REG may be used as temporary. */
3622
3623 static rtx
3624 legitimize_tls_address (rtx addr, rtx reg)
3625 {
3626 rtx new_rtx, tls_call, temp, base, r2, insn;
3627
3628 if (GET_CODE (addr) == SYMBOL_REF)
3629 switch (tls_symbolic_operand (addr))
3630 {
3631 case TLS_MODEL_GLOBAL_DYNAMIC:
3632 start_sequence ();
3633 r2 = gen_rtx_REG (Pmode, 2);
3634 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_TLSGD);
3635 new_rtx = gen_rtx_CONST (Pmode, tls_call);
3636 new_rtx = force_const_mem (Pmode, new_rtx);
3637 emit_move_insn (r2, new_rtx);
3638 s390_emit_tls_call_insn (r2, tls_call);
3639 insn = get_insns ();
3640 end_sequence ();
3641
3642 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3643 temp = gen_reg_rtx (Pmode);
3644 emit_libcall_block (insn, temp, r2, new_rtx);
3645
3646 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3647 if (reg != 0)
3648 {
3649 s390_load_address (reg, new_rtx);
3650 new_rtx = reg;
3651 }
3652 break;
3653
3654 case TLS_MODEL_LOCAL_DYNAMIC:
3655 start_sequence ();
3656 r2 = gen_rtx_REG (Pmode, 2);
3657 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM);
3658 new_rtx = gen_rtx_CONST (Pmode, tls_call);
3659 new_rtx = force_const_mem (Pmode, new_rtx);
3660 emit_move_insn (r2, new_rtx);
3661 s390_emit_tls_call_insn (r2, tls_call);
3662 insn = get_insns ();
3663 end_sequence ();
3664
3665 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM_NTPOFF);
3666 temp = gen_reg_rtx (Pmode);
3667 emit_libcall_block (insn, temp, r2, new_rtx);
3668
3669 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3670 base = gen_reg_rtx (Pmode);
3671 s390_load_address (base, new_rtx);
3672
3673 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_DTPOFF);
3674 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3675 new_rtx = force_const_mem (Pmode, new_rtx);
3676 temp = gen_reg_rtx (Pmode);
3677 emit_move_insn (temp, new_rtx);
3678
3679 new_rtx = gen_rtx_PLUS (Pmode, base, temp);
3680 if (reg != 0)
3681 {
3682 s390_load_address (reg, new_rtx);
3683 new_rtx = reg;
3684 }
3685 break;
3686
3687 case TLS_MODEL_INITIAL_EXEC:
3688 if (flag_pic == 1)
3689 {
3690 /* Assume GOT offset < 4k. This is handled the same way
3691 in both 31- and 64-bit code. */
3692
3693 if (reload_in_progress || reload_completed)
3694 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3695
3696 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3697 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3698 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
3699 new_rtx = gen_const_mem (Pmode, new_rtx);
3700 temp = gen_reg_rtx (Pmode);
3701 emit_move_insn (temp, new_rtx);
3702 }
3703 else if (TARGET_CPU_ZARCH)
3704 {
3705 /* If the GOT offset might be >= 4k, we determine the position
3706 of the GOT entry via a PC-relative LARL. */
3707
3708 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3709 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3710 temp = gen_reg_rtx (Pmode);
3711 emit_move_insn (temp, new_rtx);
3712
3713 new_rtx = gen_const_mem (Pmode, temp);
3714 temp = gen_reg_rtx (Pmode);
3715 emit_move_insn (temp, new_rtx);
3716 }
3717 else if (flag_pic)
3718 {
3719 /* If the GOT offset might be >= 4k, we have to load it
3720 from the literal pool. */
3721
3722 if (reload_in_progress || reload_completed)
3723 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3724
3725 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3726 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3727 new_rtx = force_const_mem (Pmode, new_rtx);
3728 temp = gen_reg_rtx (Pmode);
3729 emit_move_insn (temp, new_rtx);
3730
3731 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3732 new_rtx = gen_const_mem (Pmode, new_rtx);
3733
3734 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
3735 temp = gen_reg_rtx (Pmode);
3736 emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
3737 }
3738 else
3739 {
3740 /* In position-dependent code, load the absolute address of
3741 the GOT entry from the literal pool. */
3742
3743 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3744 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3745 new_rtx = force_const_mem (Pmode, new_rtx);
3746 temp = gen_reg_rtx (Pmode);
3747 emit_move_insn (temp, new_rtx);
3748
3749 new_rtx = temp;
3750 new_rtx = gen_const_mem (Pmode, new_rtx);
3751 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
3752 temp = gen_reg_rtx (Pmode);
3753 emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
3754 }
3755
3756 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3757 if (reg != 0)
3758 {
3759 s390_load_address (reg, new_rtx);
3760 new_rtx = reg;
3761 }
3762 break;
3763
3764 case TLS_MODEL_LOCAL_EXEC:
3765 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3766 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3767 new_rtx = force_const_mem (Pmode, new_rtx);
3768 temp = gen_reg_rtx (Pmode);
3769 emit_move_insn (temp, new_rtx);
3770
3771 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3772 if (reg != 0)
3773 {
3774 s390_load_address (reg, new_rtx);
3775 new_rtx = reg;
3776 }
3777 break;
3778
3779 default:
3780 gcc_unreachable ();
3781 }
3782
3783 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == UNSPEC)
3784 {
3785 switch (XINT (XEXP (addr, 0), 1))
3786 {
3787 case UNSPEC_INDNTPOFF:
3788 gcc_assert (TARGET_CPU_ZARCH);
3789 new_rtx = addr;
3790 break;
3791
3792 default:
3793 gcc_unreachable ();
3794 }
3795 }
3796
3797 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
3798 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
3799 {
3800 new_rtx = XEXP (XEXP (addr, 0), 0);
3801 if (GET_CODE (new_rtx) != SYMBOL_REF)
3802 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3803
3804 new_rtx = legitimize_tls_address (new_rtx, reg);
3805 new_rtx = plus_constant (new_rtx, INTVAL (XEXP (XEXP (addr, 0), 1)));
3806 new_rtx = force_operand (new_rtx, 0);
3807 }
3808
3809 else
3810 gcc_unreachable (); /* for now ... */
3811
3812 return new_rtx;
3813 }
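/* Summary sketch (illustrative only) of the models handled above: the
   global-dynamic and local-dynamic models call __tls_get_offset through a
   literal pool constant, initial-exec loads the @GOTNTPOFF or @INDNTPOFF
   slot, and local-exec simply adds the @NTPOFF constant to the thread
   pointer.  */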
3814
3815 /* Emit insns making the address in operands[1] valid for a standard
3816 move to operands[0]. operands[1] is replaced by an address which
3817 should be used instead of the former RTX to emit the move
3818 pattern. */
3819
3820 void
3821 emit_symbolic_move (rtx *operands)
3822 {
3823 rtx temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
3824
3825 if (GET_CODE (operands[0]) == MEM)
3826 operands[1] = force_reg (Pmode, operands[1]);
3827 else if (TLS_SYMBOLIC_CONST (operands[1]))
3828 operands[1] = legitimize_tls_address (operands[1], temp);
3829 else if (flag_pic)
3830 operands[1] = legitimize_pic_address (operands[1], temp);
3831 }
3832
3833 /* Try machine-dependent ways of modifying an illegitimate address X
3834 to be legitimate. If we find one, return the new, valid address.
3835
3836 OLDX is the address as it was before break_out_memory_refs was called.
3837 In some cases it is useful to look at this to decide what needs to be done.
3838
3839 MODE is the mode of the operand pointed to by X.
3840
3841 When -fpic is used, special handling is needed for symbolic references.
3842 See comments by legitimize_pic_address for details. */
3843
3844 static rtx
3845 s390_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
3846 enum machine_mode mode ATTRIBUTE_UNUSED)
3847 {
3848 rtx constant_term = const0_rtx;
3849
3850 if (TLS_SYMBOLIC_CONST (x))
3851 {
3852 x = legitimize_tls_address (x, 0);
3853
3854 if (s390_legitimate_address_p (mode, x, FALSE))
3855 return x;
3856 }
3857 else if (GET_CODE (x) == PLUS
3858 && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
3859 || TLS_SYMBOLIC_CONST (XEXP (x, 1))))
3860 {
3861 return x;
3862 }
3863 else if (flag_pic)
3864 {
3865 if (SYMBOLIC_CONST (x)
3866 || (GET_CODE (x) == PLUS
3867 && (SYMBOLIC_CONST (XEXP (x, 0))
3868 || SYMBOLIC_CONST (XEXP (x, 1)))))
3869 x = legitimize_pic_address (x, 0);
3870
3871 if (s390_legitimate_address_p (mode, x, FALSE))
3872 return x;
3873 }
3874
3875 x = eliminate_constant_term (x, &constant_term);
3876
3877 /* Optimize loading of large displacements by splitting them
3878 into the multiple of 4K and the rest; this allows the
3879 former to be CSE'd if possible.
3880
3881 Don't do this if the displacement is added to a register
3882 pointing into the stack frame, as the offsets will
3883 change later anyway. */
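  /* For illustration (not part of the original comment): a displacement of
     0x12345 splits into lower == (0x12345 & 0xfff) == 0x345 and
     upper == (0x12345 ^ 0x345) == 0x12000; the 4K multiple is loaded into
     a fresh register and only the 12-bit remainder stays in the address.  */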
3884
3885 if (GET_CODE (constant_term) == CONST_INT
3886 && !TARGET_LONG_DISPLACEMENT
3887 && !DISP_IN_RANGE (INTVAL (constant_term))
3888 && !(REG_P (x) && REGNO_PTR_FRAME_P (REGNO (x))))
3889 {
3890 HOST_WIDE_INT lower = INTVAL (constant_term) & 0xfff;
3891 HOST_WIDE_INT upper = INTVAL (constant_term) ^ lower;
3892
3893 rtx temp = gen_reg_rtx (Pmode);
3894 rtx val = force_operand (GEN_INT (upper), temp);
3895 if (val != temp)
3896 emit_move_insn (temp, val);
3897
3898 x = gen_rtx_PLUS (Pmode, x, temp);
3899 constant_term = GEN_INT (lower);
3900 }
3901
3902 if (GET_CODE (x) == PLUS)
3903 {
3904 if (GET_CODE (XEXP (x, 0)) == REG)
3905 {
3906 rtx temp = gen_reg_rtx (Pmode);
3907 rtx val = force_operand (XEXP (x, 1), temp);
3908 if (val != temp)
3909 emit_move_insn (temp, val);
3910
3911 x = gen_rtx_PLUS (Pmode, XEXP (x, 0), temp);
3912 }
3913
3914 else if (GET_CODE (XEXP (x, 1)) == REG)
3915 {
3916 rtx temp = gen_reg_rtx (Pmode);
3917 rtx val = force_operand (XEXP (x, 0), temp);
3918 if (val != temp)
3919 emit_move_insn (temp, val);
3920
3921 x = gen_rtx_PLUS (Pmode, temp, XEXP (x, 1));
3922 }
3923 }
3924
3925 if (constant_term != const0_rtx)
3926 x = gen_rtx_PLUS (Pmode, x, constant_term);
3927
3928 return x;
3929 }
3930
3931 /* Try a machine-dependent way of reloading an illegitimate address AD
3932 operand. If we find one, push the reload and return the new address.
3933
3934 MODE is the mode of the enclosing MEM. OPNUM is the operand number
3935 and TYPE is the reload type of the current reload. */
3936
3937 rtx
3938 legitimize_reload_address (rtx ad, enum machine_mode mode ATTRIBUTE_UNUSED,
3939 int opnum, int type)
3940 {
3941 if (!optimize || TARGET_LONG_DISPLACEMENT)
3942 return NULL_RTX;
3943
3944 if (GET_CODE (ad) == PLUS)
3945 {
3946 rtx tem = simplify_binary_operation (PLUS, Pmode,
3947 XEXP (ad, 0), XEXP (ad, 1));
3948 if (tem)
3949 ad = tem;
3950 }
3951
3952 if (GET_CODE (ad) == PLUS
3953 && GET_CODE (XEXP (ad, 0)) == REG
3954 && GET_CODE (XEXP (ad, 1)) == CONST_INT
3955 && !DISP_IN_RANGE (INTVAL (XEXP (ad, 1))))
3956 {
3957 HOST_WIDE_INT lower = INTVAL (XEXP (ad, 1)) & 0xfff;
3958 HOST_WIDE_INT upper = INTVAL (XEXP (ad, 1)) ^ lower;
3959 rtx cst, tem, new_rtx;
3960
3961 cst = GEN_INT (upper);
3962 if (!legitimate_reload_constant_p (cst))
3963 cst = force_const_mem (Pmode, cst);
3964
3965 tem = gen_rtx_PLUS (Pmode, XEXP (ad, 0), cst);
3966 new_rtx = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));
3967
3968 push_reload (XEXP (tem, 1), 0, &XEXP (tem, 1), 0,
3969 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
3970 opnum, (enum reload_type) type);
3971 return new_rtx;
3972 }
3973
3974 return NULL_RTX;
3975 }
3976
3977 /* Emit code to move LEN bytes from SRC to DST.  */
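/* Illustrative note (added, not part of the original sources): for a
   variable or large LEN the loop expansion below moves the data in
   256-byte blocks plus a remainder, e.g. assuming LEN = 600:
     count  = 600 - 1  = 599
     blocks = 599 >> 8 = 2             two MVCs of 256 bytes each
     rest   = (599 & 0xff) + 1 = 88    moved by the final MVC
   for a total of 2 * 256 + 88 = 600 bytes.  */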
3978
3979 void
3980 s390_expand_movmem (rtx dst, rtx src, rtx len)
3981 {
3982 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
3983 {
3984 if (INTVAL (len) > 0)
3985 emit_insn (gen_movmem_short (dst, src, GEN_INT (INTVAL (len) - 1)));
3986 }
3987
3988 else if (TARGET_MVCLE)
3989 {
3990 emit_insn (gen_movmem_long (dst, src, convert_to_mode (Pmode, len, 1)));
3991 }
3992
3993 else
3994 {
3995 rtx dst_addr, src_addr, count, blocks, temp;
3996 rtx loop_start_label = gen_label_rtx ();
3997 rtx loop_end_label = gen_label_rtx ();
3998 rtx end_label = gen_label_rtx ();
3999 enum machine_mode mode;
4000
4001 mode = GET_MODE (len);
4002 if (mode == VOIDmode)
4003 mode = Pmode;
4004
4005 dst_addr = gen_reg_rtx (Pmode);
4006 src_addr = gen_reg_rtx (Pmode);
4007 count = gen_reg_rtx (mode);
4008 blocks = gen_reg_rtx (mode);
4009
4010 convert_move (count, len, 1);
4011 emit_cmp_and_jump_insns (count, const0_rtx,
4012 EQ, NULL_RTX, mode, 1, end_label);
4013
4014 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
4015 emit_move_insn (src_addr, force_operand (XEXP (src, 0), NULL_RTX));
4016 dst = change_address (dst, VOIDmode, dst_addr);
4017 src = change_address (src, VOIDmode, src_addr);
4018
4019 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4020 OPTAB_DIRECT);
4021 if (temp != count)
4022 emit_move_insn (count, temp);
4023
4024 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4025 OPTAB_DIRECT);
4026 if (temp != blocks)
4027 emit_move_insn (blocks, temp);
4028
4029 emit_cmp_and_jump_insns (blocks, const0_rtx,
4030 EQ, NULL_RTX, mode, 1, loop_end_label);
4031
4032 emit_label (loop_start_label);
4033
4034 if (TARGET_Z10
4035 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 768))
4036 {
4037 rtx prefetch;
4038
4039 /* Issue a read prefetch for the +3 cache line. */
4040 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, src_addr, GEN_INT (768)),
4041 const0_rtx, const0_rtx);
4042 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4043 emit_insn (prefetch);
4044
4045 /* Issue a write prefetch for the +3 cache line. */
4046 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (768)),
4047 const1_rtx, const0_rtx);
4048 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4049 emit_insn (prefetch);
4050 }
4051
4052 emit_insn (gen_movmem_short (dst, src, GEN_INT (255)));
4053 s390_load_address (dst_addr,
4054 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
4055 s390_load_address (src_addr,
4056 gen_rtx_PLUS (Pmode, src_addr, GEN_INT (256)));
4057
4058 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4059 OPTAB_DIRECT);
4060 if (temp != blocks)
4061 emit_move_insn (blocks, temp);
4062
4063 emit_cmp_and_jump_insns (blocks, const0_rtx,
4064 EQ, NULL_RTX, mode, 1, loop_end_label);
4065
4066 emit_jump (loop_start_label);
4067 emit_label (loop_end_label);
4068
4069 emit_insn (gen_movmem_short (dst, src,
4070 convert_to_mode (Pmode, count, 1)));
4071 emit_label (end_label);
4072 }
4073 }
4074
4075 /* Emit code to set LEN bytes at DST to VAL.
4076 Make use of clrmem if VAL is zero. */
4077
4078 void
4079 s390_expand_setmem (rtx dst, rtx len, rtx val)
4080 {
4081 if (GET_CODE (len) == CONST_INT && INTVAL (len) == 0)
4082 return;
4083
4084 gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);
4085
4086 if (GET_CODE (len) == CONST_INT && INTVAL (len) > 0 && INTVAL (len) <= 257)
4087 {
4088 if (val == const0_rtx && INTVAL (len) <= 256)
4089 emit_insn (gen_clrmem_short (dst, GEN_INT (INTVAL (len) - 1)));
4090 else
4091 {
4092 /* Initialize memory by storing the first byte. */
4093 emit_move_insn (adjust_address (dst, QImode, 0), val);
4094
4095 if (INTVAL (len) > 1)
4096 {
4097 /* Initiate 1 byte overlap move.
4098 The first byte of DST is propagated through DSTP1.
4099 Prepare a movmem for: DST+1 = DST (length = LEN - 1).
4100 DST is set to size 1 so the rest of the memory location
4101 does not count as a source operand. */
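/* Illustrative note (added, not part of the original sources): assuming
   LEN = 5 and VAL = 0xAA, the byte store above leaves
     DST: AA ?? ?? ?? ??
   and the overlapping MVC prepared here copies DST[0..3] to DST[1..4]
   one byte at a time, left to right, so the value ripples along:
     DST: AA AA AA AA AA  */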
4102 rtx dstp1 = adjust_address (dst, VOIDmode, 1);
4103 set_mem_size (dst, 1);
4104
4105 emit_insn (gen_movmem_short (dstp1, dst,
4106 GEN_INT (INTVAL (len) - 2)));
4107 }
4108 }
4109 }
4110
4111 else if (TARGET_MVCLE)
4112 {
4113 val = force_not_mem (convert_modes (Pmode, QImode, val, 1));
4114 emit_insn (gen_setmem_long (dst, convert_to_mode (Pmode, len, 1), val));
4115 }
4116
4117 else
4118 {
4119 rtx dst_addr, count, blocks, temp, dstp1 = NULL_RTX;
4120 rtx loop_start_label = gen_label_rtx ();
4121 rtx loop_end_label = gen_label_rtx ();
4122 rtx end_label = gen_label_rtx ();
4123 enum machine_mode mode;
4124
4125 mode = GET_MODE (len);
4126 if (mode == VOIDmode)
4127 mode = Pmode;
4128
4129 dst_addr = gen_reg_rtx (Pmode);
4130 count = gen_reg_rtx (mode);
4131 blocks = gen_reg_rtx (mode);
4132
4133 convert_move (count, len, 1);
4134 emit_cmp_and_jump_insns (count, const0_rtx,
4135 EQ, NULL_RTX, mode, 1, end_label);
4136
4137 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
4138 dst = change_address (dst, VOIDmode, dst_addr);
4139
4140 if (val == const0_rtx)
4141 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4142 OPTAB_DIRECT);
4143 else
4144 {
4145 dstp1 = adjust_address (dst, VOIDmode, 1);
4146 set_mem_size (dst, 1);
4147
4148 /* Initialize memory by storing the first byte. */
4149 emit_move_insn (adjust_address (dst, QImode, 0), val);
4150
4151 /* If count is 1 we are done. */
4152 emit_cmp_and_jump_insns (count, const1_rtx,
4153 EQ, NULL_RTX, mode, 1, end_label);
4154
4155 temp = expand_binop (mode, add_optab, count, GEN_INT (-2), count, 1,
4156 OPTAB_DIRECT);
4157 }
4158 if (temp != count)
4159 emit_move_insn (count, temp);
4160
4161 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4162 OPTAB_DIRECT);
4163 if (temp != blocks)
4164 emit_move_insn (blocks, temp);
4165
4166 emit_cmp_and_jump_insns (blocks, const0_rtx,
4167 EQ, NULL_RTX, mode, 1, loop_end_label);
4168
4169 emit_label (loop_start_label);
4170
4171 if (TARGET_Z10
4172 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 1024))
4173 {
4174 /* Issue a write prefetch for the +4 cache line. */
4175 rtx prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr,
4176 GEN_INT (1024)),
4177 const1_rtx, const0_rtx);
4178 emit_insn (prefetch);
4179 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4180 }
4181
4182 if (val == const0_rtx)
4183 emit_insn (gen_clrmem_short (dst, GEN_INT (255)));
4184 else
4185 emit_insn (gen_movmem_short (dstp1, dst, GEN_INT (255)));
4186 s390_load_address (dst_addr,
4187 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
4188
4189 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4190 OPTAB_DIRECT);
4191 if (temp != blocks)
4192 emit_move_insn (blocks, temp);
4193
4194 emit_cmp_and_jump_insns (blocks, const0_rtx,
4195 EQ, NULL_RTX, mode, 1, loop_end_label);
4196
4197 emit_jump (loop_start_label);
4198 emit_label (loop_end_label);
4199
4200 if (val == const0_rtx)
4201 emit_insn (gen_clrmem_short (dst, convert_to_mode (Pmode, count, 1)));
4202 else
4203 emit_insn (gen_movmem_short (dstp1, dst, convert_to_mode (Pmode, count, 1)));
4204 emit_label (end_label);
4205 }
4206 }
4207
4208 /* Emit code to compare LEN bytes at OP0 with those at OP1,
4209 and return the result in TARGET. */
4210
4211 void
4212 s390_expand_cmpmem (rtx target, rtx op0, rtx op1, rtx len)
4213 {
4214 rtx ccreg = gen_rtx_REG (CCUmode, CC_REGNUM);
4215 rtx tmp;
4216
4217 /* As the result of CMPINT is inverted compared to what we need,
4218 we have to swap the operands. */
4219 tmp = op0; op0 = op1; op1 = tmp;
4220
4221 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
4222 {
4223 if (INTVAL (len) > 0)
4224 {
4225 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (INTVAL (len) - 1)));
4226 emit_insn (gen_cmpint (target, ccreg));
4227 }
4228 else
4229 emit_move_insn (target, const0_rtx);
4230 }
4231 else if (TARGET_MVCLE)
4232 {
4233 emit_insn (gen_cmpmem_long (op0, op1, convert_to_mode (Pmode, len, 1)));
4234 emit_insn (gen_cmpint (target, ccreg));
4235 }
4236 else
4237 {
4238 rtx addr0, addr1, count, blocks, temp;
4239 rtx loop_start_label = gen_label_rtx ();
4240 rtx loop_end_label = gen_label_rtx ();
4241 rtx end_label = gen_label_rtx ();
4242 enum machine_mode mode;
4243
4244 mode = GET_MODE (len);
4245 if (mode == VOIDmode)
4246 mode = Pmode;
4247
4248 addr0 = gen_reg_rtx (Pmode);
4249 addr1 = gen_reg_rtx (Pmode);
4250 count = gen_reg_rtx (mode);
4251 blocks = gen_reg_rtx (mode);
4252
4253 convert_move (count, len, 1);
4254 emit_cmp_and_jump_insns (count, const0_rtx,
4255 EQ, NULL_RTX, mode, 1, end_label);
4256
4257 emit_move_insn (addr0, force_operand (XEXP (op0, 0), NULL_RTX));
4258 emit_move_insn (addr1, force_operand (XEXP (op1, 0), NULL_RTX));
4259 op0 = change_address (op0, VOIDmode, addr0);
4260 op1 = change_address (op1, VOIDmode, addr1);
4261
4262 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4263 OPTAB_DIRECT);
4264 if (temp != count)
4265 emit_move_insn (count, temp);
4266
4267 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4268 OPTAB_DIRECT);
4269 if (temp != blocks)
4270 emit_move_insn (blocks, temp);
4271
4272 emit_cmp_and_jump_insns (blocks, const0_rtx,
4273 EQ, NULL_RTX, mode, 1, loop_end_label);
4274
4275 emit_label (loop_start_label);
4276
4277 if (TARGET_Z10
4278 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 512))
4279 {
4280 rtx prefetch;
4281
4282 /* Issue a read prefetch for the +2 cache line of operand 1. */
4283 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr0, GEN_INT (512)),
4284 const0_rtx, const0_rtx);
4285 emit_insn (prefetch);
4286 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4287
4288 /* Issue a read prefetch for the +2 cache line of operand 2. */
4289 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr1, GEN_INT (512)),
4290 const0_rtx, const0_rtx);
4291 emit_insn (prefetch);
4292 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4293 }
4294
4295 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (255)));
4296 temp = gen_rtx_NE (VOIDmode, ccreg, const0_rtx);
4297 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
4298 gen_rtx_LABEL_REF (VOIDmode, end_label), pc_rtx);
4299 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
4300 emit_jump_insn (temp);
4301
4302 s390_load_address (addr0,
4303 gen_rtx_PLUS (Pmode, addr0, GEN_INT (256)));
4304 s390_load_address (addr1,
4305 gen_rtx_PLUS (Pmode, addr1, GEN_INT (256)));
4306
4307 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4308 OPTAB_DIRECT);
4309 if (temp != blocks)
4310 emit_move_insn (blocks, temp);
4311
4312 emit_cmp_and_jump_insns (blocks, const0_rtx,
4313 EQ, NULL_RTX, mode, 1, loop_end_label);
4314
4315 emit_jump (loop_start_label);
4316 emit_label (loop_end_label);
4317
4318 emit_insn (gen_cmpmem_short (op0, op1,
4319 convert_to_mode (Pmode, count, 1)));
4320 emit_label (end_label);
4321
4322 emit_insn (gen_cmpint (target, ccreg));
4323 }
4324 }
4325
4326
4327 /* Expand conditional increment or decrement using alc/slb instructions.
4328 Should generate code setting DST to either SRC or SRC + INCREMENT,
4329 depending on the result of the comparison CMP_OP0 CMP_CODE CMP_OP1.
4330 Returns true if successful, false otherwise.
4331
4332 That makes it possible to implement some if-constructs without jumps e.g.:
4333 (borrow = CC0 | CC1 and carry = CC2 | CC3)
4334 unsigned int a, b, c;
4335 if (a < b) c++; -> CCU b > a -> CC2; c += carry;
4336 if (a < b) c--; -> CCL3 a - b -> borrow; c -= borrow;
4337 if (a <= b) c++; -> CCL3 b - a -> borrow; c += carry;
4338 if (a <= b) c--; -> CCU a <= b -> borrow; c -= borrow;
4339
4340 Checks for EQ and NE with a nonzero value need an additional xor e.g.:
4341 if (a == b) c++; -> CCL3 a ^= b; 0 - a -> borrow; c += carry;
4342 if (a == b) c--; -> CCU a ^= b; a <= 0 -> CC0 | CC1; c -= borrow;
4343 if (a != b) c++; -> CCU a ^= b; a > 0 -> CC2; c += carry;
4344 if (a != b) c--; -> CCL3 a ^= b; 0 - a -> borrow; c -= borrow; */
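/* Illustrative sketch (added, not part of the original sources) of the
   if-conversion this expander enables, written as plain C:

     unsigned int
     cond_inc (unsigned int a, unsigned int b, unsigned int c)
     {
       if (a < b)
         c++;
       return c;
     }

   can then be compiled without a branch: the unsigned compare leaves the
   carry in the condition code and a single ALC of zero folds it into C.  */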
4345
4346 bool
4347 s390_expand_addcc (enum rtx_code cmp_code, rtx cmp_op0, rtx cmp_op1,
4348 rtx dst, rtx src, rtx increment)
4349 {
4350 enum machine_mode cmp_mode;
4351 enum machine_mode cc_mode;
4352 rtx op_res;
4353 rtx insn;
4354 rtvec p;
4355 int ret;
4356
4357 if ((GET_MODE (cmp_op0) == SImode || GET_MODE (cmp_op0) == VOIDmode)
4358 && (GET_MODE (cmp_op1) == SImode || GET_MODE (cmp_op1) == VOIDmode))
4359 cmp_mode = SImode;
4360 else if ((GET_MODE (cmp_op0) == DImode || GET_MODE (cmp_op0) == VOIDmode)
4361 && (GET_MODE (cmp_op1) == DImode || GET_MODE (cmp_op1) == VOIDmode))
4362 cmp_mode = DImode;
4363 else
4364 return false;
4365
4366 /* Try ADD LOGICAL WITH CARRY. */
4367 if (increment == const1_rtx)
4368 {
4369 /* Determine CC mode to use. */
4370 if (cmp_code == EQ || cmp_code == NE)
4371 {
4372 if (cmp_op1 != const0_rtx)
4373 {
4374 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
4375 NULL_RTX, 0, OPTAB_WIDEN);
4376 cmp_op1 = const0_rtx;
4377 }
4378
4379 cmp_code = cmp_code == EQ ? LEU : GTU;
4380 }
4381
4382 if (cmp_code == LTU || cmp_code == LEU)
4383 {
4384 rtx tem = cmp_op0;
4385 cmp_op0 = cmp_op1;
4386 cmp_op1 = tem;
4387 cmp_code = swap_condition (cmp_code);
4388 }
4389
4390 switch (cmp_code)
4391 {
4392 case GTU:
4393 cc_mode = CCUmode;
4394 break;
4395
4396 case GEU:
4397 cc_mode = CCL3mode;
4398 break;
4399
4400 default:
4401 return false;
4402 }
4403
4404 /* Emit comparison instruction pattern. */
4405 if (!register_operand (cmp_op0, cmp_mode))
4406 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4407
4408 insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
4409 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4410 /* We use insn_invalid_p here to add clobbers if required. */
4411 ret = insn_invalid_p (emit_insn (insn));
4412 gcc_assert (!ret);
4413
4414 /* Emit ALC instruction pattern. */
4415 op_res = gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4416 gen_rtx_REG (cc_mode, CC_REGNUM),
4417 const0_rtx);
4418
4419 if (src != const0_rtx)
4420 {
4421 if (!register_operand (src, GET_MODE (dst)))
4422 src = force_reg (GET_MODE (dst), src);
4423
4424 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, src);
4425 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, const0_rtx);
4426 }
4427
4428 p = rtvec_alloc (2);
4429 RTVEC_ELT (p, 0) =
4430 gen_rtx_SET (VOIDmode, dst, op_res);
4431 RTVEC_ELT (p, 1) =
4432 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4433 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
4434
4435 return true;
4436 }
4437
4438 /* Try SUBTRACT LOGICAL WITH BORROW. */
4439 if (increment == constm1_rtx)
4440 {
4441 /* Determine CC mode to use. */
4442 if (cmp_code == EQ || cmp_code == NE)
4443 {
4444 if (cmp_op1 != const0_rtx)
4445 {
4446 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
4447 NULL_RTX, 0, OPTAB_WIDEN);
4448 cmp_op1 = const0_rtx;
4449 }
4450
4451 cmp_code = cmp_code == EQ ? LEU : GTU;
4452 }
4453
4454 if (cmp_code == GTU || cmp_code == GEU)
4455 {
4456 rtx tem = cmp_op0;
4457 cmp_op0 = cmp_op1;
4458 cmp_op1 = tem;
4459 cmp_code = swap_condition (cmp_code);
4460 }
4461
4462 switch (cmp_code)
4463 {
4464 case LEU:
4465 cc_mode = CCUmode;
4466 break;
4467
4468 case LTU:
4469 cc_mode = CCL3mode;
4470 break;
4471
4472 default:
4473 return false;
4474 }
4475
4476 /* Emit comparison instruction pattern. */
4477 if (!register_operand (cmp_op0, cmp_mode))
4478 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4479
4480 insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
4481 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4482 /* We use insn_invalid_p here to add clobbers if required. */
4483 ret = insn_invalid_p (emit_insn (insn));
4484 gcc_assert (!ret);
4485
4486 /* Emit SLB instruction pattern. */
4487 if (!register_operand (src, GET_MODE (dst)))
4488 src = force_reg (GET_MODE (dst), src);
4489
4490 op_res = gen_rtx_MINUS (GET_MODE (dst),
4491 gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
4492 gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4493 gen_rtx_REG (cc_mode, CC_REGNUM),
4494 const0_rtx));
4495 p = rtvec_alloc (2);
4496 RTVEC_ELT (p, 0) =
4497 gen_rtx_SET (VOIDmode, dst, op_res);
4498 RTVEC_ELT (p, 1) =
4499 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4500 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
4501
4502 return true;
4503 }
4504
4505 return false;
4506 }
4507
4508 /* Expand code for the insv template. Return true if successful. */
4509
4510 bool
4511 s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
4512 {
4513 int bitsize = INTVAL (op1);
4514 int bitpos = INTVAL (op2);
4515
4516 /* On z10 we can use the risbg instruction to implement insv. */
4517 if (TARGET_Z10
4518 && ((GET_MODE (dest) == DImode && GET_MODE (src) == DImode)
4519 || (GET_MODE (dest) == SImode && GET_MODE (src) == SImode)))
4520 {
4521 rtx op;
4522 rtx clobber;
4523
4524 op = gen_rtx_SET (GET_MODE(src),
4525 gen_rtx_ZERO_EXTRACT (GET_MODE (dest), dest, op1, op2),
4526 src);
4527 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4528 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
4529
4530 return true;
4531 }
4532
4533 /* We need byte alignment. */
4534 if (bitsize % BITS_PER_UNIT)
4535 return false;
4536
4537 if (bitpos == 0
4538 && memory_operand (dest, VOIDmode)
4539 && (register_operand (src, word_mode)
4540 || const_int_operand (src, VOIDmode)))
4541 {
4542 /* Emit standard pattern if possible. */
4543 enum machine_mode mode = smallest_mode_for_size (bitsize, MODE_INT);
4544 if (GET_MODE_BITSIZE (mode) == bitsize)
4545 emit_move_insn (adjust_address (dest, mode, 0), gen_lowpart (mode, src));
4546
4547 /* (set (ze (mem)) (const_int)). */
4548 else if (const_int_operand (src, VOIDmode))
4549 {
4550 int size = bitsize / BITS_PER_UNIT;
4551 rtx src_mem = adjust_address (force_const_mem (word_mode, src), BLKmode,
4552 GET_MODE_SIZE (word_mode) - size);
4553
4554 dest = adjust_address (dest, BLKmode, 0);
4555 set_mem_size (dest, size);
4556 s390_expand_movmem (dest, src_mem, GEN_INT (size));
4557 }
4558
4559 /* (set (ze (mem)) (reg)). */
4560 else if (register_operand (src, word_mode))
4561 {
4562 if (bitsize <= GET_MODE_BITSIZE (SImode))
4563 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, op1,
4564 const0_rtx), src);
4565 else
4566 {
4567 /* Emit st,stcmh sequence. */
4568 int stcmh_width = bitsize - GET_MODE_BITSIZE (SImode);
4569 int size = stcmh_width / BITS_PER_UNIT;
4570
4571 emit_move_insn (adjust_address (dest, SImode, size),
4572 gen_lowpart (SImode, src));
4573 set_mem_size (dest, size);
4574 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, GEN_INT
4575 (stcmh_width), const0_rtx),
4576 gen_rtx_LSHIFTRT (word_mode, src, GEN_INT
4577 (GET_MODE_BITSIZE (SImode))));
4578 }
4579 }
4580 else
4581 return false;
4582
4583 return true;
4584 }
4585
4586 /* (set (ze (reg)) (const_int)). */
4587 if (TARGET_ZARCH
4588 && register_operand (dest, word_mode)
4589 && (bitpos % 16) == 0
4590 && (bitsize % 16) == 0
4591 && const_int_operand (src, VOIDmode))
4592 {
4593 HOST_WIDE_INT val = INTVAL (src);
4594 int regpos = bitpos + bitsize;
4595
4596 while (regpos > bitpos)
4597 {
4598 enum machine_mode putmode;
4599 int putsize;
4600
4601 if (TARGET_EXTIMM && (regpos % 32 == 0) && (regpos >= bitpos + 32))
4602 putmode = SImode;
4603 else
4604 putmode = HImode;
4605
4606 putsize = GET_MODE_BITSIZE (putmode);
4607 regpos -= putsize;
4608 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
4609 GEN_INT (putsize),
4610 GEN_INT (regpos)),
4611 gen_int_mode (val, putmode));
4612 val >>= putsize;
4613 }
4614 gcc_assert (regpos == bitpos);
4615 return true;
4616 }
4617
4618 return false;
4619 }
4620
4621 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic which returns a
4622 register that holds VAL of mode MODE shifted by COUNT bits. */
4623
4624 static inline rtx
4625 s390_expand_mask_and_shift (rtx val, enum machine_mode mode, rtx count)
4626 {
4627 val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
4628 NULL_RTX, 1, OPTAB_DIRECT);
4629 return expand_simple_binop (SImode, ASHIFT, val, count,
4630 NULL_RTX, 1, OPTAB_DIRECT);
4631 }
4632
4633 /* Structure to hold the initial parameters for a compare_and_swap operation
4634 in HImode and QImode. */
4635
4636 struct alignment_context
4637 {
4638 rtx memsi; /* SI aligned memory location. */
4639 rtx shift; /* Bit offset with regard to lsb. */
4640 rtx modemask; /* Mask of the HQImode shifted by SHIFT bits. */
4641 rtx modemaski; /* ~modemask */
4642 bool aligned; /* True if memory is aligned, false otherwise. */
4643 };
4644
4645 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize
4646 structure AC for transparent simplification if the memory alignment is known
4647 to be at least 32 bits. MEM is the memory location for the actual operation
4648 and MODE its mode. */
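/* Illustrative note (added, not part of the original sources): assuming
   a QImode operation on the unaligned address 0x1001, the code below
   ends up with
     memsi     = the aligned SImode word at 0x1000
     shift     = (3 - 1) * 8 = 16          bit offset of the byte
     modemask  = 0xff << 16  = 0x00ff0000
     modemaski = ~modemask   = 0xff00ffff
   i.e. the byte lives in bits 16..23 of the big-endian SImode word and
   all later updates are performed on that word under this mask.  */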
4649
4650 static void
4651 init_alignment_context (struct alignment_context *ac, rtx mem,
4652 enum machine_mode mode)
4653 {
4654 ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
4655 ac->aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));
4656
4657 if (ac->aligned)
4658 ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned. */
4659 else
4660 {
4661 /* Alignment is unknown. */
4662 rtx byteoffset, addr, align;
4663
4664 /* Force the address into a register. */
4665 addr = force_reg (Pmode, XEXP (mem, 0));
4666
4667 /* Align it to SImode. */
4668 align = expand_simple_binop (Pmode, AND, addr,
4669 GEN_INT (-GET_MODE_SIZE (SImode)),
4670 NULL_RTX, 1, OPTAB_DIRECT);
4671 /* Generate MEM. */
4672 ac->memsi = gen_rtx_MEM (SImode, align);
4673 MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
4674 set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
4675 set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));
4676
4677 /* Calculate shiftcount. */
4678 byteoffset = expand_simple_binop (Pmode, AND, addr,
4679 GEN_INT (GET_MODE_SIZE (SImode) - 1),
4680 NULL_RTX, 1, OPTAB_DIRECT);
4681 /* As we already have some offset, evaluate the remaining distance. */
4682 ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
4683 NULL_RTX, 1, OPTAB_DIRECT);
4684
4685 }
4686 /* Shift is the byte count, but we need the bitcount. */
4687 ac->shift = expand_simple_binop (SImode, MULT, ac->shift, GEN_INT (BITS_PER_UNIT),
4688 NULL_RTX, 1, OPTAB_DIRECT);
4689 /* Calculate masks. */
4690 ac->modemask = expand_simple_binop (SImode, ASHIFT,
4691 GEN_INT (GET_MODE_MASK (mode)), ac->shift,
4692 NULL_RTX, 1, OPTAB_DIRECT);
4693 ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask, NULL_RTX, 1);
4694 }
4695
4696 /* Expand an atomic compare and swap operation for HImode and QImode. MEM is
4697 the memory location, CMP the old value to compare MEM with, and NEW_RTX the value
4698 to set if CMP == MEM.
4699 CMP is never in memory for compare_and_swap_cc because
4700 expand_bool_compare_and_swap puts it into a register for later compare. */
4701
4702 void
4703 s390_expand_cs_hqi (enum machine_mode mode, rtx target, rtx mem, rtx cmp, rtx new_rtx)
4704 {
4705 struct alignment_context ac;
4706 rtx cmpv, newv, val, resv, cc;
4707 rtx res = gen_reg_rtx (SImode);
4708 rtx csloop = gen_label_rtx ();
4709 rtx csend = gen_label_rtx ();
4710
4711 gcc_assert (register_operand (target, VOIDmode));
4712 gcc_assert (MEM_P (mem));
4713
4714 init_alignment_context (&ac, mem, mode);
4715
4716 /* Shift the values to the correct bit positions. */
4717 if (!(ac.aligned && MEM_P (cmp)))
4718 cmp = s390_expand_mask_and_shift (cmp, mode, ac.shift);
4719 if (!(ac.aligned && MEM_P (new_rtx)))
4720 new_rtx = s390_expand_mask_and_shift (new_rtx, mode, ac.shift);
4721
4722 /* Load full word. Subsequent loads are performed by CS. */
4723 val = expand_simple_binop (SImode, AND, ac.memsi, ac.modemaski,
4724 NULL_RTX, 1, OPTAB_DIRECT);
4725
4726 /* Start CS loop. */
4727 emit_label (csloop);
4728 /* val = "<mem>00..0<mem>"
4729 * cmp = "00..0<cmp>00..0"
4730 * new = "00..0<new>00..0"
4731 */
4732
4733 /* Patch cmp and new with val at correct position. */
4734 if (ac.aligned && MEM_P (cmp))
4735 {
4736 cmpv = force_reg (SImode, val);
4737 store_bit_field (cmpv, GET_MODE_BITSIZE (mode), 0,
4738 0, 0, SImode, cmp);
4739 }
4740 else
4741 cmpv = force_reg (SImode, expand_simple_binop (SImode, IOR, cmp, val,
4742 NULL_RTX, 1, OPTAB_DIRECT));
4743 if (ac.aligned && MEM_P (new_rtx))
4744 {
4745 newv = force_reg (SImode, val);
4746 store_bit_field (newv, GET_MODE_BITSIZE (mode), 0,
4747 0, 0, SImode, new_rtx);
4748 }
4749 else
4750 newv = force_reg (SImode, expand_simple_binop (SImode, IOR, new_rtx, val,
4751 NULL_RTX, 1, OPTAB_DIRECT));
4752
4753 /* Jump to end if we're done (likely?). */
4754 s390_emit_jump (csend, s390_emit_compare_and_swap (EQ, res, ac.memsi,
4755 cmpv, newv));
4756
4757 /* Check for changes outside mode. */
4758 resv = expand_simple_binop (SImode, AND, res, ac.modemaski,
4759 NULL_RTX, 1, OPTAB_DIRECT);
4760 cc = s390_emit_compare (NE, resv, val);
4761 emit_move_insn (val, resv);
4762 /* Loop internal if so. */
4763 s390_emit_jump (csloop, cc);
4764
4765 emit_label (csend);
4766
4767 /* Return the correct part of the bitfield. */
4768 convert_move (target, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
4769 NULL_RTX, 1, OPTAB_DIRECT), 1);
4770 }
4771
4772 /* Expand an atomic operation CODE of mode MODE. MEM is the memory location
4773 and VAL the value to play with. If AFTER is true then store the value
4774 MEM holds after the operation, if AFTER is false then store the value MEM
4775 holds before the operation. If TARGET is zero then discard that value, else
4776 store it to TARGET. */
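/* Illustrative sketch (added, not part of the original sources) of the
   compare-and-swap loop emitted here, written as plain C on the
   containing aligned word (memsi, modemask as set up by
   init_alignment_context; OP stands for the requested operation):

     unsigned int old = *memsi, prev, field, new_word;
     do
       {
         prev = old;
         field = OP (old, val) & modemask;
         new_word = (old & ~modemask) | field;
         old = __sync_val_compare_and_swap (memsi, prev, new_word);
       }
     while (old != prev);

   The loop retries until no other CPU modified the word between the
   load and the CS.  */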
4777
4778 void
4779 s390_expand_atomic (enum machine_mode mode, enum rtx_code code,
4780 rtx target, rtx mem, rtx val, bool after)
4781 {
4782 struct alignment_context ac;
4783 rtx cmp;
4784 rtx new_rtx = gen_reg_rtx (SImode);
4785 rtx orig = gen_reg_rtx (SImode);
4786 rtx csloop = gen_label_rtx ();
4787
4788 gcc_assert (!target || register_operand (target, VOIDmode));
4789 gcc_assert (MEM_P (mem));
4790
4791 init_alignment_context (&ac, mem, mode);
4792
4793 /* Shift val to the correct bit positions.
4794 Preserve "icm", but prevent "ex icm". */
4795 if (!(ac.aligned && code == SET && MEM_P (val)))
4796 val = s390_expand_mask_and_shift (val, mode, ac.shift);
4797
4798 /* Further preparation insns. */
4799 if (code == PLUS || code == MINUS)
4800 emit_move_insn (orig, val);
4801 else if (code == MULT || code == AND) /* val = "11..1<val>11..1" */
4802 val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
4803 NULL_RTX, 1, OPTAB_DIRECT);
4804
4805 /* Load full word. Subsequent loads are performed by CS. */
4806 cmp = force_reg (SImode, ac.memsi);
4807
4808 /* Start CS loop. */
4809 emit_label (csloop);
4810 emit_move_insn (new_rtx, cmp);
4811
4812 /* Patch new with val at correct position. */
4813 switch (code)
4814 {
4815 case PLUS:
4816 case MINUS:
4817 val = expand_simple_binop (SImode, code, new_rtx, orig,
4818 NULL_RTX, 1, OPTAB_DIRECT);
4819 val = expand_simple_binop (SImode, AND, val, ac.modemask,
4820 NULL_RTX, 1, OPTAB_DIRECT);
4821 /* FALLTHRU */
4822 case SET:
4823 if (ac.aligned && MEM_P (val))
4824 store_bit_field (new_rtx, GET_MODE_BITSIZE (mode), 0,
4825 0, 0, SImode, val);
4826 else
4827 {
4828 new_rtx = expand_simple_binop (SImode, AND, new_rtx, ac.modemaski,
4829 NULL_RTX, 1, OPTAB_DIRECT);
4830 new_rtx = expand_simple_binop (SImode, IOR, new_rtx, val,
4831 NULL_RTX, 1, OPTAB_DIRECT);
4832 }
4833 break;
4834 case AND:
4835 case IOR:
4836 case XOR:
4837 new_rtx = expand_simple_binop (SImode, code, new_rtx, val,
4838 NULL_RTX, 1, OPTAB_DIRECT);
4839 break;
4840 case MULT: /* NAND */
4841 new_rtx = expand_simple_binop (SImode, AND, new_rtx, val,
4842 NULL_RTX, 1, OPTAB_DIRECT);
4843 new_rtx = expand_simple_binop (SImode, XOR, new_rtx, ac.modemask,
4844 NULL_RTX, 1, OPTAB_DIRECT);
4845 break;
4846 default:
4847 gcc_unreachable ();
4848 }
4849
4850 s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, cmp,
4851 ac.memsi, cmp, new_rtx));
4852
4853 /* Return the correct part of the bitfield. */
4854 if (target)
4855 convert_move (target, expand_simple_binop (SImode, LSHIFTRT,
4856 after ? new_rtx : cmp, ac.shift,
4857 NULL_RTX, 1, OPTAB_DIRECT), 1);
4858 }
4859
4860 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
4861 We need to emit DTP-relative relocations. */
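/* Illustrative note (added, not part of the original sources): e.g. for
   SIZE == 8 and a symbol_ref to "foo" this emits

     .quad   foo@DTPOFF

   which the assembler turns into a DTP-relative relocation.  */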
4862
4863 static void s390_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
4864
4865 static void
4866 s390_output_dwarf_dtprel (FILE *file, int size, rtx x)
4867 {
4868 switch (size)
4869 {
4870 case 4:
4871 fputs ("\t.long\t", file);
4872 break;
4873 case 8:
4874 fputs ("\t.quad\t", file);
4875 break;
4876 default:
4877 gcc_unreachable ();
4878 }
4879 output_addr_const (file, x);
4880 fputs ("@DTPOFF", file);
4881 }
4882
4883 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
4884 /* Implement TARGET_MANGLE_TYPE. */
4885
4886 static const char *
4887 s390_mangle_type (const_tree type)
4888 {
4889 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
4890 && TARGET_LONG_DOUBLE_128)
4891 return "g";
4892
4893 /* For all other types, use normal C++ mangling. */
4894 return NULL;
4895 }
4896 #endif
4897
4898 /* In the name of slightly smaller debug output, and to cater to
4899 general assembler lossage, recognize various UNSPEC sequences
4900 and turn them back into a direct symbol reference. */
4901
4902 static rtx
4903 s390_delegitimize_address (rtx orig_x)
4904 {
4905 rtx x, y;
4906
4907 orig_x = delegitimize_mem_from_attrs (orig_x);
4908 x = orig_x;
4909
4910 /* Extract the symbol ref from:
4911 (plus:SI (reg:SI 12 %r12)
4912 (const:SI (unspec:SI [(symbol_ref/f:SI ("*.LC0"))]
4913 UNSPEC_GOTOFF/PLTOFF)))
4914 and
4915 (plus:SI (reg:SI 12 %r12)
4916 (const:SI (plus:SI (unspec:SI [(symbol_ref:SI ("L"))]
4917 UNSPEC_GOTOFF/PLTOFF)
4918 (const_int 4 [0x4])))) */
4919 if (GET_CODE (x) == PLUS
4920 && REG_P (XEXP (x, 0))
4921 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
4922 && GET_CODE (XEXP (x, 1)) == CONST)
4923 {
4924 HOST_WIDE_INT offset = 0;
4925
4926 /* The const operand. */
4927 y = XEXP (XEXP (x, 1), 0);
4928
4929 if (GET_CODE (y) == PLUS
4930 && GET_CODE (XEXP (y, 1)) == CONST_INT)
4931 {
4932 offset = INTVAL (XEXP (y, 1));
4933 y = XEXP (y, 0);
4934 }
4935
4936 if (GET_CODE (y) == UNSPEC
4937 && (XINT (y, 1) == UNSPEC_GOTOFF
4938 || XINT (y, 1) == UNSPEC_PLTOFF))
4939 return plus_constant (XVECEXP (y, 0, 0), offset);
4940 }
4941
4942 if (GET_CODE (x) != MEM)
4943 return orig_x;
4944
4945 x = XEXP (x, 0);
4946 if (GET_CODE (x) == PLUS
4947 && GET_CODE (XEXP (x, 1)) == CONST
4948 && GET_CODE (XEXP (x, 0)) == REG
4949 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
4950 {
4951 y = XEXP (XEXP (x, 1), 0);
4952 if (GET_CODE (y) == UNSPEC
4953 && XINT (y, 1) == UNSPEC_GOT)
4954 y = XVECEXP (y, 0, 0);
4955 else
4956 return orig_x;
4957 }
4958 else if (GET_CODE (x) == CONST)
4959 {
4960 /* Extract the symbol ref from:
4961 (mem:QI (const:DI (unspec:DI [(symbol_ref:DI ("foo"))]
4962 UNSPEC_PLT/GOTENT))) */
4963
4964 y = XEXP (x, 0);
4965 if (GET_CODE (y) == UNSPEC
4966 && (XINT (y, 1) == UNSPEC_GOTENT
4967 || XINT (y, 1) == UNSPEC_PLT))
4968 y = XVECEXP (y, 0, 0);
4969 else
4970 return orig_x;
4971 }
4972 else
4973 return orig_x;
4974
4975 if (GET_MODE (orig_x) != Pmode)
4976 {
4977 if (GET_MODE (orig_x) == BLKmode)
4978 return orig_x;
4979 y = lowpart_subreg (GET_MODE (orig_x), y, Pmode);
4980 if (y == NULL_RTX)
4981 return orig_x;
4982 }
4983 return y;
4984 }
4985
4986 /* Output operand OP to stdio stream FILE.
4987 OP is an address (register + offset) which is not used to address data;
4988 instead the rightmost bits are interpreted as the value. */
4989
4990 static void
4991 print_shift_count_operand (FILE *file, rtx op)
4992 {
4993 HOST_WIDE_INT offset;
4994 rtx base;
4995
4996 /* Extract base register and offset. */
4997 if (!s390_decompose_shift_count (op, &base, &offset))
4998 gcc_unreachable ();
4999
5000 /* Sanity check. */
5001 if (base)
5002 {
5003 gcc_assert (GET_CODE (base) == REG);
5004 gcc_assert (REGNO (base) < FIRST_PSEUDO_REGISTER);
5005 gcc_assert (REGNO_REG_CLASS (REGNO (base)) == ADDR_REGS);
5006 }
5007
5008 /* Offsets are restricted to twelve bits. */
5009 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset & ((1 << 12) - 1));
5010 if (base)
5011 fprintf (file, "(%s)", reg_names[REGNO (base)]);
5012 }
5013
5014 /* See 'get_some_local_dynamic_name'. */
5015
5016 static int
5017 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
5018 {
5019 rtx x = *px;
5020
5021 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
5022 {
5023 x = get_pool_constant (x);
5024 return for_each_rtx (&x, get_some_local_dynamic_name_1, 0);
5025 }
5026
5027 if (GET_CODE (x) == SYMBOL_REF
5028 && tls_symbolic_operand (x) == TLS_MODEL_LOCAL_DYNAMIC)
5029 {
5030 cfun->machine->some_ld_name = XSTR (x, 0);
5031 return 1;
5032 }
5033
5034 return 0;
5035 }
5036
5037 /* Locate some local-dynamic symbol still in use by this function
5038 so that we can print its name in local-dynamic base patterns. */
5039
5040 static const char *
5041 get_some_local_dynamic_name (void)
5042 {
5043 rtx insn;
5044
5045 if (cfun->machine->some_ld_name)
5046 return cfun->machine->some_ld_name;
5047
5048 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
5049 if (INSN_P (insn)
5050 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
5051 return cfun->machine->some_ld_name;
5052
5053 gcc_unreachable ();
5054 }
5055
5056 /* Output machine-dependent UNSPECs occurring in address constant X
5057 in assembler syntax to stdio stream FILE. Returns true if the
5058 constant X could be recognized, false otherwise. */
5059
5060 static bool
5061 s390_output_addr_const_extra (FILE *file, rtx x)
5062 {
5063 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
5064 switch (XINT (x, 1))
5065 {
5066 case UNSPEC_GOTENT:
5067 output_addr_const (file, XVECEXP (x, 0, 0));
5068 fprintf (file, "@GOTENT");
5069 return true;
5070 case UNSPEC_GOT:
5071 output_addr_const (file, XVECEXP (x, 0, 0));
5072 fprintf (file, "@GOT");
5073 return true;
5074 case UNSPEC_GOTOFF:
5075 output_addr_const (file, XVECEXP (x, 0, 0));
5076 fprintf (file, "@GOTOFF");
5077 return true;
5078 case UNSPEC_PLT:
5079 output_addr_const (file, XVECEXP (x, 0, 0));
5080 fprintf (file, "@PLT");
5081 return true;
5082 case UNSPEC_PLTOFF:
5083 output_addr_const (file, XVECEXP (x, 0, 0));
5084 fprintf (file, "@PLTOFF");
5085 return true;
5086 case UNSPEC_TLSGD:
5087 output_addr_const (file, XVECEXP (x, 0, 0));
5088 fprintf (file, "@TLSGD");
5089 return true;
5090 case UNSPEC_TLSLDM:
5091 assemble_name (file, get_some_local_dynamic_name ());
5092 fprintf (file, "@TLSLDM");
5093 return true;
5094 case UNSPEC_DTPOFF:
5095 output_addr_const (file, XVECEXP (x, 0, 0));
5096 fprintf (file, "@DTPOFF");
5097 return true;
5098 case UNSPEC_NTPOFF:
5099 output_addr_const (file, XVECEXP (x, 0, 0));
5100 fprintf (file, "@NTPOFF");
5101 return true;
5102 case UNSPEC_GOTNTPOFF:
5103 output_addr_const (file, XVECEXP (x, 0, 0));
5104 fprintf (file, "@GOTNTPOFF");
5105 return true;
5106 case UNSPEC_INDNTPOFF:
5107 output_addr_const (file, XVECEXP (x, 0, 0));
5108 fprintf (file, "@INDNTPOFF");
5109 return true;
5110 }
5111
5112 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 2)
5113 switch (XINT (x, 1))
5114 {
5115 case UNSPEC_POOL_OFFSET:
5116 x = gen_rtx_MINUS (GET_MODE (x), XVECEXP (x, 0, 0), XVECEXP (x, 0, 1));
5117 output_addr_const (file, x);
5118 return true;
5119 }
5120 return false;
5121 }
5122
5123 /* Output address operand ADDR in assembler syntax to
5124 stdio stream FILE. */
5125
5126 void
5127 print_operand_address (FILE *file, rtx addr)
5128 {
5129 struct s390_address ad;
5130
5131 if (s390_symref_operand_p (addr, NULL, NULL))
5132 {
5133 if (!TARGET_Z10)
5134 {
5135 output_operand_lossage ("symbolic memory references are "
5136 "only supported on z10 or later");
5137 return;
5138 }
5139 output_addr_const (file, addr);
5140 return;
5141 }
5142
5143 if (!s390_decompose_address (addr, &ad)
5144 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5145 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
5146 output_operand_lossage ("cannot decompose address");
5147
5148 if (ad.disp)
5149 output_addr_const (file, ad.disp);
5150 else
5151 fprintf (file, "0");
5152
5153 if (ad.base && ad.indx)
5154 fprintf (file, "(%s,%s)", reg_names[REGNO (ad.indx)],
5155 reg_names[REGNO (ad.base)]);
5156 else if (ad.base)
5157 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
5158 }
5159
5160 /* Output operand X in assembler syntax to stdio stream FILE.
5161 CODE specifies the format flag. The following format flags
5162 are recognized:
5163
5164 'C': print opcode suffix for branch condition.
5165 'D': print opcode suffix for inverse branch condition.
5166 'E': print opcode suffix for branch on index instruction.
5167 'J': print tls_load/tls_gdcall/tls_ldcall suffix
5168 'G': print the size of the operand in bytes.
5169 'O': print only the displacement of a memory reference.
5170 'R': print only the base register of a memory reference.
5171 'S': print S-type memory reference (base+displacement).
5172 'N': print the second word of a DImode operand.
5173 'M': print the second word of a TImode operand.
5174 'Y': print shift count operand.
5175
5176 'b': print integer X as if it's an unsigned byte.
5177 'c': print integer X as if it's a signed byte.
5178 'x': print integer X as if it's an unsigned halfword.
5179 'h': print integer X as if it's a signed halfword.
5180 'i': print the first nonzero HImode part of X.
5181 'j': print the first HImode part unequal to -1 of X.
5182 'k': print the first nonzero SImode part of X.
5183 'm': print the first SImode part unequal to -1 of X.
5184 'o': print integer X as if it's an unsigned 32bit word. */
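/* Illustrative note (added, not part of the original sources): assuming
   a CONST_INT with the value 0xff9c, the integer modifiers print
     'b' -> 156      (0x9c as an unsigned byte)
     'c' -> -100     (0x9c as a signed byte)
     'x' -> 65436    (0xff9c as an unsigned halfword)
     'h' -> -100     (0xff9c as a signed halfword)
   which matches the CONST_INT handling further down.  */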
5185
5186 void
5187 print_operand (FILE *file, rtx x, int code)
5188 {
5189 switch (code)
5190 {
5191 case 'C':
5192 fprintf (file, s390_branch_condition_mnemonic (x, FALSE));
5193 return;
5194
5195 case 'D':
5196 fprintf (file, s390_branch_condition_mnemonic (x, TRUE));
5197 return;
5198
5199 case 'E':
5200 if (GET_CODE (x) == LE)
5201 fprintf (file, "l");
5202 else if (GET_CODE (x) == GT)
5203 fprintf (file, "h");
5204 else
5205 output_operand_lossage ("invalid comparison operator "
5206 "for 'E' output modifier");
5207 return;
5208
5209 case 'J':
5210 if (GET_CODE (x) == SYMBOL_REF)
5211 {
5212 fprintf (file, "%s", ":tls_load:");
5213 output_addr_const (file, x);
5214 }
5215 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
5216 {
5217 fprintf (file, "%s", ":tls_gdcall:");
5218 output_addr_const (file, XVECEXP (x, 0, 0));
5219 }
5220 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM)
5221 {
5222 fprintf (file, "%s", ":tls_ldcall:");
5223 assemble_name (file, get_some_local_dynamic_name ());
5224 }
5225 else
5226 output_operand_lossage ("invalid reference for 'J' output modifier");
5227 return;
5228
5229 case 'G':
5230 fprintf (file, "%u", GET_MODE_SIZE (GET_MODE (x)));
5231 return;
5232
5233 case 'O':
5234 {
5235 struct s390_address ad;
5236 int ret;
5237
5238 if (!MEM_P (x))
5239 {
5240 output_operand_lossage ("memory reference expected for "
5241 "'O' output modifier");
5242 return;
5243 }
5244
5245 ret = s390_decompose_address (XEXP (x, 0), &ad);
5246
5247 if (!ret
5248 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5249 || ad.indx)
5250 {
5251 output_operand_lossage ("invalid address for 'O' output modifier");
5252 return;
5253 }
5254
5255 if (ad.disp)
5256 output_addr_const (file, ad.disp);
5257 else
5258 fprintf (file, "0");
5259 }
5260 return;
5261
5262 case 'R':
5263 {
5264 struct s390_address ad;
5265 int ret;
5266
5267 if (!MEM_P (x))
5268 {
5269 output_operand_lossage ("memory reference expected for "
5270 "'R' output modifier");
5271 return;
5272 }
5273
5274 ret = s390_decompose_address (XEXP (x, 0), &ad);
5275
5276 if (!ret
5277 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5278 || ad.indx)
5279 {
5280 output_operand_lossage ("invalid address for 'R' output modifier");
5281 return;
5282 }
5283
5284 if (ad.base)
5285 fprintf (file, "%s", reg_names[REGNO (ad.base)]);
5286 else
5287 fprintf (file, "0");
5288 }
5289 return;
5290
5291 case 'S':
5292 {
5293 struct s390_address ad;
5294 int ret;
5295
5296 if (!MEM_P (x))
5297 {
5298 output_operand_lossage ("memory reference expected for "
5299 "'S' output modifier");
5300 return;
5301 }
5302 ret = s390_decompose_address (XEXP (x, 0), &ad);
5303
5304 if (!ret
5305 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5306 || ad.indx)
5307 {
5308 output_operand_lossage ("invalid address for 'S' output modifier");
5309 return;
5310 }
5311
5312 if (ad.disp)
5313 output_addr_const (file, ad.disp);
5314 else
5315 fprintf (file, "0");
5316
5317 if (ad.base)
5318 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
5319 }
5320 return;
5321
5322 case 'N':
5323 if (GET_CODE (x) == REG)
5324 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
5325 else if (GET_CODE (x) == MEM)
5326 x = change_address (x, VOIDmode, plus_constant (XEXP (x, 0), 4));
5327 else
5328 output_operand_lossage ("register or memory expression expected "
5329 "for 'N' output modifier");
5330 break;
5331
5332 case 'M':
5333 if (GET_CODE (x) == REG)
5334 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
5335 else if (GET_CODE (x) == MEM)
5336 x = change_address (x, VOIDmode, plus_constant (XEXP (x, 0), 8));
5337 else
5338 output_operand_lossage ("register or memory expression expected "
5339 "for 'M' output modifier");
5340 break;
5341
5342 case 'Y':
5343 print_shift_count_operand (file, x);
5344 return;
5345 }
5346
5347 switch (GET_CODE (x))
5348 {
5349 case REG:
5350 fprintf (file, "%s", reg_names[REGNO (x)]);
5351 break;
5352
5353 case MEM:
5354 output_address (XEXP (x, 0));
5355 break;
5356
5357 case CONST:
5358 case CODE_LABEL:
5359 case LABEL_REF:
5360 case SYMBOL_REF:
5361 output_addr_const (file, x);
5362 break;
5363
5364 case CONST_INT:
5365 if (code == 'b')
5366 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xff);
5367 else if (code == 'c')
5368 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ((INTVAL (x) & 0xff) ^ 0x80) - 0x80);
5369 else if (code == 'x')
5370 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xffff);
5371 else if (code == 'h')
5372 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
5373 else if (code == 'i')
5374 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5375 s390_extract_part (x, HImode, 0));
5376 else if (code == 'j')
5377 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5378 s390_extract_part (x, HImode, -1));
5379 else if (code == 'k')
5380 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5381 s390_extract_part (x, SImode, 0));
5382 else if (code == 'm')
5383 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5384 s390_extract_part (x, SImode, -1));
5385 else if (code == 'o')
5386 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xffffffff);
5387 else
5388 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
5389 break;
5390
5391 case CONST_DOUBLE:
5392 gcc_assert (GET_MODE (x) == VOIDmode);
5393 if (code == 'b')
5394 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xff);
5395 else if (code == 'x')
5396 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xffff);
5397 else if (code == 'h')
5398 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5399 ((CONST_DOUBLE_LOW (x) & 0xffff) ^ 0x8000) - 0x8000);
5400 else
5401 {
5402 if (code == 0)
5403 output_operand_lossage ("invalid constant - try using "
5404 "an output modifier");
5405 else
5406 output_operand_lossage ("invalid constant for output modifier '%c'",
5407 code);
5408 }
5409 break;
5410
5411 default:
5412 if (code == 0)
5413 output_operand_lossage ("invalid expression - try using "
5414 "an output modifier");
5415 else
5416 output_operand_lossage ("invalid expression for output "
5417 "modifier '%c'", code);
5418 break;
5419 }
5420 }
5421
5422 /* Target hook for assembling integer objects. We need to define it
5423 here to work around a bug in some versions of GAS, which couldn't
5424 handle values smaller than INT_MIN when printed in decimal. */
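/* Illustrative note (added, not part of the original sources): e.g. for
   an aligned 8-byte CONST_INT with the value -9223372036854775808 this
   emits

     .quad   0x8000000000000000

   instead of the decimal form that the affected assemblers rejected.  */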
5425
5426 static bool
5427 s390_assemble_integer (rtx x, unsigned int size, int aligned_p)
5428 {
5429 if (size == 8 && aligned_p
5430 && GET_CODE (x) == CONST_INT && INTVAL (x) < INT_MIN)
5431 {
5432 fprintf (asm_out_file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n",
5433 INTVAL (x));
5434 return true;
5435 }
5436 return default_assemble_integer (x, size, aligned_p);
5437 }
5438
5439 /* Returns true if register REGNO is used for forming
5440 a memory address in expression X. */
5441
5442 static bool
5443 reg_used_in_mem_p (int regno, rtx x)
5444 {
5445 enum rtx_code code = GET_CODE (x);
5446 int i, j;
5447 const char *fmt;
5448
5449 if (code == MEM)
5450 {
5451 if (refers_to_regno_p (regno, regno+1,
5452 XEXP (x, 0), 0))
5453 return true;
5454 }
5455 else if (code == SET
5456 && GET_CODE (SET_DEST (x)) == PC)
5457 {
5458 if (refers_to_regno_p (regno, regno+1,
5459 SET_SRC (x), 0))
5460 return true;
5461 }
5462
5463 fmt = GET_RTX_FORMAT (code);
5464 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5465 {
5466 if (fmt[i] == 'e'
5467 && reg_used_in_mem_p (regno, XEXP (x, i)))
5468 return true;
5469
5470 else if (fmt[i] == 'E')
5471 for (j = 0; j < XVECLEN (x, i); j++)
5472 if (reg_used_in_mem_p (regno, XVECEXP (x, i, j)))
5473 return true;
5474 }
5475 return false;
5476 }
5477
5478 /* Returns true if expression DEP_RTX sets an address register
5479 used by instruction INSN to address memory. */
5480
5481 static bool
5482 addr_generation_dependency_p (rtx dep_rtx, rtx insn)
5483 {
5484 rtx target, pat;
5485
5486 if (GET_CODE (dep_rtx) == INSN)
5487 dep_rtx = PATTERN (dep_rtx);
5488
5489 if (GET_CODE (dep_rtx) == SET)
5490 {
5491 target = SET_DEST (dep_rtx);
5492 if (GET_CODE (target) == STRICT_LOW_PART)
5493 target = XEXP (target, 0);
5494 while (GET_CODE (target) == SUBREG)
5495 target = SUBREG_REG (target);
5496
5497 if (GET_CODE (target) == REG)
5498 {
5499 int regno = REGNO (target);
5500
5501 if (s390_safe_attr_type (insn) == TYPE_LA)
5502 {
5503 pat = PATTERN (insn);
5504 if (GET_CODE (pat) == PARALLEL)
5505 {
5506 gcc_assert (XVECLEN (pat, 0) == 2);
5507 pat = XVECEXP (pat, 0, 0);
5508 }
5509 gcc_assert (GET_CODE (pat) == SET);
5510 return refers_to_regno_p (regno, regno+1, SET_SRC (pat), 0);
5511 }
5512 else if (get_attr_atype (insn) == ATYPE_AGEN)
5513 return reg_used_in_mem_p (regno, PATTERN (insn));
5514 }
5515 }
5516 return false;
5517 }
5518
5519 /* Return 1 if dep_insn sets a register used by insn in the agen unit. */
5520
5521 int
5522 s390_agen_dep_p (rtx dep_insn, rtx insn)
5523 {
5524 rtx dep_rtx = PATTERN (dep_insn);
5525 int i;
5526
5527 if (GET_CODE (dep_rtx) == SET
5528 && addr_generation_dependency_p (dep_rtx, insn))
5529 return 1;
5530 else if (GET_CODE (dep_rtx) == PARALLEL)
5531 {
5532 for (i = 0; i < XVECLEN (dep_rtx, 0); i++)
5533 {
5534 if (addr_generation_dependency_p (XVECEXP (dep_rtx, 0, i), insn))
5535 return 1;
5536 }
5537 }
5538 return 0;
5539 }
5540
5541
5542 /* A C statement (sans semicolon) to update the integer scheduling priority
5543 INSN_PRIORITY (INSN). Increase the priority to execute the INSN earlier,
5544 reduce the priority to execute INSN later. Do not define this macro if
5545 you do not need to adjust the scheduling priorities of insns.
5546
5547 A STD instruction should be scheduled earlier,
5548 in order to use the bypass. */
5549 static int
5550 s390_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
5551 {
5552 if (! INSN_P (insn))
5553 return priority;
5554
5555 if (s390_tune != PROCESSOR_2084_Z990
5556 && s390_tune != PROCESSOR_2094_Z9_109
5557 && s390_tune != PROCESSOR_2097_Z10
5558 && s390_tune != PROCESSOR_2817_Z196)
5559 return priority;
5560
5561 switch (s390_safe_attr_type (insn))
5562 {
5563 case TYPE_FSTOREDF:
5564 case TYPE_FSTORESF:
5565 priority = priority << 3;
5566 break;
5567 case TYPE_STORE:
5568 case TYPE_STM:
5569 priority = priority << 1;
5570 break;
5571 default:
5572 break;
5573 }
5574 return priority;
5575 }
5576
5577
5578 /* The number of instructions that can be issued per cycle. */
5579
5580 static int
5581 s390_issue_rate (void)
5582 {
5583 switch (s390_tune)
5584 {
5585 case PROCESSOR_2084_Z990:
5586 case PROCESSOR_2094_Z9_109:
5587 case PROCESSOR_2817_Z196:
5588 return 3;
5589 case PROCESSOR_2097_Z10:
5590 return 2;
5591 default:
5592 return 1;
5593 }
5594 }
5595
5596 static int
5597 s390_first_cycle_multipass_dfa_lookahead (void)
5598 {
5599 return 4;
5600 }
5601
5602 /* Annotate every literal pool reference in X by an UNSPEC_LTREF expression.
5603 Fix up MEMs as required. */
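/* Illustrative note (added, not part of the original sources): the
   annotation rewrites e.g.

     (mem (symbol_ref ".LC0"))

   into

     (mem (unspec [(symbol_ref ".LC0") (reg base)] UNSPEC_LTREF))

   so later passes always see which base register a literal pool
   access depends on.  */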
5604
5605 static void
5606 annotate_constant_pool_refs (rtx *x)
5607 {
5608 int i, j;
5609 const char *fmt;
5610
5611 gcc_assert (GET_CODE (*x) != SYMBOL_REF
5612 || !CONSTANT_POOL_ADDRESS_P (*x));
5613
5614 /* Literal pool references can only occur inside a MEM ... */
5615 if (GET_CODE (*x) == MEM)
5616 {
5617 rtx memref = XEXP (*x, 0);
5618
5619 if (GET_CODE (memref) == SYMBOL_REF
5620 && CONSTANT_POOL_ADDRESS_P (memref))
5621 {
5622 rtx base = cfun->machine->base_reg;
5623 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, memref, base),
5624 UNSPEC_LTREF);
5625
5626 *x = replace_equiv_address (*x, addr);
5627 return;
5628 }
5629
5630 if (GET_CODE (memref) == CONST
5631 && GET_CODE (XEXP (memref, 0)) == PLUS
5632 && GET_CODE (XEXP (XEXP (memref, 0), 1)) == CONST_INT
5633 && GET_CODE (XEXP (XEXP (memref, 0), 0)) == SYMBOL_REF
5634 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (memref, 0), 0)))
5635 {
5636 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (memref, 0), 1));
5637 rtx sym = XEXP (XEXP (memref, 0), 0);
5638 rtx base = cfun->machine->base_reg;
5639 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5640 UNSPEC_LTREF);
5641
5642 *x = replace_equiv_address (*x, plus_constant (addr, off));
5643 return;
5644 }
5645 }
5646
5647 /* ... or a load-address type pattern. */
5648 if (GET_CODE (*x) == SET)
5649 {
5650 rtx addrref = SET_SRC (*x);
5651
5652 if (GET_CODE (addrref) == SYMBOL_REF
5653 && CONSTANT_POOL_ADDRESS_P (addrref))
5654 {
5655 rtx base = cfun->machine->base_reg;
5656 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addrref, base),
5657 UNSPEC_LTREF);
5658
5659 SET_SRC (*x) = addr;
5660 return;
5661 }
5662
5663 if (GET_CODE (addrref) == CONST
5664 && GET_CODE (XEXP (addrref, 0)) == PLUS
5665 && GET_CODE (XEXP (XEXP (addrref, 0), 1)) == CONST_INT
5666 && GET_CODE (XEXP (XEXP (addrref, 0), 0)) == SYMBOL_REF
5667 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (addrref, 0), 0)))
5668 {
5669 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (addrref, 0), 1));
5670 rtx sym = XEXP (XEXP (addrref, 0), 0);
5671 rtx base = cfun->machine->base_reg;
5672 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5673 UNSPEC_LTREF);
5674
5675 SET_SRC (*x) = plus_constant (addr, off);
5676 return;
5677 }
5678 }
5679
5680 /* Annotate LTREL_BASE as well. */
5681 if (GET_CODE (*x) == UNSPEC
5682 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
5683 {
5684 rtx base = cfun->machine->base_reg;
5685 *x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XVECEXP (*x, 0, 0), base),
5686 UNSPEC_LTREL_BASE);
5687 return;
5688 }
5689
5690 fmt = GET_RTX_FORMAT (GET_CODE (*x));
5691 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
5692 {
5693 if (fmt[i] == 'e')
5694 {
5695 annotate_constant_pool_refs (&XEXP (*x, i));
5696 }
5697 else if (fmt[i] == 'E')
5698 {
5699 for (j = 0; j < XVECLEN (*x, i); j++)
5700 annotate_constant_pool_refs (&XVECEXP (*x, i, j));
5701 }
5702 }
5703 }
5704
5705 /* Split all branches that exceed the maximum distance.
5706 Returns true if this created a new literal pool entry. */
5707
5708 static int
5709 s390_split_branches (void)
5710 {
5711 rtx temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
5712 int new_literal = 0, ret;
5713 rtx insn, pat, tmp, target;
5714 rtx *label;
5715
5716 /* We need correct insn addresses. */
5717
5718 shorten_branches (get_insns ());
5719
5720 /* Find all branches that exceed 64KB, and split them. */
5721
5722 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5723 {
5724 if (GET_CODE (insn) != JUMP_INSN)
5725 continue;
5726
5727 pat = PATTERN (insn);
5728 if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 2)
5729 pat = XVECEXP (pat, 0, 0);
5730 if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
5731 continue;
5732
5733 if (GET_CODE (SET_SRC (pat)) == LABEL_REF)
5734 {
5735 label = &SET_SRC (pat);
5736 }
5737 else if (GET_CODE (SET_SRC (pat)) == IF_THEN_ELSE)
5738 {
5739 if (GET_CODE (XEXP (SET_SRC (pat), 1)) == LABEL_REF)
5740 label = &XEXP (SET_SRC (pat), 1);
5741 else if (GET_CODE (XEXP (SET_SRC (pat), 2)) == LABEL_REF)
5742 label = &XEXP (SET_SRC (pat), 2);
5743 else
5744 continue;
5745 }
5746 else
5747 continue;
5748
5749 if (get_attr_length (insn) <= 4)
5750 continue;
5751
5752 /* We are going to use the return register as scratch register,
5753 make sure it will be saved/restored by the prologue/epilogue. */
5754 cfun_frame_layout.save_return_addr_p = 1;
5755
5756 if (!flag_pic)
5757 {
5758 new_literal = 1;
5759 tmp = force_const_mem (Pmode, *label);
5760 tmp = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, tmp), insn);
5761 INSN_ADDRESSES_NEW (tmp, -1);
5762 annotate_constant_pool_refs (&PATTERN (tmp));
5763
5764 target = temp_reg;
5765 }
5766 else
5767 {
5768 new_literal = 1;
5769 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, *label),
5770 UNSPEC_LTREL_OFFSET);
5771 target = gen_rtx_CONST (Pmode, target);
5772 target = force_const_mem (Pmode, target);
5773 tmp = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, target), insn);
5774 INSN_ADDRESSES_NEW (tmp, -1);
5775 annotate_constant_pool_refs (&PATTERN (tmp));
5776
5777 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XEXP (target, 0),
5778 cfun->machine->base_reg),
5779 UNSPEC_LTREL_BASE);
5780 target = gen_rtx_PLUS (Pmode, temp_reg, target);
5781 }
5782
5783 ret = validate_change (insn, label, target, 0);
5784 gcc_assert (ret);
5785 }
5786
5787 return new_literal;
5788 }
5789
5790
5791 /* Find an annotated literal pool symbol referenced in RTX X,
5792 and store it at REF. Will abort if X contains references to
5793 more than one such pool symbol; multiple references to the same
5794 symbol are allowed, however.
5795
5796 The rtx pointed to by REF must be initialized to NULL_RTX
5797 by the caller before calling this routine. */
5798
5799 static void
5800 find_constant_pool_ref (rtx x, rtx *ref)
5801 {
5802 int i, j;
5803 const char *fmt;
5804
5805 /* Ignore LTREL_BASE references. */
5806 if (GET_CODE (x) == UNSPEC
5807 && XINT (x, 1) == UNSPEC_LTREL_BASE)
5808 return;
5809 /* Likewise POOL_ENTRY insns. */
5810 if (GET_CODE (x) == UNSPEC_VOLATILE
5811 && XINT (x, 1) == UNSPECV_POOL_ENTRY)
5812 return;
5813
5814 gcc_assert (GET_CODE (x) != SYMBOL_REF
5815 || !CONSTANT_POOL_ADDRESS_P (x));
5816
5817 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_LTREF)
5818 {
5819 rtx sym = XVECEXP (x, 0, 0);
5820 gcc_assert (GET_CODE (sym) == SYMBOL_REF
5821 && CONSTANT_POOL_ADDRESS_P (sym));
5822
5823 if (*ref == NULL_RTX)
5824 *ref = sym;
5825 else
5826 gcc_assert (*ref == sym);
5827
5828 return;
5829 }
5830
5831 fmt = GET_RTX_FORMAT (GET_CODE (x));
5832 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5833 {
5834 if (fmt[i] == 'e')
5835 {
5836 find_constant_pool_ref (XEXP (x, i), ref);
5837 }
5838 else if (fmt[i] == 'E')
5839 {
5840 for (j = 0; j < XVECLEN (x, i); j++)
5841 find_constant_pool_ref (XVECEXP (x, i, j), ref);
5842 }
5843 }
5844 }
5845
5846 /* Replace every reference to the annotated literal pool
5847 symbol REF in X by its base plus OFFSET. */
5848
5849 static void
5850 replace_constant_pool_ref (rtx *x, rtx ref, rtx offset)
5851 {
5852 int i, j;
5853 const char *fmt;
5854
5855 gcc_assert (*x != ref);
5856
5857 if (GET_CODE (*x) == UNSPEC
5858 && XINT (*x, 1) == UNSPEC_LTREF
5859 && XVECEXP (*x, 0, 0) == ref)
5860 {
5861 *x = gen_rtx_PLUS (Pmode, XVECEXP (*x, 0, 1), offset);
5862 return;
5863 }
5864
5865 if (GET_CODE (*x) == PLUS
5866 && GET_CODE (XEXP (*x, 1)) == CONST_INT
5867 && GET_CODE (XEXP (*x, 0)) == UNSPEC
5868 && XINT (XEXP (*x, 0), 1) == UNSPEC_LTREF
5869 && XVECEXP (XEXP (*x, 0), 0, 0) == ref)
5870 {
5871 rtx addr = gen_rtx_PLUS (Pmode, XVECEXP (XEXP (*x, 0), 0, 1), offset);
5872 *x = plus_constant (addr, INTVAL (XEXP (*x, 1)));
5873 return;
5874 }
5875
5876 fmt = GET_RTX_FORMAT (GET_CODE (*x));
5877 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
5878 {
5879 if (fmt[i] == 'e')
5880 {
5881 replace_constant_pool_ref (&XEXP (*x, i), ref, offset);
5882 }
5883 else if (fmt[i] == 'E')
5884 {
5885 for (j = 0; j < XVECLEN (*x, i); j++)
5886 replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, offset);
5887 }
5888 }
5889 }
5890
5891 /* Check whether X contains an UNSPEC_LTREL_BASE.
5892 Return its constant pool symbol if found, NULL_RTX otherwise. */
5893
5894 static rtx
5895 find_ltrel_base (rtx x)
5896 {
5897 int i, j;
5898 const char *fmt;
5899
5900 if (GET_CODE (x) == UNSPEC
5901 && XINT (x, 1) == UNSPEC_LTREL_BASE)
5902 return XVECEXP (x, 0, 0);
5903
5904 fmt = GET_RTX_FORMAT (GET_CODE (x));
5905 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5906 {
5907 if (fmt[i] == 'e')
5908 {
5909 rtx fnd = find_ltrel_base (XEXP (x, i));
5910 if (fnd)
5911 return fnd;
5912 }
5913 else if (fmt[i] == 'E')
5914 {
5915 for (j = 0; j < XVECLEN (x, i); j++)
5916 {
5917 rtx fnd = find_ltrel_base (XVECEXP (x, i, j));
5918 if (fnd)
5919 return fnd;
5920 }
5921 }
5922 }
5923
5924 return NULL_RTX;
5925 }
5926
5927 /* Replace any occurrence of UNSPEC_LTREL_BASE in X with its base. */
5928
5929 static void
5930 replace_ltrel_base (rtx *x)
5931 {
5932 int i, j;
5933 const char *fmt;
5934
5935 if (GET_CODE (*x) == UNSPEC
5936 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
5937 {
5938 *x = XVECEXP (*x, 0, 1);
5939 return;
5940 }
5941
5942 fmt = GET_RTX_FORMAT (GET_CODE (*x));
5943 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
5944 {
5945 if (fmt[i] == 'e')
5946 {
5947 replace_ltrel_base (&XEXP (*x, i));
5948 }
5949 else if (fmt[i] == 'E')
5950 {
5951 for (j = 0; j < XVECLEN (*x, i); j++)
5952 replace_ltrel_base (&XVECEXP (*x, i, j));
5953 }
5954 }
5955 }
5956
5957
5958 /* We keep a list of constants which we have to add to internal
5959 constant tables in the middle of large functions. */
5960
5961 #define NR_C_MODES 11
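/* Listed in order of decreasing size so that s390_dump_pool emits the
entries in descending alignment requirement order. */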
5962 enum machine_mode constant_modes[NR_C_MODES] =
5963 {
5964 TFmode, TImode, TDmode,
5965 DFmode, DImode, DDmode,
5966 SFmode, SImode, SDmode,
5967 HImode,
5968 QImode
5969 };
5970
5971 struct constant
5972 {
5973 struct constant *next;
5974 rtx value;
5975 rtx label;
5976 };
5977
5978 struct constant_pool
5979 {
5980 struct constant_pool *next;
5981 rtx first_insn;
5982 rtx pool_insn;
5983 bitmap insns;
5984 rtx emit_pool_after;
5985
5986 struct constant *constants[NR_C_MODES];
5987 struct constant *execute;
5988 rtx label;
5989 int size;
5990 };
5991
5992 /* Allocate new constant_pool structure. */
5993
5994 static struct constant_pool *
5995 s390_alloc_pool (void)
5996 {
5997 struct constant_pool *pool;
5998 int i;
5999
6000 pool = (struct constant_pool *) xmalloc (sizeof *pool);
6001 pool->next = NULL;
6002 for (i = 0; i < NR_C_MODES; i++)
6003 pool->constants[i] = NULL;
6004
6005 pool->execute = NULL;
6006 pool->label = gen_label_rtx ();
6007 pool->first_insn = NULL_RTX;
6008 pool->pool_insn = NULL_RTX;
6009 pool->insns = BITMAP_ALLOC (NULL);
6010 pool->size = 0;
6011 pool->emit_pool_after = NULL_RTX;
6012
6013 return pool;
6014 }
6015
6016 /* Create new constant pool covering instructions starting at INSN
6017 and chain it to the end of POOL_LIST. */
6018
6019 static struct constant_pool *
6020 s390_start_pool (struct constant_pool **pool_list, rtx insn)
6021 {
6022 struct constant_pool *pool, **prev;
6023
6024 pool = s390_alloc_pool ();
6025 pool->first_insn = insn;
6026
6027 for (prev = pool_list; *prev; prev = &(*prev)->next)
6028 ;
6029 *prev = pool;
6030
6031 return pool;
6032 }
6033
6034 /* End range of instructions covered by POOL at INSN and emit
6035 placeholder insn representing the pool. */
6036
6037 static void
6038 s390_end_pool (struct constant_pool *pool, rtx insn)
6039 {
6040 rtx pool_size = GEN_INT (pool->size + 8 /* alignment slop */);
6041
6042 if (!insn)
6043 insn = get_last_insn ();
6044
6045 pool->pool_insn = emit_insn_after (gen_pool (pool_size), insn);
6046 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6047 }
6048
6049 /* Add INSN to the list of insns covered by POOL. */
6050
6051 static void
6052 s390_add_pool_insn (struct constant_pool *pool, rtx insn)
6053 {
6054 bitmap_set_bit (pool->insns, INSN_UID (insn));
6055 }
6056
6057 /* Return pool out of POOL_LIST that covers INSN. */
6058
6059 static struct constant_pool *
6060 s390_find_pool (struct constant_pool *pool_list, rtx insn)
6061 {
6062 struct constant_pool *pool;
6063
6064 for (pool = pool_list; pool; pool = pool->next)
6065 if (bitmap_bit_p (pool->insns, INSN_UID (insn)))
6066 break;
6067
6068 return pool;
6069 }
6070
6071 /* Add constant VAL of mode MODE to the constant pool POOL. */
6072
6073 static void
6074 s390_add_constant (struct constant_pool *pool, rtx val, enum machine_mode mode)
6075 {
6076 struct constant *c;
6077 int i;
6078
6079 for (i = 0; i < NR_C_MODES; i++)
6080 if (constant_modes[i] == mode)
6081 break;
6082 gcc_assert (i != NR_C_MODES);
6083
6084 for (c = pool->constants[i]; c != NULL; c = c->next)
6085 if (rtx_equal_p (val, c->value))
6086 break;
6087
6088 if (c == NULL)
6089 {
6090 c = (struct constant *) xmalloc (sizeof *c);
6091 c->value = val;
6092 c->label = gen_label_rtx ();
6093 c->next = pool->constants[i];
6094 pool->constants[i] = c;
6095 pool->size += GET_MODE_SIZE (mode);
6096 }
6097 }
6098
6099 /* Return an rtx that represents the offset of X from the start of
6100 pool POOL. */
6101
6102 static rtx
6103 s390_pool_offset (struct constant_pool *pool, rtx x)
6104 {
6105 rtx label;
6106
6107 label = gen_rtx_LABEL_REF (GET_MODE (x), pool->label);
6108 x = gen_rtx_UNSPEC (GET_MODE (x), gen_rtvec (2, x, label),
6109 UNSPEC_POOL_OFFSET);
6110 return gen_rtx_CONST (GET_MODE (x), x);
6111 }
6112
6113 /* Find constant VAL of mode MODE in the constant pool POOL.
6114 Return an RTX describing the distance from the start of
6115 the pool to the location of the constant. */
6116
6117 static rtx
6118 s390_find_constant (struct constant_pool *pool, rtx val,
6119 enum machine_mode mode)
6120 {
6121 struct constant *c;
6122 int i;
6123
6124 for (i = 0; i < NR_C_MODES; i++)
6125 if (constant_modes[i] == mode)
6126 break;
6127 gcc_assert (i != NR_C_MODES);
6128
6129 for (c = pool->constants[i]; c != NULL; c = c->next)
6130 if (rtx_equal_p (val, c->value))
6131 break;
6132
6133 gcc_assert (c);
6134
6135 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
6136 }
6137
6138 /* Check whether INSN is an execute. Return the label_ref to its
6139 execute target template if so, NULL_RTX otherwise. */
6140
6141 static rtx
6142 s390_execute_label (rtx insn)
6143 {
6144 if (GET_CODE (insn) == INSN
6145 && GET_CODE (PATTERN (insn)) == PARALLEL
6146 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == UNSPEC
6147 && XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_EXECUTE)
6148 return XVECEXP (XVECEXP (PATTERN (insn), 0, 0), 0, 2);
6149
6150 return NULL_RTX;
6151 }
6152
6153 /* Add execute target for INSN to the constant pool POOL. */
6154
6155 static void
6156 s390_add_execute (struct constant_pool *pool, rtx insn)
6157 {
6158 struct constant *c;
6159
6160 for (c = pool->execute; c != NULL; c = c->next)
6161 if (INSN_UID (insn) == INSN_UID (c->value))
6162 break;
6163
6164 if (c == NULL)
6165 {
6166 c = (struct constant *) xmalloc (sizeof *c);
6167 c->value = insn;
6168 c->label = gen_label_rtx ();
6169 c->next = pool->execute;
6170 pool->execute = c;
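/* An execute target template is a single instruction of at most
six bytes, so reserve that much space in the pool. */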
6171 pool->size += 6;
6172 }
6173 }
6174
6175 /* Find execute target for INSN in the constant pool POOL.
6176 Return an RTX describing the distance from the start of
6177 the pool to the location of the execute target. */
6178
6179 static rtx
6180 s390_find_execute (struct constant_pool *pool, rtx insn)
6181 {
6182 struct constant *c;
6183
6184 for (c = pool->execute; c != NULL; c = c->next)
6185 if (INSN_UID (insn) == INSN_UID (c->value))
6186 break;
6187
6188 gcc_assert (c);
6189
6190 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
6191 }
6192
6193 /* For an execute INSN, extract the execute target template. */
6194
6195 static rtx
6196 s390_execute_target (rtx insn)
6197 {
6198 rtx pattern = PATTERN (insn);
6199 gcc_assert (s390_execute_label (insn));
6200
6201 if (XVECLEN (pattern, 0) == 2)
6202 {
6203 pattern = copy_rtx (XVECEXP (pattern, 0, 1));
6204 }
6205 else
6206 {
6207 rtvec vec = rtvec_alloc (XVECLEN (pattern, 0) - 1);
6208 int i;
6209
6210 for (i = 0; i < XVECLEN (pattern, 0) - 1; i++)
6211 RTVEC_ELT (vec, i) = copy_rtx (XVECEXP (pattern, 0, i + 1));
6212
6213 pattern = gen_rtx_PARALLEL (VOIDmode, vec);
6214 }
6215
6216 return pattern;
6217 }
6218
6219 /* Indicate that INSN cannot be duplicated. This is the case for
6220 execute insns that carry a unique label. */
6221
6222 static bool
6223 s390_cannot_copy_insn_p (rtx insn)
6224 {
6225 rtx label = s390_execute_label (insn);
6226 return label && label != const0_rtx;
6227 }
6228
6229 /* Dump out the constants in POOL. If REMOTE_LABEL is true,
6230 do not emit the pool base label. */
6231
6232 static void
6233 s390_dump_pool (struct constant_pool *pool, bool remote_label)
6234 {
6235 struct constant *c;
6236 rtx insn = pool->pool_insn;
6237 int i;
6238
6239 /* Switch to rodata section. */
6240 if (TARGET_CPU_ZARCH)
6241 {
6242 insn = emit_insn_after (gen_pool_section_start (), insn);
6243 INSN_ADDRESSES_NEW (insn, -1);
6244 }
6245
6246 /* Ensure minimum pool alignment. */
6247 if (TARGET_CPU_ZARCH)
6248 insn = emit_insn_after (gen_pool_align (GEN_INT (8)), insn);
6249 else
6250 insn = emit_insn_after (gen_pool_align (GEN_INT (4)), insn);
6251 INSN_ADDRESSES_NEW (insn, -1);
6252
6253 /* Emit pool base label. */
6254 if (!remote_label)
6255 {
6256 insn = emit_label_after (pool->label, insn);
6257 INSN_ADDRESSES_NEW (insn, -1);
6258 }
6259
6260 /* Dump constants in descending alignment requirement order,
6261 ensuring proper alignment for every constant. */
6262 for (i = 0; i < NR_C_MODES; i++)
6263 for (c = pool->constants[i]; c; c = c->next)
6264 {
6265 /* Convert UNSPEC_LTREL_OFFSET unspecs to pool-relative references. */
6266 rtx value = copy_rtx (c->value);
6267 if (GET_CODE (value) == CONST
6268 && GET_CODE (XEXP (value, 0)) == UNSPEC
6269 && XINT (XEXP (value, 0), 1) == UNSPEC_LTREL_OFFSET
6270 && XVECLEN (XEXP (value, 0), 0) == 1)
6271 value = s390_pool_offset (pool, XVECEXP (XEXP (value, 0), 0, 0));
6272
6273 insn = emit_label_after (c->label, insn);
6274 INSN_ADDRESSES_NEW (insn, -1);
6275
6276 value = gen_rtx_UNSPEC_VOLATILE (constant_modes[i],
6277 gen_rtvec (1, value),
6278 UNSPECV_POOL_ENTRY);
6279 insn = emit_insn_after (value, insn);
6280 INSN_ADDRESSES_NEW (insn, -1);
6281 }
6282
6283 /* Ensure minimum alignment for instructions. */
6284 insn = emit_insn_after (gen_pool_align (GEN_INT (2)), insn);
6285 INSN_ADDRESSES_NEW (insn, -1);
6286
6287 /* Output in-pool execute template insns. */
6288 for (c = pool->execute; c; c = c->next)
6289 {
6290 insn = emit_label_after (c->label, insn);
6291 INSN_ADDRESSES_NEW (insn, -1);
6292
6293 insn = emit_insn_after (s390_execute_target (c->value), insn);
6294 INSN_ADDRESSES_NEW (insn, -1);
6295 }
6296
6297 /* Switch back to previous section. */
6298 if (TARGET_CPU_ZARCH)
6299 {
6300 insn = emit_insn_after (gen_pool_section_end (), insn);
6301 INSN_ADDRESSES_NEW (insn, -1);
6302 }
6303
6304 insn = emit_barrier_after (insn);
6305 INSN_ADDRESSES_NEW (insn, -1);
6306
6307 /* Remove placeholder insn. */
6308 remove_insn (pool->pool_insn);
6309 }
6310
6311 /* Free all memory used by POOL. */
6312
6313 static void
6314 s390_free_pool (struct constant_pool *pool)
6315 {
6316 struct constant *c, *next;
6317 int i;
6318
6319 for (i = 0; i < NR_C_MODES; i++)
6320 for (c = pool->constants[i]; c; c = next)
6321 {
6322 next = c->next;
6323 free (c);
6324 }
6325
6326 for (c = pool->execute; c; c = next)
6327 {
6328 next = c->next;
6329 free (c);
6330 }
6331
6332 BITMAP_FREE (pool->insns);
6333 free (pool);
6334 }
6335
6336
6337 /* Collect main literal pool. Return NULL on overflow. */
6338
6339 static struct constant_pool *
6340 s390_mainpool_start (void)
6341 {
6342 struct constant_pool *pool;
6343 rtx insn;
6344
6345 pool = s390_alloc_pool ();
6346
6347 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6348 {
6349 if (GET_CODE (insn) == INSN
6350 && GET_CODE (PATTERN (insn)) == SET
6351 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC_VOLATILE
6352 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPECV_MAIN_POOL)
6353 {
6354 gcc_assert (!pool->pool_insn);
6355 pool->pool_insn = insn;
6356 }
6357
6358 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
6359 {
6360 s390_add_execute (pool, insn);
6361 }
6362 else if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6363 {
6364 rtx pool_ref = NULL_RTX;
6365 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6366 if (pool_ref)
6367 {
6368 rtx constant = get_pool_constant (pool_ref);
6369 enum machine_mode mode = get_pool_mode (pool_ref);
6370 s390_add_constant (pool, constant, mode);
6371 }
6372 }
6373
6374 /* If hot/cold partitioning is enabled we have to make sure that
6375 the literal pool is emitted in the same section where the
6376 initialization of the literal pool base pointer takes place.
6377 emit_pool_after is only used in the non-overflow case on non-z
6378 CPUs where we can emit the literal pool at the end of the
6379 function body within the text section. */
6380 if (NOTE_P (insn)
6381 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS
6382 && !pool->emit_pool_after)
6383 pool->emit_pool_after = PREV_INSN (insn);
6384 }
6385
6386 gcc_assert (pool->pool_insn || pool->size == 0);
6387
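/* A short displacement covers only 0 .. 4095 bytes, so a pool of 4096
bytes or more cannot be addressed off a single base register and is
chunkified instead. */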
6388 if (pool->size >= 4096)
6389 {
6390 /* We're going to chunkify the pool, so remove the main
6391 pool placeholder insn. */
6392 remove_insn (pool->pool_insn);
6393
6394 s390_free_pool (pool);
6395 pool = NULL;
6396 }
6397
6398 /* If the function ends with the section where the literal pool
6399 should be emitted, set the marker to its end. */
6400 if (pool && !pool->emit_pool_after)
6401 pool->emit_pool_after = get_last_insn ();
6402
6403 return pool;
6404 }
6405
6406 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6407 Modify the current function to output the pool constants as well as
6408 the pool register setup instruction. */
6409
6410 static void
6411 s390_mainpool_finish (struct constant_pool *pool)
6412 {
6413 rtx base_reg = cfun->machine->base_reg;
6414 rtx insn;
6415
6416 /* If the pool is empty, we're done. */
6417 if (pool->size == 0)
6418 {
6419 /* We don't actually need a base register after all. */
6420 cfun->machine->base_reg = NULL_RTX;
6421
6422 if (pool->pool_insn)
6423 remove_insn (pool->pool_insn);
6424 s390_free_pool (pool);
6425 return;
6426 }
6427
6428 /* We need correct insn addresses. */
6429 shorten_branches (get_insns ());
6430
6431 /* On zSeries, we use a LARL to load the pool register. The pool is
6432 located in the .rodata section, so we emit it after the function. */
6433 if (TARGET_CPU_ZARCH)
6434 {
6435 insn = gen_main_base_64 (base_reg, pool->label);
6436 insn = emit_insn_after (insn, pool->pool_insn);
6437 INSN_ADDRESSES_NEW (insn, -1);
6438 remove_insn (pool->pool_insn);
6439
6440 insn = get_last_insn ();
6441 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6442 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6443
6444 s390_dump_pool (pool, 0);
6445 }
6446
6447 /* On S/390, if the total size of the function's code plus literal pool
6448 does not exceed 4096 bytes, we use BASR to set up a function base
6449 pointer, and emit the literal pool at the end of the function. */
6450 else if (INSN_ADDRESSES (INSN_UID (pool->emit_pool_after))
6451 + pool->size + 8 /* alignment slop */ < 4096)
6452 {
6453 insn = gen_main_base_31_small (base_reg, pool->label);
6454 insn = emit_insn_after (insn, pool->pool_insn);
6455 INSN_ADDRESSES_NEW (insn, -1);
6456 remove_insn (pool->pool_insn);
6457
6458 insn = emit_label_after (pool->label, insn);
6459 INSN_ADDRESSES_NEW (insn, -1);
6460
6461 /* emit_pool_after will be set by s390_mainpool_start to the
6462 last insn of the section where the literal pool should be
6463 emitted. */
6464 insn = pool->emit_pool_after;
6465
6466 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6467 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6468
6469 s390_dump_pool (pool, 1);
6470 }
6471
6472 /* Otherwise, we emit an inline literal pool and use BASR to branch
6473 over it, setting up the pool register at the same time. */
6474 else
6475 {
6476 rtx pool_end = gen_label_rtx ();
6477
6478 insn = gen_main_base_31_large (base_reg, pool->label, pool_end);
6479 insn = emit_jump_insn_after (insn, pool->pool_insn);
6480 JUMP_LABEL (insn) = pool_end;
6481 INSN_ADDRESSES_NEW (insn, -1);
6482 remove_insn (pool->pool_insn);
6483
6484 insn = emit_label_after (pool->label, insn);
6485 INSN_ADDRESSES_NEW (insn, -1);
6486
6487 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6488 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6489
6490 insn = emit_label_after (pool_end, pool->pool_insn);
6491 INSN_ADDRESSES_NEW (insn, -1);
6492
6493 s390_dump_pool (pool, 1);
6494 }
6495
6496
6497 /* Replace all literal pool references. */
6498
6499 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6500 {
6501 if (INSN_P (insn))
6502 replace_ltrel_base (&PATTERN (insn));
6503
6504 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6505 {
6506 rtx addr, pool_ref = NULL_RTX;
6507 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6508 if (pool_ref)
6509 {
6510 if (s390_execute_label (insn))
6511 addr = s390_find_execute (pool, insn);
6512 else
6513 addr = s390_find_constant (pool, get_pool_constant (pool_ref),
6514 get_pool_mode (pool_ref));
6515
6516 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
6517 INSN_CODE (insn) = -1;
6518 }
6519 }
6520 }
6521
6522
6523 /* Free the pool. */
6524 s390_free_pool (pool);
6525 }
6526
6527 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6528 We have decided we cannot use this pool, so revert all changes
6529 to the current function that were done by s390_mainpool_start. */
6530 static void
6531 s390_mainpool_cancel (struct constant_pool *pool)
6532 {
6533 /* We didn't actually change the instruction stream, so simply
6534 free the pool memory. */
6535 s390_free_pool (pool);
6536 }
6537
6538
6539 /* Chunkify the literal pool. */
6540
6541 #define S390_POOL_CHUNK_MIN 0xc00
6542 #define S390_POOL_CHUNK_MAX 0xe00
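/* 0xc00 is 3072 bytes and 0xe00 is 3584 bytes; both stay below the
4096 byte range of a short displacement. */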
6543
6544 static struct constant_pool *
6545 s390_chunkify_start (void)
6546 {
6547 struct constant_pool *curr_pool = NULL, *pool_list = NULL;
6548 int extra_size = 0;
6549 bitmap far_labels;
6550 rtx pending_ltrel = NULL_RTX;
6551 rtx insn;
6552
6553 rtx (*gen_reload_base) (rtx, rtx) =
6554 TARGET_CPU_ZARCH? gen_reload_base_64 : gen_reload_base_31;
6555
6556
6557 /* We need correct insn addresses. */
6558
6559 shorten_branches (get_insns ());
6560
6561 /* Scan all insns and move literals to pool chunks. */
6562
6563 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6564 {
6565 bool section_switch_p = false;
6566
6567 /* Check for pending LTREL_BASE. */
6568 if (INSN_P (insn))
6569 {
6570 rtx ltrel_base = find_ltrel_base (PATTERN (insn));
6571 if (ltrel_base)
6572 {
6573 gcc_assert (ltrel_base == pending_ltrel);
6574 pending_ltrel = NULL_RTX;
6575 }
6576 }
6577
6578 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
6579 {
6580 if (!curr_pool)
6581 curr_pool = s390_start_pool (&pool_list, insn);
6582
6583 s390_add_execute (curr_pool, insn);
6584 s390_add_pool_insn (curr_pool, insn);
6585 }
6586 else if (GET_CODE (insn) == INSN || CALL_P (insn))
6587 {
6588 rtx pool_ref = NULL_RTX;
6589 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6590 if (pool_ref)
6591 {
6592 rtx constant = get_pool_constant (pool_ref);
6593 enum machine_mode mode = get_pool_mode (pool_ref);
6594
6595 if (!curr_pool)
6596 curr_pool = s390_start_pool (&pool_list, insn);
6597
6598 s390_add_constant (curr_pool, constant, mode);
6599 s390_add_pool_insn (curr_pool, insn);
6600
6601 /* Don't split the pool chunk between a LTREL_OFFSET load
6602 and the corresponding LTREL_BASE. */
6603 if (GET_CODE (constant) == CONST
6604 && GET_CODE (XEXP (constant, 0)) == UNSPEC
6605 && XINT (XEXP (constant, 0), 1) == UNSPEC_LTREL_OFFSET)
6606 {
6607 gcc_assert (!pending_ltrel);
6608 pending_ltrel = pool_ref;
6609 }
6610 }
6611 /* Make sure we do not split between a call and its
6612 corresponding CALL_ARG_LOCATION note. */
6613 if (CALL_P (insn))
6614 {
6615 rtx next = NEXT_INSN (insn);
6616 if (next && NOTE_P (next)
6617 && NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION)
6618 continue;
6619 }
6620 }
6621
6622 if (GET_CODE (insn) == JUMP_INSN || GET_CODE (insn) == CODE_LABEL)
6623 {
6624 if (curr_pool)
6625 s390_add_pool_insn (curr_pool, insn);
6626 /* An LTREL_BASE must follow within the same basic block. */
6627 gcc_assert (!pending_ltrel);
6628 }
6629
6630 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
6631 section_switch_p = true;
6632
6633 if (!curr_pool
6634 || INSN_ADDRESSES_SIZE () <= (size_t) INSN_UID (insn)
6635 || INSN_ADDRESSES (INSN_UID (insn)) == -1)
6636 continue;
6637
6638 if (TARGET_CPU_ZARCH)
6639 {
6640 if (curr_pool->size < S390_POOL_CHUNK_MAX)
6641 continue;
6642
6643 s390_end_pool (curr_pool, NULL_RTX);
6644 curr_pool = NULL;
6645 }
6646 else
6647 {
6648 int chunk_size = INSN_ADDRESSES (INSN_UID (insn))
6649 - INSN_ADDRESSES (INSN_UID (curr_pool->first_insn))
6650 + extra_size;
6651
6652 /* We will later have to insert base register reload insns.
6653 Those will have an effect on code size, which we need to
6654 consider here. This calculation makes rather pessimistic
6655 worst-case assumptions. */
6656 if (GET_CODE (insn) == CODE_LABEL)
6657 extra_size += 6;
6658
6659 if (chunk_size < S390_POOL_CHUNK_MIN
6660 && curr_pool->size < S390_POOL_CHUNK_MIN
6661 && !section_switch_p)
6662 continue;
6663
6664 /* Pool chunks can only be inserted after BARRIERs ... */
6665 if (GET_CODE (insn) == BARRIER)
6666 {
6667 s390_end_pool (curr_pool, insn);
6668 curr_pool = NULL;
6669 extra_size = 0;
6670 }
6671
6672 /* ... so if we don't find one in time, create one. */
6673 else if (chunk_size > S390_POOL_CHUNK_MAX
6674 || curr_pool->size > S390_POOL_CHUNK_MAX
6675 || section_switch_p)
6676 {
6677 rtx label, jump, barrier;
6678
6679 if (!section_switch_p)
6680 {
6681 /* We can insert the barrier only after a 'real' insn. */
6682 if (GET_CODE (insn) != INSN && GET_CODE (insn) != CALL_INSN)
6683 continue;
6684 if (get_attr_length (insn) == 0)
6685 continue;
6686 /* Don't separate LTREL_BASE from the corresponding
6687 LTREL_OFFSET load. */
6688 if (pending_ltrel)
6689 continue;
6690 }
6691 else
6692 {
6693 gcc_assert (!pending_ltrel);
6694
6695 /* The old pool has to end before the section switch
6696 note in order to make it part of the current
6697 section. */
6698 insn = PREV_INSN (insn);
6699 }
6700
6701 label = gen_label_rtx ();
6702 jump = emit_jump_insn_after (gen_jump (label), insn);
6703 barrier = emit_barrier_after (jump);
6704 insn = emit_label_after (label, barrier);
6705 JUMP_LABEL (jump) = label;
6706 LABEL_NUSES (label) = 1;
6707
6708 INSN_ADDRESSES_NEW (jump, -1);
6709 INSN_ADDRESSES_NEW (barrier, -1);
6710 INSN_ADDRESSES_NEW (insn, -1);
6711
6712 s390_end_pool (curr_pool, barrier);
6713 curr_pool = NULL;
6714 extra_size = 0;
6715 }
6716 }
6717 }
6718
6719 if (curr_pool)
6720 s390_end_pool (curr_pool, NULL_RTX);
6721 gcc_assert (!pending_ltrel);
6722
6723 /* Find all labels that are branched into
6724 from an insn belonging to a different chunk. */
6725
6726 far_labels = BITMAP_ALLOC (NULL);
6727
6728 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6729 {
6730 /* Labels marked with LABEL_PRESERVE_P can be the target
6731 of non-local jumps, so we have to mark them.
6732 The same holds for named labels.
6733
6734 Don't do that, however, if it is the label before
6735 a jump table. */
6736
6737 if (GET_CODE (insn) == CODE_LABEL
6738 && (LABEL_PRESERVE_P (insn) || LABEL_NAME (insn)))
6739 {
6740 rtx vec_insn = next_real_insn (insn);
6741 rtx vec_pat = vec_insn && GET_CODE (vec_insn) == JUMP_INSN ?
6742 PATTERN (vec_insn) : NULL_RTX;
6743 if (!vec_pat
6744 || !(GET_CODE (vec_pat) == ADDR_VEC
6745 || GET_CODE (vec_pat) == ADDR_DIFF_VEC))
6746 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (insn));
6747 }
6748
6749 /* If we have a direct jump (conditional or unconditional)
6750 or a casesi jump, check all potential targets. */
6751 else if (GET_CODE (insn) == JUMP_INSN)
6752 {
6753 rtx pat = PATTERN (insn);
6754 if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 2)
6755 pat = XVECEXP (pat, 0, 0);
6756
6757 if (GET_CODE (pat) == SET)
6758 {
6759 rtx label = JUMP_LABEL (insn);
6760 if (label)
6761 {
6762 if (s390_find_pool (pool_list, label)
6763 != s390_find_pool (pool_list, insn))
6764 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
6765 }
6766 }
6767 else if (GET_CODE (pat) == PARALLEL
6768 && XVECLEN (pat, 0) == 2
6769 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
6770 && GET_CODE (XVECEXP (pat, 0, 1)) == USE
6771 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == LABEL_REF)
6772 {
6773 /* Find the jump table used by this casesi jump. */
6774 rtx vec_label = XEXP (XEXP (XVECEXP (pat, 0, 1), 0), 0);
6775 rtx vec_insn = next_real_insn (vec_label);
6776 rtx vec_pat = vec_insn && GET_CODE (vec_insn) == JUMP_INSN ?
6777 PATTERN (vec_insn) : NULL_RTX;
6778 if (vec_pat
6779 && (GET_CODE (vec_pat) == ADDR_VEC
6780 || GET_CODE (vec_pat) == ADDR_DIFF_VEC))
6781 {
6782 int i, diff_p = GET_CODE (vec_pat) == ADDR_DIFF_VEC;
6783
6784 for (i = 0; i < XVECLEN (vec_pat, diff_p); i++)
6785 {
6786 rtx label = XEXP (XVECEXP (vec_pat, diff_p, i), 0);
6787
6788 if (s390_find_pool (pool_list, label)
6789 != s390_find_pool (pool_list, insn))
6790 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
6791 }
6792 }
6793 }
6794 }
6795 }
6796
6797 /* Insert base register reload insns before every pool. */
6798
6799 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
6800 {
6801 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
6802 curr_pool->label);
6803 rtx insn = curr_pool->first_insn;
6804 INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
6805 }
6806
6807 /* Insert base register reload insns at every far label. */
6808
6809 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6810 if (GET_CODE (insn) == CODE_LABEL
6811 && bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
6812 {
6813 struct constant_pool *pool = s390_find_pool (pool_list, insn);
6814 if (pool)
6815 {
6816 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
6817 pool->label);
6818 INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
6819 }
6820 }
6821
6822
6823 BITMAP_FREE (far_labels);
6824
6825
6826 /* Recompute insn addresses. */
6827
6828 init_insn_lengths ();
6829 shorten_branches (get_insns ());
6830
6831 return pool_list;
6832 }
6833
6834 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
6835 After we have decided to use this list, finish implementing
6836 all changes to the current function as required. */
6837
6838 static void
6839 s390_chunkify_finish (struct constant_pool *pool_list)
6840 {
6841 struct constant_pool *curr_pool = NULL;
6842 rtx insn;
6843
6844
6845 /* Replace all literal pool references. */
6846
6847 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6848 {
6849 if (INSN_P (insn))
6850 replace_ltrel_base (&PATTERN (insn));
6851
6852 curr_pool = s390_find_pool (pool_list, insn);
6853 if (!curr_pool)
6854 continue;
6855
6856 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6857 {
6858 rtx addr, pool_ref = NULL_RTX;
6859 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6860 if (pool_ref)
6861 {
6862 if (s390_execute_label (insn))
6863 addr = s390_find_execute (curr_pool, insn);
6864 else
6865 addr = s390_find_constant (curr_pool,
6866 get_pool_constant (pool_ref),
6867 get_pool_mode (pool_ref));
6868
6869 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
6870 INSN_CODE (insn) = -1;
6871 }
6872 }
6873 }
6874
6875 /* Dump out all literal pools. */
6876
6877 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
6878 s390_dump_pool (curr_pool, 0);
6879
6880 /* Free pool list. */
6881
6882 while (pool_list)
6883 {
6884 struct constant_pool *next = pool_list->next;
6885 s390_free_pool (pool_list);
6886 pool_list = next;
6887 }
6888 }
6889
6890 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
6891 We have decided we cannot use this list, so revert all changes
6892 to the current function that were done by s390_chunkify_start. */
6893
6894 static void
6895 s390_chunkify_cancel (struct constant_pool *pool_list)
6896 {
6897 struct constant_pool *curr_pool = NULL;
6898 rtx insn;
6899
6900 /* Remove all pool placeholder insns. */
6901
6902 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
6903 {
6904 /* Did we insert an extra barrier? Remove it. */
6905 rtx barrier = PREV_INSN (curr_pool->pool_insn);
6906 rtx jump = barrier? PREV_INSN (barrier) : NULL_RTX;
6907 rtx label = NEXT_INSN (curr_pool->pool_insn);
6908
6909 if (jump && GET_CODE (jump) == JUMP_INSN
6910 && barrier && GET_CODE (barrier) == BARRIER
6911 && label && GET_CODE (label) == CODE_LABEL
6912 && GET_CODE (PATTERN (jump)) == SET
6913 && SET_DEST (PATTERN (jump)) == pc_rtx
6914 && GET_CODE (SET_SRC (PATTERN (jump))) == LABEL_REF
6915 && XEXP (SET_SRC (PATTERN (jump)), 0) == label)
6916 {
6917 remove_insn (jump);
6918 remove_insn (barrier);
6919 remove_insn (label);
6920 }
6921
6922 remove_insn (curr_pool->pool_insn);
6923 }
6924
6925 /* Remove all base register reload insns. */
6926
6927 for (insn = get_insns (); insn; )
6928 {
6929 rtx next_insn = NEXT_INSN (insn);
6930
6931 if (GET_CODE (insn) == INSN
6932 && GET_CODE (PATTERN (insn)) == SET
6933 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
6934 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_RELOAD_BASE)
6935 remove_insn (insn);
6936
6937 insn = next_insn;
6938 }
6939
6940 /* Free pool list. */
6941
6942 while (pool_list)
6943 {
6944 struct constant_pool *next = pool_list->next;
6945 s390_free_pool (pool_list);
6946 pool_list = next;
6947 }
6948 }
6949
6950 /* Output the constant pool entry EXP in mode MODE with alignment ALIGN. */
6951
6952 void
6953 s390_output_pool_entry (rtx exp, enum machine_mode mode, unsigned int align)
6954 {
6955 REAL_VALUE_TYPE r;
6956
6957 switch (GET_MODE_CLASS (mode))
6958 {
6959 case MODE_FLOAT:
6960 case MODE_DECIMAL_FLOAT:
6961 gcc_assert (GET_CODE (exp) == CONST_DOUBLE);
6962
6963 REAL_VALUE_FROM_CONST_DOUBLE (r, exp);
6964 assemble_real (r, mode, align);
6965 break;
6966
6967 case MODE_INT:
6968 assemble_integer (exp, GET_MODE_SIZE (mode), align, 1);
6969 mark_symbol_refs_as_used (exp);
6970 break;
6971
6972 default:
6973 gcc_unreachable ();
6974 }
6975 }
6976
6977
6978 /* Return an RTL expression representing the value of the return address
6979 for the frame COUNT steps up from the current frame. FRAME is the
6980 frame pointer of that frame. */
6981
6982 rtx
6983 s390_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
6984 {
6985 int offset;
6986 rtx addr;
6987
6988 /* Without backchain, we fail for all but the current frame. */
6989
6990 if (!TARGET_BACKCHAIN && count > 0)
6991 return NULL_RTX;
6992
6993 /* For the current frame, we need to make sure the initial
6994 value of RETURN_REGNUM is actually saved. */
6995
6996 if (count == 0)
6997 {
6998 /* On non-z architectures branch splitting could overwrite r14. */
6999 if (TARGET_CPU_ZARCH)
7000 return get_hard_reg_initial_val (Pmode, RETURN_REGNUM);
7001 else
7002 {
7003 cfun_frame_layout.save_return_addr_p = true;
7004 return gen_rtx_MEM (Pmode, return_address_pointer_rtx);
7005 }
7006 }
7007
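/* In the packed stack layout the return address is stored two words
below the frame address; in the standard layout it is in the save
slot of RETURN_REGNUM within the register save area. */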
7008 if (TARGET_PACKED_STACK)
7009 offset = -2 * UNITS_PER_LONG;
7010 else
7011 offset = RETURN_REGNUM * UNITS_PER_LONG;
7012
7013 addr = plus_constant (frame, offset);
7014 addr = memory_address (Pmode, addr);
7015 return gen_rtx_MEM (Pmode, addr);
7016 }
7017
7018 /* Return an RTL expression representing the back chain stored in
7019 the current stack frame. */
7020
7021 rtx
7022 s390_back_chain_rtx (void)
7023 {
7024 rtx chain;
7025
7026 gcc_assert (TARGET_BACKCHAIN);
7027
7028 if (TARGET_PACKED_STACK)
7029 chain = plus_constant (stack_pointer_rtx,
7030 STACK_POINTER_OFFSET - UNITS_PER_LONG);
7031 else
7032 chain = stack_pointer_rtx;
7033
7034 chain = gen_rtx_MEM (Pmode, chain);
7035 return chain;
7036 }
7037
7038 /* Find first call clobbered register unused in a function.
7039 This could be used as base register in a leaf function
7040 or for holding the return address before epilogue. */
7041
7042 static int
7043 find_unused_clobbered_reg (void)
7044 {
7045 int i;
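/* GPRs 0 .. 5 are the call clobbered registers. */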
7046 for (i = 0; i < 6; i++)
7047 if (!df_regs_ever_live_p (i))
7048 return i;
7049 return 0;
7050 }
7051
7052
7053 /* Helper function for s390_regs_ever_clobbered. Sets the fields in DATA for all
7054 clobbered hard regs in SETREG. */
7055
7056 static void
7057 s390_reg_clobbered_rtx (rtx setreg, const_rtx set_insn ATTRIBUTE_UNUSED, void *data)
7058 {
7059 int *regs_ever_clobbered = (int *)data;
7060 unsigned int i, regno;
7061 enum machine_mode mode = GET_MODE (setreg);
7062
7063 if (GET_CODE (setreg) == SUBREG)
7064 {
7065 rtx inner = SUBREG_REG (setreg);
7066 if (!GENERAL_REG_P (inner))
7067 return;
7068 regno = subreg_regno (setreg);
7069 }
7070 else if (GENERAL_REG_P (setreg))
7071 regno = REGNO (setreg);
7072 else
7073 return;
7074
7075 for (i = regno;
7076 i < regno + HARD_REGNO_NREGS (regno, mode);
7077 i++)
7078 regs_ever_clobbered[i] = 1;
7079 }
7080
7081 /* Walks through all basic blocks of the current function looking
7082 for clobbered hard regs using s390_reg_clobbered_rtx. The fields
7083 of the passed integer array REGS_EVER_CLOBBERED are set to one for
7084 each of those regs. */
7085
7086 static void
7087 s390_regs_ever_clobbered (int *regs_ever_clobbered)
7088 {
7089 basic_block cur_bb;
7090 rtx cur_insn;
7091 unsigned int i;
7092
7093 memset (regs_ever_clobbered, 0, 16 * sizeof (int));
7094
7095 /* For non-leaf functions we have to consider all call clobbered regs to be
7096 clobbered. */
7097 if (!current_function_is_leaf)
7098 {
7099 for (i = 0; i < 16; i++)
7100 regs_ever_clobbered[i] = call_really_used_regs[i];
7101 }
7102
7103 /* Make the "magic" eh_return registers live if necessary. For regs_ever_live
7104 this work is done by liveness analysis (mark_regs_live_at_end).
7105 Special care is needed for functions containing landing pads. Landing pads
7106 may use the eh registers, but the code which sets these registers is not
7107 contained in that function. Hence s390_regs_ever_clobbered is not able to
7108 deal with this automatically. */
7109 if (crtl->calls_eh_return || cfun->machine->has_landing_pad_p)
7110 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM ; i++)
7111 if (crtl->calls_eh_return
7112 || (cfun->machine->has_landing_pad_p
7113 && df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
7114 regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;
7115
7116 /* For nonlocal gotos all call-saved registers have to be saved.
7117 This flag is also set for the unwinding code in libgcc.
7118 See expand_builtin_unwind_init. For regs_ever_live this is done by
7119 reload. */
7120 if (cfun->has_nonlocal_label)
7121 for (i = 0; i < 16; i++)
7122 if (!call_really_used_regs[i])
7123 regs_ever_clobbered[i] = 1;
7124
7125 FOR_EACH_BB (cur_bb)
7126 {
7127 FOR_BB_INSNS (cur_bb, cur_insn)
7128 {
7129 if (INSN_P (cur_insn))
7130 note_stores (PATTERN (cur_insn),
7131 s390_reg_clobbered_rtx,
7132 regs_ever_clobbered);
7133 }
7134 }
7135 }
7136
7137 /* Determine the frame area which actually has to be accessed
7138 in the function epilogue. The values are stored at the
7139 given pointers AREA_BOTTOM (address of the lowest used stack
7140 address) and AREA_TOP (address of the first item which does
7141 not belong to the stack frame). */
7142
7143 static void
7144 s390_frame_area (int *area_bottom, int *area_top)
7145 {
7146 int b, t;
7147 int i;
7148
7149 b = INT_MAX;
7150 t = INT_MIN;
7151
7152 if (cfun_frame_layout.first_restore_gpr != -1)
7153 {
7154 b = (cfun_frame_layout.gprs_offset
7155 + cfun_frame_layout.first_restore_gpr * UNITS_PER_LONG);
7156 t = b + (cfun_frame_layout.last_restore_gpr
7157 - cfun_frame_layout.first_restore_gpr + 1) * UNITS_PER_LONG;
7158 }
7159
7160 if (TARGET_64BIT && cfun_save_high_fprs_p)
7161 {
7162 b = MIN (b, cfun_frame_layout.f8_offset);
7163 t = MAX (t, (cfun_frame_layout.f8_offset
7164 + cfun_frame_layout.high_fprs * 8));
7165 }
7166
7167 if (!TARGET_64BIT)
7168 for (i = 2; i < 4; i++)
7169 if (cfun_fpr_bit_p (i))
7170 {
7171 b = MIN (b, cfun_frame_layout.f4_offset + (i - 2) * 8);
7172 t = MAX (t, cfun_frame_layout.f4_offset + (i - 1) * 8);
7173 }
7174
7175 *area_bottom = b;
7176 *area_top = t;
7177 }
7178
7179 /* Fill cfun->machine with info about register usage of current function.
7180 Return in CLOBBERED_REGS which GPRs are currently considered set. */
7181
7182 static void
7183 s390_register_info (int clobbered_regs[])
7184 {
7185 int i, j;
7186
7187 /* fprs 8 - 15 are call saved in the 64 bit ABI. */
7188 cfun_frame_layout.fpr_bitmap = 0;
7189 cfun_frame_layout.high_fprs = 0;
7190 if (TARGET_64BIT)
7191 for (i = 24; i < 32; i++)
7192 if (df_regs_ever_live_p (i) && !global_regs[i])
7193 {
7194 cfun_set_fpr_bit (i - 16);
7195 cfun_frame_layout.high_fprs++;
7196 }
7197
7198 /* Find first and last gpr to be saved. We trust regs_ever_live
7199 data, except that we don't save and restore global registers.
7200
7201 Also, all registers with special meaning to the compiler need
7202 to be handled specially. */
7203
7204 s390_regs_ever_clobbered (clobbered_regs);
7205
7206 for (i = 0; i < 16; i++)
7207 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i] && !fixed_regs[i];
7208
7209 if (frame_pointer_needed)
7210 clobbered_regs[HARD_FRAME_POINTER_REGNUM] = 1;
7211
7212 if (flag_pic)
7213 clobbered_regs[PIC_OFFSET_TABLE_REGNUM]
7214 |= df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM);
7215
7216 clobbered_regs[BASE_REGNUM]
7217 |= (cfun->machine->base_reg
7218 && REGNO (cfun->machine->base_reg) == BASE_REGNUM);
7219
7220 clobbered_regs[RETURN_REGNUM]
7221 |= (!current_function_is_leaf
7222 || TARGET_TPF_PROFILING
7223 || cfun->machine->split_branches_pending_p
7224 || cfun_frame_layout.save_return_addr_p
7225 || crtl->calls_eh_return
7226 || cfun->stdarg);
7227
7228 clobbered_regs[STACK_POINTER_REGNUM]
7229 |= (!current_function_is_leaf
7230 || TARGET_TPF_PROFILING
7231 || cfun_save_high_fprs_p
7232 || get_frame_size () > 0
7233 || cfun->calls_alloca
7234 || cfun->stdarg);
7235
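/* Only the call saved GPRs 6 .. 15 are considered here; save slots for
the argument registers of varargs functions are added further down. */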
7236 for (i = 6; i < 16; i++)
7237 if (df_regs_ever_live_p (i) || clobbered_regs[i])
7238 break;
7239 for (j = 15; j > i; j--)
7240 if (df_regs_ever_live_p (j) || clobbered_regs[j])
7241 break;
7242
7243 if (i == 16)
7244 {
7245 /* Nothing to save/restore. */
7246 cfun_frame_layout.first_save_gpr_slot = -1;
7247 cfun_frame_layout.last_save_gpr_slot = -1;
7248 cfun_frame_layout.first_save_gpr = -1;
7249 cfun_frame_layout.first_restore_gpr = -1;
7250 cfun_frame_layout.last_save_gpr = -1;
7251 cfun_frame_layout.last_restore_gpr = -1;
7252 }
7253 else
7254 {
7255 /* Save slots for gprs from i to j. */
7256 cfun_frame_layout.first_save_gpr_slot = i;
7257 cfun_frame_layout.last_save_gpr_slot = j;
7258
7259 for (i = cfun_frame_layout.first_save_gpr_slot;
7260 i < cfun_frame_layout.last_save_gpr_slot + 1;
7261 i++)
7262 if (clobbered_regs[i])
7263 break;
7264
7265 for (j = cfun_frame_layout.last_save_gpr_slot; j > i; j--)
7266 if (clobbered_regs[j])
7267 break;
7268
7269 if (i == cfun_frame_layout.last_save_gpr_slot + 1)
7270 {
7271 /* Nothing to save/restore. */
7272 cfun_frame_layout.first_save_gpr = -1;
7273 cfun_frame_layout.first_restore_gpr = -1;
7274 cfun_frame_layout.last_save_gpr = -1;
7275 cfun_frame_layout.last_restore_gpr = -1;
7276 }
7277 else
7278 {
7279 /* Save / Restore from gpr i to j. */
7280 cfun_frame_layout.first_save_gpr = i;
7281 cfun_frame_layout.first_restore_gpr = i;
7282 cfun_frame_layout.last_save_gpr = j;
7283 cfun_frame_layout.last_restore_gpr = j;
7284 }
7285 }
7286
7287 if (cfun->stdarg)
7288 {
7289 /* Varargs functions need to save gprs 2 to 6. */
7290 if (cfun->va_list_gpr_size
7291 && crtl->args.info.gprs < GP_ARG_NUM_REG)
7292 {
7293 int min_gpr = crtl->args.info.gprs;
7294 int max_gpr = min_gpr + cfun->va_list_gpr_size;
7295 if (max_gpr > GP_ARG_NUM_REG)
7296 max_gpr = GP_ARG_NUM_REG;
7297
7298 if (cfun_frame_layout.first_save_gpr == -1
7299 || cfun_frame_layout.first_save_gpr > 2 + min_gpr)
7300 {
7301 cfun_frame_layout.first_save_gpr = 2 + min_gpr;
7302 cfun_frame_layout.first_save_gpr_slot = 2 + min_gpr;
7303 }
7304
7305 if (cfun_frame_layout.last_save_gpr == -1
7306 || cfun_frame_layout.last_save_gpr < 2 + max_gpr - 1)
7307 {
7308 cfun_frame_layout.last_save_gpr = 2 + max_gpr - 1;
7309 cfun_frame_layout.last_save_gpr_slot = 2 + max_gpr - 1;
7310 }
7311 }
7312
7313 /* Mark f0, f2 for 31 bit and f0-f4 for 64 bit to be saved. */
7314 if (TARGET_HARD_FLOAT && cfun->va_list_fpr_size
7315 && crtl->args.info.fprs < FP_ARG_NUM_REG)
7316 {
7317 int min_fpr = crtl->args.info.fprs;
7318 int max_fpr = min_fpr + cfun->va_list_fpr_size;
7319 if (max_fpr > FP_ARG_NUM_REG)
7320 max_fpr = FP_ARG_NUM_REG;
7321
7322 /* ??? This is currently required to ensure proper location
7323 of the fpr save slots within the va_list save area. */
7324 if (TARGET_PACKED_STACK)
7325 min_fpr = 0;
7326
7327 for (i = min_fpr; i < max_fpr; i++)
7328 cfun_set_fpr_bit (i);
7329 }
7330 }
7331
7332 if (!TARGET_64BIT)
7333 for (i = 2; i < 4; i++)
7334 if (df_regs_ever_live_p (i + 16) && !global_regs[i + 16])
7335 cfun_set_fpr_bit (i);
7336 }
7337
7338 /* Fill cfun->machine with info about frame of current function. */
7339
7340 static void
7341 s390_frame_info (void)
7342 {
7343 int i;
7344
7345 cfun_frame_layout.frame_size = get_frame_size ();
7346 if (!TARGET_64BIT && cfun_frame_layout.frame_size > 0x7fff0000)
7347 fatal_error ("total size of local variables exceeds architecture limit");
7348
7349 if (!TARGET_PACKED_STACK)
7350 {
7351 cfun_frame_layout.backchain_offset = 0;
7352 cfun_frame_layout.f0_offset = 16 * UNITS_PER_LONG;
7353 cfun_frame_layout.f4_offset = cfun_frame_layout.f0_offset + 2 * 8;
7354 cfun_frame_layout.f8_offset = -cfun_frame_layout.high_fprs * 8;
7355 cfun_frame_layout.gprs_offset = (cfun_frame_layout.first_save_gpr_slot
7356 * UNITS_PER_LONG);
7357 }
7358 else if (TARGET_BACKCHAIN) /* kernel stack layout */
7359 {
7360 cfun_frame_layout.backchain_offset = (STACK_POINTER_OFFSET
7361 - UNITS_PER_LONG);
7362 cfun_frame_layout.gprs_offset
7363 = (cfun_frame_layout.backchain_offset
7364 - (STACK_POINTER_REGNUM - cfun_frame_layout.first_save_gpr_slot + 1)
7365 * UNITS_PER_LONG);
7366
7367 if (TARGET_64BIT)
7368 {
7369 cfun_frame_layout.f4_offset
7370 = (cfun_frame_layout.gprs_offset
7371 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7372
7373 cfun_frame_layout.f0_offset
7374 = (cfun_frame_layout.f4_offset
7375 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7376 }
7377 else
7378 {
7379 /* On 31 bit we have to take care of the alignment of the
7380 floating point regs to provide the fastest access. */
7381 cfun_frame_layout.f0_offset
7382 = ((cfun_frame_layout.gprs_offset
7383 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1))
7384 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7385
7386 cfun_frame_layout.f4_offset
7387 = (cfun_frame_layout.f0_offset
7388 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7389 }
7390 }
7391 else /* no backchain */
7392 {
7393 cfun_frame_layout.f4_offset
7394 = (STACK_POINTER_OFFSET
7395 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7396
7397 cfun_frame_layout.f0_offset
7398 = (cfun_frame_layout.f4_offset
7399 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7400
7401 cfun_frame_layout.gprs_offset
7402 = cfun_frame_layout.f0_offset - cfun_gprs_save_area_size;
7403 }
7404
7405 if (current_function_is_leaf
7406 && !TARGET_TPF_PROFILING
7407 && cfun_frame_layout.frame_size == 0
7408 && !cfun_save_high_fprs_p
7409 && !cfun->calls_alloca
7410 && !cfun->stdarg)
7411 return;
7412
7413 if (!TARGET_PACKED_STACK)
7414 cfun_frame_layout.frame_size += (STACK_POINTER_OFFSET
7415 + crtl->outgoing_args_size
7416 + cfun_frame_layout.high_fprs * 8);
7417 else
7418 {
7419 if (TARGET_BACKCHAIN)
7420 cfun_frame_layout.frame_size += UNITS_PER_LONG;
7421
7422 /* No alignment trouble here because f8-f15 are only saved under
7423 64 bit. */
7424 cfun_frame_layout.f8_offset = (MIN (MIN (cfun_frame_layout.f0_offset,
7425 cfun_frame_layout.f4_offset),
7426 cfun_frame_layout.gprs_offset)
7427 - cfun_frame_layout.high_fprs * 8);
7428
7429 cfun_frame_layout.frame_size += cfun_frame_layout.high_fprs * 8;
7430
7431 for (i = 0; i < 8; i++)
7432 if (cfun_fpr_bit_p (i))
7433 cfun_frame_layout.frame_size += 8;
7434
7435 cfun_frame_layout.frame_size += cfun_gprs_save_area_size;
7436
7437 /* If under 31 bit an odd number of gprs has to be saved, we have to adjust
7438 the frame size to sustain 8 byte alignment of stack frames. */
7439 cfun_frame_layout.frame_size = ((cfun_frame_layout.frame_size +
7440 STACK_BOUNDARY / BITS_PER_UNIT - 1)
7441 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1));
7442
7443 cfun_frame_layout.frame_size += crtl->outgoing_args_size;
7444 }
7445 }
7446
7447 /* Generate frame layout. Fills in register and frame data for the current
7448 function in cfun->machine. This routine can be called multiple times;
7449 it will re-do the complete frame layout every time. */
7450
7451 static void
7452 s390_init_frame_layout (void)
7453 {
7454 HOST_WIDE_INT frame_size;
7455 int base_used;
7456 int clobbered_regs[16];
7457
7458 /* On S/390 machines, we may need to perform branch splitting, which
7459 will require both base and return address register. We have no
7460 choice but to assume we're going to need them until right at the
7461 end of the machine dependent reorg phase. */
7462 if (!TARGET_CPU_ZARCH)
7463 cfun->machine->split_branches_pending_p = true;
7464
7465 do
7466 {
7467 frame_size = cfun_frame_layout.frame_size;
7468
7469 /* Try to predict whether we'll need the base register. */
7470 base_used = cfun->machine->split_branches_pending_p
7471 || crtl->uses_const_pool
7472 || (!DISP_IN_RANGE (frame_size)
7473 && !CONST_OK_FOR_K (frame_size));
7474
7475 /* Decide which register to use as literal pool base. In small
7476 leaf functions, try to use an unused call-clobbered register
7477 as base register to avoid save/restore overhead. */
7478 if (!base_used)
7479 cfun->machine->base_reg = NULL_RTX;
7480 else if (current_function_is_leaf && !df_regs_ever_live_p (5))
7481 cfun->machine->base_reg = gen_rtx_REG (Pmode, 5);
7482 else
7483 cfun->machine->base_reg = gen_rtx_REG (Pmode, BASE_REGNUM);
7484
7485 s390_register_info (clobbered_regs);
7486 s390_frame_info ();
7487 }
7488 while (frame_size != cfun_frame_layout.frame_size);
7489 }
7490
7491 /* Update frame layout. Recompute actual register save data based on
7492 current info and update regs_ever_live for the special registers.
7493 May be called multiple times, but may never cause *more* registers
7494 to be saved than s390_init_frame_layout allocated room for. */
7495
7496 static void
7497 s390_update_frame_layout (void)
7498 {
7499 int clobbered_regs[16];
7500
7501 s390_register_info (clobbered_regs);
7502
7503 df_set_regs_ever_live (BASE_REGNUM,
7504 clobbered_regs[BASE_REGNUM] ? true : false);
7505 df_set_regs_ever_live (RETURN_REGNUM,
7506 clobbered_regs[RETURN_REGNUM] ? true : false);
7507 df_set_regs_ever_live (STACK_POINTER_REGNUM,
7508 clobbered_regs[STACK_POINTER_REGNUM] ? true : false);
7509
7510 if (cfun->machine->base_reg)
7511 df_set_regs_ever_live (REGNO (cfun->machine->base_reg), true);
7512 }
7513
7514 /* Return true if it is legal to put a value with MODE into REGNO. */
7515
7516 bool
7517 s390_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
7518 {
7519 switch (REGNO_REG_CLASS (regno))
7520 {
7521 case FP_REGS:
7522 if (REGNO_PAIR_OK (regno, mode))
7523 {
7524 if (mode == SImode || mode == DImode)
7525 return true;
7526
7527 if (FLOAT_MODE_P (mode) && GET_MODE_CLASS (mode) != MODE_VECTOR_FLOAT)
7528 return true;
7529 }
7530 break;
7531 case ADDR_REGS:
7532 if (FRAME_REGNO_P (regno) && mode == Pmode)
7533 return true;
7534
7535 /* fallthrough */
7536 case GENERAL_REGS:
7537 if (REGNO_PAIR_OK (regno, mode))
7538 {
7539 if (TARGET_ZARCH
7540 || (mode != TFmode && mode != TCmode && mode != TDmode))
7541 return true;
7542 }
7543 break;
7544 case CC_REGS:
7545 if (GET_MODE_CLASS (mode) == MODE_CC)
7546 return true;
7547 break;
7548 case ACCESS_REGS:
7549 if (REGNO_PAIR_OK (regno, mode))
7550 {
7551 if (mode == SImode || mode == Pmode)
7552 return true;
7553 }
7554 break;
7555 default:
7556 return false;
7557 }
7558
7559 return false;
7560 }
7561
7562 /* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */
7563
7564 bool
7565 s390_hard_regno_rename_ok (unsigned int old_reg, unsigned int new_reg)
7566 {
7567 /* Once we've decided upon a register to use as base register, it must
7568 no longer be used for any other purpose. */
7569 if (cfun->machine->base_reg)
7570 if (REGNO (cfun->machine->base_reg) == old_reg
7571 || REGNO (cfun->machine->base_reg) == new_reg)
7572 return false;
7573
7574 return true;
7575 }
7576
7577 /* Maximum number of registers to represent a value of mode MODE
7578 in a register of class RCLASS. */
7579
7580 int
7581 s390_class_max_nregs (enum reg_class rclass, enum machine_mode mode)
7582 {
7583 switch (rclass)
7584 {
7585 case FP_REGS:
7586 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
7587 return 2 * ((GET_MODE_SIZE (mode) / 2 + 8 - 1) / 8);
7588 else
7589 return (GET_MODE_SIZE (mode) + 8 - 1) / 8;
7590 case ACCESS_REGS:
7591 return (GET_MODE_SIZE (mode) + 4 - 1) / 4;
7592 default:
7593 break;
7594 }
7595 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
7596 }
7597
7598 /* Return true if register FROM can be eliminated via register TO. */
7599
7600 static bool
7601 s390_can_eliminate (const int from, const int to)
7602 {
7603 /* On zSeries machines, we have not marked the base register as fixed.
7604 Instead, we have an elimination rule BASE_REGNUM -> BASE_REGNUM.
7605 If a function requires the base register, we say here that this
7606 elimination cannot be performed. This will cause reload to free
7607 up the base register (as if it were fixed). On the other hand,
7608 if the current function does *not* require the base register, we
7609 say here the elimination succeeds, which in turn allows reload
7610 to allocate the base register for any other purpose. */
7611 if (from == BASE_REGNUM && to == BASE_REGNUM)
7612 {
7613 if (TARGET_CPU_ZARCH)
7614 {
7615 s390_init_frame_layout ();
7616 return cfun->machine->base_reg == NULL_RTX;
7617 }
7618
7619 return false;
7620 }
7621
7622 /* Everything else must point into the stack frame. */
7623 gcc_assert (to == STACK_POINTER_REGNUM
7624 || to == HARD_FRAME_POINTER_REGNUM);
7625
7626 gcc_assert (from == FRAME_POINTER_REGNUM
7627 || from == ARG_POINTER_REGNUM
7628 || from == RETURN_ADDRESS_POINTER_REGNUM);
7629
7630 /* Make sure we actually saved the return address. */
7631 if (from == RETURN_ADDRESS_POINTER_REGNUM)
7632 if (!crtl->calls_eh_return
7633 && !cfun->stdarg
7634 && !cfun_frame_layout.save_return_addr_p)
7635 return false;
7636
7637 return true;
7638 }
7639
7640 /* Return offset between register FROM and TO initially after prolog. */
7641
7642 HOST_WIDE_INT
7643 s390_initial_elimination_offset (int from, int to)
7644 {
7645 HOST_WIDE_INT offset;
7646 int index;
7647
7648 /* ??? Why are we called for non-eliminable pairs? */
7649 if (!s390_can_eliminate (from, to))
7650 return 0;
7651
7652 switch (from)
7653 {
7654 case FRAME_POINTER_REGNUM:
7655 offset = (get_frame_size()
7656 + STACK_POINTER_OFFSET
7657 + crtl->outgoing_args_size);
7658 break;
7659
7660 case ARG_POINTER_REGNUM:
7661 s390_init_frame_layout ();
7662 offset = cfun_frame_layout.frame_size + STACK_POINTER_OFFSET;
7663 break;
7664
7665 case RETURN_ADDRESS_POINTER_REGNUM:
7666 s390_init_frame_layout ();
7667 index = RETURN_REGNUM - cfun_frame_layout.first_save_gpr_slot;
7668 gcc_assert (index >= 0);
7669 offset = cfun_frame_layout.frame_size + cfun_frame_layout.gprs_offset;
7670 offset += index * UNITS_PER_LONG;
7671 break;
7672
7673 case BASE_REGNUM:
7674 offset = 0;
7675 break;
7676
7677 default:
7678 gcc_unreachable ();
7679 }
7680
7681 return offset;
7682 }
7683
7684 /* Emit insn to save fpr REGNUM at offset OFFSET relative
7685 to register BASE. Return generated insn. */
7686
7687 static rtx
7688 save_fpr (rtx base, int offset, int regnum)
7689 {
7690 rtx addr;
7691 addr = gen_rtx_MEM (DFmode, plus_constant (base, offset));
7692
7693 if (regnum >= 16 && regnum <= (16 + FP_ARG_NUM_REG))
7694 set_mem_alias_set (addr, get_varargs_alias_set ());
7695 else
7696 set_mem_alias_set (addr, get_frame_alias_set ());
7697
7698 return emit_move_insn (addr, gen_rtx_REG (DFmode, regnum));
7699 }
7700
7701 /* Emit insn to restore fpr REGNUM from offset OFFSET relative
7702 to register BASE. Return generated insn. */
7703
7704 static rtx
7705 restore_fpr (rtx base, int offset, int regnum)
7706 {
7707 rtx addr;
7708 addr = gen_rtx_MEM (DFmode, plus_constant (base, offset));
7709 set_mem_alias_set (addr, get_frame_alias_set ());
7710
7711 return emit_move_insn (gen_rtx_REG (DFmode, regnum), addr);
7712 }
7713
7714 /* Return true if REGNO is a global register, but not one
7715 of the special ones that need to be saved/restored anyway. */
7716
7717 static inline bool
7718 global_not_special_regno_p (int regno)
7719 {
7720 return (global_regs[regno]
7721 /* These registers are special and need to be
7722 restored in any case. */
7723 && !(regno == STACK_POINTER_REGNUM
7724 || regno == RETURN_REGNUM
7725 || regno == BASE_REGNUM
7726 || (flag_pic && regno == (int)PIC_OFFSET_TABLE_REGNUM)));
7727 }
7728
7729 /* Generate insn to save registers FIRST to LAST into
7730 the register save area located at offset OFFSET
7731 relative to register BASE. */
7732
7733 static rtx
7734 save_gprs (rtx base, int offset, int first, int last)
7735 {
7736 rtx addr, insn, note;
7737 int i;
7738
7739 addr = plus_constant (base, offset);
7740 addr = gen_rtx_MEM (Pmode, addr);
7741
7742 set_mem_alias_set (addr, get_frame_alias_set ());
7743
7744 /* Special-case single register. */
7745 if (first == last)
7746 {
7747 if (TARGET_64BIT)
7748 insn = gen_movdi (addr, gen_rtx_REG (Pmode, first));
7749 else
7750 insn = gen_movsi (addr, gen_rtx_REG (Pmode, first));
7751
7752 if (!global_not_special_regno_p (first))
7753 RTX_FRAME_RELATED_P (insn) = 1;
7754 return insn;
7755 }
7756
7757
7758 insn = gen_store_multiple (addr,
7759 gen_rtx_REG (Pmode, first),
7760 GEN_INT (last - first + 1));
7761
7762 if (first <= 6 && cfun->stdarg)
7763 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
7764 {
7765 rtx mem = XEXP (XVECEXP (PATTERN (insn), 0, i), 0);
7766
7767 if (first + i <= 6)
7768 set_mem_alias_set (mem, get_varargs_alias_set ());
7769 }
7770
7771 /* We need to set the FRAME_RELATED flag on all SETs
7772 inside the store-multiple pattern.
7773
7774 However, we must not emit DWARF records for registers 2..5
7775 if they are stored for use by variable arguments ...
7776
7777 ??? Unfortunately, it is not enough to simply not set the
7778 FRAME_RELATED flags for those SETs, because the first SET
7779 of the PARALLEL is always treated as if it had the flag
7780 set, even if it does not. Therefore we emit a new pattern
7781 without those registers as a REG_FRAME_RELATED_EXPR note. */
7782
7783 if (first >= 6 && !global_not_special_regno_p (first))
7784 {
7785 rtx pat = PATTERN (insn);
7786
7787 for (i = 0; i < XVECLEN (pat, 0); i++)
7788 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
7789 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (pat,
7790 0, i)))))
7791 RTX_FRAME_RELATED_P (XVECEXP (pat, 0, i)) = 1;
7792
7793 RTX_FRAME_RELATED_P (insn) = 1;
7794 }
7795 else if (last >= 6)
7796 {
7797 int start;
7798
7799 for (start = first >= 6 ? first : 6; start <= last; start++)
7800 if (!global_not_special_regno_p (start))
7801 break;
7802
7803 if (start > last)
7804 return insn;
7805
7806 addr = plus_constant (base, offset + (start - first) * UNITS_PER_LONG);
7807 note = gen_store_multiple (gen_rtx_MEM (Pmode, addr),
7808 gen_rtx_REG (Pmode, start),
7809 GEN_INT (last - start + 1));
7810 note = PATTERN (note);
7811
7812 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
7813
7814 for (i = 0; i < XVECLEN (note, 0); i++)
7815 if (GET_CODE (XVECEXP (note, 0, i)) == SET
7816 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (note,
7817 0, i)))))
7818 RTX_FRAME_RELATED_P (XVECEXP (note, 0, i)) = 1;
7819
7820 RTX_FRAME_RELATED_P (insn) = 1;
7821 }
7822
7823 return insn;
7824 }
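
/* Rough sketch of the result (assumed offsets; the real values come from
   the frame layout): saving r6..r15 at offset 48 from %r15 on 64-bit
   yields one store-multiple roughly of the form

     (parallel [(set (mem:DI (plus %r15 48))  (reg:DI 6))
                ...
                (set (mem:DI (plus %r15 120)) (reg:DI 15))])

   i.e. a single STMG, with RTX_FRAME_RELATED_P set on the SETs that
   must appear in the unwind information as arranged above.  */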
7825
7826 /* Generate insn to restore registers FIRST to LAST from
7827 the register save area located at offset OFFSET
7828 relative to register BASE. */
7829
7830 static rtx
7831 restore_gprs (rtx base, int offset, int first, int last)
7832 {
7833 rtx addr, insn;
7834
7835 addr = plus_constant (base, offset);
7836 addr = gen_rtx_MEM (Pmode, addr);
7837 set_mem_alias_set (addr, get_frame_alias_set ());
7838
7839 /* Special-case single register. */
7840 if (first == last)
7841 {
7842 if (TARGET_64BIT)
7843 insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
7844 else
7845 insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);
7846
7847 return insn;
7848 }
7849
7850 insn = gen_load_multiple (gen_rtx_REG (Pmode, first),
7851 addr,
7852 GEN_INT (last - first + 1));
7853 return insn;
7854 }
7855
7856 /* Return insn sequence to load the GOT register. */
7857
7858 static GTY(()) rtx got_symbol;
7859 rtx
7860 s390_load_got (void)
7861 {
7862 rtx insns;
7863
7864 /* We cannot use pic_offset_table_rtx here since this function is
7865 also used for non-PIC code when __tls_get_offset is called; in
7866 that case neither PIC_OFFSET_TABLE_REGNUM nor pic_offset_table_rtx
7867 is usable. */
7868 rtx got_rtx = gen_rtx_REG (Pmode, 12);
7869
7870 if (!got_symbol)
7871 {
7872 got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
7873 SYMBOL_REF_FLAGS (got_symbol) = SYMBOL_FLAG_LOCAL;
7874 }
7875
7876 start_sequence ();
7877
7878 if (TARGET_CPU_ZARCH)
7879 {
7880 emit_move_insn (got_rtx, got_symbol);
7881 }
7882 else
7883 {
7884 rtx offset;
7885
7886 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got_symbol),
7887 UNSPEC_LTREL_OFFSET);
7888 offset = gen_rtx_CONST (Pmode, offset);
7889 offset = force_const_mem (Pmode, offset);
7890
7891 emit_move_insn (got_rtx, offset);
7892
7893 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (offset, 0)),
7894 UNSPEC_LTREL_BASE);
7895 offset = gen_rtx_PLUS (Pmode, got_rtx, offset);
7896
7897 emit_move_insn (got_rtx, offset);
7898 }
7899
7900 insns = get_insns ();
7901 end_sequence ();
7902 return insns;
7903 }
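
/* For illustration only: with TARGET_CPU_ZARCH the sequence built above
   amounts to a single

     larl  %r12,_GLOBAL_OFFSET_TABLE_

   while on older CPUs the GOT address is assembled from a literal-pool
   constant plus the literal-pool base (the UNSPEC_LTREL_OFFSET /
   UNSPEC_LTREL_BASE pair).  Register 12 matches the hard-coded got_rtx
   above.  */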
7904
7905 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
7906 and the change to the stack pointer. */
7907
7908 static void
7909 s390_emit_stack_tie (void)
7910 {
7911 rtx mem = gen_frame_mem (BLKmode,
7912 gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
7913
7914 emit_insn (gen_stack_tie (mem));
7915 }
7916
7917 /* Expand the prologue into a bunch of separate insns. */
7918
7919 void
7920 s390_emit_prologue (void)
7921 {
7922 rtx insn, addr;
7923 rtx temp_reg;
7924 int i;
7925 int offset;
7926 int next_fpr = 0;
7927
7928 /* Complete frame layout. */
7929
7930 s390_update_frame_layout ();
7931
7932 /* Annotate all constant pool references to let the scheduler know
7933 they implicitly use the base register. */
7934
7935 push_topmost_sequence ();
7936
7937 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7938 if (INSN_P (insn))
7939 {
7940 annotate_constant_pool_refs (&PATTERN (insn));
7941 df_insn_rescan (insn);
7942 }
7943
7944 pop_topmost_sequence ();
7945
7946 /* Choose the best register to use as a temporary within the prologue.
7947 See below for why TPF must use register 1. */
7948
7949 if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
7950 && !current_function_is_leaf
7951 && !TARGET_TPF_PROFILING)
7952 temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
7953 else
7954 temp_reg = gen_rtx_REG (Pmode, 1);
7955
7956 /* Save call saved gprs. */
7957 if (cfun_frame_layout.first_save_gpr != -1)
7958 {
7959 insn = save_gprs (stack_pointer_rtx,
7960 cfun_frame_layout.gprs_offset +
7961 UNITS_PER_LONG * (cfun_frame_layout.first_save_gpr
7962 - cfun_frame_layout.first_save_gpr_slot),
7963 cfun_frame_layout.first_save_gpr,
7964 cfun_frame_layout.last_save_gpr);
7965 emit_insn (insn);
7966 }
7967
7968 /* Dummy insn to mark literal pool slot. */
7969
7970 if (cfun->machine->base_reg)
7971 emit_insn (gen_main_pool (cfun->machine->base_reg));
7972
7973 offset = cfun_frame_layout.f0_offset;
7974
7975 /* Save f0 and f2. */
7976 for (i = 0; i < 2; i++)
7977 {
7978 if (cfun_fpr_bit_p (i))
7979 {
7980 save_fpr (stack_pointer_rtx, offset, i + 16);
7981 offset += 8;
7982 }
7983 else if (!TARGET_PACKED_STACK)
7984 offset += 8;
7985 }
7986
7987 /* Save f4 and f6. */
7988 offset = cfun_frame_layout.f4_offset;
7989 for (i = 2; i < 4; i++)
7990 {
7991 if (cfun_fpr_bit_p (i))
7992 {
7993 insn = save_fpr (stack_pointer_rtx, offset, i + 16);
7994 offset += 8;
7995
7996 /* If f4 and f6 are call clobbered, they are saved because of stdarg
7997 and therefore are not frame related. */
7998 if (!call_really_used_regs[i + 16])
7999 RTX_FRAME_RELATED_P (insn) = 1;
8000 }
8001 else if (!TARGET_PACKED_STACK)
8002 offset += 8;
8003 }
8004
8005 if (TARGET_PACKED_STACK
8006 && cfun_save_high_fprs_p
8007 && cfun_frame_layout.f8_offset + cfun_frame_layout.high_fprs * 8 > 0)
8008 {
8009 offset = (cfun_frame_layout.f8_offset
8010 + (cfun_frame_layout.high_fprs - 1) * 8);
8011
8012 for (i = 15; i > 7 && offset >= 0; i--)
8013 if (cfun_fpr_bit_p (i))
8014 {
8015 insn = save_fpr (stack_pointer_rtx, offset, i + 16);
8016
8017 RTX_FRAME_RELATED_P (insn) = 1;
8018 offset -= 8;
8019 }
8020 if (offset >= cfun_frame_layout.f8_offset)
8021 next_fpr = i + 16;
8022 }
8023
8024 if (!TARGET_PACKED_STACK)
8025 next_fpr = cfun_save_high_fprs_p ? 31 : 0;
8026
8027 if (flag_stack_usage_info)
8028 current_function_static_stack_size = cfun_frame_layout.frame_size;
8029
8030 /* Decrement stack pointer. */
8031
8032 if (cfun_frame_layout.frame_size > 0)
8033 {
8034 rtx frame_off = GEN_INT (-cfun_frame_layout.frame_size);
8035 rtx real_frame_off;
8036
8037 if (s390_stack_size)
8038 {
8039 HOST_WIDE_INT stack_guard;
8040
8041 if (s390_stack_guard)
8042 stack_guard = s390_stack_guard;
8043 else
8044 {
8045 /* If no value for the stack guard is provided, the smallest power
8046 of 2 larger than the current frame size is chosen. */
8047 stack_guard = 1;
8048 while (stack_guard < cfun_frame_layout.frame_size)
8049 stack_guard <<= 1;
8050 }
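
/* Worked example (numbers assumed): with a frame size of 5000 bytes and
   no -mstack-guard value given, the loop above selects stack_guard = 8192,
   the smallest power of 2 greater than 5000.  */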
8051
8052 if (cfun_frame_layout.frame_size >= s390_stack_size)
8053 {
8054 warning (0, "frame size of function %qs is %wd"
8055 " bytes exceeding user provided stack limit of "
8056 "%d bytes. "
8057 "An unconditional trap is added.",
8058 current_function_name(), cfun_frame_layout.frame_size,
8059 s390_stack_size);
8060 emit_insn (gen_trap ());
8061 }
8062 else
8063 {
8064 /* stack_guard has to be smaller than s390_stack_size.
8065 Otherwise we would emit an AND with zero which would
8066 not match the test under mask pattern. */
8067 if (stack_guard >= s390_stack_size)
8068 {
8069 warning (0, "frame size of function %qs is %wd"
8070 " bytes which is more than half the stack size. "
8071 "The dynamic check would not be reliable. "
8072 "No check emitted for this function.",
8073 current_function_name(),
8074 cfun_frame_layout.frame_size);
8075 }
8076 else
8077 {
8078 HOST_WIDE_INT stack_check_mask = ((s390_stack_size - 1)
8079 & ~(stack_guard - 1));
8080
8081 rtx t = gen_rtx_AND (Pmode, stack_pointer_rtx,
8082 GEN_INT (stack_check_mask));
8083 if (TARGET_64BIT)
8084 emit_insn (gen_ctrapdi4 (gen_rtx_EQ (VOIDmode,
8085 t, const0_rtx),
8086 t, const0_rtx, const0_rtx));
8087 else
8088 emit_insn (gen_ctrapsi4 (gen_rtx_EQ (VOIDmode,
8089 t, const0_rtx),
8090 t, const0_rtx, const0_rtx));
8091 }
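
/* Worked example (option values assumed): with -mstack-size=65536 and a
   stack guard of 4096 the mask above is (65536 - 1) & ~(4096 - 1) = 0xf000,
   so the compare-and-trap fires once the stack pointer has all of these
   bits clear, i.e. roughly once it drops into the guard area.  */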
8092 }
8093 }
8094
8095 if (s390_warn_framesize > 0
8096 && cfun_frame_layout.frame_size >= s390_warn_framesize)
8097 warning (0, "frame size of %qs is %wd bytes",
8098 current_function_name (), cfun_frame_layout.frame_size);
8099
8100 if (s390_warn_dynamicstack_p && cfun->calls_alloca)
8101 warning (0, "%qs uses dynamic stack allocation", current_function_name ());
8102
8103 /* Save incoming stack pointer into temp reg. */
8104 if (TARGET_BACKCHAIN || next_fpr)
8105 insn = emit_insn (gen_move_insn (temp_reg, stack_pointer_rtx));
8106
8107 /* Subtract frame size from stack pointer. */
8108
8109 if (DISP_IN_RANGE (INTVAL (frame_off)))
8110 {
8111 insn = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8112 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8113 frame_off));
8114 insn = emit_insn (insn);
8115 }
8116 else
8117 {
8118 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
8119 frame_off = force_const_mem (Pmode, frame_off);
8120
8121 insn = emit_insn (gen_add2_insn (stack_pointer_rtx, frame_off));
8122 annotate_constant_pool_refs (&PATTERN (insn));
8123 }
8124
8125 RTX_FRAME_RELATED_P (insn) = 1;
8126 real_frame_off = GEN_INT (-cfun_frame_layout.frame_size);
8127 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
8128 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8129 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8130 real_frame_off)));
8131
8132 /* Set backchain. */
8133
8134 if (TARGET_BACKCHAIN)
8135 {
8136 if (cfun_frame_layout.backchain_offset)
8137 addr = gen_rtx_MEM (Pmode,
8138 plus_constant (stack_pointer_rtx,
8139 cfun_frame_layout.backchain_offset));
8140 else
8141 addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
8142 set_mem_alias_set (addr, get_frame_alias_set ());
8143 insn = emit_insn (gen_move_insn (addr, temp_reg));
8144 }
8145
8146 /* If we support non-call exceptions (e.g. for Java),
8147 we need to make sure the backchain pointer is set up
8148 before any possibly trapping memory access. */
8149 if (TARGET_BACKCHAIN && cfun->can_throw_non_call_exceptions)
8150 {
8151 addr = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode));
8152 emit_clobber (addr);
8153 }
8154 }
8155
8156 /* Save fprs 8 - 15 (64 bit ABI). */
8157
8158 if (cfun_save_high_fprs_p && next_fpr)
8159 {
8160 /* If the stack might be accessed through a different register
8161 we have to make sure that the stack pointer decrement is not
8162 moved below the use of the stack slots. */
8163 s390_emit_stack_tie ();
8164
8165 insn = emit_insn (gen_add2_insn (temp_reg,
8166 GEN_INT (cfun_frame_layout.f8_offset)));
8167
8168 offset = 0;
8169
8170 for (i = 24; i <= next_fpr; i++)
8171 if (cfun_fpr_bit_p (i - 16))
8172 {
8173 rtx addr = plus_constant (stack_pointer_rtx,
8174 cfun_frame_layout.frame_size
8175 + cfun_frame_layout.f8_offset
8176 + offset);
8177
8178 insn = save_fpr (temp_reg, offset, i);
8179 offset += 8;
8180 RTX_FRAME_RELATED_P (insn) = 1;
8181 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
8182 gen_rtx_SET (VOIDmode,
8183 gen_rtx_MEM (DFmode, addr),
8184 gen_rtx_REG (DFmode, i)));
8185 }
8186 }
8187
8188 /* Set frame pointer, if needed. */
8189
8190 if (frame_pointer_needed)
8191 {
8192 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
8193 RTX_FRAME_RELATED_P (insn) = 1;
8194 }
8195
8196 /* Set up got pointer, if needed. */
8197
8198 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
8199 {
8200 rtx insns = s390_load_got ();
8201
8202 for (insn = insns; insn; insn = NEXT_INSN (insn))
8203 annotate_constant_pool_refs (&PATTERN (insn));
8204
8205 emit_insn (insns);
8206 }
8207
8208 if (TARGET_TPF_PROFILING)
8209 {
8210 /* Generate a BAS instruction to serve as a function
8211 entry intercept to facilitate the use of tracing
8212 algorithms located at the branch target. */
8213 emit_insn (gen_prologue_tpf ());
8214
8215 /* Emit a blockage here so that all code
8216 lies between the profiling mechanisms. */
8217 emit_insn (gen_blockage ());
8218 }
8219 }
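
/* For orientation only (an assumed example; the exact code depends on
   options and frame layout): for a small non-leaf 64-bit function the
   insns emitted above typically assemble to something like

     stmg %r14,%r15,112(%r15)   # save_gprs
     aghi %r15,-160             # decrement stack pointer

   followed by the function body.  */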
8220
8221 /* Expand the epilogue into a bunch of separate insns. */
8222
8223 void
8224 s390_emit_epilogue (bool sibcall)
8225 {
8226 rtx frame_pointer, return_reg, cfa_restores = NULL_RTX;
8227 int area_bottom, area_top, offset = 0;
8228 int next_offset;
8229 rtvec p;
8230 int i;
8231
8232 if (TARGET_TPF_PROFILING)
8233 {
8234
8235 /* Generate a BAS instruction to serve as a function
8236 entry intercept to facilitate the use of tracing
8237 algorithms located at the branch target. */
8238
8239 /* Emit a blockage here so that all code
8240 lies between the profiling mechanisms. */
8241 emit_insn (gen_blockage ());
8242
8243 emit_insn (gen_epilogue_tpf ());
8244 }
8245
8246 /* Check whether to use frame or stack pointer for restore. */
8247
8248 frame_pointer = (frame_pointer_needed
8249 ? hard_frame_pointer_rtx : stack_pointer_rtx);
8250
8251 s390_frame_area (&area_bottom, &area_top);
8252
8253 /* Check whether we can access the register save area.
8254 If not, increment the frame pointer as required. */
8255
8256 if (area_top <= area_bottom)
8257 {
8258 /* Nothing to restore. */
8259 }
8260 else if (DISP_IN_RANGE (cfun_frame_layout.frame_size + area_bottom)
8261 && DISP_IN_RANGE (cfun_frame_layout.frame_size + area_top - 1))
8262 {
8263 /* Area is in range. */
8264 offset = cfun_frame_layout.frame_size;
8265 }
8266 else
8267 {
8268 rtx insn, frame_off, cfa;
8269
8270 offset = area_bottom < 0 ? -area_bottom : 0;
8271 frame_off = GEN_INT (cfun_frame_layout.frame_size - offset);
8272
8273 cfa = gen_rtx_SET (VOIDmode, frame_pointer,
8274 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
8275 if (DISP_IN_RANGE (INTVAL (frame_off)))
8276 {
8277 insn = gen_rtx_SET (VOIDmode, frame_pointer,
8278 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
8279 insn = emit_insn (insn);
8280 }
8281 else
8282 {
8283 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
8284 frame_off = force_const_mem (Pmode, frame_off);
8285
8286 insn = emit_insn (gen_add2_insn (frame_pointer, frame_off));
8287 annotate_constant_pool_refs (&PATTERN (insn));
8288 }
8289 add_reg_note (insn, REG_CFA_ADJUST_CFA, cfa);
8290 RTX_FRAME_RELATED_P (insn) = 1;
8291 }
8292
8293 /* Restore call saved fprs. */
8294
8295 if (TARGET_64BIT)
8296 {
8297 if (cfun_save_high_fprs_p)
8298 {
8299 next_offset = cfun_frame_layout.f8_offset;
8300 for (i = 24; i < 32; i++)
8301 {
8302 if (cfun_fpr_bit_p (i - 16))
8303 {
8304 restore_fpr (frame_pointer,
8305 offset + next_offset, i);
8306 cfa_restores
8307 = alloc_reg_note (REG_CFA_RESTORE,
8308 gen_rtx_REG (DFmode, i), cfa_restores);
8309 next_offset += 8;
8310 }
8311 }
8312 }
8313
8314 }
8315 else
8316 {
8317 next_offset = cfun_frame_layout.f4_offset;
8318 for (i = 18; i < 20; i++)
8319 {
8320 if (cfun_fpr_bit_p (i - 16))
8321 {
8322 restore_fpr (frame_pointer,
8323 offset + next_offset, i);
8324 cfa_restores
8325 = alloc_reg_note (REG_CFA_RESTORE,
8326 gen_rtx_REG (DFmode, i), cfa_restores);
8327 next_offset += 8;
8328 }
8329 else if (!TARGET_PACKED_STACK)
8330 next_offset += 8;
8331 }
8332
8333 }
8334
8335 /* Return register. */
8336
8337 return_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
8338
8339 /* Restore call saved gprs. */
8340
8341 if (cfun_frame_layout.first_restore_gpr != -1)
8342 {
8343 rtx insn, addr;
8344 int i;
8345
8346 /* Check for global registers and save them
8347 to the stack locations from which they get restored. */
8348
8349 for (i = cfun_frame_layout.first_restore_gpr;
8350 i <= cfun_frame_layout.last_restore_gpr;
8351 i++)
8352 {
8353 if (global_not_special_regno_p (i))
8354 {
8355 addr = plus_constant (frame_pointer,
8356 offset + cfun_frame_layout.gprs_offset
8357 + (i - cfun_frame_layout.first_save_gpr_slot)
8358 * UNITS_PER_LONG);
8359 addr = gen_rtx_MEM (Pmode, addr);
8360 set_mem_alias_set (addr, get_frame_alias_set ());
8361 emit_move_insn (addr, gen_rtx_REG (Pmode, i));
8362 }
8363 else
8364 cfa_restores
8365 = alloc_reg_note (REG_CFA_RESTORE,
8366 gen_rtx_REG (Pmode, i), cfa_restores);
8367 }
8368
8369 if (! sibcall)
8370 {
8371 /* Fetch the return address from the stack before the load multiple;
8372 this is good for scheduling. */
8373
8374 if (cfun_frame_layout.save_return_addr_p
8375 || (cfun_frame_layout.first_restore_gpr < BASE_REGNUM
8376 && cfun_frame_layout.last_restore_gpr > RETURN_REGNUM))
8377 {
8378 int return_regnum = find_unused_clobbered_reg();
8379 if (!return_regnum)
8380 return_regnum = 4;
8381 return_reg = gen_rtx_REG (Pmode, return_regnum);
8382
8383 addr = plus_constant (frame_pointer,
8384 offset + cfun_frame_layout.gprs_offset
8385 + (RETURN_REGNUM
8386 - cfun_frame_layout.first_save_gpr_slot)
8387 * UNITS_PER_LONG);
8388 addr = gen_rtx_MEM (Pmode, addr);
8389 set_mem_alias_set (addr, get_frame_alias_set ());
8390 emit_move_insn (return_reg, addr);
8391 }
8392 }
8393
8394 insn = restore_gprs (frame_pointer,
8395 offset + cfun_frame_layout.gprs_offset
8396 + (cfun_frame_layout.first_restore_gpr
8397 - cfun_frame_layout.first_save_gpr_slot)
8398 * UNITS_PER_LONG,
8399 cfun_frame_layout.first_restore_gpr,
8400 cfun_frame_layout.last_restore_gpr);
8401 insn = emit_insn (insn);
8402 REG_NOTES (insn) = cfa_restores;
8403 add_reg_note (insn, REG_CFA_DEF_CFA,
8404 plus_constant (stack_pointer_rtx, STACK_POINTER_OFFSET));
8405 RTX_FRAME_RELATED_P (insn) = 1;
8406 }
8407
8408 if (! sibcall)
8409 {
8410
8411 /* Return to caller. */
8412
8413 p = rtvec_alloc (2);
8414
8415 RTVEC_ELT (p, 0) = ret_rtx;
8416 RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode, return_reg);
8417 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
8418 }
8419 }
8420
8421
8422 /* Return the size in bytes of a function argument of
8423 type TYPE and/or mode MODE. At least one of TYPE or
8424 MODE must be specified. */
8425
8426 static int
8427 s390_function_arg_size (enum machine_mode mode, const_tree type)
8428 {
8429 if (type)
8430 return int_size_in_bytes (type);
8431
8432 /* No type info available for some library calls ... */
8433 if (mode != BLKmode)
8434 return GET_MODE_SIZE (mode);
8435
8436 /* If we have neither type nor mode, abort. */
8437 gcc_unreachable ();
8438 }
8439
8440 /* Return true if a function argument of type TYPE and mode MODE
8441 is to be passed in a floating-point register, if available. */
8442
8443 static bool
8444 s390_function_arg_float (enum machine_mode mode, const_tree type)
8445 {
8446 int size = s390_function_arg_size (mode, type);
8447 if (size > 8)
8448 return false;
8449
8450 /* Soft-float changes the ABI: no floating-point registers are used. */
8451 if (TARGET_SOFT_FLOAT)
8452 return false;
8453
8454 /* No type info available for some library calls ... */
8455 if (!type)
8456 return mode == SFmode || mode == DFmode || mode == SDmode || mode == DDmode;
8457
8458 /* The ABI says that record types with a single member are treated
8459 just like that member would be. */
8460 while (TREE_CODE (type) == RECORD_TYPE)
8461 {
8462 tree field, single = NULL_TREE;
8463
8464 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
8465 {
8466 if (TREE_CODE (field) != FIELD_DECL)
8467 continue;
8468
8469 if (single == NULL_TREE)
8470 single = TREE_TYPE (field);
8471 else
8472 return false;
8473 }
8474
8475 if (single == NULL_TREE)
8476 return false;
8477 else
8478 type = single;
8479 }
8480
8481 return TREE_CODE (type) == REAL_TYPE;
8482 }
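
/* Example of the single-member-record rule above (illustrative C only,
   not part of this file):

     struct wrap { double d; };     -- treated just like a plain double
     void callee (struct wrap w);   -- w is passed in an FPR

   A record with two members drops out of the loop above and is handled
   as an integer/aggregate argument instead.  */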
8483
8484 /* Return true if a function argument of type TYPE and mode MODE
8485 is to be passed in an integer register, or a pair of integer
8486 registers, if available. */
8487
8488 static bool
8489 s390_function_arg_integer (enum machine_mode mode, const_tree type)
8490 {
8491 int size = s390_function_arg_size (mode, type);
8492 if (size > 8)
8493 return false;
8494
8495 /* No type info available for some library calls ... */
8496 if (!type)
8497 return GET_MODE_CLASS (mode) == MODE_INT
8498 || (TARGET_SOFT_FLOAT && SCALAR_FLOAT_MODE_P (mode));
8499
8500 /* We accept small integral (and similar) types. */
8501 if (INTEGRAL_TYPE_P (type)
8502 || POINTER_TYPE_P (type)
8503 || TREE_CODE (type) == NULLPTR_TYPE
8504 || TREE_CODE (type) == OFFSET_TYPE
8505 || (TARGET_SOFT_FLOAT && TREE_CODE (type) == REAL_TYPE))
8506 return true;
8507
8508 /* We also accept structs of size 1, 2, 4, 8 that are not
8509 passed in floating-point registers. */
8510 if (AGGREGATE_TYPE_P (type)
8511 && exact_log2 (size) >= 0
8512 && !s390_function_arg_float (mode, type))
8513 return true;
8514
8515 return false;
8516 }
8517
8518 /* Return 1 if a function argument of type TYPE and mode MODE
8519 is to be passed by reference. The ABI specifies that only
8520 structures of size 1, 2, 4, or 8 bytes are passed by value,
8521 all other structures (and complex numbers) are passed by
8522 reference. */
8523
8524 static bool
8525 s390_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
8526 enum machine_mode mode, const_tree type,
8527 bool named ATTRIBUTE_UNUSED)
8528 {
8529 int size = s390_function_arg_size (mode, type);
8530 if (size > 8)
8531 return true;
8532
8533 if (type)
8534 {
8535 if (AGGREGATE_TYPE_P (type) && exact_log2 (size) < 0)
8536 return 1;
8537
8538 if (TREE_CODE (type) == COMPLEX_TYPE
8539 || TREE_CODE (type) == VECTOR_TYPE)
8540 return 1;
8541 }
8542
8543 return 0;
8544 }
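
/* Illustration of the by-reference rule (example types are assumptions):

     struct s4 { char c[4]; };   -- size 4, a power of 2: passed by value
     struct s3 { char c[3]; };   -- size 3: passed by reference
     _Complex double z;          -- complex: always passed by reference

   Aggregates larger than 8 bytes are passed by reference as well.  */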
8545
8546 /* Update the data in CUM to advance over an argument of mode MODE and
8547 data type TYPE. (TYPE is null for libcalls where that information
8548 may not be available.) The boolean NAMED specifies whether the
8549 argument is a named argument (as opposed to an unnamed argument
8550 matching an ellipsis). */
8551
8552 static void
8553 s390_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
8554 const_tree type, bool named ATTRIBUTE_UNUSED)
8555 {
8556 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
8557
8558 if (s390_function_arg_float (mode, type))
8559 {
8560 cum->fprs += 1;
8561 }
8562 else if (s390_function_arg_integer (mode, type))
8563 {
8564 int size = s390_function_arg_size (mode, type);
8565 cum->gprs += ((size + UNITS_PER_LONG - 1) / UNITS_PER_LONG);
8566 }
8567 else
8568 gcc_unreachable ();
8569 }
8570
8571 /* Define where to put the arguments to a function.
8572 Value is zero to push the argument on the stack,
8573 or a hard register in which to store the argument.
8574
8575 MODE is the argument's machine mode.
8576 TYPE is the data type of the argument (as a tree).
8577 This is null for libcalls where that information may
8578 not be available.
8579 CUM is a variable of type CUMULATIVE_ARGS which gives info about
8580 the preceding args and about the function being called.
8581 NAMED is nonzero if this argument is a named parameter
8582 (otherwise it is an extra parameter matching an ellipsis).
8583
8584 On S/390, we use general purpose registers 2 through 6 to
8585 pass integer, pointer, and certain structure arguments, and
8586 floating point registers 0 and 2 (0, 2, 4, and 6 on 64-bit)
8587 to pass floating point arguments. All remaining arguments
8588 are pushed to the stack. */
8589
8590 static rtx
8591 s390_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
8592 const_tree type, bool named ATTRIBUTE_UNUSED)
8593 {
8594 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
8595
8596 if (s390_function_arg_float (mode, type))
8597 {
8598 if (cum->fprs + 1 > FP_ARG_NUM_REG)
8599 return 0;
8600 else
8601 return gen_rtx_REG (mode, cum->fprs + 16);
8602 }
8603 else if (s390_function_arg_integer (mode, type))
8604 {
8605 int size = s390_function_arg_size (mode, type);
8606 int n_gprs = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
8607
8608 if (cum->gprs + n_gprs > GP_ARG_NUM_REG)
8609 return 0;
8610 else if (n_gprs == 1 || UNITS_PER_WORD == UNITS_PER_LONG)
8611 return gen_rtx_REG (mode, cum->gprs + 2);
8612 else if (n_gprs == 2)
8613 {
8614 rtvec p = rtvec_alloc (2);
8615
8616 RTVEC_ELT (p, 0)
8617 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 2),
8618 const0_rtx);
8619 RTVEC_ELT (p, 1)
8620 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 3),
8621 GEN_INT (4));
8622
8623 return gen_rtx_PARALLEL (mode, p);
8624 }
8625 }
8626
8627 /* After the real arguments, expand_call calls us once again
8628 with a void_type_node type. Whatever we return here is
8629 passed as operand 2 to the call expanders.
8630
8631 We don't need this feature ... */
8632 else if (type == void_type_node)
8633 return const0_rtx;
8634
8635 gcc_unreachable ();
8636 }
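
/* Assumed example of the assignment performed above (64-bit):

     void callee (int a, double b, long c, double d);

   gives a -> %r2, b -> %f0, c -> %r3, d -> %f2; a further integer
   argument would use %r4, and so on up to %r6, after which arguments
   go onto the stack.  */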
8637
8638 /* Return true if return values of type TYPE should be returned
8639 in a memory buffer whose address is passed by the caller as
8640 hidden first argument. */
8641
8642 static bool
8643 s390_return_in_memory (const_tree type, const_tree fundecl ATTRIBUTE_UNUSED)
8644 {
8645 /* We accept small integral (and similar) types. */
8646 if (INTEGRAL_TYPE_P (type)
8647 || POINTER_TYPE_P (type)
8648 || TREE_CODE (type) == OFFSET_TYPE
8649 || TREE_CODE (type) == REAL_TYPE)
8650 return int_size_in_bytes (type) > 8;
8651
8652 /* Aggregates and similar constructs are always returned
8653 in memory. */
8654 if (AGGREGATE_TYPE_P (type)
8655 || TREE_CODE (type) == COMPLEX_TYPE
8656 || TREE_CODE (type) == VECTOR_TYPE)
8657 return true;
8658
8659 /* ??? We get called on all sorts of random stuff from
8660 aggregate_value_p. We can't abort, but it's not clear
8661 what's safe to return. Pretend it's a struct I guess. */
8662 return true;
8663 }
8664
8665 /* Function arguments and return values are promoted to word size. */
8666
8667 static enum machine_mode
8668 s390_promote_function_mode (const_tree type, enum machine_mode mode,
8669 int *punsignedp,
8670 const_tree fntype ATTRIBUTE_UNUSED,
8671 int for_return ATTRIBUTE_UNUSED)
8672 {
8673 if (INTEGRAL_MODE_P (mode)
8674 && GET_MODE_SIZE (mode) < UNITS_PER_LONG)
8675 {
8676 if (type != NULL_TREE && POINTER_TYPE_P (type))
8677 *punsignedp = POINTERS_EXTEND_UNSIGNED;
8678 return Pmode;
8679 }
8680
8681 return mode;
8682 }
8683
8684 /* Define where to return a (scalar) value of type RET_TYPE.
8685 If RET_TYPE is null, define where to return a (scalar)
8686 value of mode MODE from a libcall. */
8687
8688 static rtx
8689 s390_function_and_libcall_value (enum machine_mode mode,
8690 const_tree ret_type,
8691 const_tree fntype_or_decl,
8692 bool outgoing ATTRIBUTE_UNUSED)
8693 {
8694 /* For normal functions perform the promotion as
8695 promote_function_mode would do. */
8696 if (ret_type)
8697 {
8698 int unsignedp = TYPE_UNSIGNED (ret_type);
8699 mode = promote_function_mode (ret_type, mode, &unsignedp,
8700 fntype_or_decl, 1);
8701 }
8702
8703 gcc_assert (GET_MODE_CLASS (mode) == MODE_INT || SCALAR_FLOAT_MODE_P (mode));
8704 gcc_assert (GET_MODE_SIZE (mode) <= 8);
8705
8706 if (TARGET_HARD_FLOAT && SCALAR_FLOAT_MODE_P (mode))
8707 return gen_rtx_REG (mode, 16);
8708 else if (GET_MODE_SIZE (mode) <= UNITS_PER_LONG
8709 || UNITS_PER_LONG == UNITS_PER_WORD)
8710 return gen_rtx_REG (mode, 2);
8711 else if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_LONG)
8712 {
8713 /* This case is triggered when returning a 64 bit value with
8714 -m31 -mzarch. Although the value would fit into a single
8715 register it has to be forced into a 32 bit register pair in
8716 order to match the ABI. */
8717 rtvec p = rtvec_alloc (2);
8718
8719 RTVEC_ELT (p, 0)
8720 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 2), const0_rtx);
8721 RTVEC_ELT (p, 1)
8722 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 3), GEN_INT (4));
8723
8724 return gen_rtx_PARALLEL (mode, p);
8725 }
8726
8727 gcc_unreachable ();
8728 }
8729
8730 /* Define where to return a scalar return value of type RET_TYPE. */
8731
8732 static rtx
8733 s390_function_value (const_tree ret_type, const_tree fn_decl_or_type,
8734 bool outgoing)
8735 {
8736 return s390_function_and_libcall_value (TYPE_MODE (ret_type), ret_type,
8737 fn_decl_or_type, outgoing);
8738 }
8739
8740 /* Define where to return a scalar libcall return value of mode
8741 MODE. */
8742
8743 static rtx
8744 s390_libcall_value (enum machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
8745 {
8746 return s390_function_and_libcall_value (mode, NULL_TREE,
8747 NULL_TREE, true);
8748 }
8749
8750
8751 /* Create and return the va_list datatype.
8752
8753 On S/390, va_list is an array type equivalent to
8754
8755 typedef struct __va_list_tag
8756 {
8757 long __gpr;
8758 long __fpr;
8759 void *__overflow_arg_area;
8760 void *__reg_save_area;
8761 } va_list[1];
8762
8763 where __gpr and __fpr hold the number of general purpose
8764 or floating point arguments used up to now, respectively,
8765 __overflow_arg_area points to the stack location of the
8766 next argument passed on the stack, and __reg_save_area
8767 always points to the start of the register area in the
8768 call frame of the current function. The function prologue
8769 saves all registers used for argument passing into this
8770 area if the function uses variable arguments. */
8771
8772 static tree
8773 s390_build_builtin_va_list (void)
8774 {
8775 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
8776
8777 record = lang_hooks.types.make_type (RECORD_TYPE);
8778
8779 type_decl =
8780 build_decl (BUILTINS_LOCATION,
8781 TYPE_DECL, get_identifier ("__va_list_tag"), record);
8782
8783 f_gpr = build_decl (BUILTINS_LOCATION,
8784 FIELD_DECL, get_identifier ("__gpr"),
8785 long_integer_type_node);
8786 f_fpr = build_decl (BUILTINS_LOCATION,
8787 FIELD_DECL, get_identifier ("__fpr"),
8788 long_integer_type_node);
8789 f_ovf = build_decl (BUILTINS_LOCATION,
8790 FIELD_DECL, get_identifier ("__overflow_arg_area"),
8791 ptr_type_node);
8792 f_sav = build_decl (BUILTINS_LOCATION,
8793 FIELD_DECL, get_identifier ("__reg_save_area"),
8794 ptr_type_node);
8795
8796 va_list_gpr_counter_field = f_gpr;
8797 va_list_fpr_counter_field = f_fpr;
8798
8799 DECL_FIELD_CONTEXT (f_gpr) = record;
8800 DECL_FIELD_CONTEXT (f_fpr) = record;
8801 DECL_FIELD_CONTEXT (f_ovf) = record;
8802 DECL_FIELD_CONTEXT (f_sav) = record;
8803
8804 TYPE_STUB_DECL (record) = type_decl;
8805 TYPE_NAME (record) = type_decl;
8806 TYPE_FIELDS (record) = f_gpr;
8807 DECL_CHAIN (f_gpr) = f_fpr;
8808 DECL_CHAIN (f_fpr) = f_ovf;
8809 DECL_CHAIN (f_ovf) = f_sav;
8810
8811 layout_type (record);
8812
8813 /* The correct type is an array type of one element. */
8814 return build_array_type (record, build_index_type (size_zero_node));
8815 }
8816
8817 /* Implement va_start by filling the va_list structure VALIST.
8818 STDARG_P is always true, and ignored.
8819 NEXTARG points to the first anonymous stack argument.
8820
8821 The following global variables are used to initialize
8822 the va_list structure:
8823
8824 crtl->args.info:
8825 holds number of gprs and fprs used for named arguments.
8826 crtl->args.arg_offset_rtx:
8827 holds the offset of the first anonymous stack argument
8828 (relative to the virtual arg pointer). */
8829
8830 static void
8831 s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
8832 {
8833 HOST_WIDE_INT n_gpr, n_fpr;
8834 int off;
8835 tree f_gpr, f_fpr, f_ovf, f_sav;
8836 tree gpr, fpr, ovf, sav, t;
8837
8838 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
8839 f_fpr = DECL_CHAIN (f_gpr);
8840 f_ovf = DECL_CHAIN (f_fpr);
8841 f_sav = DECL_CHAIN (f_ovf);
8842
8843 valist = build_simple_mem_ref (valist);
8844 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
8845 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
8846 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
8847 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
8848
8849 /* Count number of gp and fp argument registers used. */
8850
8851 n_gpr = crtl->args.info.gprs;
8852 n_fpr = crtl->args.info.fprs;
8853
8854 if (cfun->va_list_gpr_size)
8855 {
8856 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
8857 build_int_cst (NULL_TREE, n_gpr));
8858 TREE_SIDE_EFFECTS (t) = 1;
8859 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8860 }
8861
8862 if (cfun->va_list_fpr_size)
8863 {
8864 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
8865 build_int_cst (NULL_TREE, n_fpr));
8866 TREE_SIDE_EFFECTS (t) = 1;
8867 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8868 }
8869
8870 /* Find the overflow area. */
8871 if (n_gpr + cfun->va_list_gpr_size > GP_ARG_NUM_REG
8872 || n_fpr + cfun->va_list_fpr_size > FP_ARG_NUM_REG)
8873 {
8874 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
8875
8876 off = INTVAL (crtl->args.arg_offset_rtx);
8877 off = off < 0 ? 0 : off;
8878 if (TARGET_DEBUG_ARG)
8879 fprintf (stderr, "va_start: n_gpr = %d, n_fpr = %d off %d\n",
8880 (int)n_gpr, (int)n_fpr, off);
8881
8882 t = fold_build_pointer_plus_hwi (t, off);
8883
8884 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
8885 TREE_SIDE_EFFECTS (t) = 1;
8886 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8887 }
8888
8889 /* Find the register save area. */
8890 if ((cfun->va_list_gpr_size && n_gpr < GP_ARG_NUM_REG)
8891 || (cfun->va_list_fpr_size && n_fpr < FP_ARG_NUM_REG))
8892 {
8893 t = make_tree (TREE_TYPE (sav), return_address_pointer_rtx);
8894 t = fold_build_pointer_plus_hwi (t, -RETURN_REGNUM * UNITS_PER_LONG);
8895
8896 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
8897 TREE_SIDE_EFFECTS (t) = 1;
8898 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8899 }
8900 }
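
/* Small usage sketch (an assumed example): for

     int sum (int n, ...) { va_list ap; va_start (ap, n); ... }

   crtl->args.info.gprs is 1, so va_start sets __gpr to 1 and __fpr to 0,
   points __overflow_arg_area at the first stack argument and
   __reg_save_area at the register save area of the current frame.  */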
8901
8902 /* Implement va_arg by updating the va_list structure
8903 VALIST as required to retrieve an argument of type
8904 TYPE, and returning that argument.
8905
8906 Generates code equivalent to:
8907
8908 if (integral value) {
8909 if (size <= 4 && args.gpr < 5 ||
8910 size > 4 && args.gpr < 4 )
8911 ret = args.reg_save_area[args.gpr+8]
8912 else
8913 ret = *args.overflow_arg_area++;
8914 } else if (float value) {
8915 if (args.fgpr < 2)
8916 ret = args.reg_save_area[args.fpr+64]
8917 else
8918 ret = *args.overflow_arg_area++;
8919 } else if (aggregate value) {
8920 if (args.gpr < 5)
8921 ret = *args.reg_save_area[args.gpr]
8922 else
8923 ret = **args.overflow_arg_area++;
8924 } */
8925
8926 static tree
8927 s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
8928 gimple_seq *post_p ATTRIBUTE_UNUSED)
8929 {
8930 tree f_gpr, f_fpr, f_ovf, f_sav;
8931 tree gpr, fpr, ovf, sav, reg, t, u;
8932 int indirect_p, size, n_reg, sav_ofs, sav_scale, max_reg;
8933 tree lab_false, lab_over, addr;
8934
8935 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
8936 f_fpr = DECL_CHAIN (f_gpr);
8937 f_ovf = DECL_CHAIN (f_fpr);
8938 f_sav = DECL_CHAIN (f_ovf);
8939
8940 valist = build_va_arg_indirect_ref (valist);
8941 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
8942 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
8943 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
8944
8945 /* The tree for args* cannot be shared between gpr/fpr and ovf since
8946 both appear on a lhs. */
8947 valist = unshare_expr (valist);
8948 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
8949
8950 size = int_size_in_bytes (type);
8951
8952 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
8953 {
8954 if (TARGET_DEBUG_ARG)
8955 {
8956 fprintf (stderr, "va_arg: aggregate type");
8957 debug_tree (type);
8958 }
8959
8960 /* Aggregates are passed by reference. */
8961 indirect_p = 1;
8962 reg = gpr;
8963 n_reg = 1;
8964
8965 /* Kernel stack layout on 31 bit: It is assumed here that no padding
8966 will be added by s390_frame_info because for va_args an even number
8967 of gprs always has to be saved (r15-r2 = 14 regs). */
8968 sav_ofs = 2 * UNITS_PER_LONG;
8969 sav_scale = UNITS_PER_LONG;
8970 size = UNITS_PER_LONG;
8971 max_reg = GP_ARG_NUM_REG - n_reg;
8972 }
8973 else if (s390_function_arg_float (TYPE_MODE (type), type))
8974 {
8975 if (TARGET_DEBUG_ARG)
8976 {
8977 fprintf (stderr, "va_arg: float type");
8978 debug_tree (type);
8979 }
8980
8981 /* FP args go in FP registers, if present. */
8982 indirect_p = 0;
8983 reg = fpr;
8984 n_reg = 1;
8985 sav_ofs = 16 * UNITS_PER_LONG;
8986 sav_scale = 8;
8987 max_reg = FP_ARG_NUM_REG - n_reg;
8988 }
8989 else
8990 {
8991 if (TARGET_DEBUG_ARG)
8992 {
8993 fprintf (stderr, "va_arg: other type");
8994 debug_tree (type);
8995 }
8996
8997 /* Otherwise into GP registers. */
8998 indirect_p = 0;
8999 reg = gpr;
9000 n_reg = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
9001
9002 /* Kernel stack layout on 31 bit: It is assumed here that no padding
9003 will be added by s390_frame_info because for va_args an even number
9004 of gprs always has to be saved (r15-r2 = 14 regs). */
9005 sav_ofs = 2 * UNITS_PER_LONG;
9006
9007 if (size < UNITS_PER_LONG)
9008 sav_ofs += UNITS_PER_LONG - size;
9009
9010 sav_scale = UNITS_PER_LONG;
9011 max_reg = GP_ARG_NUM_REG - n_reg;
9012 }
9013
9014 /* Pull the value out of the saved registers ... */
9015
9016 lab_false = create_artificial_label (UNKNOWN_LOCATION);
9017 lab_over = create_artificial_label (UNKNOWN_LOCATION);
9018 addr = create_tmp_var (ptr_type_node, "addr");
9019
9020 t = fold_convert (TREE_TYPE (reg), size_int (max_reg));
9021 t = build2 (GT_EXPR, boolean_type_node, reg, t);
9022 u = build1 (GOTO_EXPR, void_type_node, lab_false);
9023 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
9024 gimplify_and_add (t, pre_p);
9025
9026 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
9027 u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
9028 fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
9029 t = fold_build_pointer_plus (t, u);
9030
9031 gimplify_assign (addr, t, pre_p);
9032
9033 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
9034
9035 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
9036
9037
9038 /* ... Otherwise out of the overflow area. */
9039
9040 t = ovf;
9041 if (size < UNITS_PER_LONG)
9042 t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG - size);
9043
9044 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
9045
9046 gimplify_assign (addr, t, pre_p);
9047
9048 t = fold_build_pointer_plus_hwi (t, size);
9049 gimplify_assign (ovf, t, pre_p);
9050
9051 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
9052
9053
9054 /* Increment register save count. */
9055
9056 u = build2 (PREINCREMENT_EXPR, TREE_TYPE (reg), reg,
9057 fold_convert (TREE_TYPE (reg), size_int (n_reg)));
9058 gimplify_and_add (u, pre_p);
9059
9060 if (indirect_p)
9061 {
9062 t = build_pointer_type_for_mode (build_pointer_type (type),
9063 ptr_mode, true);
9064 addr = fold_convert (t, addr);
9065 addr = build_va_arg_indirect_ref (addr);
9066 }
9067 else
9068 {
9069 t = build_pointer_type_for_mode (type, ptr_mode, true);
9070 addr = fold_convert (t, addr);
9071 }
9072
9073 return build_va_arg_indirect_ref (addr);
9074 }
9075
9076
9077 /* Builtins. */
9078
9079 enum s390_builtin
9080 {
9081 S390_BUILTIN_THREAD_POINTER,
9082 S390_BUILTIN_SET_THREAD_POINTER,
9083
9084 S390_BUILTIN_max
9085 };
9086
9087 static enum insn_code const code_for_builtin_64[S390_BUILTIN_max] = {
9088 CODE_FOR_get_tp_64,
9089 CODE_FOR_set_tp_64
9090 };
9091
9092 static enum insn_code const code_for_builtin_31[S390_BUILTIN_max] = {
9093 CODE_FOR_get_tp_31,
9094 CODE_FOR_set_tp_31
9095 };
9096
9097 static void
9098 s390_init_builtins (void)
9099 {
9100 tree ftype;
9101
9102 ftype = build_function_type_list (ptr_type_node, NULL_TREE);
9103 add_builtin_function ("__builtin_thread_pointer", ftype,
9104 S390_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
9105 NULL, NULL_TREE);
9106
9107 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
9108 add_builtin_function ("__builtin_set_thread_pointer", ftype,
9109 S390_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
9110 NULL, NULL_TREE);
9111 }
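
/* Usage example for the builtins registered above (illustrative C):

     void *tp = __builtin_thread_pointer ();
     __builtin_set_thread_pointer (tp);

   Both expand through s390_expand_builtin below to the get_tp/set_tp
   patterns selected in code_for_builtin_64/_31.  */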
9112
9113 /* Expand an expression EXP that calls a built-in function,
9114 with result going to TARGET if that's convenient
9115 (and in mode MODE if that's convenient).
9116 SUBTARGET may be used as the target for computing one of EXP's operands.
9117 IGNORE is nonzero if the value is to be ignored. */
9118
9119 static rtx
9120 s390_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
9121 enum machine_mode mode ATTRIBUTE_UNUSED,
9122 int ignore ATTRIBUTE_UNUSED)
9123 {
9124 #define MAX_ARGS 2
9125
9126 enum insn_code const *code_for_builtin =
9127 TARGET_64BIT ? code_for_builtin_64 : code_for_builtin_31;
9128
9129 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
9130 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
9131 enum insn_code icode;
9132 rtx op[MAX_ARGS], pat;
9133 int arity;
9134 bool nonvoid;
9135 tree arg;
9136 call_expr_arg_iterator iter;
9137
9138 if (fcode >= S390_BUILTIN_max)
9139 internal_error ("bad builtin fcode");
9140 icode = code_for_builtin[fcode];
9141 if (icode == 0)
9142 internal_error ("bad builtin fcode");
9143
9144 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
9145
9146 arity = 0;
9147 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
9148 {
9149 const struct insn_operand_data *insn_op;
9150
9151 if (arg == error_mark_node)
9152 return NULL_RTX;
9153 if (arity > MAX_ARGS)
9154 return NULL_RTX;
9155
9156 insn_op = &insn_data[icode].operand[arity + nonvoid];
9157
9158 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
9159
9160 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
9161 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
9162 arity++;
9163 }
9164
9165 if (nonvoid)
9166 {
9167 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9168 if (!target
9169 || GET_MODE (target) != tmode
9170 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
9171 target = gen_reg_rtx (tmode);
9172 }
9173
9174 switch (arity)
9175 {
9176 case 0:
9177 pat = GEN_FCN (icode) (target);
9178 break;
9179 case 1:
9180 if (nonvoid)
9181 pat = GEN_FCN (icode) (target, op[0]);
9182 else
9183 pat = GEN_FCN (icode) (op[0]);
9184 break;
9185 case 2:
9186 pat = GEN_FCN (icode) (target, op[0], op[1]);
9187 break;
9188 default:
9189 gcc_unreachable ();
9190 }
9191 if (!pat)
9192 return NULL_RTX;
9193 emit_insn (pat);
9194
9195 if (nonvoid)
9196 return target;
9197 else
9198 return const0_rtx;
9199 }
9200
9201
9202 /* Output assembly code for the trampoline template to
9203 stdio stream FILE.
9204
9205 On S/390, we use gpr 1 internally in the trampoline code;
9206 gpr 0 is used to hold the static chain. */
9207
9208 static void
9209 s390_asm_trampoline_template (FILE *file)
9210 {
9211 rtx op[2];
9212 op[0] = gen_rtx_REG (Pmode, 0);
9213 op[1] = gen_rtx_REG (Pmode, 1);
9214
9215 if (TARGET_64BIT)
9216 {
9217 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
9218 output_asm_insn ("lmg\t%0,%1,14(%1)", op); /* 6 byte */
9219 output_asm_insn ("br\t%1", op); /* 2 byte */
9220 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 10));
9221 }
9222 else
9223 {
9224 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
9225 output_asm_insn ("lm\t%0,%1,6(%1)", op); /* 4 byte */
9226 output_asm_insn ("br\t%1", op); /* 2 byte */
9227 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 8));
9228 }
9229 }
9230
9231 /* Emit RTL insns to initialize the variable parts of a trampoline.
9232 FNADDR is an RTX for the address of the function's pure code.
9233 CXT is an RTX for the static chain value for the function. */
9234
9235 static void
9236 s390_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
9237 {
9238 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
9239 rtx mem;
9240
9241 emit_block_move (m_tramp, assemble_trampoline_template (),
9242 GEN_INT (2 * UNITS_PER_LONG), BLOCK_OP_NORMAL);
9243
9244 mem = adjust_address (m_tramp, Pmode, 2 * UNITS_PER_LONG);
9245 emit_move_insn (mem, cxt);
9246 mem = adjust_address (m_tramp, Pmode, 3 * UNITS_PER_LONG);
9247 emit_move_insn (mem, fnaddr);
9248 }
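
/* Resulting trampoline layout (sketch; offsets follow the code above):

     offset 0                  : instruction template (basr/lm(g)/br)
     offset 2 * UNITS_PER_LONG : static chain value CXT
     offset 3 * UNITS_PER_LONG : target function address FNADDR

   When called, the lm(g) in the template loads these two slots into
   %r0 and %r1 and the br branches via %r1.  */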
9249
9250 /* Output assembler code to FILE to increment profiler label # LABELNO
9251 for profiling a function entry. */
9252
9253 void
9254 s390_function_profiler (FILE *file, int labelno)
9255 {
9256 rtx op[7];
9257
9258 char label[128];
9259 ASM_GENERATE_INTERNAL_LABEL (label, "LP", labelno);
9260
9261 fprintf (file, "# function profiler \n");
9262
9263 op[0] = gen_rtx_REG (Pmode, RETURN_REGNUM);
9264 op[1] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
9265 op[1] = gen_rtx_MEM (Pmode, plus_constant (op[1], UNITS_PER_LONG));
9266
9267 op[2] = gen_rtx_REG (Pmode, 1);
9268 op[3] = gen_rtx_SYMBOL_REF (Pmode, label);
9269 SYMBOL_REF_FLAGS (op[3]) = SYMBOL_FLAG_LOCAL;
9270
9271 op[4] = gen_rtx_SYMBOL_REF (Pmode, "_mcount");
9272 if (flag_pic)
9273 {
9274 op[4] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[4]), UNSPEC_PLT);
9275 op[4] = gen_rtx_CONST (Pmode, op[4]);
9276 }
9277
9278 if (TARGET_64BIT)
9279 {
9280 output_asm_insn ("stg\t%0,%1", op);
9281 output_asm_insn ("larl\t%2,%3", op);
9282 output_asm_insn ("brasl\t%0,%4", op);
9283 output_asm_insn ("lg\t%0,%1", op);
9284 }
9285 else if (!flag_pic)
9286 {
9287 op[6] = gen_label_rtx ();
9288
9289 output_asm_insn ("st\t%0,%1", op);
9290 output_asm_insn ("bras\t%2,%l6", op);
9291 output_asm_insn (".long\t%4", op);
9292 output_asm_insn (".long\t%3", op);
9293 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
9294 output_asm_insn ("l\t%0,0(%2)", op);
9295 output_asm_insn ("l\t%2,4(%2)", op);
9296 output_asm_insn ("basr\t%0,%0", op);
9297 output_asm_insn ("l\t%0,%1", op);
9298 }
9299 else
9300 {
9301 op[5] = gen_label_rtx ();
9302 op[6] = gen_label_rtx ();
9303
9304 output_asm_insn ("st\t%0,%1", op);
9305 output_asm_insn ("bras\t%2,%l6", op);
9306 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[5]));
9307 output_asm_insn (".long\t%4-%l5", op);
9308 output_asm_insn (".long\t%3-%l5", op);
9309 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
9310 output_asm_insn ("lr\t%0,%2", op);
9311 output_asm_insn ("a\t%0,0(%2)", op);
9312 output_asm_insn ("a\t%2,4(%2)", op);
9313 output_asm_insn ("basr\t%0,%0", op);
9314 output_asm_insn ("l\t%0,%1", op);
9315 }
9316 }
9317
9318 /* Encode symbol attributes (local vs. global, tls model) of a SYMBOL_REF
9319 into its SYMBOL_REF_FLAGS. */
9320
9321 static void
9322 s390_encode_section_info (tree decl, rtx rtl, int first)
9323 {
9324 default_encode_section_info (decl, rtl, first);
9325
9326 if (TREE_CODE (decl) == VAR_DECL)
9327 {
9328 /* If a variable has a forced alignment to < 2 bytes, mark it
9329 with SYMBOL_FLAG_ALIGN1 to prevent it from being used as a LARL
9330 operand. */
9331 if (DECL_USER_ALIGN (decl) && DECL_ALIGN (decl) < 16)
9332 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_ALIGN1;
9333 if (!DECL_SIZE (decl)
9334 || !DECL_ALIGN (decl)
9335 || !host_integerp (DECL_SIZE (decl), 0)
9336 || (DECL_ALIGN (decl) <= 64
9337 && DECL_ALIGN (decl) != tree_low_cst (DECL_SIZE (decl), 0)))
9338 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
9339 }
9340
9341 /* Literal pool references don't have a decl so they are handled
9342 differently here. We rely on the information in the MEM_ALIGN
9343 entry to decide upon natural alignment. */
9344 if (MEM_P (rtl)
9345 && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
9346 && TREE_CONSTANT_POOL_ADDRESS_P (XEXP (rtl, 0))
9347 && (MEM_ALIGN (rtl) == 0
9348 || GET_MODE_BITSIZE (GET_MODE (rtl)) == 0
9349 || MEM_ALIGN (rtl) < GET_MODE_BITSIZE (GET_MODE (rtl))))
9350 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
9351 }
9352
9353 /* Output thunk to FILE that implements a C++ virtual function call (with
9354 multiple inheritance) to FUNCTION. The thunk adjusts the this pointer
9355 by DELTA, and unless VCALL_OFFSET is zero, applies an additional adjustment
9356 stored at VCALL_OFFSET in the vtable whose address is located at offset 0
9357 relative to the resulting this pointer. */
9358
9359 static void
9360 s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
9361 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
9362 tree function)
9363 {
9364 rtx op[10];
9365 int nonlocal = 0;
9366
9367 /* Make sure unwind info is emitted for the thunk if needed. */
9368 final_start_function (emit_barrier (), file, 1);
9369
9370 /* Operand 0 is the target function. */
9371 op[0] = XEXP (DECL_RTL (function), 0);
9372 if (flag_pic && !SYMBOL_REF_LOCAL_P (op[0]))
9373 {
9374 nonlocal = 1;
9375 op[0] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[0]),
9376 TARGET_64BIT ? UNSPEC_PLT : UNSPEC_GOT);
9377 op[0] = gen_rtx_CONST (Pmode, op[0]);
9378 }
9379
9380 /* Operand 1 is the 'this' pointer. */
9381 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
9382 op[1] = gen_rtx_REG (Pmode, 3);
9383 else
9384 op[1] = gen_rtx_REG (Pmode, 2);
9385
9386 /* Operand 2 is the delta. */
9387 op[2] = GEN_INT (delta);
9388
9389 /* Operand 3 is the vcall_offset. */
9390 op[3] = GEN_INT (vcall_offset);
9391
9392 /* Operand 4 is the temporary register. */
9393 op[4] = gen_rtx_REG (Pmode, 1);
9394
9395 /* Operands 5 to 8 can be used as labels. */
9396 op[5] = NULL_RTX;
9397 op[6] = NULL_RTX;
9398 op[7] = NULL_RTX;
9399 op[8] = NULL_RTX;
9400
9401 /* Operand 9 can be used for temporary register. */
9402 op[9] = NULL_RTX;
9403
9404 /* Generate code. */
9405 if (TARGET_64BIT)
9406 {
9407 /* Setup literal pool pointer if required. */
9408 if ((!DISP_IN_RANGE (delta)
9409 && !CONST_OK_FOR_K (delta)
9410 && !CONST_OK_FOR_Os (delta))
9411 || (!DISP_IN_RANGE (vcall_offset)
9412 && !CONST_OK_FOR_K (vcall_offset)
9413 && !CONST_OK_FOR_Os (vcall_offset)))
9414 {
9415 op[5] = gen_label_rtx ();
9416 output_asm_insn ("larl\t%4,%5", op);
9417 }
9418
9419 /* Add DELTA to this pointer. */
9420 if (delta)
9421 {
9422 if (CONST_OK_FOR_J (delta))
9423 output_asm_insn ("la\t%1,%2(%1)", op);
9424 else if (DISP_IN_RANGE (delta))
9425 output_asm_insn ("lay\t%1,%2(%1)", op);
9426 else if (CONST_OK_FOR_K (delta))
9427 output_asm_insn ("aghi\t%1,%2", op);
9428 else if (CONST_OK_FOR_Os (delta))
9429 output_asm_insn ("agfi\t%1,%2", op);
9430 else
9431 {
9432 op[6] = gen_label_rtx ();
9433 output_asm_insn ("agf\t%1,%6-%5(%4)", op);
9434 }
9435 }
9436
9437 /* Perform vcall adjustment. */
9438 if (vcall_offset)
9439 {
9440 if (DISP_IN_RANGE (vcall_offset))
9441 {
9442 output_asm_insn ("lg\t%4,0(%1)", op);
9443 output_asm_insn ("ag\t%1,%3(%4)", op);
9444 }
9445 else if (CONST_OK_FOR_K (vcall_offset))
9446 {
9447 output_asm_insn ("lghi\t%4,%3", op);
9448 output_asm_insn ("ag\t%4,0(%1)", op);
9449 output_asm_insn ("ag\t%1,0(%4)", op);
9450 }
9451 else if (CONST_OK_FOR_Os (vcall_offset))
9452 {
9453 output_asm_insn ("lgfi\t%4,%3", op);
9454 output_asm_insn ("ag\t%4,0(%1)", op);
9455 output_asm_insn ("ag\t%1,0(%4)", op);
9456 }
9457 else
9458 {
9459 op[7] = gen_label_rtx ();
9460 output_asm_insn ("llgf\t%4,%7-%5(%4)", op);
9461 output_asm_insn ("ag\t%4,0(%1)", op);
9462 output_asm_insn ("ag\t%1,0(%4)", op);
9463 }
9464 }
9465
9466 /* Jump to target. */
9467 output_asm_insn ("jg\t%0", op);
9468
9469 /* Output literal pool if required. */
9470 if (op[5])
9471 {
9472 output_asm_insn (".align\t4", op);
9473 targetm.asm_out.internal_label (file, "L",
9474 CODE_LABEL_NUMBER (op[5]));
9475 }
9476 if (op[6])
9477 {
9478 targetm.asm_out.internal_label (file, "L",
9479 CODE_LABEL_NUMBER (op[6]));
9480 output_asm_insn (".long\t%2", op);
9481 }
9482 if (op[7])
9483 {
9484 targetm.asm_out.internal_label (file, "L",
9485 CODE_LABEL_NUMBER (op[7]));
9486 output_asm_insn (".long\t%3", op);
9487 }
9488 }
9489 else
9490 {
9491 /* Setup base pointer if required. */
9492 if (!vcall_offset
9493 || (!DISP_IN_RANGE (delta)
9494 && !CONST_OK_FOR_K (delta)
9495 && !CONST_OK_FOR_Os (delta))
9496 || (!DISP_IN_RANGE (delta)
9497 && !CONST_OK_FOR_K (vcall_offset)
9498 && !CONST_OK_FOR_Os (vcall_offset)))
9499 {
9500 op[5] = gen_label_rtx ();
9501 output_asm_insn ("basr\t%4,0", op);
9502 targetm.asm_out.internal_label (file, "L",
9503 CODE_LABEL_NUMBER (op[5]));
9504 }
9505
9506 /* Add DELTA to this pointer. */
9507 if (delta)
9508 {
9509 if (CONST_OK_FOR_J (delta))
9510 output_asm_insn ("la\t%1,%2(%1)", op);
9511 else if (DISP_IN_RANGE (delta))
9512 output_asm_insn ("lay\t%1,%2(%1)", op);
9513 else if (CONST_OK_FOR_K (delta))
9514 output_asm_insn ("ahi\t%1,%2", op);
9515 else if (CONST_OK_FOR_Os (delta))
9516 output_asm_insn ("afi\t%1,%2", op);
9517 else
9518 {
9519 op[6] = gen_label_rtx ();
9520 output_asm_insn ("a\t%1,%6-%5(%4)", op);
9521 }
9522 }
9523
9524 /* Perform vcall adjustment. */
9525 if (vcall_offset)
9526 {
9527 if (CONST_OK_FOR_J (vcall_offset))
9528 {
9529 output_asm_insn ("l\t%4,0(%1)", op);
9530 output_asm_insn ("a\t%1,%3(%4)", op);
9531 }
9532 else if (DISP_IN_RANGE (vcall_offset))
9533 {
9534 output_asm_insn ("l\t%4,0(%1)", op);
9535 output_asm_insn ("ay\t%1,%3(%4)", op);
9536 }
9537 else if (CONST_OK_FOR_K (vcall_offset))
9538 {
9539 output_asm_insn ("lhi\t%4,%3", op);
9540 output_asm_insn ("a\t%4,0(%1)", op);
9541 output_asm_insn ("a\t%1,0(%4)", op);
9542 }
9543 else if (CONST_OK_FOR_Os (vcall_offset))
9544 {
9545 output_asm_insn ("iilf\t%4,%3", op);
9546 output_asm_insn ("a\t%4,0(%1)", op);
9547 output_asm_insn ("a\t%1,0(%4)", op);
9548 }
9549 else
9550 {
9551 op[7] = gen_label_rtx ();
9552 output_asm_insn ("l\t%4,%7-%5(%4)", op);
9553 output_asm_insn ("a\t%4,0(%1)", op);
9554 output_asm_insn ("a\t%1,0(%4)", op);
9555 }
9556
9557 /* We had to clobber the base pointer register.
9558 Re-setup the base pointer (with a different base). */
9559 op[5] = gen_label_rtx ();
9560 output_asm_insn ("basr\t%4,0", op);
9561 targetm.asm_out.internal_label (file, "L",
9562 CODE_LABEL_NUMBER (op[5]));
9563 }
9564
9565 /* Jump to target. */
9566 op[8] = gen_label_rtx ();
9567
9568 if (!flag_pic)
9569 output_asm_insn ("l\t%4,%8-%5(%4)", op);
9570 else if (!nonlocal)
9571 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9572 /* We cannot call through .plt, since .plt requires %r12 loaded. */
9573 else if (flag_pic == 1)
9574 {
9575 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9576 output_asm_insn ("l\t%4,%0(%4)", op);
9577 }
9578 else if (flag_pic == 2)
9579 {
9580 op[9] = gen_rtx_REG (Pmode, 0);
9581 output_asm_insn ("l\t%9,%8-4-%5(%4)", op);
9582 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9583 output_asm_insn ("ar\t%4,%9", op);
9584 output_asm_insn ("l\t%4,0(%4)", op);
9585 }
9586
9587 output_asm_insn ("br\t%4", op);
9588
9589 /* Output literal pool. */
9590 output_asm_insn (".align\t4", op);
9591
9592 if (nonlocal && flag_pic == 2)
9593 output_asm_insn (".long\t%0", op);
9594 if (nonlocal)
9595 {
9596 op[0] = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
9597 SYMBOL_REF_FLAGS (op[0]) = SYMBOL_FLAG_LOCAL;
9598 }
9599
9600 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[8]));
9601 if (!flag_pic)
9602 output_asm_insn (".long\t%0", op);
9603 else
9604 output_asm_insn (".long\t%0-%5", op);
9605
9606 if (op[6])
9607 {
9608 targetm.asm_out.internal_label (file, "L",
9609 CODE_LABEL_NUMBER (op[6]));
9610 output_asm_insn (".long\t%2", op);
9611 }
9612 if (op[7])
9613 {
9614 targetm.asm_out.internal_label (file, "L",
9615 CODE_LABEL_NUMBER (op[7]));
9616 output_asm_insn (".long\t%3", op);
9617 }
9618 }
9619 final_end_function ();
9620 }
9621
9622 static bool
9623 s390_valid_pointer_mode (enum machine_mode mode)
9624 {
9625 return (mode == SImode || (TARGET_64BIT && mode == DImode));
9626 }
9627
9628 /* Check whether the given CALL_EXPR would use a call-saved
9629 register. This is used to decide whether sibling call
9630 optimization could be performed on the respective function
9631 call. */
9632
9633 static bool
9634 s390_call_saved_register_used (tree call_expr)
9635 {
9636 CUMULATIVE_ARGS cum_v;
9637 cumulative_args_t cum;
9638 tree parameter;
9639 enum machine_mode mode;
9640 tree type;
9641 rtx parm_rtx;
9642 int reg, i;
9643
9644 INIT_CUMULATIVE_ARGS (cum_v, NULL, NULL, 0, 0);
9645 cum = pack_cumulative_args (&cum_v);
9646
9647 for (i = 0; i < call_expr_nargs (call_expr); i++)
9648 {
9649 parameter = CALL_EXPR_ARG (call_expr, i);
9650 gcc_assert (parameter);
9651
9652 /* For an undeclared variable passed as a parameter we will get
9653 an ERROR_MARK node here. */
9654 if (TREE_CODE (parameter) == ERROR_MARK)
9655 return true;
9656
9657 type = TREE_TYPE (parameter);
9658 gcc_assert (type);
9659
9660 mode = TYPE_MODE (type);
9661 gcc_assert (mode);
9662
9663 if (pass_by_reference (&cum_v, mode, type, true))
9664 {
9665 mode = Pmode;
9666 type = build_pointer_type (type);
9667 }
9668
9669 parm_rtx = s390_function_arg (cum, mode, type, 0);
9670
9671 s390_function_arg_advance (cum, mode, type, 0);
9672
9673 if (!parm_rtx)
9674 continue;
9675
9676 if (REG_P (parm_rtx))
9677 {
9678 for (reg = 0;
9679 reg < HARD_REGNO_NREGS (REGNO (parm_rtx), GET_MODE (parm_rtx));
9680 reg++)
9681 if (!call_used_regs[reg + REGNO (parm_rtx)])
9682 return true;
9683 }
9684
9685 if (GET_CODE (parm_rtx) == PARALLEL)
9686 {
9687 int i;
9688
9689 for (i = 0; i < XVECLEN (parm_rtx, 0); i++)
9690 {
9691 rtx r = XEXP (XVECEXP (parm_rtx, 0, i), 0);
9692
9693 gcc_assert (REG_P (r));
9694
9695 for (reg = 0;
9696 reg < HARD_REGNO_NREGS (REGNO (r), GET_MODE (r));
9697 reg++)
9698 if (!call_used_regs[reg + REGNO (r)])
9699 return true;
9700 }
9701 }
9702
9703 }
9704 return false;
9705 }
9706
9707 /* Return true if the given call expression can be
9708 turned into a sibling call.
9709 DECL holds the declaration of the function to be called whereas
9710 EXP is the call expression itself. */
9711
9712 static bool
9713 s390_function_ok_for_sibcall (tree decl, tree exp)
9714 {
9715 /* The TPF epilogue uses register 1. */
9716 if (TARGET_TPF_PROFILING)
9717 return false;
9718
9719 /* The 31-bit PLT code uses register 12 (GOT pointer - call-saved)
9720 which would have to be restored before the sibcall. */
9721 if (!TARGET_64BIT && flag_pic && decl && !targetm.binds_local_p (decl))
9722 return false;
9723
9724 /* Register 6 on s390 is available as an argument register but unfortunately
9725 "call saved". This makes functions needing this register for arguments
9726 not suitable for sibcalls. */
9727 return !s390_call_saved_register_used (exp);
9728 }
9729
9730 /* Return the fixed registers used for condition codes. */
9731
9732 static bool
9733 s390_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
9734 {
9735 *p1 = CC_REGNUM;
9736 *p2 = INVALID_REGNUM;
9737
9738 return true;
9739 }
9740
9741 /* This function is used by the call expanders of the machine description.
9742 It emits the call insn itself together with the necessary operations
9743 to adjust the target address and returns the emitted insn.
9744 ADDR_LOCATION is the target address rtx
9745 TLS_CALL the location of the thread-local symbol
9746 RESULT_REG the register where the result of the call should be stored
9747 RETADDR_REG the register where the return address should be stored
9748 If this parameter is NULL_RTX the call is considered
9749 to be a sibling call. */
9750
9751 rtx
9752 s390_emit_call (rtx addr_location, rtx tls_call, rtx result_reg,
9753 rtx retaddr_reg)
9754 {
9755 bool plt_call = false;
9756 rtx insn;
9757 rtx call;
9758 rtx clobber;
9759 rtvec vec;
9760
9761 /* Direct function calls need special treatment. */
9762 if (GET_CODE (addr_location) == SYMBOL_REF)
9763 {
9764 /* When calling a global routine in PIC mode, we must
9765 replace the symbol itself with the PLT stub. */
9766 if (flag_pic && !SYMBOL_REF_LOCAL_P (addr_location))
9767 {
9768 if (retaddr_reg != NULL_RTX)
9769 {
9770 addr_location = gen_rtx_UNSPEC (Pmode,
9771 gen_rtvec (1, addr_location),
9772 UNSPEC_PLT);
9773 addr_location = gen_rtx_CONST (Pmode, addr_location);
9774 plt_call = true;
9775 }
9776 else
9777 /* For -fpic code the PLT entries might use r12 which is
9778 call-saved. Therefore we cannot do a sibcall when
9779 calling directly using a symbol ref. When reaching
9780 this point we decided (in s390_function_ok_for_sibcall)
9781 to do a sibcall for a function pointer but one of the
9782 optimizers was able to get rid of the function pointer
9783 by propagating the symbol ref into the call. This
9784 optimization is illegal for S/390 so we turn the direct
9785 call into an indirect call again. */
9786 addr_location = force_reg (Pmode, addr_location);
9787 }
9788
9789 /* Unless we can use the bras(l) insn, force the
9790 routine address into a register. */
9791 if (!TARGET_SMALL_EXEC && !TARGET_CPU_ZARCH)
9792 {
9793 if (flag_pic)
9794 addr_location = legitimize_pic_address (addr_location, 0);
9795 else
9796 addr_location = force_reg (Pmode, addr_location);
9797 }
9798 }
9799
9800 /* If it is already an indirect call or the code above moved the
9801 SYMBOL_REF somewhere else, make sure the address can be found in
9802 register 1. */
9803 if (retaddr_reg == NULL_RTX
9804 && GET_CODE (addr_location) != SYMBOL_REF
9805 && !plt_call)
9806 {
9807 emit_move_insn (gen_rtx_REG (Pmode, SIBCALL_REGNUM), addr_location);
9808 addr_location = gen_rtx_REG (Pmode, SIBCALL_REGNUM);
9809 }
9810
9811 addr_location = gen_rtx_MEM (QImode, addr_location);
9812 call = gen_rtx_CALL (VOIDmode, addr_location, const0_rtx);
9813
9814 if (result_reg != NULL_RTX)
9815 call = gen_rtx_SET (VOIDmode, result_reg, call);
9816
9817 if (retaddr_reg != NULL_RTX)
9818 {
9819 clobber = gen_rtx_CLOBBER (VOIDmode, retaddr_reg);
9820
9821 if (tls_call != NULL_RTX)
9822 vec = gen_rtvec (3, call, clobber,
9823 gen_rtx_USE (VOIDmode, tls_call));
9824 else
9825 vec = gen_rtvec (2, call, clobber);
9826
9827 call = gen_rtx_PARALLEL (VOIDmode, vec);
9828 }
9829
9830 insn = emit_call_insn (call);
9831
9832 /* 31-bit PLT stubs and tls calls use the GOT register implicitly. */
9833 if ((!TARGET_64BIT && plt_call) || tls_call != NULL_RTX)
9834 {
9835 /* s390_function_ok_for_sibcall should
9836 have denied sibcalls in this case. */
9837 gcc_assert (retaddr_reg != NULL_RTX);
9838 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, 12));
9839 }
9840 return insn;
9841 }
9842
9843 /* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
9844
9845 static void
9846 s390_conditional_register_usage (void)
9847 {
9848 int i;
9849
9850 if (flag_pic)
9851 {
9852 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
9853 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
9854 }
9855 if (TARGET_CPU_ZARCH)
9856 {
9857 fixed_regs[BASE_REGNUM] = 0;
9858 call_used_regs[BASE_REGNUM] = 0;
9859 fixed_regs[RETURN_REGNUM] = 0;
9860 call_used_regs[RETURN_REGNUM] = 0;
9861 }
9862 if (TARGET_64BIT)
9863 {
9864 for (i = 24; i < 32; i++)
9865 call_used_regs[i] = call_really_used_regs[i] = 0;
9866 }
9867 else
9868 {
9869 for (i = 18; i < 20; i++)
9870 call_used_regs[i] = call_really_used_regs[i] = 0;
9871 }
9872
9873 if (TARGET_SOFT_FLOAT)
9874 {
9875 for (i = 16; i < 32; i++)
9876 call_used_regs[i] = fixed_regs[i] = 1;
9877 }
9878 }
9879
9880 /* Corresponding function to eh_return expander. */
9881
9882 static GTY(()) rtx s390_tpf_eh_return_symbol;
9883 void
9884 s390_emit_tpf_eh_return (rtx target)
9885 {
9886 rtx insn, reg;
9887
9888 if (!s390_tpf_eh_return_symbol)
9889 s390_tpf_eh_return_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tpf_eh_return");
9890
9891 reg = gen_rtx_REG (Pmode, 2);
9892
9893 emit_move_insn (reg, target);
9894 insn = s390_emit_call (s390_tpf_eh_return_symbol, NULL_RTX, reg,
9895 gen_rtx_REG (Pmode, RETURN_REGNUM));
9896 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
9897
9898 emit_move_insn (EH_RETURN_HANDLER_RTX, reg);
9899 }
9900
9901 /* Rework the prologue/epilogue to avoid saving/restoring
9902 registers unnecessarily. */
9903
9904 static void
9905 s390_optimize_prologue (void)
9906 {
9907 rtx insn, new_insn, next_insn;
9908
9909 /* Do a final recompute of the frame-related data. */
9910
9911 s390_update_frame_layout ();
9912
9913 /* If all special registers are in fact used, there's nothing we
9914 can do, so no point in walking the insn list. */
9915
9916 if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
9917 && cfun_frame_layout.last_save_gpr >= BASE_REGNUM
9918 && (TARGET_CPU_ZARCH
9919 || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
9920 && cfun_frame_layout.last_save_gpr >= RETURN_REGNUM)))
9921 return;
9922
9923 /* Search for prologue/epilogue insns and replace them. */
9924
9925 for (insn = get_insns (); insn; insn = next_insn)
9926 {
9927 int first, last, off;
9928 rtx set, base, offset;
9929
9930 next_insn = NEXT_INSN (insn);
9931
9932 if (GET_CODE (insn) != INSN)
9933 continue;
9934
9935 if (GET_CODE (PATTERN (insn)) == PARALLEL
9936 && store_multiple_operation (PATTERN (insn), VOIDmode))
9937 {
9938 set = XVECEXP (PATTERN (insn), 0, 0);
9939 first = REGNO (SET_SRC (set));
9940 last = first + XVECLEN (PATTERN (insn), 0) - 1;
9941 offset = const0_rtx;
9942 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
9943 off = INTVAL (offset);
9944
9945 if (GET_CODE (base) != REG || off < 0)
9946 continue;
9947 if (cfun_frame_layout.first_save_gpr != -1
9948 && (cfun_frame_layout.first_save_gpr < first
9949 || cfun_frame_layout.last_save_gpr > last))
9950 continue;
9951 if (REGNO (base) != STACK_POINTER_REGNUM
9952 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
9953 continue;
9954 if (first > BASE_REGNUM || last < BASE_REGNUM)
9955 continue;
9956
9957 if (cfun_frame_layout.first_save_gpr != -1)
9958 {
9959 new_insn = save_gprs (base,
9960 off + (cfun_frame_layout.first_save_gpr
9961 - first) * UNITS_PER_LONG,
9962 cfun_frame_layout.first_save_gpr,
9963 cfun_frame_layout.last_save_gpr);
9964 new_insn = emit_insn_before (new_insn, insn);
9965 INSN_ADDRESSES_NEW (new_insn, -1);
9966 }
9967
9968 remove_insn (insn);
9969 continue;
9970 }
9971
9972 if (cfun_frame_layout.first_save_gpr == -1
9973 && GET_CODE (PATTERN (insn)) == SET
9974 && GET_CODE (SET_SRC (PATTERN (insn))) == REG
9975 && (REGNO (SET_SRC (PATTERN (insn))) == BASE_REGNUM
9976 || (!TARGET_CPU_ZARCH
9977 && REGNO (SET_SRC (PATTERN (insn))) == RETURN_REGNUM))
9978 && GET_CODE (SET_DEST (PATTERN (insn))) == MEM)
9979 {
9980 set = PATTERN (insn);
9981 first = REGNO (SET_SRC (set));
9982 offset = const0_rtx;
9983 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
9984 off = INTVAL (offset);
9985
9986 if (GET_CODE (base) != REG || off < 0)
9987 continue;
9988 if (REGNO (base) != STACK_POINTER_REGNUM
9989 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
9990 continue;
9991
9992 remove_insn (insn);
9993 continue;
9994 }
9995
9996 if (GET_CODE (PATTERN (insn)) == PARALLEL
9997 && load_multiple_operation (PATTERN (insn), VOIDmode))
9998 {
9999 set = XVECEXP (PATTERN (insn), 0, 0);
10000 first = REGNO (SET_DEST (set));
10001 last = first + XVECLEN (PATTERN (insn), 0) - 1;
10002 offset = const0_rtx;
10003 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
10004 off = INTVAL (offset);
10005
10006 if (GET_CODE (base) != REG || off < 0)
10007 continue;
10008 if (cfun_frame_layout.first_restore_gpr != -1
10009 && (cfun_frame_layout.first_restore_gpr < first
10010 || cfun_frame_layout.last_restore_gpr > last))
10011 continue;
10012 if (REGNO (base) != STACK_POINTER_REGNUM
10013 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10014 continue;
10015 if (first > BASE_REGNUM || last < BASE_REGNUM)
10016 continue;
10017
10018 if (cfun_frame_layout.first_restore_gpr != -1)
10019 {
10020 new_insn = restore_gprs (base,
10021 off + (cfun_frame_layout.first_restore_gpr
10022 - first) * UNITS_PER_LONG,
10023 cfun_frame_layout.first_restore_gpr,
10024 cfun_frame_layout.last_restore_gpr);
10025 new_insn = emit_insn_before (new_insn, insn);
10026 INSN_ADDRESSES_NEW (new_insn, -1);
10027 }
10028
10029 remove_insn (insn);
10030 continue;
10031 }
10032
10033 if (cfun_frame_layout.first_restore_gpr == -1
10034 && GET_CODE (PATTERN (insn)) == SET
10035 && GET_CODE (SET_DEST (PATTERN (insn))) == REG
10036 && (REGNO (SET_DEST (PATTERN (insn))) == BASE_REGNUM
10037 || (!TARGET_CPU_ZARCH
10038 && REGNO (SET_DEST (PATTERN (insn))) == RETURN_REGNUM))
10039 && GET_CODE (SET_SRC (PATTERN (insn))) == MEM)
10040 {
10041 set = PATTERN (insn);
10042 first = REGNO (SET_DEST (set));
10043 offset = const0_rtx;
10044 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
10045 off = INTVAL (offset);
10046
10047 if (GET_CODE (base) != REG || off < 0)
10048 continue;
10049 if (REGNO (base) != STACK_POINTER_REGNUM
10050 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10051 continue;
10052
10053 remove_insn (insn);
10054 continue;
10055 }
10056 }
10057 }
10058
10059 /* On z10 and later the dynamic branch prediction must see the
10060 backward jump within a certain window. If not, it falls back to
10061 the static prediction. This function rearranges the loop backward
10062 branch in a way which makes the static prediction always correct.
10063 The function returns true if it added an instruction. */
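/* Rough sketch of the rewrite performed below (pseudo-assembly,
   illustration only): a conditional backward branch whose target is
   more than PREDICT_DISTANCE away, e.g.

	target:	...
		jne	target

   is turned into

	target:	...
		je	skip
		j	target
	skip:

   i.e. the branch sense is inverted (by swapping the IF_THEN_ELSE arms)
   and the backward branch becomes unconditional, so the static
   prediction is trivially correct.  */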
10064 static bool
10065 s390_fix_long_loop_prediction (rtx insn)
10066 {
10067 rtx set = single_set (insn);
10068 rtx code_label, label_ref, new_label;
10069 rtx uncond_jump;
10070 rtx cur_insn;
10071 rtx tmp;
10072 int distance;
10073
10074 /* This will exclude branch on count and branch on index patterns
10075 since these are correctly statically predicted. */
10076 if (!set
10077 || SET_DEST (set) != pc_rtx
10078 || GET_CODE (SET_SRC(set)) != IF_THEN_ELSE)
10079 return false;
10080
10081 label_ref = (GET_CODE (XEXP (SET_SRC (set), 1)) == LABEL_REF ?
10082 XEXP (SET_SRC (set), 1) : XEXP (SET_SRC (set), 2));
10083
10084 gcc_assert (GET_CODE (label_ref) == LABEL_REF);
10085
10086 code_label = XEXP (label_ref, 0);
10087
10088 if (INSN_ADDRESSES (INSN_UID (code_label)) == -1
10089 || INSN_ADDRESSES (INSN_UID (insn)) == -1
10090 || (INSN_ADDRESSES (INSN_UID (insn))
10091 - INSN_ADDRESSES (INSN_UID (code_label)) < PREDICT_DISTANCE))
10092 return false;
10093
10094 for (distance = 0, cur_insn = PREV_INSN (insn);
10095 distance < PREDICT_DISTANCE - 6;
10096 distance += get_attr_length (cur_insn), cur_insn = PREV_INSN (cur_insn))
10097 if (!cur_insn || JUMP_P (cur_insn) || LABEL_P (cur_insn))
10098 return false;
10099
10100 new_label = gen_label_rtx ();
10101 uncond_jump = emit_jump_insn_after (
10102 gen_rtx_SET (VOIDmode, pc_rtx,
10103 gen_rtx_LABEL_REF (VOIDmode, code_label)),
10104 insn);
10105 emit_label_after (new_label, uncond_jump);
10106
10107 tmp = XEXP (SET_SRC (set), 1);
10108 XEXP (SET_SRC (set), 1) = XEXP (SET_SRC (set), 2);
10109 XEXP (SET_SRC (set), 2) = tmp;
10110 INSN_CODE (insn) = -1;
10111
10112 XEXP (label_ref, 0) = new_label;
10113 JUMP_LABEL (insn) = new_label;
10114 JUMP_LABEL (uncond_jump) = code_label;
10115
10116 return true;
10117 }
10118
10119 /* Returns 1 if INSN reads the value of REG for purposes not related
10120 to addressing of memory, and 0 otherwise. */
10121 static int
10122 s390_non_addr_reg_read_p (rtx reg, rtx insn)
10123 {
10124 return reg_referenced_p (reg, PATTERN (insn))
10125 && !reg_used_in_mem_p (REGNO (reg), PATTERN (insn));
10126 }
10127
10128 /* Starting from INSN find_cond_jump looks downwards in the insn
10129 stream for a single jump insn which is the last user of the
10130 condition code set in INSN. */
10131 static rtx
10132 find_cond_jump (rtx insn)
10133 {
10134 for (; insn; insn = NEXT_INSN (insn))
10135 {
10136 rtx ite, cc;
10137
10138 if (LABEL_P (insn))
10139 break;
10140
10141 if (!JUMP_P (insn))
10142 {
10143 if (reg_mentioned_p (gen_rtx_REG (CCmode, CC_REGNUM), insn))
10144 break;
10145 continue;
10146 }
10147
10148 /* This will be triggered by a return. */
10149 if (GET_CODE (PATTERN (insn)) != SET)
10150 break;
10151
10152 gcc_assert (SET_DEST (PATTERN (insn)) == pc_rtx);
10153 ite = SET_SRC (PATTERN (insn));
10154
10155 if (GET_CODE (ite) != IF_THEN_ELSE)
10156 break;
10157
10158 cc = XEXP (XEXP (ite, 0), 0);
10159 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc)))
10160 break;
10161
10162 if (find_reg_note (insn, REG_DEAD, cc))
10163 return insn;
10164 break;
10165 }
10166
10167 return NULL_RTX;
10168 }
10169
10170 /* Swap the condition in COND and the operands in OP0 and OP1 so that
10171 the semantics does not change. If NULL_RTX is passed as COND the
10172 function tries to find the conditional jump starting with INSN. */
10173 static void
10174 s390_swap_cmp (rtx cond, rtx *op0, rtx *op1, rtx insn)
10175 {
10176 rtx tmp = *op0;
10177
10178 if (cond == NULL_RTX)
10179 {
10180 rtx jump = find_cond_jump (NEXT_INSN (insn));
10181 jump = jump ? single_set (jump) : NULL_RTX;
10182
10183 if (jump == NULL_RTX)
10184 return;
10185
10186 cond = XEXP (XEXP (jump, 1), 0);
10187 }
10188
10189 *op0 = *op1;
10190 *op1 = tmp;
10191 PUT_CODE (cond, swap_condition (GET_CODE (cond)));
10192 }
10193
10194 /* On z10, instructions of the compare-and-branch family have the
10195 property of accessing the register occurring as the second operand
10196 with its bits complemented. If such a compare is grouped with a
10197 second instruction that accesses the same register non-complemented,
10198 and if that register's value is delivered via a bypass, then the
10199 pipeline recycles, thereby causing significant performance decline.
10200 This function locates such situations and exchanges the two
10201 operands of the compare. The function returns true whenever it
10202 added an insn. */
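/* Example (illustration only): if the insn preceding a compare-and-
   branch such as "crj %r1,%r2,..." also reads %r2, the operands are
   exchanged to give "crj %r2,%r1,..." with the comparison condition
   swapped via s390_swap_cmp; if the operands cannot be swapped safely,
   a NOP is emitted after the compare instead.  */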
10203 static bool
10204 s390_z10_optimize_cmp (rtx insn)
10205 {
10206 rtx prev_insn, next_insn;
10207 bool insn_added_p = false;
10208 rtx cond, *op0, *op1;
10209
10210 if (GET_CODE (PATTERN (insn)) == PARALLEL)
10211 {
10212 /* Handle compare and branch and branch on count
10213 instructions. */
10214 rtx pattern = single_set (insn);
10215
10216 if (!pattern
10217 || SET_DEST (pattern) != pc_rtx
10218 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE)
10219 return false;
10220
10221 cond = XEXP (SET_SRC (pattern), 0);
10222 op0 = &XEXP (cond, 0);
10223 op1 = &XEXP (cond, 1);
10224 }
10225 else if (GET_CODE (PATTERN (insn)) == SET)
10226 {
10227 rtx src, dest;
10228
10229 /* Handle normal compare instructions. */
10230 src = SET_SRC (PATTERN (insn));
10231 dest = SET_DEST (PATTERN (insn));
10232
10233 if (!REG_P (dest)
10234 || !CC_REGNO_P (REGNO (dest))
10235 || GET_CODE (src) != COMPARE)
10236 return false;
10237
10238 /* s390_swap_cmp will try to find the conditional
10239 jump when passing NULL_RTX as condition. */
10240 cond = NULL_RTX;
10241 op0 = &XEXP (src, 0);
10242 op1 = &XEXP (src, 1);
10243 }
10244 else
10245 return false;
10246
10247 if (!REG_P (*op0) || !REG_P (*op1))
10248 return false;
10249
10250 if (GET_MODE_CLASS (GET_MODE (*op0)) != MODE_INT)
10251 return false;
10252
10253 /* Swap the COMPARE arguments and its mask if there is a
10254 conflicting access in the previous insn. */
10255 prev_insn = prev_active_insn (insn);
10256 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
10257 && reg_referenced_p (*op1, PATTERN (prev_insn)))
10258 s390_swap_cmp (cond, op0, op1, insn);
10259
10260 /* Check if there is a conflict with the next insn. If there
10261 was no conflict with the previous insn, then swap the
10262 COMPARE arguments and its mask. If we already swapped
10263 the operands, or if swapping them would cause a conflict
10264 with the previous insn, issue a NOP after the COMPARE in
10265 order to separate the two instructions. */
10266 next_insn = next_active_insn (insn);
10267 if (next_insn != NULL_RTX && INSN_P (next_insn)
10268 && s390_non_addr_reg_read_p (*op1, next_insn))
10269 {
10270 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
10271 && s390_non_addr_reg_read_p (*op0, prev_insn))
10272 {
10273 if (REGNO (*op1) == 0)
10274 emit_insn_after (gen_nop1 (), insn);
10275 else
10276 emit_insn_after (gen_nop (), insn);
10277 insn_added_p = true;
10278 }
10279 else
10280 s390_swap_cmp (cond, op0, op1, insn);
10281 }
10282 return insn_added_p;
10283 }
10284
10285 /* Perform machine-dependent processing. */
10286
10287 static void
10288 s390_reorg (void)
10289 {
10290 bool pool_overflow = false;
10291
10292 /* Make sure all splits have been performed; splits after
10293 machine_dependent_reorg might confuse insn length counts. */
10294 split_all_insns_noflow ();
10295
10296 /* Install the main literal pool and the associated base
10297 register load insns.
10298
10299 In addition, there are two problematic situations we need
10300 to correct:
10301
10302 - the literal pool might be > 4096 bytes in size, so that
10303 some of its elements cannot be directly accessed
10304
10305 - a branch target might be > 64K away from the branch, so that
10306 it is not possible to use a PC-relative instruction.
10307
10308 To fix those, we split the single literal pool into multiple
10309 pool chunks, reloading the pool base register at various
10310 points throughout the function to ensure it always points to
10311 the pool chunk the following code expects, and / or replace
10312 PC-relative branches by absolute branches.
10313
10314 However, the two problems are interdependent: splitting the
10315 literal pool can move a branch further away from its target,
10316 causing the 64K limit to overflow, and on the other hand,
10317 replacing a PC-relative branch by an absolute branch means
10318 we need to put the branch target address into the literal
10319 pool, possibly causing it to overflow.
10320
10321 So, we loop trying to fix up both problems until we manage
10322 to satisfy both conditions at the same time. Note that the
10323 loop is guaranteed to terminate as every pass of the loop
10324 strictly decreases the total number of PC-relative branches
10325 in the function. (This is not completely true as there
10326 might be branch-over-pool insns introduced by chunkify_start.
10327 Those never need to be split however.) */
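/* In outline (informal sketch of the fixpoint loop below):

     for (;;)
       {
	 build the literal pool, chunkifying it once it has overflowed;
	 if (an out-of-range branch had to be split, adding pool entries)
	   { cancel the current pool or chunk list and retry; }
	 else
	   { finish the pool and stop; }
       }  */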
10328
10329 for (;;)
10330 {
10331 struct constant_pool *pool = NULL;
10332
10333 /* Collect the literal pool. */
10334 if (!pool_overflow)
10335 {
10336 pool = s390_mainpool_start ();
10337 if (!pool)
10338 pool_overflow = true;
10339 }
10340
10341 /* If literal pool overflowed, start to chunkify it. */
10342 if (pool_overflow)
10343 pool = s390_chunkify_start ();
10344
10345 /* Split out-of-range branches. If this has created new
10346 literal pool entries, cancel current chunk list and
10347 recompute it. zSeries machines have large branch
10348 instructions, so we never need to split a branch. */
10349 if (!TARGET_CPU_ZARCH && s390_split_branches ())
10350 {
10351 if (pool_overflow)
10352 s390_chunkify_cancel (pool);
10353 else
10354 s390_mainpool_cancel (pool);
10355
10356 continue;
10357 }
10358
10359 /* If we made it up to here, both conditions are satisfied.
10360 Finish up literal pool related changes. */
10361 if (pool_overflow)
10362 s390_chunkify_finish (pool);
10363 else
10364 s390_mainpool_finish (pool);
10365
10366 /* We're done splitting branches. */
10367 cfun->machine->split_branches_pending_p = false;
10368 break;
10369 }
10370
10371 /* Generate out-of-pool execute target insns. */
10372 if (TARGET_CPU_ZARCH)
10373 {
10374 rtx insn, label, target;
10375
10376 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10377 {
10378 label = s390_execute_label (insn);
10379 if (!label)
10380 continue;
10381
10382 gcc_assert (label != const0_rtx);
10383
10384 target = emit_label (XEXP (label, 0));
10385 INSN_ADDRESSES_NEW (target, -1);
10386
10387 target = emit_insn (s390_execute_target (insn));
10388 INSN_ADDRESSES_NEW (target, -1);
10389 }
10390 }
10391
10392 /* Try to optimize prologue and epilogue further. */
10393 s390_optimize_prologue ();
10394
10395 /* Walk over the insns and do some >=z10 specific changes. */
10396 if (s390_tune == PROCESSOR_2097_Z10
10397 || s390_tune == PROCESSOR_2817_Z196)
10398 {
10399 rtx insn;
10400 bool insn_added_p = false;
10401
10402 /* The insn lengths and addresses have to be up to date for the
10403 following manipulations. */
10404 shorten_branches (get_insns ());
10405
10406 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10407 {
10408 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
10409 continue;
10410
10411 if (JUMP_P (insn))
10412 insn_added_p |= s390_fix_long_loop_prediction (insn);
10413
10414 if ((GET_CODE (PATTERN (insn)) == PARALLEL
10415 || GET_CODE (PATTERN (insn)) == SET)
10416 && s390_tune == PROCESSOR_2097_Z10)
10417 insn_added_p |= s390_z10_optimize_cmp (insn);
10418 }
10419
10420 /* Adjust branches if we added new instructions. */
10421 if (insn_added_p)
10422 shorten_branches (get_insns ());
10423 }
10424 }
10425
10426 /* Return true if INSN is a fp load insn writing register REGNO. */
10427 static inline bool
10428 s390_fpload_toreg (rtx insn, unsigned int regno)
10429 {
10430 rtx set;
10431 enum attr_type flag = s390_safe_attr_type (insn);
10432
10433 if (flag != TYPE_FLOADSF && flag != TYPE_FLOADDF)
10434 return false;
10435
10436 set = single_set (insn);
10437
10438 if (set == NULL_RTX)
10439 return false;
10440
10441 if (!REG_P (SET_DEST (set)) || !MEM_P (SET_SRC (set)))
10442 return false;
10443
10444 if (REGNO (SET_DEST (set)) != regno)
10445 return false;
10446
10447 return true;
10448 }
10449
10450 /* This value describes the distance to be avoided between an
10451 arithmetic fp instruction and an fp load writing the same register.
10452 Z10_EARLYLOAD_DISTANCE - 1 as well as Z10_EARLYLOAD_DISTANCE + 1 are
10453 fine, but the exact value has to be avoided. Otherwise the FP
10454 pipeline will throw an exception causing a major penalty. */
10455 #define Z10_EARLYLOAD_DISTANCE 7
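/* Informally: if the insn issued Z10_EARLYLOAD_DISTANCE - 1 active
   insns ago arithmetically set a floating-point register,
   s390_z10_prevent_earlyload_conflicts below moves any ready fp load
   of that same register to the end of the ready list, so that it is
   issued as late as possible and the two insns do not end up exactly
   Z10_EARLYLOAD_DISTANCE apart.  */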
10456
10457 /* Rearrange the ready list in order to avoid the situation described
10458 for Z10_EARLYLOAD_DISTANCE. A problematic load instruction is
10459 moved to the very end of the ready list. */
10460 static void
10461 s390_z10_prevent_earlyload_conflicts (rtx *ready, int *nready_p)
10462 {
10463 unsigned int regno;
10464 int nready = *nready_p;
10465 rtx tmp;
10466 int i;
10467 rtx insn;
10468 rtx set;
10469 enum attr_type flag;
10470 int distance;
10471
10472 /* Skip DISTANCE - 1 active insns. */
10473 for (insn = last_scheduled_insn, distance = Z10_EARLYLOAD_DISTANCE - 1;
10474 distance > 0 && insn != NULL_RTX;
10475 distance--, insn = prev_active_insn (insn))
10476 if (CALL_P (insn) || JUMP_P (insn))
10477 return;
10478
10479 if (insn == NULL_RTX)
10480 return;
10481
10482 set = single_set (insn);
10483
10484 if (set == NULL_RTX || !REG_P (SET_DEST (set))
10485 || GET_MODE_CLASS (GET_MODE (SET_DEST (set))) != MODE_FLOAT)
10486 return;
10487
10488 flag = s390_safe_attr_type (insn);
10489
10490 if (flag == TYPE_FLOADSF || flag == TYPE_FLOADDF)
10491 return;
10492
10493 regno = REGNO (SET_DEST (set));
10494 i = nready - 1;
10495
10496 while (!s390_fpload_toreg (ready[i], regno) && i > 0)
10497 i--;
10498
10499 if (!i)
10500 return;
10501
10502 tmp = ready[i];
10503 memmove (&ready[1], &ready[0], sizeof (rtx) * i);
10504 ready[0] = tmp;
10505 }
10506
10507 /* This function is called via hook TARGET_SCHED_REORDER before
10508 issuing one insn from list READY, which contains *NREADYP entries.
10509 For target z10 it reorders load instructions to avoid early load
10510 conflicts in the floating point pipeline. */
10511 static int
10512 s390_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
10513 rtx *ready, int *nreadyp, int clock ATTRIBUTE_UNUSED)
10514 {
10515 if (s390_tune == PROCESSOR_2097_Z10)
10516 if (reload_completed && *nreadyp > 1)
10517 s390_z10_prevent_earlyload_conflicts (ready, nreadyp);
10518
10519 return s390_issue_rate ();
10520 }
10521
10522 /* This function is called via hook TARGET_SCHED_VARIABLE_ISSUE after
10523 the scheduler has issued INSN. It stores the last issued insn into
10524 last_scheduled_insn in order to make it available for
10525 s390_sched_reorder. */
10526 static int
10527 s390_sched_variable_issue (FILE *file ATTRIBUTE_UNUSED,
10528 int verbose ATTRIBUTE_UNUSED,
10529 rtx insn, int more)
10530 {
10531 last_scheduled_insn = insn;
10532
10533 if (GET_CODE (PATTERN (insn)) != USE
10534 && GET_CODE (PATTERN (insn)) != CLOBBER)
10535 return more - 1;
10536 else
10537 return more;
10538 }
10539
10540 static void
10541 s390_sched_init (FILE *file ATTRIBUTE_UNUSED,
10542 int verbose ATTRIBUTE_UNUSED,
10543 int max_ready ATTRIBUTE_UNUSED)
10544 {
10545 last_scheduled_insn = NULL_RTX;
10546 }
10547
10548 /* This function checks the whole of insn X for memory references. The
10549 function always returns zero because the framework it is called
10550 from would stop recursively analyzing the insn upon a return value
10551 other than zero. The real result of this function is updating
10552 counter variable MEM_COUNT. */
10553 static int
10554 check_dpu (rtx *x, unsigned *mem_count)
10555 {
10556 if (*x != NULL_RTX && MEM_P (*x))
10557 (*mem_count)++;
10558 return 0;
10559 }
10560
10561 /* This target hook implementation for TARGET_LOOP_UNROLL_ADJUST calculates
10562 the number of times struct loop *LOOP should be unrolled when tuning for
10563 CPUs with a built-in stride prefetcher.
10564 The loop is analyzed for memory accesses by calling check_dpu for
10565 each rtx of the loop. Depending on the loop_depth and the number of
10566 memory accesses a new number <= nunroll is returned to improve the
10567 behaviour of the hardware prefetch unit. */
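/* Worked example (illustration only): for a loop of depth 1 whose body
   contains 4 memory references, the unroll factor below is capped at
   MIN (nunroll, 28 / 4) = MIN (nunroll, 7).  */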
10568 static unsigned
10569 s390_loop_unroll_adjust (unsigned nunroll, struct loop *loop)
10570 {
10571 basic_block *bbs;
10572 rtx insn;
10573 unsigned i;
10574 unsigned mem_count = 0;
10575
10576 if (s390_tune != PROCESSOR_2097_Z10 && s390_tune != PROCESSOR_2817_Z196)
10577 return nunroll;
10578
10579 /* Count the number of memory references within the loop body. */
10580 bbs = get_loop_body (loop);
10581 for (i = 0; i < loop->num_nodes; i++)
10582 {
10583 for (insn = BB_HEAD (bbs[i]); insn != BB_END (bbs[i]); insn = NEXT_INSN (insn))
10584 if (INSN_P (insn) && INSN_CODE (insn) != -1)
10585 for_each_rtx (&insn, (rtx_function) check_dpu, &mem_count);
10586 }
10587 free (bbs);
10588
10589 /* Prevent division by zero; nunroll does not need adjusting in this case. */
10590 if (mem_count == 0)
10591 return nunroll;
10592
10593 switch (loop_depth(loop))
10594 {
10595 case 1:
10596 return MIN (nunroll, 28 / mem_count);
10597 case 2:
10598 return MIN (nunroll, 22 / mem_count);
10599 default:
10600 return MIN (nunroll, 16 / mem_count);
10601 }
10602 }
10603
10604 /* Initialize GCC target structure. */
10605
10606 #undef TARGET_ASM_ALIGNED_HI_OP
10607 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
10608 #undef TARGET_ASM_ALIGNED_DI_OP
10609 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
10610 #undef TARGET_ASM_INTEGER
10611 #define TARGET_ASM_INTEGER s390_assemble_integer
10612
10613 #undef TARGET_ASM_OPEN_PAREN
10614 #define TARGET_ASM_OPEN_PAREN ""
10615
10616 #undef TARGET_ASM_CLOSE_PAREN
10617 #define TARGET_ASM_CLOSE_PAREN ""
10618
10619 #undef TARGET_OPTION_OVERRIDE
10620 #define TARGET_OPTION_OVERRIDE s390_option_override
10621
10622 #undef TARGET_ENCODE_SECTION_INFO
10623 #define TARGET_ENCODE_SECTION_INFO s390_encode_section_info
10624
10625 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10626 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
10627
10628 #ifdef HAVE_AS_TLS
10629 #undef TARGET_HAVE_TLS
10630 #define TARGET_HAVE_TLS true
10631 #endif
10632 #undef TARGET_CANNOT_FORCE_CONST_MEM
10633 #define TARGET_CANNOT_FORCE_CONST_MEM s390_cannot_force_const_mem
10634
10635 #undef TARGET_DELEGITIMIZE_ADDRESS
10636 #define TARGET_DELEGITIMIZE_ADDRESS s390_delegitimize_address
10637
10638 #undef TARGET_LEGITIMIZE_ADDRESS
10639 #define TARGET_LEGITIMIZE_ADDRESS s390_legitimize_address
10640
10641 #undef TARGET_RETURN_IN_MEMORY
10642 #define TARGET_RETURN_IN_MEMORY s390_return_in_memory
10643
10644 #undef TARGET_INIT_BUILTINS
10645 #define TARGET_INIT_BUILTINS s390_init_builtins
10646 #undef TARGET_EXPAND_BUILTIN
10647 #define TARGET_EXPAND_BUILTIN s390_expand_builtin
10648
10649 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
10650 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA s390_output_addr_const_extra
10651
10652 #undef TARGET_ASM_OUTPUT_MI_THUNK
10653 #define TARGET_ASM_OUTPUT_MI_THUNK s390_output_mi_thunk
10654 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
10655 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
10656
10657 #undef TARGET_SCHED_ADJUST_PRIORITY
10658 #define TARGET_SCHED_ADJUST_PRIORITY s390_adjust_priority
10659 #undef TARGET_SCHED_ISSUE_RATE
10660 #define TARGET_SCHED_ISSUE_RATE s390_issue_rate
10661 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
10662 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD s390_first_cycle_multipass_dfa_lookahead
10663
10664 #undef TARGET_SCHED_VARIABLE_ISSUE
10665 #define TARGET_SCHED_VARIABLE_ISSUE s390_sched_variable_issue
10666 #undef TARGET_SCHED_REORDER
10667 #define TARGET_SCHED_REORDER s390_sched_reorder
10668 #undef TARGET_SCHED_INIT
10669 #define TARGET_SCHED_INIT s390_sched_init
10670
10671 #undef TARGET_CANNOT_COPY_INSN_P
10672 #define TARGET_CANNOT_COPY_INSN_P s390_cannot_copy_insn_p
10673 #undef TARGET_RTX_COSTS
10674 #define TARGET_RTX_COSTS s390_rtx_costs
10675 #undef TARGET_ADDRESS_COST
10676 #define TARGET_ADDRESS_COST s390_address_cost
10677 #undef TARGET_REGISTER_MOVE_COST
10678 #define TARGET_REGISTER_MOVE_COST s390_register_move_cost
10679 #undef TARGET_MEMORY_MOVE_COST
10680 #define TARGET_MEMORY_MOVE_COST s390_memory_move_cost
10681
10682 #undef TARGET_MACHINE_DEPENDENT_REORG
10683 #define TARGET_MACHINE_DEPENDENT_REORG s390_reorg
10684
10685 #undef TARGET_VALID_POINTER_MODE
10686 #define TARGET_VALID_POINTER_MODE s390_valid_pointer_mode
10687
10688 #undef TARGET_BUILD_BUILTIN_VA_LIST
10689 #define TARGET_BUILD_BUILTIN_VA_LIST s390_build_builtin_va_list
10690 #undef TARGET_EXPAND_BUILTIN_VA_START
10691 #define TARGET_EXPAND_BUILTIN_VA_START s390_va_start
10692 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
10693 #define TARGET_GIMPLIFY_VA_ARG_EXPR s390_gimplify_va_arg
10694
10695 #undef TARGET_PROMOTE_FUNCTION_MODE
10696 #define TARGET_PROMOTE_FUNCTION_MODE s390_promote_function_mode
10697 #undef TARGET_PASS_BY_REFERENCE
10698 #define TARGET_PASS_BY_REFERENCE s390_pass_by_reference
10699
10700 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
10701 #define TARGET_FUNCTION_OK_FOR_SIBCALL s390_function_ok_for_sibcall
10702 #undef TARGET_FUNCTION_ARG
10703 #define TARGET_FUNCTION_ARG s390_function_arg
10704 #undef TARGET_FUNCTION_ARG_ADVANCE
10705 #define TARGET_FUNCTION_ARG_ADVANCE s390_function_arg_advance
10706 #undef TARGET_FUNCTION_VALUE
10707 #define TARGET_FUNCTION_VALUE s390_function_value
10708 #undef TARGET_LIBCALL_VALUE
10709 #define TARGET_LIBCALL_VALUE s390_libcall_value
10710
10711 #undef TARGET_FIXED_CONDITION_CODE_REGS
10712 #define TARGET_FIXED_CONDITION_CODE_REGS s390_fixed_condition_code_regs
10713
10714 #undef TARGET_CC_MODES_COMPATIBLE
10715 #define TARGET_CC_MODES_COMPATIBLE s390_cc_modes_compatible
10716
10717 #undef TARGET_INVALID_WITHIN_DOLOOP
10718 #define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_null
10719
10720 #ifdef HAVE_AS_TLS
10721 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
10722 #define TARGET_ASM_OUTPUT_DWARF_DTPREL s390_output_dwarf_dtprel
10723 #endif
10724
10725 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
10726 #undef TARGET_MANGLE_TYPE
10727 #define TARGET_MANGLE_TYPE s390_mangle_type
10728 #endif
10729
10730 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10731 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
10732
10733 #undef TARGET_PREFERRED_RELOAD_CLASS
10734 #define TARGET_PREFERRED_RELOAD_CLASS s390_preferred_reload_class
10735
10736 #undef TARGET_SECONDARY_RELOAD
10737 #define TARGET_SECONDARY_RELOAD s390_secondary_reload
10738
10739 #undef TARGET_LIBGCC_CMP_RETURN_MODE
10740 #define TARGET_LIBGCC_CMP_RETURN_MODE s390_libgcc_cmp_return_mode
10741
10742 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
10743 #define TARGET_LIBGCC_SHIFT_COUNT_MODE s390_libgcc_shift_count_mode
10744
10745 #undef TARGET_LEGITIMATE_ADDRESS_P
10746 #define TARGET_LEGITIMATE_ADDRESS_P s390_legitimate_address_p
10747
10748 #undef TARGET_LEGITIMATE_CONSTANT_P
10749 #define TARGET_LEGITIMATE_CONSTANT_P s390_legitimate_constant_p
10750
10751 #undef TARGET_CAN_ELIMINATE
10752 #define TARGET_CAN_ELIMINATE s390_can_eliminate
10753
10754 #undef TARGET_CONDITIONAL_REGISTER_USAGE
10755 #define TARGET_CONDITIONAL_REGISTER_USAGE s390_conditional_register_usage
10756
10757 #undef TARGET_LOOP_UNROLL_ADJUST
10758 #define TARGET_LOOP_UNROLL_ADJUST s390_loop_unroll_adjust
10759
10760 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
10761 #define TARGET_ASM_TRAMPOLINE_TEMPLATE s390_asm_trampoline_template
10762 #undef TARGET_TRAMPOLINE_INIT
10763 #define TARGET_TRAMPOLINE_INIT s390_trampoline_init
10764
10765 #undef TARGET_UNWIND_WORD_MODE
10766 #define TARGET_UNWIND_WORD_MODE s390_unwind_word_mode
10767
10768 struct gcc_target targetm = TARGET_INITIALIZER;
10769
10770 #include "gt-s390.h"