1 /* Subroutines used for code generation on IBM S/390 and zSeries
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006,
3 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
4 Contributed by Hartmut Penner (hpenner@de.ibm.com) and
5 Ulrich Weigand (uweigand@de.ibm.com) and
6 Andreas Krebbel (Andreas.Krebbel@de.ibm.com).
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "tm_p.h"
31 #include "regs.h"
32 #include "hard-reg-set.h"
33 #include "insn-config.h"
34 #include "conditions.h"
35 #include "output.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "except.h"
39 #include "function.h"
40 #include "recog.h"
41 #include "expr.h"
42 #include "reload.h"
43 #include "diagnostic-core.h"
44 #include "basic-block.h"
45 #include "integrate.h"
46 #include "ggc.h"
47 #include "target.h"
48 #include "target-def.h"
49 #include "debug.h"
50 #include "langhooks.h"
51 #include "optabs.h"
52 #include "gimple.h"
53 #include "df.h"
54 #include "params.h"
55 #include "cfgloop.h"
56 #include "opts.h"
57
58 /* Define the specific costs for a given cpu. */
59
60 struct processor_costs
61 {
62 /* multiplication */
63 const int m; /* cost of an M instruction. */
64 const int mghi; /* cost of an MGHI instruction. */
65 const int mh; /* cost of an MH instruction. */
66 const int mhi; /* cost of an MHI instruction. */
67 const int ml; /* cost of an ML instruction. */
68 const int mr; /* cost of an MR instruction. */
69 const int ms; /* cost of an MS instruction. */
70 const int msg; /* cost of an MSG instruction. */
71 const int msgf; /* cost of an MSGF instruction. */
72 const int msgfr; /* cost of an MSGFR instruction. */
73 const int msgr; /* cost of an MSGR instruction. */
74 const int msr; /* cost of an MSR instruction. */
75 const int mult_df; /* cost of multiplication in DFmode. */
76 const int mxbr;
77 /* square root */
78 const int sqxbr; /* cost of square root in TFmode. */
79 const int sqdbr; /* cost of square root in DFmode. */
80 const int sqebr; /* cost of square root in SFmode. */
81 /* multiply and add */
82 const int madbr; /* cost of multiply and add in DFmode. */
83 const int maebr; /* cost of multiply and add in SFmode. */
84 /* division */
85 const int dxbr;
86 const int ddbr;
87 const int debr;
88 const int dlgr;
89 const int dlr;
90 const int dr;
91 const int dsgfr;
92 const int dsgr;
93 };
94
95 const struct processor_costs *s390_cost;
96
97 static const
98 struct processor_costs z900_cost =
99 {
100 COSTS_N_INSNS (5), /* M */
101 COSTS_N_INSNS (10), /* MGHI */
102 COSTS_N_INSNS (5), /* MH */
103 COSTS_N_INSNS (4), /* MHI */
104 COSTS_N_INSNS (5), /* ML */
105 COSTS_N_INSNS (5), /* MR */
106 COSTS_N_INSNS (4), /* MS */
107 COSTS_N_INSNS (15), /* MSG */
108 COSTS_N_INSNS (7), /* MSGF */
109 COSTS_N_INSNS (7), /* MSGFR */
110 COSTS_N_INSNS (10), /* MSGR */
111 COSTS_N_INSNS (4), /* MSR */
112 COSTS_N_INSNS (7), /* multiplication in DFmode */
113 COSTS_N_INSNS (13), /* MXBR */
114 COSTS_N_INSNS (136), /* SQXBR */
115 COSTS_N_INSNS (44), /* SQDBR */
116 COSTS_N_INSNS (35), /* SQEBR */
117 COSTS_N_INSNS (18), /* MADBR */
118 COSTS_N_INSNS (13), /* MAEBR */
119 COSTS_N_INSNS (134), /* DXBR */
120 COSTS_N_INSNS (30), /* DDBR */
121 COSTS_N_INSNS (27), /* DEBR */
122 COSTS_N_INSNS (220), /* DLGR */
123 COSTS_N_INSNS (34), /* DLR */
124 COSTS_N_INSNS (34), /* DR */
125 COSTS_N_INSNS (32), /* DSGFR */
126 COSTS_N_INSNS (32), /* DSGR */
127 };
128
129 static const
130 struct processor_costs z990_cost =
131 {
132 COSTS_N_INSNS (4), /* M */
133 COSTS_N_INSNS (2), /* MGHI */
134 COSTS_N_INSNS (2), /* MH */
135 COSTS_N_INSNS (2), /* MHI */
136 COSTS_N_INSNS (4), /* ML */
137 COSTS_N_INSNS (4), /* MR */
138 COSTS_N_INSNS (5), /* MS */
139 COSTS_N_INSNS (6), /* MSG */
140 COSTS_N_INSNS (4), /* MSGF */
141 COSTS_N_INSNS (4), /* MSGFR */
142 COSTS_N_INSNS (4), /* MSGR */
143 COSTS_N_INSNS (4), /* MSR */
144 COSTS_N_INSNS (1), /* multiplication in DFmode */
145 COSTS_N_INSNS (28), /* MXBR */
146 COSTS_N_INSNS (130), /* SQXBR */
147 COSTS_N_INSNS (66), /* SQDBR */
148 COSTS_N_INSNS (38), /* SQEBR */
149 COSTS_N_INSNS (1), /* MADBR */
150 COSTS_N_INSNS (1), /* MAEBR */
151 COSTS_N_INSNS (60), /* DXBR */
152 COSTS_N_INSNS (40), /* DDBR */
153 COSTS_N_INSNS (26), /* DEBR */
154 COSTS_N_INSNS (176), /* DLGR */
155 COSTS_N_INSNS (31), /* DLR */
156 COSTS_N_INSNS (31), /* DR */
157 COSTS_N_INSNS (31), /* DSGFR */
158 COSTS_N_INSNS (31), /* DSGR */
159 };
160
161 static const
162 struct processor_costs z9_109_cost =
163 {
164 COSTS_N_INSNS (4), /* M */
165 COSTS_N_INSNS (2), /* MGHI */
166 COSTS_N_INSNS (2), /* MH */
167 COSTS_N_INSNS (2), /* MHI */
168 COSTS_N_INSNS (4), /* ML */
169 COSTS_N_INSNS (4), /* MR */
170 COSTS_N_INSNS (5), /* MS */
171 COSTS_N_INSNS (6), /* MSG */
172 COSTS_N_INSNS (4), /* MSGF */
173 COSTS_N_INSNS (4), /* MSGFR */
174 COSTS_N_INSNS (4), /* MSGR */
175 COSTS_N_INSNS (4), /* MSR */
176 COSTS_N_INSNS (1), /* multiplication in DFmode */
177 COSTS_N_INSNS (28), /* MXBR */
178 COSTS_N_INSNS (130), /* SQXBR */
179 COSTS_N_INSNS (66), /* SQDBR */
180 COSTS_N_INSNS (38), /* SQEBR */
181 COSTS_N_INSNS (1), /* MADBR */
182 COSTS_N_INSNS (1), /* MAEBR */
183 COSTS_N_INSNS (60), /* DXBR */
184 COSTS_N_INSNS (40), /* DDBR */
185 COSTS_N_INSNS (26), /* DEBR */
186 COSTS_N_INSNS (30), /* DLGR */
187 COSTS_N_INSNS (23), /* DLR */
188 COSTS_N_INSNS (23), /* DR */
189 COSTS_N_INSNS (24), /* DSGFR */
190 COSTS_N_INSNS (24), /* DSGR */
191 };
192
193 static const
194 struct processor_costs z10_cost =
195 {
196 COSTS_N_INSNS (10), /* M */
197 COSTS_N_INSNS (10), /* MGHI */
198 COSTS_N_INSNS (10), /* MH */
199 COSTS_N_INSNS (10), /* MHI */
200 COSTS_N_INSNS (10), /* ML */
201 COSTS_N_INSNS (10), /* MR */
202 COSTS_N_INSNS (10), /* MS */
203 COSTS_N_INSNS (10), /* MSG */
204 COSTS_N_INSNS (10), /* MSGF */
205 COSTS_N_INSNS (10), /* MSGFR */
206 COSTS_N_INSNS (10), /* MSGR */
207 COSTS_N_INSNS (10), /* MSR */
208 COSTS_N_INSNS (1) , /* multiplication in DFmode */
209 COSTS_N_INSNS (50), /* MXBR */
210 COSTS_N_INSNS (120), /* SQXBR */
211 COSTS_N_INSNS (52), /* SQDBR */
212 COSTS_N_INSNS (38), /* SQEBR */
213 COSTS_N_INSNS (1), /* MADBR */
214 COSTS_N_INSNS (1), /* MAEBR */
215 COSTS_N_INSNS (111), /* DXBR */
216 COSTS_N_INSNS (39), /* DDBR */
217 COSTS_N_INSNS (32), /* DEBR */
218 COSTS_N_INSNS (160), /* DLGR */
219 COSTS_N_INSNS (71), /* DLR */
220 COSTS_N_INSNS (71), /* DR */
221 COSTS_N_INSNS (71), /* DSGFR */
222 COSTS_N_INSNS (71), /* DSGR */
223 };
224
225 static const
226 struct processor_costs z196_cost =
227 {
228 COSTS_N_INSNS (7), /* M */
229 COSTS_N_INSNS (5), /* MGHI */
230 COSTS_N_INSNS (5), /* MH */
231 COSTS_N_INSNS (5), /* MHI */
232 COSTS_N_INSNS (7), /* ML */
233 COSTS_N_INSNS (7), /* MR */
234 COSTS_N_INSNS (6), /* MS */
235 COSTS_N_INSNS (8), /* MSG */
236 COSTS_N_INSNS (6), /* MSGF */
237 COSTS_N_INSNS (6), /* MSGFR */
238 COSTS_N_INSNS (8), /* MSGR */
239 COSTS_N_INSNS (6), /* MSR */
240 COSTS_N_INSNS (1) , /* multiplication in DFmode */
241 COSTS_N_INSNS (40), /* MXBR B+40 */
242 COSTS_N_INSNS (100), /* SQXBR B+100 */
243 COSTS_N_INSNS (42), /* SQDBR B+42 */
244 COSTS_N_INSNS (28), /* SQEBR B+28 */
245 COSTS_N_INSNS (1), /* MADBR B */
246 COSTS_N_INSNS (1), /* MAEBR B */
247 COSTS_N_INSNS (101), /* DXBR B+101 */
248 COSTS_N_INSNS (29), /* DDBR */
249 COSTS_N_INSNS (22), /* DEBR */
250 COSTS_N_INSNS (160), /* DLGR cracked */
251 COSTS_N_INSNS (160), /* DLR cracked */
252 COSTS_N_INSNS (160), /* DR expanded */
253 COSTS_N_INSNS (160), /* DSGFR cracked */
254 COSTS_N_INSNS (160), /* DSGR cracked */
255 };
256
257 extern int reload_completed;
258
259 /* Kept up to date using the SCHED_VARIABLE_ISSUE hook. */
260 static rtx last_scheduled_insn;
261
262 /* Structure used to hold the components of a S/390 memory
263 address. A legitimate address on S/390 is of the general
264 form
265 base + index + displacement
266 where any of the components is optional.
267
268 base and index are registers of the class ADDR_REGS,
269 displacement is an unsigned 12-bit immediate constant. */
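/* As an illustration of the form above (an informal example): the
   assembler operand 4092(%r3,%r5) encodes base register %r5, index
   register %r3 and displacement 4092; any of the three parts may be
   absent.  */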
270
271 struct s390_address
272 {
273 rtx base;
274 rtx indx;
275 rtx disp;
276 bool pointer;
277 bool literal_pool;
278 };
279
280 /* The following structure is embedded in the machine
281 specific part of struct function. */
282
283 struct GTY (()) s390_frame_layout
284 {
285 /* Offset within stack frame. */
286 HOST_WIDE_INT gprs_offset;
287 HOST_WIDE_INT f0_offset;
288 HOST_WIDE_INT f4_offset;
289 HOST_WIDE_INT f8_offset;
290 HOST_WIDE_INT backchain_offset;
291
 292   /* Numbers of the first and last gpr for which slots in the
 293      register save area are reserved.  */
294 int first_save_gpr_slot;
295 int last_save_gpr_slot;
296
297 /* Number of first and last gpr to be saved, restored. */
298 int first_save_gpr;
299 int first_restore_gpr;
300 int last_save_gpr;
301 int last_restore_gpr;
302
303 /* Bits standing for floating point registers. Set, if the
304 respective register has to be saved. Starting with reg 16 (f0)
305 at the rightmost bit.
306 Bit 15 - 8 7 6 5 4 3 2 1 0
307 fpr 15 - 8 7 5 3 1 6 4 2 0
308 reg 31 - 24 23 22 21 20 19 18 17 16 */
309 unsigned int fpr_bitmap;
310
311 /* Number of floating point registers f8-f15 which must be saved. */
312 int high_fprs;
313
314 /* Set if return address needs to be saved.
315 This flag is set by s390_return_addr_rtx if it could not use
316 the initial value of r14 and therefore depends on r14 saved
317 to the stack. */
318 bool save_return_addr_p;
319
320 /* Size of stack frame. */
321 HOST_WIDE_INT frame_size;
322 };
323
324 /* Define the structure for the machine field in struct function. */
325
326 struct GTY(()) machine_function
327 {
328 struct s390_frame_layout frame_layout;
329
330 /* Literal pool base register. */
331 rtx base_reg;
332
333 /* True if we may need to perform branch splitting. */
334 bool split_branches_pending_p;
335
336 /* Some local-dynamic TLS symbol name. */
337 const char *some_ld_name;
338
339 bool has_landing_pad_p;
340 };
341
 342 /* A few accessor macros for struct cfun->machine->s390_frame_layout.  */
343
344 #define cfun_frame_layout (cfun->machine->frame_layout)
345 #define cfun_save_high_fprs_p (!!cfun_frame_layout.high_fprs)
346 #define cfun_gprs_save_area_size ((cfun_frame_layout.last_save_gpr_slot - \
347 cfun_frame_layout.first_save_gpr_slot + 1) * UNITS_PER_LONG)
348 #define cfun_set_fpr_bit(BITNUM) (cfun->machine->frame_layout.fpr_bitmap |= \
349 (1 << (BITNUM)))
350 #define cfun_fpr_bit_p(BITNUM) (!!(cfun->machine->frame_layout.fpr_bitmap & \
351 (1 << (BITNUM))))
352
353 /* Number of GPRs and FPRs used for argument passing. */
354 #define GP_ARG_NUM_REG 5
355 #define FP_ARG_NUM_REG (TARGET_64BIT? 4 : 2)
356
357 /* A couple of shortcuts. */
358 #define CONST_OK_FOR_J(x) \
359 CONST_OK_FOR_CONSTRAINT_P((x), 'J', "J")
360 #define CONST_OK_FOR_K(x) \
361 CONST_OK_FOR_CONSTRAINT_P((x), 'K', "K")
362 #define CONST_OK_FOR_Os(x) \
363 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
364 #define CONST_OK_FOR_Op(x) \
365 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
366 #define CONST_OK_FOR_On(x) \
367 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")
368
369 #define REGNO_PAIR_OK(REGNO, MODE) \
370 (HARD_REGNO_NREGS ((REGNO), (MODE)) == 1 || !((REGNO) & 1))
371
372 /* That's the read ahead of the dynamic branch prediction unit in
373 bytes on a z10 (or higher) CPU. */
374 #define PREDICT_DISTANCE (TARGET_Z10 ? 384 : 2048)
375
376 /* Return the alignment for LABEL. We default to the -falign-labels
377 value except for the literal pool base label. */
378 int
379 s390_label_align (rtx label)
380 {
381 rtx prev_insn = prev_active_insn (label);
382
383 if (prev_insn == NULL_RTX)
384 goto old;
385
386 prev_insn = single_set (prev_insn);
387
388 if (prev_insn == NULL_RTX)
389 goto old;
390
391 prev_insn = SET_SRC (prev_insn);
392
393 /* Don't align literal pool base labels. */
394 if (GET_CODE (prev_insn) == UNSPEC
395 && XINT (prev_insn, 1) == UNSPEC_MAIN_BASE)
396 return 0;
397
398 old:
399 return align_labels_log;
400 }
401
402 static enum machine_mode
403 s390_libgcc_cmp_return_mode (void)
404 {
405 return TARGET_64BIT ? DImode : SImode;
406 }
407
408 static enum machine_mode
409 s390_libgcc_shift_count_mode (void)
410 {
411 return TARGET_64BIT ? DImode : SImode;
412 }
413
414 static enum machine_mode
415 s390_unwind_word_mode (void)
416 {
417 return TARGET_64BIT ? DImode : SImode;
418 }
419
420 /* Return true if the back end supports mode MODE. */
421 static bool
422 s390_scalar_mode_supported_p (enum machine_mode mode)
423 {
 424   /* In contrast to the default implementation, reject TImode constants
 425      on 31-bit TARGET_ZARCH for ABI compliance.  */
426 if (!TARGET_64BIT && TARGET_ZARCH && mode == TImode)
427 return false;
428
429 if (DECIMAL_FLOAT_MODE_P (mode))
430 return default_decimal_float_supported_p ();
431
432 return default_scalar_mode_supported_p (mode);
433 }
434
435 /* Set the has_landing_pad_p flag in struct machine_function to VALUE. */
436
437 void
438 s390_set_has_landing_pad_p (bool value)
439 {
440 cfun->machine->has_landing_pad_p = value;
441 }
442
443 /* If two condition code modes are compatible, return a condition code
444 mode which is compatible with both. Otherwise, return
445 VOIDmode. */
446
447 static enum machine_mode
448 s390_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
449 {
450 if (m1 == m2)
451 return m1;
452
453 switch (m1)
454 {
455 case CCZmode:
456 if (m2 == CCUmode || m2 == CCTmode || m2 == CCZ1mode
457 || m2 == CCSmode || m2 == CCSRmode || m2 == CCURmode)
458 return m2;
459 return VOIDmode;
460
461 case CCSmode:
462 case CCUmode:
463 case CCTmode:
464 case CCSRmode:
465 case CCURmode:
466 case CCZ1mode:
467 if (m2 == CCZmode)
468 return m1;
469
470 return VOIDmode;
471
472 default:
473 return VOIDmode;
474 }
475 return VOIDmode;
476 }
477
478 /* Return true if SET either doesn't set the CC register, or else
479 the source and destination have matching CC modes and that
480 CC mode is at least as constrained as REQ_MODE. */
481
482 static bool
483 s390_match_ccmode_set (rtx set, enum machine_mode req_mode)
484 {
485 enum machine_mode set_mode;
486
487 gcc_assert (GET_CODE (set) == SET);
488
489 if (GET_CODE (SET_DEST (set)) != REG || !CC_REGNO_P (REGNO (SET_DEST (set))))
490 return 1;
491
492 set_mode = GET_MODE (SET_DEST (set));
493 switch (set_mode)
494 {
495 case CCSmode:
496 case CCSRmode:
497 case CCUmode:
498 case CCURmode:
499 case CCLmode:
500 case CCL1mode:
501 case CCL2mode:
502 case CCL3mode:
503 case CCT1mode:
504 case CCT2mode:
505 case CCT3mode:
506 if (req_mode != set_mode)
507 return 0;
508 break;
509
510 case CCZmode:
511 if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode
512 && req_mode != CCSRmode && req_mode != CCURmode)
513 return 0;
514 break;
515
516 case CCAPmode:
517 case CCANmode:
518 if (req_mode != CCAmode)
519 return 0;
520 break;
521
522 default:
523 gcc_unreachable ();
524 }
525
526 return (GET_MODE (SET_SRC (set)) == set_mode);
527 }
528
529 /* Return true if every SET in INSN that sets the CC register
530 has source and destination with matching CC modes and that
531 CC mode is at least as constrained as REQ_MODE.
532 If REQ_MODE is VOIDmode, always return false. */
533
534 bool
535 s390_match_ccmode (rtx insn, enum machine_mode req_mode)
536 {
537 int i;
538
539 /* s390_tm_ccmode returns VOIDmode to indicate failure. */
540 if (req_mode == VOIDmode)
541 return false;
542
543 if (GET_CODE (PATTERN (insn)) == SET)
544 return s390_match_ccmode_set (PATTERN (insn), req_mode);
545
546 if (GET_CODE (PATTERN (insn)) == PARALLEL)
547 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
548 {
549 rtx set = XVECEXP (PATTERN (insn), 0, i);
550 if (GET_CODE (set) == SET)
551 if (!s390_match_ccmode_set (set, req_mode))
552 return false;
553 }
554
555 return true;
556 }
557
558 /* If a test-under-mask instruction can be used to implement
559 (compare (and ... OP1) OP2), return the CC mode required
560 to do that. Otherwise, return VOIDmode.
561 MIXED is true if the instruction can distinguish between
 562    CC1 and CC2 for mixed selected bits (TMxx); it is false
563 if the instruction cannot (TM). */
564
565 enum machine_mode
566 s390_tm_ccmode (rtx op1, rtx op2, bool mixed)
567 {
568 int bit0, bit1;
569
570 /* ??? Fixme: should work on CONST_DOUBLE as well. */
571 if (GET_CODE (op1) != CONST_INT || GET_CODE (op2) != CONST_INT)
572 return VOIDmode;
573
574 /* Selected bits all zero: CC0.
575 e.g.: int a; if ((a & (16 + 128)) == 0) */
576 if (INTVAL (op2) == 0)
577 return CCTmode;
578
579 /* Selected bits all one: CC3.
580 e.g.: int a; if ((a & (16 + 128)) == 16 + 128) */
581 if (INTVAL (op2) == INTVAL (op1))
582 return CCT3mode;
583
584 /* Exactly two bits selected, mixed zeroes and ones: CC1 or CC2. e.g.:
585 int a;
586 if ((a & (16 + 128)) == 16) -> CCT1
587 if ((a & (16 + 128)) == 128) -> CCT2 */
588 if (mixed)
589 {
590 bit1 = exact_log2 (INTVAL (op2));
591 bit0 = exact_log2 (INTVAL (op1) ^ INTVAL (op2));
592 if (bit0 != -1 && bit1 != -1)
593 return bit0 > bit1 ? CCT1mode : CCT2mode;
594 }
595
596 return VOIDmode;
597 }
598
599 /* Given a comparison code OP (EQ, NE, etc.) and the operands
600 OP0 and OP1 of a COMPARE, return the mode to be used for the
601 comparison. */
602
603 enum machine_mode
604 s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
605 {
606 switch (code)
607 {
608 case EQ:
609 case NE:
610 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
611 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
612 return CCAPmode;
613 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
614 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
615 return CCAPmode;
616 if ((GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
617 || GET_CODE (op1) == NEG)
618 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
619 return CCLmode;
620
621 if (GET_CODE (op0) == AND)
622 {
623 /* Check whether we can potentially do it via TM. */
624 enum machine_mode ccmode;
625 ccmode = s390_tm_ccmode (XEXP (op0, 1), op1, 1);
626 if (ccmode != VOIDmode)
627 {
628 /* Relax CCTmode to CCZmode to allow fall-back to AND
629 if that turns out to be beneficial. */
630 return ccmode == CCTmode ? CCZmode : ccmode;
631 }
632 }
633
634 if (register_operand (op0, HImode)
635 && GET_CODE (op1) == CONST_INT
636 && (INTVAL (op1) == -1 || INTVAL (op1) == 65535))
637 return CCT3mode;
638 if (register_operand (op0, QImode)
639 && GET_CODE (op1) == CONST_INT
640 && (INTVAL (op1) == -1 || INTVAL (op1) == 255))
641 return CCT3mode;
642
643 return CCZmode;
644
645 case LE:
646 case LT:
647 case GE:
648 case GT:
649 /* The only overflow condition of NEG and ABS happens when
650 -INT_MAX is used as parameter, which stays negative. So
651 we have an overflow from a positive value to a negative.
652 Using CCAP mode the resulting cc can be used for comparisons. */
653 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
654 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
655 return CCAPmode;
656
657 /* If constants are involved in an add instruction it is possible to use
658 the resulting cc for comparisons with zero. Knowing the sign of the
659 constant the overflow behavior gets predictable. e.g.:
660 int a, b; if ((b = a + c) > 0)
661 with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP */
662 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
663 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
664 {
665 if (INTVAL (XEXP((op0), 1)) < 0)
666 return CCANmode;
667 else
668 return CCAPmode;
669 }
670 /* Fall through. */
671 case UNORDERED:
672 case ORDERED:
673 case UNEQ:
674 case UNLE:
675 case UNLT:
676 case UNGE:
677 case UNGT:
678 case LTGT:
679 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
680 && GET_CODE (op1) != CONST_INT)
681 return CCSRmode;
682 return CCSmode;
683
684 case LTU:
685 case GEU:
686 if (GET_CODE (op0) == PLUS
687 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
688 return CCL1mode;
689
690 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
691 && GET_CODE (op1) != CONST_INT)
692 return CCURmode;
693 return CCUmode;
694
695 case LEU:
696 case GTU:
697 if (GET_CODE (op0) == MINUS
698 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
699 return CCL2mode;
700
701 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
702 && GET_CODE (op1) != CONST_INT)
703 return CCURmode;
704 return CCUmode;
705
706 default:
707 gcc_unreachable ();
708 }
709 }
710
711 /* Replace the comparison OP0 CODE OP1 by a semantically equivalent one
712 that we can implement more efficiently. */
713
714 void
715 s390_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1)
716 {
717 /* Convert ZERO_EXTRACT back to AND to enable TM patterns. */
718 if ((*code == EQ || *code == NE)
719 && *op1 == const0_rtx
720 && GET_CODE (*op0) == ZERO_EXTRACT
721 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
722 && GET_CODE (XEXP (*op0, 2)) == CONST_INT
723 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
724 {
725 rtx inner = XEXP (*op0, 0);
726 HOST_WIDE_INT modesize = GET_MODE_BITSIZE (GET_MODE (inner));
727 HOST_WIDE_INT len = INTVAL (XEXP (*op0, 1));
728 HOST_WIDE_INT pos = INTVAL (XEXP (*op0, 2));
729
730 if (len > 0 && len < modesize
731 && pos >= 0 && pos + len <= modesize
732 && modesize <= HOST_BITS_PER_WIDE_INT)
733 {
734 unsigned HOST_WIDE_INT block;
735 block = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
736 block <<= modesize - pos - len;
737
738 *op0 = gen_rtx_AND (GET_MODE (inner), inner,
739 gen_int_mode (block, GET_MODE (inner)));
740 }
741 }
742
743 /* Narrow AND of memory against immediate to enable TM. */
744 if ((*code == EQ || *code == NE)
745 && *op1 == const0_rtx
746 && GET_CODE (*op0) == AND
747 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
748 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
749 {
750 rtx inner = XEXP (*op0, 0);
751 rtx mask = XEXP (*op0, 1);
752
753 /* Ignore paradoxical SUBREGs if all extra bits are masked out. */
754 if (GET_CODE (inner) == SUBREG
755 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (inner)))
756 && (GET_MODE_SIZE (GET_MODE (inner))
757 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
758 && ((INTVAL (mask)
759 & GET_MODE_MASK (GET_MODE (inner))
760 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
761 == 0))
762 inner = SUBREG_REG (inner);
763
764 /* Do not change volatile MEMs. */
765 if (MEM_P (inner) && !MEM_VOLATILE_P (inner))
766 {
767 int part = s390_single_part (XEXP (*op0, 1),
768 GET_MODE (inner), QImode, 0);
769 if (part >= 0)
770 {
771 mask = gen_int_mode (s390_extract_part (mask, QImode, 0), QImode);
772 inner = adjust_address_nv (inner, QImode, part);
773 *op0 = gen_rtx_AND (QImode, inner, mask);
774 }
775 }
776 }
777
778 /* Narrow comparisons against 0xffff to HImode if possible. */
779 if ((*code == EQ || *code == NE)
780 && GET_CODE (*op1) == CONST_INT
781 && INTVAL (*op1) == 0xffff
782 && SCALAR_INT_MODE_P (GET_MODE (*op0))
783 && (nonzero_bits (*op0, GET_MODE (*op0))
784 & ~(unsigned HOST_WIDE_INT) 0xffff) == 0)
785 {
786 *op0 = gen_lowpart (HImode, *op0);
787 *op1 = constm1_rtx;
788 }
789
790 /* Remove redundant UNSPEC_CCU_TO_INT conversions if possible. */
791 if (GET_CODE (*op0) == UNSPEC
792 && XINT (*op0, 1) == UNSPEC_CCU_TO_INT
793 && XVECLEN (*op0, 0) == 1
794 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCUmode
795 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
796 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
797 && *op1 == const0_rtx)
798 {
799 enum rtx_code new_code = UNKNOWN;
800 switch (*code)
801 {
802 case EQ: new_code = EQ; break;
803 case NE: new_code = NE; break;
804 case LT: new_code = GTU; break;
805 case GT: new_code = LTU; break;
806 case LE: new_code = GEU; break;
807 case GE: new_code = LEU; break;
808 default: break;
809 }
810
811 if (new_code != UNKNOWN)
812 {
813 *op0 = XVECEXP (*op0, 0, 0);
814 *code = new_code;
815 }
816 }
817
818 /* Remove redundant UNSPEC_CCZ_TO_INT conversions if possible. */
819 if (GET_CODE (*op0) == UNSPEC
820 && XINT (*op0, 1) == UNSPEC_CCZ_TO_INT
821 && XVECLEN (*op0, 0) == 1
822 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCZmode
823 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
824 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
825 && *op1 == const0_rtx)
826 {
827 enum rtx_code new_code = UNKNOWN;
828 switch (*code)
829 {
830 case EQ: new_code = EQ; break;
831 case NE: new_code = NE; break;
832 default: break;
833 }
834
835 if (new_code != UNKNOWN)
836 {
837 *op0 = XVECEXP (*op0, 0, 0);
838 *code = new_code;
839 }
840 }
841
842 /* Simplify cascaded EQ, NE with const0_rtx. */
843 if ((*code == NE || *code == EQ)
844 && (GET_CODE (*op0) == EQ || GET_CODE (*op0) == NE)
845 && GET_MODE (*op0) == SImode
846 && GET_MODE (XEXP (*op0, 0)) == CCZ1mode
847 && REG_P (XEXP (*op0, 0))
848 && XEXP (*op0, 1) == const0_rtx
849 && *op1 == const0_rtx)
850 {
851 if ((*code == EQ && GET_CODE (*op0) == NE)
852 || (*code == NE && GET_CODE (*op0) == EQ))
853 *code = EQ;
854 else
855 *code = NE;
856 *op0 = XEXP (*op0, 0);
857 }
858
859 /* Prefer register over memory as first operand. */
860 if (MEM_P (*op0) && REG_P (*op1))
861 {
862 rtx tem = *op0; *op0 = *op1; *op1 = tem;
863 *code = swap_condition (*code);
864 }
865 }
866
867 /* Emit a compare instruction suitable to implement the comparison
868 OP0 CODE OP1. Return the correct condition RTL to be placed in
869 the IF_THEN_ELSE of the conditional branch testing the result. */
870
871 rtx
872 s390_emit_compare (enum rtx_code code, rtx op0, rtx op1)
873 {
874 enum machine_mode mode = s390_select_ccmode (code, op0, op1);
875 rtx cc;
876
877 /* Do not output a redundant compare instruction if a compare_and_swap
878 pattern already computed the result and the machine modes are compatible. */
879 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
880 {
881 gcc_assert (s390_cc_modes_compatible (GET_MODE (op0), mode)
882 == GET_MODE (op0));
883 cc = op0;
884 }
885 else
886 {
887 cc = gen_rtx_REG (mode, CC_REGNUM);
888 emit_insn (gen_rtx_SET (VOIDmode, cc, gen_rtx_COMPARE (mode, op0, op1)));
889 }
890
891 return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
892 }
893
894 /* Emit a SImode compare and swap instruction setting MEM to NEW_RTX if OLD
895 matches CMP.
896 Return the correct condition RTL to be placed in the IF_THEN_ELSE of the
897 conditional branch testing the result. */
898
899 static rtx
900 s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem, rtx cmp, rtx new_rtx)
901 {
902 emit_insn (gen_sync_compare_and_swapsi (old, mem, cmp, new_rtx));
903 return s390_emit_compare (code, gen_rtx_REG (CCZ1mode, CC_REGNUM), const0_rtx);
904 }
905
906 /* Emit a jump instruction to TARGET. If COND is NULL_RTX, emit an
907 unconditional jump, else a conditional jump under condition COND. */
908
909 void
910 s390_emit_jump (rtx target, rtx cond)
911 {
912 rtx insn;
913
914 target = gen_rtx_LABEL_REF (VOIDmode, target);
915 if (cond)
916 target = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, target, pc_rtx);
917
918 insn = gen_rtx_SET (VOIDmode, pc_rtx, target);
919 emit_jump_insn (insn);
920 }
921
922 /* Return branch condition mask to implement a branch
923 specified by CODE. Return -1 for invalid comparisons. */
924
925 int
926 s390_branch_condition_mask (rtx code)
927 {
928 const int CC0 = 1 << 3;
929 const int CC1 = 1 << 2;
930 const int CC2 = 1 << 1;
931 const int CC3 = 1 << 0;
932
933 gcc_assert (GET_CODE (XEXP (code, 0)) == REG);
934 gcc_assert (REGNO (XEXP (code, 0)) == CC_REGNUM);
935 gcc_assert (XEXP (code, 1) == const0_rtx);
936
937 switch (GET_MODE (XEXP (code, 0)))
938 {
939 case CCZmode:
940 case CCZ1mode:
941 switch (GET_CODE (code))
942 {
943 case EQ: return CC0;
944 case NE: return CC1 | CC2 | CC3;
945 default: return -1;
946 }
947 break;
948
949 case CCT1mode:
950 switch (GET_CODE (code))
951 {
952 case EQ: return CC1;
953 case NE: return CC0 | CC2 | CC3;
954 default: return -1;
955 }
956 break;
957
958 case CCT2mode:
959 switch (GET_CODE (code))
960 {
961 case EQ: return CC2;
962 case NE: return CC0 | CC1 | CC3;
963 default: return -1;
964 }
965 break;
966
967 case CCT3mode:
968 switch (GET_CODE (code))
969 {
970 case EQ: return CC3;
971 case NE: return CC0 | CC1 | CC2;
972 default: return -1;
973 }
974 break;
975
976 case CCLmode:
977 switch (GET_CODE (code))
978 {
979 case EQ: return CC0 | CC2;
980 case NE: return CC1 | CC3;
981 default: return -1;
982 }
983 break;
984
985 case CCL1mode:
986 switch (GET_CODE (code))
987 {
988 case LTU: return CC2 | CC3; /* carry */
989 case GEU: return CC0 | CC1; /* no carry */
990 default: return -1;
991 }
992 break;
993
994 case CCL2mode:
995 switch (GET_CODE (code))
996 {
997 case GTU: return CC0 | CC1; /* borrow */
998 case LEU: return CC2 | CC3; /* no borrow */
999 default: return -1;
1000 }
1001 break;
1002
1003 case CCL3mode:
1004 switch (GET_CODE (code))
1005 {
1006 case EQ: return CC0 | CC2;
1007 case NE: return CC1 | CC3;
1008 case LTU: return CC1;
1009 case GTU: return CC3;
1010 case LEU: return CC1 | CC2;
1011 case GEU: return CC2 | CC3;
1012 default: return -1;
1013 }
1014
1015 case CCUmode:
1016 switch (GET_CODE (code))
1017 {
1018 case EQ: return CC0;
1019 case NE: return CC1 | CC2 | CC3;
1020 case LTU: return CC1;
1021 case GTU: return CC2;
1022 case LEU: return CC0 | CC1;
1023 case GEU: return CC0 | CC2;
1024 default: return -1;
1025 }
1026 break;
1027
1028 case CCURmode:
1029 switch (GET_CODE (code))
1030 {
1031 case EQ: return CC0;
1032 case NE: return CC2 | CC1 | CC3;
1033 case LTU: return CC2;
1034 case GTU: return CC1;
1035 case LEU: return CC0 | CC2;
1036 case GEU: return CC0 | CC1;
1037 default: return -1;
1038 }
1039 break;
1040
1041 case CCAPmode:
1042 switch (GET_CODE (code))
1043 {
1044 case EQ: return CC0;
1045 case NE: return CC1 | CC2 | CC3;
1046 case LT: return CC1 | CC3;
1047 case GT: return CC2;
1048 case LE: return CC0 | CC1 | CC3;
1049 case GE: return CC0 | CC2;
1050 default: return -1;
1051 }
1052 break;
1053
1054 case CCANmode:
1055 switch (GET_CODE (code))
1056 {
1057 case EQ: return CC0;
1058 case NE: return CC1 | CC2 | CC3;
1059 case LT: return CC1;
1060 case GT: return CC2 | CC3;
1061 case LE: return CC0 | CC1;
1062 case GE: return CC0 | CC2 | CC3;
1063 default: return -1;
1064 }
1065 break;
1066
1067 case CCSmode:
1068 switch (GET_CODE (code))
1069 {
1070 case EQ: return CC0;
1071 case NE: return CC1 | CC2 | CC3;
1072 case LT: return CC1;
1073 case GT: return CC2;
1074 case LE: return CC0 | CC1;
1075 case GE: return CC0 | CC2;
1076 case UNORDERED: return CC3;
1077 case ORDERED: return CC0 | CC1 | CC2;
1078 case UNEQ: return CC0 | CC3;
1079 case UNLT: return CC1 | CC3;
1080 case UNGT: return CC2 | CC3;
1081 case UNLE: return CC0 | CC1 | CC3;
1082 case UNGE: return CC0 | CC2 | CC3;
1083 case LTGT: return CC1 | CC2;
1084 default: return -1;
1085 }
1086 break;
1087
1088 case CCSRmode:
1089 switch (GET_CODE (code))
1090 {
1091 case EQ: return CC0;
1092 case NE: return CC2 | CC1 | CC3;
1093 case LT: return CC2;
1094 case GT: return CC1;
1095 case LE: return CC0 | CC2;
1096 case GE: return CC0 | CC1;
1097 case UNORDERED: return CC3;
1098 case ORDERED: return CC0 | CC2 | CC1;
1099 case UNEQ: return CC0 | CC3;
1100 case UNLT: return CC2 | CC3;
1101 case UNGT: return CC1 | CC3;
1102 case UNLE: return CC0 | CC2 | CC3;
1103 case UNGE: return CC0 | CC1 | CC3;
1104 case LTGT: return CC2 | CC1;
1105 default: return -1;
1106 }
1107 break;
1108
1109 default:
1110 return -1;
1111 }
1112 }
1113
1114
1115 /* Return branch condition mask to implement a compare and branch
1116 specified by CODE. Return -1 for invalid comparisons. */
1117
1118 int
1119 s390_compare_and_branch_condition_mask (rtx code)
1120 {
1121 const int CC0 = 1 << 3;
1122 const int CC1 = 1 << 2;
1123 const int CC2 = 1 << 1;
1124
1125 switch (GET_CODE (code))
1126 {
1127 case EQ:
1128 return CC0;
1129 case NE:
1130 return CC1 | CC2;
1131 case LT:
1132 case LTU:
1133 return CC1;
1134 case GT:
1135 case GTU:
1136 return CC2;
1137 case LE:
1138 case LEU:
1139 return CC0 | CC1;
1140 case GE:
1141 case GEU:
1142 return CC0 | CC2;
1143 default:
1144 gcc_unreachable ();
1145 }
1146 return -1;
1147 }
1148
1149 /* If INV is false, return assembler mnemonic string to implement
1150 a branch specified by CODE. If INV is true, return mnemonic
1151 for the corresponding inverted branch. */
1152
1153 static const char *
1154 s390_branch_condition_mnemonic (rtx code, int inv)
1155 {
1156 int mask;
1157
1158 static const char *const mnemonic[16] =
1159 {
1160 NULL, "o", "h", "nle",
1161 "l", "nhe", "lh", "ne",
1162 "e", "nlh", "he", "nl",
1163 "le", "nh", "no", NULL
1164 };
1165
1166 if (GET_CODE (XEXP (code, 0)) == REG
1167 && REGNO (XEXP (code, 0)) == CC_REGNUM
1168 && XEXP (code, 1) == const0_rtx)
1169 mask = s390_branch_condition_mask (code);
1170 else
1171 mask = s390_compare_and_branch_condition_mask (code);
1172
1173 gcc_assert (mask >= 0);
1174
1175 if (inv)
1176 mask ^= 15;
1177
1178 gcc_assert (mask >= 1 && mask <= 14);
1179
1180 return mnemonic[mask];
1181 }
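
/* A worked example of the table above (illustrative only): an EQ
   comparison in CCZmode yields the mask CC0 = 8, and mnemonic[8] is
   "e"; with INV set the mask becomes 8 ^ 15 = 7, and mnemonic[7] is
   "ne", the inverted branch condition.  */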
1182
1183 /* Return the part of op which has a value different from def.
1184 The size of the part is determined by mode.
1185 Use this function only if you already know that op really
1186 contains such a part. */
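/* An illustrative example (assuming a 64-bit HOST_WIDE_INT): for
   op = 0x1234ffff, mode = HImode and def = -1, the low halfword equals
   0xffff and is skipped, so the function returns 0x1234.  */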
1187
1188 unsigned HOST_WIDE_INT
1189 s390_extract_part (rtx op, enum machine_mode mode, int def)
1190 {
1191 unsigned HOST_WIDE_INT value = 0;
1192 int max_parts = HOST_BITS_PER_WIDE_INT / GET_MODE_BITSIZE (mode);
1193 int part_bits = GET_MODE_BITSIZE (mode);
1194 unsigned HOST_WIDE_INT part_mask
1195 = ((unsigned HOST_WIDE_INT)1 << part_bits) - 1;
1196 int i;
1197
1198 for (i = 0; i < max_parts; i++)
1199 {
1200 if (i == 0)
1201 value = (unsigned HOST_WIDE_INT) INTVAL (op);
1202 else
1203 value >>= part_bits;
1204
1205 if ((value & part_mask) != (def & part_mask))
1206 return value & part_mask;
1207 }
1208
1209 gcc_unreachable ();
1210 }
1211
1212 /* If OP is an integer constant of mode MODE with exactly one
1213 part of mode PART_MODE unequal to DEF, return the number of that
1214 part. Otherwise, return -1. */
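/* For example (an illustrative case): with op = 0x12340000,
   mode = SImode, part_mode = HImode and def = 0, only the most
   significant halfword differs from DEF, so the function returns part
   number 0.  Parts are numbered starting from the most significant
   one, matching the byte offsets used on this big-endian target.  */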
1215
1216 int
1217 s390_single_part (rtx op,
1218 enum machine_mode mode,
1219 enum machine_mode part_mode,
1220 int def)
1221 {
1222 unsigned HOST_WIDE_INT value = 0;
1223 int n_parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (part_mode);
1224 unsigned HOST_WIDE_INT part_mask
1225 = ((unsigned HOST_WIDE_INT)1 << GET_MODE_BITSIZE (part_mode)) - 1;
1226 int i, part = -1;
1227
1228 if (GET_CODE (op) != CONST_INT)
1229 return -1;
1230
1231 for (i = 0; i < n_parts; i++)
1232 {
1233 if (i == 0)
1234 value = (unsigned HOST_WIDE_INT) INTVAL (op);
1235 else
1236 value >>= GET_MODE_BITSIZE (part_mode);
1237
1238 if ((value & part_mask) != (def & part_mask))
1239 {
1240 if (part != -1)
1241 return -1;
1242 else
1243 part = i;
1244 }
1245 }
1246 return part == -1 ? -1 : n_parts - 1 - part;
1247 }
1248
1249 /* Return true if IN contains a contiguous bitfield in the lower SIZE
1250 bits and no other bits are set in IN. POS and LENGTH can be used
1251 to obtain the start position and the length of the bitfield.
1252
1253 POS gives the position of the first bit of the bitfield counting
1254 from the lowest order bit starting with zero. In order to use this
1255 value for S/390 instructions this has to be converted to "bits big
1256 endian" style. */
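/* A small worked example (illustrative): for in = 0xff00 and size = 32
   the set bits form one contiguous field, so the function returns true
   with *pos = 8 and *length = 8; a value like 0xf0f0 returns false
   because its set bits are not contiguous.  */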
1257
1258 bool
1259 s390_contiguous_bitmask_p (unsigned HOST_WIDE_INT in, int size,
1260 int *pos, int *length)
1261 {
1262 int tmp_pos = 0;
1263 int tmp_length = 0;
1264 int i;
1265 unsigned HOST_WIDE_INT mask = 1ULL;
1266 bool contiguous = false;
1267
1268 for (i = 0; i < size; mask <<= 1, i++)
1269 {
1270 if (contiguous)
1271 {
1272 if (mask & in)
1273 tmp_length++;
1274 else
1275 break;
1276 }
1277 else
1278 {
1279 if (mask & in)
1280 {
1281 contiguous = true;
1282 tmp_length++;
1283 }
1284 else
1285 tmp_pos++;
1286 }
1287 }
1288
1289 if (!tmp_length)
1290 return false;
1291
1292 /* Calculate a mask for all bits beyond the contiguous bits. */
1293 mask = (-1LL & ~(((1ULL << (tmp_length + tmp_pos - 1)) << 1) - 1));
1294
1295 if (mask & in)
1296 return false;
1297
1298 if (tmp_length + tmp_pos - 1 > size)
1299 return false;
1300
1301 if (length)
1302 *length = tmp_length;
1303
1304 if (pos)
1305 *pos = tmp_pos;
1306
1307 return true;
1308 }
1309
1310 /* Check whether we can (and want to) split a double-word
1311 move in mode MODE from SRC to DST into two single-word
1312 moves, moving the subword FIRST_SUBWORD first. */
1313
1314 bool
1315 s390_split_ok_p (rtx dst, rtx src, enum machine_mode mode, int first_subword)
1316 {
1317 /* Floating point registers cannot be split. */
1318 if (FP_REG_P (src) || FP_REG_P (dst))
1319 return false;
1320
1321 /* We don't need to split if operands are directly accessible. */
1322 if (s_operand (src, mode) || s_operand (dst, mode))
1323 return false;
1324
1325 /* Non-offsettable memory references cannot be split. */
1326 if ((GET_CODE (src) == MEM && !offsettable_memref_p (src))
1327 || (GET_CODE (dst) == MEM && !offsettable_memref_p (dst)))
1328 return false;
1329
1330 /* Moving the first subword must not clobber a register
1331 needed to move the second subword. */
1332 if (register_operand (dst, mode))
1333 {
1334 rtx subreg = operand_subword (dst, first_subword, 0, mode);
1335 if (reg_overlap_mentioned_p (subreg, src))
1336 return false;
1337 }
1338
1339 return true;
1340 }
1341
 1342 /* Return true if [MEM1, MEM1 + SIZE] and [MEM2, MEM2 + SIZE] can be
 1343    proven to overlap (or if either operand is not a MEM, in which case
 1344    overlap must conservatively be assumed); return false otherwise.  */
1345
1346 bool
1347 s390_overlap_p (rtx mem1, rtx mem2, HOST_WIDE_INT size)
1348 {
1349 rtx addr1, addr2, addr_delta;
1350 HOST_WIDE_INT delta;
1351
1352 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
1353 return true;
1354
1355 if (size == 0)
1356 return false;
1357
1358 addr1 = XEXP (mem1, 0);
1359 addr2 = XEXP (mem2, 0);
1360
1361 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
1362
1363 /* This overlapping check is used by peepholes merging memory block operations.
1364 Overlapping operations would otherwise be recognized by the S/390 hardware
1365 and would fall back to a slower implementation. Allowing overlapping
1366 operations would lead to slow code but not to wrong code. Therefore we are
1367 somewhat optimistic if we cannot prove that the memory blocks are
1368 overlapping.
1369 That's why we return false here although this may accept operations on
1370 overlapping memory areas. */
1371 if (!addr_delta || GET_CODE (addr_delta) != CONST_INT)
1372 return false;
1373
1374 delta = INTVAL (addr_delta);
1375
1376 if (delta == 0
1377 || (delta > 0 && delta < size)
1378 || (delta < 0 && -delta < size))
1379 return true;
1380
1381 return false;
1382 }
1383
1384 /* Check whether the address of memory reference MEM2 equals exactly
1385 the address of memory reference MEM1 plus DELTA. Return true if
1386 we can prove this to be the case, false otherwise. */
1387
1388 bool
1389 s390_offset_p (rtx mem1, rtx mem2, rtx delta)
1390 {
1391 rtx addr1, addr2, addr_delta;
1392
1393 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
1394 return false;
1395
1396 addr1 = XEXP (mem1, 0);
1397 addr2 = XEXP (mem2, 0);
1398
1399 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
1400 if (!addr_delta || !rtx_equal_p (addr_delta, delta))
1401 return false;
1402
1403 return true;
1404 }
1405
1406 /* Expand logical operator CODE in mode MODE with operands OPERANDS. */
1407
1408 void
1409 s390_expand_logical_operator (enum rtx_code code, enum machine_mode mode,
1410 rtx *operands)
1411 {
1412 enum machine_mode wmode = mode;
1413 rtx dst = operands[0];
1414 rtx src1 = operands[1];
1415 rtx src2 = operands[2];
1416 rtx op, clob, tem;
1417
1418 /* If we cannot handle the operation directly, use a temp register. */
1419 if (!s390_logical_operator_ok_p (operands))
1420 dst = gen_reg_rtx (mode);
1421
1422 /* QImode and HImode patterns make sense only if we have a destination
1423 in memory. Otherwise perform the operation in SImode. */
1424 if ((mode == QImode || mode == HImode) && GET_CODE (dst) != MEM)
1425 wmode = SImode;
1426
1427 /* Widen operands if required. */
1428 if (mode != wmode)
1429 {
1430 if (GET_CODE (dst) == SUBREG
1431 && (tem = simplify_subreg (wmode, dst, mode, 0)) != 0)
1432 dst = tem;
1433 else if (REG_P (dst))
1434 dst = gen_rtx_SUBREG (wmode, dst, 0);
1435 else
1436 dst = gen_reg_rtx (wmode);
1437
1438 if (GET_CODE (src1) == SUBREG
1439 && (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)
1440 src1 = tem;
1441 else if (GET_MODE (src1) != VOIDmode)
1442 src1 = gen_rtx_SUBREG (wmode, force_reg (mode, src1), 0);
1443
1444 if (GET_CODE (src2) == SUBREG
1445 && (tem = simplify_subreg (wmode, src2, mode, 0)) != 0)
1446 src2 = tem;
1447 else if (GET_MODE (src2) != VOIDmode)
1448 src2 = gen_rtx_SUBREG (wmode, force_reg (mode, src2), 0);
1449 }
1450
1451 /* Emit the instruction. */
1452 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, wmode, src1, src2));
1453 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
1454 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
1455
1456 /* Fix up the destination if needed. */
1457 if (dst != operands[0])
1458 emit_move_insn (operands[0], gen_lowpart (mode, dst));
1459 }
1460
1461 /* Check whether OPERANDS are OK for a logical operation (AND, IOR, XOR). */
1462
1463 bool
1464 s390_logical_operator_ok_p (rtx *operands)
1465 {
1466 /* If the destination operand is in memory, it needs to coincide
1467 with one of the source operands. After reload, it has to be
1468 the first source operand. */
1469 if (GET_CODE (operands[0]) == MEM)
1470 return rtx_equal_p (operands[0], operands[1])
1471 || (!reload_completed && rtx_equal_p (operands[0], operands[2]));
1472
1473 return true;
1474 }
1475
1476 /* Narrow logical operation CODE of memory operand MEMOP with immediate
1477 operand IMMOP to switch from SS to SI type instructions. */
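/* For instance (an illustrative sketch): an SImode AND of a memory
   operand with the constant 0xffffff00 can only change the least
   significant byte.  On this big-endian target that byte lives at
   offset 3, so the operation is rewritten as a QImode AND of that
   single byte with the mask 0x00, which can then be emitted as an
   SI-type instruction such as NI instead of an SS-type one.  */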
1478
1479 void
1480 s390_narrow_logical_operator (enum rtx_code code, rtx *memop, rtx *immop)
1481 {
1482 int def = code == AND ? -1 : 0;
1483 HOST_WIDE_INT mask;
1484 int part;
1485
1486 gcc_assert (GET_CODE (*memop) == MEM);
1487 gcc_assert (!MEM_VOLATILE_P (*memop));
1488
1489 mask = s390_extract_part (*immop, QImode, def);
1490 part = s390_single_part (*immop, GET_MODE (*memop), QImode, def);
1491 gcc_assert (part >= 0);
1492
1493 *memop = adjust_address (*memop, QImode, part);
1494 *immop = gen_int_mode (mask, QImode);
1495 }
1496
1497
1498 /* How to allocate a 'struct machine_function'. */
1499
1500 static struct machine_function *
1501 s390_init_machine_status (void)
1502 {
1503 return ggc_alloc_cleared_machine_function ();
1504 }
1505
1506 static void
1507 s390_option_override (void)
1508 {
1509 /* Set up function hooks. */
1510 init_machine_status = s390_init_machine_status;
1511
1512 /* Architecture mode defaults according to ABI. */
1513 if (!(target_flags_explicit & MASK_ZARCH))
1514 {
1515 if (TARGET_64BIT)
1516 target_flags |= MASK_ZARCH;
1517 else
1518 target_flags &= ~MASK_ZARCH;
1519 }
1520
1521 /* Set the march default in case it hasn't been specified on
1522 cmdline. */
1523 if (s390_arch == PROCESSOR_max)
1524 {
1525 s390_arch_string = TARGET_ZARCH? "z900" : "g5";
1526 s390_arch = TARGET_ZARCH ? PROCESSOR_2064_Z900 : PROCESSOR_9672_G5;
1527 s390_arch_flags = processor_flags_table[(int)s390_arch];
1528 }
1529
1530 /* Determine processor to tune for. */
1531 if (s390_tune == PROCESSOR_max)
1532 {
1533 s390_tune = s390_arch;
1534 s390_tune_flags = s390_arch_flags;
1535 }
1536
1537 /* Sanity checks. */
1538 if (TARGET_ZARCH && !TARGET_CPU_ZARCH)
1539 error ("z/Architecture mode not supported on %s", s390_arch_string);
1540 if (TARGET_64BIT && !TARGET_ZARCH)
1541 error ("64-bit ABI not supported in ESA/390 mode");
1542
1543 if (TARGET_HARD_DFP && !TARGET_DFP)
1544 {
1545 if (target_flags_explicit & MASK_HARD_DFP)
1546 {
1547 if (!TARGET_CPU_DFP)
1548 error ("hardware decimal floating point instructions"
1549 " not available on %s", s390_arch_string);
1550 if (!TARGET_ZARCH)
1551 error ("hardware decimal floating point instructions"
1552 " not available in ESA/390 mode");
1553 }
1554 else
1555 target_flags &= ~MASK_HARD_DFP;
1556 }
1557
1558 if ((target_flags_explicit & MASK_SOFT_FLOAT) && TARGET_SOFT_FLOAT)
1559 {
1560 if ((target_flags_explicit & MASK_HARD_DFP) && TARGET_HARD_DFP)
1561 error ("-mhard-dfp can%'t be used in conjunction with -msoft-float");
1562
1563 target_flags &= ~MASK_HARD_DFP;
1564 }
1565
1566 /* Set processor cost function. */
1567 switch (s390_tune)
1568 {
1569 case PROCESSOR_2084_Z990:
1570 s390_cost = &z990_cost;
1571 break;
1572 case PROCESSOR_2094_Z9_109:
1573 s390_cost = &z9_109_cost;
1574 break;
1575 case PROCESSOR_2097_Z10:
1576 s390_cost = &z10_cost;
1577 case PROCESSOR_2817_Z196:
1578 s390_cost = &z196_cost;
1579 break;
1580 default:
1581 s390_cost = &z900_cost;
1582 }
1583
1584 if (TARGET_BACKCHAIN && TARGET_PACKED_STACK && TARGET_HARD_FLOAT)
1585 error ("-mbackchain -mpacked-stack -mhard-float are not supported "
1586 "in combination");
1587
1588 if (s390_stack_size)
1589 {
1590 if (s390_stack_guard >= s390_stack_size)
1591 error ("stack size must be greater than the stack guard value");
1592 else if (s390_stack_size > 1 << 16)
1593 error ("stack size must not be greater than 64k");
1594 }
1595 else if (s390_stack_guard)
1596 error ("-mstack-guard implies use of -mstack-size");
1597
1598 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
1599 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
1600 target_flags |= MASK_LONG_DOUBLE_128;
1601 #endif
1602
1603 if (s390_tune == PROCESSOR_2097_Z10
1604 || s390_tune == PROCESSOR_2817_Z196)
1605 {
1606 maybe_set_param_value (PARAM_MAX_UNROLLED_INSNS, 100,
1607 global_options.x_param_values,
1608 global_options_set.x_param_values);
1609 maybe_set_param_value (PARAM_MAX_UNROLL_TIMES, 32,
1610 global_options.x_param_values,
1611 global_options_set.x_param_values);
1612 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 2000,
1613 global_options.x_param_values,
1614 global_options_set.x_param_values);
1615 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEEL_TIMES, 64,
1616 global_options.x_param_values,
1617 global_options_set.x_param_values);
1618 }
1619
1620 maybe_set_param_value (PARAM_MAX_PENDING_LIST_LENGTH, 256,
1621 global_options.x_param_values,
1622 global_options_set.x_param_values);
 1623   /* Values for loop prefetching.  */
1624 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, 256,
1625 global_options.x_param_values,
1626 global_options_set.x_param_values);
1627 maybe_set_param_value (PARAM_L1_CACHE_SIZE, 128,
1628 global_options.x_param_values,
1629 global_options_set.x_param_values);
 1630   /* The s390 has more than 2 cache levels and the sizes are much larger.
 1631      Since we are always running virtualized, assume that we only get a
 1632      small part of the caches above L1.  */
1633 maybe_set_param_value (PARAM_L2_CACHE_SIZE, 1500,
1634 global_options.x_param_values,
1635 global_options_set.x_param_values);
1636 maybe_set_param_value (PARAM_PREFETCH_MIN_INSN_TO_MEM_RATIO, 2,
1637 global_options.x_param_values,
1638 global_options_set.x_param_values);
1639 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6,
1640 global_options.x_param_values,
1641 global_options_set.x_param_values);
1642
1643 /* This cannot reside in s390_option_optimization_table since HAVE_prefetch
1644 requires the arch flags to be evaluated already. Since prefetching
1645 is beneficial on s390, we enable it if available. */
1646 if (flag_prefetch_loop_arrays < 0 && HAVE_prefetch && optimize >= 3)
1647 flag_prefetch_loop_arrays = 1;
1648 }
1649
1650 /* Map for smallest class containing reg regno. */
1651
1652 const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
1653 { GENERAL_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1654 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1655 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1656 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1657 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1658 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1659 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1660 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1661 ADDR_REGS, CC_REGS, ADDR_REGS, ADDR_REGS,
1662 ACCESS_REGS, ACCESS_REGS
1663 };
1664
1665 /* Return attribute type of insn. */
1666
1667 static enum attr_type
1668 s390_safe_attr_type (rtx insn)
1669 {
1670 if (recog_memoized (insn) >= 0)
1671 return get_attr_type (insn);
1672 else
1673 return TYPE_NONE;
1674 }
1675
1676 /* Return true if DISP is a valid short displacement. */
1677
1678 static bool
1679 s390_short_displacement (rtx disp)
1680 {
1681 /* No displacement is OK. */
1682 if (!disp)
1683 return true;
1684
1685 /* Without the long displacement facility we don't need to
 1686      distinguish between long and short displacements.  */
1687 if (!TARGET_LONG_DISPLACEMENT)
1688 return true;
1689
1690 /* Integer displacement in range. */
1691 if (GET_CODE (disp) == CONST_INT)
1692 return INTVAL (disp) >= 0 && INTVAL (disp) < 4096;
1693
1694 /* GOT offset is not OK, the GOT can be large. */
1695 if (GET_CODE (disp) == CONST
1696 && GET_CODE (XEXP (disp, 0)) == UNSPEC
1697 && (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
1698 || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
1699 return false;
1700
1701 /* All other symbolic constants are literal pool references,
1702 which are OK as the literal pool must be small. */
1703 if (GET_CODE (disp) == CONST)
1704 return true;
1705
1706 return false;
1707 }
1708
1709 /* Decompose a RTL expression ADDR for a memory address into
1710 its components, returned in OUT.
1711
1712 Returns false if ADDR is not a valid memory address, true
1713 otherwise. If OUT is NULL, don't return the components,
1714 but check for validity only.
1715
1716 Note: Only addresses in canonical form are recognized.
1717 LEGITIMIZE_ADDRESS should convert non-canonical forms to the
1718 canonical form so that they will be recognized. */
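/* A simple illustrative case (assuming %r2 is an ordinary address
   register in Pmode): for ADDR = (plus (reg %r2) (const_int 64)) the
   function fills OUT with base = %r2, indx = NULL_RTX and
   disp = (const_int 64), and returns true.  */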
1719
1720 static int
1721 s390_decompose_address (rtx addr, struct s390_address *out)
1722 {
1723 HOST_WIDE_INT offset = 0;
1724 rtx base = NULL_RTX;
1725 rtx indx = NULL_RTX;
1726 rtx disp = NULL_RTX;
1727 rtx orig_disp;
1728 bool pointer = false;
1729 bool base_ptr = false;
1730 bool indx_ptr = false;
1731 bool literal_pool = false;
1732
1733 /* We may need to substitute the literal pool base register into the address
1734 below. However, at this point we do not know which register is going to
1735 be used as base, so we substitute the arg pointer register. This is going
1736 to be treated as holding a pointer below -- it shouldn't be used for any
1737 other purpose. */
1738 rtx fake_pool_base = gen_rtx_REG (Pmode, ARG_POINTER_REGNUM);
1739
1740 /* Decompose address into base + index + displacement. */
1741
1742 if (GET_CODE (addr) == REG || GET_CODE (addr) == UNSPEC)
1743 base = addr;
1744
1745 else if (GET_CODE (addr) == PLUS)
1746 {
1747 rtx op0 = XEXP (addr, 0);
1748 rtx op1 = XEXP (addr, 1);
1749 enum rtx_code code0 = GET_CODE (op0);
1750 enum rtx_code code1 = GET_CODE (op1);
1751
1752 if (code0 == REG || code0 == UNSPEC)
1753 {
1754 if (code1 == REG || code1 == UNSPEC)
1755 {
1756 indx = op0; /* index + base */
1757 base = op1;
1758 }
1759
1760 else
1761 {
1762 base = op0; /* base + displacement */
1763 disp = op1;
1764 }
1765 }
1766
1767 else if (code0 == PLUS)
1768 {
1769 indx = XEXP (op0, 0); /* index + base + disp */
1770 base = XEXP (op0, 1);
1771 disp = op1;
1772 }
1773
1774 else
1775 {
1776 return false;
1777 }
1778 }
1779
1780 else
1781 disp = addr; /* displacement */
1782
1783 /* Extract integer part of displacement. */
1784 orig_disp = disp;
1785 if (disp)
1786 {
1787 if (GET_CODE (disp) == CONST_INT)
1788 {
1789 offset = INTVAL (disp);
1790 disp = NULL_RTX;
1791 }
1792 else if (GET_CODE (disp) == CONST
1793 && GET_CODE (XEXP (disp, 0)) == PLUS
1794 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
1795 {
1796 offset = INTVAL (XEXP (XEXP (disp, 0), 1));
1797 disp = XEXP (XEXP (disp, 0), 0);
1798 }
1799 }
1800
1801 /* Strip off CONST here to avoid special case tests later. */
1802 if (disp && GET_CODE (disp) == CONST)
1803 disp = XEXP (disp, 0);
1804
1805 /* We can convert literal pool addresses to
1806 displacements by basing them off the base register. */
1807 if (disp && GET_CODE (disp) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (disp))
1808 {
1809 /* Either base or index must be free to hold the base register. */
1810 if (!base)
1811 base = fake_pool_base, literal_pool = true;
1812 else if (!indx)
1813 indx = fake_pool_base, literal_pool = true;
1814 else
1815 return false;
1816
1817 /* Mark up the displacement. */
1818 disp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, disp),
1819 UNSPEC_LTREL_OFFSET);
1820 }
1821
1822 /* Validate base register. */
1823 if (base)
1824 {
1825 if (GET_CODE (base) == UNSPEC)
1826 switch (XINT (base, 1))
1827 {
1828 case UNSPEC_LTREF:
1829 if (!disp)
1830 disp = gen_rtx_UNSPEC (Pmode,
1831 gen_rtvec (1, XVECEXP (base, 0, 0)),
1832 UNSPEC_LTREL_OFFSET);
1833 else
1834 return false;
1835
1836 base = XVECEXP (base, 0, 1);
1837 break;
1838
1839 case UNSPEC_LTREL_BASE:
1840 if (XVECLEN (base, 0) == 1)
1841 base = fake_pool_base, literal_pool = true;
1842 else
1843 base = XVECEXP (base, 0, 1);
1844 break;
1845
1846 default:
1847 return false;
1848 }
1849
1850 if (!REG_P (base)
1851 || (GET_MODE (base) != SImode
1852 && GET_MODE (base) != Pmode))
1853 return false;
1854
1855 if (REGNO (base) == STACK_POINTER_REGNUM
1856 || REGNO (base) == FRAME_POINTER_REGNUM
1857 || ((reload_completed || reload_in_progress)
1858 && frame_pointer_needed
1859 && REGNO (base) == HARD_FRAME_POINTER_REGNUM)
1860 || REGNO (base) == ARG_POINTER_REGNUM
1861 || (flag_pic
1862 && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
1863 pointer = base_ptr = true;
1864
1865 if ((reload_completed || reload_in_progress)
1866 && base == cfun->machine->base_reg)
1867 pointer = base_ptr = literal_pool = true;
1868 }
1869
1870 /* Validate index register. */
1871 if (indx)
1872 {
1873 if (GET_CODE (indx) == UNSPEC)
1874 switch (XINT (indx, 1))
1875 {
1876 case UNSPEC_LTREF:
1877 if (!disp)
1878 disp = gen_rtx_UNSPEC (Pmode,
1879 gen_rtvec (1, XVECEXP (indx, 0, 0)),
1880 UNSPEC_LTREL_OFFSET);
1881 else
1882 return false;
1883
1884 indx = XVECEXP (indx, 0, 1);
1885 break;
1886
1887 case UNSPEC_LTREL_BASE:
1888 if (XVECLEN (indx, 0) == 1)
1889 indx = fake_pool_base, literal_pool = true;
1890 else
1891 indx = XVECEXP (indx, 0, 1);
1892 break;
1893
1894 default:
1895 return false;
1896 }
1897
1898 if (!REG_P (indx)
1899 || (GET_MODE (indx) != SImode
1900 && GET_MODE (indx) != Pmode))
1901 return false;
1902
1903 if (REGNO (indx) == STACK_POINTER_REGNUM
1904 || REGNO (indx) == FRAME_POINTER_REGNUM
1905 || ((reload_completed || reload_in_progress)
1906 && frame_pointer_needed
1907 && REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
1908 || REGNO (indx) == ARG_POINTER_REGNUM
1909 || (flag_pic
1910 && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
1911 pointer = indx_ptr = true;
1912
1913 if ((reload_completed || reload_in_progress)
1914 && indx == cfun->machine->base_reg)
1915 pointer = indx_ptr = literal_pool = true;
1916 }
1917
1918 /* Prefer to use pointer as base, not index. */
1919 if (base && indx && !base_ptr
1920 && (indx_ptr || (!REG_POINTER (base) && REG_POINTER (indx))))
1921 {
1922 rtx tmp = base;
1923 base = indx;
1924 indx = tmp;
1925 }
1926
1927 /* Validate displacement. */
1928 if (!disp)
1929 {
1930 /* If virtual registers are involved, the displacement will change later
1931 anyway as the virtual registers get eliminated. This could make a
1932 valid displacement invalid, but it is more likely to make an invalid
1933 displacement valid, because we sometimes access the register save area
1934 via negative offsets to one of those registers.
1935 Thus we don't check the displacement for validity here. If after
1936 elimination the displacement turns out to be invalid after all,
1937 this is fixed up by reload in any case. */
1938 if (base != arg_pointer_rtx
1939 && indx != arg_pointer_rtx
1940 && base != return_address_pointer_rtx
1941 && indx != return_address_pointer_rtx
1942 && base != frame_pointer_rtx
1943 && indx != frame_pointer_rtx
1944 && base != virtual_stack_vars_rtx
1945 && indx != virtual_stack_vars_rtx)
1946 if (!DISP_IN_RANGE (offset))
1947 return false;
1948 }
1949 else
1950 {
1951 /* All the special cases are pointers. */
1952 pointer = true;
1953
1954 /* In the small-PIC case, the linker converts @GOT
1955 and @GOTNTPOFF offsets to possible displacements. */
1956 if (GET_CODE (disp) == UNSPEC
1957 && (XINT (disp, 1) == UNSPEC_GOT
1958 || XINT (disp, 1) == UNSPEC_GOTNTPOFF)
1959 && flag_pic == 1)
1960 {
1961 ;
1962 }
1963
1964 /* Accept pool label offsets. */
1965 else if (GET_CODE (disp) == UNSPEC
1966 && XINT (disp, 1) == UNSPEC_POOL_OFFSET)
1967 ;
1968
1969 /* Accept literal pool references. */
1970 else if (GET_CODE (disp) == UNSPEC
1971 && XINT (disp, 1) == UNSPEC_LTREL_OFFSET)
1972 {
1973 /* In case CSE pulled a non-literal-pool reference out of
1974 the pool, we have to reject the address. This is
1975 especially important when loading the GOT pointer on
1976 non-zarch CPUs. In this case the literal pool contains an
1977 lt-relative offset to the _GLOBAL_OFFSET_TABLE_ label which
1978 will most likely exceed the displacement range. */
1979 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
1980 || !CONSTANT_POOL_ADDRESS_P (XVECEXP (disp, 0, 0)))
1981 return false;
1982
1983 orig_disp = gen_rtx_CONST (Pmode, disp);
1984 if (offset)
1985 {
1986 /* If we have an offset, make sure it does not
1987 exceed the size of the constant pool entry. */
1988 rtx sym = XVECEXP (disp, 0, 0);
1989 if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))
1990 return false;
1991
1992 orig_disp = plus_constant (orig_disp, offset);
1993 }
1994 }
1995
1996 else
1997 return false;
1998 }
1999
2000 if (!base && !indx)
2001 pointer = true;
2002
2003 if (out)
2004 {
2005 out->base = base;
2006 out->indx = indx;
2007 out->disp = orig_disp;
2008 out->pointer = pointer;
2009 out->literal_pool = literal_pool;
2010 }
2011
2012 return true;
2013 }
2014
2015 /* Decompose an RTL expression OP for a shift count into its components,
2016 and return the base register in BASE and the offset in OFFSET.
2017
2018 Return true if OP is a valid shift count, false if not. */
2019
2020 bool
2021 s390_decompose_shift_count (rtx op, rtx *base, HOST_WIDE_INT *offset)
2022 {
2023 HOST_WIDE_INT off = 0;
2024
2025 /* We can have an integer constant, an address register,
2026 or a sum of the two. */
2027 if (GET_CODE (op) == CONST_INT)
2028 {
2029 off = INTVAL (op);
2030 op = NULL_RTX;
2031 }
2032 if (op && GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT)
2033 {
2034 off = INTVAL (XEXP (op, 1));
2035 op = XEXP (op, 0);
2036 }
2037 while (op && GET_CODE (op) == SUBREG)
2038 op = SUBREG_REG (op);
2039
2040 if (op && GET_CODE (op) != REG)
2041 return false;
2042
2043 if (offset)
2044 *offset = off;
2045 if (base)
2046 *base = op;
2047
2048 return true;
2049 }
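/* Illustrative example (not part of the original source): for
   OP = (plus:SI (subreg:SI (reg:DI 60) 4) (const_int 7)) the function
   above strips the SUBREG and returns *BASE = (reg:DI 60), *OFFSET = 7;
   a plain (const_int 12) yields *BASE = NULL_RTX and *OFFSET = 12.  */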
2050
2051
2052 /* Return true if the address of memory operand OP is valid and does not use an index register. */
2053
2054 bool
2055 s390_legitimate_address_without_index_p (rtx op)
2056 {
2057 struct s390_address addr;
2058
2059 if (!s390_decompose_address (XEXP (op, 0), &addr))
2060 return false;
2061 if (addr.indx)
2062 return false;
2063
2064 return true;
2065 }
2066
2067
2068 /* Return true if ADDR is of the form symbol_ref or symbol_ref + const_int
2069 and return these parts in SYMREF and ADDEND. You can pass NULL in
2070 SYMREF and/or ADDEND if you are not interested in these values.
2071 Literal pool references are *not* considered symbol references. */
2072
2073 static bool
2074 s390_symref_operand_p (rtx addr, rtx *symref, HOST_WIDE_INT *addend)
2075 {
2076 HOST_WIDE_INT tmpaddend = 0;
2077
2078 if (GET_CODE (addr) == CONST)
2079 addr = XEXP (addr, 0);
2080
2081 if (GET_CODE (addr) == PLUS)
2082 {
2083 if (GET_CODE (XEXP (addr, 0)) == SYMBOL_REF
2084 && !CONSTANT_POOL_ADDRESS_P (XEXP (addr, 0))
2085 && CONST_INT_P (XEXP (addr, 1)))
2086 {
2087 tmpaddend = INTVAL (XEXP (addr, 1));
2088 addr = XEXP (addr, 0);
2089 }
2090 else
2091 return false;
2092 }
2093 else
2094 if (GET_CODE (addr) != SYMBOL_REF || CONSTANT_POOL_ADDRESS_P (addr))
2095 return false;
2096
2097 if (symref)
2098 *symref = addr;
2099 if (addend)
2100 *addend = tmpaddend;
2101
2102 return true;
2103 }
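/* Illustrative example (not part of the original source): for
   ADDR = (const (plus (symbol_ref "foo") (const_int 8))) the function
   above returns true with *SYMREF = (symbol_ref "foo") and *ADDEND = 8,
   provided "foo" is not a constant pool address.  */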
2104
2105
2106 /* Return true if the address in OP is valid for constraint letter C
2107 if wrapped in a MEM rtx. LIT_POOL_OK specifies whether literal
2108 pool MEMs should be accepted. Only the Q, R, S, T constraint
2109 letters are allowed for C. */
2110
2111 static int
2112 s390_check_qrst_address (char c, rtx op, bool lit_pool_ok)
2113 {
2114 struct s390_address addr;
2115 bool decomposed = false;
2116
2117 /* This check makes sure that no symbolic address (except literal
2118 pool references) is accepted by the R or T constraints. */
2119 if (s390_symref_operand_p (op, NULL, NULL))
2120 return 0;
2121
2122 /* Ensure literal pool references are only accepted if LIT_POOL_OK. */
2123 if (!lit_pool_ok)
2124 {
2125 if (!s390_decompose_address (op, &addr))
2126 return 0;
2127 if (addr.literal_pool)
2128 return 0;
2129 decomposed = true;
2130 }
2131
2132 switch (c)
2133 {
2134 case 'Q': /* no index short displacement */
2135 if (!decomposed && !s390_decompose_address (op, &addr))
2136 return 0;
2137 if (addr.indx)
2138 return 0;
2139 if (!s390_short_displacement (addr.disp))
2140 return 0;
2141 break;
2142
2143 case 'R': /* with index short displacement */
2144 if (TARGET_LONG_DISPLACEMENT)
2145 {
2146 if (!decomposed && !s390_decompose_address (op, &addr))
2147 return 0;
2148 if (!s390_short_displacement (addr.disp))
2149 return 0;
2150 }
2151 /* Any invalid address here will be fixed up by reload,
2152 so accept it for the most generic constraint. */
2153 break;
2154
2155 case 'S': /* no index long displacement */
2156 if (!TARGET_LONG_DISPLACEMENT)
2157 return 0;
2158 if (!decomposed && !s390_decompose_address (op, &addr))
2159 return 0;
2160 if (addr.indx)
2161 return 0;
2162 if (s390_short_displacement (addr.disp))
2163 return 0;
2164 break;
2165
2166 case 'T': /* with index long displacement */
2167 if (!TARGET_LONG_DISPLACEMENT)
2168 return 0;
2169 /* Any invalid address here will be fixed up by reload,
2170 so accept it for the most generic constraint. */
2171 if ((decomposed || s390_decompose_address (op, &addr))
2172 && s390_short_displacement (addr.disp))
2173 return 0;
2174 break;
2175 default:
2176 return 0;
2177 }
2178 return 1;
2179 }
2180
2181
2182 /* Evaluates constraint strings described by the regular expression
2183 ([ABZ](Q|R|S|T))|U|W|Y and returns 1 if OP is a valid operand for
2184 the constraint given in STR, and 0 otherwise. */
2185
2186 int
2187 s390_mem_constraint (const char *str, rtx op)
2188 {
2189 char c = str[0];
2190
2191 switch (c)
2192 {
2193 case 'A':
2194 /* Check for offsettable variants of memory constraints. */
2195 if (!MEM_P (op) || MEM_VOLATILE_P (op))
2196 return 0;
2197 if ((reload_completed || reload_in_progress)
2198 ? !offsettable_memref_p (op) : !offsettable_nonstrict_memref_p (op))
2199 return 0;
2200 return s390_check_qrst_address (str[1], XEXP (op, 0), true);
2201 case 'B':
2202 /* Check for non-literal-pool variants of memory constraints. */
2203 if (!MEM_P (op))
2204 return 0;
2205 return s390_check_qrst_address (str[1], XEXP (op, 0), false);
2206 case 'Q':
2207 case 'R':
2208 case 'S':
2209 case 'T':
2210 if (GET_CODE (op) != MEM)
2211 return 0;
2212 return s390_check_qrst_address (c, XEXP (op, 0), true);
2213 case 'U':
2214 return (s390_check_qrst_address ('Q', op, true)
2215 || s390_check_qrst_address ('R', op, true));
2216 case 'W':
2217 return (s390_check_qrst_address ('S', op, true)
2218 || s390_check_qrst_address ('T', op, true));
2219 case 'Y':
2220 /* Simply check for the basic form of a shift count. Reload will
2221 take care of making sure we have a proper base register. */
2222 if (!s390_decompose_shift_count (op, NULL, NULL))
2223 return 0;
2224 break;
2225 case 'Z':
2226 return s390_check_qrst_address (str[1], op, true);
2227 default:
2228 return 0;
2229 }
2230 return 1;
2231 }
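/* Illustrative note (not part of the original source): a constraint
   string such as "AQ" thus means "offsettable MEM whose address has the
   Q form (no index, short displacement)", while "BT" means
   "non-literal-pool MEM whose address has the T form (index allowed,
   long displacement)".  */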
2232
2233
2234 /* Evaluates constraint strings starting with letter O. Input
2235 parameter C is the letter following the "O" in the constraint
2236 string. Returns 1 if VALUE meets the respective constraint and 0
2237 otherwise. */
2238
2239 int
2240 s390_O_constraint_str (const char c, HOST_WIDE_INT value)
2241 {
2242 if (!TARGET_EXTIMM)
2243 return 0;
2244
2245 switch (c)
2246 {
2247 case 's':
2248 return trunc_int_for_mode (value, SImode) == value;
2249
2250 case 'p':
2251 return value == 0
2252 || s390_single_part (GEN_INT (value), DImode, SImode, 0) == 1;
2253
2254 case 'n':
2255 return s390_single_part (GEN_INT (value - 1), DImode, SImode, -1) == 1;
2256
2257 default:
2258 gcc_unreachable ();
2259 }
2260 }
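/* Illustrative note (not part of the original source): with TARGET_EXTIMM,
   "Os" accepts exactly those values that survive truncation to SImode
   unchanged, i.e. constants representable as a sign-extended 32-bit
   immediate.  */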
2261
2262
2263 /* Evaluates constraint strings starting with letter N. Parameter STR
2264 contains the letters following letter "N" in the constraint string.
2265 Returns true if VALUE matches the constraint. */
2266
2267 int
2268 s390_N_constraint_str (const char *str, HOST_WIDE_INT value)
2269 {
2270 enum machine_mode mode, part_mode;
2271 int def;
2272 int part, part_goal;
2273
2274
2275 if (str[0] == 'x')
2276 part_goal = -1;
2277 else
2278 part_goal = str[0] - '0';
2279
2280 switch (str[1])
2281 {
2282 case 'Q':
2283 part_mode = QImode;
2284 break;
2285 case 'H':
2286 part_mode = HImode;
2287 break;
2288 case 'S':
2289 part_mode = SImode;
2290 break;
2291 default:
2292 return 0;
2293 }
2294
2295 switch (str[2])
2296 {
2297 case 'H':
2298 mode = HImode;
2299 break;
2300 case 'S':
2301 mode = SImode;
2302 break;
2303 case 'D':
2304 mode = DImode;
2305 break;
2306 default:
2307 return 0;
2308 }
2309
2310 switch (str[3])
2311 {
2312 case '0':
2313 def = 0;
2314 break;
2315 case 'F':
2316 def = -1;
2317 break;
2318 default:
2319 return 0;
2320 }
2321
2322 if (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (part_mode))
2323 return 0;
2324
2325 part = s390_single_part (GEN_INT (value), mode, part_mode, def);
2326 if (part < 0)
2327 return 0;
2328 if (part_goal != -1 && part_goal != part)
2329 return 0;
2330
2331 return 1;
2332 }
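/* Illustrative example (not part of the original source): the string
   "xQD0" handled above describes a DImode VALUE in which exactly one
   QImode part differs from all-zero bits; the leading 'x' places no
   restriction on which part that is.  */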
2333
2334
2335 /* Returns true if the input parameter VALUE is a float zero. */
2336
2337 int
2338 s390_float_const_zero_p (rtx value)
2339 {
2340 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
2341 && value == CONST0_RTX (GET_MODE (value)));
2342 }
2343
2344 /* Implement TARGET_REGISTER_MOVE_COST. */
2345
2346 static int
2347 s390_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2348 reg_class_t from, reg_class_t to)
2349 {
2350 /* On s390, copy between fprs and gprs is expensive. */
2351 if ((reg_classes_intersect_p (from, GENERAL_REGS)
2352 && reg_classes_intersect_p (to, FP_REGS))
2353 || (reg_classes_intersect_p (from, FP_REGS)
2354 && reg_classes_intersect_p (to, GENERAL_REGS)))
2355 return 10;
2356
2357 return 1;
2358 }
2359
2360 /* Implement TARGET_MEMORY_MOVE_COST. */
2361
2362 static int
2363 s390_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2364 reg_class_t rclass ATTRIBUTE_UNUSED,
2365 bool in ATTRIBUTE_UNUSED)
2366 {
2367 return 1;
2368 }
2369
2370 /* Compute a (partial) cost for rtx X. Return true if the complete
2371 cost has been computed, and false if subexpressions should be
2372 scanned. In either case, *TOTAL contains the cost result.
2373 CODE contains GET_CODE (x), OUTER_CODE contains the code
2374 of the superexpression of x. */
2375
2376 static bool
2377 s390_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
2378 int *total, bool speed ATTRIBUTE_UNUSED)
2379 {
2380 switch (code)
2381 {
2382 case CONST:
2383 case CONST_INT:
2384 case LABEL_REF:
2385 case SYMBOL_REF:
2386 case CONST_DOUBLE:
2387 case MEM:
2388 *total = 0;
2389 return true;
2390
2391 case ASHIFT:
2392 case ASHIFTRT:
2393 case LSHIFTRT:
2394 case ROTATE:
2395 case ROTATERT:
2396 case AND:
2397 case IOR:
2398 case XOR:
2399 case NEG:
2400 case NOT:
2401 *total = COSTS_N_INSNS (1);
2402 return false;
2403
2404 case PLUS:
2405 case MINUS:
2406 *total = COSTS_N_INSNS (1);
2407 return false;
2408
2409 case MULT:
2410 switch (GET_MODE (x))
2411 {
2412 case SImode:
2413 {
2414 rtx left = XEXP (x, 0);
2415 rtx right = XEXP (x, 1);
2416 if (GET_CODE (right) == CONST_INT
2417 && CONST_OK_FOR_K (INTVAL (right)))
2418 *total = s390_cost->mhi;
2419 else if (GET_CODE (left) == SIGN_EXTEND)
2420 *total = s390_cost->mh;
2421 else
2422 *total = s390_cost->ms; /* msr, ms, msy */
2423 break;
2424 }
2425 case DImode:
2426 {
2427 rtx left = XEXP (x, 0);
2428 rtx right = XEXP (x, 1);
2429 if (TARGET_ZARCH)
2430 {
2431 if (GET_CODE (right) == CONST_INT
2432 && CONST_OK_FOR_K (INTVAL (right)))
2433 *total = s390_cost->mghi;
2434 else if (GET_CODE (left) == SIGN_EXTEND)
2435 *total = s390_cost->msgf;
2436 else
2437 *total = s390_cost->msg; /* msgr, msg */
2438 }
2439 else /* TARGET_31BIT */
2440 {
2441 if (GET_CODE (left) == SIGN_EXTEND
2442 && GET_CODE (right) == SIGN_EXTEND)
2443 /* mulsidi case: mr, m */
2444 *total = s390_cost->m;
2445 else if (GET_CODE (left) == ZERO_EXTEND
2446 && GET_CODE (right) == ZERO_EXTEND
2447 && TARGET_CPU_ZARCH)
2448 /* umulsidi case: ml, mlr */
2449 *total = s390_cost->ml;
2450 else
2451 /* Complex calculation is required. */
2452 *total = COSTS_N_INSNS (40);
2453 }
2454 break;
2455 }
2456 case SFmode:
2457 case DFmode:
2458 *total = s390_cost->mult_df;
2459 break;
2460 case TFmode:
2461 *total = s390_cost->mxbr;
2462 break;
2463 default:
2464 return false;
2465 }
2466 return false;
2467
2468 case FMA:
2469 switch (GET_MODE (x))
2470 {
2471 case DFmode:
2472 *total = s390_cost->madbr;
2473 break;
2474 case SFmode:
2475 *total = s390_cost->maebr;
2476 break;
2477 default:
2478 return false;
2479 }
2480 /* A negation of the third argument is free: FMSUB. */
2481 if (GET_CODE (XEXP (x, 2)) == NEG)
2482 {
2483 *total += (rtx_cost (XEXP (x, 0), FMA, 0, speed)
2484 + rtx_cost (XEXP (x, 1), FMA, 1, speed)
2485 + rtx_cost (XEXP (XEXP (x, 2), 0), FMA, 2, speed));
2486 return true;
2487 }
2488 return false;
2489
2490 case UDIV:
2491 case UMOD:
2492 if (GET_MODE (x) == TImode) /* 128 bit division */
2493 *total = s390_cost->dlgr;
2494 else if (GET_MODE (x) == DImode)
2495 {
2496 rtx right = XEXP (x, 1);
2497 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
2498 *total = s390_cost->dlr;
2499 else /* 64 by 64 bit division */
2500 *total = s390_cost->dlgr;
2501 }
2502 else if (GET_MODE (x) == SImode) /* 32 bit division */
2503 *total = s390_cost->dlr;
2504 return false;
2505
2506 case DIV:
2507 case MOD:
2508 if (GET_MODE (x) == DImode)
2509 {
2510 rtx right = XEXP (x, 1);
2511 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
2512 if (TARGET_ZARCH)
2513 *total = s390_cost->dsgfr;
2514 else
2515 *total = s390_cost->dr;
2516 else /* 64 by 64 bit division */
2517 *total = s390_cost->dsgr;
2518 }
2519 else if (GET_MODE (x) == SImode) /* 32 bit division */
2520 *total = s390_cost->dlr;
2521 else if (GET_MODE (x) == SFmode)
2522 {
2523 *total = s390_cost->debr;
2524 }
2525 else if (GET_MODE (x) == DFmode)
2526 {
2527 *total = s390_cost->ddbr;
2528 }
2529 else if (GET_MODE (x) == TFmode)
2530 {
2531 *total = s390_cost->dxbr;
2532 }
2533 return false;
2534
2535 case SQRT:
2536 if (GET_MODE (x) == SFmode)
2537 *total = s390_cost->sqebr;
2538 else if (GET_MODE (x) == DFmode)
2539 *total = s390_cost->sqdbr;
2540 else /* TFmode */
2541 *total = s390_cost->sqxbr;
2542 return false;
2543
2544 case SIGN_EXTEND:
2545 case ZERO_EXTEND:
2546 if (outer_code == MULT || outer_code == DIV || outer_code == MOD
2547 || outer_code == PLUS || outer_code == MINUS
2548 || outer_code == COMPARE)
2549 *total = 0;
2550 return false;
2551
2552 case COMPARE:
2553 *total = COSTS_N_INSNS (1);
2554 if (GET_CODE (XEXP (x, 0)) == AND
2555 && GET_CODE (XEXP (x, 1)) == CONST_INT
2556 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
2557 {
2558 rtx op0 = XEXP (XEXP (x, 0), 0);
2559 rtx op1 = XEXP (XEXP (x, 0), 1);
2560 rtx op2 = XEXP (x, 1);
2561
2562 if (memory_operand (op0, GET_MODE (op0))
2563 && s390_tm_ccmode (op1, op2, 0) != VOIDmode)
2564 return true;
2565 if (register_operand (op0, GET_MODE (op0))
2566 && s390_tm_ccmode (op1, op2, 1) != VOIDmode)
2567 return true;
2568 }
2569 return false;
2570
2571 default:
2572 return false;
2573 }
2574 }
2575
2576 /* Return the cost of an address rtx ADDR. */
2577
2578 static int
2579 s390_address_cost (rtx addr, bool speed ATTRIBUTE_UNUSED)
2580 {
2581 struct s390_address ad;
2582 if (!s390_decompose_address (addr, &ad))
2583 return 1000;
2584
2585 return ad.indx ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (1);
2586 }
2587
2588 /* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
2589 otherwise return 0. */
2590
2591 int
2592 tls_symbolic_operand (rtx op)
2593 {
2594 if (GET_CODE (op) != SYMBOL_REF)
2595 return 0;
2596 return SYMBOL_REF_TLS_MODEL (op);
2597 }
2598 \f
2599 /* Split DImode access register reference REG (on 64-bit) into its constituent
2600 low and high parts, and store them into LO and HI. Note that gen_lowpart/
2601 gen_highpart cannot be used as they assume all registers are word-sized,
2602 while our access registers have only half that size. */
2603
2604 void
2605 s390_split_access_reg (rtx reg, rtx *lo, rtx *hi)
2606 {
2607 gcc_assert (TARGET_64BIT);
2608 gcc_assert (ACCESS_REG_P (reg));
2609 gcc_assert (GET_MODE (reg) == DImode);
2610 gcc_assert (!(REGNO (reg) & 1));
2611
2612 *lo = gen_rtx_REG (SImode, REGNO (reg) + 1);
2613 *hi = gen_rtx_REG (SImode, REGNO (reg));
2614 }
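/* Illustrative note (not part of the original source): for an access
   register pair starting at hard register REGNO, the code above yields
   *HI = (reg:SI REGNO) and *LO = (reg:SI REGNO + 1), i.e. the 64-bit
   value is held in big-endian order across two 32-bit registers.  */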
2615
2616 /* Return true if OP contains a symbol reference. */
2617
2618 bool
2619 symbolic_reference_mentioned_p (rtx op)
2620 {
2621 const char *fmt;
2622 int i;
2623
2624 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
2625 return 1;
2626
2627 fmt = GET_RTX_FORMAT (GET_CODE (op));
2628 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2629 {
2630 if (fmt[i] == 'E')
2631 {
2632 int j;
2633
2634 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2635 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2636 return 1;
2637 }
2638
2639 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
2640 return 1;
2641 }
2642
2643 return 0;
2644 }
2645
2646 /* Return true if OP contains a reference to a thread-local symbol. */
2647
2648 bool
2649 tls_symbolic_reference_mentioned_p (rtx op)
2650 {
2651 const char *fmt;
2652 int i;
2653
2654 if (GET_CODE (op) == SYMBOL_REF)
2655 return tls_symbolic_operand (op);
2656
2657 fmt = GET_RTX_FORMAT (GET_CODE (op));
2658 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2659 {
2660 if (fmt[i] == 'E')
2661 {
2662 int j;
2663
2664 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2665 if (tls_symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2666 return true;
2667 }
2668
2669 else if (fmt[i] == 'e' && tls_symbolic_reference_mentioned_p (XEXP (op, i)))
2670 return true;
2671 }
2672
2673 return false;
2674 }
2675
2676
2677 /* Return true if OP is a legitimate general operand when
2678 generating PIC code. It is given that flag_pic is on
2679 and that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2680
2681 int
2682 legitimate_pic_operand_p (rtx op)
2683 {
2684 /* Accept all non-symbolic constants. */
2685 if (!SYMBOLIC_CONST (op))
2686 return 1;
2687
2688 /* Reject everything else; must be handled
2689 via emit_symbolic_move. */
2690 return 0;
2691 }
2692
2693 /* Returns true if the constant value OP is a legitimate general operand.
2694 It is given that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2695
2696 static bool
2697 s390_legitimate_constant_p (enum machine_mode mode, rtx op)
2698 {
2699 /* Accept all non-symbolic constants. */
2700 if (!SYMBOLIC_CONST (op))
2701 return 1;
2702
2703 /* Accept immediate LARL operands. */
2704 if (TARGET_CPU_ZARCH && larl_operand (op, mode))
2705 return 1;
2706
2707 /* Thread-local symbols are never legal constants. This is
2708 so that emit_call knows that computing such addresses
2709 might require a function call. */
2710 if (TLS_SYMBOLIC_CONST (op))
2711 return 0;
2712
2713 /* In the PIC case, symbolic constants must *not* be
2714 forced into the literal pool. We accept them here,
2715 so that they will be handled by emit_symbolic_move. */
2716 if (flag_pic)
2717 return 1;
2718
2719 /* All remaining non-PIC symbolic constants are
2720 forced into the literal pool. */
2721 return 0;
2722 }
2723
2724 /* Determine if it's legal to put X into the constant pool. This
2725 is not possible if X contains the address of a symbol that is
2726 not constant (TLS) or not known at final link time (PIC). */
2727
2728 static bool
2729 s390_cannot_force_const_mem (enum machine_mode mode, rtx x)
2730 {
2731 switch (GET_CODE (x))
2732 {
2733 case CONST_INT:
2734 case CONST_DOUBLE:
2735 /* Accept all non-symbolic constants. */
2736 return false;
2737
2738 case LABEL_REF:
2739 /* Labels are OK iff we are non-PIC. */
2740 return flag_pic != 0;
2741
2742 case SYMBOL_REF:
2743 /* 'Naked' TLS symbol references are never OK;
2744 non-TLS symbols are OK iff we are non-PIC. */
2745 if (tls_symbolic_operand (x))
2746 return true;
2747 else
2748 return flag_pic != 0;
2749
2750 case CONST:
2751 return s390_cannot_force_const_mem (mode, XEXP (x, 0));
2752 case PLUS:
2753 case MINUS:
2754 return s390_cannot_force_const_mem (mode, XEXP (x, 0))
2755 || s390_cannot_force_const_mem (mode, XEXP (x, 1));
2756
2757 case UNSPEC:
2758 switch (XINT (x, 1))
2759 {
2760 /* Only lt-relative or GOT-relative UNSPECs are OK. */
2761 case UNSPEC_LTREL_OFFSET:
2762 case UNSPEC_GOT:
2763 case UNSPEC_GOTOFF:
2764 case UNSPEC_PLTOFF:
2765 case UNSPEC_TLSGD:
2766 case UNSPEC_TLSLDM:
2767 case UNSPEC_NTPOFF:
2768 case UNSPEC_DTPOFF:
2769 case UNSPEC_GOTNTPOFF:
2770 case UNSPEC_INDNTPOFF:
2771 return false;
2772
2773 /* If the literal pool shares the code section, execute
2774 template placeholders may be put into the pool as well. */
2775 case UNSPEC_INSN:
2776 return TARGET_CPU_ZARCH;
2777
2778 default:
2779 return true;
2780 }
2781 break;
2782
2783 default:
2784 gcc_unreachable ();
2785 }
2786 }
2787
2788 /* Returns true if the constant value OP is a legitimate general
2789 operand during and after reload. The difference to
2790 legitimate_constant_p is that this function will not accept
2791 a constant that would need to be forced to the literal pool
2792 before it can be used as operand.
2793 This function accepts all constants which can be loaded directly
2794 into a GPR. */
2795
2796 bool
2797 legitimate_reload_constant_p (rtx op)
2798 {
2799 /* Accept la(y) operands. */
2800 if (GET_CODE (op) == CONST_INT
2801 && DISP_IN_RANGE (INTVAL (op)))
2802 return true;
2803
2804 /* Accept l(g)hi/l(g)fi operands. */
2805 if (GET_CODE (op) == CONST_INT
2806 && (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_Os (INTVAL (op))))
2807 return true;
2808
2809 /* Accept lliXX operands. */
2810 if (TARGET_ZARCH
2811 && GET_CODE (op) == CONST_INT
2812 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2813 && s390_single_part (op, word_mode, HImode, 0) >= 0)
2814 return true;
2815
2816 if (TARGET_EXTIMM
2817 && GET_CODE (op) == CONST_INT
2818 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2819 && s390_single_part (op, word_mode, SImode, 0) >= 0)
2820 return true;
2821
2822 /* Accept larl operands. */
2823 if (TARGET_CPU_ZARCH
2824 && larl_operand (op, VOIDmode))
2825 return true;
2826
2827 /* Accept floating-point zero operands that fit into a single GPR. */
2828 if (GET_CODE (op) == CONST_DOUBLE
2829 && s390_float_const_zero_p (op)
2830 && GET_MODE_SIZE (GET_MODE (op)) <= UNITS_PER_WORD)
2831 return true;
2832
2833 /* Accept double-word operands that can be split. */
2834 if (GET_CODE (op) == CONST_INT
2835 && trunc_int_for_mode (INTVAL (op), word_mode) != INTVAL (op))
2836 {
2837 enum machine_mode dword_mode = word_mode == SImode ? DImode : TImode;
2838 rtx hi = operand_subword (op, 0, 0, dword_mode);
2839 rtx lo = operand_subword (op, 1, 0, dword_mode);
2840 return legitimate_reload_constant_p (hi)
2841 && legitimate_reload_constant_p (lo);
2842 }
2843
2844 /* Everything else cannot be handled without reload. */
2845 return false;
2846 }
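/* Illustrative note (not part of the original source): the double-word
   case above means, e.g., that a TImode constant is accepted only when
   both of its DImode subwords can themselves be loaded directly via one
   of the earlier cases (la(y), l(g)hi/l(g)fi, lliXX, ...).  */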
2847
2848 /* Returns true if the constant value OP is a legitimate fp operand
2849 during and after reload.
2850 This function accepts all constants which can be loaded directly
2851 into an FPR. */
2852
2853 static bool
2854 legitimate_reload_fp_constant_p (rtx op)
2855 {
2856 /* Accept floating-point zero operands if the load zero instruction
2857 can be used. */
2858 if (TARGET_Z196
2859 && GET_CODE (op) == CONST_DOUBLE
2860 && s390_float_const_zero_p (op))
2861 return true;
2862
2863 return false;
2864 }
2865
2866 /* Given an rtx OP being reloaded into a reg required to be in class RCLASS,
2867 return the class of reg to actually use. */
2868
2869 static reg_class_t
2870 s390_preferred_reload_class (rtx op, reg_class_t rclass)
2871 {
2872 switch (GET_CODE (op))
2873 {
2874 /* Constants we cannot reload into general registers
2875 must be forced into the literal pool. */
2876 case CONST_DOUBLE:
2877 case CONST_INT:
2878 if (reg_class_subset_p (GENERAL_REGS, rclass)
2879 && legitimate_reload_constant_p (op))
2880 return GENERAL_REGS;
2881 else if (reg_class_subset_p (ADDR_REGS, rclass)
2882 && legitimate_reload_constant_p (op))
2883 return ADDR_REGS;
2884 else if (reg_class_subset_p (FP_REGS, rclass)
2885 && legitimate_reload_fp_constant_p (op))
2886 return FP_REGS;
2887 return NO_REGS;
2888
2889 /* If a symbolic constant or a PLUS is reloaded,
2890 it is most likely being used as an address, so
2891 prefer ADDR_REGS. If 'class' is not a superset
2892 of ADDR_REGS, e.g. FP_REGS, reject this reload. */
2893 case LABEL_REF:
2894 case SYMBOL_REF:
2895 case CONST:
2896 if (!legitimate_reload_constant_p (op))
2897 return NO_REGS;
2898 /* fallthrough */
2899 case PLUS:
2900 /* load address will be used. */
2901 if (reg_class_subset_p (ADDR_REGS, rclass))
2902 return ADDR_REGS;
2903 else
2904 return NO_REGS;
2905
2906 default:
2907 break;
2908 }
2909
2910 return rclass;
2911 }
2912
2913 /* Return true if ADDR is SYMBOL_REF + addend with addend being a
2914 multiple of ALIGNMENT and the SYMBOL_REF being naturally
2915 aligned. */
2916
2917 bool
2918 s390_check_symref_alignment (rtx addr, HOST_WIDE_INT alignment)
2919 {
2920 HOST_WIDE_INT addend;
2921 rtx symref;
2922
2923 if (!s390_symref_operand_p (addr, &symref, &addend))
2924 return false;
2925
2926 return (!SYMBOL_REF_NOT_NATURALLY_ALIGNED_P (symref)
2927 && !(addend & (alignment - 1)));
2928 }
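/* Illustrative example (not part of the original source): a naturally
   aligned SYMBOL_REF plus an addend of 6 fails a check with
   ALIGNMENT == 4, since 6 & 3 != 0, but passes with ALIGNMENT == 2.  */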
2929
2930 /* ADDR is moved into REG using larl. If ADDR isn't a valid larl
2931 operand, SCRATCH is used to load the even part of the address,
2932 and one is then added. */
2933
2934 void
2935 s390_reload_larl_operand (rtx reg, rtx addr, rtx scratch)
2936 {
2937 HOST_WIDE_INT addend;
2938 rtx symref;
2939
2940 if (!s390_symref_operand_p (addr, &symref, &addend))
2941 gcc_unreachable ();
2942
2943 if (!(addend & 1))
2944 /* Easy case. The addend is even so larl will do fine. */
2945 emit_move_insn (reg, addr);
2946 else
2947 {
2948 /* We can leave the scratch register untouched if the target
2949 register is a valid base register. */
2950 if (REGNO (reg) < FIRST_PSEUDO_REGISTER
2951 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS)
2952 scratch = reg;
2953
2954 gcc_assert (REGNO (scratch) < FIRST_PSEUDO_REGISTER);
2955 gcc_assert (REGNO_REG_CLASS (REGNO (scratch)) == ADDR_REGS);
2956
2957 if (addend != 1)
2958 emit_move_insn (scratch,
2959 gen_rtx_CONST (Pmode,
2960 gen_rtx_PLUS (Pmode, symref,
2961 GEN_INT (addend - 1))));
2962 else
2963 emit_move_insn (scratch, symref);
2964
2965 /* Increment the address using la in order to avoid clobbering cc. */
2966 emit_move_insn (reg, gen_rtx_PLUS (Pmode, scratch, const1_rtx));
2967 }
2968 }
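/* Illustrative example (not part of the original source): for
   ADDR = foo + 5 the code above loads foo + 4 into SCRATCH (an even
   address, so larl can handle it) and then emits an la adding 1, giving
   REG = foo + 5 without clobbering the condition code.  */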
2969
2970 /* Generate what is necessary to move between REG and MEM using
2971 SCRATCH. The direction is given by TOMEM. */
2972
2973 void
2974 s390_reload_symref_address (rtx reg, rtx mem, rtx scratch, bool tomem)
2975 {
2976 /* Reload might have pulled a constant out of the literal pool.
2977 Force it back in. */
2978 if (CONST_INT_P (mem) || GET_CODE (mem) == CONST_DOUBLE
2979 || GET_CODE (mem) == CONST)
2980 mem = force_const_mem (GET_MODE (reg), mem);
2981
2982 gcc_assert (MEM_P (mem));
2983
2984 /* For a load from memory we can leave the scratch register
2985 untouched if the target register is a valid base register. */
2986 if (!tomem
2987 && REGNO (reg) < FIRST_PSEUDO_REGISTER
2988 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS
2989 && GET_MODE (reg) == GET_MODE (scratch))
2990 scratch = reg;
2991
2992 /* Load address into scratch register. Since we can't have a
2993 secondary reload for a secondary reload we have to cover the case
2994 where larl would need a secondary reload here as well. */
2995 s390_reload_larl_operand (scratch, XEXP (mem, 0), scratch);
2996
2997 /* Now we can use a standard load/store to do the move. */
2998 if (tomem)
2999 emit_move_insn (replace_equiv_address (mem, scratch), reg);
3000 else
3001 emit_move_insn (reg, replace_equiv_address (mem, scratch));
3002 }
3003
3004 /* Inform reload about cases where moving X with a mode MODE to a register in
3005 RCLASS requires an extra scratch or immediate register. Return the class
3006 needed for the immediate register. */
3007
3008 static reg_class_t
3009 s390_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
3010 enum machine_mode mode, secondary_reload_info *sri)
3011 {
3012 enum reg_class rclass = (enum reg_class) rclass_i;
3013
3014 /* Intermediate register needed. */
3015 if (reg_classes_intersect_p (CC_REGS, rclass))
3016 return GENERAL_REGS;
3017
3018 if (TARGET_Z10)
3019 {
3020 HOST_WIDE_INT offset;
3021 rtx symref;
3022
3023 /* On z10 several optimizer steps may generate larl operands with
3024 an odd addend. */
3025 if (in_p
3026 && s390_symref_operand_p (x, &symref, &offset)
3027 && mode == Pmode
3028 && !SYMBOL_REF_ALIGN1_P (symref)
3029 && (offset & 1) == 1)
3030 sri->icode = ((mode == DImode) ? CODE_FOR_reloaddi_larl_odd_addend_z10
3031 : CODE_FOR_reloadsi_larl_odd_addend_z10);
3032
3033 /* On z10 we need a scratch register when moving QI, TI or floating
3034 point mode values from or to a memory location with a SYMBOL_REF
3035 or if the symref addend of an HI, SI or DI move is not aligned to
3036 the width of the access. */
3037 if (MEM_P (x)
3038 && s390_symref_operand_p (XEXP (x, 0), NULL, NULL)
3039 && (mode == QImode || mode == TImode || FLOAT_MODE_P (mode)
3040 || (!TARGET_ZARCH && mode == DImode)
3041 || ((mode == HImode || mode == SImode || mode == DImode)
3042 && (!s390_check_symref_alignment (XEXP (x, 0),
3043 GET_MODE_SIZE (mode))))))
3044 {
3045 #define __SECONDARY_RELOAD_CASE(M,m) \
3046 case M##mode: \
3047 if (TARGET_64BIT) \
3048 sri->icode = in_p ? CODE_FOR_reload##m##di_toreg_z10 : \
3049 CODE_FOR_reload##m##di_tomem_z10; \
3050 else \
3051 sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 : \
3052 CODE_FOR_reload##m##si_tomem_z10; \
3053 break;
3054
3055 switch (GET_MODE (x))
3056 {
3057 __SECONDARY_RELOAD_CASE (QI, qi);
3058 __SECONDARY_RELOAD_CASE (HI, hi);
3059 __SECONDARY_RELOAD_CASE (SI, si);
3060 __SECONDARY_RELOAD_CASE (DI, di);
3061 __SECONDARY_RELOAD_CASE (TI, ti);
3062 __SECONDARY_RELOAD_CASE (SF, sf);
3063 __SECONDARY_RELOAD_CASE (DF, df);
3064 __SECONDARY_RELOAD_CASE (TF, tf);
3065 __SECONDARY_RELOAD_CASE (SD, sd);
3066 __SECONDARY_RELOAD_CASE (DD, dd);
3067 __SECONDARY_RELOAD_CASE (TD, td);
3068
3069 default:
3070 gcc_unreachable ();
3071 }
3072 #undef __SECONDARY_RELOAD_CASE
3073 }
3074 }
3075
3076 /* We need a scratch register when loading a PLUS expression which
3077 is not a legitimate operand of the LOAD ADDRESS instruction. */
3078 if (in_p && s390_plus_operand (x, mode))
3079 sri->icode = (TARGET_64BIT ?
3080 CODE_FOR_reloaddi_plus : CODE_FOR_reloadsi_plus);
3081
3082 /* Performing a multiword move from or to memory we have to make sure the
3083 second chunk in memory is addressable without causing a displacement
3084 overflow. If that would be the case we calculate the address in
3085 a scratch register. */
3086 if (MEM_P (x)
3087 && GET_CODE (XEXP (x, 0)) == PLUS
3088 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3089 && !DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1))
3090 + GET_MODE_SIZE (mode) - 1))
3091 {
3092 /* For GENERAL_REGS a displacement overflow is no problem if it occurs
3093 in an s_operand address, since we may fall back to lm/stm. So we only
3094 have to care about overflows in the b+i+d case. */
3095 if ((reg_classes_intersect_p (GENERAL_REGS, rclass)
3096 && s390_class_max_nregs (GENERAL_REGS, mode) > 1
3097 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
3098 /* For FP_REGS no lm/stm is available so this check is triggered
3099 for displacement overflows in b+i+d and b+d like addresses. */
3100 || (reg_classes_intersect_p (FP_REGS, rclass)
3101 && s390_class_max_nregs (FP_REGS, mode) > 1))
3102 {
3103 if (in_p)
3104 sri->icode = (TARGET_64BIT ?
3105 CODE_FOR_reloaddi_nonoffmem_in :
3106 CODE_FOR_reloadsi_nonoffmem_in);
3107 else
3108 sri->icode = (TARGET_64BIT ?
3109 CODE_FOR_reloaddi_nonoffmem_out :
3110 CODE_FOR_reloadsi_nonoffmem_out);
3111 }
3112 }
3113
3114 /* A scratch address register is needed when a symbolic constant is
3115 copied to r0 while compiling with -fPIC. In other cases the target
3116 register might be used as a temporary (see legitimize_pic_address). */
3117 if (in_p && SYMBOLIC_CONST (x) && flag_pic == 2 && rclass != ADDR_REGS)
3118 sri->icode = (TARGET_64BIT ?
3119 CODE_FOR_reloaddi_PIC_addr :
3120 CODE_FOR_reloadsi_PIC_addr);
3121
3122 /* Either scratch or no register needed. */
3123 return NO_REGS;
3124 }
3125
3126 /* Generate code to load SRC, which is PLUS that is not a
3127 legitimate operand for the LA instruction, into TARGET.
3128 SCRATCH may be used as scratch register. */
3129
3130 void
3131 s390_expand_plus_operand (rtx target, rtx src,
3132 rtx scratch)
3133 {
3134 rtx sum1, sum2;
3135 struct s390_address ad;
3136
3137 /* src must be a PLUS; get its two operands. */
3138 gcc_assert (GET_CODE (src) == PLUS);
3139 gcc_assert (GET_MODE (src) == Pmode);
3140
3141 /* Check if any of the two operands is already scheduled
3142 for replacement by reload. This can happen e.g. when
3143 float registers occur in an address. */
3144 sum1 = find_replacement (&XEXP (src, 0));
3145 sum2 = find_replacement (&XEXP (src, 1));
3146 src = gen_rtx_PLUS (Pmode, sum1, sum2);
3147
3148 /* If the address is already strictly valid, there's nothing to do. */
3149 if (!s390_decompose_address (src, &ad)
3150 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
3151 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
3152 {
3153 /* Otherwise, one of the operands cannot be an address register;
3154 we reload its value into the scratch register. */
3155 if (true_regnum (sum1) < 1 || true_regnum (sum1) > 15)
3156 {
3157 emit_move_insn (scratch, sum1);
3158 sum1 = scratch;
3159 }
3160 if (true_regnum (sum2) < 1 || true_regnum (sum2) > 15)
3161 {
3162 emit_move_insn (scratch, sum2);
3163 sum2 = scratch;
3164 }
3165
3166 /* According to the way these invalid addresses are generated
3167 in reload.c, it should never happen (at least on s390) that
3168 *neither* of the PLUS components, after find_replacements
3169 was applied, is an address register. */
3170 if (sum1 == scratch && sum2 == scratch)
3171 {
3172 debug_rtx (src);
3173 gcc_unreachable ();
3174 }
3175
3176 src = gen_rtx_PLUS (Pmode, sum1, sum2);
3177 }
3178
3179 /* Emit the LOAD ADDRESS pattern. Note that reload of PLUS
3180 is only ever performed on addresses, so we can mark the
3181 sum as legitimate for LA in any case. */
3182 s390_load_address (target, src);
3183 }
3184
3185
3186 /* Return true if ADDR is a valid memory address.
3187 STRICT specifies whether strict register checking applies. */
3188
3189 static bool
3190 s390_legitimate_address_p (enum machine_mode mode, rtx addr, bool strict)
3191 {
3192 struct s390_address ad;
3193
3194 if (TARGET_Z10
3195 && larl_operand (addr, VOIDmode)
3196 && (mode == VOIDmode
3197 || s390_check_symref_alignment (addr, GET_MODE_SIZE (mode))))
3198 return true;
3199
3200 if (!s390_decompose_address (addr, &ad))
3201 return false;
3202
3203 if (strict)
3204 {
3205 if (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
3206 return false;
3207
3208 if (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx)))
3209 return false;
3210 }
3211 else
3212 {
3213 if (ad.base
3214 && !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
3215 || REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
3216 return false;
3217
3218 if (ad.indx
3219 && !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
3220 || REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
3221 return false;
3222 }
3223 return true;
3224 }
3225
3226 /* Return true if OP is a valid operand for the LA instruction.
3227 In 31-bit, we need to prove that the result is used as an
3228 address, as LA performs only a 31-bit addition. */
3229
3230 bool
3231 legitimate_la_operand_p (rtx op)
3232 {
3233 struct s390_address addr;
3234 if (!s390_decompose_address (op, &addr))
3235 return false;
3236
3237 return (TARGET_64BIT || addr.pointer);
3238 }
3239
3240 /* Return true if it is valid *and* preferable to use LA to
3241 compute the sum of OP1 and OP2. */
3242
3243 bool
3244 preferred_la_operand_p (rtx op1, rtx op2)
3245 {
3246 struct s390_address addr;
3247
3248 if (op2 != const0_rtx)
3249 op1 = gen_rtx_PLUS (Pmode, op1, op2);
3250
3251 if (!s390_decompose_address (op1, &addr))
3252 return false;
3253 if (addr.base && !REGNO_OK_FOR_BASE_P (REGNO (addr.base)))
3254 return false;
3255 if (addr.indx && !REGNO_OK_FOR_INDEX_P (REGNO (addr.indx)))
3256 return false;
3257
3258 /* Avoid LA instructions with index register on z196; it is
3259 preferable to use regular add instructions when possible. */
3260 if (addr.indx && s390_tune == PROCESSOR_2817_Z196)
3261 return false;
3262
3263 if (!TARGET_64BIT && !addr.pointer)
3264 return false;
3265
3266 if (addr.pointer)
3267 return true;
3268
3269 if ((addr.base && REG_P (addr.base) && REG_POINTER (addr.base))
3270 || (addr.indx && REG_P (addr.indx) && REG_POINTER (addr.indx)))
3271 return true;
3272
3273 return false;
3274 }
3275
3276 /* Emit a forced load-address operation to load SRC into DST.
3277 This will use the LOAD ADDRESS instruction even in situations
3278 where legitimate_la_operand_p (SRC) returns false. */
3279
3280 void
3281 s390_load_address (rtx dst, rtx src)
3282 {
3283 if (TARGET_64BIT)
3284 emit_move_insn (dst, src);
3285 else
3286 emit_insn (gen_force_la_31 (dst, src));
3287 }
3288
3289 /* Return a legitimate reference for ORIG (an address) using the
3290 register REG. If REG is 0, a new pseudo is generated.
3291
3292 There are two types of references that must be handled:
3293
3294 1. Global data references must load the address from the GOT, via
3295 the PIC reg. An insn is emitted to do this load, and the reg is
3296 returned.
3297
3298 2. Static data references, constant pool addresses, and code labels
3299 compute the address as an offset from the GOT, whose base is in
3300 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
3301 differentiate them from global data objects. The returned
3302 address is the PIC reg + an unspec constant.
3303
3304 TARGET_LEGITIMATE_ADDRESS_P rejects symbolic references unless the PIC
3305 reg also appears in the address. */
3306
3307 rtx
3308 legitimize_pic_address (rtx orig, rtx reg)
3309 {
3310 rtx addr = orig;
3311 rtx new_rtx = orig;
3312 rtx base;
3313
3314 gcc_assert (!TLS_SYMBOLIC_CONST (addr));
3315
3316 if (GET_CODE (addr) == LABEL_REF
3317 || (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (addr)))
3318 {
3319 /* This is a local symbol. */
3320 if (TARGET_CPU_ZARCH && larl_operand (addr, VOIDmode))
3321 {
3322 /* Access local symbols PC-relative via LARL.
3323 This is the same as in the non-PIC case, so it is
3324 handled automatically ... */
3325 }
3326 else
3327 {
3328 /* Access local symbols relative to the GOT. */
3329
3330 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3331
3332 if (reload_in_progress || reload_completed)
3333 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3334
3335 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
3336 addr = gen_rtx_CONST (Pmode, addr);
3337 addr = force_const_mem (Pmode, addr);
3338 emit_move_insn (temp, addr);
3339
3340 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3341 if (reg != 0)
3342 {
3343 s390_load_address (reg, new_rtx);
3344 new_rtx = reg;
3345 }
3346 }
3347 }
3348 else if (GET_CODE (addr) == SYMBOL_REF)
3349 {
3350 if (reg == 0)
3351 reg = gen_reg_rtx (Pmode);
3352
3353 if (flag_pic == 1)
3354 {
3355 /* Assume GOT offset < 4k. This is handled the same way
3356 in both 31- and 64-bit code (@GOT). */
3357
3358 if (reload_in_progress || reload_completed)
3359 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3360
3361 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3362 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3363 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
3364 new_rtx = gen_const_mem (Pmode, new_rtx);
3365 emit_move_insn (reg, new_rtx);
3366 new_rtx = reg;
3367 }
3368 else if (TARGET_CPU_ZARCH)
3369 {
3370 /* If the GOT offset might be >= 4k, we determine the position
3371 of the GOT entry via a PC-relative LARL (@GOTENT). */
3372
3373 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3374
3375 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
3376 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
3377
3378 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
3379 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3380 emit_move_insn (temp, new_rtx);
3381
3382 new_rtx = gen_const_mem (Pmode, temp);
3383 emit_move_insn (reg, new_rtx);
3384 new_rtx = reg;
3385 }
3386 else
3387 {
3388 /* If the GOT offset might be >= 4k, we have to load it
3389 from the literal pool (@GOT). */
3390
3391 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3392
3393 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
3394 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
3395
3396 if (reload_in_progress || reload_completed)
3397 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3398
3399 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3400 addr = gen_rtx_CONST (Pmode, addr);
3401 addr = force_const_mem (Pmode, addr);
3402 emit_move_insn (temp, addr);
3403
3404 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3405 new_rtx = gen_const_mem (Pmode, new_rtx);
3406 emit_move_insn (reg, new_rtx);
3407 new_rtx = reg;
3408 }
3409 }
3410 else
3411 {
3412 if (GET_CODE (addr) == CONST)
3413 {
3414 addr = XEXP (addr, 0);
3415 if (GET_CODE (addr) == UNSPEC)
3416 {
3417 gcc_assert (XVECLEN (addr, 0) == 1);
3418 switch (XINT (addr, 1))
3419 {
3420 /* If someone moved a GOT-relative UNSPEC
3421 out of the literal pool, force them back in. */
3422 case UNSPEC_GOTOFF:
3423 case UNSPEC_PLTOFF:
3424 new_rtx = force_const_mem (Pmode, orig);
3425 break;
3426
3427 /* @GOT is OK as is if small. */
3428 case UNSPEC_GOT:
3429 if (flag_pic == 2)
3430 new_rtx = force_const_mem (Pmode, orig);
3431 break;
3432
3433 /* @GOTENT is OK as is. */
3434 case UNSPEC_GOTENT:
3435 break;
3436
3437 /* @PLT is OK as is on 64-bit, must be converted to
3438 GOT-relative @PLTOFF on 31-bit. */
3439 case UNSPEC_PLT:
3440 if (!TARGET_CPU_ZARCH)
3441 {
3442 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3443
3444 if (reload_in_progress || reload_completed)
3445 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3446
3447 addr = XVECEXP (addr, 0, 0);
3448 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr),
3449 UNSPEC_PLTOFF);
3450 addr = gen_rtx_CONST (Pmode, addr);
3451 addr = force_const_mem (Pmode, addr);
3452 emit_move_insn (temp, addr);
3453
3454 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3455 if (reg != 0)
3456 {
3457 s390_load_address (reg, new_rtx);
3458 new_rtx = reg;
3459 }
3460 }
3461 break;
3462
3463 /* Everything else cannot happen. */
3464 default:
3465 gcc_unreachable ();
3466 }
3467 }
3468 else
3469 gcc_assert (GET_CODE (addr) == PLUS);
3470 }
3471 if (GET_CODE (addr) == PLUS)
3472 {
3473 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
3474
3475 gcc_assert (!TLS_SYMBOLIC_CONST (op0));
3476 gcc_assert (!TLS_SYMBOLIC_CONST (op1));
3477
3478 /* Check first to see if this is a constant offset
3479 from a local symbol reference. */
3480 if ((GET_CODE (op0) == LABEL_REF
3481 || (GET_CODE (op0) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (op0)))
3482 && GET_CODE (op1) == CONST_INT)
3483 {
3484 if (TARGET_CPU_ZARCH
3485 && larl_operand (op0, VOIDmode)
3486 && INTVAL (op1) < (HOST_WIDE_INT)1 << 31
3487 && INTVAL (op1) >= -((HOST_WIDE_INT)1 << 31))
3488 {
3489 if (INTVAL (op1) & 1)
3490 {
3491 /* LARL can't handle odd offsets, so emit a
3492 pair of LARL and LA. */
3493 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3494
3495 if (!DISP_IN_RANGE (INTVAL (op1)))
3496 {
3497 HOST_WIDE_INT even = INTVAL (op1) - 1;
3498 op0 = gen_rtx_PLUS (Pmode, op0, GEN_INT (even));
3499 op0 = gen_rtx_CONST (Pmode, op0);
3500 op1 = const1_rtx;
3501 }
3502
3503 emit_move_insn (temp, op0);
3504 new_rtx = gen_rtx_PLUS (Pmode, temp, op1);
3505
3506 if (reg != 0)
3507 {
3508 s390_load_address (reg, new_rtx);
3509 new_rtx = reg;
3510 }
3511 }
3512 else
3513 {
3514 /* If the offset is even, we can just use LARL.
3515 This will happen automatically. */
3516 }
3517 }
3518 else
3519 {
3520 /* Access local symbols relative to the GOT. */
3521
3522 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3523
3524 if (reload_in_progress || reload_completed)
3525 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3526
3527 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
3528 UNSPEC_GOTOFF);
3529 addr = gen_rtx_PLUS (Pmode, addr, op1);
3530 addr = gen_rtx_CONST (Pmode, addr);
3531 addr = force_const_mem (Pmode, addr);
3532 emit_move_insn (temp, addr);
3533
3534 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3535 if (reg != 0)
3536 {
3537 s390_load_address (reg, new_rtx);
3538 new_rtx = reg;
3539 }
3540 }
3541 }
3542
3543 /* Now, check whether it is a GOT relative symbol plus offset
3544 that was pulled out of the literal pool. Force it back in. */
3545
3546 else if (GET_CODE (op0) == UNSPEC
3547 && GET_CODE (op1) == CONST_INT
3548 && XINT (op0, 1) == UNSPEC_GOTOFF)
3549 {
3550 gcc_assert (XVECLEN (op0, 0) == 1);
3551
3552 new_rtx = force_const_mem (Pmode, orig);
3553 }
3554
3555 /* Otherwise, compute the sum. */
3556 else
3557 {
3558 base = legitimize_pic_address (XEXP (addr, 0), reg);
3559 new_rtx = legitimize_pic_address (XEXP (addr, 1),
3560 base == reg ? NULL_RTX : reg);
3561 if (GET_CODE (new_rtx) == CONST_INT)
3562 new_rtx = plus_constant (base, INTVAL (new_rtx));
3563 else
3564 {
3565 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
3566 {
3567 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
3568 new_rtx = XEXP (new_rtx, 1);
3569 }
3570 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
3571 }
3572
3573 if (GET_CODE (new_rtx) == CONST)
3574 new_rtx = XEXP (new_rtx, 0);
3575 new_rtx = force_operand (new_rtx, 0);
3576 }
3577 }
3578 }
3579 return new_rtx;
3580 }
3581
3582 /* Load the thread pointer into a register. */
3583
3584 rtx
3585 s390_get_thread_pointer (void)
3586 {
3587 rtx tp = gen_reg_rtx (Pmode);
3588
3589 emit_move_insn (tp, gen_rtx_REG (Pmode, TP_REGNUM));
3590 mark_reg_pointer (tp, BITS_PER_WORD);
3591
3592 return tp;
3593 }
3594
3595 /* Emit a TLS call insn. The call target is the SYMBOL_REF stored
3596 in s390_tls_symbol, which always refers to __tls_get_offset.
3597 The returned offset is written to RESULT_REG and a USE rtx is
3598 generated for TLS_CALL. */
3599
3600 static GTY(()) rtx s390_tls_symbol;
3601
3602 static void
3603 s390_emit_tls_call_insn (rtx result_reg, rtx tls_call)
3604 {
3605 rtx insn;
3606
3607 if (!flag_pic)
3608 emit_insn (s390_load_got ());
3609
3610 if (!s390_tls_symbol)
3611 s390_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_offset");
3612
3613 insn = s390_emit_call (s390_tls_symbol, tls_call, result_reg,
3614 gen_rtx_REG (Pmode, RETURN_REGNUM));
3615
3616 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), result_reg);
3617 RTL_CONST_CALL_P (insn) = 1;
3618 }
3619
3620 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3621 this (thread-local) address. REG may be used as temporary. */
3622
3623 static rtx
3624 legitimize_tls_address (rtx addr, rtx reg)
3625 {
3626 rtx new_rtx, tls_call, temp, base, r2, insn;
3627
3628 if (GET_CODE (addr) == SYMBOL_REF)
3629 switch (tls_symbolic_operand (addr))
3630 {
3631 case TLS_MODEL_GLOBAL_DYNAMIC:
3632 start_sequence ();
3633 r2 = gen_rtx_REG (Pmode, 2);
3634 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_TLSGD);
3635 new_rtx = gen_rtx_CONST (Pmode, tls_call);
3636 new_rtx = force_const_mem (Pmode, new_rtx);
3637 emit_move_insn (r2, new_rtx);
3638 s390_emit_tls_call_insn (r2, tls_call);
3639 insn = get_insns ();
3640 end_sequence ();
3641
3642 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3643 temp = gen_reg_rtx (Pmode);
3644 emit_libcall_block (insn, temp, r2, new_rtx);
3645
3646 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3647 if (reg != 0)
3648 {
3649 s390_load_address (reg, new_rtx);
3650 new_rtx = reg;
3651 }
3652 break;
3653
3654 case TLS_MODEL_LOCAL_DYNAMIC:
3655 start_sequence ();
3656 r2 = gen_rtx_REG (Pmode, 2);
3657 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM);
3658 new_rtx = gen_rtx_CONST (Pmode, tls_call);
3659 new_rtx = force_const_mem (Pmode, new_rtx);
3660 emit_move_insn (r2, new_rtx);
3661 s390_emit_tls_call_insn (r2, tls_call);
3662 insn = get_insns ();
3663 end_sequence ();
3664
3665 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM_NTPOFF);
3666 temp = gen_reg_rtx (Pmode);
3667 emit_libcall_block (insn, temp, r2, new_rtx);
3668
3669 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3670 base = gen_reg_rtx (Pmode);
3671 s390_load_address (base, new_rtx);
3672
3673 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_DTPOFF);
3674 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3675 new_rtx = force_const_mem (Pmode, new_rtx);
3676 temp = gen_reg_rtx (Pmode);
3677 emit_move_insn (temp, new_rtx);
3678
3679 new_rtx = gen_rtx_PLUS (Pmode, base, temp);
3680 if (reg != 0)
3681 {
3682 s390_load_address (reg, new_rtx);
3683 new_rtx = reg;
3684 }
3685 break;
3686
3687 case TLS_MODEL_INITIAL_EXEC:
3688 if (flag_pic == 1)
3689 {
3690 /* Assume GOT offset < 4k. This is handled the same way
3691 in both 31- and 64-bit code. */
3692
3693 if (reload_in_progress || reload_completed)
3694 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3695
3696 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3697 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3698 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
3699 new_rtx = gen_const_mem (Pmode, new_rtx);
3700 temp = gen_reg_rtx (Pmode);
3701 emit_move_insn (temp, new_rtx);
3702 }
3703 else if (TARGET_CPU_ZARCH)
3704 {
3705 /* If the GOT offset might be >= 4k, we determine the position
3706 of the GOT entry via a PC-relative LARL. */
3707
3708 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3709 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3710 temp = gen_reg_rtx (Pmode);
3711 emit_move_insn (temp, new_rtx);
3712
3713 new_rtx = gen_const_mem (Pmode, temp);
3714 temp = gen_reg_rtx (Pmode);
3715 emit_move_insn (temp, new_rtx);
3716 }
3717 else if (flag_pic)
3718 {
3719 /* If the GOT offset might be >= 4k, we have to load it
3720 from the literal pool. */
3721
3722 if (reload_in_progress || reload_completed)
3723 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3724
3725 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3726 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3727 new_rtx = force_const_mem (Pmode, new_rtx);
3728 temp = gen_reg_rtx (Pmode);
3729 emit_move_insn (temp, new_rtx);
3730
3731 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3732 new_rtx = gen_const_mem (Pmode, new_rtx);
3733
3734 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
3735 temp = gen_reg_rtx (Pmode);
3736 emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
3737 }
3738 else
3739 {
3740 /* In position-dependent code, load the absolute address of
3741 the GOT entry from the literal pool. */
3742
3743 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3744 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3745 new_rtx = force_const_mem (Pmode, new_rtx);
3746 temp = gen_reg_rtx (Pmode);
3747 emit_move_insn (temp, new_rtx);
3748
3749 new_rtx = temp;
3750 new_rtx = gen_const_mem (Pmode, new_rtx);
3751 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
3752 temp = gen_reg_rtx (Pmode);
3753 emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
3754 }
3755
3756 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3757 if (reg != 0)
3758 {
3759 s390_load_address (reg, new_rtx);
3760 new_rtx = reg;
3761 }
3762 break;
3763
3764 case TLS_MODEL_LOCAL_EXEC:
3765 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3766 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3767 new_rtx = force_const_mem (Pmode, new_rtx);
3768 temp = gen_reg_rtx (Pmode);
3769 emit_move_insn (temp, new_rtx);
3770
3771 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3772 if (reg != 0)
3773 {
3774 s390_load_address (reg, new_rtx);
3775 new_rtx = reg;
3776 }
3777 break;
3778
3779 default:
3780 gcc_unreachable ();
3781 }
3782
3783 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == UNSPEC)
3784 {
3785 switch (XINT (XEXP (addr, 0), 1))
3786 {
3787 case UNSPEC_INDNTPOFF:
3788 gcc_assert (TARGET_CPU_ZARCH);
3789 new_rtx = addr;
3790 break;
3791
3792 default:
3793 gcc_unreachable ();
3794 }
3795 }
3796
3797 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
3798 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
3799 {
3800 new_rtx = XEXP (XEXP (addr, 0), 0);
3801 if (GET_CODE (new_rtx) != SYMBOL_REF)
3802 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3803
3804 new_rtx = legitimize_tls_address (new_rtx, reg);
3805 new_rtx = plus_constant (new_rtx, INTVAL (XEXP (XEXP (addr, 0), 1)));
3806 new_rtx = force_operand (new_rtx, 0);
3807 }
3808
3809 else
3810 gcc_unreachable (); /* for now ... */
3811
3812 return new_rtx;
3813 }
3814
3815 /* Emit insns making the address in operands[1] valid for a standard
3816 move to operands[0]. operands[1] is replaced by an address which
3817 should be used instead of the former RTX to emit the move
3818 pattern. */
3819
3820 void
3821 emit_symbolic_move (rtx *operands)
3822 {
3823 rtx temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
3824
3825 if (GET_CODE (operands[0]) == MEM)
3826 operands[1] = force_reg (Pmode, operands[1]);
3827 else if (TLS_SYMBOLIC_CONST (operands[1]))
3828 operands[1] = legitimize_tls_address (operands[1], temp);
3829 else if (flag_pic)
3830 operands[1] = legitimize_pic_address (operands[1], temp);
3831 }
3832
3833 /* Try machine-dependent ways of modifying an illegitimate address X
3834 to be legitimate. If we find one, return the new, valid address.
3835
3836 OLDX is the address as it was before break_out_memory_refs was called.
3837 In some cases it is useful to look at this to decide what needs to be done.
3838
3839 MODE is the mode of the operand pointed to by X.
3840
3841 When -fpic is used, special handling is needed for symbolic references.
3842 See comments by legitimize_pic_address for details. */
3843
3844 static rtx
3845 s390_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
3846 enum machine_mode mode ATTRIBUTE_UNUSED)
3847 {
3848 rtx constant_term = const0_rtx;
3849
3850 if (TLS_SYMBOLIC_CONST (x))
3851 {
3852 x = legitimize_tls_address (x, 0);
3853
3854 if (s390_legitimate_address_p (mode, x, FALSE))
3855 return x;
3856 }
3857 else if (GET_CODE (x) == PLUS
3858 && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
3859 || TLS_SYMBOLIC_CONST (XEXP (x, 1))))
3860 {
3861 return x;
3862 }
3863 else if (flag_pic)
3864 {
3865 if (SYMBOLIC_CONST (x)
3866 || (GET_CODE (x) == PLUS
3867 && (SYMBOLIC_CONST (XEXP (x, 0))
3868 || SYMBOLIC_CONST (XEXP (x, 1)))))
3869 x = legitimize_pic_address (x, 0);
3870
3871 if (s390_legitimate_address_p (mode, x, FALSE))
3872 return x;
3873 }
3874
3875 x = eliminate_constant_term (x, &constant_term);
3876
3877 /* Optimize loading of large displacements by splitting them
3878 into the multiple of 4K and the rest; this allows the
3879 former to be CSE'd if possible.
3880
3881 Don't do this if the displacement is added to a register
3882 pointing into the stack frame, as the offsets will
3883 change later anyway. */
3884
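/* Illustrative example (not part of the original source): a constant
   term of 0x12345 is split below into 0x12000, which is forced into a
   register, plus a remaining displacement of 0x345 that fits the 12-bit
   D field.  */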
3885 if (GET_CODE (constant_term) == CONST_INT
3886 && !TARGET_LONG_DISPLACEMENT
3887 && !DISP_IN_RANGE (INTVAL (constant_term))
3888 && !(REG_P (x) && REGNO_PTR_FRAME_P (REGNO (x))))
3889 {
3890 HOST_WIDE_INT lower = INTVAL (constant_term) & 0xfff;
3891 HOST_WIDE_INT upper = INTVAL (constant_term) ^ lower;
3892
3893 rtx temp = gen_reg_rtx (Pmode);
3894 rtx val = force_operand (GEN_INT (upper), temp);
3895 if (val != temp)
3896 emit_move_insn (temp, val);
3897
3898 x = gen_rtx_PLUS (Pmode, x, temp);
3899 constant_term = GEN_INT (lower);
3900 }
3901
3902 if (GET_CODE (x) == PLUS)
3903 {
3904 if (GET_CODE (XEXP (x, 0)) == REG)
3905 {
3906 rtx temp = gen_reg_rtx (Pmode);
3907 rtx val = force_operand (XEXP (x, 1), temp);
3908 if (val != temp)
3909 emit_move_insn (temp, val);
3910
3911 x = gen_rtx_PLUS (Pmode, XEXP (x, 0), temp);
3912 }
3913
3914 else if (GET_CODE (XEXP (x, 1)) == REG)
3915 {
3916 rtx temp = gen_reg_rtx (Pmode);
3917 rtx val = force_operand (XEXP (x, 0), temp);
3918 if (val != temp)
3919 emit_move_insn (temp, val);
3920
3921 x = gen_rtx_PLUS (Pmode, temp, XEXP (x, 1));
3922 }
3923 }
3924
3925 if (constant_term != const0_rtx)
3926 x = gen_rtx_PLUS (Pmode, x, constant_term);
3927
3928 return x;
3929 }
3930
3931 /* Try a machine-dependent way of reloading an illegitimate address AD
3932 operand. If we find one, push the reload and return the new address.
3933
3934 MODE is the mode of the enclosing MEM. OPNUM is the operand number
3935 and TYPE is the reload type of the current reload. */
3936
3937 rtx
3938 legitimize_reload_address (rtx ad, enum machine_mode mode ATTRIBUTE_UNUSED,
3939 int opnum, int type)
3940 {
3941 if (!optimize || TARGET_LONG_DISPLACEMENT)
3942 return NULL_RTX;
3943
3944 if (GET_CODE (ad) == PLUS)
3945 {
3946 rtx tem = simplify_binary_operation (PLUS, Pmode,
3947 XEXP (ad, 0), XEXP (ad, 1));
3948 if (tem)
3949 ad = tem;
3950 }
3951
3952 if (GET_CODE (ad) == PLUS
3953 && GET_CODE (XEXP (ad, 0)) == REG
3954 && GET_CODE (XEXP (ad, 1)) == CONST_INT
3955 && !DISP_IN_RANGE (INTVAL (XEXP (ad, 1))))
3956 {
3957 HOST_WIDE_INT lower = INTVAL (XEXP (ad, 1)) & 0xfff;
3958 HOST_WIDE_INT upper = INTVAL (XEXP (ad, 1)) ^ lower;
3959 rtx cst, tem, new_rtx;
3960
3961 cst = GEN_INT (upper);
3962 if (!legitimate_reload_constant_p (cst))
3963 cst = force_const_mem (Pmode, cst);
3964
3965 tem = gen_rtx_PLUS (Pmode, XEXP (ad, 0), cst);
3966 new_rtx = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));
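       /* Illustrative shape of the rewrite: (plus reg 0x12345) becomes
	  (plus (plus reg 0x12000) 0x345), and the 0x12000 constant is
	  reloaded into a base register by the push_reload below.  */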
3967
3968 push_reload (XEXP (tem, 1), 0, &XEXP (tem, 1), 0,
3969 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
3970 opnum, (enum reload_type) type);
3971 return new_rtx;
3972 }
3973
3974 return NULL_RTX;
3975 }
3976
3977 /* Emit code to move LEN bytes from SRC to DST. */
3978
3979 void
3980 s390_expand_movmem (rtx dst, rtx src, rtx len)
3981 {
3982 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
3983 {
3984 if (INTVAL (len) > 0)
3985 emit_insn (gen_movmem_short (dst, src, GEN_INT (INTVAL (len) - 1)));
3986 }
3987
3988 else if (TARGET_MVCLE)
3989 {
3990 emit_insn (gen_movmem_long (dst, src, convert_to_mode (Pmode, len, 1)));
3991 }
3992
3993 else
3994 {
3995 rtx dst_addr, src_addr, count, blocks, temp;
3996 rtx loop_start_label = gen_label_rtx ();
3997 rtx loop_end_label = gen_label_rtx ();
3998 rtx end_label = gen_label_rtx ();
3999 enum machine_mode mode;
4000
4001 mode = GET_MODE (len);
4002 if (mode == VOIDmode)
4003 mode = Pmode;
4004
4005 dst_addr = gen_reg_rtx (Pmode);
4006 src_addr = gen_reg_rtx (Pmode);
4007 count = gen_reg_rtx (mode);
4008 blocks = gen_reg_rtx (mode);
4009
4010 convert_move (count, len, 1);
4011 emit_cmp_and_jump_insns (count, const0_rtx,
4012 EQ, NULL_RTX, mode, 1, end_label);
4013
4014 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
4015 emit_move_insn (src_addr, force_operand (XEXP (src, 0), NULL_RTX));
4016 dst = change_address (dst, VOIDmode, dst_addr);
4017 src = change_address (src, VOIDmode, src_addr);
4018
4019 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4020 OPTAB_DIRECT);
4021 if (temp != count)
4022 emit_move_insn (count, temp);
4023
4024 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4025 OPTAB_DIRECT);
4026 if (temp != blocks)
4027 emit_move_insn (blocks, temp);
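       /* Illustrative walk-through: for LEN = 600, COUNT becomes 599 and
	  BLOCKS = 599 >> 8 = 2, so the loop below issues two 256-byte
	  moves and the final movmem_short after the loop handles the
	  remaining 88 bytes (assuming its length operand uses only the
	  low byte of COUNT, i.e. 87, as a length-1 value).  */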
4028
4029 emit_cmp_and_jump_insns (blocks, const0_rtx,
4030 EQ, NULL_RTX, mode, 1, loop_end_label);
4031
4032 emit_label (loop_start_label);
4033
4034 if (TARGET_Z10
4035 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 768))
4036 {
4037 rtx prefetch;
4038
4039 /* Issue a read prefetch for the +3 cache line. */
4040 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, src_addr, GEN_INT (768)),
4041 const0_rtx, const0_rtx);
4042 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4043 emit_insn (prefetch);
4044
4045 /* Issue a write prefetch for the +3 cache line. */
4046 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (768)),
4047 const1_rtx, const0_rtx);
4048 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4049 emit_insn (prefetch);
4050 }
4051
4052 emit_insn (gen_movmem_short (dst, src, GEN_INT (255)));
4053 s390_load_address (dst_addr,
4054 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
4055 s390_load_address (src_addr,
4056 gen_rtx_PLUS (Pmode, src_addr, GEN_INT (256)));
4057
4058 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4059 OPTAB_DIRECT);
4060 if (temp != blocks)
4061 emit_move_insn (blocks, temp);
4062
4063 emit_cmp_and_jump_insns (blocks, const0_rtx,
4064 EQ, NULL_RTX, mode, 1, loop_end_label);
4065
4066 emit_jump (loop_start_label);
4067 emit_label (loop_end_label);
4068
4069 emit_insn (gen_movmem_short (dst, src,
4070 convert_to_mode (Pmode, count, 1)));
4071 emit_label (end_label);
4072 }
4073 }
4074
4075 /* Emit code to set LEN bytes at DST to VAL.
4076 Make use of clrmem if VAL is zero. */
4077
4078 void
4079 s390_expand_setmem (rtx dst, rtx len, rtx val)
4080 {
4081 if (GET_CODE (len) == CONST_INT && INTVAL (len) == 0)
4082 return;
4083
4084 gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);
4085
4086 if (GET_CODE (len) == CONST_INT && INTVAL (len) > 0 && INTVAL (len) <= 257)
4087 {
4088 if (val == const0_rtx && INTVAL (len) <= 256)
4089 emit_insn (gen_clrmem_short (dst, GEN_INT (INTVAL (len) - 1)));
4090 else
4091 {
4092 /* Initialize memory by storing the first byte. */
4093 emit_move_insn (adjust_address (dst, QImode, 0), val);
4094
4095 if (INTVAL (len) > 1)
4096 {
4097 /* Initiate 1 byte overlap move.
4098 The first byte of DST is propagated through DSTP1.
4099 Prepare a movmem for: DST+1 = DST (length = LEN - 1).
4100 DST is set to size 1 so the rest of the memory location
4101 does not count as source operand. */
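	   /* Illustrative walk-through: for LEN = 5 and VAL = 'x', the
	      byte store above puts 'x' at DST[0]; the overlapping move
	      then copies DST[0..3] to DST[1..4], replicating 'x' across
	      all five bytes.  */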
4102 rtx dstp1 = adjust_address (dst, VOIDmode, 1);
4103 set_mem_size (dst, 1);
4104
4105 emit_insn (gen_movmem_short (dstp1, dst,
4106 GEN_INT (INTVAL (len) - 2)));
4107 }
4108 }
4109 }
4110
4111 else if (TARGET_MVCLE)
4112 {
4113 val = force_not_mem (convert_modes (Pmode, QImode, val, 1));
4114 emit_insn (gen_setmem_long (dst, convert_to_mode (Pmode, len, 1), val));
4115 }
4116
4117 else
4118 {
4119 rtx dst_addr, count, blocks, temp, dstp1 = NULL_RTX;
4120 rtx loop_start_label = gen_label_rtx ();
4121 rtx loop_end_label = gen_label_rtx ();
4122 rtx end_label = gen_label_rtx ();
4123 enum machine_mode mode;
4124
4125 mode = GET_MODE (len);
4126 if (mode == VOIDmode)
4127 mode = Pmode;
4128
4129 dst_addr = gen_reg_rtx (Pmode);
4130 count = gen_reg_rtx (mode);
4131 blocks = gen_reg_rtx (mode);
4132
4133 convert_move (count, len, 1);
4134 emit_cmp_and_jump_insns (count, const0_rtx,
4135 EQ, NULL_RTX, mode, 1, end_label);
4136
4137 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
4138 dst = change_address (dst, VOIDmode, dst_addr);
4139
4140 if (val == const0_rtx)
4141 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4142 OPTAB_DIRECT);
4143 else
4144 {
4145 dstp1 = adjust_address (dst, VOIDmode, 1);
4146 set_mem_size (dst, 1);
4147
4148 /* Initialize memory by storing the first byte. */
4149 emit_move_insn (adjust_address (dst, QImode, 0), val);
4150
4151 /* If count is 1 we are done. */
4152 emit_cmp_and_jump_insns (count, const1_rtx,
4153 EQ, NULL_RTX, mode, 1, end_label);
4154
4155 temp = expand_binop (mode, add_optab, count, GEN_INT (-2), count, 1,
4156 OPTAB_DIRECT);
4157 }
4158 if (temp != count)
4159 emit_move_insn (count, temp);
4160
4161 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4162 OPTAB_DIRECT);
4163 if (temp != blocks)
4164 emit_move_insn (blocks, temp);
4165
4166 emit_cmp_and_jump_insns (blocks, const0_rtx,
4167 EQ, NULL_RTX, mode, 1, loop_end_label);
4168
4169 emit_label (loop_start_label);
4170
4171 if (TARGET_Z10
4172 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 1024))
4173 {
4174 /* Issue a write prefetch for the +4 cache line. */
4175 rtx prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr,
4176 GEN_INT (1024)),
4177 const1_rtx, const0_rtx);
4178 emit_insn (prefetch);
4179 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4180 }
4181
4182 if (val == const0_rtx)
4183 emit_insn (gen_clrmem_short (dst, GEN_INT (255)));
4184 else
4185 emit_insn (gen_movmem_short (dstp1, dst, GEN_INT (255)));
4186 s390_load_address (dst_addr,
4187 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
4188
4189 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4190 OPTAB_DIRECT);
4191 if (temp != blocks)
4192 emit_move_insn (blocks, temp);
4193
4194 emit_cmp_and_jump_insns (blocks, const0_rtx,
4195 EQ, NULL_RTX, mode, 1, loop_end_label);
4196
4197 emit_jump (loop_start_label);
4198 emit_label (loop_end_label);
4199
4200 if (val == const0_rtx)
4201 emit_insn (gen_clrmem_short (dst, convert_to_mode (Pmode, count, 1)));
4202 else
4203 emit_insn (gen_movmem_short (dstp1, dst, convert_to_mode (Pmode, count, 1)));
4204 emit_label (end_label);
4205 }
4206 }
4207
4208 /* Emit code to compare LEN bytes at OP0 with those at OP1,
4209 and return the result in TARGET. */
4210
4211 void
4212 s390_expand_cmpmem (rtx target, rtx op0, rtx op1, rtx len)
4213 {
4214 rtx ccreg = gen_rtx_REG (CCUmode, CC_REGNUM);
4215 rtx tmp;
4216
4217 /* As the result of CMPINT is inverted compared to what we need,
4218 we have to swap the operands. */
4219 tmp = op0; op0 = op1; op1 = tmp;
4220
4221 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
4222 {
4223 if (INTVAL (len) > 0)
4224 {
4225 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (INTVAL (len) - 1)));
4226 emit_insn (gen_cmpint (target, ccreg));
4227 }
4228 else
4229 emit_move_insn (target, const0_rtx);
4230 }
4231 else if (TARGET_MVCLE)
4232 {
4233 emit_insn (gen_cmpmem_long (op0, op1, convert_to_mode (Pmode, len, 1)));
4234 emit_insn (gen_cmpint (target, ccreg));
4235 }
4236 else
4237 {
4238 rtx addr0, addr1, count, blocks, temp;
4239 rtx loop_start_label = gen_label_rtx ();
4240 rtx loop_end_label = gen_label_rtx ();
4241 rtx end_label = gen_label_rtx ();
4242 enum machine_mode mode;
4243
4244 mode = GET_MODE (len);
4245 if (mode == VOIDmode)
4246 mode = Pmode;
4247
4248 addr0 = gen_reg_rtx (Pmode);
4249 addr1 = gen_reg_rtx (Pmode);
4250 count = gen_reg_rtx (mode);
4251 blocks = gen_reg_rtx (mode);
4252
4253 convert_move (count, len, 1);
4254 emit_cmp_and_jump_insns (count, const0_rtx,
4255 EQ, NULL_RTX, mode, 1, end_label);
4256
4257 emit_move_insn (addr0, force_operand (XEXP (op0, 0), NULL_RTX));
4258 emit_move_insn (addr1, force_operand (XEXP (op1, 0), NULL_RTX));
4259 op0 = change_address (op0, VOIDmode, addr0);
4260 op1 = change_address (op1, VOIDmode, addr1);
4261
4262 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4263 OPTAB_DIRECT);
4264 if (temp != count)
4265 emit_move_insn (count, temp);
4266
4267 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4268 OPTAB_DIRECT);
4269 if (temp != blocks)
4270 emit_move_insn (blocks, temp);
4271
4272 emit_cmp_and_jump_insns (blocks, const0_rtx,
4273 EQ, NULL_RTX, mode, 1, loop_end_label);
4274
4275 emit_label (loop_start_label);
4276
4277 if (TARGET_Z10
4278 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 512))
4279 {
4280 rtx prefetch;
4281
4282 /* Issue a read prefetch for the +2 cache line of operand 1. */
4283 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr0, GEN_INT (512)),
4284 const0_rtx, const0_rtx);
4285 emit_insn (prefetch);
4286 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4287
4288 /* Issue a read prefetch for the +2 cache line of operand 2. */
4289 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr1, GEN_INT (512)),
4290 const0_rtx, const0_rtx);
4291 emit_insn (prefetch);
4292 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4293 }
4294
4295 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (255)));
4296 temp = gen_rtx_NE (VOIDmode, ccreg, const0_rtx);
4297 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
4298 gen_rtx_LABEL_REF (VOIDmode, end_label), pc_rtx);
4299 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
4300 emit_jump_insn (temp);
4301
4302 s390_load_address (addr0,
4303 gen_rtx_PLUS (Pmode, addr0, GEN_INT (256)));
4304 s390_load_address (addr1,
4305 gen_rtx_PLUS (Pmode, addr1, GEN_INT (256)));
4306
4307 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4308 OPTAB_DIRECT);
4309 if (temp != blocks)
4310 emit_move_insn (blocks, temp);
4311
4312 emit_cmp_and_jump_insns (blocks, const0_rtx,
4313 EQ, NULL_RTX, mode, 1, loop_end_label);
4314
4315 emit_jump (loop_start_label);
4316 emit_label (loop_end_label);
4317
4318 emit_insn (gen_cmpmem_short (op0, op1,
4319 convert_to_mode (Pmode, count, 1)));
4320 emit_label (end_label);
4321
4322 emit_insn (gen_cmpint (target, ccreg));
4323 }
4324 }
4325
4326
4327 /* Expand conditional increment or decrement using alc/slb instructions.
4328 Should generate code setting DST to either SRC or SRC + INCREMENT,
4329 depending on the result of the comparison CMP_OP0 CMP_CODE CMP_OP1.
4330 Returns true if successful, false otherwise.
4331
4332 That makes it possible to implement some if-constructs without jumps e.g.:
4333 (borrow = CC0 | CC1 and carry = CC2 | CC3)
4334 unsigned int a, b, c;
4335 if (a < b) c++; -> CCU b > a -> CC2; c += carry;
4336 if (a < b) c--; -> CCL3 a - b -> borrow; c -= borrow;
4337 if (a <= b) c++; -> CCL3 b - a -> borrow; c += carry;
4338 if (a <= b) c--; -> CCU a <= b -> borrow; c -= borrow;
4339
4340 Checks for EQ and NE with a nonzero value need an additional xor e.g.:
4341 if (a == b) c++; -> CCL3 a ^= b; 0 - a -> borrow; c += carry;
4342 if (a == b) c--; -> CCU a ^= b; a <= 0 -> CC0 | CC1; c -= borrow;
4343 if (a != b) c++; -> CCU a ^= b; a > 0 -> CC2; c += carry;
4344 if (a != b) c--; -> CCL3 a ^= b; 0 - a -> borrow; c -= borrow; */
4345
4346 bool
4347 s390_expand_addcc (enum rtx_code cmp_code, rtx cmp_op0, rtx cmp_op1,
4348 rtx dst, rtx src, rtx increment)
4349 {
4350 enum machine_mode cmp_mode;
4351 enum machine_mode cc_mode;
4352 rtx op_res;
4353 rtx insn;
4354 rtvec p;
4355 int ret;
4356
4357 if ((GET_MODE (cmp_op0) == SImode || GET_MODE (cmp_op0) == VOIDmode)
4358 && (GET_MODE (cmp_op1) == SImode || GET_MODE (cmp_op1) == VOIDmode))
4359 cmp_mode = SImode;
4360 else if ((GET_MODE (cmp_op0) == DImode || GET_MODE (cmp_op0) == VOIDmode)
4361 && (GET_MODE (cmp_op1) == DImode || GET_MODE (cmp_op1) == VOIDmode))
4362 cmp_mode = DImode;
4363 else
4364 return false;
4365
4366 /* Try ADD LOGICAL WITH CARRY. */
4367 if (increment == const1_rtx)
4368 {
4369 /* Determine CC mode to use. */
4370 if (cmp_code == EQ || cmp_code == NE)
4371 {
4372 if (cmp_op1 != const0_rtx)
4373 {
4374 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
4375 NULL_RTX, 0, OPTAB_WIDEN);
4376 cmp_op1 = const0_rtx;
4377 }
4378
4379 cmp_code = cmp_code == EQ ? LEU : GTU;
4380 }
4381
4382 if (cmp_code == LTU || cmp_code == LEU)
4383 {
4384 rtx tem = cmp_op0;
4385 cmp_op0 = cmp_op1;
4386 cmp_op1 = tem;
4387 cmp_code = swap_condition (cmp_code);
4388 }
4389
4390 switch (cmp_code)
4391 {
4392 case GTU:
4393 cc_mode = CCUmode;
4394 break;
4395
4396 case GEU:
4397 cc_mode = CCL3mode;
4398 break;
4399
4400 default:
4401 return false;
4402 }
4403
4404 /* Emit comparison instruction pattern. */
4405 if (!register_operand (cmp_op0, cmp_mode))
4406 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4407
4408 insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
4409 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4410 /* We use insn_invalid_p here to add clobbers if required. */
4411 ret = insn_invalid_p (emit_insn (insn));
4412 gcc_assert (!ret);
4413
4414 /* Emit ALC instruction pattern. */
4415 op_res = gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4416 gen_rtx_REG (cc_mode, CC_REGNUM),
4417 const0_rtx);
4418
4419 if (src != const0_rtx)
4420 {
4421 if (!register_operand (src, GET_MODE (dst)))
4422 src = force_reg (GET_MODE (dst), src);
4423
4424 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, src);
4425 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, const0_rtx);
4426 }
4427
4428 p = rtvec_alloc (2);
4429 RTVEC_ELT (p, 0) =
4430 gen_rtx_SET (VOIDmode, dst, op_res);
4431 RTVEC_ELT (p, 1) =
4432 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4433 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
4434
4435 return true;
4436 }
4437
4438 /* Try SUBTRACT LOGICAL WITH BORROW. */
4439 if (increment == constm1_rtx)
4440 {
4441 /* Determine CC mode to use. */
4442 if (cmp_code == EQ || cmp_code == NE)
4443 {
4444 if (cmp_op1 != const0_rtx)
4445 {
4446 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
4447 NULL_RTX, 0, OPTAB_WIDEN);
4448 cmp_op1 = const0_rtx;
4449 }
4450
4451 cmp_code = cmp_code == EQ ? LEU : GTU;
4452 }
4453
4454 if (cmp_code == GTU || cmp_code == GEU)
4455 {
4456 rtx tem = cmp_op0;
4457 cmp_op0 = cmp_op1;
4458 cmp_op1 = tem;
4459 cmp_code = swap_condition (cmp_code);
4460 }
4461
4462 switch (cmp_code)
4463 {
4464 case LEU:
4465 cc_mode = CCUmode;
4466 break;
4467
4468 case LTU:
4469 cc_mode = CCL3mode;
4470 break;
4471
4472 default:
4473 return false;
4474 }
4475
4476 /* Emit comparison instruction pattern. */
4477 if (!register_operand (cmp_op0, cmp_mode))
4478 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4479
4480 insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
4481 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4482 /* We use insn_invalid_p here to add clobbers if required. */
4483 ret = insn_invalid_p (emit_insn (insn));
4484 gcc_assert (!ret);
4485
4486 /* Emit SLB instruction pattern. */
4487 if (!register_operand (src, GET_MODE (dst)))
4488 src = force_reg (GET_MODE (dst), src);
4489
4490 op_res = gen_rtx_MINUS (GET_MODE (dst),
4491 gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
4492 gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4493 gen_rtx_REG (cc_mode, CC_REGNUM),
4494 const0_rtx));
4495 p = rtvec_alloc (2);
4496 RTVEC_ELT (p, 0) =
4497 gen_rtx_SET (VOIDmode, dst, op_res);
4498 RTVEC_ELT (p, 1) =
4499 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4500 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
4501
4502 return true;
4503 }
4504
4505 return false;
4506 }
4507
4508 /* Expand code for the insv template. Return true if successful. */
4509
4510 bool
4511 s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
4512 {
4513 int bitsize = INTVAL (op1);
4514 int bitpos = INTVAL (op2);
4515
4516 /* On z10 we can use the risbg instruction to implement insv. */
4517 if (TARGET_Z10
4518 && ((GET_MODE (dest) == DImode && GET_MODE (src) == DImode)
4519 || (GET_MODE (dest) == SImode && GET_MODE (src) == SImode)))
4520 {
4521 rtx op;
4522 rtx clobber;
4523
4524 op = gen_rtx_SET (GET_MODE(src),
4525 gen_rtx_ZERO_EXTRACT (GET_MODE (dest), dest, op1, op2),
4526 src);
4527 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4528 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
4529
4530 return true;
4531 }
4532
4533 /* We need byte alignment. */
4534 if (bitsize % BITS_PER_UNIT)
4535 return false;
4536
4537 if (bitpos == 0
4538 && memory_operand (dest, VOIDmode)
4539 && (register_operand (src, word_mode)
4540 || const_int_operand (src, VOIDmode)))
4541 {
4542 /* Emit standard pattern if possible. */
4543 enum machine_mode mode = smallest_mode_for_size (bitsize, MODE_INT);
4544 if (GET_MODE_BITSIZE (mode) == bitsize)
4545 emit_move_insn (adjust_address (dest, mode, 0), gen_lowpart (mode, src));
4546
4547 /* (set (ze (mem)) (const_int)). */
4548 else if (const_int_operand (src, VOIDmode))
4549 {
4550 int size = bitsize / BITS_PER_UNIT;
4551 rtx src_mem = adjust_address (force_const_mem (word_mode, src), BLKmode,
4552 GET_MODE_SIZE (word_mode) - size);
4553
4554 dest = adjust_address (dest, BLKmode, 0);
4555 set_mem_size (dest, size);
4556 s390_expand_movmem (dest, src_mem, GEN_INT (size));
4557 }
4558
4559 /* (set (ze (mem)) (reg)). */
4560 else if (register_operand (src, word_mode))
4561 {
4562 if (bitsize <= GET_MODE_BITSIZE (SImode))
4563 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, op1,
4564 const0_rtx), src);
4565 else
4566 {
4567 /* Emit st,stcmh sequence. */
4568 int stcmh_width = bitsize - GET_MODE_BITSIZE (SImode);
4569 int size = stcmh_width / BITS_PER_UNIT;
4570
4571 emit_move_insn (adjust_address (dest, SImode, size),
4572 gen_lowpart (SImode, src));
4573 set_mem_size (dest, size);
4574 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, GEN_INT
4575 (stcmh_width), const0_rtx),
4576 gen_rtx_LSHIFTRT (word_mode, src, GEN_INT
4577 (GET_MODE_BITSIZE (SImode))));
4578 }
4579 }
4580 else
4581 return false;
4582
4583 return true;
4584 }
4585
4586 /* (set (ze (reg)) (const_int)). */
4587 if (TARGET_ZARCH
4588 && register_operand (dest, word_mode)
4589 && (bitpos % 16) == 0
4590 && (bitsize % 16) == 0
4591 && const_int_operand (src, VOIDmode))
4592 {
4593 HOST_WIDE_INT val = INTVAL (src);
4594 int regpos = bitpos + bitsize;
4595
4596 while (regpos > bitpos)
4597 {
4598 enum machine_mode putmode;
4599 int putsize;
4600
4601 if (TARGET_EXTIMM && (regpos % 32 == 0) && (regpos >= bitpos + 32))
4602 putmode = SImode;
4603 else
4604 putmode = HImode;
4605
4606 putsize = GET_MODE_BITSIZE (putmode);
4607 regpos -= putsize;
4608 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
4609 GEN_INT (putsize),
4610 GEN_INT (regpos)),
4611 gen_int_mode (val, putmode));
4612 val >>= putsize;
4613 }
4614 gcc_assert (regpos == bitpos);
4615 return true;
4616 }
4617
4618 return false;
4619 }
4620
4621 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic which returns a
4622 register that holds VAL of mode MODE shifted by COUNT bits. */
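/* E.g. for QImode this computes (VAL & 0xff) << COUNT in SImode
   (an illustrative reading of the two expand_simple_binop calls below).  */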
4623
4624 static inline rtx
4625 s390_expand_mask_and_shift (rtx val, enum machine_mode mode, rtx count)
4626 {
4627 val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
4628 NULL_RTX, 1, OPTAB_DIRECT);
4629 return expand_simple_binop (SImode, ASHIFT, val, count,
4630 NULL_RTX, 1, OPTAB_DIRECT);
4631 }
4632
4633 /* Structure to hold the initial parameters for a compare_and_swap operation
4634 in HImode and QImode. */
4635
4636 struct alignment_context
4637 {
4638 rtx memsi; /* SI aligned memory location. */
4639 rtx shift; /* Bit offset with regard to lsb. */
4640 rtx modemask; /* Mask of the HQImode shifted by SHIFT bits. */
4641 rtx modemaski; /* ~modemask */
4642 bool aligned; /* True if memory is aligned, false otherwise. */
4643 };
4644
4645 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize
4646 structure AC for transparent simplification if the memory alignment is known
4647 to be at least 32bit. MEM is the memory location for the actual operation
4648 and MODE its mode. */
4649
4650 static void
4651 init_alignment_context (struct alignment_context *ac, rtx mem,
4652 enum machine_mode mode)
4653 {
4654 ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
4655 ac->aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));
4656
4657 if (ac->aligned)
4658 ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned. */
4659 else
4660 {
4661 /* Alignment is unknown. */
4662 rtx byteoffset, addr, align;
4663
4664 /* Force the address into a register. */
4665 addr = force_reg (Pmode, XEXP (mem, 0));
4666
4667 /* Align it to SImode. */
4668 align = expand_simple_binop (Pmode, AND, addr,
4669 GEN_INT (-GET_MODE_SIZE (SImode)),
4670 NULL_RTX, 1, OPTAB_DIRECT);
4671 /* Generate MEM. */
4672 ac->memsi = gen_rtx_MEM (SImode, align);
4673 MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
4674 set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
4675 set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));
4676
4677 /* Calculate shiftcount. */
4678 byteoffset = expand_simple_binop (Pmode, AND, addr,
4679 GEN_INT (GET_MODE_SIZE (SImode) - 1),
4680 NULL_RTX, 1, OPTAB_DIRECT);
4681 /* As we already have some offset, evaluate the remaining distance. */
4682 ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
4683 NULL_RTX, 1, OPTAB_DIRECT);
4684
4685 }
4686 /* Shift is the byte count, but we need the bitcount. */
4687 ac->shift = expand_simple_binop (SImode, MULT, ac->shift, GEN_INT (BITS_PER_UNIT),
4688 NULL_RTX, 1, OPTAB_DIRECT);
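   /* Illustrative example for the unaligned path: a QImode access one
      byte past the start of the aligned word gives
      shift = (4 - 1 - 1) * 8 = 16, i.e. the byte occupies bits 16..23
      of the big-endian SImode word.  */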
4689 /* Calculate masks. */
4690 ac->modemask = expand_simple_binop (SImode, ASHIFT,
4691 GEN_INT (GET_MODE_MASK (mode)), ac->shift,
4692 NULL_RTX, 1, OPTAB_DIRECT);
4693 ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask, NULL_RTX, 1);
4694 }
4695
4696 /* Expand an atomic compare and swap operation for HImode and QImode. MEM is
4697 the memory location, CMP the old value to compare MEM with, and NEW_RTX the value
4698 to set if CMP == MEM.
4699 CMP is never in memory for compare_and_swap_cc because
4700 expand_bool_compare_and_swap puts it into a register for later compare. */
4701
4702 void
4703 s390_expand_cs_hqi (enum machine_mode mode, rtx target, rtx mem, rtx cmp, rtx new_rtx)
4704 {
4705 struct alignment_context ac;
4706 rtx cmpv, newv, val, resv, cc;
4707 rtx res = gen_reg_rtx (SImode);
4708 rtx csloop = gen_label_rtx ();
4709 rtx csend = gen_label_rtx ();
4710
4711 gcc_assert (register_operand (target, VOIDmode));
4712 gcc_assert (MEM_P (mem));
4713
4714 init_alignment_context (&ac, mem, mode);
4715
4716 /* Shift the values to the correct bit positions. */
4717 if (!(ac.aligned && MEM_P (cmp)))
4718 cmp = s390_expand_mask_and_shift (cmp, mode, ac.shift);
4719 if (!(ac.aligned && MEM_P (new_rtx)))
4720 new_rtx = s390_expand_mask_and_shift (new_rtx, mode, ac.shift);
4721
4722 /* Load full word. Subsequent loads are performed by CS. */
4723 val = expand_simple_binop (SImode, AND, ac.memsi, ac.modemaski,
4724 NULL_RTX, 1, OPTAB_DIRECT);
4725
4726 /* Start CS loop. */
4727 emit_label (csloop);
4728 /* val = "<mem>00..0<mem>"
4729 * cmp = "00..0<cmp>00..0"
4730 * new = "00..0<new>00..0"
4731 */
4732
4733 /* Patch cmp and new with val at correct position. */
4734 if (ac.aligned && MEM_P (cmp))
4735 {
4736 cmpv = force_reg (SImode, val);
4737 store_bit_field (cmpv, GET_MODE_BITSIZE (mode), 0,
4738 0, 0, SImode, cmp);
4739 }
4740 else
4741 cmpv = force_reg (SImode, expand_simple_binop (SImode, IOR, cmp, val,
4742 NULL_RTX, 1, OPTAB_DIRECT));
4743 if (ac.aligned && MEM_P (new_rtx))
4744 {
4745 newv = force_reg (SImode, val);
4746 store_bit_field (newv, GET_MODE_BITSIZE (mode), 0,
4747 0, 0, SImode, new_rtx);
4748 }
4749 else
4750 newv = force_reg (SImode, expand_simple_binop (SImode, IOR, new_rtx, val,
4751 NULL_RTX, 1, OPTAB_DIRECT));
4752
4753 /* Jump to end if we're done (likely?). */
4754 s390_emit_jump (csend, s390_emit_compare_and_swap (EQ, res, ac.memsi,
4755 cmpv, newv));
4756
4757 /* Check for changes outside mode. */
4758 resv = expand_simple_binop (SImode, AND, res, ac.modemaski,
4759 NULL_RTX, 1, OPTAB_DIRECT);
4760 cc = s390_emit_compare (NE, resv, val);
4761 emit_move_insn (val, resv);
4762 /* Loop internal if so. */
4763 s390_emit_jump (csloop, cc);
4764
4765 emit_label (csend);
4766
4767 /* Return the correct part of the bitfield. */
4768 convert_move (target, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
4769 NULL_RTX, 1, OPTAB_DIRECT), 1);
4770 }
4771
4772 /* Expand an atomic operation CODE of mode MODE. MEM is the memory location
4773 and VAL the value to play with. If AFTER is true then store the value
4774 MEM holds after the operation, if AFTER is false then store the value MEM
4775 holds before the operation. If TARGET is zero then discard that value, else
4776 store it to TARGET. */
4777
4778 void
4779 s390_expand_atomic (enum machine_mode mode, enum rtx_code code,
4780 rtx target, rtx mem, rtx val, bool after)
4781 {
4782 struct alignment_context ac;
4783 rtx cmp;
4784 rtx new_rtx = gen_reg_rtx (SImode);
4785 rtx orig = gen_reg_rtx (SImode);
4786 rtx csloop = gen_label_rtx ();
4787
4788 gcc_assert (!target || register_operand (target, VOIDmode));
4789 gcc_assert (MEM_P (mem));
4790
4791 init_alignment_context (&ac, mem, mode);
4792
4793 /* Shift val to the correct bit positions.
4794 Preserve "icm", but prevent "ex icm". */
4795 if (!(ac.aligned && code == SET && MEM_P (val)))
4796 val = s390_expand_mask_and_shift (val, mode, ac.shift);
4797
4798 /* Further preparation insns. */
4799 if (code == PLUS || code == MINUS)
4800 emit_move_insn (orig, val);
4801 else if (code == MULT || code == AND) /* val = "11..1<val>11..1" */
4802 val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
4803 NULL_RTX, 1, OPTAB_DIRECT);
4804
4805 /* Load full word. Subsequent loads are performed by CS. */
4806 cmp = force_reg (SImode, ac.memsi);
4807
4808 /* Start CS loop. */
4809 emit_label (csloop);
4810 emit_move_insn (new_rtx, cmp);
4811
4812 /* Patch new with val at correct position. */
4813 switch (code)
4814 {
4815 case PLUS:
4816 case MINUS:
4817 val = expand_simple_binop (SImode, code, new_rtx, orig,
4818 NULL_RTX, 1, OPTAB_DIRECT);
4819 val = expand_simple_binop (SImode, AND, val, ac.modemask,
4820 NULL_RTX, 1, OPTAB_DIRECT);
4821 /* FALLTHRU */
4822 case SET:
4823 if (ac.aligned && MEM_P (val))
4824 store_bit_field (new_rtx, GET_MODE_BITSIZE (mode), 0,
4825 0, 0, SImode, val);
4826 else
4827 {
4828 new_rtx = expand_simple_binop (SImode, AND, new_rtx, ac.modemaski,
4829 NULL_RTX, 1, OPTAB_DIRECT);
4830 new_rtx = expand_simple_binop (SImode, IOR, new_rtx, val,
4831 NULL_RTX, 1, OPTAB_DIRECT);
4832 }
4833 break;
4834 case AND:
4835 case IOR:
4836 case XOR:
4837 new_rtx = expand_simple_binop (SImode, code, new_rtx, val,
4838 NULL_RTX, 1, OPTAB_DIRECT);
4839 break;
4840 case MULT: /* NAND */
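       /* VAL already has all bits outside the field set (see the XOR with
	  modemaski above), so the AND below leaves those bits of NEW_RTX
	  unchanged; XORing with modemask then complements only the field
	  bits, yielding ~(new & val) within the field.  */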
4841 new_rtx = expand_simple_binop (SImode, AND, new_rtx, val,
4842 NULL_RTX, 1, OPTAB_DIRECT);
4843 new_rtx = expand_simple_binop (SImode, XOR, new_rtx, ac.modemask,
4844 NULL_RTX, 1, OPTAB_DIRECT);
4845 break;
4846 default:
4847 gcc_unreachable ();
4848 }
4849
4850 s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, cmp,
4851 ac.memsi, cmp, new_rtx));
4852
4853 /* Return the correct part of the bitfield. */
4854 if (target)
4855 convert_move (target, expand_simple_binop (SImode, LSHIFTRT,
4856 after ? new_rtx : cmp, ac.shift,
4857 NULL_RTX, 1, OPTAB_DIRECT), 1);
4858 }
4859
4860 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
4861 We need to emit DTP-relative relocations. */
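/* For SIZE == 8 and X a symbol "foo" this emits "\t.quad\tfoo@DTPOFF"
   (an illustrative example of the output below).  */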
4862
4863 static void s390_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
4864
4865 static void
4866 s390_output_dwarf_dtprel (FILE *file, int size, rtx x)
4867 {
4868 switch (size)
4869 {
4870 case 4:
4871 fputs ("\t.long\t", file);
4872 break;
4873 case 8:
4874 fputs ("\t.quad\t", file);
4875 break;
4876 default:
4877 gcc_unreachable ();
4878 }
4879 output_addr_const (file, x);
4880 fputs ("@DTPOFF", file);
4881 }
4882
4883 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
4884 /* Implement TARGET_MANGLE_TYPE. */
4885
4886 static const char *
4887 s390_mangle_type (const_tree type)
4888 {
4889 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
4890 && TARGET_LONG_DOUBLE_128)
4891 return "g";
4892
4893 /* For all other types, use normal C++ mangling. */
4894 return NULL;
4895 }
4896 #endif
4897
4898 /* In the name of slightly smaller debug output, and to cater to
4899 general assembler lossage, recognize various UNSPEC sequences
4900 and turn them back into a direct symbol reference. */
4901
4902 static rtx
4903 s390_delegitimize_address (rtx orig_x)
4904 {
4905 rtx x, y;
4906
4907 orig_x = delegitimize_mem_from_attrs (orig_x);
4908 x = orig_x;
4909
4910 /* Extract the symbol ref from:
4911 (plus:SI (reg:SI 12 %r12)
4912 (const:SI (unspec:SI [(symbol_ref/f:SI ("*.LC0"))]
4913 UNSPEC_GOTOFF/PLTOFF)))
4914 and
4915 (plus:SI (reg:SI 12 %r12)
4916 (const:SI (plus:SI (unspec:SI [(symbol_ref:SI ("L"))]
4917 UNSPEC_GOTOFF/PLTOFF)
4918 (const_int 4 [0x4])))) */
4919 if (GET_CODE (x) == PLUS
4920 && REG_P (XEXP (x, 0))
4921 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
4922 && GET_CODE (XEXP (x, 1)) == CONST)
4923 {
4924 HOST_WIDE_INT offset = 0;
4925
4926 /* The const operand. */
4927 y = XEXP (XEXP (x, 1), 0);
4928
4929 if (GET_CODE (y) == PLUS
4930 && GET_CODE (XEXP (y, 1)) == CONST_INT)
4931 {
4932 offset = INTVAL (XEXP (y, 1));
4933 y = XEXP (y, 0);
4934 }
4935
4936 if (GET_CODE (y) == UNSPEC
4937 && (XINT (y, 1) == UNSPEC_GOTOFF
4938 || XINT (y, 1) == UNSPEC_PLTOFF))
4939 return plus_constant (XVECEXP (y, 0, 0), offset);
4940 }
4941
4942 if (GET_CODE (x) != MEM)
4943 return orig_x;
4944
4945 x = XEXP (x, 0);
4946 if (GET_CODE (x) == PLUS
4947 && GET_CODE (XEXP (x, 1)) == CONST
4948 && GET_CODE (XEXP (x, 0)) == REG
4949 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
4950 {
4951 y = XEXP (XEXP (x, 1), 0);
4952 if (GET_CODE (y) == UNSPEC
4953 && XINT (y, 1) == UNSPEC_GOT)
4954 y = XVECEXP (y, 0, 0);
4955 else
4956 return orig_x;
4957 }
4958 else if (GET_CODE (x) == CONST)
4959 {
4960 /* Extract the symbol ref from:
4961 (mem:QI (const:DI (unspec:DI [(symbol_ref:DI ("foo"))]
4962 UNSPEC_PLT/GOTENT))) */
4963
4964 y = XEXP (x, 0);
4965 if (GET_CODE (y) == UNSPEC
4966 && (XINT (y, 1) == UNSPEC_GOTENT
4967 || XINT (y, 1) == UNSPEC_PLT))
4968 y = XVECEXP (y, 0, 0);
4969 else
4970 return orig_x;
4971 }
4972 else
4973 return orig_x;
4974
4975 if (GET_MODE (orig_x) != Pmode)
4976 {
4977 if (GET_MODE (orig_x) == BLKmode)
4978 return orig_x;
4979 y = lowpart_subreg (GET_MODE (orig_x), y, Pmode);
4980 if (y == NULL_RTX)
4981 return orig_x;
4982 }
4983 return y;
4984 }
4985
4986 /* Output operand OP to stdio stream FILE.
4987 OP is an address (register + offset) which is not used to address data;
4988 instead the rightmost bits are interpreted as the value. */
4989
4990 static void
4991 print_shift_count_operand (FILE *file, rtx op)
4992 {
4993 HOST_WIDE_INT offset;
4994 rtx base;
4995
4996 /* Extract base register and offset. */
4997 if (!s390_decompose_shift_count (op, &base, &offset))
4998 gcc_unreachable ();
4999
5000 /* Sanity check. */
5001 if (base)
5002 {
5003 gcc_assert (GET_CODE (base) == REG);
5004 gcc_assert (REGNO (base) < FIRST_PSEUDO_REGISTER);
5005 gcc_assert (REGNO_REG_CLASS (REGNO (base)) == ADDR_REGS);
5006 }
5007
5008 /* Offsets are restricted to twelve bits. */
5009 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset & ((1 << 12) - 1));
5010 if (base)
5011 fprintf (file, "(%s)", reg_names[REGNO (base)]);
5012 }
5013
5014 /* See 'get_some_local_dynamic_name'. */
5015
5016 static int
5017 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
5018 {
5019 rtx x = *px;
5020
5021 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
5022 {
5023 x = get_pool_constant (x);
5024 return for_each_rtx (&x, get_some_local_dynamic_name_1, 0);
5025 }
5026
5027 if (GET_CODE (x) == SYMBOL_REF
5028 && tls_symbolic_operand (x) == TLS_MODEL_LOCAL_DYNAMIC)
5029 {
5030 cfun->machine->some_ld_name = XSTR (x, 0);
5031 return 1;
5032 }
5033
5034 return 0;
5035 }
5036
5037 /* Locate some local-dynamic symbol still in use by this function
5038 so that we can print its name in local-dynamic base patterns. */
5039
5040 static const char *
5041 get_some_local_dynamic_name (void)
5042 {
5043 rtx insn;
5044
5045 if (cfun->machine->some_ld_name)
5046 return cfun->machine->some_ld_name;
5047
5048 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
5049 if (INSN_P (insn)
5050 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
5051 return cfun->machine->some_ld_name;
5052
5053 gcc_unreachable ();
5054 }
5055
5056 /* Output machine-dependent UNSPECs occurring in address constant X
5057 in assembler syntax to stdio stream FILE. Returns true if the
5058 constant X could be recognized, false otherwise. */
5059
5060 static bool
5061 s390_output_addr_const_extra (FILE *file, rtx x)
5062 {
5063 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
5064 switch (XINT (x, 1))
5065 {
5066 case UNSPEC_GOTENT:
5067 output_addr_const (file, XVECEXP (x, 0, 0));
5068 fprintf (file, "@GOTENT");
5069 return true;
5070 case UNSPEC_GOT:
5071 output_addr_const (file, XVECEXP (x, 0, 0));
5072 fprintf (file, "@GOT");
5073 return true;
5074 case UNSPEC_GOTOFF:
5075 output_addr_const (file, XVECEXP (x, 0, 0));
5076 fprintf (file, "@GOTOFF");
5077 return true;
5078 case UNSPEC_PLT:
5079 output_addr_const (file, XVECEXP (x, 0, 0));
5080 fprintf (file, "@PLT");
5081 return true;
5082 case UNSPEC_PLTOFF:
5083 output_addr_const (file, XVECEXP (x, 0, 0));
5084 fprintf (file, "@PLTOFF");
5085 return true;
5086 case UNSPEC_TLSGD:
5087 output_addr_const (file, XVECEXP (x, 0, 0));
5088 fprintf (file, "@TLSGD");
5089 return true;
5090 case UNSPEC_TLSLDM:
5091 assemble_name (file, get_some_local_dynamic_name ());
5092 fprintf (file, "@TLSLDM");
5093 return true;
5094 case UNSPEC_DTPOFF:
5095 output_addr_const (file, XVECEXP (x, 0, 0));
5096 fprintf (file, "@DTPOFF");
5097 return true;
5098 case UNSPEC_NTPOFF:
5099 output_addr_const (file, XVECEXP (x, 0, 0));
5100 fprintf (file, "@NTPOFF");
5101 return true;
5102 case UNSPEC_GOTNTPOFF:
5103 output_addr_const (file, XVECEXP (x, 0, 0));
5104 fprintf (file, "@GOTNTPOFF");
5105 return true;
5106 case UNSPEC_INDNTPOFF:
5107 output_addr_const (file, XVECEXP (x, 0, 0));
5108 fprintf (file, "@INDNTPOFF");
5109 return true;
5110 }
5111
5112 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 2)
5113 switch (XINT (x, 1))
5114 {
5115 case UNSPEC_POOL_OFFSET:
5116 x = gen_rtx_MINUS (GET_MODE (x), XVECEXP (x, 0, 0), XVECEXP (x, 0, 1));
5117 output_addr_const (file, x);
5118 return true;
5119 }
5120 return false;
5121 }
5122
5123 /* Output address operand ADDR in assembler syntax to
5124 stdio stream FILE. */
5125
5126 void
5127 print_operand_address (FILE *file, rtx addr)
5128 {
5129 struct s390_address ad;
5130
5131 if (s390_symref_operand_p (addr, NULL, NULL))
5132 {
5133 if (!TARGET_Z10)
5134 {
5135 output_operand_lossage ("symbolic memory references are "
5136 "only supported on z10 or later");
5137 return;
5138 }
5139 output_addr_const (file, addr);
5140 return;
5141 }
5142
5143 if (!s390_decompose_address (addr, &ad)
5144 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5145 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
5146 output_operand_lossage ("cannot decompose address");
5147
5148 if (ad.disp)
5149 output_addr_const (file, ad.disp);
5150 else
5151 fprintf (file, "0");
5152
5153 if (ad.base && ad.indx)
5154 fprintf (file, "(%s,%s)", reg_names[REGNO (ad.indx)],
5155 reg_names[REGNO (ad.base)]);
5156 else if (ad.base)
5157 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
5158 }
5159
5160 /* Output operand X in assembler syntax to stdio stream FILE.
5161 CODE specifies the format flag. The following format flags
5162 are recognized:
5163
5164 'C': print opcode suffix for branch condition.
5165 'D': print opcode suffix for inverse branch condition.
5166 'E': print opcode suffix for branch on index instruction.
5167 'J': print tls_load/tls_gdcall/tls_ldcall suffix
5168 'G': print the size of the operand in bytes.
5169 'O': print only the displacement of a memory reference.
5170 'R': print only the base register of a memory reference.
5171 'S': print S-type memory reference (base+displacement).
5172 'N': print the second word of a DImode operand.
5173 'M': print the second word of a TImode operand.
5174 'Y': print shift count operand.
5175
5176 'b': print integer X as if it's an unsigned byte.
5177 'c': print integer X as if it's a signed byte.
5178 'x': print integer X as if it's an unsigned halfword.
5179 'h': print integer X as if it's a signed halfword.
5180 'i': print the first nonzero HImode part of X.
5181 'j': print the first HImode part unequal to -1 of X.
5182 'k': print the first nonzero SImode part of X.
5183 'm': print the first SImode part unequal to -1 of X.
5184 'o': print integer X as if it's an unsigned 32bit word. */
5185
5186 void
5187 print_operand (FILE *file, rtx x, int code)
5188 {
5189 switch (code)
5190 {
5191 case 'C':
5192 fprintf (file, s390_branch_condition_mnemonic (x, FALSE));
5193 return;
5194
5195 case 'D':
5196 fprintf (file, s390_branch_condition_mnemonic (x, TRUE));
5197 return;
5198
5199 case 'E':
5200 if (GET_CODE (x) == LE)
5201 fprintf (file, "l");
5202 else if (GET_CODE (x) == GT)
5203 fprintf (file, "h");
5204 else
5205 output_operand_lossage ("invalid comparison operator "
5206 "for 'E' output modifier");
5207 return;
5208
5209 case 'J':
5210 if (GET_CODE (x) == SYMBOL_REF)
5211 {
5212 fprintf (file, "%s", ":tls_load:");
5213 output_addr_const (file, x);
5214 }
5215 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
5216 {
5217 fprintf (file, "%s", ":tls_gdcall:");
5218 output_addr_const (file, XVECEXP (x, 0, 0));
5219 }
5220 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM)
5221 {
5222 fprintf (file, "%s", ":tls_ldcall:");
5223 assemble_name (file, get_some_local_dynamic_name ());
5224 }
5225 else
5226 output_operand_lossage ("invalid reference for 'J' output modifier");
5227 return;
5228
5229 case 'G':
5230 fprintf (file, "%u", GET_MODE_SIZE (GET_MODE (x)));
5231 return;
5232
5233 case 'O':
5234 {
5235 struct s390_address ad;
5236 int ret;
5237
5238 if (!MEM_P (x))
5239 {
5240 output_operand_lossage ("memory reference expected for "
5241 "'O' output modifier");
5242 return;
5243 }
5244
5245 ret = s390_decompose_address (XEXP (x, 0), &ad);
5246
5247 if (!ret
5248 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5249 || ad.indx)
5250 {
5251 output_operand_lossage ("invalid address for 'O' output modifier");
5252 return;
5253 }
5254
5255 if (ad.disp)
5256 output_addr_const (file, ad.disp);
5257 else
5258 fprintf (file, "0");
5259 }
5260 return;
5261
5262 case 'R':
5263 {
5264 struct s390_address ad;
5265 int ret;
5266
5267 if (!MEM_P (x))
5268 {
5269 output_operand_lossage ("memory reference expected for "
5270 "'R' output modifier");
5271 return;
5272 }
5273
5274 ret = s390_decompose_address (XEXP (x, 0), &ad);
5275
5276 if (!ret
5277 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5278 || ad.indx)
5279 {
5280 output_operand_lossage ("invalid address for 'R' output modifier");
5281 return;
5282 }
5283
5284 if (ad.base)
5285 fprintf (file, "%s", reg_names[REGNO (ad.base)]);
5286 else
5287 fprintf (file, "0");
5288 }
5289 return;
5290
5291 case 'S':
5292 {
5293 struct s390_address ad;
5294 int ret;
5295
5296 if (!MEM_P (x))
5297 {
5298 output_operand_lossage ("memory reference expected for "
5299 "'S' output modifier");
5300 return;
5301 }
5302 ret = s390_decompose_address (XEXP (x, 0), &ad);
5303
5304 if (!ret
5305 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5306 || ad.indx)
5307 {
5308 output_operand_lossage ("invalid address for 'S' output modifier");
5309 return;
5310 }
5311
5312 if (ad.disp)
5313 output_addr_const (file, ad.disp);
5314 else
5315 fprintf (file, "0");
5316
5317 if (ad.base)
5318 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
5319 }
5320 return;
5321
5322 case 'N':
5323 if (GET_CODE (x) == REG)
5324 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
5325 else if (GET_CODE (x) == MEM)
5326 x = change_address (x, VOIDmode, plus_constant (XEXP (x, 0), 4));
5327 else
5328 output_operand_lossage ("register or memory expression expected "
5329 "for 'N' output modifier");
5330 break;
5331
5332 case 'M':
5333 if (GET_CODE (x) == REG)
5334 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
5335 else if (GET_CODE (x) == MEM)
5336 x = change_address (x, VOIDmode, plus_constant (XEXP (x, 0), 8));
5337 else
5338 output_operand_lossage ("register or memory expression expected "
5339 "for 'M' output modifier");
5340 break;
5341
5342 case 'Y':
5343 print_shift_count_operand (file, x);
5344 return;
5345 }
5346
5347 switch (GET_CODE (x))
5348 {
5349 case REG:
5350 fprintf (file, "%s", reg_names[REGNO (x)]);
5351 break;
5352
5353 case MEM:
5354 output_address (XEXP (x, 0));
5355 break;
5356
5357 case CONST:
5358 case CODE_LABEL:
5359 case LABEL_REF:
5360 case SYMBOL_REF:
5361 output_addr_const (file, x);
5362 break;
5363
5364 case CONST_INT:
5365 if (code == 'b')
5366 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xff);
5367 else if (code == 'c')
5368 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ((INTVAL (x) & 0xff) ^ 0x80) - 0x80);
5369 else if (code == 'x')
5370 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xffff);
5371 else if (code == 'h')
5372 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
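       /* The XOR/subtract pairs above sign-extend the low byte or halfword;
	  e.g. 0xff becomes (0xff ^ 0x80) - 0x80 = -1 (illustrative).  */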
5373 else if (code == 'i')
5374 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5375 s390_extract_part (x, HImode, 0));
5376 else if (code == 'j')
5377 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5378 s390_extract_part (x, HImode, -1));
5379 else if (code == 'k')
5380 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5381 s390_extract_part (x, SImode, 0));
5382 else if (code == 'm')
5383 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5384 s390_extract_part (x, SImode, -1));
5385 else if (code == 'o')
5386 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xffffffff);
5387 else
5388 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
5389 break;
5390
5391 case CONST_DOUBLE:
5392 gcc_assert (GET_MODE (x) == VOIDmode);
5393 if (code == 'b')
5394 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xff);
5395 else if (code == 'x')
5396 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xffff);
5397 else if (code == 'h')
5398 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5399 ((CONST_DOUBLE_LOW (x) & 0xffff) ^ 0x8000) - 0x8000);
5400 else
5401 {
5402 if (code == 0)
5403 output_operand_lossage ("invalid constant - try using "
5404 "an output modifier");
5405 else
5406 output_operand_lossage ("invalid constant for output modifier '%c'",
5407 code);
5408 }
5409 break;
5410
5411 default:
5412 if (code == 0)
5413 output_operand_lossage ("invalid expression - try using "
5414 "an output modifier");
5415 else
5416 output_operand_lossage ("invalid expression for output "
5417 "modifier '%c'", code);
5418 break;
5419 }
5420 }
5421
5422 /* Target hook for assembling integer objects. We need to define it
5423 here to work around a bug in some versions of GAS, which couldn't
5424 handle values smaller than INT_MIN when printed in decimal. */
5425
5426 static bool
5427 s390_assemble_integer (rtx x, unsigned int size, int aligned_p)
5428 {
5429 if (size == 8 && aligned_p
5430 && GET_CODE (x) == CONST_INT && INTVAL (x) < INT_MIN)
5431 {
5432 fprintf (asm_out_file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n",
5433 INTVAL (x));
5434 return true;
5435 }
5436 return default_assemble_integer (x, size, aligned_p);
5437 }
5438
5439 /* Returns true if register REGNO is used for forming
5440 a memory address in expression X. */
5441
5442 static bool
5443 reg_used_in_mem_p (int regno, rtx x)
5444 {
5445 enum rtx_code code = GET_CODE (x);
5446 int i, j;
5447 const char *fmt;
5448
5449 if (code == MEM)
5450 {
5451 if (refers_to_regno_p (regno, regno+1,
5452 XEXP (x, 0), 0))
5453 return true;
5454 }
5455 else if (code == SET
5456 && GET_CODE (SET_DEST (x)) == PC)
5457 {
5458 if (refers_to_regno_p (regno, regno+1,
5459 SET_SRC (x), 0))
5460 return true;
5461 }
5462
5463 fmt = GET_RTX_FORMAT (code);
5464 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5465 {
5466 if (fmt[i] == 'e'
5467 && reg_used_in_mem_p (regno, XEXP (x, i)))
5468 return true;
5469
5470 else if (fmt[i] == 'E')
5471 for (j = 0; j < XVECLEN (x, i); j++)
5472 if (reg_used_in_mem_p (regno, XVECEXP (x, i, j)))
5473 return true;
5474 }
5475 return false;
5476 }
5477
5478 /* Returns true if expression DEP_RTX sets an address register
5479 used by instruction INSN to address memory. */
5480
5481 static bool
5482 addr_generation_dependency_p (rtx dep_rtx, rtx insn)
5483 {
5484 rtx target, pat;
5485
5486 if (GET_CODE (dep_rtx) == INSN)
5487 dep_rtx = PATTERN (dep_rtx);
5488
5489 if (GET_CODE (dep_rtx) == SET)
5490 {
5491 target = SET_DEST (dep_rtx);
5492 if (GET_CODE (target) == STRICT_LOW_PART)
5493 target = XEXP (target, 0);
5494 while (GET_CODE (target) == SUBREG)
5495 target = SUBREG_REG (target);
5496
5497 if (GET_CODE (target) == REG)
5498 {
5499 int regno = REGNO (target);
5500
5501 if (s390_safe_attr_type (insn) == TYPE_LA)
5502 {
5503 pat = PATTERN (insn);
5504 if (GET_CODE (pat) == PARALLEL)
5505 {
5506 gcc_assert (XVECLEN (pat, 0) == 2);
5507 pat = XVECEXP (pat, 0, 0);
5508 }
5509 gcc_assert (GET_CODE (pat) == SET);
5510 return refers_to_regno_p (regno, regno+1, SET_SRC (pat), 0);
5511 }
5512 else if (get_attr_atype (insn) == ATYPE_AGEN)
5513 return reg_used_in_mem_p (regno, PATTERN (insn));
5514 }
5515 }
5516 return false;
5517 }
5518
5519 /* Return 1 if dep_insn sets a register used by insn in the agen unit. */
5520
5521 int
5522 s390_agen_dep_p (rtx dep_insn, rtx insn)
5523 {
5524 rtx dep_rtx = PATTERN (dep_insn);
5525 int i;
5526
5527 if (GET_CODE (dep_rtx) == SET
5528 && addr_generation_dependency_p (dep_rtx, insn))
5529 return 1;
5530 else if (GET_CODE (dep_rtx) == PARALLEL)
5531 {
5532 for (i = 0; i < XVECLEN (dep_rtx, 0); i++)
5533 {
5534 if (addr_generation_dependency_p (XVECEXP (dep_rtx, 0, i), insn))
5535 return 1;
5536 }
5537 }
5538 return 0;
5539 }
5540
5541
5542 /* A C statement (sans semicolon) to update the integer scheduling priority
5543 INSN_PRIORITY (INSN). Increase the priority to execute the INSN earlier,
5544 reduce the priority to execute INSN later. Do not define this macro if
5545 you do not need to adjust the scheduling priorities of insns.
5546
5547 A STD instruction should be scheduled earlier,
5548 in order to use the bypass. */
5549 static int
5550 s390_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
5551 {
5552 if (! INSN_P (insn))
5553 return priority;
5554
5555 if (s390_tune != PROCESSOR_2084_Z990
5556 && s390_tune != PROCESSOR_2094_Z9_109
5557 && s390_tune != PROCESSOR_2097_Z10
5558 && s390_tune != PROCESSOR_2817_Z196)
5559 return priority;
5560
5561 switch (s390_safe_attr_type (insn))
5562 {
5563 case TYPE_FSTOREDF:
5564 case TYPE_FSTORESF:
5565 priority = priority << 3;
5566 break;
5567 case TYPE_STORE:
5568 case TYPE_STM:
5569 priority = priority << 1;
5570 break;
5571 default:
5572 break;
5573 }
5574 return priority;
5575 }
5576
5577
5578 /* The number of instructions that can be issued per cycle. */
5579
5580 static int
5581 s390_issue_rate (void)
5582 {
5583 switch (s390_tune)
5584 {
5585 case PROCESSOR_2084_Z990:
5586 case PROCESSOR_2094_Z9_109:
5587 case PROCESSOR_2817_Z196:
5588 return 3;
5589 case PROCESSOR_2097_Z10:
5590 return 2;
5591 default:
5592 return 1;
5593 }
5594 }
5595
5596 static int
5597 s390_first_cycle_multipass_dfa_lookahead (void)
5598 {
5599 return 4;
5600 }
5601
5602 /* Annotate every literal pool reference in X by an UNSPEC_LTREF expression.
5603 Fix up MEMs as required. */
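/* E.g. a reference (mem (symbol_ref LPOOL)) is rewritten into
   (mem (unspec [LPOOL, base] UNSPEC_LTREF)) so that the literal pool
   base register is recorded explicitly (an illustrative RTL sketch;
   LPOOL stands for any constant pool symbol).  */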
5604
5605 static void
5606 annotate_constant_pool_refs (rtx *x)
5607 {
5608 int i, j;
5609 const char *fmt;
5610
5611 gcc_assert (GET_CODE (*x) != SYMBOL_REF
5612 || !CONSTANT_POOL_ADDRESS_P (*x));
5613
5614 /* Literal pool references can only occur inside a MEM ... */
5615 if (GET_CODE (*x) == MEM)
5616 {
5617 rtx memref = XEXP (*x, 0);
5618
5619 if (GET_CODE (memref) == SYMBOL_REF
5620 && CONSTANT_POOL_ADDRESS_P (memref))
5621 {
5622 rtx base = cfun->machine->base_reg;
5623 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, memref, base),
5624 UNSPEC_LTREF);
5625
5626 *x = replace_equiv_address (*x, addr);
5627 return;
5628 }
5629
5630 if (GET_CODE (memref) == CONST
5631 && GET_CODE (XEXP (memref, 0)) == PLUS
5632 && GET_CODE (XEXP (XEXP (memref, 0), 1)) == CONST_INT
5633 && GET_CODE (XEXP (XEXP (memref, 0), 0)) == SYMBOL_REF
5634 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (memref, 0), 0)))
5635 {
5636 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (memref, 0), 1));
5637 rtx sym = XEXP (XEXP (memref, 0), 0);
5638 rtx base = cfun->machine->base_reg;
5639 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5640 UNSPEC_LTREF);
5641
5642 *x = replace_equiv_address (*x, plus_constant (addr, off));
5643 return;
5644 }
5645 }
5646
5647 /* ... or a load-address type pattern. */
5648 if (GET_CODE (*x) == SET)
5649 {
5650 rtx addrref = SET_SRC (*x);
5651
5652 if (GET_CODE (addrref) == SYMBOL_REF
5653 && CONSTANT_POOL_ADDRESS_P (addrref))
5654 {
5655 rtx base = cfun->machine->base_reg;
5656 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addrref, base),
5657 UNSPEC_LTREF);
5658
5659 SET_SRC (*x) = addr;
5660 return;
5661 }
5662
5663 if (GET_CODE (addrref) == CONST
5664 && GET_CODE (XEXP (addrref, 0)) == PLUS
5665 && GET_CODE (XEXP (XEXP (addrref, 0), 1)) == CONST_INT
5666 && GET_CODE (XEXP (XEXP (addrref, 0), 0)) == SYMBOL_REF
5667 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (addrref, 0), 0)))
5668 {
5669 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (addrref, 0), 1));
5670 rtx sym = XEXP (XEXP (addrref, 0), 0);
5671 rtx base = cfun->machine->base_reg;
5672 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5673 UNSPEC_LTREF);
5674
5675 SET_SRC (*x) = plus_constant (addr, off);
5676 return;
5677 }
5678 }
5679
5680 /* Annotate LTREL_BASE as well. */
5681 if (GET_CODE (*x) == UNSPEC
5682 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
5683 {
5684 rtx base = cfun->machine->base_reg;
5685 *x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XVECEXP (*x, 0, 0), base),
5686 UNSPEC_LTREL_BASE);
5687 return;
5688 }
5689
5690 fmt = GET_RTX_FORMAT (GET_CODE (*x));
5691 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
5692 {
5693 if (fmt[i] == 'e')
5694 {
5695 annotate_constant_pool_refs (&XEXP (*x, i));
5696 }
5697 else if (fmt[i] == 'E')
5698 {
5699 for (j = 0; j < XVECLEN (*x, i); j++)
5700 annotate_constant_pool_refs (&XVECEXP (*x, i, j));
5701 }
5702 }
5703 }
5704
5705 /* Split all branches that exceed the maximum distance.
5706 Returns true if this created a new literal pool entry. */
5707
5708 static int
5709 s390_split_branches (void)
5710 {
5711 rtx temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
5712 int new_literal = 0, ret;
5713 rtx insn, pat, tmp, target;
5714 rtx *label;
5715
5716 /* We need correct insn addresses. */
5717
5718 shorten_branches (get_insns ());
5719
5720 /* Find all branches that exceed 64KB, and split them. */
5721
5722 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5723 {
5724 if (GET_CODE (insn) != JUMP_INSN)
5725 continue;
5726
5727 pat = PATTERN (insn);
5728 if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 2)
5729 pat = XVECEXP (pat, 0, 0);
5730 if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
5731 continue;
5732
5733 if (GET_CODE (SET_SRC (pat)) == LABEL_REF)
5734 {
5735 label = &SET_SRC (pat);
5736 }
5737 else if (GET_CODE (SET_SRC (pat)) == IF_THEN_ELSE)
5738 {
5739 if (GET_CODE (XEXP (SET_SRC (pat), 1)) == LABEL_REF)
5740 label = &XEXP (SET_SRC (pat), 1);
5741 else if (GET_CODE (XEXP (SET_SRC (pat), 2)) == LABEL_REF)
5742 label = &XEXP (SET_SRC (pat), 2);
5743 else
5744 continue;
5745 }
5746 else
5747 continue;
5748
5749 if (get_attr_length (insn) <= 4)
5750 continue;
5751
5752 /* We are going to use the return register as scratch register,
5753 make sure it will be saved/restored by the prologue/epilogue. */
5754 cfun_frame_layout.save_return_addr_p = 1;
5755
5756 if (!flag_pic)
5757 {
5758 new_literal = 1;
5759 tmp = force_const_mem (Pmode, *label);
5760 tmp = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, tmp), insn);
5761 INSN_ADDRESSES_NEW (tmp, -1);
5762 annotate_constant_pool_refs (&PATTERN (tmp));
5763
5764 target = temp_reg;
5765 }
5766 else
5767 {
5768 new_literal = 1;
5769 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, *label),
5770 UNSPEC_LTREL_OFFSET);
5771 target = gen_rtx_CONST (Pmode, target);
5772 target = force_const_mem (Pmode, target);
5773 tmp = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, target), insn);
5774 INSN_ADDRESSES_NEW (tmp, -1);
5775 annotate_constant_pool_refs (&PATTERN (tmp));
5776
5777 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XEXP (target, 0),
5778 cfun->machine->base_reg),
5779 UNSPEC_LTREL_BASE);
5780 target = gen_rtx_PLUS (Pmode, temp_reg, target);
5781 }
5782
5783 ret = validate_change (insn, label, target, 0);
5784 gcc_assert (ret);
5785 }
5786
5787 return new_literal;
5788 }
5789
5790
5791 /* Find an annotated literal pool symbol referenced in RTX X,
5792 and store it at REF. Will abort if X contains references to
5793 more than one such pool symbol; multiple references to the same
5794 symbol are allowed, however.
5795
5796 The rtx pointed to by REF must be initialized to NULL_RTX
5797 by the caller before calling this routine. */
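   /* The annotated form looked for here is the one built by
      annotate_constant_pool_refs, roughly (illustrative only):

        (unspec [(symbol_ref ...) (reg <base>)] UNSPEC_LTREF)

      where the symbol_ref satisfies CONSTANT_POOL_ADDRESS_P.  */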
5798
5799 static void
5800 find_constant_pool_ref (rtx x, rtx *ref)
5801 {
5802 int i, j;
5803 const char *fmt;
5804
5805 /* Ignore LTREL_BASE references. */
5806 if (GET_CODE (x) == UNSPEC
5807 && XINT (x, 1) == UNSPEC_LTREL_BASE)
5808 return;
5809 /* Likewise POOL_ENTRY insns. */
5810 if (GET_CODE (x) == UNSPEC_VOLATILE
5811 && XINT (x, 1) == UNSPECV_POOL_ENTRY)
5812 return;
5813
5814 gcc_assert (GET_CODE (x) != SYMBOL_REF
5815 || !CONSTANT_POOL_ADDRESS_P (x));
5816
5817 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_LTREF)
5818 {
5819 rtx sym = XVECEXP (x, 0, 0);
5820 gcc_assert (GET_CODE (sym) == SYMBOL_REF
5821 && CONSTANT_POOL_ADDRESS_P (sym));
5822
5823 if (*ref == NULL_RTX)
5824 *ref = sym;
5825 else
5826 gcc_assert (*ref == sym);
5827
5828 return;
5829 }
5830
5831 fmt = GET_RTX_FORMAT (GET_CODE (x));
5832 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5833 {
5834 if (fmt[i] == 'e')
5835 {
5836 find_constant_pool_ref (XEXP (x, i), ref);
5837 }
5838 else if (fmt[i] == 'E')
5839 {
5840 for (j = 0; j < XVECLEN (x, i); j++)
5841 find_constant_pool_ref (XVECEXP (x, i, j), ref);
5842 }
5843 }
5844 }
5845
5846 /* Replace every reference to the annotated literal pool
5847 symbol REF in X by its base plus OFFSET. */
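   /* Sketch of the rewrite performed below (illustrative only):

        (unspec [REF base] UNSPEC_LTREF)                        becomes  base + OFFSET
        (plus (unspec [REF base] UNSPEC_LTREF) (const_int N))   becomes  base + OFFSET + N  */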
5848
5849 static void
5850 replace_constant_pool_ref (rtx *x, rtx ref, rtx offset)
5851 {
5852 int i, j;
5853 const char *fmt;
5854
5855 gcc_assert (*x != ref);
5856
5857 if (GET_CODE (*x) == UNSPEC
5858 && XINT (*x, 1) == UNSPEC_LTREF
5859 && XVECEXP (*x, 0, 0) == ref)
5860 {
5861 *x = gen_rtx_PLUS (Pmode, XVECEXP (*x, 0, 1), offset);
5862 return;
5863 }
5864
5865 if (GET_CODE (*x) == PLUS
5866 && GET_CODE (XEXP (*x, 1)) == CONST_INT
5867 && GET_CODE (XEXP (*x, 0)) == UNSPEC
5868 && XINT (XEXP (*x, 0), 1) == UNSPEC_LTREF
5869 && XVECEXP (XEXP (*x, 0), 0, 0) == ref)
5870 {
5871 rtx addr = gen_rtx_PLUS (Pmode, XVECEXP (XEXP (*x, 0), 0, 1), offset);
5872 *x = plus_constant (addr, INTVAL (XEXP (*x, 1)));
5873 return;
5874 }
5875
5876 fmt = GET_RTX_FORMAT (GET_CODE (*x));
5877 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
5878 {
5879 if (fmt[i] == 'e')
5880 {
5881 replace_constant_pool_ref (&XEXP (*x, i), ref, offset);
5882 }
5883 else if (fmt[i] == 'E')
5884 {
5885 for (j = 0; j < XVECLEN (*x, i); j++)
5886 replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, offset);
5887 }
5888 }
5889 }
5890
5891 /* Check whether X contains an UNSPEC_LTREL_BASE.
5892 Return its constant pool symbol if found, NULL_RTX otherwise. */
5893
5894 static rtx
5895 find_ltrel_base (rtx x)
5896 {
5897 int i, j;
5898 const char *fmt;
5899
5900 if (GET_CODE (x) == UNSPEC
5901 && XINT (x, 1) == UNSPEC_LTREL_BASE)
5902 return XVECEXP (x, 0, 0);
5903
5904 fmt = GET_RTX_FORMAT (GET_CODE (x));
5905 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5906 {
5907 if (fmt[i] == 'e')
5908 {
5909 rtx fnd = find_ltrel_base (XEXP (x, i));
5910 if (fnd)
5911 return fnd;
5912 }
5913 else if (fmt[i] == 'E')
5914 {
5915 for (j = 0; j < XVECLEN (x, i); j++)
5916 {
5917 rtx fnd = find_ltrel_base (XVECEXP (x, i, j));
5918 if (fnd)
5919 return fnd;
5920 }
5921 }
5922 }
5923
5924 return NULL_RTX;
5925 }
5926
5927 /* Replace any occurrence of UNSPEC_LTREL_BASE in X with its base. */
5928
5929 static void
5930 replace_ltrel_base (rtx *x)
5931 {
5932 int i, j;
5933 const char *fmt;
5934
5935 if (GET_CODE (*x) == UNSPEC
5936 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
5937 {
5938 *x = XVECEXP (*x, 0, 1);
5939 return;
5940 }
5941
5942 fmt = GET_RTX_FORMAT (GET_CODE (*x));
5943 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
5944 {
5945 if (fmt[i] == 'e')
5946 {
5947 replace_ltrel_base (&XEXP (*x, i));
5948 }
5949 else if (fmt[i] == 'E')
5950 {
5951 for (j = 0; j < XVECLEN (*x, i); j++)
5952 replace_ltrel_base (&XVECEXP (*x, i, j));
5953 }
5954 }
5955 }
5956
5957
5958 /* We keep a list of constants which we have to add to internal
5959 constant tables in the middle of large functions. */
5960
5961 #define NR_C_MODES 11
5962 enum machine_mode constant_modes[NR_C_MODES] =
5963 {
5964 TFmode, TImode, TDmode,
5965 DFmode, DImode, DDmode,
5966 SFmode, SImode, SDmode,
5967 HImode,
5968 QImode
5969 };
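/* The modes above are listed from largest to smallest; s390_dump_pool walks
   this array in order, so pool entries are emitted in descending alignment
   requirement order.  */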
5970
5971 struct constant
5972 {
5973 struct constant *next;
5974 rtx value;
5975 rtx label;
5976 };
5977
5978 struct constant_pool
5979 {
5980 struct constant_pool *next;
5981 rtx first_insn;
5982 rtx pool_insn;
5983 bitmap insns;
5984 rtx emit_pool_after;
5985
5986 struct constant *constants[NR_C_MODES];
5987 struct constant *execute;
5988 rtx label;
5989 int size;
5990 };
5991
5992 /* Allocate new constant_pool structure. */
5993
5994 static struct constant_pool *
5995 s390_alloc_pool (void)
5996 {
5997 struct constant_pool *pool;
5998 int i;
5999
6000 pool = (struct constant_pool *) xmalloc (sizeof *pool);
6001 pool->next = NULL;
6002 for (i = 0; i < NR_C_MODES; i++)
6003 pool->constants[i] = NULL;
6004
6005 pool->execute = NULL;
6006 pool->label = gen_label_rtx ();
6007 pool->first_insn = NULL_RTX;
6008 pool->pool_insn = NULL_RTX;
6009 pool->insns = BITMAP_ALLOC (NULL);
6010 pool->size = 0;
6011 pool->emit_pool_after = NULL_RTX;
6012
6013 return pool;
6014 }
6015
6016 /* Create new constant pool covering instructions starting at INSN
6017 and chain it to the end of POOL_LIST. */
6018
6019 static struct constant_pool *
6020 s390_start_pool (struct constant_pool **pool_list, rtx insn)
6021 {
6022 struct constant_pool *pool, **prev;
6023
6024 pool = s390_alloc_pool ();
6025 pool->first_insn = insn;
6026
6027 for (prev = pool_list; *prev; prev = &(*prev)->next)
6028 ;
6029 *prev = pool;
6030
6031 return pool;
6032 }
6033
6034 /* End range of instructions covered by POOL at INSN and emit
6035 placeholder insn representing the pool. */
6036
6037 static void
6038 s390_end_pool (struct constant_pool *pool, rtx insn)
6039 {
6040 rtx pool_size = GEN_INT (pool->size + 8 /* alignment slop */);
6041
6042 if (!insn)
6043 insn = get_last_insn ();
6044
6045 pool->pool_insn = emit_insn_after (gen_pool (pool_size), insn);
6046 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6047 }
6048
6049 /* Add INSN to the list of insns covered by POOL. */
6050
6051 static void
6052 s390_add_pool_insn (struct constant_pool *pool, rtx insn)
6053 {
6054 bitmap_set_bit (pool->insns, INSN_UID (insn));
6055 }
6056
6057 /* Return pool out of POOL_LIST that covers INSN. */
6058
6059 static struct constant_pool *
6060 s390_find_pool (struct constant_pool *pool_list, rtx insn)
6061 {
6062 struct constant_pool *pool;
6063
6064 for (pool = pool_list; pool; pool = pool->next)
6065 if (bitmap_bit_p (pool->insns, INSN_UID (insn)))
6066 break;
6067
6068 return pool;
6069 }
6070
6071 /* Add constant VAL of mode MODE to the constant pool POOL. */
6072
6073 static void
6074 s390_add_constant (struct constant_pool *pool, rtx val, enum machine_mode mode)
6075 {
6076 struct constant *c;
6077 int i;
6078
6079 for (i = 0; i < NR_C_MODES; i++)
6080 if (constant_modes[i] == mode)
6081 break;
6082 gcc_assert (i != NR_C_MODES);
6083
6084 for (c = pool->constants[i]; c != NULL; c = c->next)
6085 if (rtx_equal_p (val, c->value))
6086 break;
6087
6088 if (c == NULL)
6089 {
6090 c = (struct constant *) xmalloc (sizeof *c);
6091 c->value = val;
6092 c->label = gen_label_rtx ();
6093 c->next = pool->constants[i];
6094 pool->constants[i] = c;
6095 pool->size += GET_MODE_SIZE (mode);
6096 }
6097 }
6098
6099 /* Return an rtx that represents the offset of X from the start of
6100 pool POOL. */
6101
6102 static rtx
6103 s390_pool_offset (struct constant_pool *pool, rtx x)
6104 {
6105 rtx label;
6106
6107 label = gen_rtx_LABEL_REF (GET_MODE (x), pool->label);
6108 x = gen_rtx_UNSPEC (GET_MODE (x), gen_rtvec (2, x, label),
6109 UNSPEC_POOL_OFFSET);
6110 return gen_rtx_CONST (GET_MODE (x), x);
6111 }
6112
6113 /* Find constant VAL of mode MODE in the constant pool POOL.
6114 Return an RTX describing the distance from the start of
6115 the pool to the location of the new constant. */
6116
6117 static rtx
6118 s390_find_constant (struct constant_pool *pool, rtx val,
6119 enum machine_mode mode)
6120 {
6121 struct constant *c;
6122 int i;
6123
6124 for (i = 0; i < NR_C_MODES; i++)
6125 if (constant_modes[i] == mode)
6126 break;
6127 gcc_assert (i != NR_C_MODES);
6128
6129 for (c = pool->constants[i]; c != NULL; c = c->next)
6130 if (rtx_equal_p (val, c->value))
6131 break;
6132
6133 gcc_assert (c);
6134
6135 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
6136 }
6137
6138 /* Check whether INSN is an execute. Return the label_ref to its
6139 execute target template if so, NULL_RTX otherwise. */
6140
6141 static rtx
6142 s390_execute_label (rtx insn)
6143 {
6144 if (GET_CODE (insn) == INSN
6145 && GET_CODE (PATTERN (insn)) == PARALLEL
6146 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == UNSPEC
6147 && XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_EXECUTE)
6148 return XVECEXP (XVECEXP (PATTERN (insn), 0, 0), 0, 2);
6149
6150 return NULL_RTX;
6151 }
6152
6153 /* Add execute target for INSN to the constant pool POOL. */
6154
6155 static void
6156 s390_add_execute (struct constant_pool *pool, rtx insn)
6157 {
6158 struct constant *c;
6159
6160 for (c = pool->execute; c != NULL; c = c->next)
6161 if (INSN_UID (insn) == INSN_UID (c->value))
6162 break;
6163
6164 if (c == NULL)
6165 {
6166 c = (struct constant *) xmalloc (sizeof *c);
6167 c->value = insn;
6168 c->label = gen_label_rtx ();
6169 c->next = pool->execute;
6170 pool->execute = c;
6171 pool->size += 6;
6172 }
6173 }
6174
6175 /* Find execute target for INSN in the constant pool POOL.
6176 Return an RTX describing the distance from the start of
6177 the pool to the location of the execute target. */
6178
6179 static rtx
6180 s390_find_execute (struct constant_pool *pool, rtx insn)
6181 {
6182 struct constant *c;
6183
6184 for (c = pool->execute; c != NULL; c = c->next)
6185 if (INSN_UID (insn) == INSN_UID (c->value))
6186 break;
6187
6188 gcc_assert (c);
6189
6190 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
6191 }
6192
6193 /* For an execute INSN, extract the execute target template. */
6194
6195 static rtx
6196 s390_execute_target (rtx insn)
6197 {
6198 rtx pattern = PATTERN (insn);
6199 gcc_assert (s390_execute_label (insn));
6200
6201 if (XVECLEN (pattern, 0) == 2)
6202 {
6203 pattern = copy_rtx (XVECEXP (pattern, 0, 1));
6204 }
6205 else
6206 {
6207 rtvec vec = rtvec_alloc (XVECLEN (pattern, 0) - 1);
6208 int i;
6209
6210 for (i = 0; i < XVECLEN (pattern, 0) - 1; i++)
6211 RTVEC_ELT (vec, i) = copy_rtx (XVECEXP (pattern, 0, i + 1));
6212
6213 pattern = gen_rtx_PARALLEL (VOIDmode, vec);
6214 }
6215
6216 return pattern;
6217 }
6218
6219 /* Indicate that INSN cannot be duplicated. This is the case for
6220 execute insns that carry a unique label. */
6221
6222 static bool
6223 s390_cannot_copy_insn_p (rtx insn)
6224 {
6225 rtx label = s390_execute_label (insn);
6226 return label && label != const0_rtx;
6227 }
6228
6229 /* Dump out the constants in POOL. If REMOTE_LABEL is true,
6230 do not emit the pool base label. */
6231
6232 static void
6233 s390_dump_pool (struct constant_pool *pool, bool remote_label)
6234 {
6235 struct constant *c;
6236 rtx insn = pool->pool_insn;
6237 int i;
6238
6239 /* Switch to rodata section. */
6240 if (TARGET_CPU_ZARCH)
6241 {
6242 insn = emit_insn_after (gen_pool_section_start (), insn);
6243 INSN_ADDRESSES_NEW (insn, -1);
6244 }
6245
6246 /* Ensure minimum pool alignment. */
6247 if (TARGET_CPU_ZARCH)
6248 insn = emit_insn_after (gen_pool_align (GEN_INT (8)), insn);
6249 else
6250 insn = emit_insn_after (gen_pool_align (GEN_INT (4)), insn);
6251 INSN_ADDRESSES_NEW (insn, -1);
6252
6253 /* Emit pool base label. */
6254 if (!remote_label)
6255 {
6256 insn = emit_label_after (pool->label, insn);
6257 INSN_ADDRESSES_NEW (insn, -1);
6258 }
6259
6260 /* Dump constants in descending alignment requirement order,
6261 ensuring proper alignment for every constant. */
6262 for (i = 0; i < NR_C_MODES; i++)
6263 for (c = pool->constants[i]; c; c = c->next)
6264 {
6265 /* Convert UNSPEC_LTREL_OFFSET unspecs to pool-relative references. */
6266 rtx value = copy_rtx (c->value);
6267 if (GET_CODE (value) == CONST
6268 && GET_CODE (XEXP (value, 0)) == UNSPEC
6269 && XINT (XEXP (value, 0), 1) == UNSPEC_LTREL_OFFSET
6270 && XVECLEN (XEXP (value, 0), 0) == 1)
6271 value = s390_pool_offset (pool, XVECEXP (XEXP (value, 0), 0, 0));
6272
6273 insn = emit_label_after (c->label, insn);
6274 INSN_ADDRESSES_NEW (insn, -1);
6275
6276 value = gen_rtx_UNSPEC_VOLATILE (constant_modes[i],
6277 gen_rtvec (1, value),
6278 UNSPECV_POOL_ENTRY);
6279 insn = emit_insn_after (value, insn);
6280 INSN_ADDRESSES_NEW (insn, -1);
6281 }
6282
6283 /* Ensure minimum alignment for instructions. */
6284 insn = emit_insn_after (gen_pool_align (GEN_INT (2)), insn);
6285 INSN_ADDRESSES_NEW (insn, -1);
6286
6287 /* Output in-pool execute template insns. */
6288 for (c = pool->execute; c; c = c->next)
6289 {
6290 insn = emit_label_after (c->label, insn);
6291 INSN_ADDRESSES_NEW (insn, -1);
6292
6293 insn = emit_insn_after (s390_execute_target (c->value), insn);
6294 INSN_ADDRESSES_NEW (insn, -1);
6295 }
6296
6297 /* Switch back to previous section. */
6298 if (TARGET_CPU_ZARCH)
6299 {
6300 insn = emit_insn_after (gen_pool_section_end (), insn);
6301 INSN_ADDRESSES_NEW (insn, -1);
6302 }
6303
6304 insn = emit_barrier_after (insn);
6305 INSN_ADDRESSES_NEW (insn, -1);
6306
6307 /* Remove placeholder insn. */
6308 remove_insn (pool->pool_insn);
6309 }
6310
6311 /* Free all memory used by POOL. */
6312
6313 static void
6314 s390_free_pool (struct constant_pool *pool)
6315 {
6316 struct constant *c, *next;
6317 int i;
6318
6319 for (i = 0; i < NR_C_MODES; i++)
6320 for (c = pool->constants[i]; c; c = next)
6321 {
6322 next = c->next;
6323 free (c);
6324 }
6325
6326 for (c = pool->execute; c; c = next)
6327 {
6328 next = c->next;
6329 free (c);
6330 }
6331
6332 BITMAP_FREE (pool->insns);
6333 free (pool);
6334 }
6335
6336
6337 /* Collect main literal pool. Return NULL on overflow. */
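/* "Overflow" means the pool would reach 4096 bytes or more; in that case it
   cannot be emitted as a single main pool (see the size check below) and the
   caller falls back to chunkifying it instead.  */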
6338
6339 static struct constant_pool *
6340 s390_mainpool_start (void)
6341 {
6342 struct constant_pool *pool;
6343 rtx insn;
6344
6345 pool = s390_alloc_pool ();
6346
6347 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6348 {
6349 if (GET_CODE (insn) == INSN
6350 && GET_CODE (PATTERN (insn)) == SET
6351 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC_VOLATILE
6352 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPECV_MAIN_POOL)
6353 {
6354 gcc_assert (!pool->pool_insn);
6355 pool->pool_insn = insn;
6356 }
6357
6358 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
6359 {
6360 s390_add_execute (pool, insn);
6361 }
6362 else if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6363 {
6364 rtx pool_ref = NULL_RTX;
6365 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6366 if (pool_ref)
6367 {
6368 rtx constant = get_pool_constant (pool_ref);
6369 enum machine_mode mode = get_pool_mode (pool_ref);
6370 s390_add_constant (pool, constant, mode);
6371 }
6372 }
6373
6374 /* If hot/cold partitioning is enabled we have to make sure that
6375 the literal pool is emitted in the same section where the
6376 initialization of the literal pool base pointer takes place.
6377 emit_pool_after is only used in the non-overflow case on non
6378 Z cpus where we can emit the literal pool at the end of the
6379 function body within the text section. */
6380 if (NOTE_P (insn)
6381 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS
6382 && !pool->emit_pool_after)
6383 pool->emit_pool_after = PREV_INSN (insn);
6384 }
6385
6386 gcc_assert (pool->pool_insn || pool->size == 0);
6387
6388 if (pool->size >= 4096)
6389 {
6390 /* We're going to chunkify the pool, so remove the main
6391 pool placeholder insn. */
6392 remove_insn (pool->pool_insn);
6393
6394 s390_free_pool (pool);
6395 pool = NULL;
6396 }
6397
6398 /* If the function ends with the section where the literal pool
6399 should be emitted, set the marker to its end. */
6400 if (pool && !pool->emit_pool_after)
6401 pool->emit_pool_after = get_last_insn ();
6402
6403 return pool;
6404 }
6405
6406 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6407 Modify the current function to output the pool constants as well as
6408 the pool register setup instruction. */
6409
6410 static void
6411 s390_mainpool_finish (struct constant_pool *pool)
6412 {
6413 rtx base_reg = cfun->machine->base_reg;
6414 rtx insn;
6415
6416 /* If the pool is empty, we're done. */
6417 if (pool->size == 0)
6418 {
6419 /* We don't actually need a base register after all. */
6420 cfun->machine->base_reg = NULL_RTX;
6421
6422 if (pool->pool_insn)
6423 remove_insn (pool->pool_insn);
6424 s390_free_pool (pool);
6425 return;
6426 }
6427
6428 /* We need correct insn addresses. */
6429 shorten_branches (get_insns ());
6430
6431 /* On zSeries, we use a LARL to load the pool register. The pool is
6432 located in the .rodata section, so we emit it after the function. */
6433 if (TARGET_CPU_ZARCH)
6434 {
6435 insn = gen_main_base_64 (base_reg, pool->label);
6436 insn = emit_insn_after (insn, pool->pool_insn);
6437 INSN_ADDRESSES_NEW (insn, -1);
6438 remove_insn (pool->pool_insn);
6439
6440 insn = get_last_insn ();
6441 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6442 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6443
6444 s390_dump_pool (pool, 0);
6445 }
6446
6447 /* On S/390, if the total size of the function's code plus literal pool
6448 does not exceed 4096 bytes, we use BASR to set up a function base
6449 pointer, and emit the literal pool at the end of the function. */
6450 else if (INSN_ADDRESSES (INSN_UID (pool->emit_pool_after))
6451 + pool->size + 8 /* alignment slop */ < 4096)
6452 {
6453 insn = gen_main_base_31_small (base_reg, pool->label);
6454 insn = emit_insn_after (insn, pool->pool_insn);
6455 INSN_ADDRESSES_NEW (insn, -1);
6456 remove_insn (pool->pool_insn);
6457
6458 insn = emit_label_after (pool->label, insn);
6459 INSN_ADDRESSES_NEW (insn, -1);
6460
6461 /* emit_pool_after will be set by s390_mainpool_start to the
6462 last insn of the section where the literal pool should be
6463 emitted. */
6464 insn = pool->emit_pool_after;
6465
6466 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6467 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6468
6469 s390_dump_pool (pool, 1);
6470 }
6471
6472 /* Otherwise, we emit an inline literal pool and use BASR to branch
6473 over it, setting up the pool register at the same time. */
6474 else
6475 {
6476 rtx pool_end = gen_label_rtx ();
6477
6478 insn = gen_main_base_31_large (base_reg, pool->label, pool_end);
6479 insn = emit_insn_after (insn, pool->pool_insn);
6480 INSN_ADDRESSES_NEW (insn, -1);
6481 remove_insn (pool->pool_insn);
6482
6483 insn = emit_label_after (pool->label, insn);
6484 INSN_ADDRESSES_NEW (insn, -1);
6485
6486 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6487 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6488
6489 insn = emit_label_after (pool_end, pool->pool_insn);
6490 INSN_ADDRESSES_NEW (insn, -1);
6491
6492 s390_dump_pool (pool, 1);
6493 }
6494
6495
6496 /* Replace all literal pool references. */
6497
6498 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6499 {
6500 if (INSN_P (insn))
6501 replace_ltrel_base (&PATTERN (insn));
6502
6503 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6504 {
6505 rtx addr, pool_ref = NULL_RTX;
6506 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6507 if (pool_ref)
6508 {
6509 if (s390_execute_label (insn))
6510 addr = s390_find_execute (pool, insn);
6511 else
6512 addr = s390_find_constant (pool, get_pool_constant (pool_ref),
6513 get_pool_mode (pool_ref));
6514
6515 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
6516 INSN_CODE (insn) = -1;
6517 }
6518 }
6519 }
6520
6521
6522 /* Free the pool. */
6523 s390_free_pool (pool);
6524 }
6525
6526 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6527 We have decided we cannot use this pool, so revert all changes
6528 to the current function that were done by s390_mainpool_start. */
6529 static void
6530 s390_mainpool_cancel (struct constant_pool *pool)
6531 {
6532 /* We didn't actually change the instruction stream, so simply
6533 free the pool memory. */
6534 s390_free_pool (pool);
6535 }
6536
6537
6538 /* Chunkify the literal pool. */
6539
6540 #define S390_POOL_CHUNK_MIN 0xc00
6541 #define S390_POOL_CHUNK_MAX 0xe00
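/* An informal note on these limits (a reading of the code, not documentation):
   a pool chunk is addressed from a single base label, and base + displacement
   addressing only reaches 4096 bytes, so chunks are ended well before that;
   the MIN/MAX pair gives s390_chunkify_start some slack in choosing a good
   place to end the current chunk.  */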
6542
6543 static struct constant_pool *
6544 s390_chunkify_start (void)
6545 {
6546 struct constant_pool *curr_pool = NULL, *pool_list = NULL;
6547 int extra_size = 0;
6548 bitmap far_labels;
6549 rtx pending_ltrel = NULL_RTX;
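  /* pending_ltrel remembers an UNSPEC_LTREL_OFFSET pool constant whose
     matching UNSPEC_LTREL_BASE use has not been seen yet; while it is set,
     the current chunk must not be ended, so that offset and base refer to
     the same pool label.  */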
6550 rtx insn;
6551
6552 rtx (*gen_reload_base) (rtx, rtx) =
6553 TARGET_CPU_ZARCH? gen_reload_base_64 : gen_reload_base_31;
6554
6555
6556 /* We need correct insn addresses. */
6557
6558 shorten_branches (get_insns ());
6559
6560 /* Scan all insns and move literals to pool chunks. */
6561
6562 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6563 {
6564 bool section_switch_p = false;
6565
6566 /* Check for pending LTREL_BASE. */
6567 if (INSN_P (insn))
6568 {
6569 rtx ltrel_base = find_ltrel_base (PATTERN (insn));
6570 if (ltrel_base)
6571 {
6572 gcc_assert (ltrel_base == pending_ltrel);
6573 pending_ltrel = NULL_RTX;
6574 }
6575 }
6576
6577 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
6578 {
6579 if (!curr_pool)
6580 curr_pool = s390_start_pool (&pool_list, insn);
6581
6582 s390_add_execute (curr_pool, insn);
6583 s390_add_pool_insn (curr_pool, insn);
6584 }
6585 else if (GET_CODE (insn) == INSN || CALL_P (insn))
6586 {
6587 rtx pool_ref = NULL_RTX;
6588 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6589 if (pool_ref)
6590 {
6591 rtx constant = get_pool_constant (pool_ref);
6592 enum machine_mode mode = get_pool_mode (pool_ref);
6593
6594 if (!curr_pool)
6595 curr_pool = s390_start_pool (&pool_list, insn);
6596
6597 s390_add_constant (curr_pool, constant, mode);
6598 s390_add_pool_insn (curr_pool, insn);
6599
6600 /* Don't split the pool chunk between an LTREL_OFFSET load
6601 and the corresponding LTREL_BASE. */
6602 if (GET_CODE (constant) == CONST
6603 && GET_CODE (XEXP (constant, 0)) == UNSPEC
6604 && XINT (XEXP (constant, 0), 1) == UNSPEC_LTREL_OFFSET)
6605 {
6606 gcc_assert (!pending_ltrel);
6607 pending_ltrel = pool_ref;
6608 }
6609 }
6610 /* Make sure we do not split between a call and its
6611 corresponding CALL_ARG_LOCATION note. */
6612 if (CALL_P (insn))
6613 {
6614 rtx next = NEXT_INSN (insn);
6615 if (next && NOTE_P (next)
6616 && NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION)
6617 continue;
6618 }
6619 }
6620
6621 if (GET_CODE (insn) == JUMP_INSN || GET_CODE (insn) == CODE_LABEL)
6622 {
6623 if (curr_pool)
6624 s390_add_pool_insn (curr_pool, insn);
6625 /* An LTREL_BASE must follow within the same basic block. */
6626 gcc_assert (!pending_ltrel);
6627 }
6628
6629 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
6630 section_switch_p = true;
6631
6632 if (!curr_pool
6633 || INSN_ADDRESSES_SIZE () <= (size_t) INSN_UID (insn)
6634 || INSN_ADDRESSES (INSN_UID (insn)) == -1)
6635 continue;
6636
6637 if (TARGET_CPU_ZARCH)
6638 {
6639 if (curr_pool->size < S390_POOL_CHUNK_MAX)
6640 continue;
6641
6642 s390_end_pool (curr_pool, NULL_RTX);
6643 curr_pool = NULL;
6644 }
6645 else
6646 {
6647 int chunk_size = INSN_ADDRESSES (INSN_UID (insn))
6648 - INSN_ADDRESSES (INSN_UID (curr_pool->first_insn))
6649 + extra_size;
6650
6651 /* We will later have to insert base register reload insns.
6652 Those will have an effect on code size, which we need to
6653 consider here. This calculation makes rather pessimistic
6654 worst-case assumptions. */
6655 if (GET_CODE (insn) == CODE_LABEL)
6656 extra_size += 6;
6657
6658 if (chunk_size < S390_POOL_CHUNK_MIN
6659 && curr_pool->size < S390_POOL_CHUNK_MIN
6660 && !section_switch_p)
6661 continue;
6662
6663 /* Pool chunks can only be inserted after BARRIERs ... */
6664 if (GET_CODE (insn) == BARRIER)
6665 {
6666 s390_end_pool (curr_pool, insn);
6667 curr_pool = NULL;
6668 extra_size = 0;
6669 }
6670
6671 /* ... so if we don't find one in time, create one. */
6672 else if (chunk_size > S390_POOL_CHUNK_MAX
6673 || curr_pool->size > S390_POOL_CHUNK_MAX
6674 || section_switch_p)
6675 {
6676 rtx label, jump, barrier;
6677
6678 if (!section_switch_p)
6679 {
6680 /* We can insert the barrier only after a 'real' insn. */
6681 if (GET_CODE (insn) != INSN && GET_CODE (insn) != CALL_INSN)
6682 continue;
6683 if (get_attr_length (insn) == 0)
6684 continue;
6685 /* Don't separate LTREL_BASE from the corresponding
6686 LTREL_OFFSET load. */
6687 if (pending_ltrel)
6688 continue;
6689 }
6690 else
6691 {
6692 gcc_assert (!pending_ltrel);
6693
6694 /* The old pool has to end before the section switch
6695 note in order to make it part of the current
6696 section. */
6697 insn = PREV_INSN (insn);
6698 }
6699
6700 label = gen_label_rtx ();
6701 jump = emit_jump_insn_after (gen_jump (label), insn);
6702 barrier = emit_barrier_after (jump);
6703 insn = emit_label_after (label, barrier);
6704 JUMP_LABEL (jump) = label;
6705 LABEL_NUSES (label) = 1;
6706
6707 INSN_ADDRESSES_NEW (jump, -1);
6708 INSN_ADDRESSES_NEW (barrier, -1);
6709 INSN_ADDRESSES_NEW (insn, -1);
6710
6711 s390_end_pool (curr_pool, barrier);
6712 curr_pool = NULL;
6713 extra_size = 0;
6714 }
6715 }
6716 }
6717
6718 if (curr_pool)
6719 s390_end_pool (curr_pool, NULL_RTX);
6720 gcc_assert (!pending_ltrel);
6721
6722 /* Find all labels that are branched into
6723 from an insn belonging to a different chunk. */
6724
6725 far_labels = BITMAP_ALLOC (NULL);
6726
6727 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6728 {
6729 /* Labels marked with LABEL_PRESERVE_P can be the target
6730 of non-local jumps, so we have to mark them.
6731 The same holds for named labels.
6732
6733 Don't do that, however, if it is the label before
6734 a jump table. */
6735
6736 if (GET_CODE (insn) == CODE_LABEL
6737 && (LABEL_PRESERVE_P (insn) || LABEL_NAME (insn)))
6738 {
6739 rtx vec_insn = next_real_insn (insn);
6740 rtx vec_pat = vec_insn && GET_CODE (vec_insn) == JUMP_INSN ?
6741 PATTERN (vec_insn) : NULL_RTX;
6742 if (!vec_pat
6743 || !(GET_CODE (vec_pat) == ADDR_VEC
6744 || GET_CODE (vec_pat) == ADDR_DIFF_VEC))
6745 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (insn));
6746 }
6747
6748 /* If we have a direct jump (conditional or unconditional)
6749 or a casesi jump, check all potential targets. */
6750 else if (GET_CODE (insn) == JUMP_INSN)
6751 {
6752 rtx pat = PATTERN (insn);
6753 if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 2)
6754 pat = XVECEXP (pat, 0, 0);
6755
6756 if (GET_CODE (pat) == SET)
6757 {
6758 rtx label = JUMP_LABEL (insn);
6759 if (label)
6760 {
6761 if (s390_find_pool (pool_list, label)
6762 != s390_find_pool (pool_list, insn))
6763 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
6764 }
6765 }
6766 else if (GET_CODE (pat) == PARALLEL
6767 && XVECLEN (pat, 0) == 2
6768 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
6769 && GET_CODE (XVECEXP (pat, 0, 1)) == USE
6770 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == LABEL_REF)
6771 {
6772 /* Find the jump table used by this casesi jump. */
6773 rtx vec_label = XEXP (XEXP (XVECEXP (pat, 0, 1), 0), 0);
6774 rtx vec_insn = next_real_insn (vec_label);
6775 rtx vec_pat = vec_insn && GET_CODE (vec_insn) == JUMP_INSN ?
6776 PATTERN (vec_insn) : NULL_RTX;
6777 if (vec_pat
6778 && (GET_CODE (vec_pat) == ADDR_VEC
6779 || GET_CODE (vec_pat) == ADDR_DIFF_VEC))
6780 {
6781 int i, diff_p = GET_CODE (vec_pat) == ADDR_DIFF_VEC;
6782
6783 for (i = 0; i < XVECLEN (vec_pat, diff_p); i++)
6784 {
6785 rtx label = XEXP (XVECEXP (vec_pat, diff_p, i), 0);
6786
6787 if (s390_find_pool (pool_list, label)
6788 != s390_find_pool (pool_list, insn))
6789 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
6790 }
6791 }
6792 }
6793 }
6794 }
6795
6796 /* Insert base register reload insns before every pool. */
6797
6798 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
6799 {
6800 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
6801 curr_pool->label);
6802 rtx insn = curr_pool->first_insn;
6803 INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
6804 }
6805
6806 /* Insert base register reload insns at every far label. */
6807
6808 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6809 if (GET_CODE (insn) == CODE_LABEL
6810 && bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
6811 {
6812 struct constant_pool *pool = s390_find_pool (pool_list, insn);
6813 if (pool)
6814 {
6815 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
6816 pool->label);
6817 INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
6818 }
6819 }
6820
6821
6822 BITMAP_FREE (far_labels);
6823
6824
6825 /* Recompute insn addresses. */
6826
6827 init_insn_lengths ();
6828 shorten_branches (get_insns ());
6829
6830 return pool_list;
6831 }
6832
6833 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
6834 After we have decided to use this list, finish implementing
6835 all changes to the current function as required. */
6836
6837 static void
6838 s390_chunkify_finish (struct constant_pool *pool_list)
6839 {
6840 struct constant_pool *curr_pool = NULL;
6841 rtx insn;
6842
6843
6844 /* Replace all literal pool references. */
6845
6846 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6847 {
6848 if (INSN_P (insn))
6849 replace_ltrel_base (&PATTERN (insn));
6850
6851 curr_pool = s390_find_pool (pool_list, insn);
6852 if (!curr_pool)
6853 continue;
6854
6855 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6856 {
6857 rtx addr, pool_ref = NULL_RTX;
6858 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6859 if (pool_ref)
6860 {
6861 if (s390_execute_label (insn))
6862 addr = s390_find_execute (curr_pool, insn);
6863 else
6864 addr = s390_find_constant (curr_pool,
6865 get_pool_constant (pool_ref),
6866 get_pool_mode (pool_ref));
6867
6868 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
6869 INSN_CODE (insn) = -1;
6870 }
6871 }
6872 }
6873
6874 /* Dump out all literal pools. */
6875
6876 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
6877 s390_dump_pool (curr_pool, 0);
6878
6879 /* Free pool list. */
6880
6881 while (pool_list)
6882 {
6883 struct constant_pool *next = pool_list->next;
6884 s390_free_pool (pool_list);
6885 pool_list = next;
6886 }
6887 }
6888
6889 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
6890 We have decided we cannot use this list, so revert all changes
6891 to the current function that were done by s390_chunkify_start. */
6892
6893 static void
6894 s390_chunkify_cancel (struct constant_pool *pool_list)
6895 {
6896 struct constant_pool *curr_pool = NULL;
6897 rtx insn;
6898
6899 /* Remove all pool placeholder insns. */
6900
6901 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
6902 {
6903 /* Did we insert an extra barrier? Remove it. */
6904 rtx barrier = PREV_INSN (curr_pool->pool_insn);
6905 rtx jump = barrier? PREV_INSN (barrier) : NULL_RTX;
6906 rtx label = NEXT_INSN (curr_pool->pool_insn);
6907
6908 if (jump && GET_CODE (jump) == JUMP_INSN
6909 && barrier && GET_CODE (barrier) == BARRIER
6910 && label && GET_CODE (label) == CODE_LABEL
6911 && GET_CODE (PATTERN (jump)) == SET
6912 && SET_DEST (PATTERN (jump)) == pc_rtx
6913 && GET_CODE (SET_SRC (PATTERN (jump))) == LABEL_REF
6914 && XEXP (SET_SRC (PATTERN (jump)), 0) == label)
6915 {
6916 remove_insn (jump);
6917 remove_insn (barrier);
6918 remove_insn (label);
6919 }
6920
6921 remove_insn (curr_pool->pool_insn);
6922 }
6923
6924 /* Remove all base register reload insns. */
6925
6926 for (insn = get_insns (); insn; )
6927 {
6928 rtx next_insn = NEXT_INSN (insn);
6929
6930 if (GET_CODE (insn) == INSN
6931 && GET_CODE (PATTERN (insn)) == SET
6932 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
6933 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_RELOAD_BASE)
6934 remove_insn (insn);
6935
6936 insn = next_insn;
6937 }
6938
6939 /* Free pool list. */
6940
6941 while (pool_list)
6942 {
6943 struct constant_pool *next = pool_list->next;
6944 s390_free_pool (pool_list);
6945 pool_list = next;
6946 }
6947 }
6948
6949 /* Output the constant pool entry EXP in mode MODE with alignment ALIGN. */
6950
6951 void
6952 s390_output_pool_entry (rtx exp, enum machine_mode mode, unsigned int align)
6953 {
6954 REAL_VALUE_TYPE r;
6955
6956 switch (GET_MODE_CLASS (mode))
6957 {
6958 case MODE_FLOAT:
6959 case MODE_DECIMAL_FLOAT:
6960 gcc_assert (GET_CODE (exp) == CONST_DOUBLE);
6961
6962 REAL_VALUE_FROM_CONST_DOUBLE (r, exp);
6963 assemble_real (r, mode, align);
6964 break;
6965
6966 case MODE_INT:
6967 assemble_integer (exp, GET_MODE_SIZE (mode), align, 1);
6968 mark_symbol_refs_as_used (exp);
6969 break;
6970
6971 default:
6972 gcc_unreachable ();
6973 }
6974 }
6975
6976
6977 /* Return an RTL expression representing the value of the return address
6978 for the frame COUNT steps up from the current frame. FRAME is the
6979 frame pointer of that frame. */
6980
6981 rtx
6982 s390_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
6983 {
6984 int offset;
6985 rtx addr;
6986
6987 /* Without backchain, we fail for all but the current frame. */
6988
6989 if (!TARGET_BACKCHAIN && count > 0)
6990 return NULL_RTX;
6991
6992 /* For the current frame, we need to make sure the initial
6993 value of RETURN_REGNUM is actually saved. */
6994
6995 if (count == 0)
6996 {
6997 /* On non-z architectures branch splitting could overwrite r14. */
6998 if (TARGET_CPU_ZARCH)
6999 return get_hard_reg_initial_val (Pmode, RETURN_REGNUM);
7000 else
7001 {
7002 cfun_frame_layout.save_return_addr_p = true;
7003 return gen_rtx_MEM (Pmode, return_address_pointer_rtx);
7004 }
7005 }
7006
7007 if (TARGET_PACKED_STACK)
7008 offset = -2 * UNITS_PER_LONG;
7009 else
7010 offset = RETURN_REGNUM * UNITS_PER_LONG;
7011
7012 addr = plus_constant (frame, offset);
7013 addr = memory_address (Pmode, addr);
7014 return gen_rtx_MEM (Pmode, addr);
7015 }
7016
7017 /* Return an RTL expression representing the back chain stored in
7018 the current stack frame. */
7019
7020 rtx
7021 s390_back_chain_rtx (void)
7022 {
7023 rtx chain;
7024
7025 gcc_assert (TARGET_BACKCHAIN);
7026
7027 if (TARGET_PACKED_STACK)
7028 chain = plus_constant (stack_pointer_rtx,
7029 STACK_POINTER_OFFSET - UNITS_PER_LONG);
7030 else
7031 chain = stack_pointer_rtx;
7032
7033 chain = gen_rtx_MEM (Pmode, chain);
7034 return chain;
7035 }
7036
7037 /* Find first call clobbered register unused in a function.
7038 This could be used as base register in a leaf function
7039 or for holding the return address before the epilogue. */
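/* (The loop below only scans GPRs 0-5, i.e. the call-clobbered GPRs of the
   S/390 ABI; GPRs 6-15 are call-saved and would require a save/restore.)  */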
7040
7041 static int
7042 find_unused_clobbered_reg (void)
7043 {
7044 int i;
7045 for (i = 0; i < 6; i++)
7046 if (!df_regs_ever_live_p (i))
7047 return i;
7048 return 0;
7049 }
7050
7051
7052 /* Helper function for s390_regs_ever_clobbered. Sets the fields in DATA for all
7053 clobbered hard regs in SETREG. */
7054
7055 static void
7056 s390_reg_clobbered_rtx (rtx setreg, const_rtx set_insn ATTRIBUTE_UNUSED, void *data)
7057 {
7058 int *regs_ever_clobbered = (int *)data;
7059 unsigned int i, regno;
7060 enum machine_mode mode = GET_MODE (setreg);
7061
7062 if (GET_CODE (setreg) == SUBREG)
7063 {
7064 rtx inner = SUBREG_REG (setreg);
7065 if (!GENERAL_REG_P (inner))
7066 return;
7067 regno = subreg_regno (setreg);
7068 }
7069 else if (GENERAL_REG_P (setreg))
7070 regno = REGNO (setreg);
7071 else
7072 return;
7073
7074 for (i = regno;
7075 i < regno + HARD_REGNO_NREGS (regno, mode);
7076 i++)
7077 regs_ever_clobbered[i] = 1;
7078 }
7079
7080 /* Walks through all basic blocks of the current function looking
7081 for clobbered hard regs using s390_reg_clobbered_rtx. The fields
7082 of the passed integer array REGS_EVER_CLOBBERED are set to one for
7083 each of those regs. */
7084
7085 static void
7086 s390_regs_ever_clobbered (int *regs_ever_clobbered)
7087 {
7088 basic_block cur_bb;
7089 rtx cur_insn;
7090 unsigned int i;
7091
7092 memset (regs_ever_clobbered, 0, 16 * sizeof (int));
7093
7094 /* For non-leaf functions we have to consider all call-clobbered regs to be
7095 clobbered. */
7096 if (!current_function_is_leaf)
7097 {
7098 for (i = 0; i < 16; i++)
7099 regs_ever_clobbered[i] = call_really_used_regs[i];
7100 }
7101
7102 /* Make the "magic" eh_return registers live if necessary. For regs_ever_live
7103 this work is done by liveness analysis (mark_regs_live_at_end).
7104 Special care is needed for functions containing landing pads. Landing pads
7105 may use the eh registers, but the code which sets these registers is not
7106 contained in that function. Hence s390_regs_ever_clobbered is not able to
7107 deal with this automatically. */
7108 if (crtl->calls_eh_return || cfun->machine->has_landing_pad_p)
7109 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM ; i++)
7110 if (crtl->calls_eh_return
7111 || (cfun->machine->has_landing_pad_p
7112 && df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
7113 regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;
7114
7115 /* For nonlocal gotos all call-saved registers have to be saved.
7116 This flag is also set for the unwinding code in libgcc.
7117 See expand_builtin_unwind_init. For regs_ever_live this is done by
7118 reload. */
7119 if (cfun->has_nonlocal_label)
7120 for (i = 0; i < 16; i++)
7121 if (!call_really_used_regs[i])
7122 regs_ever_clobbered[i] = 1;
7123
7124 FOR_EACH_BB (cur_bb)
7125 {
7126 FOR_BB_INSNS (cur_bb, cur_insn)
7127 {
7128 if (INSN_P (cur_insn))
7129 note_stores (PATTERN (cur_insn),
7130 s390_reg_clobbered_rtx,
7131 regs_ever_clobbered);
7132 }
7133 }
7134 }
7135
7136 /* Determine the frame area which actually has to be accessed
7137 in the function epilogue. The values are stored at the
7138 given pointers AREA_BOTTOM (address of the lowest used stack
7139 address) and AREA_TOP (address of the first item which does
7140 not belong to the stack frame). */
7141
7142 static void
7143 s390_frame_area (int *area_bottom, int *area_top)
7144 {
7145 int b, t;
7146 int i;
7147
7148 b = INT_MAX;
7149 t = INT_MIN;
7150
7151 if (cfun_frame_layout.first_restore_gpr != -1)
7152 {
7153 b = (cfun_frame_layout.gprs_offset
7154 + cfun_frame_layout.first_restore_gpr * UNITS_PER_LONG);
7155 t = b + (cfun_frame_layout.last_restore_gpr
7156 - cfun_frame_layout.first_restore_gpr + 1) * UNITS_PER_LONG;
7157 }
7158
7159 if (TARGET_64BIT && cfun_save_high_fprs_p)
7160 {
7161 b = MIN (b, cfun_frame_layout.f8_offset);
7162 t = MAX (t, (cfun_frame_layout.f8_offset
7163 + cfun_frame_layout.high_fprs * 8));
7164 }
7165
7166 if (!TARGET_64BIT)
7167 for (i = 2; i < 4; i++)
7168 if (cfun_fpr_bit_p (i))
7169 {
7170 b = MIN (b, cfun_frame_layout.f4_offset + (i - 2) * 8);
7171 t = MAX (t, cfun_frame_layout.f4_offset + (i - 1) * 8);
7172 }
7173
7174 *area_bottom = b;
7175 *area_top = t;
7176 }
7177
7178 /* Fill cfun->machine with info about register usage of current function.
7179 Return in CLOBBERED_REGS which GPRs are currently considered set. */
7180
7181 static void
7182 s390_register_info (int clobbered_regs[])
7183 {
7184 int i, j;
7185
7186 /* FPRs 8 - 15 are call-saved in the 64-bit ABI. */
7187 cfun_frame_layout.fpr_bitmap = 0;
7188 cfun_frame_layout.high_fprs = 0;
7189 if (TARGET_64BIT)
7190 for (i = 24; i < 32; i++)
7191 if (df_regs_ever_live_p (i) && !global_regs[i])
7192 {
7193 cfun_set_fpr_bit (i - 16);
7194 cfun_frame_layout.high_fprs++;
7195 }
7196
7197 /* Find first and last gpr to be saved. We trust regs_ever_live
7198 data, except that we don't save and restore global registers.
7199
7200 Also, all registers with special meaning to the compiler need
7201 to be handled specially. */
7202
7203 s390_regs_ever_clobbered (clobbered_regs);
7204
7205 for (i = 0; i < 16; i++)
7206 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i] && !fixed_regs[i];
7207
7208 if (frame_pointer_needed)
7209 clobbered_regs[HARD_FRAME_POINTER_REGNUM] = 1;
7210
7211 if (flag_pic)
7212 clobbered_regs[PIC_OFFSET_TABLE_REGNUM]
7213 |= df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM);
7214
7215 clobbered_regs[BASE_REGNUM]
7216 |= (cfun->machine->base_reg
7217 && REGNO (cfun->machine->base_reg) == BASE_REGNUM);
7218
7219 clobbered_regs[RETURN_REGNUM]
7220 |= (!current_function_is_leaf
7221 || TARGET_TPF_PROFILING
7222 || cfun->machine->split_branches_pending_p
7223 || cfun_frame_layout.save_return_addr_p
7224 || crtl->calls_eh_return
7225 || cfun->stdarg);
7226
7227 clobbered_regs[STACK_POINTER_REGNUM]
7228 |= (!current_function_is_leaf
7229 || TARGET_TPF_PROFILING
7230 || cfun_save_high_fprs_p
7231 || get_frame_size () > 0
7232 || cfun->calls_alloca
7233 || cfun->stdarg);
7234
7235 for (i = 6; i < 16; i++)
7236 if (df_regs_ever_live_p (i) || clobbered_regs[i])
7237 break;
7238 for (j = 15; j > i; j--)
7239 if (df_regs_ever_live_p (j) || clobbered_regs[j])
7240 break;
7241
7242 if (i == 16)
7243 {
7244 /* Nothing to save/restore. */
7245 cfun_frame_layout.first_save_gpr_slot = -1;
7246 cfun_frame_layout.last_save_gpr_slot = -1;
7247 cfun_frame_layout.first_save_gpr = -1;
7248 cfun_frame_layout.first_restore_gpr = -1;
7249 cfun_frame_layout.last_save_gpr = -1;
7250 cfun_frame_layout.last_restore_gpr = -1;
7251 }
7252 else
7253 {
7254 /* Save slots for gprs from i to j. */
7255 cfun_frame_layout.first_save_gpr_slot = i;
7256 cfun_frame_layout.last_save_gpr_slot = j;
7257
7258 for (i = cfun_frame_layout.first_save_gpr_slot;
7259 i < cfun_frame_layout.last_save_gpr_slot + 1;
7260 i++)
7261 if (clobbered_regs[i])
7262 break;
7263
7264 for (j = cfun_frame_layout.last_save_gpr_slot; j > i; j--)
7265 if (clobbered_regs[j])
7266 break;
7267
7268 if (i == cfun_frame_layout.last_save_gpr_slot + 1)
7269 {
7270 /* Nothing to save/restore. */
7271 cfun_frame_layout.first_save_gpr = -1;
7272 cfun_frame_layout.first_restore_gpr = -1;
7273 cfun_frame_layout.last_save_gpr = -1;
7274 cfun_frame_layout.last_restore_gpr = -1;
7275 }
7276 else
7277 {
7278 /* Save / Restore from gpr i to j. */
7279 cfun_frame_layout.first_save_gpr = i;
7280 cfun_frame_layout.first_restore_gpr = i;
7281 cfun_frame_layout.last_save_gpr = j;
7282 cfun_frame_layout.last_restore_gpr = j;
7283 }
7284 }
7285
7286 if (cfun->stdarg)
7287 {
7288 /* Varargs functions need to save gprs 2 to 6. */
7289 if (cfun->va_list_gpr_size
7290 && crtl->args.info.gprs < GP_ARG_NUM_REG)
7291 {
7292 int min_gpr = crtl->args.info.gprs;
7293 int max_gpr = min_gpr + cfun->va_list_gpr_size;
7294 if (max_gpr > GP_ARG_NUM_REG)
7295 max_gpr = GP_ARG_NUM_REG;
7296
7297 if (cfun_frame_layout.first_save_gpr == -1
7298 || cfun_frame_layout.first_save_gpr > 2 + min_gpr)
7299 {
7300 cfun_frame_layout.first_save_gpr = 2 + min_gpr;
7301 cfun_frame_layout.first_save_gpr_slot = 2 + min_gpr;
7302 }
7303
7304 if (cfun_frame_layout.last_save_gpr == -1
7305 || cfun_frame_layout.last_save_gpr < 2 + max_gpr - 1)
7306 {
7307 cfun_frame_layout.last_save_gpr = 2 + max_gpr - 1;
7308 cfun_frame_layout.last_save_gpr_slot = 2 + max_gpr - 1;
7309 }
7310 }
7311
7312 /* Mark f0, f2 for 31 bit and f0-f4 for 64 bit to be saved. */
7313 if (TARGET_HARD_FLOAT && cfun->va_list_fpr_size
7314 && crtl->args.info.fprs < FP_ARG_NUM_REG)
7315 {
7316 int min_fpr = crtl->args.info.fprs;
7317 int max_fpr = min_fpr + cfun->va_list_fpr_size;
7318 if (max_fpr > FP_ARG_NUM_REG)
7319 max_fpr = FP_ARG_NUM_REG;
7320
7321 /* ??? This is currently required to ensure proper location
7322 of the fpr save slots within the va_list save area. */
7323 if (TARGET_PACKED_STACK)
7324 min_fpr = 0;
7325
7326 for (i = min_fpr; i < max_fpr; i++)
7327 cfun_set_fpr_bit (i);
7328 }
7329 }
7330
7331 if (!TARGET_64BIT)
7332 for (i = 2; i < 4; i++)
7333 if (df_regs_ever_live_p (i + 16) && !global_regs[i + 16])
7334 cfun_set_fpr_bit (i);
7335 }
7336
7337 /* Fill cfun->machine with info about frame of current function. */
7338
7339 static void
7340 s390_frame_info (void)
7341 {
7342 int i;
7343
7344 cfun_frame_layout.frame_size = get_frame_size ();
7345 if (!TARGET_64BIT && cfun_frame_layout.frame_size > 0x7fff0000)
7346 fatal_error ("total size of local variables exceeds architecture limit");
7347
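  /* Three save-area layouts are handled below: the standard layout with its
     fixed save-slot positions, the packed-stack kernel layout (with
     backchain), and the packed-stack layout without backchain, where the
     save slots are packed tightly below STACK_POINTER_OFFSET.  The remarks
     on each branch follow from the offset arithmetic itself.  */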
7348 if (!TARGET_PACKED_STACK)
7349 {
7350 cfun_frame_layout.backchain_offset = 0;
7351 cfun_frame_layout.f0_offset = 16 * UNITS_PER_LONG;
7352 cfun_frame_layout.f4_offset = cfun_frame_layout.f0_offset + 2 * 8;
7353 cfun_frame_layout.f8_offset = -cfun_frame_layout.high_fprs * 8;
7354 cfun_frame_layout.gprs_offset = (cfun_frame_layout.first_save_gpr_slot
7355 * UNITS_PER_LONG);
7356 }
7357 else if (TARGET_BACKCHAIN) /* kernel stack layout */
7358 {
7359 cfun_frame_layout.backchain_offset = (STACK_POINTER_OFFSET
7360 - UNITS_PER_LONG);
7361 cfun_frame_layout.gprs_offset
7362 = (cfun_frame_layout.backchain_offset
7363 - (STACK_POINTER_REGNUM - cfun_frame_layout.first_save_gpr_slot + 1)
7364 * UNITS_PER_LONG);
7365
7366 if (TARGET_64BIT)
7367 {
7368 cfun_frame_layout.f4_offset
7369 = (cfun_frame_layout.gprs_offset
7370 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7371
7372 cfun_frame_layout.f0_offset
7373 = (cfun_frame_layout.f4_offset
7374 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7375 }
7376 else
7377 {
7378 /* On 31 bit we have to take care of the alignment of the
7379 floating-point regs to provide fastest access. */
7380 cfun_frame_layout.f0_offset
7381 = ((cfun_frame_layout.gprs_offset
7382 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1))
7383 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7384
7385 cfun_frame_layout.f4_offset
7386 = (cfun_frame_layout.f0_offset
7387 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7388 }
7389 }
7390 else /* no backchain */
7391 {
7392 cfun_frame_layout.f4_offset
7393 = (STACK_POINTER_OFFSET
7394 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7395
7396 cfun_frame_layout.f0_offset
7397 = (cfun_frame_layout.f4_offset
7398 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7399
7400 cfun_frame_layout.gprs_offset
7401 = cfun_frame_layout.f0_offset - cfun_gprs_save_area_size;
7402 }
7403
7404 if (current_function_is_leaf
7405 && !TARGET_TPF_PROFILING
7406 && cfun_frame_layout.frame_size == 0
7407 && !cfun_save_high_fprs_p
7408 && !cfun->calls_alloca
7409 && !cfun->stdarg)
7410 return;
7411
7412 if (!TARGET_PACKED_STACK)
7413 cfun_frame_layout.frame_size += (STACK_POINTER_OFFSET
7414 + crtl->outgoing_args_size
7415 + cfun_frame_layout.high_fprs * 8);
7416 else
7417 {
7418 if (TARGET_BACKCHAIN)
7419 cfun_frame_layout.frame_size += UNITS_PER_LONG;
7420
7421 /* No alignment trouble here because f8-f15 are only saved under
7422 64 bit. */
7423 cfun_frame_layout.f8_offset = (MIN (MIN (cfun_frame_layout.f0_offset,
7424 cfun_frame_layout.f4_offset),
7425 cfun_frame_layout.gprs_offset)
7426 - cfun_frame_layout.high_fprs * 8);
7427
7428 cfun_frame_layout.frame_size += cfun_frame_layout.high_fprs * 8;
7429
7430 for (i = 0; i < 8; i++)
7431 if (cfun_fpr_bit_p (i))
7432 cfun_frame_layout.frame_size += 8;
7433
7434 cfun_frame_layout.frame_size += cfun_gprs_save_area_size;
7435
7436 /* If an odd number of GPRs has to be saved under 31 bit, we have to adjust
7437 the frame size to maintain 8-byte alignment of stack frames. */
7438 cfun_frame_layout.frame_size = ((cfun_frame_layout.frame_size +
7439 STACK_BOUNDARY / BITS_PER_UNIT - 1)
7440 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1));
7441
7442 cfun_frame_layout.frame_size += crtl->outgoing_args_size;
7443 }
7444 }
7445
7446 /* Generate frame layout. Fills in register and frame data for the current
7447 function in cfun->machine. This routine can be called multiple times;
7448 it will re-do the complete frame layout every time. */
7449
7450 static void
7451 s390_init_frame_layout (void)
7452 {
7453 HOST_WIDE_INT frame_size;
7454 int base_used;
7455 int clobbered_regs[16];
7456
7457 /* On S/390 machines, we may need to perform branch splitting, which
7458 will require both the base and the return address register. We have no
7459 choice but to assume we're going to need them until right at the
7460 end of the machine dependent reorg phase. */
7461 if (!TARGET_CPU_ZARCH)
7462 cfun->machine->split_branches_pending_p = true;
7463
7464 do
7465 {
7466 frame_size = cfun_frame_layout.frame_size;
7467
7468 /* Try to predict whether we'll need the base register. */
7469 base_used = cfun->machine->split_branches_pending_p
7470 || crtl->uses_const_pool
7471 || (!DISP_IN_RANGE (frame_size)
7472 && !CONST_OK_FOR_K (frame_size));
7473
7474 /* Decide which register to use as literal pool base. In small
7475 leaf functions, try to use an unused call-clobbered register
7476 as base register to avoid save/restore overhead. */
7477 if (!base_used)
7478 cfun->machine->base_reg = NULL_RTX;
7479 else if (current_function_is_leaf && !df_regs_ever_live_p (5))
7480 cfun->machine->base_reg = gen_rtx_REG (Pmode, 5);
7481 else
7482 cfun->machine->base_reg = gen_rtx_REG (Pmode, BASE_REGNUM);
7483
7484 s390_register_info (clobbered_regs);
7485 s390_frame_info ();
7486 }
7487 while (frame_size != cfun_frame_layout.frame_size);
7488 }
7489
7490 /* Update frame layout. Recompute actual register save data based on
7491 current info and update regs_ever_live for the special registers.
7492 May be called multiple times, but may never cause *more* registers
7493 to be saved than s390_init_frame_layout allocated room for. */
7494
7495 static void
7496 s390_update_frame_layout (void)
7497 {
7498 int clobbered_regs[16];
7499
7500 s390_register_info (clobbered_regs);
7501
7502 df_set_regs_ever_live (BASE_REGNUM,
7503 clobbered_regs[BASE_REGNUM] ? true : false);
7504 df_set_regs_ever_live (RETURN_REGNUM,
7505 clobbered_regs[RETURN_REGNUM] ? true : false);
7506 df_set_regs_ever_live (STACK_POINTER_REGNUM,
7507 clobbered_regs[STACK_POINTER_REGNUM] ? true : false);
7508
7509 if (cfun->machine->base_reg)
7510 df_set_regs_ever_live (REGNO (cfun->machine->base_reg), true);
7511 }
7512
7513 /* Return true if it is legal to put a value with MODE into REGNO. */
7514
7515 bool
7516 s390_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
7517 {
7518 switch (REGNO_REG_CLASS (regno))
7519 {
7520 case FP_REGS:
7521 if (REGNO_PAIR_OK (regno, mode))
7522 {
7523 if (mode == SImode || mode == DImode)
7524 return true;
7525
7526 if (FLOAT_MODE_P (mode) && GET_MODE_CLASS (mode) != MODE_VECTOR_FLOAT)
7527 return true;
7528 }
7529 break;
7530 case ADDR_REGS:
7531 if (FRAME_REGNO_P (regno) && mode == Pmode)
7532 return true;
7533
7534 /* fallthrough */
7535 case GENERAL_REGS:
7536 if (REGNO_PAIR_OK (regno, mode))
7537 {
7538 if (TARGET_ZARCH
7539 || (mode != TFmode && mode != TCmode && mode != TDmode))
7540 return true;
7541 }
7542 break;
7543 case CC_REGS:
7544 if (GET_MODE_CLASS (mode) == MODE_CC)
7545 return true;
7546 break;
7547 case ACCESS_REGS:
7548 if (REGNO_PAIR_OK (regno, mode))
7549 {
7550 if (mode == SImode || mode == Pmode)
7551 return true;
7552 }
7553 break;
7554 default:
7555 return false;
7556 }
7557
7558 return false;
7559 }
7560
7561 /* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */
7562
7563 bool
7564 s390_hard_regno_rename_ok (unsigned int old_reg, unsigned int new_reg)
7565 {
7566 /* Once we've decided upon a register to use as base register, it must
7567 no longer be used for any other purpose. */
7568 if (cfun->machine->base_reg)
7569 if (REGNO (cfun->machine->base_reg) == old_reg
7570 || REGNO (cfun->machine->base_reg) == new_reg)
7571 return false;
7572
7573 return true;
7574 }
7575
7576 /* Maximum number of registers to represent a value of mode MODE
7577 in a register of class RCLASS. */
7578
7579 int
7580 s390_class_max_nregs (enum reg_class rclass, enum machine_mode mode)
7581 {
7582 switch (rclass)
7583 {
7584 case FP_REGS:
7585 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
7586 return 2 * ((GET_MODE_SIZE (mode) / 2 + 8 - 1) / 8);
7587 else
7588 return (GET_MODE_SIZE (mode) + 8 - 1) / 8;
7589 case ACCESS_REGS:
7590 return (GET_MODE_SIZE (mode) + 4 - 1) / 4;
7591 default:
7592 break;
7593 }
7594 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
7595 }
7596
7597 /* Return true if register FROM can be eliminated via register TO. */
7598
7599 static bool
7600 s390_can_eliminate (const int from, const int to)
7601 {
7602 /* On zSeries machines, we have not marked the base register as fixed.
7603 Instead, we have an elimination rule BASE_REGNUM -> BASE_REGNUM.
7604 If a function requires the base register, we say here that this
7605 elimination cannot be performed. This will cause reload to free
7606 up the base register (as if it were fixed). On the other hand,
7607 if the current function does *not* require the base register, we
7608 say here the elimination succeeds, which in turn allows reload
7609 to allocate the base register for any other purpose. */
7610 if (from == BASE_REGNUM && to == BASE_REGNUM)
7611 {
7612 if (TARGET_CPU_ZARCH)
7613 {
7614 s390_init_frame_layout ();
7615 return cfun->machine->base_reg == NULL_RTX;
7616 }
7617
7618 return false;
7619 }
7620
7621 /* Everything else must point into the stack frame. */
7622 gcc_assert (to == STACK_POINTER_REGNUM
7623 || to == HARD_FRAME_POINTER_REGNUM);
7624
7625 gcc_assert (from == FRAME_POINTER_REGNUM
7626 || from == ARG_POINTER_REGNUM
7627 || from == RETURN_ADDRESS_POINTER_REGNUM);
7628
7629 /* Make sure we actually saved the return address. */
7630 if (from == RETURN_ADDRESS_POINTER_REGNUM)
7631 if (!crtl->calls_eh_return
7632 && !cfun->stdarg
7633 && !cfun_frame_layout.save_return_addr_p)
7634 return false;
7635
7636 return true;
7637 }
7638
7639 /* Return the offset between registers FROM and TO just after the prologue. */
7640
7641 HOST_WIDE_INT
7642 s390_initial_elimination_offset (int from, int to)
7643 {
7644 HOST_WIDE_INT offset;
7645 int index;
7646
7647 /* ??? Why are we called for non-eliminable pairs? */
7648 if (!s390_can_eliminate (from, to))
7649 return 0;
7650
7651 switch (from)
7652 {
7653 case FRAME_POINTER_REGNUM:
7654 offset = (get_frame_size()
7655 + STACK_POINTER_OFFSET
7656 + crtl->outgoing_args_size);
7657 break;
7658
7659 case ARG_POINTER_REGNUM:
7660 s390_init_frame_layout ();
7661 offset = cfun_frame_layout.frame_size + STACK_POINTER_OFFSET;
7662 break;
7663
7664 case RETURN_ADDRESS_POINTER_REGNUM:
7665 s390_init_frame_layout ();
7666 index = RETURN_REGNUM - cfun_frame_layout.first_save_gpr_slot;
7667 gcc_assert (index >= 0);
7668 offset = cfun_frame_layout.frame_size + cfun_frame_layout.gprs_offset;
7669 offset += index * UNITS_PER_LONG;
7670 break;
7671
7672 case BASE_REGNUM:
7673 offset = 0;
7674 break;
7675
7676 default:
7677 gcc_unreachable ();
7678 }
7679
7680 return offset;
7681 }
7682
7683 /* Emit insn to save fpr REGNUM at offset OFFSET relative
7684 to register BASE. Return generated insn. */
7685
7686 static rtx
7687 save_fpr (rtx base, int offset, int regnum)
7688 {
7689 rtx addr;
7690 addr = gen_rtx_MEM (DFmode, plus_constant (base, offset));
7691
7692 if (regnum >= 16 && regnum <= (16 + FP_ARG_NUM_REG))
7693 set_mem_alias_set (addr, get_varargs_alias_set ());
7694 else
7695 set_mem_alias_set (addr, get_frame_alias_set ());
7696
7697 return emit_move_insn (addr, gen_rtx_REG (DFmode, regnum));
7698 }
7699
7700 /* Emit insn to restore fpr REGNUM from offset OFFSET relative
7701 to register BASE. Return generated insn. */
7702
7703 static rtx
7704 restore_fpr (rtx base, int offset, int regnum)
7705 {
7706 rtx addr;
7707 addr = gen_rtx_MEM (DFmode, plus_constant (base, offset));
7708 set_mem_alias_set (addr, get_frame_alias_set ());
7709
7710 return emit_move_insn (gen_rtx_REG (DFmode, regnum), addr);
7711 }
7712
7713 /* Return true if REGNO is a global register, but not one
7714 of the special ones that need to be saved/restored anyway. */
7715
7716 static inline bool
7717 global_not_special_regno_p (int regno)
7718 {
7719 return (global_regs[regno]
7720 /* These registers are special and need to be
7721 restored in any case. */
7722 && !(regno == STACK_POINTER_REGNUM
7723 || regno == RETURN_REGNUM
7724 || regno == BASE_REGNUM
7725 || (flag_pic && regno == (int)PIC_OFFSET_TABLE_REGNUM)));
7726 }
7727
7728 /* Generate insn to save registers FIRST to LAST into
7729 the register save area located at offset OFFSET
7730 relative to register BASE. */
7731
7732 static rtx
7733 save_gprs (rtx base, int offset, int first, int last)
7734 {
7735 rtx addr, insn, note;
7736 int i;
7737
7738 addr = plus_constant (base, offset);
7739 addr = gen_rtx_MEM (Pmode, addr);
7740
7741 set_mem_alias_set (addr, get_frame_alias_set ());
7742
7743 /* Special-case single register. */
7744 if (first == last)
7745 {
7746 if (TARGET_64BIT)
7747 insn = gen_movdi (addr, gen_rtx_REG (Pmode, first));
7748 else
7749 insn = gen_movsi (addr, gen_rtx_REG (Pmode, first));
7750
7751 if (!global_not_special_regno_p (first))
7752 RTX_FRAME_RELATED_P (insn) = 1;
7753 return insn;
7754 }
7755
7756
7757 insn = gen_store_multiple (addr,
7758 gen_rtx_REG (Pmode, first),
7759 GEN_INT (last - first + 1));
7760
7761 if (first <= 6 && cfun->stdarg)
7762 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
7763 {
7764 rtx mem = XEXP (XVECEXP (PATTERN (insn), 0, i), 0);
7765
7766 if (first + i <= 6)
7767 set_mem_alias_set (mem, get_varargs_alias_set ());
7768 }
7769
7770 /* We need to set the FRAME_RELATED flag on all SETs
7771 inside the store-multiple pattern.
7772
7773 However, we must not emit DWARF records for registers 2..5
7774 if they are stored for use by variable arguments ...
7775
7776 ??? Unfortunately, it is not enough to simply not set the
7777 FRAME_RELATED flags for those SETs, because the first SET
7778 of the PARALLEL is always treated as if it had the flag
7779 set, even if it does not. Therefore we emit a new pattern
7780 without those registers as a REG_FRAME_RELATED_EXPR note. */
7781
7782 if (first >= 6 && !global_not_special_regno_p (first))
7783 {
7784 rtx pat = PATTERN (insn);
7785
7786 for (i = 0; i < XVECLEN (pat, 0); i++)
7787 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
7788 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (pat,
7789 0, i)))))
7790 RTX_FRAME_RELATED_P (XVECEXP (pat, 0, i)) = 1;
7791
7792 RTX_FRAME_RELATED_P (insn) = 1;
7793 }
7794 else if (last >= 6)
7795 {
7796 int start;
7797
7798 for (start = first >= 6 ? first : 6; start <= last; start++)
7799 if (!global_not_special_regno_p (start))
7800 break;
7801
7802 if (start > last)
7803 return insn;
7804
7805 addr = plus_constant (base, offset + (start - first) * UNITS_PER_LONG);
7806 note = gen_store_multiple (gen_rtx_MEM (Pmode, addr),
7807 gen_rtx_REG (Pmode, start),
7808 GEN_INT (last - start + 1));
7809 note = PATTERN (note);
7810
7811 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
7812
7813 for (i = 0; i < XVECLEN (note, 0); i++)
7814 if (GET_CODE (XVECEXP (note, 0, i)) == SET
7815 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (note,
7816 0, i)))))
7817 RTX_FRAME_RELATED_P (XVECEXP (note, 0, i)) = 1;
7818
7819 RTX_FRAME_RELATED_P (insn) = 1;
7820 }
7821
7822 return insn;
7823 }
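/* Editorial example (hedged): consider calling the function above with
   FIRST == 2 and LAST == 15 in a stdarg function.  The store-multiple then
   covers r2-r15 and the slots for r2-r6 get the varargs alias set.  Because
   the argument registers are stored only for va_arg, a second store-multiple
   pattern covering only r6-r15 is attached as the REG_FRAME_RELATED_EXPR
   note, so the unwinder never records the argument-register saves.  The
   concrete register range depends on the frame layout computed elsewhere.  */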
7824
7825 /* Generate insn to restore registers FIRST to LAST from
7826 the register save area located at offset OFFSET
7827 relative to register BASE. */
7828
7829 static rtx
7830 restore_gprs (rtx base, int offset, int first, int last)
7831 {
7832 rtx addr, insn;
7833
7834 addr = plus_constant (base, offset);
7835 addr = gen_rtx_MEM (Pmode, addr);
7836 set_mem_alias_set (addr, get_frame_alias_set ());
7837
7838 /* Special-case single register. */
7839 if (first == last)
7840 {
7841 if (TARGET_64BIT)
7842 insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
7843 else
7844 insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);
7845
7846 return insn;
7847 }
7848
7849 insn = gen_load_multiple (gen_rtx_REG (Pmode, first),
7850 addr,
7851 GEN_INT (last - first + 1));
7852 return insn;
7853 }
7854
7855 /* Return insn sequence to load the GOT register. */
7856
7857 static GTY(()) rtx got_symbol;
7858 rtx
7859 s390_load_got (void)
7860 {
7861 rtx insns;
7862
7863 /* We cannot use pic_offset_table_rtx here since we also use this
7864 function for non-pic code if __tls_get_offset is called, and in
7865 that case PIC_OFFSET_TABLE_REGNUM as well as pic_offset_table_rtx
7866 aren't usable. */
7867 rtx got_rtx = gen_rtx_REG (Pmode, 12);
7868
7869 if (!got_symbol)
7870 {
7871 got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
7872 SYMBOL_REF_FLAGS (got_symbol) = SYMBOL_FLAG_LOCAL;
7873 }
7874
7875 start_sequence ();
7876
7877 if (TARGET_CPU_ZARCH)
7878 {
7879 emit_move_insn (got_rtx, got_symbol);
7880 }
7881 else
7882 {
7883 rtx offset;
7884
7885 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got_symbol),
7886 UNSPEC_LTREL_OFFSET);
7887 offset = gen_rtx_CONST (Pmode, offset);
7888 offset = force_const_mem (Pmode, offset);
7889
7890 emit_move_insn (got_rtx, offset);
7891
7892 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (offset, 0)),
7893 UNSPEC_LTREL_BASE);
7894 offset = gen_rtx_PLUS (Pmode, got_rtx, offset);
7895
7896 emit_move_insn (got_rtx, offset);
7897 }
7898
7899 insns = get_insns ();
7900 end_sequence ();
7901 return insns;
7902 }
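/* Editorial note (hedged): on TARGET_CPU_ZARCH the sequence built above is a
   single PC-relative move of _GLOBAL_OFFSET_TABLE_ into %r12 (typically a
   larl, though the exact instruction is left to the move patterns).  On older
   ESA machines the GOT offset is instead loaded from the literal pool into
   %r12 and the literal-pool base (UNSPEC_LTREL_BASE) is then added to it.  */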
7903
7904 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
7905 and the change to the stack pointer. */
7906
7907 static void
7908 s390_emit_stack_tie (void)
7909 {
7910 rtx mem = gen_frame_mem (BLKmode,
7911 gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
7912
7913 emit_insn (gen_stack_tie (mem));
7914 }
7915
7916 /* Expand the prologue into a bunch of separate insns. */
7917
7918 void
7919 s390_emit_prologue (void)
7920 {
7921 rtx insn, addr;
7922 rtx temp_reg;
7923 int i;
7924 int offset;
7925 int next_fpr = 0;
7926
7927 /* Complete frame layout. */
7928
7929 s390_update_frame_layout ();
7930
7931 /* Annotate all constant pool references to let the scheduler know
7932 they implicitly use the base register. */
7933
7934 push_topmost_sequence ();
7935
7936 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7937 if (INSN_P (insn))
7938 {
7939 annotate_constant_pool_refs (&PATTERN (insn));
7940 df_insn_rescan (insn);
7941 }
7942
7943 pop_topmost_sequence ();
7944
7945 /* Choose the best register to use as a temporary within the prologue.
7946 See below for why TPF must use register 1. */
7947
7948 if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
7949 && !current_function_is_leaf
7950 && !TARGET_TPF_PROFILING)
7951 temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
7952 else
7953 temp_reg = gen_rtx_REG (Pmode, 1);
7954
7955 /* Save call saved gprs. */
7956 if (cfun_frame_layout.first_save_gpr != -1)
7957 {
7958 insn = save_gprs (stack_pointer_rtx,
7959 cfun_frame_layout.gprs_offset +
7960 UNITS_PER_LONG * (cfun_frame_layout.first_save_gpr
7961 - cfun_frame_layout.first_save_gpr_slot),
7962 cfun_frame_layout.first_save_gpr,
7963 cfun_frame_layout.last_save_gpr);
7964 emit_insn (insn);
7965 }
7966
7967 /* Dummy insn to mark literal pool slot. */
7968
7969 if (cfun->machine->base_reg)
7970 emit_insn (gen_main_pool (cfun->machine->base_reg));
7971
7972 offset = cfun_frame_layout.f0_offset;
7973
7974 /* Save f0 and f2. */
7975 for (i = 0; i < 2; i++)
7976 {
7977 if (cfun_fpr_bit_p (i))
7978 {
7979 save_fpr (stack_pointer_rtx, offset, i + 16);
7980 offset += 8;
7981 }
7982 else if (!TARGET_PACKED_STACK)
7983 offset += 8;
7984 }
7985
7986 /* Save f4 and f6. */
7987 offset = cfun_frame_layout.f4_offset;
7988 for (i = 2; i < 4; i++)
7989 {
7990 if (cfun_fpr_bit_p (i))
7991 {
7992 insn = save_fpr (stack_pointer_rtx, offset, i + 16);
7993 offset += 8;
7994
7995 /* If f4 and f6 are call-clobbered, they are saved due to stdargs and
7996 therefore are not frame related. */
7997 if (!call_really_used_regs[i + 16])
7998 RTX_FRAME_RELATED_P (insn) = 1;
7999 }
8000 else if (!TARGET_PACKED_STACK)
8001 offset += 8;
8002 }
8003
8004 if (TARGET_PACKED_STACK
8005 && cfun_save_high_fprs_p
8006 && cfun_frame_layout.f8_offset + cfun_frame_layout.high_fprs * 8 > 0)
8007 {
8008 offset = (cfun_frame_layout.f8_offset
8009 + (cfun_frame_layout.high_fprs - 1) * 8);
8010
8011 for (i = 15; i > 7 && offset >= 0; i--)
8012 if (cfun_fpr_bit_p (i))
8013 {
8014 insn = save_fpr (stack_pointer_rtx, offset, i + 16);
8015
8016 RTX_FRAME_RELATED_P (insn) = 1;
8017 offset -= 8;
8018 }
8019 if (offset >= cfun_frame_layout.f8_offset)
8020 next_fpr = i + 16;
8021 }
8022
8023 if (!TARGET_PACKED_STACK)
8024 next_fpr = cfun_save_high_fprs_p ? 31 : 0;
8025
8026 if (flag_stack_usage_info)
8027 current_function_static_stack_size = cfun_frame_layout.frame_size;
8028
8029 /* Decrement stack pointer. */
8030
8031 if (cfun_frame_layout.frame_size > 0)
8032 {
8033 rtx frame_off = GEN_INT (-cfun_frame_layout.frame_size);
8034 rtx real_frame_off;
8035
8036 if (s390_stack_size)
8037 {
8038 HOST_WIDE_INT stack_guard;
8039
8040 if (s390_stack_guard)
8041 stack_guard = s390_stack_guard;
8042 else
8043 {
8044 /* If no value for the stack guard is provided, the smallest power of 2
8045 larger than the current frame size is chosen. */
8046 stack_guard = 1;
8047 while (stack_guard < cfun_frame_layout.frame_size)
8048 stack_guard <<= 1;
8049 }
8050
8051 if (cfun_frame_layout.frame_size >= s390_stack_size)
8052 {
8053 warning (0, "frame size of function %qs is %wd"
8054 " bytes exceeding user provided stack limit of "
8055 "%d bytes. "
8056 "An unconditional trap is added.",
8057 current_function_name(), cfun_frame_layout.frame_size,
8058 s390_stack_size);
8059 emit_insn (gen_trap ());
8060 }
8061 else
8062 {
8063 /* stack_guard has to be smaller than s390_stack_size.
8064 Otherwise we would emit an AND with zero which would
8065 not match the test under mask pattern. */
8066 if (stack_guard >= s390_stack_size)
8067 {
8068 warning (0, "frame size of function %qs is %wd"
8069 " bytes which is more than half the stack size. "
8070 "The dynamic check would not be reliable. "
8071 "No check emitted for this function.",
8072 current_function_name(),
8073 cfun_frame_layout.frame_size);
8074 }
8075 else
8076 {
8077 HOST_WIDE_INT stack_check_mask = ((s390_stack_size - 1)
8078 & ~(stack_guard - 1));
8079
8080 rtx t = gen_rtx_AND (Pmode, stack_pointer_rtx,
8081 GEN_INT (stack_check_mask));
8082 if (TARGET_64BIT)
8083 emit_insn (gen_ctrapdi4 (gen_rtx_EQ (VOIDmode,
8084 t, const0_rtx),
8085 t, const0_rtx, const0_rtx));
8086 else
8087 emit_insn (gen_ctrapsi4 (gen_rtx_EQ (VOIDmode,
8088 t, const0_rtx),
8089 t, const0_rtx, const0_rtx));
8090 }
8091 }
8092 }
8093
8094 if (s390_warn_framesize > 0
8095 && cfun_frame_layout.frame_size >= s390_warn_framesize)
8096 warning (0, "frame size of %qs is %wd bytes",
8097 current_function_name (), cfun_frame_layout.frame_size);
8098
8099 if (s390_warn_dynamicstack_p && cfun->calls_alloca)
8100 warning (0, "%qs uses dynamic stack allocation", current_function_name ());
8101
8102 /* Save incoming stack pointer into temp reg. */
8103 if (TARGET_BACKCHAIN || next_fpr)
8104 insn = emit_insn (gen_move_insn (temp_reg, stack_pointer_rtx));
8105
8106 /* Subtract frame size from stack pointer. */
8107
8108 if (DISP_IN_RANGE (INTVAL (frame_off)))
8109 {
8110 insn = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8111 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8112 frame_off));
8113 insn = emit_insn (insn);
8114 }
8115 else
8116 {
8117 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
8118 frame_off = force_const_mem (Pmode, frame_off);
8119
8120 insn = emit_insn (gen_add2_insn (stack_pointer_rtx, frame_off));
8121 annotate_constant_pool_refs (&PATTERN (insn));
8122 }
8123
8124 RTX_FRAME_RELATED_P (insn) = 1;
8125 real_frame_off = GEN_INT (-cfun_frame_layout.frame_size);
8126 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
8127 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8128 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8129 real_frame_off)));
8130
8131 /* Set backchain. */
8132
8133 if (TARGET_BACKCHAIN)
8134 {
8135 if (cfun_frame_layout.backchain_offset)
8136 addr = gen_rtx_MEM (Pmode,
8137 plus_constant (stack_pointer_rtx,
8138 cfun_frame_layout.backchain_offset));
8139 else
8140 addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
8141 set_mem_alias_set (addr, get_frame_alias_set ());
8142 insn = emit_insn (gen_move_insn (addr, temp_reg));
8143 }
8144
8145 /* If we support non-call exceptions (e.g. for Java),
8146 we need to make sure the backchain pointer is set up
8147 before any possibly trapping memory access. */
8148 if (TARGET_BACKCHAIN && cfun->can_throw_non_call_exceptions)
8149 {
8150 addr = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode));
8151 emit_clobber (addr);
8152 }
8153 }
8154
8155 /* Save fprs 8 - 15 (64 bit ABI). */
8156
8157 if (cfun_save_high_fprs_p && next_fpr)
8158 {
8159 /* If the stack might be accessed through a different register
8160 we have to make sure that the stack pointer decrement is not
8161 moved below the use of the stack slots. */
8162 s390_emit_stack_tie ();
8163
8164 insn = emit_insn (gen_add2_insn (temp_reg,
8165 GEN_INT (cfun_frame_layout.f8_offset)));
8166
8167 offset = 0;
8168
8169 for (i = 24; i <= next_fpr; i++)
8170 if (cfun_fpr_bit_p (i - 16))
8171 {
8172 rtx addr = plus_constant (stack_pointer_rtx,
8173 cfun_frame_layout.frame_size
8174 + cfun_frame_layout.f8_offset
8175 + offset);
8176
8177 insn = save_fpr (temp_reg, offset, i);
8178 offset += 8;
8179 RTX_FRAME_RELATED_P (insn) = 1;
8180 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
8181 gen_rtx_SET (VOIDmode,
8182 gen_rtx_MEM (DFmode, addr),
8183 gen_rtx_REG (DFmode, i)));
8184 }
8185 }
8186
8187 /* Set frame pointer, if needed. */
8188
8189 if (frame_pointer_needed)
8190 {
8191 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
8192 RTX_FRAME_RELATED_P (insn) = 1;
8193 }
8194
8195 /* Set up got pointer, if needed. */
8196
8197 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
8198 {
8199 rtx insns = s390_load_got ();
8200
8201 for (insn = insns; insn; insn = NEXT_INSN (insn))
8202 annotate_constant_pool_refs (&PATTERN (insn));
8203
8204 emit_insn (insns);
8205 }
8206
8207 if (TARGET_TPF_PROFILING)
8208 {
8209 /* Generate a BAS instruction to serve as a function
8210 entry intercept to facilitate the use of tracing
8211 algorithms located at the branch target. */
8212 emit_insn (gen_prologue_tpf ());
8213
8214 /* Emit a blockage here so that all code
8215 lies between the profiling mechanisms. */
8216 emit_insn (gen_blockage ());
8217 }
8218 }
8219
8220 /* Expand the epilogue into a bunch of separate insns. */
8221
8222 void
8223 s390_emit_epilogue (bool sibcall)
8224 {
8225 rtx frame_pointer, return_reg, cfa_restores = NULL_RTX;
8226 int area_bottom, area_top, offset = 0;
8227 int next_offset;
8228 rtvec p;
8229 int i;
8230
8231 if (TARGET_TPF_PROFILING)
8232 {
8233
8234 /* Generate a BAS instruction to serve as a function
8235 entry intercept to facilitate the use of tracing
8236 algorithms located at the branch target. */
8237
8238 /* Emit a blockage here so that all code
8239 lies between the profiling mechanisms. */
8240 emit_insn (gen_blockage ());
8241
8242 emit_insn (gen_epilogue_tpf ());
8243 }
8244
8245 /* Check whether to use frame or stack pointer for restore. */
8246
8247 frame_pointer = (frame_pointer_needed
8248 ? hard_frame_pointer_rtx : stack_pointer_rtx);
8249
8250 s390_frame_area (&area_bottom, &area_top);
8251
8252 /* Check whether we can access the register save area.
8253 If not, increment the frame pointer as required. */
8254
8255 if (area_top <= area_bottom)
8256 {
8257 /* Nothing to restore. */
8258 }
8259 else if (DISP_IN_RANGE (cfun_frame_layout.frame_size + area_bottom)
8260 && DISP_IN_RANGE (cfun_frame_layout.frame_size + area_top - 1))
8261 {
8262 /* Area is in range. */
8263 offset = cfun_frame_layout.frame_size;
8264 }
8265 else
8266 {
8267 rtx insn, frame_off, cfa;
8268
8269 offset = area_bottom < 0 ? -area_bottom : 0;
8270 frame_off = GEN_INT (cfun_frame_layout.frame_size - offset);
8271
8272 cfa = gen_rtx_SET (VOIDmode, frame_pointer,
8273 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
8274 if (DISP_IN_RANGE (INTVAL (frame_off)))
8275 {
8276 insn = gen_rtx_SET (VOIDmode, frame_pointer,
8277 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
8278 insn = emit_insn (insn);
8279 }
8280 else
8281 {
8282 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
8283 frame_off = force_const_mem (Pmode, frame_off);
8284
8285 insn = emit_insn (gen_add2_insn (frame_pointer, frame_off));
8286 annotate_constant_pool_refs (&PATTERN (insn));
8287 }
8288 add_reg_note (insn, REG_CFA_ADJUST_CFA, cfa);
8289 RTX_FRAME_RELATED_P (insn) = 1;
8290 }
8291
8292 /* Restore call saved fprs. */
8293
8294 if (TARGET_64BIT)
8295 {
8296 if (cfun_save_high_fprs_p)
8297 {
8298 next_offset = cfun_frame_layout.f8_offset;
8299 for (i = 24; i < 32; i++)
8300 {
8301 if (cfun_fpr_bit_p (i - 16))
8302 {
8303 restore_fpr (frame_pointer,
8304 offset + next_offset, i);
8305 cfa_restores
8306 = alloc_reg_note (REG_CFA_RESTORE,
8307 gen_rtx_REG (DFmode, i), cfa_restores);
8308 next_offset += 8;
8309 }
8310 }
8311 }
8312
8313 }
8314 else
8315 {
8316 next_offset = cfun_frame_layout.f4_offset;
8317 for (i = 18; i < 20; i++)
8318 {
8319 if (cfun_fpr_bit_p (i - 16))
8320 {
8321 restore_fpr (frame_pointer,
8322 offset + next_offset, i);
8323 cfa_restores
8324 = alloc_reg_note (REG_CFA_RESTORE,
8325 gen_rtx_REG (DFmode, i), cfa_restores);
8326 next_offset += 8;
8327 }
8328 else if (!TARGET_PACKED_STACK)
8329 next_offset += 8;
8330 }
8331
8332 }
8333
8334 /* Return register. */
8335
8336 return_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
8337
8338 /* Restore call saved gprs. */
8339
8340 if (cfun_frame_layout.first_restore_gpr != -1)
8341 {
8342 rtx insn, addr;
8343 int i;
8344
8345 /* Check for global registers and store their current values into
8346 the stack locations from which they will be reloaded below. */
8347
8348 for (i = cfun_frame_layout.first_restore_gpr;
8349 i <= cfun_frame_layout.last_restore_gpr;
8350 i++)
8351 {
8352 if (global_not_special_regno_p (i))
8353 {
8354 addr = plus_constant (frame_pointer,
8355 offset + cfun_frame_layout.gprs_offset
8356 + (i - cfun_frame_layout.first_save_gpr_slot)
8357 * UNITS_PER_LONG);
8358 addr = gen_rtx_MEM (Pmode, addr);
8359 set_mem_alias_set (addr, get_frame_alias_set ());
8360 emit_move_insn (addr, gen_rtx_REG (Pmode, i));
8361 }
8362 else
8363 cfa_restores
8364 = alloc_reg_note (REG_CFA_RESTORE,
8365 gen_rtx_REG (Pmode, i), cfa_restores);
8366 }
8367
8368 if (! sibcall)
8369 {
8370 /* Fetch the return address from the stack before the load multiple;
8371 this helps scheduling. */
8372
8373 if (cfun_frame_layout.save_return_addr_p
8374 || (cfun_frame_layout.first_restore_gpr < BASE_REGNUM
8375 && cfun_frame_layout.last_restore_gpr > RETURN_REGNUM))
8376 {
8377 int return_regnum = find_unused_clobbered_reg();
8378 if (!return_regnum)
8379 return_regnum = 4;
8380 return_reg = gen_rtx_REG (Pmode, return_regnum);
8381
8382 addr = plus_constant (frame_pointer,
8383 offset + cfun_frame_layout.gprs_offset
8384 + (RETURN_REGNUM
8385 - cfun_frame_layout.first_save_gpr_slot)
8386 * UNITS_PER_LONG);
8387 addr = gen_rtx_MEM (Pmode, addr);
8388 set_mem_alias_set (addr, get_frame_alias_set ());
8389 emit_move_insn (return_reg, addr);
8390 }
8391 }
8392
8393 insn = restore_gprs (frame_pointer,
8394 offset + cfun_frame_layout.gprs_offset
8395 + (cfun_frame_layout.first_restore_gpr
8396 - cfun_frame_layout.first_save_gpr_slot)
8397 * UNITS_PER_LONG,
8398 cfun_frame_layout.first_restore_gpr,
8399 cfun_frame_layout.last_restore_gpr);
8400 insn = emit_insn (insn);
8401 REG_NOTES (insn) = cfa_restores;
8402 add_reg_note (insn, REG_CFA_DEF_CFA,
8403 plus_constant (stack_pointer_rtx, STACK_POINTER_OFFSET));
8404 RTX_FRAME_RELATED_P (insn) = 1;
8405 }
8406
8407 if (! sibcall)
8408 {
8409
8410 /* Return to caller. */
8411
8412 p = rtvec_alloc (2);
8413
8414 RTVEC_ELT (p, 0) = ret_rtx;
8415 RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode, return_reg);
8416 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
8417 }
8418 }
8419
8420
8421 /* Return the size in bytes of a function argument of
8422 type TYPE and/or mode MODE. At least one of TYPE or
8423 MODE must be specified. */
8424
8425 static int
8426 s390_function_arg_size (enum machine_mode mode, const_tree type)
8427 {
8428 if (type)
8429 return int_size_in_bytes (type);
8430
8431 /* No type info available for some library calls ... */
8432 if (mode != BLKmode)
8433 return GET_MODE_SIZE (mode);
8434
8435 /* If we have neither type nor mode, abort. */
8436 gcc_unreachable ();
8437 }
8438
8439 /* Return true if a function argument of type TYPE and mode MODE
8440 is to be passed in a floating-point register, if available. */
8441
8442 static bool
8443 s390_function_arg_float (enum machine_mode mode, const_tree type)
8444 {
8445 int size = s390_function_arg_size (mode, type);
8446 if (size > 8)
8447 return false;
8448
8449 /* Soft-float changes the ABI: no floating-point registers are used. */
8450 if (TARGET_SOFT_FLOAT)
8451 return false;
8452
8453 /* No type info available for some library calls ... */
8454 if (!type)
8455 return mode == SFmode || mode == DFmode || mode == SDmode || mode == DDmode;
8456
8457 /* The ABI says that record types with a single member are treated
8458 just like that member would be. */
8459 while (TREE_CODE (type) == RECORD_TYPE)
8460 {
8461 tree field, single = NULL_TREE;
8462
8463 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
8464 {
8465 if (TREE_CODE (field) != FIELD_DECL)
8466 continue;
8467
8468 if (single == NULL_TREE)
8469 single = TREE_TYPE (field);
8470 else
8471 return false;
8472 }
8473
8474 if (single == NULL_TREE)
8475 return false;
8476 else
8477 type = single;
8478 }
8479
8480 return TREE_CODE (type) == REAL_TYPE;
8481 }
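/* Editorial illustration (hedged): under the single-member-record rule above,
   a type such as

     struct wrapped_double { double d; };

   is treated like a plain double and, with hardware float, is passed in a
   floating-point register, whereas struct { double d; int tag; } has two
   members and therefore falls back to the integer/stack conventions.  The
   struct names here are made up purely for illustration.  */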
8482
8483 /* Return true if a function argument of type TYPE and mode MODE
8484 is to be passed in an integer register, or a pair of integer
8485 registers, if available. */
8486
8487 static bool
8488 s390_function_arg_integer (enum machine_mode mode, const_tree type)
8489 {
8490 int size = s390_function_arg_size (mode, type);
8491 if (size > 8)
8492 return false;
8493
8494 /* No type info available for some library calls ... */
8495 if (!type)
8496 return GET_MODE_CLASS (mode) == MODE_INT
8497 || (TARGET_SOFT_FLOAT && SCALAR_FLOAT_MODE_P (mode));
8498
8499 /* We accept small integral (and similar) types. */
8500 if (INTEGRAL_TYPE_P (type)
8501 || POINTER_TYPE_P (type)
8502 || TREE_CODE (type) == NULLPTR_TYPE
8503 || TREE_CODE (type) == OFFSET_TYPE
8504 || (TARGET_SOFT_FLOAT && TREE_CODE (type) == REAL_TYPE))
8505 return true;
8506
8507 /* We also accept structs of size 1, 2, 4, 8 that are not
8508 passed in floating-point registers. */
8509 if (AGGREGATE_TYPE_P (type)
8510 && exact_log2 (size) >= 0
8511 && !s390_function_arg_float (mode, type))
8512 return true;
8513
8514 return false;
8515 }
8516
8517 /* Return 1 if a function argument of type TYPE and mode MODE
8518 is to be passed by reference. The ABI specifies that only
8519 structures of size 1, 2, 4, or 8 bytes are passed by value,
8520 all other structures (and complex numbers) are passed by
8521 reference. */
8522
8523 static bool
8524 s390_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
8525 enum machine_mode mode, const_tree type,
8526 bool named ATTRIBUTE_UNUSED)
8527 {
8528 int size = s390_function_arg_size (mode, type);
8529 if (size > 8)
8530 return true;
8531
8532 if (type)
8533 {
8534 if (AGGREGATE_TYPE_P (type) && exact_log2 (size) < 0)
8535 return 1;
8536
8537 if (TREE_CODE (type) == COMPLEX_TYPE
8538 || TREE_CODE (type) == VECTOR_TYPE)
8539 return 1;
8540 }
8541
8542 return 0;
8543 }
8544
8545 /* Update the data in CUM to advance over an argument of mode MODE and
8546 data type TYPE. (TYPE is null for libcalls where that information
8547 may not be available.) The boolean NAMED specifies whether the
8548 argument is a named argument (as opposed to an unnamed argument
8549 matching an ellipsis). */
8550
8551 static void
8552 s390_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
8553 const_tree type, bool named ATTRIBUTE_UNUSED)
8554 {
8555 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
8556
8557 if (s390_function_arg_float (mode, type))
8558 {
8559 cum->fprs += 1;
8560 }
8561 else if (s390_function_arg_integer (mode, type))
8562 {
8563 int size = s390_function_arg_size (mode, type);
8564 cum->gprs += ((size + UNITS_PER_LONG - 1) / UNITS_PER_LONG);
8565 }
8566 else
8567 gcc_unreachable ();
8568 }
8569
8570 /* Define where to put the arguments to a function.
8571 Value is zero to push the argument on the stack,
8572 or a hard register in which to store the argument.
8573
8574 MODE is the argument's machine mode.
8575 TYPE is the data type of the argument (as a tree).
8576 This is null for libcalls where that information may
8577 not be available.
8578 CUM is a variable of type CUMULATIVE_ARGS which gives info about
8579 the preceding args and about the function being called.
8580 NAMED is nonzero if this argument is a named parameter
8581 (otherwise it is an extra parameter matching an ellipsis).
8582
8583 On S/390, we use general purpose registers 2 through 6 to
8584 pass integer, pointer, and certain structure arguments, and
8585 floating point registers 0 and 2 (0, 2, 4, and 6 on 64-bit)
8586 to pass floating point arguments. All remaining arguments
8587 are pushed to the stack. */
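/* Editorial example (hedged): with the conventions described above on a
   64-bit target, a hypothetical call such as

     extern void callee (int a, double d, long c);

   would pass A in %r2, D in %f0 and C in %r3.  Only under -m31 -mzarch does
   a DImode argument come back from this hook as the two-element PARALLEL
   built below.  The prototype is made up and only meant to illustrate the
   register assignment.  */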
8588
8589 static rtx
8590 s390_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
8591 const_tree type, bool named ATTRIBUTE_UNUSED)
8592 {
8593 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
8594
8595 if (s390_function_arg_float (mode, type))
8596 {
8597 if (cum->fprs + 1 > FP_ARG_NUM_REG)
8598 return 0;
8599 else
8600 return gen_rtx_REG (mode, cum->fprs + 16);
8601 }
8602 else if (s390_function_arg_integer (mode, type))
8603 {
8604 int size = s390_function_arg_size (mode, type);
8605 int n_gprs = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
8606
8607 if (cum->gprs + n_gprs > GP_ARG_NUM_REG)
8608 return 0;
8609 else if (n_gprs == 1 || UNITS_PER_WORD == UNITS_PER_LONG)
8610 return gen_rtx_REG (mode, cum->gprs + 2);
8611 else if (n_gprs == 2)
8612 {
8613 rtvec p = rtvec_alloc (2);
8614
8615 RTVEC_ELT (p, 0)
8616 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 2),
8617 const0_rtx);
8618 RTVEC_ELT (p, 1)
8619 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 3),
8620 GEN_INT (4));
8621
8622 return gen_rtx_PARALLEL (mode, p);
8623 }
8624 }
8625
8626 /* After the real arguments, expand_call calls us once again
8627 with a void_type_node type. Whatever we return here is
8628 passed as operand 2 to the call expanders.
8629
8630 We don't need this feature ... */
8631 else if (type == void_type_node)
8632 return const0_rtx;
8633
8634 gcc_unreachable ();
8635 }
8636
8637 /* Return true if return values of type TYPE should be returned
8638 in a memory buffer whose address is passed by the caller as
8639 hidden first argument. */
8640
8641 static bool
8642 s390_return_in_memory (const_tree type, const_tree fundecl ATTRIBUTE_UNUSED)
8643 {
8644 /* We accept small integral (and similar) types. */
8645 if (INTEGRAL_TYPE_P (type)
8646 || POINTER_TYPE_P (type)
8647 || TREE_CODE (type) == OFFSET_TYPE
8648 || TREE_CODE (type) == REAL_TYPE)
8649 return int_size_in_bytes (type) > 8;
8650
8651 /* Aggregates and similar constructs are always returned
8652 in memory. */
8653 if (AGGREGATE_TYPE_P (type)
8654 || TREE_CODE (type) == COMPLEX_TYPE
8655 || TREE_CODE (type) == VECTOR_TYPE)
8656 return true;
8657
8658 /* ??? We get called on all sorts of random stuff from
8659 aggregate_value_p. We can't abort, but it's not clear
8660 what's safe to return. Pretend it's a struct I guess. */
8661 return true;
8662 }
8663
8664 /* Function arguments and return values are promoted to word size. */
8665
8666 static enum machine_mode
8667 s390_promote_function_mode (const_tree type, enum machine_mode mode,
8668 int *punsignedp,
8669 const_tree fntype ATTRIBUTE_UNUSED,
8670 int for_return ATTRIBUTE_UNUSED)
8671 {
8672 if (INTEGRAL_MODE_P (mode)
8673 && GET_MODE_SIZE (mode) < UNITS_PER_LONG)
8674 {
8675 if (type != NULL_TREE && POINTER_TYPE_P (type))
8676 *punsignedp = POINTERS_EXTEND_UNSIGNED;
8677 return Pmode;
8678 }
8679
8680 return mode;
8681 }
8682
8683 /* Define where to return a (scalar) value of type RET_TYPE.
8684 If RET_TYPE is null, define where to return a (scalar)
8685 value of mode MODE from a libcall. */
8686
8687 static rtx
8688 s390_function_and_libcall_value (enum machine_mode mode,
8689 const_tree ret_type,
8690 const_tree fntype_or_decl,
8691 bool outgoing ATTRIBUTE_UNUSED)
8692 {
8693 /* For normal functions perform the promotion as
8694 promote_function_mode would do. */
8695 if (ret_type)
8696 {
8697 int unsignedp = TYPE_UNSIGNED (ret_type);
8698 mode = promote_function_mode (ret_type, mode, &unsignedp,
8699 fntype_or_decl, 1);
8700 }
8701
8702 gcc_assert (GET_MODE_CLASS (mode) == MODE_INT || SCALAR_FLOAT_MODE_P (mode));
8703 gcc_assert (GET_MODE_SIZE (mode) <= 8);
8704
8705 if (TARGET_HARD_FLOAT && SCALAR_FLOAT_MODE_P (mode))
8706 return gen_rtx_REG (mode, 16);
8707 else if (GET_MODE_SIZE (mode) <= UNITS_PER_LONG
8708 || UNITS_PER_LONG == UNITS_PER_WORD)
8709 return gen_rtx_REG (mode, 2);
8710 else if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_LONG)
8711 {
8712 /* This case is triggered when returning a 64 bit value with
8713 -m31 -mzarch. Although the value would fit into a single
8714 register it has to be forced into a 32 bit register pair in
8715 order to match the ABI. */
8716 rtvec p = rtvec_alloc (2);
8717
8718 RTVEC_ELT (p, 0)
8719 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 2), const0_rtx);
8720 RTVEC_ELT (p, 1)
8721 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 3), GEN_INT (4));
8722
8723 return gen_rtx_PARALLEL (mode, p);
8724 }
8725
8726 gcc_unreachable ();
8727 }
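/* Editorial note (hedged): as a concrete reading of the cases above, a
   double returned with hardware float comes back in %f0 (hard register 16),
   a long or pointer comes back in %r2, and a 64-bit integer under
   -m31 -mzarch comes back in the %r2/%r3 pair described by the PARALLEL
   just constructed.  */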
8728
8729 /* Define where to return a scalar return value of type RET_TYPE. */
8730
8731 static rtx
8732 s390_function_value (const_tree ret_type, const_tree fn_decl_or_type,
8733 bool outgoing)
8734 {
8735 return s390_function_and_libcall_value (TYPE_MODE (ret_type), ret_type,
8736 fn_decl_or_type, outgoing);
8737 }
8738
8739 /* Define where to return a scalar libcall return value of mode
8740 MODE. */
8741
8742 static rtx
8743 s390_libcall_value (enum machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
8744 {
8745 return s390_function_and_libcall_value (mode, NULL_TREE,
8746 NULL_TREE, true);
8747 }
8748
8749
8750 /* Create and return the va_list datatype.
8751
8752 On S/390, va_list is an array type equivalent to
8753
8754 typedef struct __va_list_tag
8755 {
8756 long __gpr;
8757 long __fpr;
8758 void *__overflow_arg_area;
8759 void *__reg_save_area;
8760 } va_list[1];
8761
8762 where __gpr and __fpr hold the number of general purpose
8763 or floating point arguments used up to now, respectively,
8764 __overflow_arg_area points to the stack location of the
8765 next argument passed on the stack, and __reg_save_area
8766 always points to the start of the register area in the
8767 call frame of the current function. The function prologue
8768 saves all registers used for argument passing into this
8769 area if the function uses variable arguments. */
8770
8771 static tree
8772 s390_build_builtin_va_list (void)
8773 {
8774 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
8775
8776 record = lang_hooks.types.make_type (RECORD_TYPE);
8777
8778 type_decl =
8779 build_decl (BUILTINS_LOCATION,
8780 TYPE_DECL, get_identifier ("__va_list_tag"), record);
8781
8782 f_gpr = build_decl (BUILTINS_LOCATION,
8783 FIELD_DECL, get_identifier ("__gpr"),
8784 long_integer_type_node);
8785 f_fpr = build_decl (BUILTINS_LOCATION,
8786 FIELD_DECL, get_identifier ("__fpr"),
8787 long_integer_type_node);
8788 f_ovf = build_decl (BUILTINS_LOCATION,
8789 FIELD_DECL, get_identifier ("__overflow_arg_area"),
8790 ptr_type_node);
8791 f_sav = build_decl (BUILTINS_LOCATION,
8792 FIELD_DECL, get_identifier ("__reg_save_area"),
8793 ptr_type_node);
8794
8795 va_list_gpr_counter_field = f_gpr;
8796 va_list_fpr_counter_field = f_fpr;
8797
8798 DECL_FIELD_CONTEXT (f_gpr) = record;
8799 DECL_FIELD_CONTEXT (f_fpr) = record;
8800 DECL_FIELD_CONTEXT (f_ovf) = record;
8801 DECL_FIELD_CONTEXT (f_sav) = record;
8802
8803 TYPE_STUB_DECL (record) = type_decl;
8804 TYPE_NAME (record) = type_decl;
8805 TYPE_FIELDS (record) = f_gpr;
8806 DECL_CHAIN (f_gpr) = f_fpr;
8807 DECL_CHAIN (f_fpr) = f_ovf;
8808 DECL_CHAIN (f_ovf) = f_sav;
8809
8810 layout_type (record);
8811
8812 /* The correct type is an array type of one element. */
8813 return build_array_type (record, build_index_type (size_zero_node));
8814 }
8815
8816 /* Implement va_start by filling the va_list structure VALIST.
8817 STDARG_P is always true, and ignored.
8818 NEXTARG points to the first anonymous stack argument.
8819
8820 The following global variables are used to initialize
8821 the va_list structure:
8822
8823 crtl->args.info:
8824 holds number of gprs and fprs used for named arguments.
8825 crtl->args.arg_offset_rtx:
8826 holds the offset of the first anonymous stack argument
8827 (relative to the virtual arg pointer). */
8828
8829 static void
8830 s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
8831 {
8832 HOST_WIDE_INT n_gpr, n_fpr;
8833 int off;
8834 tree f_gpr, f_fpr, f_ovf, f_sav;
8835 tree gpr, fpr, ovf, sav, t;
8836
8837 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
8838 f_fpr = DECL_CHAIN (f_gpr);
8839 f_ovf = DECL_CHAIN (f_fpr);
8840 f_sav = DECL_CHAIN (f_ovf);
8841
8842 valist = build_simple_mem_ref (valist);
8843 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
8844 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
8845 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
8846 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
8847
8848 /* Count number of gp and fp argument registers used. */
8849
8850 n_gpr = crtl->args.info.gprs;
8851 n_fpr = crtl->args.info.fprs;
8852
8853 if (cfun->va_list_gpr_size)
8854 {
8855 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
8856 build_int_cst (NULL_TREE, n_gpr));
8857 TREE_SIDE_EFFECTS (t) = 1;
8858 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8859 }
8860
8861 if (cfun->va_list_fpr_size)
8862 {
8863 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
8864 build_int_cst (NULL_TREE, n_fpr));
8865 TREE_SIDE_EFFECTS (t) = 1;
8866 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8867 }
8868
8869 /* Find the overflow area. */
8870 if (n_gpr + cfun->va_list_gpr_size > GP_ARG_NUM_REG
8871 || n_fpr + cfun->va_list_fpr_size > FP_ARG_NUM_REG)
8872 {
8873 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
8874
8875 off = INTVAL (crtl->args.arg_offset_rtx);
8876 off = off < 0 ? 0 : off;
8877 if (TARGET_DEBUG_ARG)
8878 fprintf (stderr, "va_start: n_gpr = %d, n_fpr = %d off %d\n",
8879 (int)n_gpr, (int)n_fpr, off);
8880
8881 t = fold_build_pointer_plus_hwi (t, off);
8882
8883 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
8884 TREE_SIDE_EFFECTS (t) = 1;
8885 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8886 }
8887
8888 /* Find the register save area. */
8889 if ((cfun->va_list_gpr_size && n_gpr < GP_ARG_NUM_REG)
8890 || (cfun->va_list_fpr_size && n_fpr < FP_ARG_NUM_REG))
8891 {
8892 t = make_tree (TREE_TYPE (sav), return_address_pointer_rtx);
8893 t = fold_build_pointer_plus_hwi (t, -RETURN_REGNUM * UNITS_PER_LONG);
8894
8895 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
8896 TREE_SIDE_EFFECTS (t) = 1;
8897 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8898 }
8899 }
8900
8901 /* Implement va_arg by updating the va_list structure
8902 VALIST as required to retrieve an argument of type
8903 TYPE, and returning that argument.
8904
8905 Generates code equivalent to:
8906
8907 if (integral value) {
8908 if (size <= 4 && args.gpr < 5 ||
8909 size > 4 && args.gpr < 4 )
8910 ret = args.reg_save_area[args.gpr+8]
8911 else
8912 ret = *args.overflow_arg_area++;
8913 } else if (float value) {
8914 if (args.fpr < 2)
8915 ret = args.reg_save_area[args.fpr+64]
8916 else
8917 ret = *args.overflow_arg_area++;
8918 } else if (aggregate value) {
8919 if (args.gpr < 5)
8920 ret = *args.reg_save_area[args.gpr]
8921 else
8922 ret = **args.overflow_arg_area++;
8923 } */
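/* Editorial worked example (hedged): with the offsets chosen in the code
   below on a 64-bit target (UNITS_PER_LONG == 8), the address of a
   general-purpose argument slot in the register save area is

     __reg_save_area + 2 * 8 + __gpr * 8

   and the address of a floating-point slot is

     __reg_save_area + 16 * 8 + __fpr * 8.

   Integer arguments smaller than a slot are right-justified within it
   (sav_ofs is bumped by UNITS_PER_LONG - size).  The 31-bit numbers follow
   the same pattern with UNITS_PER_LONG == 4, except that the FPR slots stay
   8 bytes wide.  */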
8924
8925 static tree
8926 s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
8927 gimple_seq *post_p ATTRIBUTE_UNUSED)
8928 {
8929 tree f_gpr, f_fpr, f_ovf, f_sav;
8930 tree gpr, fpr, ovf, sav, reg, t, u;
8931 int indirect_p, size, n_reg, sav_ofs, sav_scale, max_reg;
8932 tree lab_false, lab_over, addr;
8933
8934 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
8935 f_fpr = DECL_CHAIN (f_gpr);
8936 f_ovf = DECL_CHAIN (f_fpr);
8937 f_sav = DECL_CHAIN (f_ovf);
8938
8939 valist = build_va_arg_indirect_ref (valist);
8940 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
8941 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
8942 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
8943
8944 /* The tree for args* cannot be shared between gpr/fpr and ovf since
8945 both appear on a lhs. */
8946 valist = unshare_expr (valist);
8947 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
8948
8949 size = int_size_in_bytes (type);
8950
8951 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
8952 {
8953 if (TARGET_DEBUG_ARG)
8954 {
8955 fprintf (stderr, "va_arg: aggregate type");
8956 debug_tree (type);
8957 }
8958
8959 /* Aggregates are passed by reference. */
8960 indirect_p = 1;
8961 reg = gpr;
8962 n_reg = 1;
8963
8964 /* Kernel stack layout on 31 bit: it is assumed here that no padding
8965 will be added by s390_frame_info because for va_args an even number
8966 of gprs always has to be saved (r15-r2 = 14 regs). */
8967 sav_ofs = 2 * UNITS_PER_LONG;
8968 sav_scale = UNITS_PER_LONG;
8969 size = UNITS_PER_LONG;
8970 max_reg = GP_ARG_NUM_REG - n_reg;
8971 }
8972 else if (s390_function_arg_float (TYPE_MODE (type), type))
8973 {
8974 if (TARGET_DEBUG_ARG)
8975 {
8976 fprintf (stderr, "va_arg: float type");
8977 debug_tree (type);
8978 }
8979
8980 /* FP args go in FP registers, if present. */
8981 indirect_p = 0;
8982 reg = fpr;
8983 n_reg = 1;
8984 sav_ofs = 16 * UNITS_PER_LONG;
8985 sav_scale = 8;
8986 max_reg = FP_ARG_NUM_REG - n_reg;
8987 }
8988 else
8989 {
8990 if (TARGET_DEBUG_ARG)
8991 {
8992 fprintf (stderr, "va_arg: other type");
8993 debug_tree (type);
8994 }
8995
8996 /* Otherwise into GP registers. */
8997 indirect_p = 0;
8998 reg = gpr;
8999 n_reg = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
9000
9001 /* Kernel stack layout on 31 bit: it is assumed here that no padding
9002 will be added by s390_frame_info because for va_args an even number
9003 of gprs always has to be saved (r15-r2 = 14 regs). */
9004 sav_ofs = 2 * UNITS_PER_LONG;
9005
9006 if (size < UNITS_PER_LONG)
9007 sav_ofs += UNITS_PER_LONG - size;
9008
9009 sav_scale = UNITS_PER_LONG;
9010 max_reg = GP_ARG_NUM_REG - n_reg;
9011 }
9012
9013 /* Pull the value out of the saved registers ... */
9014
9015 lab_false = create_artificial_label (UNKNOWN_LOCATION);
9016 lab_over = create_artificial_label (UNKNOWN_LOCATION);
9017 addr = create_tmp_var (ptr_type_node, "addr");
9018
9019 t = fold_convert (TREE_TYPE (reg), size_int (max_reg));
9020 t = build2 (GT_EXPR, boolean_type_node, reg, t);
9021 u = build1 (GOTO_EXPR, void_type_node, lab_false);
9022 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
9023 gimplify_and_add (t, pre_p);
9024
9025 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
9026 u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
9027 fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
9028 t = fold_build_pointer_plus (t, u);
9029
9030 gimplify_assign (addr, t, pre_p);
9031
9032 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
9033
9034 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
9035
9036
9037 /* ... Otherwise out of the overflow area. */
9038
9039 t = ovf;
9040 if (size < UNITS_PER_LONG)
9041 t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG - size);
9042
9043 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
9044
9045 gimplify_assign (addr, t, pre_p);
9046
9047 t = fold_build_pointer_plus_hwi (t, size);
9048 gimplify_assign (ovf, t, pre_p);
9049
9050 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
9051
9052
9053 /* Increment register save count. */
9054
9055 u = build2 (PREINCREMENT_EXPR, TREE_TYPE (reg), reg,
9056 fold_convert (TREE_TYPE (reg), size_int (n_reg)));
9057 gimplify_and_add (u, pre_p);
9058
9059 if (indirect_p)
9060 {
9061 t = build_pointer_type_for_mode (build_pointer_type (type),
9062 ptr_mode, true);
9063 addr = fold_convert (t, addr);
9064 addr = build_va_arg_indirect_ref (addr);
9065 }
9066 else
9067 {
9068 t = build_pointer_type_for_mode (type, ptr_mode, true);
9069 addr = fold_convert (t, addr);
9070 }
9071
9072 return build_va_arg_indirect_ref (addr);
9073 }
9074
9075
9076 /* Builtins. */
9077
9078 enum s390_builtin
9079 {
9080 S390_BUILTIN_THREAD_POINTER,
9081 S390_BUILTIN_SET_THREAD_POINTER,
9082
9083 S390_BUILTIN_max
9084 };
9085
9086 static enum insn_code const code_for_builtin_64[S390_BUILTIN_max] = {
9087 CODE_FOR_get_tp_64,
9088 CODE_FOR_set_tp_64
9089 };
9090
9091 static enum insn_code const code_for_builtin_31[S390_BUILTIN_max] = {
9092 CODE_FOR_get_tp_31,
9093 CODE_FOR_set_tp_31
9094 };
9095
9096 static void
9097 s390_init_builtins (void)
9098 {
9099 tree ftype;
9100
9101 ftype = build_function_type_list (ptr_type_node, NULL_TREE);
9102 add_builtin_function ("__builtin_thread_pointer", ftype,
9103 S390_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
9104 NULL, NULL_TREE);
9105
9106 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
9107 add_builtin_function ("__builtin_set_thread_pointer", ftype,
9108 S390_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
9109 NULL, NULL_TREE);
9110 }
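/* Editorial usage sketch (hedged): the two builtins registered above are
   meant to be used from C roughly as

     void *tp = __builtin_thread_pointer ();
     __builtin_set_thread_pointer (tp);

   i.e. they read and write the thread pointer kept in the access registers;
   the actual expansion happens in s390_expand_builtin below.  */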
9111
9112 /* Expand an expression EXP that calls a built-in function,
9113 with result going to TARGET if that's convenient
9114 (and in mode MODE if that's convenient).
9115 SUBTARGET may be used as the target for computing one of EXP's operands.
9116 IGNORE is nonzero if the value is to be ignored. */
9117
9118 static rtx
9119 s390_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
9120 enum machine_mode mode ATTRIBUTE_UNUSED,
9121 int ignore ATTRIBUTE_UNUSED)
9122 {
9123 #define MAX_ARGS 2
9124
9125 enum insn_code const *code_for_builtin =
9126 TARGET_64BIT ? code_for_builtin_64 : code_for_builtin_31;
9127
9128 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
9129 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
9130 enum insn_code icode;
9131 rtx op[MAX_ARGS], pat;
9132 int arity;
9133 bool nonvoid;
9134 tree arg;
9135 call_expr_arg_iterator iter;
9136
9137 if (fcode >= S390_BUILTIN_max)
9138 internal_error ("bad builtin fcode");
9139 icode = code_for_builtin[fcode];
9140 if (icode == 0)
9141 internal_error ("bad builtin fcode");
9142
9143 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
9144
9145 arity = 0;
9146 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
9147 {
9148 const struct insn_operand_data *insn_op;
9149
9150 if (arg == error_mark_node)
9151 return NULL_RTX;
9152 if (arity > MAX_ARGS)
9153 return NULL_RTX;
9154
9155 insn_op = &insn_data[icode].operand[arity + nonvoid];
9156
9157 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
9158
9159 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
9160 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
9161 arity++;
9162 }
9163
9164 if (nonvoid)
9165 {
9166 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9167 if (!target
9168 || GET_MODE (target) != tmode
9169 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
9170 target = gen_reg_rtx (tmode);
9171 }
9172
9173 switch (arity)
9174 {
9175 case 0:
9176 pat = GEN_FCN (icode) (target);
9177 break;
9178 case 1:
9179 if (nonvoid)
9180 pat = GEN_FCN (icode) (target, op[0]);
9181 else
9182 pat = GEN_FCN (icode) (op[0]);
9183 break;
9184 case 2:
9185 pat = GEN_FCN (icode) (target, op[0], op[1]);
9186 break;
9187 default:
9188 gcc_unreachable ();
9189 }
9190 if (!pat)
9191 return NULL_RTX;
9192 emit_insn (pat);
9193
9194 if (nonvoid)
9195 return target;
9196 else
9197 return const0_rtx;
9198 }
9199
9200
9201 /* Output assembly code for the trampoline template to
9202 stdio stream FILE.
9203
9204 On S/390, we use gpr 1 internally in the trampoline code;
9205 gpr 0 is used to hold the static chain. */
9206
9207 static void
9208 s390_asm_trampoline_template (FILE *file)
9209 {
9210 rtx op[2];
9211 op[0] = gen_rtx_REG (Pmode, 0);
9212 op[1] = gen_rtx_REG (Pmode, 1);
9213
9214 if (TARGET_64BIT)
9215 {
9216 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
9217 output_asm_insn ("lmg\t%0,%1,14(%1)", op); /* 6 byte */
9218 output_asm_insn ("br\t%1", op); /* 2 byte */
9219 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 10));
9220 }
9221 else
9222 {
9223 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
9224 output_asm_insn ("lm\t%0,%1,6(%1)", op); /* 4 byte */
9225 output_asm_insn ("br\t%1", op); /* 2 byte */
9226 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 8));
9227 }
9228 }
9229
9230 /* Emit RTL insns to initialize the variable parts of a trampoline.
9231 FNADDR is an RTX for the address of the function's pure code.
9232 CXT is an RTX for the static chain value for the function. */
9233
9234 static void
9235 s390_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
9236 {
9237 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
9238 rtx mem;
9239
9240 emit_block_move (m_tramp, assemble_trampoline_template (),
9241 GEN_INT (2 * UNITS_PER_LONG), BLOCK_OP_NORMAL);
9242
9243 mem = adjust_address (m_tramp, Pmode, 2 * UNITS_PER_LONG);
9244 emit_move_insn (mem, cxt);
9245 mem = adjust_address (m_tramp, Pmode, 3 * UNITS_PER_LONG);
9246 emit_move_insn (mem, fnaddr);
9247 }
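/* Editorial note (hedged): together with the template emitted above, this
   gives the following trampoline layout (64-bit shown; the 31-bit layout is
   analogous with 4-byte slots):

     offset  0: basr %r1,0; lmg %r0,%r1,14(%r1); br %r1   (code, padded)
     offset 16: static chain value  -> loaded into %r0
     offset 24: target address      -> loaded into %r1

   The basr leaves %r1 pointing at offset 2, so 14(%r1) addresses offset 16,
   which is exactly where the two values are stored here.  */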
9248
9249 /* Output assembler code to FILE to increment profiler label # LABELNO
9250 for profiling a function entry. */
9251
9252 void
9253 s390_function_profiler (FILE *file, int labelno)
9254 {
9255 rtx op[7];
9256
9257 char label[128];
9258 ASM_GENERATE_INTERNAL_LABEL (label, "LP", labelno);
9259
9260 fprintf (file, "# function profiler \n");
9261
9262 op[0] = gen_rtx_REG (Pmode, RETURN_REGNUM);
9263 op[1] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
9264 op[1] = gen_rtx_MEM (Pmode, plus_constant (op[1], UNITS_PER_LONG));
9265
9266 op[2] = gen_rtx_REG (Pmode, 1);
9267 op[3] = gen_rtx_SYMBOL_REF (Pmode, label);
9268 SYMBOL_REF_FLAGS (op[3]) = SYMBOL_FLAG_LOCAL;
9269
9270 op[4] = gen_rtx_SYMBOL_REF (Pmode, "_mcount");
9271 if (flag_pic)
9272 {
9273 op[4] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[4]), UNSPEC_PLT);
9274 op[4] = gen_rtx_CONST (Pmode, op[4]);
9275 }
9276
9277 if (TARGET_64BIT)
9278 {
9279 output_asm_insn ("stg\t%0,%1", op);
9280 output_asm_insn ("larl\t%2,%3", op);
9281 output_asm_insn ("brasl\t%0,%4", op);
9282 output_asm_insn ("lg\t%0,%1", op);
9283 }
9284 else if (!flag_pic)
9285 {
9286 op[6] = gen_label_rtx ();
9287
9288 output_asm_insn ("st\t%0,%1", op);
9289 output_asm_insn ("bras\t%2,%l6", op);
9290 output_asm_insn (".long\t%4", op);
9291 output_asm_insn (".long\t%3", op);
9292 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
9293 output_asm_insn ("l\t%0,0(%2)", op);
9294 output_asm_insn ("l\t%2,4(%2)", op);
9295 output_asm_insn ("basr\t%0,%0", op);
9296 output_asm_insn ("l\t%0,%1", op);
9297 }
9298 else
9299 {
9300 op[5] = gen_label_rtx ();
9301 op[6] = gen_label_rtx ();
9302
9303 output_asm_insn ("st\t%0,%1", op);
9304 output_asm_insn ("bras\t%2,%l6", op);
9305 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[5]));
9306 output_asm_insn (".long\t%4-%l5", op);
9307 output_asm_insn (".long\t%3-%l5", op);
9308 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
9309 output_asm_insn ("lr\t%0,%2", op);
9310 output_asm_insn ("a\t%0,0(%2)", op);
9311 output_asm_insn ("a\t%2,4(%2)", op);
9312 output_asm_insn ("basr\t%0,%0", op);
9313 output_asm_insn ("l\t%0,%1", op);
9314 }
9315 }
9316
9317 /* Encode symbol attributes (local vs. global, tls model) of a SYMBOL_REF
9318 into its SYMBOL_REF_FLAGS. */
9319
9320 static void
9321 s390_encode_section_info (tree decl, rtx rtl, int first)
9322 {
9323 default_encode_section_info (decl, rtl, first);
9324
9325 if (TREE_CODE (decl) == VAR_DECL)
9326 {
9327 /* If a variable has a forced alignment to < 2 bytes, mark it
9328 with SYMBOL_FLAG_ALIGN1 to prevent it from being used as LARL
9329 operand. */
9330 if (DECL_USER_ALIGN (decl) && DECL_ALIGN (decl) < 16)
9331 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_ALIGN1;
9332 if (!DECL_SIZE (decl)
9333 || !DECL_ALIGN (decl)
9334 || !host_integerp (DECL_SIZE (decl), 0)
9335 || (DECL_ALIGN (decl) <= 64
9336 && DECL_ALIGN (decl) != tree_low_cst (DECL_SIZE (decl), 0)))
9337 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
9338 }
9339
9340 /* Literal pool references don't have a decl so they are handled
9341 differently here. We rely on the information in the MEM_ALIGN
9342 entry to decide upon natural alignment. */
9343 if (MEM_P (rtl)
9344 && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
9345 && TREE_CONSTANT_POOL_ADDRESS_P (XEXP (rtl, 0))
9346 && (MEM_ALIGN (rtl) == 0
9347 || GET_MODE_BITSIZE (GET_MODE (rtl)) == 0
9348 || MEM_ALIGN (rtl) < GET_MODE_BITSIZE (GET_MODE (rtl))))
9349 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
9350 }
9351
9352 /* Output thunk to FILE that implements a C++ virtual function call (with
9353 multiple inheritance) to FUNCTION. The thunk adjusts the this pointer
9354 by DELTA, and unless VCALL_OFFSET is zero, applies an additional adjustment
9355 stored at VCALL_OFFSET in the vtable whose address is located at offset 0
9356 relative to the resulting this pointer. */
9357
9358 static void
9359 s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
9360 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
9361 tree function)
9362 {
9363 rtx op[10];
9364 int nonlocal = 0;
9365
9366 /* Make sure unwind info is emitted for the thunk if needed. */
9367 final_start_function (emit_barrier (), file, 1);
9368
9369 /* Operand 0 is the target function. */
9370 op[0] = XEXP (DECL_RTL (function), 0);
9371 if (flag_pic && !SYMBOL_REF_LOCAL_P (op[0]))
9372 {
9373 nonlocal = 1;
9374 op[0] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[0]),
9375 TARGET_64BIT ? UNSPEC_PLT : UNSPEC_GOT);
9376 op[0] = gen_rtx_CONST (Pmode, op[0]);
9377 }
9378
9379 /* Operand 1 is the 'this' pointer. */
9380 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
9381 op[1] = gen_rtx_REG (Pmode, 3);
9382 else
9383 op[1] = gen_rtx_REG (Pmode, 2);
9384
9385 /* Operand 2 is the delta. */
9386 op[2] = GEN_INT (delta);
9387
9388 /* Operand 3 is the vcall_offset. */
9389 op[3] = GEN_INT (vcall_offset);
9390
9391 /* Operand 4 is the temporary register. */
9392 op[4] = gen_rtx_REG (Pmode, 1);
9393
9394 /* Operands 5 to 8 can be used as labels. */
9395 op[5] = NULL_RTX;
9396 op[6] = NULL_RTX;
9397 op[7] = NULL_RTX;
9398 op[8] = NULL_RTX;
9399
9400 /* Operand 9 can be used for a temporary register. */
9401 op[9] = NULL_RTX;
9402
9403 /* Generate code. */
9404 if (TARGET_64BIT)
9405 {
9406 /* Setup literal pool pointer if required. */
9407 if ((!DISP_IN_RANGE (delta)
9408 && !CONST_OK_FOR_K (delta)
9409 && !CONST_OK_FOR_Os (delta))
9410 || (!DISP_IN_RANGE (vcall_offset)
9411 && !CONST_OK_FOR_K (vcall_offset)
9412 && !CONST_OK_FOR_Os (vcall_offset)))
9413 {
9414 op[5] = gen_label_rtx ();
9415 output_asm_insn ("larl\t%4,%5", op);
9416 }
9417
9418 /* Add DELTA to this pointer. */
9419 if (delta)
9420 {
9421 if (CONST_OK_FOR_J (delta))
9422 output_asm_insn ("la\t%1,%2(%1)", op);
9423 else if (DISP_IN_RANGE (delta))
9424 output_asm_insn ("lay\t%1,%2(%1)", op);
9425 else if (CONST_OK_FOR_K (delta))
9426 output_asm_insn ("aghi\t%1,%2", op);
9427 else if (CONST_OK_FOR_Os (delta))
9428 output_asm_insn ("agfi\t%1,%2", op);
9429 else
9430 {
9431 op[6] = gen_label_rtx ();
9432 output_asm_insn ("agf\t%1,%6-%5(%4)", op);
9433 }
9434 }
9435
9436 /* Perform vcall adjustment. */
9437 if (vcall_offset)
9438 {
9439 if (DISP_IN_RANGE (vcall_offset))
9440 {
9441 output_asm_insn ("lg\t%4,0(%1)", op);
9442 output_asm_insn ("ag\t%1,%3(%4)", op);
9443 }
9444 else if (CONST_OK_FOR_K (vcall_offset))
9445 {
9446 output_asm_insn ("lghi\t%4,%3", op);
9447 output_asm_insn ("ag\t%4,0(%1)", op);
9448 output_asm_insn ("ag\t%1,0(%4)", op);
9449 }
9450 else if (CONST_OK_FOR_Os (vcall_offset))
9451 {
9452 output_asm_insn ("lgfi\t%4,%3", op);
9453 output_asm_insn ("ag\t%4,0(%1)", op);
9454 output_asm_insn ("ag\t%1,0(%4)", op);
9455 }
9456 else
9457 {
9458 op[7] = gen_label_rtx ();
9459 output_asm_insn ("llgf\t%4,%7-%5(%4)", op);
9460 output_asm_insn ("ag\t%4,0(%1)", op);
9461 output_asm_insn ("ag\t%1,0(%4)", op);
9462 }
9463 }
9464
9465 /* Jump to target. */
9466 output_asm_insn ("jg\t%0", op);
9467
9468 /* Output literal pool if required. */
9469 if (op[5])
9470 {
9471 output_asm_insn (".align\t4", op);
9472 targetm.asm_out.internal_label (file, "L",
9473 CODE_LABEL_NUMBER (op[5]));
9474 }
9475 if (op[6])
9476 {
9477 targetm.asm_out.internal_label (file, "L",
9478 CODE_LABEL_NUMBER (op[6]));
9479 output_asm_insn (".long\t%2", op);
9480 }
9481 if (op[7])
9482 {
9483 targetm.asm_out.internal_label (file, "L",
9484 CODE_LABEL_NUMBER (op[7]));
9485 output_asm_insn (".long\t%3", op);
9486 }
9487 }
9488 else
9489 {
9490 /* Setup base pointer if required. */
9491 if (!vcall_offset
9492 || (!DISP_IN_RANGE (delta)
9493 && !CONST_OK_FOR_K (delta)
9494 && !CONST_OK_FOR_Os (delta))
9495 || (!DISP_IN_RANGE (delta)
9496 && !CONST_OK_FOR_K (vcall_offset)
9497 && !CONST_OK_FOR_Os (vcall_offset)))
9498 {
9499 op[5] = gen_label_rtx ();
9500 output_asm_insn ("basr\t%4,0", op);
9501 targetm.asm_out.internal_label (file, "L",
9502 CODE_LABEL_NUMBER (op[5]));
9503 }
9504
9505 /* Add DELTA to this pointer. */
9506 if (delta)
9507 {
9508 if (CONST_OK_FOR_J (delta))
9509 output_asm_insn ("la\t%1,%2(%1)", op);
9510 else if (DISP_IN_RANGE (delta))
9511 output_asm_insn ("lay\t%1,%2(%1)", op);
9512 else if (CONST_OK_FOR_K (delta))
9513 output_asm_insn ("ahi\t%1,%2", op);
9514 else if (CONST_OK_FOR_Os (delta))
9515 output_asm_insn ("afi\t%1,%2", op);
9516 else
9517 {
9518 op[6] = gen_label_rtx ();
9519 output_asm_insn ("a\t%1,%6-%5(%4)", op);
9520 }
9521 }
9522
9523 /* Perform vcall adjustment. */
9524 if (vcall_offset)
9525 {
9526 if (CONST_OK_FOR_J (vcall_offset))
9527 {
9528 output_asm_insn ("l\t%4,0(%1)", op);
9529 output_asm_insn ("a\t%1,%3(%4)", op);
9530 }
9531 else if (DISP_IN_RANGE (vcall_offset))
9532 {
9533 output_asm_insn ("l\t%4,0(%1)", op);
9534 output_asm_insn ("ay\t%1,%3(%4)", op);
9535 }
9536 else if (CONST_OK_FOR_K (vcall_offset))
9537 {
9538 output_asm_insn ("lhi\t%4,%3", op);
9539 output_asm_insn ("a\t%4,0(%1)", op);
9540 output_asm_insn ("a\t%1,0(%4)", op);
9541 }
9542 else if (CONST_OK_FOR_Os (vcall_offset))
9543 {
9544 output_asm_insn ("iilf\t%4,%3", op);
9545 output_asm_insn ("a\t%4,0(%1)", op);
9546 output_asm_insn ("a\t%1,0(%4)", op);
9547 }
9548 else
9549 {
9550 op[7] = gen_label_rtx ();
9551 output_asm_insn ("l\t%4,%7-%5(%4)", op);
9552 output_asm_insn ("a\t%4,0(%1)", op);
9553 output_asm_insn ("a\t%1,0(%4)", op);
9554 }
9555
9556 /* We had to clobber the base pointer register.
9557 Re-setup the base pointer (with a different base). */
9558 op[5] = gen_label_rtx ();
9559 output_asm_insn ("basr\t%4,0", op);
9560 targetm.asm_out.internal_label (file, "L",
9561 CODE_LABEL_NUMBER (op[5]));
9562 }
9563
9564 /* Jump to target. */
9565 op[8] = gen_label_rtx ();
9566
9567 if (!flag_pic)
9568 output_asm_insn ("l\t%4,%8-%5(%4)", op);
9569 else if (!nonlocal)
9570 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9571 /* We cannot call through .plt, since .plt requires %r12 loaded. */
9572 else if (flag_pic == 1)
9573 {
9574 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9575 output_asm_insn ("l\t%4,%0(%4)", op);
9576 }
9577 else if (flag_pic == 2)
9578 {
9579 op[9] = gen_rtx_REG (Pmode, 0);
9580 output_asm_insn ("l\t%9,%8-4-%5(%4)", op);
9581 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9582 output_asm_insn ("ar\t%4,%9", op);
9583 output_asm_insn ("l\t%4,0(%4)", op);
9584 }
9585
9586 output_asm_insn ("br\t%4", op);
9587
9588 /* Output literal pool. */
9589 output_asm_insn (".align\t4", op);
9590
9591 if (nonlocal && flag_pic == 2)
9592 output_asm_insn (".long\t%0", op);
9593 if (nonlocal)
9594 {
9595 op[0] = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
9596 SYMBOL_REF_FLAGS (op[0]) = SYMBOL_FLAG_LOCAL;
9597 }
9598
9599 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[8]));
9600 if (!flag_pic)
9601 output_asm_insn (".long\t%0", op);
9602 else
9603 output_asm_insn (".long\t%0-%5", op);
9604
9605 if (op[6])
9606 {
9607 targetm.asm_out.internal_label (file, "L",
9608 CODE_LABEL_NUMBER (op[6]));
9609 output_asm_insn (".long\t%2", op);
9610 }
9611 if (op[7])
9612 {
9613 targetm.asm_out.internal_label (file, "L",
9614 CODE_LABEL_NUMBER (op[7]));
9615 output_asm_insn (".long\t%3", op);
9616 }
9617 }
9618 final_end_function ();
9619 }
9620
9621 static bool
9622 s390_valid_pointer_mode (enum machine_mode mode)
9623 {
9624 return (mode == SImode || (TARGET_64BIT && mode == DImode));
9625 }
9626
9627 /* Check whether the given CALL_EXPR would use a call-saved
9628 register to pass an argument. This is used to decide whether sibling
9629 call optimization can be performed on the respective function
9630 call. */
9631
9632 static bool
9633 s390_call_saved_register_used (tree call_expr)
9634 {
9635 CUMULATIVE_ARGS cum_v;
9636 cumulative_args_t cum;
9637 tree parameter;
9638 enum machine_mode mode;
9639 tree type;
9640 rtx parm_rtx;
9641 int reg, i;
9642
9643 INIT_CUMULATIVE_ARGS (cum_v, NULL, NULL, 0, 0);
9644 cum = pack_cumulative_args (&cum_v);
9645
9646 for (i = 0; i < call_expr_nargs (call_expr); i++)
9647 {
9648 parameter = CALL_EXPR_ARG (call_expr, i);
9649 gcc_assert (parameter);
9650
9651 /* For an undeclared variable passed as parameter we will get
9652 an ERROR_MARK node here. */
9653 if (TREE_CODE (parameter) == ERROR_MARK)
9654 return true;
9655
9656 type = TREE_TYPE (parameter);
9657 gcc_assert (type);
9658
9659 mode = TYPE_MODE (type);
9660 gcc_assert (mode);
9661
9662 if (pass_by_reference (&cum_v, mode, type, true))
9663 {
9664 mode = Pmode;
9665 type = build_pointer_type (type);
9666 }
9667
9668 parm_rtx = s390_function_arg (cum, mode, type, 0);
9669
9670 s390_function_arg_advance (cum, mode, type, 0);
9671
9672 if (!parm_rtx)
9673 continue;
9674
9675 if (REG_P (parm_rtx))
9676 {
9677 for (reg = 0;
9678 reg < HARD_REGNO_NREGS (REGNO (parm_rtx), GET_MODE (parm_rtx));
9679 reg++)
9680 if (!call_used_regs[reg + REGNO (parm_rtx)])
9681 return true;
9682 }
9683
9684 if (GET_CODE (parm_rtx) == PARALLEL)
9685 {
9686 int i;
9687
9688 for (i = 0; i < XVECLEN (parm_rtx, 0); i++)
9689 {
9690 rtx r = XEXP (XVECEXP (parm_rtx, 0, i), 0);
9691
9692 gcc_assert (REG_P (r));
9693
9694 for (reg = 0;
9695 reg < HARD_REGNO_NREGS (REGNO (r), GET_MODE (r));
9696 reg++)
9697 if (!call_used_regs[reg + REGNO (r)])
9698 return true;
9699 }
9700 }
9701
9702 }
9703 return false;
9704 }
9705
9706 /* Return true if the given call expression can be
9707 turned into a sibling call.
9708 DECL holds the declaration of the function to be called whereas
9709 EXP is the call expression itself. */
9710
9711 static bool
9712 s390_function_ok_for_sibcall (tree decl, tree exp)
9713 {
9714 /* The TPF epilogue uses register 1. */
9715 if (TARGET_TPF_PROFILING)
9716 return false;
9717
9718 /* The 31 bit PLT code uses register 12 (GOT pointer - caller saved)
9719 which would have to be restored before the sibcall. */
9720 if (!TARGET_64BIT && flag_pic && decl && !targetm.binds_local_p (decl))
9721 return false;
9722
9723 /* Register 6 on s390 is available as an argument register but unfortunately
9724 "call saved". This makes functions needing this register for arguments
9725 not suitable for sibcalls. */
9726 return !s390_call_saved_register_used (exp);
9727 }
9728
9729 /* Return the fixed registers used for condition codes. */
9730
9731 static bool
9732 s390_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
9733 {
9734 *p1 = CC_REGNUM;
9735 *p2 = INVALID_REGNUM;
9736
9737 return true;
9738 }
9739
9740 /* This function is used by the call expanders of the machine description.
9741 It emits the call insn itself together with the necessary operations
9742 to adjust the target address and returns the emitted insn.
9743 ADDR_LOCATION is the target address rtx
9744 TLS_CALL the location of the thread-local symbol
9745 RESULT_REG the register where the result of the call should be stored
9746 RETADDR_REG the register where the return address should be stored
9747 If this parameter is NULL_RTX the call is considered
9748 to be a sibling call. */
9749
9750 rtx
9751 s390_emit_call (rtx addr_location, rtx tls_call, rtx result_reg,
9752 rtx retaddr_reg)
9753 {
9754 bool plt_call = false;
9755 rtx insn;
9756 rtx call;
9757 rtx clobber;
9758 rtvec vec;
9759
9760 /* Direct function calls need special treatment. */
9761 if (GET_CODE (addr_location) == SYMBOL_REF)
9762 {
9763 /* When calling a global routine in PIC mode, we must
9764 replace the symbol itself with the PLT stub. */
9765 if (flag_pic && !SYMBOL_REF_LOCAL_P (addr_location))
9766 {
9767 if (retaddr_reg != NULL_RTX)
9768 {
9769 addr_location = gen_rtx_UNSPEC (Pmode,
9770 gen_rtvec (1, addr_location),
9771 UNSPEC_PLT);
9772 addr_location = gen_rtx_CONST (Pmode, addr_location);
9773 plt_call = true;
9774 }
9775 else
9776 /* For -fpic code the PLT entries might use r12 which is
9777 call-saved. Therefore we cannot do a sibcall when
9778 calling directly using a symbol ref. When reaching
9779 this point we decided (in s390_function_ok_for_sibcall)
9780 to do a sibcall for a function pointer but one of the
9781 optimizers was able to get rid of the function pointer
9782 by propagating the symbol ref into the call. This
9783 optimization is illegal for S/390 so we turn the direct
9784 call into an indirect call again. */
9785 addr_location = force_reg (Pmode, addr_location);
9786 }
9787
9788 /* Unless we can use the bras(l) insn, force the
9789 routine address into a register. */
9790 if (!TARGET_SMALL_EXEC && !TARGET_CPU_ZARCH)
9791 {
9792 if (flag_pic)
9793 addr_location = legitimize_pic_address (addr_location, 0);
9794 else
9795 addr_location = force_reg (Pmode, addr_location);
9796 }
9797 }
9798
9799 /* If it is already an indirect call or the code above moved the
9800 SYMBOL_REF to somewhere else, make sure the address can be found in
9801 register 1. */
9802 if (retaddr_reg == NULL_RTX
9803 && GET_CODE (addr_location) != SYMBOL_REF
9804 && !plt_call)
9805 {
9806 emit_move_insn (gen_rtx_REG (Pmode, SIBCALL_REGNUM), addr_location);
9807 addr_location = gen_rtx_REG (Pmode, SIBCALL_REGNUM);
9808 }
9809
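/* Represent the call target as a MEM, as the call patterns expect.  The
   QImode here is only a placeholder; the address refers to code rather
   than to data of a particular mode.  */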
9810 addr_location = gen_rtx_MEM (QImode, addr_location);
9811 call = gen_rtx_CALL (VOIDmode, addr_location, const0_rtx);
9812
9813 if (result_reg != NULL_RTX)
9814 call = gen_rtx_SET (VOIDmode, result_reg, call);
9815
9816 if (retaddr_reg != NULL_RTX)
9817 {
9818 clobber = gen_rtx_CLOBBER (VOIDmode, retaddr_reg);
9819
9820 if (tls_call != NULL_RTX)
9821 vec = gen_rtvec (3, call, clobber,
9822 gen_rtx_USE (VOIDmode, tls_call));
9823 else
9824 vec = gen_rtvec (2, call, clobber);
9825
9826 call = gen_rtx_PARALLEL (VOIDmode, vec);
9827 }
9828
9829 insn = emit_call_insn (call);
9830
9831 /* 31-bit PLT stubs and tls calls use the GOT register implicitly. */
9832 if ((!TARGET_64BIT && plt_call) || tls_call != NULL_RTX)
9833 {
9834 /* s390_function_ok_for_sibcall should
9835 have denied sibcalls in this case. */
9836 gcc_assert (retaddr_reg != NULL_RTX);
9837 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, 12));
9838 }
9839 return insn;
9840 }
9841
9842 /* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
9843
9844 static void
9845 s390_conditional_register_usage (void)
9846 {
9847 int i;
9848
9849 if (flag_pic)
9850 {
9851 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
9852 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
9853 }
9854 if (TARGET_CPU_ZARCH)
9855 {
9856 fixed_regs[BASE_REGNUM] = 0;
9857 call_used_regs[BASE_REGNUM] = 0;
9858 fixed_regs[RETURN_REGNUM] = 0;
9859 call_used_regs[RETURN_REGNUM] = 0;
9860 }
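/* Floating point registers: the 64-bit ABI preserves %f8-%f15 across
   calls (hard regs 24-31), while the 31-bit ABI only preserves %f4 and
   %f6 (hard regs 18 and 19).  */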
9861 if (TARGET_64BIT)
9862 {
9863 for (i = 24; i < 32; i++)
9864 call_used_regs[i] = call_really_used_regs[i] = 0;
9865 }
9866 else
9867 {
9868 for (i = 18; i < 20; i++)
9869 call_used_regs[i] = call_really_used_regs[i] = 0;
9870 }
9871
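/* Without hardware floating point none of the FPRs (hard regs 16-31)
   may be used.  */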
9872 if (TARGET_SOFT_FLOAT)
9873 {
9874 for (i = 16; i < 32; i++)
9875 call_used_regs[i] = fixed_regs[i] = 1;
9876 }
9877 }
9878
9879 /* Corresponding function to eh_return expander. */
9880
9881 static GTY(()) rtx s390_tpf_eh_return_symbol;
9882 void
9883 s390_emit_tpf_eh_return (rtx target)
9884 {
9885 rtx insn, reg;
9886
9887 if (!s390_tpf_eh_return_symbol)
9888 s390_tpf_eh_return_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tpf_eh_return");
9889
9890 reg = gen_rtx_REG (Pmode, 2);
9891
9892 emit_move_insn (reg, target);
9893 insn = s390_emit_call (s390_tpf_eh_return_symbol, NULL_RTX, reg,
9894 gen_rtx_REG (Pmode, RETURN_REGNUM));
9895 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
9896
9897 emit_move_insn (EH_RETURN_HANDLER_RTX, reg);
9898 }
9899
9900 /* Rework the prologue/epilogue to avoid saving/restoring
9901 registers unnecessarily. */
9902
9903 static void
9904 s390_optimize_prologue (void)
9905 {
9906 rtx insn, new_insn, next_insn;
9907
9908 /* Do a final recompute of the frame-related data. */
9909
9910 s390_update_frame_layout ();
9911
9912 /* If all special registers are in fact used, there's nothing we
9913 can do, so no point in walking the insn list. */
9914
9915 if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
9916 && cfun_frame_layout.last_save_gpr >= BASE_REGNUM
9917 && (TARGET_CPU_ZARCH
9918 || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
9919 && cfun_frame_layout.last_save_gpr >= RETURN_REGNUM)))
9920 return;
9921
9922 /* Search for prologue/epilogue insns and replace them. */
9923
9924 for (insn = get_insns (); insn; insn = next_insn)
9925 {
9926 int first, last, off;
9927 rtx set, base, offset;
9928
9929 next_insn = NEXT_INSN (insn);
9930
9931 if (GET_CODE (insn) != INSN)
9932 continue;
9933
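/* A prologue store-multiple that saves more GPRs than the final frame
   layout requires is narrowed to first_save_gpr..last_save_gpr, or
   removed entirely if no GPR saves are needed.  */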
9934 if (GET_CODE (PATTERN (insn)) == PARALLEL
9935 && store_multiple_operation (PATTERN (insn), VOIDmode))
9936 {
9937 set = XVECEXP (PATTERN (insn), 0, 0);
9938 first = REGNO (SET_SRC (set));
9939 last = first + XVECLEN (PATTERN (insn), 0) - 1;
9940 offset = const0_rtx;
9941 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
9942 off = INTVAL (offset);
9943
9944 if (GET_CODE (base) != REG || off < 0)
9945 continue;
9946 if (cfun_frame_layout.first_save_gpr != -1
9947 && (cfun_frame_layout.first_save_gpr < first
9948 || cfun_frame_layout.last_save_gpr > last))
9949 continue;
9950 if (REGNO (base) != STACK_POINTER_REGNUM
9951 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
9952 continue;
9953 if (first > BASE_REGNUM || last < BASE_REGNUM)
9954 continue;
9955
9956 if (cfun_frame_layout.first_save_gpr != -1)
9957 {
9958 new_insn = save_gprs (base,
9959 off + (cfun_frame_layout.first_save_gpr
9960 - first) * UNITS_PER_LONG,
9961 cfun_frame_layout.first_save_gpr,
9962 cfun_frame_layout.last_save_gpr);
9963 new_insn = emit_insn_before (new_insn, insn);
9964 INSN_ADDRESSES_NEW (new_insn, -1);
9965 }
9966
9967 remove_insn (insn);
9968 continue;
9969 }
9970
9971 if (cfun_frame_layout.first_save_gpr == -1
9972 && GET_CODE (PATTERN (insn)) == SET
9973 && GET_CODE (SET_SRC (PATTERN (insn))) == REG
9974 && (REGNO (SET_SRC (PATTERN (insn))) == BASE_REGNUM
9975 || (!TARGET_CPU_ZARCH
9976 && REGNO (SET_SRC (PATTERN (insn))) == RETURN_REGNUM))
9977 && GET_CODE (SET_DEST (PATTERN (insn))) == MEM)
9978 {
9979 set = PATTERN (insn);
9980 first = REGNO (SET_SRC (set));
9981 offset = const0_rtx;
9982 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
9983 off = INTVAL (offset);
9984
9985 if (GET_CODE (base) != REG || off < 0)
9986 continue;
9987 if (REGNO (base) != STACK_POINTER_REGNUM
9988 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
9989 continue;
9990
9991 remove_insn (insn);
9992 continue;
9993 }
9994
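/* Likewise, an epilogue load-multiple is narrowed to
   first_restore_gpr..last_restore_gpr or removed.  */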
9995 if (GET_CODE (PATTERN (insn)) == PARALLEL
9996 && load_multiple_operation (PATTERN (insn), VOIDmode))
9997 {
9998 set = XVECEXP (PATTERN (insn), 0, 0);
9999 first = REGNO (SET_DEST (set));
10000 last = first + XVECLEN (PATTERN (insn), 0) - 1;
10001 offset = const0_rtx;
10002 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
10003 off = INTVAL (offset);
10004
10005 if (GET_CODE (base) != REG || off < 0)
10006 continue;
10007 if (cfun_frame_layout.first_restore_gpr != -1
10008 && (cfun_frame_layout.first_restore_gpr < first
10009 || cfun_frame_layout.last_restore_gpr > last))
10010 continue;
10011 if (REGNO (base) != STACK_POINTER_REGNUM
10012 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10013 continue;
10014 if (first > BASE_REGNUM || last < BASE_REGNUM)
10015 continue;
10016
10017 if (cfun_frame_layout.first_restore_gpr != -1)
10018 {
10019 new_insn = restore_gprs (base,
10020 off + (cfun_frame_layout.first_restore_gpr
10021 - first) * UNITS_PER_LONG,
10022 cfun_frame_layout.first_restore_gpr,
10023 cfun_frame_layout.last_restore_gpr);
10024 new_insn = emit_insn_before (new_insn, insn);
10025 INSN_ADDRESSES_NEW (new_insn, -1);
10026 }
10027
10028 remove_insn (insn);
10029 continue;
10030 }
10031
10032 if (cfun_frame_layout.first_restore_gpr == -1
10033 && GET_CODE (PATTERN (insn)) == SET
10034 && GET_CODE (SET_DEST (PATTERN (insn))) == REG
10035 && (REGNO (SET_DEST (PATTERN (insn))) == BASE_REGNUM
10036 || (!TARGET_CPU_ZARCH
10037 && REGNO (SET_DEST (PATTERN (insn))) == RETURN_REGNUM))
10038 && GET_CODE (SET_SRC (PATTERN (insn))) == MEM)
10039 {
10040 set = PATTERN (insn);
10041 first = REGNO (SET_DEST (set));
10042 offset = const0_rtx;
10043 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
10044 off = INTVAL (offset);
10045
10046 if (GET_CODE (base) != REG || off < 0)
10047 continue;
10048 if (REGNO (base) != STACK_POINTER_REGNUM
10049 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10050 continue;
10051
10052 remove_insn (insn);
10053 continue;
10054 }
10055 }
10056 }
10057
10058 /* On z10 and later the dynamic branch prediction must see the
10059 backward jump within a certain window. If not, it falls back to
10060 the static prediction. This function rearranges the loop backward
10061 branch in a way which makes the static prediction always correct.
10062 The function returns true if it added an instruction. */
10063 static bool
10064 s390_fix_long_loop_prediction (rtx insn)
10065 {
10066 rtx set = single_set (insn);
10067 rtx code_label, label_ref, new_label;
10068 rtx uncond_jump;
10069 rtx cur_insn;
10070 rtx tmp;
10071 int distance;
10072
10073 /* This will exclude branch on count and branch on index patterns
10074 since these are correctly statically predicted. */
10075 if (!set
10076 || SET_DEST (set) != pc_rtx
10077 || GET_CODE (SET_SRC(set)) != IF_THEN_ELSE)
10078 return false;
10079
10080 label_ref = (GET_CODE (XEXP (SET_SRC (set), 1)) == LABEL_REF ?
10081 XEXP (SET_SRC (set), 1) : XEXP (SET_SRC (set), 2));
10082
10083 gcc_assert (GET_CODE (label_ref) == LABEL_REF);
10084
10085 code_label = XEXP (label_ref, 0);
10086
10087 if (INSN_ADDRESSES (INSN_UID (code_label)) == -1
10088 || INSN_ADDRESSES (INSN_UID (insn)) == -1
10089 || (INSN_ADDRESSES (INSN_UID (insn))
10090 - INSN_ADDRESSES (INSN_UID (code_label)) < PREDICT_DISTANCE))
10091 return false;
10092
10093 for (distance = 0, cur_insn = PREV_INSN (insn);
10094 distance < PREDICT_DISTANCE - 6;
10095 distance += get_attr_length (cur_insn), cur_insn = PREV_INSN (cur_insn))
10096 if (!cur_insn || JUMP_P (cur_insn) || LABEL_P (cur_insn))
10097 return false;
10098
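/* Emit an unconditional jump to the original target right after INSN and
   invert INSN so that it becomes a short forward branch around that
   jump.  The loop backward branch is then the unconditional jump, for
   which the static prediction is always correct.  */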
10099 new_label = gen_label_rtx ();
10100 uncond_jump = emit_jump_insn_after (
10101 gen_rtx_SET (VOIDmode, pc_rtx,
10102 gen_rtx_LABEL_REF (VOIDmode, code_label)),
10103 insn);
10104 emit_label_after (new_label, uncond_jump);
10105
10106 tmp = XEXP (SET_SRC (set), 1);
10107 XEXP (SET_SRC (set), 1) = XEXP (SET_SRC (set), 2);
10108 XEXP (SET_SRC (set), 2) = tmp;
10109 INSN_CODE (insn) = -1;
10110
10111 XEXP (label_ref, 0) = new_label;
10112 JUMP_LABEL (insn) = new_label;
10113 JUMP_LABEL (uncond_jump) = code_label;
10114
10115 return true;
10116 }
10117
10118 /* Returns 1 if INSN reads the value of REG for purposes not related
10119 to addressing of memory, and 0 otherwise. */
10120 static int
10121 s390_non_addr_reg_read_p (rtx reg, rtx insn)
10122 {
10123 return reg_referenced_p (reg, PATTERN (insn))
10124 && !reg_used_in_mem_p (REGNO (reg), PATTERN (insn));
10125 }
10126
10127 /* Starting from INSN find_cond_jump looks downwards in the insn
10128 stream for a single jump insn which is the last user of the
10129 condition code set in INSN. */
10130 static rtx
10131 find_cond_jump (rtx insn)
10132 {
10133 for (; insn; insn = NEXT_INSN (insn))
10134 {
10135 rtx ite, cc;
10136
10137 if (LABEL_P (insn))
10138 break;
10139
10140 if (!JUMP_P (insn))
10141 {
10142 if (reg_mentioned_p (gen_rtx_REG (CCmode, CC_REGNUM), insn))
10143 break;
10144 continue;
10145 }
10146
10147 /* This will be triggered by a return. */
10148 if (GET_CODE (PATTERN (insn)) != SET)
10149 break;
10150
10151 gcc_assert (SET_DEST (PATTERN (insn)) == pc_rtx);
10152 ite = SET_SRC (PATTERN (insn));
10153
10154 if (GET_CODE (ite) != IF_THEN_ELSE)
10155 break;
10156
10157 cc = XEXP (XEXP (ite, 0), 0);
10158 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc)))
10159 break;
10160
10161 if (find_reg_note (insn, REG_DEAD, cc))
10162 return insn;
10163 break;
10164 }
10165
10166 return NULL_RTX;
10167 }
10168
10169 /* Swap the condition in COND and the operands in OP0 and OP1 so that
10170 the semantics does not change. If NULL_RTX is passed as COND the
10171 function tries to find the conditional jump starting with INSN. */
10172 static void
10173 s390_swap_cmp (rtx cond, rtx *op0, rtx *op1, rtx insn)
10174 {
10175 rtx tmp = *op0;
10176
10177 if (cond == NULL_RTX)
10178 {
10179 rtx jump = find_cond_jump (NEXT_INSN (insn));
10180 jump = jump ? single_set (jump) : NULL_RTX;
10181
10182 if (jump == NULL_RTX)
10183 return;
10184
10185 cond = XEXP (XEXP (jump, 1), 0);
10186 }
10187
10188 *op0 = *op1;
10189 *op1 = tmp;
10190 PUT_CODE (cond, swap_condition (GET_CODE (cond)));
10191 }
10192
10193 /* On z10, instructions of the compare-and-branch family have the
10194 property to access the register occurring as second operand with
10195 its bits complemented. If such a compare is grouped with a second
10196 instruction that accesses the same register non-complemented, and
10197 if that register's value is delivered via a bypass, then the
10198 pipeline recycles, thereby causing significant performance decline.
10199 This function locates such situations and exchanges the two
10200 operands of the compare. The function returns true whenever it
10201 added an insn. */
10202 static bool
10203 s390_z10_optimize_cmp (rtx insn)
10204 {
10205 rtx prev_insn, next_insn;
10206 bool insn_added_p = false;
10207 rtx cond, *op0, *op1;
10208
10209 if (GET_CODE (PATTERN (insn)) == PARALLEL)
10210 {
10211 /* Handle compare and branch and branch on count
10212 instructions. */
10213 rtx pattern = single_set (insn);
10214
10215 if (!pattern
10216 || SET_DEST (pattern) != pc_rtx
10217 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE)
10218 return false;
10219
10220 cond = XEXP (SET_SRC (pattern), 0);
10221 op0 = &XEXP (cond, 0);
10222 op1 = &XEXP (cond, 1);
10223 }
10224 else if (GET_CODE (PATTERN (insn)) == SET)
10225 {
10226 rtx src, dest;
10227
10228 /* Handle normal compare instructions. */
10229 src = SET_SRC (PATTERN (insn));
10230 dest = SET_DEST (PATTERN (insn));
10231
10232 if (!REG_P (dest)
10233 || !CC_REGNO_P (REGNO (dest))
10234 || GET_CODE (src) != COMPARE)
10235 return false;
10236
10237 /* s390_swap_cmp will try to find the conditional
10238 jump when passing NULL_RTX as condition. */
10239 cond = NULL_RTX;
10240 op0 = &XEXP (src, 0);
10241 op1 = &XEXP (src, 1);
10242 }
10243 else
10244 return false;
10245
10246 if (!REG_P (*op0) || !REG_P (*op1))
10247 return false;
10248
10249 if (GET_MODE_CLASS (GET_MODE (*op0)) != MODE_INT)
10250 return false;
10251
10252 /* Swap the COMPARE arguments and its mask if there is a
10253 conflicting access in the previous insn. */
10254 prev_insn = prev_active_insn (insn);
10255 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
10256 && reg_referenced_p (*op1, PATTERN (prev_insn)))
10257 s390_swap_cmp (cond, op0, op1, insn);
10258
10259 /* Check if there is a conflict with the next insn. If there
10260 was no conflict with the previous insn, then swap the
10261 COMPARE arguments and its mask. If we already swapped
10262 the operands, or if swapping them would cause a conflict
10263 with the previous insn, issue a NOP after the COMPARE in
10264 order to separate the two instructions. */
10265 next_insn = next_active_insn (insn);
10266 if (next_insn != NULL_RTX && INSN_P (next_insn)
10267 && s390_non_addr_reg_read_p (*op1, next_insn))
10268 {
10269 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
10270 && s390_non_addr_reg_read_p (*op0, prev_insn))
10271 {
10272 if (REGNO (*op1) == 0)
10273 emit_insn_after (gen_nop1 (), insn);
10274 else
10275 emit_insn_after (gen_nop (), insn);
10276 insn_added_p = true;
10277 }
10278 else
10279 s390_swap_cmp (cond, op0, op1, insn);
10280 }
10281 return insn_added_p;
10282 }
10283
10284 /* Perform machine-dependent processing. */
10285
10286 static void
10287 s390_reorg (void)
10288 {
10289 bool pool_overflow = false;
10290
10291 /* Make sure all splits have been performed; splits after
10292 machine_dependent_reorg might confuse insn length counts. */
10293 split_all_insns_noflow ();
10294
10295 /* Install the main literal pool and the associated base
10296 register load insns.
10297
10298 In addition, there are two problematic situations we need
10299 to correct:
10300
10301 - the literal pool might be > 4096 bytes in size, so that
10302 some of its elements cannot be directly accessed
10303
10304 - a branch target might be > 64K away from the branch, so that
10305 it is not possible to use a PC-relative instruction.
10306
10307 To fix those, we split the single literal pool into multiple
10308 pool chunks, reloading the pool base register at various
10309 points throughout the function to ensure it always points to
10310 the pool chunk the following code expects, and / or replace
10311 PC-relative branches by absolute branches.
10312
10313 However, the two problems are interdependent: splitting the
10314 literal pool can move a branch further away from its target,
10315 causing the 64K limit to overflow, and on the other hand,
10316 replacing a PC-relative branch by an absolute branch means
10317 we need to put the branch target address into the literal
10318 pool, possibly causing it to overflow.
10319
10320 So, we loop trying to fix up both problems until we manage
10321 to satisfy both conditions at the same time. Note that the
10322 loop is guaranteed to terminate as every pass of the loop
10323 strictly decreases the total number of PC-relative branches
10324 in the function. (This is not completely true as there
10325 might be branch-over-pool insns introduced by chunkify_start.
10326 Those never need to be split however.) */
10327
10328 for (;;)
10329 {
10330 struct constant_pool *pool = NULL;
10331
10332 /* Collect the literal pool. */
10333 if (!pool_overflow)
10334 {
10335 pool = s390_mainpool_start ();
10336 if (!pool)
10337 pool_overflow = true;
10338 }
10339
10340 /* If literal pool overflowed, start to chunkify it. */
10341 if (pool_overflow)
10342 pool = s390_chunkify_start ();
10343
10344 /* Split out-of-range branches. If this has created new
10345 literal pool entries, cancel current chunk list and
10346 recompute it. zSeries machines have large branch
10347 instructions, so we never need to split a branch. */
10348 if (!TARGET_CPU_ZARCH && s390_split_branches ())
10349 {
10350 if (pool_overflow)
10351 s390_chunkify_cancel (pool);
10352 else
10353 s390_mainpool_cancel (pool);
10354
10355 continue;
10356 }
10357
10358 /* If we made it up to here, both conditions are satisfied.
10359 Finish up literal pool related changes. */
10360 if (pool_overflow)
10361 s390_chunkify_finish (pool);
10362 else
10363 s390_mainpool_finish (pool);
10364
10365 /* We're done splitting branches. */
10366 cfun->machine->split_branches_pending_p = false;
10367 break;
10368 }
10369
10370 /* Generate out-of-pool execute target insns. */
10371 if (TARGET_CPU_ZARCH)
10372 {
10373 rtx insn, label, target;
10374
10375 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10376 {
10377 label = s390_execute_label (insn);
10378 if (!label)
10379 continue;
10380
10381 gcc_assert (label != const0_rtx);
10382
10383 target = emit_label (XEXP (label, 0));
10384 INSN_ADDRESSES_NEW (target, -1);
10385
10386 target = emit_insn (s390_execute_target (insn));
10387 INSN_ADDRESSES_NEW (target, -1);
10388 }
10389 }
10390
10391 /* Try to optimize prologue and epilogue further. */
10392 s390_optimize_prologue ();
10393
10394 /* Walk over the insns and do some >=z10 specific changes. */
10395 if (s390_tune == PROCESSOR_2097_Z10
10396 || s390_tune == PROCESSOR_2817_Z196)
10397 {
10398 rtx insn;
10399 bool insn_added_p = false;
10400
10401 /* The insn lengths and addresses have to be up to date for the
10402 following manipulations. */
10403 shorten_branches (get_insns ());
10404
10405 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10406 {
10407 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
10408 continue;
10409
10410 if (JUMP_P (insn))
10411 insn_added_p |= s390_fix_long_loop_prediction (insn);
10412
10413 if ((GET_CODE (PATTERN (insn)) == PARALLEL
10414 || GET_CODE (PATTERN (insn)) == SET)
10415 && s390_tune == PROCESSOR_2097_Z10)
10416 insn_added_p |= s390_z10_optimize_cmp (insn);
10417 }
10418
10419 /* Adjust branches if we added new instructions. */
10420 if (insn_added_p)
10421 shorten_branches (get_insns ());
10422 }
10423 }
10424
10425 /* Return true if INSN is a fp load insn writing register REGNO. */
10426 static inline bool
10427 s390_fpload_toreg (rtx insn, unsigned int regno)
10428 {
10429 rtx set;
10430 enum attr_type flag = s390_safe_attr_type (insn);
10431
10432 if (flag != TYPE_FLOADSF && flag != TYPE_FLOADDF)
10433 return false;
10434
10435 set = single_set (insn);
10436
10437 if (set == NULL_RTX)
10438 return false;
10439
10440 if (!REG_P (SET_DEST (set)) || !MEM_P (SET_SRC (set)))
10441 return false;
10442
10443 if (REGNO (SET_DEST (set)) != regno)
10444 return false;
10445
10446 return true;
10447 }
10448
10449 /* This value describes the distance to be avoided between an
10450 arithmetic fp instruction and an fp load writing the same register.
10451 Z10_EARLYLOAD_DISTANCE - 1 as well as Z10_EARLYLOAD_DISTANCE + 1 are
10452 fine, but the exact value has to be avoided. Otherwise the FP
10453 pipeline will throw an exception causing a major penalty. */
10454 #define Z10_EARLYLOAD_DISTANCE 7
10455
10456 /* Rearrange the ready list in order to avoid the situation described
10457 for Z10_EARLYLOAD_DISTANCE. A problematic load instruction is
10458 moved to the very end of the ready list. */
10459 static void
10460 s390_z10_prevent_earlyload_conflicts (rtx *ready, int *nready_p)
10461 {
10462 unsigned int regno;
10463 int nready = *nready_p;
10464 rtx tmp;
10465 int i;
10466 rtx insn;
10467 rtx set;
10468 enum attr_type flag;
10469 int distance;
10470
10471 /* Skip DISTANCE - 1 active insns. */
10472 for (insn = last_scheduled_insn, distance = Z10_EARLYLOAD_DISTANCE - 1;
10473 distance > 0 && insn != NULL_RTX;
10474 distance--, insn = prev_active_insn (insn))
10475 if (CALL_P (insn) || JUMP_P (insn))
10476 return;
10477
10478 if (insn == NULL_RTX)
10479 return;
10480
10481 set = single_set (insn);
10482
10483 if (set == NULL_RTX || !REG_P (SET_DEST (set))
10484 || GET_MODE_CLASS (GET_MODE (SET_DEST (set))) != MODE_FLOAT)
10485 return;
10486
10487 flag = s390_safe_attr_type (insn);
10488
10489 if (flag == TYPE_FLOADSF || flag == TYPE_FLOADDF)
10490 return;
10491
10492 regno = REGNO (SET_DEST (set));
10493 i = nready - 1;
10494
10495 while (!s390_fpload_toreg (ready[i], regno) && i > 0)
10496 i--;
10497
10498 if (!i)
10499 return;
10500
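/* ready[i] is an fp load writing the same register.  Move it to slot 0,
   the very end of the ready list, so that its issue is delayed.  */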
10501 tmp = ready[i];
10502 memmove (&ready[1], &ready[0], sizeof (rtx) * i);
10503 ready[0] = tmp;
10504 }
10505
10506 /* This function is called via hook TARGET_SCHED_REORDER before
10507 issuing one insn from list READY, which contains *NREADYP entries.
10508 For target z10 it reorders load instructions to avoid early load
10509 conflicts in the floating point pipeline. */
10510 static int
10511 s390_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
10512 rtx *ready, int *nreadyp, int clock ATTRIBUTE_UNUSED)
10513 {
10514 if (s390_tune == PROCESSOR_2097_Z10)
10515 if (reload_completed && *nreadyp > 1)
10516 s390_z10_prevent_earlyload_conflicts (ready, nreadyp);
10517
10518 return s390_issue_rate ();
10519 }
10520
10521 /* This function is called via hook TARGET_SCHED_VARIABLE_ISSUE after
10522 the scheduler has issued INSN. It stores the last issued insn into
10523 last_scheduled_insn in order to make it available for
10524 s390_sched_reorder. */
10525 static int
10526 s390_sched_variable_issue (FILE *file ATTRIBUTE_UNUSED,
10527 int verbose ATTRIBUTE_UNUSED,
10528 rtx insn, int more)
10529 {
10530 last_scheduled_insn = insn;
10531
10532 if (GET_CODE (PATTERN (insn)) != USE
10533 && GET_CODE (PATTERN (insn)) != CLOBBER)
10534 return more - 1;
10535 else
10536 return more;
10537 }
10538
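/* This function is called via hook TARGET_SCHED_INIT.  It clears
   last_scheduled_insn before a new block is scheduled so that
   s390_sched_reorder does not consult an insn from a previously
   scheduled region.  */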
10539 static void
10540 s390_sched_init (FILE *file ATTRIBUTE_UNUSED,
10541 int verbose ATTRIBUTE_UNUSED,
10542 int max_ready ATTRIBUTE_UNUSED)
10543 {
10544 last_scheduled_insn = NULL_RTX;
10545 }
10546
10547 /* This function checks the whole of insn X for memory references. The
10548 function always returns zero because the framework it is called
10549 from would stop recursively analyzing the insn upon a return value
10550 other than zero. The real result of this function is updating
10551 counter variable MEM_COUNT. */
10552 static int
10553 check_dpu (rtx *x, unsigned *mem_count)
10554 {
10555 if (*x != NULL_RTX && MEM_P (*x))
10556 (*mem_count)++;
10557 return 0;
10558 }
10559
10560 /* This target hook implementation for TARGET_LOOP_UNROLL_ADJUST calculates
10561 a new number of times struct loop *loop should be unrolled if tuned for cpus with
10562 a built-in stride prefetcher.
10563 The loop is analyzed for memory accesses by calling check_dpu for
10564 each rtx of the loop. Depending on the loop_depth and the number of
10565 memory accesses, a new number <= nunroll is returned to improve the
10566 behaviour of the hardware prefetch unit. */
10567 static unsigned
10568 s390_loop_unroll_adjust (unsigned nunroll, struct loop *loop)
10569 {
10570 basic_block *bbs;
10571 rtx insn;
10572 unsigned i;
10573 unsigned mem_count = 0;
10574
10575 if (s390_tune != PROCESSOR_2097_Z10 && s390_tune != PROCESSOR_2817_Z196)
10576 return nunroll;
10577
10578 /* Count the number of memory references within the loop body. */
10579 bbs = get_loop_body (loop);
10580 for (i = 0; i < loop->num_nodes; i++)
10581 {
10582 for (insn = BB_HEAD (bbs[i]); insn != BB_END (bbs[i]); insn = NEXT_INSN (insn))
10583 if (INSN_P (insn) && INSN_CODE (insn) != -1)
10584 for_each_rtx (&insn, (rtx_function) check_dpu, &mem_count);
10585 }
10586 free (bbs);
10587
10588 /* Prevent division by zero; nunroll does not need to be adjusted in this case. */
10589 if (mem_count == 0)
10590 return nunroll;
10591
10592 switch (loop_depth(loop))
10593 {
10594 case 1:
10595 return MIN (nunroll, 28 / mem_count);
10596 case 2:
10597 return MIN (nunroll, 22 / mem_count);
10598 default:
10599 return MIN (nunroll, 16 / mem_count);
10600 }
10601 }
10602
10603 /* Initialize GCC target structure. */
10604
10605 #undef TARGET_ASM_ALIGNED_HI_OP
10606 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
10607 #undef TARGET_ASM_ALIGNED_DI_OP
10608 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
10609 #undef TARGET_ASM_INTEGER
10610 #define TARGET_ASM_INTEGER s390_assemble_integer
10611
10612 #undef TARGET_ASM_OPEN_PAREN
10613 #define TARGET_ASM_OPEN_PAREN ""
10614
10615 #undef TARGET_ASM_CLOSE_PAREN
10616 #define TARGET_ASM_CLOSE_PAREN ""
10617
10618 #undef TARGET_OPTION_OVERRIDE
10619 #define TARGET_OPTION_OVERRIDE s390_option_override
10620
10621 #undef TARGET_ENCODE_SECTION_INFO
10622 #define TARGET_ENCODE_SECTION_INFO s390_encode_section_info
10623
10624 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10625 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
10626
10627 #ifdef HAVE_AS_TLS
10628 #undef TARGET_HAVE_TLS
10629 #define TARGET_HAVE_TLS true
10630 #endif
10631 #undef TARGET_CANNOT_FORCE_CONST_MEM
10632 #define TARGET_CANNOT_FORCE_CONST_MEM s390_cannot_force_const_mem
10633
10634 #undef TARGET_DELEGITIMIZE_ADDRESS
10635 #define TARGET_DELEGITIMIZE_ADDRESS s390_delegitimize_address
10636
10637 #undef TARGET_LEGITIMIZE_ADDRESS
10638 #define TARGET_LEGITIMIZE_ADDRESS s390_legitimize_address
10639
10640 #undef TARGET_RETURN_IN_MEMORY
10641 #define TARGET_RETURN_IN_MEMORY s390_return_in_memory
10642
10643 #undef TARGET_INIT_BUILTINS
10644 #define TARGET_INIT_BUILTINS s390_init_builtins
10645 #undef TARGET_EXPAND_BUILTIN
10646 #define TARGET_EXPAND_BUILTIN s390_expand_builtin
10647
10648 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
10649 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA s390_output_addr_const_extra
10650
10651 #undef TARGET_ASM_OUTPUT_MI_THUNK
10652 #define TARGET_ASM_OUTPUT_MI_THUNK s390_output_mi_thunk
10653 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
10654 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
10655
10656 #undef TARGET_SCHED_ADJUST_PRIORITY
10657 #define TARGET_SCHED_ADJUST_PRIORITY s390_adjust_priority
10658 #undef TARGET_SCHED_ISSUE_RATE
10659 #define TARGET_SCHED_ISSUE_RATE s390_issue_rate
10660 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
10661 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD s390_first_cycle_multipass_dfa_lookahead
10662
10663 #undef TARGET_SCHED_VARIABLE_ISSUE
10664 #define TARGET_SCHED_VARIABLE_ISSUE s390_sched_variable_issue
10665 #undef TARGET_SCHED_REORDER
10666 #define TARGET_SCHED_REORDER s390_sched_reorder
10667 #undef TARGET_SCHED_INIT
10668 #define TARGET_SCHED_INIT s390_sched_init
10669
10670 #undef TARGET_CANNOT_COPY_INSN_P
10671 #define TARGET_CANNOT_COPY_INSN_P s390_cannot_copy_insn_p
10672 #undef TARGET_RTX_COSTS
10673 #define TARGET_RTX_COSTS s390_rtx_costs
10674 #undef TARGET_ADDRESS_COST
10675 #define TARGET_ADDRESS_COST s390_address_cost
10676 #undef TARGET_REGISTER_MOVE_COST
10677 #define TARGET_REGISTER_MOVE_COST s390_register_move_cost
10678 #undef TARGET_MEMORY_MOVE_COST
10679 #define TARGET_MEMORY_MOVE_COST s390_memory_move_cost
10680
10681 #undef TARGET_MACHINE_DEPENDENT_REORG
10682 #define TARGET_MACHINE_DEPENDENT_REORG s390_reorg
10683
10684 #undef TARGET_VALID_POINTER_MODE
10685 #define TARGET_VALID_POINTER_MODE s390_valid_pointer_mode
10686
10687 #undef TARGET_BUILD_BUILTIN_VA_LIST
10688 #define TARGET_BUILD_BUILTIN_VA_LIST s390_build_builtin_va_list
10689 #undef TARGET_EXPAND_BUILTIN_VA_START
10690 #define TARGET_EXPAND_BUILTIN_VA_START s390_va_start
10691 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
10692 #define TARGET_GIMPLIFY_VA_ARG_EXPR s390_gimplify_va_arg
10693
10694 #undef TARGET_PROMOTE_FUNCTION_MODE
10695 #define TARGET_PROMOTE_FUNCTION_MODE s390_promote_function_mode
10696 #undef TARGET_PASS_BY_REFERENCE
10697 #define TARGET_PASS_BY_REFERENCE s390_pass_by_reference
10698
10699 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
10700 #define TARGET_FUNCTION_OK_FOR_SIBCALL s390_function_ok_for_sibcall
10701 #undef TARGET_FUNCTION_ARG
10702 #define TARGET_FUNCTION_ARG s390_function_arg
10703 #undef TARGET_FUNCTION_ARG_ADVANCE
10704 #define TARGET_FUNCTION_ARG_ADVANCE s390_function_arg_advance
10705 #undef TARGET_FUNCTION_VALUE
10706 #define TARGET_FUNCTION_VALUE s390_function_value
10707 #undef TARGET_LIBCALL_VALUE
10708 #define TARGET_LIBCALL_VALUE s390_libcall_value
10709
10710 #undef TARGET_FIXED_CONDITION_CODE_REGS
10711 #define TARGET_FIXED_CONDITION_CODE_REGS s390_fixed_condition_code_regs
10712
10713 #undef TARGET_CC_MODES_COMPATIBLE
10714 #define TARGET_CC_MODES_COMPATIBLE s390_cc_modes_compatible
10715
10716 #undef TARGET_INVALID_WITHIN_DOLOOP
10717 #define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_null
10718
10719 #ifdef HAVE_AS_TLS
10720 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
10721 #define TARGET_ASM_OUTPUT_DWARF_DTPREL s390_output_dwarf_dtprel
10722 #endif
10723
10724 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
10725 #undef TARGET_MANGLE_TYPE
10726 #define TARGET_MANGLE_TYPE s390_mangle_type
10727 #endif
10728
10729 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10730 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
10731
10732 #undef TARGET_PREFERRED_RELOAD_CLASS
10733 #define TARGET_PREFERRED_RELOAD_CLASS s390_preferred_reload_class
10734
10735 #undef TARGET_SECONDARY_RELOAD
10736 #define TARGET_SECONDARY_RELOAD s390_secondary_reload
10737
10738 #undef TARGET_LIBGCC_CMP_RETURN_MODE
10739 #define TARGET_LIBGCC_CMP_RETURN_MODE s390_libgcc_cmp_return_mode
10740
10741 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
10742 #define TARGET_LIBGCC_SHIFT_COUNT_MODE s390_libgcc_shift_count_mode
10743
10744 #undef TARGET_LEGITIMATE_ADDRESS_P
10745 #define TARGET_LEGITIMATE_ADDRESS_P s390_legitimate_address_p
10746
10747 #undef TARGET_LEGITIMATE_CONSTANT_P
10748 #define TARGET_LEGITIMATE_CONSTANT_P s390_legitimate_constant_p
10749
10750 #undef TARGET_CAN_ELIMINATE
10751 #define TARGET_CAN_ELIMINATE s390_can_eliminate
10752
10753 #undef TARGET_CONDITIONAL_REGISTER_USAGE
10754 #define TARGET_CONDITIONAL_REGISTER_USAGE s390_conditional_register_usage
10755
10756 #undef TARGET_LOOP_UNROLL_ADJUST
10757 #define TARGET_LOOP_UNROLL_ADJUST s390_loop_unroll_adjust
10758
10759 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
10760 #define TARGET_ASM_TRAMPOLINE_TEMPLATE s390_asm_trampoline_template
10761 #undef TARGET_TRAMPOLINE_INIT
10762 #define TARGET_TRAMPOLINE_INIT s390_trampoline_init
10763
10764 #undef TARGET_UNWIND_WORD_MODE
10765 #define TARGET_UNWIND_WORD_MODE s390_unwind_word_mode
10766
10767 struct gcc_target targetm = TARGET_INITIALIZER;
10768
10769 #include "gt-s390.h"