s390.h (PREFERRED_RELOAD_CLASS): Remove.
1 /* Subroutines used for code generation on IBM S/390 and zSeries
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006,
3 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
4 Contributed by Hartmut Penner (hpenner@de.ibm.com) and
5 Ulrich Weigand (uweigand@de.ibm.com) and
6 Andreas Krebbel (Andreas.Krebbel@de.ibm.com).
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "tm_p.h"
31 #include "regs.h"
32 #include "hard-reg-set.h"
33 #include "insn-config.h"
34 #include "conditions.h"
35 #include "output.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "except.h"
39 #include "function.h"
40 #include "recog.h"
41 #include "expr.h"
42 #include "reload.h"
43 #include "diagnostic-core.h"
44 #include "basic-block.h"
45 #include "integrate.h"
46 #include "ggc.h"
47 #include "target.h"
48 #include "target-def.h"
49 #include "debug.h"
50 #include "langhooks.h"
51 #include "optabs.h"
52 #include "gimple.h"
53 #include "df.h"
54 #include "params.h"
55 #include "cfgloop.h"
56
57
58 /* Define the specific costs for a given cpu. */
59
60 struct processor_costs
61 {
62 /* multiplication */
63 const int m; /* cost of an M instruction. */
64 const int mghi; /* cost of an MGHI instruction. */
65 const int mh; /* cost of an MH instruction. */
66 const int mhi; /* cost of an MHI instruction. */
67 const int ml; /* cost of an ML instruction. */
68 const int mr; /* cost of an MR instruction. */
69 const int ms; /* cost of an MS instruction. */
70 const int msg; /* cost of an MSG instruction. */
71 const int msgf; /* cost of an MSGF instruction. */
72 const int msgfr; /* cost of an MSGFR instruction. */
73 const int msgr; /* cost of an MSGR instruction. */
74 const int msr; /* cost of an MSR instruction. */
75 const int mult_df; /* cost of multiplication in DFmode. */
76 const int mxbr;
77 /* square root */
78 const int sqxbr; /* cost of square root in TFmode. */
79 const int sqdbr; /* cost of square root in DFmode. */
80 const int sqebr; /* cost of square root in SFmode. */
81 /* multiply and add */
82 const int madbr; /* cost of multiply and add in DFmode. */
83 const int maebr; /* cost of multiply and add in SFmode. */
84 /* division */
85 const int dxbr;
86 const int ddbr;
87 const int debr;
88 const int dlgr;
89 const int dlr;
90 const int dr;
91 const int dsgfr;
92 const int dsgr;
93 };
94
95 const struct processor_costs *s390_cost;
96
97 static const
98 struct processor_costs z900_cost =
99 {
100 COSTS_N_INSNS (5), /* M */
101 COSTS_N_INSNS (10), /* MGHI */
102 COSTS_N_INSNS (5), /* MH */
103 COSTS_N_INSNS (4), /* MHI */
104 COSTS_N_INSNS (5), /* ML */
105 COSTS_N_INSNS (5), /* MR */
106 COSTS_N_INSNS (4), /* MS */
107 COSTS_N_INSNS (15), /* MSG */
108 COSTS_N_INSNS (7), /* MSGF */
109 COSTS_N_INSNS (7), /* MSGFR */
110 COSTS_N_INSNS (10), /* MSGR */
111 COSTS_N_INSNS (4), /* MSR */
112 COSTS_N_INSNS (7), /* multiplication in DFmode */
113 COSTS_N_INSNS (13), /* MXBR */
114 COSTS_N_INSNS (136), /* SQXBR */
115 COSTS_N_INSNS (44), /* SQDBR */
116 COSTS_N_INSNS (35), /* SQEBR */
117 COSTS_N_INSNS (18), /* MADBR */
118 COSTS_N_INSNS (13), /* MAEBR */
119 COSTS_N_INSNS (134), /* DXBR */
120 COSTS_N_INSNS (30), /* DDBR */
121 COSTS_N_INSNS (27), /* DEBR */
122 COSTS_N_INSNS (220), /* DLGR */
123 COSTS_N_INSNS (34), /* DLR */
124 COSTS_N_INSNS (34), /* DR */
125 COSTS_N_INSNS (32), /* DSGFR */
126 COSTS_N_INSNS (32), /* DSGR */
127 };
128
129 static const
130 struct processor_costs z990_cost =
131 {
132 COSTS_N_INSNS (4), /* M */
133 COSTS_N_INSNS (2), /* MGHI */
134 COSTS_N_INSNS (2), /* MH */
135 COSTS_N_INSNS (2), /* MHI */
136 COSTS_N_INSNS (4), /* ML */
137 COSTS_N_INSNS (4), /* MR */
138 COSTS_N_INSNS (5), /* MS */
139 COSTS_N_INSNS (6), /* MSG */
140 COSTS_N_INSNS (4), /* MSGF */
141 COSTS_N_INSNS (4), /* MSGFR */
142 COSTS_N_INSNS (4), /* MSGR */
143 COSTS_N_INSNS (4), /* MSR */
144 COSTS_N_INSNS (1), /* multiplication in DFmode */
145 COSTS_N_INSNS (28), /* MXBR */
146 COSTS_N_INSNS (130), /* SQXBR */
147 COSTS_N_INSNS (66), /* SQDBR */
148 COSTS_N_INSNS (38), /* SQEBR */
149 COSTS_N_INSNS (1), /* MADBR */
150 COSTS_N_INSNS (1), /* MAEBR */
151 COSTS_N_INSNS (60), /* DXBR */
152 COSTS_N_INSNS (40), /* DDBR */
153 COSTS_N_INSNS (26), /* DEBR */
154 COSTS_N_INSNS (176), /* DLGR */
155 COSTS_N_INSNS (31), /* DLR */
156 COSTS_N_INSNS (31), /* DR */
157 COSTS_N_INSNS (31), /* DSGFR */
158 COSTS_N_INSNS (31), /* DSGR */
159 };
160
161 static const
162 struct processor_costs z9_109_cost =
163 {
164 COSTS_N_INSNS (4), /* M */
165 COSTS_N_INSNS (2), /* MGHI */
166 COSTS_N_INSNS (2), /* MH */
167 COSTS_N_INSNS (2), /* MHI */
168 COSTS_N_INSNS (4), /* ML */
169 COSTS_N_INSNS (4), /* MR */
170 COSTS_N_INSNS (5), /* MS */
171 COSTS_N_INSNS (6), /* MSG */
172 COSTS_N_INSNS (4), /* MSGF */
173 COSTS_N_INSNS (4), /* MSGFR */
174 COSTS_N_INSNS (4), /* MSGR */
175 COSTS_N_INSNS (4), /* MSR */
176 COSTS_N_INSNS (1), /* multiplication in DFmode */
177 COSTS_N_INSNS (28), /* MXBR */
178 COSTS_N_INSNS (130), /* SQXBR */
179 COSTS_N_INSNS (66), /* SQDBR */
180 COSTS_N_INSNS (38), /* SQEBR */
181 COSTS_N_INSNS (1), /* MADBR */
182 COSTS_N_INSNS (1), /* MAEBR */
183 COSTS_N_INSNS (60), /* DXBR */
184 COSTS_N_INSNS (40), /* DDBR */
185 COSTS_N_INSNS (26), /* DEBR */
186 COSTS_N_INSNS (30), /* DLGR */
187 COSTS_N_INSNS (23), /* DLR */
188 COSTS_N_INSNS (23), /* DR */
189 COSTS_N_INSNS (24), /* DSGFR */
190 COSTS_N_INSNS (24), /* DSGR */
191 };
192
193 static const
194 struct processor_costs z10_cost =
195 {
196 COSTS_N_INSNS (10), /* M */
197 COSTS_N_INSNS (10), /* MGHI */
198 COSTS_N_INSNS (10), /* MH */
199 COSTS_N_INSNS (10), /* MHI */
200 COSTS_N_INSNS (10), /* ML */
201 COSTS_N_INSNS (10), /* MR */
202 COSTS_N_INSNS (10), /* MS */
203 COSTS_N_INSNS (10), /* MSG */
204 COSTS_N_INSNS (10), /* MSGF */
205 COSTS_N_INSNS (10), /* MSGFR */
206 COSTS_N_INSNS (10), /* MSGR */
207 COSTS_N_INSNS (10), /* MSR */
208 COSTS_N_INSNS (1) , /* multiplication in DFmode */
209 COSTS_N_INSNS (50), /* MXBR */
210 COSTS_N_INSNS (120), /* SQXBR */
211 COSTS_N_INSNS (52), /* SQDBR */
212 COSTS_N_INSNS (38), /* SQEBR */
213 COSTS_N_INSNS (1), /* MADBR */
214 COSTS_N_INSNS (1), /* MAEBR */
215 COSTS_N_INSNS (111), /* DXBR */
216 COSTS_N_INSNS (39), /* DDBR */
217 COSTS_N_INSNS (32), /* DEBR */
218 COSTS_N_INSNS (160), /* DLGR */
219 COSTS_N_INSNS (71), /* DLR */
220 COSTS_N_INSNS (71), /* DR */
221 COSTS_N_INSNS (71), /* DSGFR */
222 COSTS_N_INSNS (71), /* DSGR */
223 };
224
225 static const
226 struct processor_costs z196_cost =
227 {
228 COSTS_N_INSNS (7), /* M */
229 COSTS_N_INSNS (5), /* MGHI */
230 COSTS_N_INSNS (5), /* MH */
231 COSTS_N_INSNS (5), /* MHI */
232 COSTS_N_INSNS (7), /* ML */
233 COSTS_N_INSNS (7), /* MR */
234 COSTS_N_INSNS (6), /* MS */
235 COSTS_N_INSNS (8), /* MSG */
236 COSTS_N_INSNS (6), /* MSGF */
237 COSTS_N_INSNS (6), /* MSGFR */
238 COSTS_N_INSNS (8), /* MSGR */
239 COSTS_N_INSNS (6), /* MSR */
240 COSTS_N_INSNS (1) , /* multiplication in DFmode */
241 COSTS_N_INSNS (40), /* MXBR B+40 */
242 COSTS_N_INSNS (100), /* SQXBR B+100 */
243 COSTS_N_INSNS (42), /* SQDBR B+42 */
244 COSTS_N_INSNS (28), /* SQEBR B+28 */
245 COSTS_N_INSNS (1), /* MADBR B */
246 COSTS_N_INSNS (1), /* MAEBR B */
247 COSTS_N_INSNS (101), /* DXBR B+101 */
248 COSTS_N_INSNS (29), /* DDBR */
249 COSTS_N_INSNS (22), /* DEBR */
250 COSTS_N_INSNS (160), /* DLGR cracked */
251 COSTS_N_INSNS (160), /* DLR cracked */
252 COSTS_N_INSNS (160), /* DR expanded */
253 COSTS_N_INSNS (160), /* DSGFR cracked */
254 COSTS_N_INSNS (160), /* DSGR cracked */
255 };
256
257 extern int reload_completed;
258
259 /* Kept up to date using the SCHED_VARIABLE_ISSUE hook. */
260 static rtx last_scheduled_insn;
261
 262 /* Structure used to hold the components of an S/390 memory
263 address. A legitimate address on S/390 is of the general
264 form
265 base + index + displacement
266 where any of the components is optional.
267
268 base and index are registers of the class ADDR_REGS,
269 displacement is an unsigned 12-bit immediate constant. */
270
271 struct s390_address
272 {
273 rtx base;
274 rtx indx;
275 rtx disp;
276 bool pointer;
277 bool literal_pool;
278 };
279
 280 /* The cpu we are tuning for.  */
281 enum processor_type s390_tune = PROCESSOR_max;
282 int s390_tune_flags;
283 /* Which instruction set architecture to use. */
284 enum processor_type s390_arch;
285 int s390_arch_flags;
286
287 HOST_WIDE_INT s390_warn_framesize = 0;
288 HOST_WIDE_INT s390_stack_size = 0;
289 HOST_WIDE_INT s390_stack_guard = 0;
290
291 /* The following structure is embedded in the machine
292 specific part of struct function. */
293
294 struct GTY (()) s390_frame_layout
295 {
296 /* Offset within stack frame. */
297 HOST_WIDE_INT gprs_offset;
298 HOST_WIDE_INT f0_offset;
299 HOST_WIDE_INT f4_offset;
300 HOST_WIDE_INT f8_offset;
301 HOST_WIDE_INT backchain_offset;
302
 303   /* Numbers of the first and last gpr for which slots in the register
 304      save area are reserved.  */
305 int first_save_gpr_slot;
306 int last_save_gpr_slot;
307
308 /* Number of first and last gpr to be saved, restored. */
309 int first_save_gpr;
310 int first_restore_gpr;
311 int last_save_gpr;
312 int last_restore_gpr;
313
 314   /* Bits standing for floating point registers.  Set if the
315 respective register has to be saved. Starting with reg 16 (f0)
316 at the rightmost bit.
317 Bit 15 - 8 7 6 5 4 3 2 1 0
318 fpr 15 - 8 7 5 3 1 6 4 2 0
319 reg 31 - 24 23 22 21 20 19 18 17 16 */
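     /* Illustrative example: if only f0 (reg 16, bit 0) and f4 (reg 18,
	bit 2) need to be saved, fpr_bitmap == (1 << 0) | (1 << 2) == 0x5.  */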
320 unsigned int fpr_bitmap;
321
322 /* Number of floating point registers f8-f15 which must be saved. */
323 int high_fprs;
324
325 /* Set if return address needs to be saved.
326 This flag is set by s390_return_addr_rtx if it could not use
327 the initial value of r14 and therefore depends on r14 saved
328 to the stack. */
329 bool save_return_addr_p;
330
331 /* Size of stack frame. */
332 HOST_WIDE_INT frame_size;
333 };
334
335 /* Define the structure for the machine field in struct function. */
336
337 struct GTY(()) machine_function
338 {
339 struct s390_frame_layout frame_layout;
340
341 /* Literal pool base register. */
342 rtx base_reg;
343
344 /* True if we may need to perform branch splitting. */
345 bool split_branches_pending_p;
346
347 /* Some local-dynamic TLS symbol name. */
348 const char *some_ld_name;
349
350 bool has_landing_pad_p;
351 };
352
 353 /* A few accessor macros for struct cfun->machine->s390_frame_layout.  */
354
355 #define cfun_frame_layout (cfun->machine->frame_layout)
356 #define cfun_save_high_fprs_p (!!cfun_frame_layout.high_fprs)
357 #define cfun_gprs_save_area_size ((cfun_frame_layout.last_save_gpr_slot - \
358 cfun_frame_layout.first_save_gpr_slot + 1) * UNITS_PER_LONG)
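/* Illustrative example: if slots are reserved for r6 through r15
   (first_save_gpr_slot == 6, last_save_gpr_slot == 15), the save area
   spans (15 - 6 + 1) * UNITS_PER_LONG == 80 bytes on a 64-bit target.  */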
359 #define cfun_set_fpr_bit(BITNUM) (cfun->machine->frame_layout.fpr_bitmap |= \
360 (1 << (BITNUM)))
361 #define cfun_fpr_bit_p(BITNUM) (!!(cfun->machine->frame_layout.fpr_bitmap & \
362 (1 << (BITNUM))))
363
364 /* Number of GPRs and FPRs used for argument passing. */
365 #define GP_ARG_NUM_REG 5
366 #define FP_ARG_NUM_REG (TARGET_64BIT? 4 : 2)
367
368 /* A couple of shortcuts. */
369 #define CONST_OK_FOR_J(x) \
370 CONST_OK_FOR_CONSTRAINT_P((x), 'J', "J")
371 #define CONST_OK_FOR_K(x) \
372 CONST_OK_FOR_CONSTRAINT_P((x), 'K', "K")
373 #define CONST_OK_FOR_Os(x) \
374 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
375 #define CONST_OK_FOR_Op(x) \
376 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
377 #define CONST_OK_FOR_On(x) \
378 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")
379
380 #define REGNO_PAIR_OK(REGNO, MODE) \
381 (HARD_REGNO_NREGS ((REGNO), (MODE)) == 1 || !((REGNO) & 1))
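/* Illustrative example: a mode needing two GPRs (e.g. TImode on a 64-bit
   target) must start in an even-numbered register, so REGNO_PAIR_OK (4,
   TImode) holds while REGNO_PAIR_OK (5, TImode) does not.  */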
382
 383 /* The read-ahead distance of the dynamic branch prediction unit in
 384    bytes on a z10 (or higher) CPU.  */
385 #define PREDICT_DISTANCE (TARGET_Z10 ? 384 : 2048)
386
387 static enum machine_mode
388 s390_libgcc_cmp_return_mode (void)
389 {
390 return TARGET_64BIT ? DImode : SImode;
391 }
392
393 static enum machine_mode
394 s390_libgcc_shift_count_mode (void)
395 {
396 return TARGET_64BIT ? DImode : SImode;
397 }
398
399 static enum machine_mode
400 s390_unwind_word_mode (void)
401 {
402 return TARGET_64BIT ? DImode : SImode;
403 }
404
405 /* Return true if the back end supports mode MODE. */
406 static bool
407 s390_scalar_mode_supported_p (enum machine_mode mode)
408 {
409 /* In contrast to the default implementation reject TImode constants on 31bit
410 TARGET_ZARCH for ABI compliance. */
411 if (!TARGET_64BIT && TARGET_ZARCH && mode == TImode)
412 return false;
413
414 if (DECIMAL_FLOAT_MODE_P (mode))
415 return default_decimal_float_supported_p ();
416
417 return default_scalar_mode_supported_p (mode);
418 }
419
420 /* Set the has_landing_pad_p flag in struct machine_function to VALUE. */
421
422 void
423 s390_set_has_landing_pad_p (bool value)
424 {
425 cfun->machine->has_landing_pad_p = value;
426 }
427
428 /* If two condition code modes are compatible, return a condition code
429 mode which is compatible with both. Otherwise, return
430 VOIDmode. */
431
432 static enum machine_mode
433 s390_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
434 {
435 if (m1 == m2)
436 return m1;
437
438 switch (m1)
439 {
440 case CCZmode:
441 if (m2 == CCUmode || m2 == CCTmode || m2 == CCZ1mode
442 || m2 == CCSmode || m2 == CCSRmode || m2 == CCURmode)
443 return m2;
444 return VOIDmode;
445
446 case CCSmode:
447 case CCUmode:
448 case CCTmode:
449 case CCSRmode:
450 case CCURmode:
451 case CCZ1mode:
452 if (m2 == CCZmode)
453 return m1;
454
455 return VOIDmode;
456
457 default:
458 return VOIDmode;
459 }
460 return VOIDmode;
461 }
462
463 /* Return true if SET either doesn't set the CC register, or else
464 the source and destination have matching CC modes and that
465 CC mode is at least as constrained as REQ_MODE. */
466
467 static bool
468 s390_match_ccmode_set (rtx set, enum machine_mode req_mode)
469 {
470 enum machine_mode set_mode;
471
472 gcc_assert (GET_CODE (set) == SET);
473
474 if (GET_CODE (SET_DEST (set)) != REG || !CC_REGNO_P (REGNO (SET_DEST (set))))
475 return 1;
476
477 set_mode = GET_MODE (SET_DEST (set));
478 switch (set_mode)
479 {
480 case CCSmode:
481 case CCSRmode:
482 case CCUmode:
483 case CCURmode:
484 case CCLmode:
485 case CCL1mode:
486 case CCL2mode:
487 case CCL3mode:
488 case CCT1mode:
489 case CCT2mode:
490 case CCT3mode:
491 if (req_mode != set_mode)
492 return 0;
493 break;
494
495 case CCZmode:
496 if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode
497 && req_mode != CCSRmode && req_mode != CCURmode)
498 return 0;
499 break;
500
501 case CCAPmode:
502 case CCANmode:
503 if (req_mode != CCAmode)
504 return 0;
505 break;
506
507 default:
508 gcc_unreachable ();
509 }
510
511 return (GET_MODE (SET_SRC (set)) == set_mode);
512 }
513
514 /* Return true if every SET in INSN that sets the CC register
515 has source and destination with matching CC modes and that
516 CC mode is at least as constrained as REQ_MODE.
517 If REQ_MODE is VOIDmode, always return false. */
518
519 bool
520 s390_match_ccmode (rtx insn, enum machine_mode req_mode)
521 {
522 int i;
523
524 /* s390_tm_ccmode returns VOIDmode to indicate failure. */
525 if (req_mode == VOIDmode)
526 return false;
527
528 if (GET_CODE (PATTERN (insn)) == SET)
529 return s390_match_ccmode_set (PATTERN (insn), req_mode);
530
531 if (GET_CODE (PATTERN (insn)) == PARALLEL)
532 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
533 {
534 rtx set = XVECEXP (PATTERN (insn), 0, i);
535 if (GET_CODE (set) == SET)
536 if (!s390_match_ccmode_set (set, req_mode))
537 return false;
538 }
539
540 return true;
541 }
542
543 /* If a test-under-mask instruction can be used to implement
544 (compare (and ... OP1) OP2), return the CC mode required
545 to do that. Otherwise, return VOIDmode.
546 MIXED is true if the instruction can distinguish between
 547    CC1 and CC2 for mixed selected bits (TMxx); it is false
 548    if the instruction cannot (TM).  */
549
550 enum machine_mode
551 s390_tm_ccmode (rtx op1, rtx op2, bool mixed)
552 {
553 int bit0, bit1;
554
555 /* ??? Fixme: should work on CONST_DOUBLE as well. */
556 if (GET_CODE (op1) != CONST_INT || GET_CODE (op2) != CONST_INT)
557 return VOIDmode;
558
559 /* Selected bits all zero: CC0.
560 e.g.: int a; if ((a & (16 + 128)) == 0) */
561 if (INTVAL (op2) == 0)
562 return CCTmode;
563
564 /* Selected bits all one: CC3.
565 e.g.: int a; if ((a & (16 + 128)) == 16 + 128) */
566 if (INTVAL (op2) == INTVAL (op1))
567 return CCT3mode;
568
569 /* Exactly two bits selected, mixed zeroes and ones: CC1 or CC2. e.g.:
570 int a;
571 if ((a & (16 + 128)) == 16) -> CCT1
572 if ((a & (16 + 128)) == 128) -> CCT2 */
573 if (mixed)
574 {
575 bit1 = exact_log2 (INTVAL (op2));
576 bit0 = exact_log2 (INTVAL (op1) ^ INTVAL (op2));
577 if (bit0 != -1 && bit1 != -1)
578 return bit0 > bit1 ? CCT1mode : CCT2mode;
579 }
580
581 return VOIDmode;
582 }
583
584 /* Given a comparison code OP (EQ, NE, etc.) and the operands
585 OP0 and OP1 of a COMPARE, return the mode to be used for the
586 comparison. */
587
588 enum machine_mode
589 s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
590 {
591 switch (code)
592 {
593 case EQ:
594 case NE:
595 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
596 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
597 return CCAPmode;
598 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
599 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
600 return CCAPmode;
601 if ((GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
602 || GET_CODE (op1) == NEG)
603 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
604 return CCLmode;
605
606 if (GET_CODE (op0) == AND)
607 {
608 /* Check whether we can potentially do it via TM. */
609 enum machine_mode ccmode;
610 ccmode = s390_tm_ccmode (XEXP (op0, 1), op1, 1);
611 if (ccmode != VOIDmode)
612 {
613 /* Relax CCTmode to CCZmode to allow fall-back to AND
614 if that turns out to be beneficial. */
615 return ccmode == CCTmode ? CCZmode : ccmode;
616 }
617 }
618
619 if (register_operand (op0, HImode)
620 && GET_CODE (op1) == CONST_INT
621 && (INTVAL (op1) == -1 || INTVAL (op1) == 65535))
622 return CCT3mode;
623 if (register_operand (op0, QImode)
624 && GET_CODE (op1) == CONST_INT
625 && (INTVAL (op1) == -1 || INTVAL (op1) == 255))
626 return CCT3mode;
627
628 return CCZmode;
629
630 case LE:
631 case LT:
632 case GE:
633 case GT:
 634       /* The only overflow condition of NEG and ABS happens when
 635 	 the operand is INT_MIN (the most negative value), whose result
 636 	 stays negative.  So we have an overflow from a positive value to a negative.
637 Using CCAP mode the resulting cc can be used for comparisons. */
638 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
639 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
640 return CCAPmode;
641
642 /* If constants are involved in an add instruction it is possible to use
643 the resulting cc for comparisons with zero. Knowing the sign of the
644 constant the overflow behavior gets predictable. e.g.:
645 int a, b; if ((b = a + c) > 0)
646 with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP */
647 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
648 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
649 {
650 if (INTVAL (XEXP((op0), 1)) < 0)
651 return CCANmode;
652 else
653 return CCAPmode;
654 }
655 /* Fall through. */
656 case UNORDERED:
657 case ORDERED:
658 case UNEQ:
659 case UNLE:
660 case UNLT:
661 case UNGE:
662 case UNGT:
663 case LTGT:
664 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
665 && GET_CODE (op1) != CONST_INT)
666 return CCSRmode;
667 return CCSmode;
668
669 case LTU:
670 case GEU:
671 if (GET_CODE (op0) == PLUS
672 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
673 return CCL1mode;
674
675 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
676 && GET_CODE (op1) != CONST_INT)
677 return CCURmode;
678 return CCUmode;
679
680 case LEU:
681 case GTU:
682 if (GET_CODE (op0) == MINUS
683 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
684 return CCL2mode;
685
686 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
687 && GET_CODE (op1) != CONST_INT)
688 return CCURmode;
689 return CCUmode;
690
691 default:
692 gcc_unreachable ();
693 }
694 }
695
696 /* Replace the comparison OP0 CODE OP1 by a semantically equivalent one
697 that we can implement more efficiently. */
698
699 void
700 s390_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1)
701 {
702 /* Convert ZERO_EXTRACT back to AND to enable TM patterns. */
703 if ((*code == EQ || *code == NE)
704 && *op1 == const0_rtx
705 && GET_CODE (*op0) == ZERO_EXTRACT
706 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
707 && GET_CODE (XEXP (*op0, 2)) == CONST_INT
708 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
709 {
710 rtx inner = XEXP (*op0, 0);
711 HOST_WIDE_INT modesize = GET_MODE_BITSIZE (GET_MODE (inner));
712 HOST_WIDE_INT len = INTVAL (XEXP (*op0, 1));
713 HOST_WIDE_INT pos = INTVAL (XEXP (*op0, 2));
714
715 if (len > 0 && len < modesize
716 && pos >= 0 && pos + len <= modesize
717 && modesize <= HOST_BITS_PER_WIDE_INT)
718 {
719 unsigned HOST_WIDE_INT block;
720 block = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
721 block <<= modesize - pos - len;
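	      /* Illustrative example: for an SImode operand (modesize == 32)
		 with len == 8 and pos == 0, block is 0xff shifted left by 24,
		 i.e. 0xff000000, selecting the most significant byte.  */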
722
723 *op0 = gen_rtx_AND (GET_MODE (inner), inner,
724 gen_int_mode (block, GET_MODE (inner)));
725 }
726 }
727
728 /* Narrow AND of memory against immediate to enable TM. */
729 if ((*code == EQ || *code == NE)
730 && *op1 == const0_rtx
731 && GET_CODE (*op0) == AND
732 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
733 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
734 {
735 rtx inner = XEXP (*op0, 0);
736 rtx mask = XEXP (*op0, 1);
737
738 /* Ignore paradoxical SUBREGs if all extra bits are masked out. */
739 if (GET_CODE (inner) == SUBREG
740 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (inner)))
741 && (GET_MODE_SIZE (GET_MODE (inner))
742 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
743 && ((INTVAL (mask)
744 & GET_MODE_MASK (GET_MODE (inner))
745 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
746 == 0))
747 inner = SUBREG_REG (inner);
748
749 /* Do not change volatile MEMs. */
750 if (MEM_P (inner) && !MEM_VOLATILE_P (inner))
751 {
752 int part = s390_single_part (XEXP (*op0, 1),
753 GET_MODE (inner), QImode, 0);
754 if (part >= 0)
755 {
756 mask = gen_int_mode (s390_extract_part (mask, QImode, 0), QImode);
757 inner = adjust_address_nv (inner, QImode, part);
758 *op0 = gen_rtx_AND (QImode, inner, mask);
759 }
760 }
761 }
762
763 /* Narrow comparisons against 0xffff to HImode if possible. */
764 if ((*code == EQ || *code == NE)
765 && GET_CODE (*op1) == CONST_INT
766 && INTVAL (*op1) == 0xffff
767 && SCALAR_INT_MODE_P (GET_MODE (*op0))
768 && (nonzero_bits (*op0, GET_MODE (*op0))
769 & ~(unsigned HOST_WIDE_INT) 0xffff) == 0)
770 {
771 *op0 = gen_lowpart (HImode, *op0);
772 *op1 = constm1_rtx;
773 }
774
775 /* Remove redundant UNSPEC_CCU_TO_INT conversions if possible. */
776 if (GET_CODE (*op0) == UNSPEC
777 && XINT (*op0, 1) == UNSPEC_CCU_TO_INT
778 && XVECLEN (*op0, 0) == 1
779 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCUmode
780 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
781 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
782 && *op1 == const0_rtx)
783 {
784 enum rtx_code new_code = UNKNOWN;
785 switch (*code)
786 {
787 case EQ: new_code = EQ; break;
788 case NE: new_code = NE; break;
789 case LT: new_code = GTU; break;
790 case GT: new_code = LTU; break;
791 case LE: new_code = GEU; break;
792 case GE: new_code = LEU; break;
793 default: break;
794 }
795
796 if (new_code != UNKNOWN)
797 {
798 *op0 = XVECEXP (*op0, 0, 0);
799 *code = new_code;
800 }
801 }
802
803 /* Remove redundant UNSPEC_CCZ_TO_INT conversions if possible. */
804 if (GET_CODE (*op0) == UNSPEC
805 && XINT (*op0, 1) == UNSPEC_CCZ_TO_INT
806 && XVECLEN (*op0, 0) == 1
807 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCZmode
808 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
809 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
810 && *op1 == const0_rtx)
811 {
812 enum rtx_code new_code = UNKNOWN;
813 switch (*code)
814 {
815 case EQ: new_code = EQ; break;
816 case NE: new_code = NE; break;
817 default: break;
818 }
819
820 if (new_code != UNKNOWN)
821 {
822 *op0 = XVECEXP (*op0, 0, 0);
823 *code = new_code;
824 }
825 }
826
827 /* Simplify cascaded EQ, NE with const0_rtx. */
828 if ((*code == NE || *code == EQ)
829 && (GET_CODE (*op0) == EQ || GET_CODE (*op0) == NE)
830 && GET_MODE (*op0) == SImode
831 && GET_MODE (XEXP (*op0, 0)) == CCZ1mode
832 && REG_P (XEXP (*op0, 0))
833 && XEXP (*op0, 1) == const0_rtx
834 && *op1 == const0_rtx)
835 {
836 if ((*code == EQ && GET_CODE (*op0) == NE)
837 || (*code == NE && GET_CODE (*op0) == EQ))
838 *code = EQ;
839 else
840 *code = NE;
841 *op0 = XEXP (*op0, 0);
842 }
843
844 /* Prefer register over memory as first operand. */
845 if (MEM_P (*op0) && REG_P (*op1))
846 {
847 rtx tem = *op0; *op0 = *op1; *op1 = tem;
848 *code = swap_condition (*code);
849 }
850 }
851
852 /* Emit a compare instruction suitable to implement the comparison
853 OP0 CODE OP1. Return the correct condition RTL to be placed in
854 the IF_THEN_ELSE of the conditional branch testing the result. */
855
856 rtx
857 s390_emit_compare (enum rtx_code code, rtx op0, rtx op1)
858 {
859 enum machine_mode mode = s390_select_ccmode (code, op0, op1);
860 rtx cc;
861
862 /* Do not output a redundant compare instruction if a compare_and_swap
863 pattern already computed the result and the machine modes are compatible. */
864 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
865 {
866 gcc_assert (s390_cc_modes_compatible (GET_MODE (op0), mode)
867 == GET_MODE (op0));
868 cc = op0;
869 }
870 else
871 {
872 cc = gen_rtx_REG (mode, CC_REGNUM);
873 emit_insn (gen_rtx_SET (VOIDmode, cc, gen_rtx_COMPARE (mode, op0, op1)));
874 }
875
876 return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
877 }
878
879 /* Emit a SImode compare and swap instruction setting MEM to NEW_RTX if OLD
880 matches CMP.
881 Return the correct condition RTL to be placed in the IF_THEN_ELSE of the
882 conditional branch testing the result. */
883
884 static rtx
885 s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem, rtx cmp, rtx new_rtx)
886 {
887 emit_insn (gen_sync_compare_and_swapsi (old, mem, cmp, new_rtx));
888 return s390_emit_compare (code, gen_rtx_REG (CCZ1mode, CC_REGNUM), const0_rtx);
889 }
890
891 /* Emit a jump instruction to TARGET. If COND is NULL_RTX, emit an
892 unconditional jump, else a conditional jump under condition COND. */
893
894 void
895 s390_emit_jump (rtx target, rtx cond)
896 {
897 rtx insn;
898
899 target = gen_rtx_LABEL_REF (VOIDmode, target);
900 if (cond)
901 target = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, target, pc_rtx);
902
903 insn = gen_rtx_SET (VOIDmode, pc_rtx, target);
904 emit_jump_insn (insn);
905 }
906
907 /* Return branch condition mask to implement a branch
908 specified by CODE. Return -1 for invalid comparisons. */
909
910 int
911 s390_branch_condition_mask (rtx code)
912 {
913 const int CC0 = 1 << 3;
914 const int CC1 = 1 << 2;
915 const int CC2 = 1 << 1;
916 const int CC3 = 1 << 0;
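   /* Illustrative example: an EQ test in CCZmode maps to CC0 alone, i.e. a
      condition-code mask of 8, which s390_branch_condition_mnemonic renders
      as "e".  */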
917
918 gcc_assert (GET_CODE (XEXP (code, 0)) == REG);
919 gcc_assert (REGNO (XEXP (code, 0)) == CC_REGNUM);
920 gcc_assert (XEXP (code, 1) == const0_rtx);
921
922 switch (GET_MODE (XEXP (code, 0)))
923 {
924 case CCZmode:
925 case CCZ1mode:
926 switch (GET_CODE (code))
927 {
928 case EQ: return CC0;
929 case NE: return CC1 | CC2 | CC3;
930 default: return -1;
931 }
932 break;
933
934 case CCT1mode:
935 switch (GET_CODE (code))
936 {
937 case EQ: return CC1;
938 case NE: return CC0 | CC2 | CC3;
939 default: return -1;
940 }
941 break;
942
943 case CCT2mode:
944 switch (GET_CODE (code))
945 {
946 case EQ: return CC2;
947 case NE: return CC0 | CC1 | CC3;
948 default: return -1;
949 }
950 break;
951
952 case CCT3mode:
953 switch (GET_CODE (code))
954 {
955 case EQ: return CC3;
956 case NE: return CC0 | CC1 | CC2;
957 default: return -1;
958 }
959 break;
960
961 case CCLmode:
962 switch (GET_CODE (code))
963 {
964 case EQ: return CC0 | CC2;
965 case NE: return CC1 | CC3;
966 default: return -1;
967 }
968 break;
969
970 case CCL1mode:
971 switch (GET_CODE (code))
972 {
973 case LTU: return CC2 | CC3; /* carry */
974 case GEU: return CC0 | CC1; /* no carry */
975 default: return -1;
976 }
977 break;
978
979 case CCL2mode:
980 switch (GET_CODE (code))
981 {
982 case GTU: return CC0 | CC1; /* borrow */
983 case LEU: return CC2 | CC3; /* no borrow */
984 default: return -1;
985 }
986 break;
987
988 case CCL3mode:
989 switch (GET_CODE (code))
990 {
991 case EQ: return CC0 | CC2;
992 case NE: return CC1 | CC3;
993 case LTU: return CC1;
994 case GTU: return CC3;
995 case LEU: return CC1 | CC2;
996 case GEU: return CC2 | CC3;
997 default: return -1;
998 }
999
1000 case CCUmode:
1001 switch (GET_CODE (code))
1002 {
1003 case EQ: return CC0;
1004 case NE: return CC1 | CC2 | CC3;
1005 case LTU: return CC1;
1006 case GTU: return CC2;
1007 case LEU: return CC0 | CC1;
1008 case GEU: return CC0 | CC2;
1009 default: return -1;
1010 }
1011 break;
1012
1013 case CCURmode:
1014 switch (GET_CODE (code))
1015 {
1016 case EQ: return CC0;
1017 case NE: return CC2 | CC1 | CC3;
1018 case LTU: return CC2;
1019 case GTU: return CC1;
1020 case LEU: return CC0 | CC2;
1021 case GEU: return CC0 | CC1;
1022 default: return -1;
1023 }
1024 break;
1025
1026 case CCAPmode:
1027 switch (GET_CODE (code))
1028 {
1029 case EQ: return CC0;
1030 case NE: return CC1 | CC2 | CC3;
1031 case LT: return CC1 | CC3;
1032 case GT: return CC2;
1033 case LE: return CC0 | CC1 | CC3;
1034 case GE: return CC0 | CC2;
1035 default: return -1;
1036 }
1037 break;
1038
1039 case CCANmode:
1040 switch (GET_CODE (code))
1041 {
1042 case EQ: return CC0;
1043 case NE: return CC1 | CC2 | CC3;
1044 case LT: return CC1;
1045 case GT: return CC2 | CC3;
1046 case LE: return CC0 | CC1;
1047 case GE: return CC0 | CC2 | CC3;
1048 default: return -1;
1049 }
1050 break;
1051
1052 case CCSmode:
1053 switch (GET_CODE (code))
1054 {
1055 case EQ: return CC0;
1056 case NE: return CC1 | CC2 | CC3;
1057 case LT: return CC1;
1058 case GT: return CC2;
1059 case LE: return CC0 | CC1;
1060 case GE: return CC0 | CC2;
1061 case UNORDERED: return CC3;
1062 case ORDERED: return CC0 | CC1 | CC2;
1063 case UNEQ: return CC0 | CC3;
1064 case UNLT: return CC1 | CC3;
1065 case UNGT: return CC2 | CC3;
1066 case UNLE: return CC0 | CC1 | CC3;
1067 case UNGE: return CC0 | CC2 | CC3;
1068 case LTGT: return CC1 | CC2;
1069 default: return -1;
1070 }
1071 break;
1072
1073 case CCSRmode:
1074 switch (GET_CODE (code))
1075 {
1076 case EQ: return CC0;
1077 case NE: return CC2 | CC1 | CC3;
1078 case LT: return CC2;
1079 case GT: return CC1;
1080 case LE: return CC0 | CC2;
1081 case GE: return CC0 | CC1;
1082 case UNORDERED: return CC3;
1083 case ORDERED: return CC0 | CC2 | CC1;
1084 case UNEQ: return CC0 | CC3;
1085 case UNLT: return CC2 | CC3;
1086 case UNGT: return CC1 | CC3;
1087 case UNLE: return CC0 | CC2 | CC3;
1088 case UNGE: return CC0 | CC1 | CC3;
1089 case LTGT: return CC2 | CC1;
1090 default: return -1;
1091 }
1092 break;
1093
1094 default:
1095 return -1;
1096 }
1097 }
1098
1099
1100 /* Return branch condition mask to implement a compare and branch
1101 specified by CODE. Return -1 for invalid comparisons. */
1102
1103 int
1104 s390_compare_and_branch_condition_mask (rtx code)
1105 {
1106 const int CC0 = 1 << 3;
1107 const int CC1 = 1 << 2;
1108 const int CC2 = 1 << 1;
1109
1110 switch (GET_CODE (code))
1111 {
1112 case EQ:
1113 return CC0;
1114 case NE:
1115 return CC1 | CC2;
1116 case LT:
1117 case LTU:
1118 return CC1;
1119 case GT:
1120 case GTU:
1121 return CC2;
1122 case LE:
1123 case LEU:
1124 return CC0 | CC1;
1125 case GE:
1126 case GEU:
1127 return CC0 | CC2;
1128 default:
1129 gcc_unreachable ();
1130 }
1131 return -1;
1132 }
1133
1134 /* If INV is false, return assembler mnemonic string to implement
1135 a branch specified by CODE. If INV is true, return mnemonic
1136 for the corresponding inverted branch. */
1137
1138 static const char *
1139 s390_branch_condition_mnemonic (rtx code, int inv)
1140 {
1141 int mask;
1142
1143 static const char *const mnemonic[16] =
1144 {
1145 NULL, "o", "h", "nle",
1146 "l", "nhe", "lh", "ne",
1147 "e", "nlh", "he", "nl",
1148 "le", "nh", "no", NULL
1149 };
1150
1151 if (GET_CODE (XEXP (code, 0)) == REG
1152 && REGNO (XEXP (code, 0)) == CC_REGNUM
1153 && XEXP (code, 1) == const0_rtx)
1154 mask = s390_branch_condition_mask (code);
1155 else
1156 mask = s390_compare_and_branch_condition_mask (code);
1157
1158 gcc_assert (mask >= 0);
1159
1160 if (inv)
1161 mask ^= 15;
1162
1163 gcc_assert (mask >= 1 && mask <= 14);
1164
1165 return mnemonic[mask];
1166 }
1167
1168 /* Return the part of op which has a value different from def.
1169 The size of the part is determined by mode.
1170 Use this function only if you already know that op really
1171 contains such a part. */
1172
1173 unsigned HOST_WIDE_INT
1174 s390_extract_part (rtx op, enum machine_mode mode, int def)
1175 {
1176 unsigned HOST_WIDE_INT value = 0;
1177 int max_parts = HOST_BITS_PER_WIDE_INT / GET_MODE_BITSIZE (mode);
1178 int part_bits = GET_MODE_BITSIZE (mode);
1179 unsigned HOST_WIDE_INT part_mask
1180 = ((unsigned HOST_WIDE_INT)1 << part_bits) - 1;
1181 int i;
1182
1183 for (i = 0; i < max_parts; i++)
1184 {
1185 if (i == 0)
1186 value = (unsigned HOST_WIDE_INT) INTVAL (op);
1187 else
1188 value >>= part_bits;
1189
1190 if ((value & part_mask) != (def & part_mask))
1191 return value & part_mask;
1192 }
1193
1194 gcc_unreachable ();
1195 }
1196
1197 /* If OP is an integer constant of mode MODE with exactly one
1198 part of mode PART_MODE unequal to DEF, return the number of that
1199 part. Otherwise, return -1. */
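/* Illustrative example: for OP == (const_int 0x120000), MODE == SImode,
   PART_MODE == HImode and DEF == 0 the halfword parts are 0x0012 (high)
   and 0x0000 (low); only the high part differs from DEF, so 0 is returned
   (parts are numbered starting from the most significant one).  */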
1200
1201 int
1202 s390_single_part (rtx op,
1203 enum machine_mode mode,
1204 enum machine_mode part_mode,
1205 int def)
1206 {
1207 unsigned HOST_WIDE_INT value = 0;
1208 int n_parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (part_mode);
1209 unsigned HOST_WIDE_INT part_mask
1210 = ((unsigned HOST_WIDE_INT)1 << GET_MODE_BITSIZE (part_mode)) - 1;
1211 int i, part = -1;
1212
1213 if (GET_CODE (op) != CONST_INT)
1214 return -1;
1215
1216 for (i = 0; i < n_parts; i++)
1217 {
1218 if (i == 0)
1219 value = (unsigned HOST_WIDE_INT) INTVAL (op);
1220 else
1221 value >>= GET_MODE_BITSIZE (part_mode);
1222
1223 if ((value & part_mask) != (def & part_mask))
1224 {
1225 if (part != -1)
1226 return -1;
1227 else
1228 part = i;
1229 }
1230 }
1231 return part == -1 ? -1 : n_parts - 1 - part;
1232 }
1233
1234 /* Return true if IN contains a contiguous bitfield in the lower SIZE
1235 bits and no other bits are set in IN. POS and LENGTH can be used
1236 to obtain the start position and the length of the bitfield.
1237
1238 POS gives the position of the first bit of the bitfield counting
1239 from the lowest order bit starting with zero. In order to use this
1240 value for S/390 instructions this has to be converted to "bits big
1241 endian" style. */
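/* Illustrative example: s390_contiguous_bitmask_p (0xf0, 16, &pos, &length)
   returns true with *pos == 4 and *length == 4, since 0xf0 is a single run
   of four one-bits starting at bit 4 counted from the least significant
   bit.  */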
1242
1243 bool
1244 s390_contiguous_bitmask_p (unsigned HOST_WIDE_INT in, int size,
1245 int *pos, int *length)
1246 {
1247 int tmp_pos = 0;
1248 int tmp_length = 0;
1249 int i;
1250 unsigned HOST_WIDE_INT mask = 1ULL;
1251 bool contiguous = false;
1252
1253 for (i = 0; i < size; mask <<= 1, i++)
1254 {
1255 if (contiguous)
1256 {
1257 if (mask & in)
1258 tmp_length++;
1259 else
1260 break;
1261 }
1262 else
1263 {
1264 if (mask & in)
1265 {
1266 contiguous = true;
1267 tmp_length++;
1268 }
1269 else
1270 tmp_pos++;
1271 }
1272 }
1273
1274 if (!tmp_length)
1275 return false;
1276
1277 /* Calculate a mask for all bits beyond the contiguous bits. */
1278 mask = (-1LL & ~(((1ULL << (tmp_length + tmp_pos - 1)) << 1) - 1));
1279
1280 if (mask & in)
1281 return false;
1282
1283 if (tmp_length + tmp_pos - 1 > size)
1284 return false;
1285
1286 if (length)
1287 *length = tmp_length;
1288
1289 if (pos)
1290 *pos = tmp_pos;
1291
1292 return true;
1293 }
1294
1295 /* Check whether we can (and want to) split a double-word
1296 move in mode MODE from SRC to DST into two single-word
1297 moves, moving the subword FIRST_SUBWORD first. */
1298
1299 bool
1300 s390_split_ok_p (rtx dst, rtx src, enum machine_mode mode, int first_subword)
1301 {
1302 /* Floating point registers cannot be split. */
1303 if (FP_REG_P (src) || FP_REG_P (dst))
1304 return false;
1305
1306 /* We don't need to split if operands are directly accessible. */
1307 if (s_operand (src, mode) || s_operand (dst, mode))
1308 return false;
1309
1310 /* Non-offsettable memory references cannot be split. */
1311 if ((GET_CODE (src) == MEM && !offsettable_memref_p (src))
1312 || (GET_CODE (dst) == MEM && !offsettable_memref_p (dst)))
1313 return false;
1314
1315 /* Moving the first subword must not clobber a register
1316 needed to move the second subword. */
1317 if (register_operand (dst, mode))
1318 {
1319 rtx subreg = operand_subword (dst, first_subword, 0, mode);
1320 if (reg_overlap_mentioned_p (subreg, src))
1321 return false;
1322 }
1323
1324 return true;
1325 }
1326
1327 /* Return true if it can be proven that [MEM1, MEM1 + SIZE]
1328 and [MEM2, MEM2 + SIZE] do overlap and false
1329 otherwise. */
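/* Illustrative example: for MEM1 at address (reg R) and MEM2 at address
   (plus (reg R) (const_int 4)) with SIZE == 8, the constant delta of 4 is
   smaller than SIZE, so the blocks provably overlap and true is returned.  */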
1330
1331 bool
1332 s390_overlap_p (rtx mem1, rtx mem2, HOST_WIDE_INT size)
1333 {
1334 rtx addr1, addr2, addr_delta;
1335 HOST_WIDE_INT delta;
1336
1337 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
1338 return true;
1339
1340 if (size == 0)
1341 return false;
1342
1343 addr1 = XEXP (mem1, 0);
1344 addr2 = XEXP (mem2, 0);
1345
1346 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
1347
1348 /* This overlapping check is used by peepholes merging memory block operations.
1349 Overlapping operations would otherwise be recognized by the S/390 hardware
1350 and would fall back to a slower implementation. Allowing overlapping
1351 operations would lead to slow code but not to wrong code. Therefore we are
1352 somewhat optimistic if we cannot prove that the memory blocks are
1353 overlapping.
1354 That's why we return false here although this may accept operations on
1355 overlapping memory areas. */
1356 if (!addr_delta || GET_CODE (addr_delta) != CONST_INT)
1357 return false;
1358
1359 delta = INTVAL (addr_delta);
1360
1361 if (delta == 0
1362 || (delta > 0 && delta < size)
1363 || (delta < 0 && -delta < size))
1364 return true;
1365
1366 return false;
1367 }
1368
1369 /* Check whether the address of memory reference MEM2 equals exactly
1370 the address of memory reference MEM1 plus DELTA. Return true if
1371 we can prove this to be the case, false otherwise. */
1372
1373 bool
1374 s390_offset_p (rtx mem1, rtx mem2, rtx delta)
1375 {
1376 rtx addr1, addr2, addr_delta;
1377
1378 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
1379 return false;
1380
1381 addr1 = XEXP (mem1, 0);
1382 addr2 = XEXP (mem2, 0);
1383
1384 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
1385 if (!addr_delta || !rtx_equal_p (addr_delta, delta))
1386 return false;
1387
1388 return true;
1389 }
1390
1391 /* Expand logical operator CODE in mode MODE with operands OPERANDS. */
1392
1393 void
1394 s390_expand_logical_operator (enum rtx_code code, enum machine_mode mode,
1395 rtx *operands)
1396 {
1397 enum machine_mode wmode = mode;
1398 rtx dst = operands[0];
1399 rtx src1 = operands[1];
1400 rtx src2 = operands[2];
1401 rtx op, clob, tem;
1402
1403 /* If we cannot handle the operation directly, use a temp register. */
1404 if (!s390_logical_operator_ok_p (operands))
1405 dst = gen_reg_rtx (mode);
1406
1407 /* QImode and HImode patterns make sense only if we have a destination
1408 in memory. Otherwise perform the operation in SImode. */
1409 if ((mode == QImode || mode == HImode) && GET_CODE (dst) != MEM)
1410 wmode = SImode;
1411
1412 /* Widen operands if required. */
1413 if (mode != wmode)
1414 {
1415 if (GET_CODE (dst) == SUBREG
1416 && (tem = simplify_subreg (wmode, dst, mode, 0)) != 0)
1417 dst = tem;
1418 else if (REG_P (dst))
1419 dst = gen_rtx_SUBREG (wmode, dst, 0);
1420 else
1421 dst = gen_reg_rtx (wmode);
1422
1423 if (GET_CODE (src1) == SUBREG
1424 && (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)
1425 src1 = tem;
1426 else if (GET_MODE (src1) != VOIDmode)
1427 src1 = gen_rtx_SUBREG (wmode, force_reg (mode, src1), 0);
1428
1429 if (GET_CODE (src2) == SUBREG
1430 && (tem = simplify_subreg (wmode, src2, mode, 0)) != 0)
1431 src2 = tem;
1432 else if (GET_MODE (src2) != VOIDmode)
1433 src2 = gen_rtx_SUBREG (wmode, force_reg (mode, src2), 0);
1434 }
1435
1436 /* Emit the instruction. */
1437 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, wmode, src1, src2));
1438 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
1439 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
1440
1441 /* Fix up the destination if needed. */
1442 if (dst != operands[0])
1443 emit_move_insn (operands[0], gen_lowpart (mode, dst));
1444 }
1445
1446 /* Check whether OPERANDS are OK for a logical operation (AND, IOR, XOR). */
1447
1448 bool
1449 s390_logical_operator_ok_p (rtx *operands)
1450 {
1451 /* If the destination operand is in memory, it needs to coincide
1452 with one of the source operands. After reload, it has to be
1453 the first source operand. */
1454 if (GET_CODE (operands[0]) == MEM)
1455 return rtx_equal_p (operands[0], operands[1])
1456 || (!reload_completed && rtx_equal_p (operands[0], operands[2]));
1457
1458 return true;
1459 }
1460
1461 /* Narrow logical operation CODE of memory operand MEMOP with immediate
1462 operand IMMOP to switch from SS to SI type instructions. */
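/* Illustrative example: an AND of an SImode memory operand with the
   immediate 0xffff00ff (DEF is -1 for AND) changes only one byte; the
   access is narrowed to a QImode reference at byte offset 2 and the
   immediate becomes 0x00, allowing an SI-type instruction such as NI
   instead of an SS-type NC.  */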
1463
1464 void
1465 s390_narrow_logical_operator (enum rtx_code code, rtx *memop, rtx *immop)
1466 {
1467 int def = code == AND ? -1 : 0;
1468 HOST_WIDE_INT mask;
1469 int part;
1470
1471 gcc_assert (GET_CODE (*memop) == MEM);
1472 gcc_assert (!MEM_VOLATILE_P (*memop));
1473
1474 mask = s390_extract_part (*immop, QImode, def);
1475 part = s390_single_part (*immop, GET_MODE (*memop), QImode, def);
1476 gcc_assert (part >= 0);
1477
1478 *memop = adjust_address (*memop, QImode, part);
1479 *immop = gen_int_mode (mask, QImode);
1480 }
1481
1482
1483 /* How to allocate a 'struct machine_function'. */
1484
1485 static struct machine_function *
1486 s390_init_machine_status (void)
1487 {
1488 return ggc_alloc_cleared_machine_function ();
1489 }
1490
1491 /* Change optimizations to be performed, depending on the
1492 optimization level. */
1493
1494 static const struct default_options s390_option_optimization_table[] =
1495 {
1496 { OPT_LEVELS_1_PLUS, OPT_fomit_frame_pointer, NULL, 1 },
1497
1498 /* ??? There are apparently still problems with -fcaller-saves. */
1499 { OPT_LEVELS_ALL, OPT_fcaller_saves, NULL, 0 },
1500
1501 /* Use MVCLE instructions to decrease code size if requested. */
1502 { OPT_LEVELS_SIZE, OPT_mmvcle, NULL, 1 },
1503
1504 { OPT_LEVELS_NONE, 0, NULL, 0 }
1505 };
1506
1507 /* Implement TARGET_OPTION_INIT_STRUCT. */
1508
1509 static void
1510 s390_option_init_struct (struct gcc_options *opts)
1511 {
1512 /* By default, always emit DWARF-2 unwind info. This allows debugging
1513 without maintaining a stack frame back-chain. */
1514 opts->x_flag_asynchronous_unwind_tables = 1;
1515 }
1516
1517 /* Return true if ARG is the name of a processor. Set *TYPE and *FLAGS
1518 to the associated processor_type and processor_flags if so. */
1519
1520 static bool
1521 s390_handle_arch_option (const char *arg,
1522 enum processor_type *type,
1523 int *flags)
1524 {
1525 static struct pta
1526 {
1527 const char *const name; /* processor name or nickname. */
1528 const enum processor_type processor;
1529 const int flags; /* From enum processor_flags. */
1530 }
1531 const processor_alias_table[] =
1532 {
1533 {"g5", PROCESSOR_9672_G5, PF_IEEE_FLOAT},
1534 {"g6", PROCESSOR_9672_G6, PF_IEEE_FLOAT},
1535 {"z900", PROCESSOR_2064_Z900, PF_IEEE_FLOAT | PF_ZARCH},
1536 {"z990", PROCESSOR_2084_Z990, PF_IEEE_FLOAT | PF_ZARCH
1537 | PF_LONG_DISPLACEMENT},
1538 {"z9-109", PROCESSOR_2094_Z9_109, PF_IEEE_FLOAT | PF_ZARCH
1539 | PF_LONG_DISPLACEMENT | PF_EXTIMM},
1540 {"z9-ec", PROCESSOR_2094_Z9_109, PF_IEEE_FLOAT | PF_ZARCH
1541 | PF_LONG_DISPLACEMENT | PF_EXTIMM | PF_DFP },
1542 {"z10", PROCESSOR_2097_Z10, PF_IEEE_FLOAT | PF_ZARCH
1543 | PF_LONG_DISPLACEMENT | PF_EXTIMM | PF_DFP | PF_Z10},
1544 {"z196", PROCESSOR_2817_Z196, PF_IEEE_FLOAT | PF_ZARCH
1545 | PF_LONG_DISPLACEMENT | PF_EXTIMM | PF_DFP | PF_Z10 | PF_Z196 },
1546 };
1547 size_t i;
1548
1549 for (i = 0; i < ARRAY_SIZE (processor_alias_table); i++)
1550 if (strcmp (arg, processor_alias_table[i].name) == 0)
1551 {
1552 *type = processor_alias_table[i].processor;
1553 *flags = processor_alias_table[i].flags;
1554 return true;
1555 }
1556
1557 *type = PROCESSOR_max;
1558 *flags = 0;
1559 return false;
1560 }
1561
1562 /* Implement TARGET_HANDLE_OPTION. */
1563
1564 static bool
1565 s390_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
1566 {
1567 switch (code)
1568 {
1569 case OPT_march_:
1570 return s390_handle_arch_option (arg, &s390_arch, &s390_arch_flags);
1571
1572 case OPT_mstack_guard_:
1573 if (sscanf (arg, HOST_WIDE_INT_PRINT_DEC, &s390_stack_guard) != 1)
1574 return false;
1575 if (exact_log2 (s390_stack_guard) == -1)
1576 error ("stack guard value must be an exact power of 2");
1577 return true;
1578
1579 case OPT_mstack_size_:
1580 if (sscanf (arg, HOST_WIDE_INT_PRINT_DEC, &s390_stack_size) != 1)
1581 return false;
1582 if (exact_log2 (s390_stack_size) == -1)
1583 error ("stack size must be an exact power of 2");
1584 return true;
1585
1586 case OPT_mtune_:
1587 return s390_handle_arch_option (arg, &s390_tune, &s390_tune_flags);
1588
1589 case OPT_mwarn_framesize_:
1590 return sscanf (arg, HOST_WIDE_INT_PRINT_DEC, &s390_warn_framesize) == 1;
1591
1592 default:
1593 return true;
1594 }
1595 }
1596
1597 static void
1598 s390_option_override (void)
1599 {
1600 /* Set up function hooks. */
1601 init_machine_status = s390_init_machine_status;
1602
1603 /* Architecture mode defaults according to ABI. */
1604 if (!(target_flags_explicit & MASK_ZARCH))
1605 {
1606 if (TARGET_64BIT)
1607 target_flags |= MASK_ZARCH;
1608 else
1609 target_flags &= ~MASK_ZARCH;
1610 }
1611
1612 /* Determine processor architectural level. */
1613 if (!s390_arch_string)
1614 {
1615 s390_arch_string = TARGET_ZARCH? "z900" : "g5";
1616 s390_handle_arch_option (s390_arch_string, &s390_arch, &s390_arch_flags);
1617 }
1618
1619 /* This check is triggered when the user specified a wrong -march=
1620 string and prevents subsequent error messages from being
1621 issued. */
1622 if (s390_arch == PROCESSOR_max)
1623 return;
1624
1625 /* Determine processor to tune for. */
1626 if (s390_tune == PROCESSOR_max)
1627 {
1628 s390_tune = s390_arch;
1629 s390_tune_flags = s390_arch_flags;
1630 }
1631
1632 /* Sanity checks. */
1633 if (TARGET_ZARCH && !TARGET_CPU_ZARCH)
1634 error ("z/Architecture mode not supported on %s", s390_arch_string);
1635 if (TARGET_64BIT && !TARGET_ZARCH)
1636 error ("64-bit ABI not supported in ESA/390 mode");
1637
1638 if (TARGET_HARD_DFP && !TARGET_DFP)
1639 {
1640 if (target_flags_explicit & MASK_HARD_DFP)
1641 {
1642 if (!TARGET_CPU_DFP)
1643 error ("hardware decimal floating point instructions"
1644 " not available on %s", s390_arch_string);
1645 if (!TARGET_ZARCH)
1646 error ("hardware decimal floating point instructions"
1647 " not available in ESA/390 mode");
1648 }
1649 else
1650 target_flags &= ~MASK_HARD_DFP;
1651 }
1652
1653 if ((target_flags_explicit & MASK_SOFT_FLOAT) && TARGET_SOFT_FLOAT)
1654 {
1655 if ((target_flags_explicit & MASK_HARD_DFP) && TARGET_HARD_DFP)
1656 error ("-mhard-dfp can%'t be used in conjunction with -msoft-float");
1657
1658 target_flags &= ~MASK_HARD_DFP;
1659 }
1660
1661 /* Set processor cost function. */
1662 switch (s390_tune)
1663 {
1664 case PROCESSOR_2084_Z990:
1665 s390_cost = &z990_cost;
1666 break;
1667 case PROCESSOR_2094_Z9_109:
1668 s390_cost = &z9_109_cost;
1669 break;
1670 case PROCESSOR_2097_Z10:
 1671       s390_cost = &z10_cost; break;
1672 case PROCESSOR_2817_Z196:
1673 s390_cost = &z196_cost;
1674 break;
1675 default:
1676 s390_cost = &z900_cost;
1677 }
1678
1679 if (TARGET_BACKCHAIN && TARGET_PACKED_STACK && TARGET_HARD_FLOAT)
1680 error ("-mbackchain -mpacked-stack -mhard-float are not supported "
1681 "in combination");
1682
1683 if (s390_stack_size)
1684 {
1685 if (s390_stack_guard >= s390_stack_size)
1686 error ("stack size must be greater than the stack guard value");
1687 else if (s390_stack_size > 1 << 16)
1688 error ("stack size must not be greater than 64k");
1689 }
1690 else if (s390_stack_guard)
1691 error ("-mstack-guard implies use of -mstack-size");
1692
1693 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
1694 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
1695 target_flags |= MASK_LONG_DOUBLE_128;
1696 #endif
1697
1698 if (s390_tune == PROCESSOR_2097_Z10
1699 || s390_tune == PROCESSOR_2817_Z196)
1700 {
1701 maybe_set_param_value (PARAM_MAX_UNROLLED_INSNS, 100,
1702 global_options.x_param_values,
1703 global_options_set.x_param_values);
1704 maybe_set_param_value (PARAM_MAX_UNROLL_TIMES, 32,
1705 global_options.x_param_values,
1706 global_options_set.x_param_values);
1707 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 2000,
1708 global_options.x_param_values,
1709 global_options_set.x_param_values);
1710 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEEL_TIMES, 64,
1711 global_options.x_param_values,
1712 global_options_set.x_param_values);
1713 }
1714
1715 maybe_set_param_value (PARAM_MAX_PENDING_LIST_LENGTH, 256,
1716 global_options.x_param_values,
1717 global_options_set.x_param_values);
 1718   /* Values for loop prefetching.  */
1719 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, 256,
1720 global_options.x_param_values,
1721 global_options_set.x_param_values);
1722 maybe_set_param_value (PARAM_L1_CACHE_SIZE, 128,
1723 global_options.x_param_values,
1724 global_options_set.x_param_values);
 1725   /* s390 has more than 2 cache levels and the total cache size is much
 1726      larger.  Since we are always running virtualized, assume that we only
 1727      get a small part of the caches above L1.  */
1728 maybe_set_param_value (PARAM_L2_CACHE_SIZE, 1500,
1729 global_options.x_param_values,
1730 global_options_set.x_param_values);
1731 maybe_set_param_value (PARAM_PREFETCH_MIN_INSN_TO_MEM_RATIO, 2,
1732 global_options.x_param_values,
1733 global_options_set.x_param_values);
1734 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6,
1735 global_options.x_param_values,
1736 global_options_set.x_param_values);
1737
1738 /* This cannot reside in s390_option_optimization_table since HAVE_prefetch
1739 requires the arch flags to be evaluated already. Since prefetching
1740 is beneficial on s390, we enable it if available. */
1741 if (flag_prefetch_loop_arrays < 0 && HAVE_prefetch && optimize >= 3)
1742 flag_prefetch_loop_arrays = 1;
1743 }
1744
1745 /* Map for smallest class containing reg regno. */
1746
1747 const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
1748 { GENERAL_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1749 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1750 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1751 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1752 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1753 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1754 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1755 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1756 ADDR_REGS, CC_REGS, ADDR_REGS, ADDR_REGS,
1757 ACCESS_REGS, ACCESS_REGS
1758 };
1759
1760 /* Return attribute type of insn. */
1761
1762 static enum attr_type
1763 s390_safe_attr_type (rtx insn)
1764 {
1765 if (recog_memoized (insn) >= 0)
1766 return get_attr_type (insn);
1767 else
1768 return TYPE_NONE;
1769 }
1770
1771 /* Return true if DISP is a valid short displacement. */
1772
1773 static bool
1774 s390_short_displacement (rtx disp)
1775 {
1776 /* No displacement is OK. */
1777 if (!disp)
1778 return true;
1779
1780 /* Without the long displacement facility we don't need to
 1781      distinguish between long and short displacement.  */
1782 if (!TARGET_LONG_DISPLACEMENT)
1783 return true;
1784
1785 /* Integer displacement in range. */
1786 if (GET_CODE (disp) == CONST_INT)
1787 return INTVAL (disp) >= 0 && INTVAL (disp) < 4096;
1788
1789 /* GOT offset is not OK, the GOT can be large. */
1790 if (GET_CODE (disp) == CONST
1791 && GET_CODE (XEXP (disp, 0)) == UNSPEC
1792 && (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
1793 || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
1794 return false;
1795
1796 /* All other symbolic constants are literal pool references,
1797 which are OK as the literal pool must be small. */
1798 if (GET_CODE (disp) == CONST)
1799 return true;
1800
1801 return false;
1802 }
1803
1804 /* Decompose a RTL expression ADDR for a memory address into
1805 its components, returned in OUT.
1806
1807 Returns false if ADDR is not a valid memory address, true
1808 otherwise. If OUT is NULL, don't return the components,
1809 but check for validity only.
1810
1811 Note: Only addresses in canonical form are recognized.
1812 LEGITIMIZE_ADDRESS should convert non-canonical forms to the
1813 canonical form so that they will be recognized. */
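/* Illustrative example: the address (plus (reg %r2) (const_int 40))
   decomposes into base == %r2, no index register, and an integer
   displacement of 40, which lies within the unsigned 12-bit range
   0..4095.  */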
1814
1815 static int
1816 s390_decompose_address (rtx addr, struct s390_address *out)
1817 {
1818 HOST_WIDE_INT offset = 0;
1819 rtx base = NULL_RTX;
1820 rtx indx = NULL_RTX;
1821 rtx disp = NULL_RTX;
1822 rtx orig_disp;
1823 bool pointer = false;
1824 bool base_ptr = false;
1825 bool indx_ptr = false;
1826 bool literal_pool = false;
1827
1828 /* We may need to substitute the literal pool base register into the address
1829 below. However, at this point we do not know which register is going to
1830 be used as base, so we substitute the arg pointer register. This is going
1831 to be treated as holding a pointer below -- it shouldn't be used for any
1832 other purpose. */
1833 rtx fake_pool_base = gen_rtx_REG (Pmode, ARG_POINTER_REGNUM);
1834
1835 /* Decompose address into base + index + displacement. */
1836
1837 if (GET_CODE (addr) == REG || GET_CODE (addr) == UNSPEC)
1838 base = addr;
1839
1840 else if (GET_CODE (addr) == PLUS)
1841 {
1842 rtx op0 = XEXP (addr, 0);
1843 rtx op1 = XEXP (addr, 1);
1844 enum rtx_code code0 = GET_CODE (op0);
1845 enum rtx_code code1 = GET_CODE (op1);
1846
1847 if (code0 == REG || code0 == UNSPEC)
1848 {
1849 if (code1 == REG || code1 == UNSPEC)
1850 {
1851 indx = op0; /* index + base */
1852 base = op1;
1853 }
1854
1855 else
1856 {
1857 base = op0; /* base + displacement */
1858 disp = op1;
1859 }
1860 }
1861
1862 else if (code0 == PLUS)
1863 {
1864 indx = XEXP (op0, 0); /* index + base + disp */
1865 base = XEXP (op0, 1);
1866 disp = op1;
1867 }
1868
1869 else
1870 {
1871 return false;
1872 }
1873 }
1874
1875 else
1876 disp = addr; /* displacement */
1877
1878 /* Extract integer part of displacement. */
1879 orig_disp = disp;
1880 if (disp)
1881 {
1882 if (GET_CODE (disp) == CONST_INT)
1883 {
1884 offset = INTVAL (disp);
1885 disp = NULL_RTX;
1886 }
1887 else if (GET_CODE (disp) == CONST
1888 && GET_CODE (XEXP (disp, 0)) == PLUS
1889 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
1890 {
1891 offset = INTVAL (XEXP (XEXP (disp, 0), 1));
1892 disp = XEXP (XEXP (disp, 0), 0);
1893 }
1894 }
1895
1896 /* Strip off CONST here to avoid special case tests later. */
1897 if (disp && GET_CODE (disp) == CONST)
1898 disp = XEXP (disp, 0);
1899
1900 /* We can convert literal pool addresses to
1901 displacements by basing them off the base register. */
1902 if (disp && GET_CODE (disp) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (disp))
1903 {
1904 /* Either base or index must be free to hold the base register. */
1905 if (!base)
1906 base = fake_pool_base, literal_pool = true;
1907 else if (!indx)
1908 indx = fake_pool_base, literal_pool = true;
1909 else
1910 return false;
1911
1912 /* Mark up the displacement. */
1913 disp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, disp),
1914 UNSPEC_LTREL_OFFSET);
1915 }
1916
1917 /* Validate base register. */
1918 if (base)
1919 {
1920 if (GET_CODE (base) == UNSPEC)
1921 switch (XINT (base, 1))
1922 {
1923 case UNSPEC_LTREF:
1924 if (!disp)
1925 disp = gen_rtx_UNSPEC (Pmode,
1926 gen_rtvec (1, XVECEXP (base, 0, 0)),
1927 UNSPEC_LTREL_OFFSET);
1928 else
1929 return false;
1930
1931 base = XVECEXP (base, 0, 1);
1932 break;
1933
1934 case UNSPEC_LTREL_BASE:
1935 if (XVECLEN (base, 0) == 1)
1936 base = fake_pool_base, literal_pool = true;
1937 else
1938 base = XVECEXP (base, 0, 1);
1939 break;
1940
1941 default:
1942 return false;
1943 }
1944
1945 if (!REG_P (base)
1946 || (GET_MODE (base) != SImode
1947 && GET_MODE (base) != Pmode))
1948 return false;
1949
1950 if (REGNO (base) == STACK_POINTER_REGNUM
1951 || REGNO (base) == FRAME_POINTER_REGNUM
1952 || ((reload_completed || reload_in_progress)
1953 && frame_pointer_needed
1954 && REGNO (base) == HARD_FRAME_POINTER_REGNUM)
1955 || REGNO (base) == ARG_POINTER_REGNUM
1956 || (flag_pic
1957 && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
1958 pointer = base_ptr = true;
1959
1960 if ((reload_completed || reload_in_progress)
1961 && base == cfun->machine->base_reg)
1962 pointer = base_ptr = literal_pool = true;
1963 }
1964
1965 /* Validate index register. */
1966 if (indx)
1967 {
1968 if (GET_CODE (indx) == UNSPEC)
1969 switch (XINT (indx, 1))
1970 {
1971 case UNSPEC_LTREF:
1972 if (!disp)
1973 disp = gen_rtx_UNSPEC (Pmode,
1974 gen_rtvec (1, XVECEXP (indx, 0, 0)),
1975 UNSPEC_LTREL_OFFSET);
1976 else
1977 return false;
1978
1979 indx = XVECEXP (indx, 0, 1);
1980 break;
1981
1982 case UNSPEC_LTREL_BASE:
1983 if (XVECLEN (indx, 0) == 1)
1984 indx = fake_pool_base, literal_pool = true;
1985 else
1986 indx = XVECEXP (indx, 0, 1);
1987 break;
1988
1989 default:
1990 return false;
1991 }
1992
1993 if (!REG_P (indx)
1994 || (GET_MODE (indx) != SImode
1995 && GET_MODE (indx) != Pmode))
1996 return false;
1997
1998 if (REGNO (indx) == STACK_POINTER_REGNUM
1999 || REGNO (indx) == FRAME_POINTER_REGNUM
2000 || ((reload_completed || reload_in_progress)
2001 && frame_pointer_needed
2002 && REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
2003 || REGNO (indx) == ARG_POINTER_REGNUM
2004 || (flag_pic
2005 && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
2006 pointer = indx_ptr = true;
2007
2008 if ((reload_completed || reload_in_progress)
2009 && indx == cfun->machine->base_reg)
2010 pointer = indx_ptr = literal_pool = true;
2011 }
2012
2013 /* Prefer to use pointer as base, not index. */
2014 if (base && indx && !base_ptr
2015 && (indx_ptr || (!REG_POINTER (base) && REG_POINTER (indx))))
2016 {
2017 rtx tmp = base;
2018 base = indx;
2019 indx = tmp;
2020 }
2021
2022 /* Validate displacement. */
2023 if (!disp)
2024 {
2025 /* If virtual registers are involved, the displacement will change later
2026 anyway as the virtual registers get eliminated. This could make a
2027 valid displacement invalid, but it is more likely to make an invalid
2028 displacement valid, because we sometimes access the register save area
2029 via negative offsets to one of those registers.
2030 Thus we don't check the displacement for validity here. If after
2031 elimination the displacement turns out to be invalid after all,
2032 this is fixed up by reload in any case. */
2033 if (base != arg_pointer_rtx
2034 && indx != arg_pointer_rtx
2035 && base != return_address_pointer_rtx
2036 && indx != return_address_pointer_rtx
2037 && base != frame_pointer_rtx
2038 && indx != frame_pointer_rtx
2039 && base != virtual_stack_vars_rtx
2040 && indx != virtual_stack_vars_rtx)
2041 if (!DISP_IN_RANGE (offset))
2042 return false;
2043 }
2044 else
2045 {
2046 /* All the special cases are pointers. */
2047 pointer = true;
2048
2049 /* In the small-PIC case, the linker converts @GOT
2050 and @GOTNTPOFF offsets to possible displacements. */
2051 if (GET_CODE (disp) == UNSPEC
2052 && (XINT (disp, 1) == UNSPEC_GOT
2053 || XINT (disp, 1) == UNSPEC_GOTNTPOFF)
2054 && flag_pic == 1)
2055 {
2056 ;
2057 }
2058
2059 /* Accept pool label offsets. */
2060 else if (GET_CODE (disp) == UNSPEC
2061 && XINT (disp, 1) == UNSPEC_POOL_OFFSET)
2062 ;
2063
2064 /* Accept literal pool references. */
2065 else if (GET_CODE (disp) == UNSPEC
2066 && XINT (disp, 1) == UNSPEC_LTREL_OFFSET)
2067 {
2068 orig_disp = gen_rtx_CONST (Pmode, disp);
2069 if (offset)
2070 {
2071 /* If we have an offset, make sure it does not
2072 exceed the size of the constant pool entry. */
2073 rtx sym = XVECEXP (disp, 0, 0);
2074 if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))
2075 return false;
2076
2077 orig_disp = plus_constant (orig_disp, offset);
2078 }
2079 }
2080
2081 else
2082 return false;
2083 }
2084
2085 if (!base && !indx)
2086 pointer = true;
2087
2088 if (out)
2089 {
2090 out->base = base;
2091 out->indx = indx;
2092 out->disp = orig_disp;
2093 out->pointer = pointer;
2094 out->literal_pool = literal_pool;
2095 }
2096
2097 return true;
2098 }
2099
2100 /* Decompose an RTL expression OP for a shift count into its components,
2101 and return the base register in BASE and the offset in OFFSET.
2102
2103 Return true if OP is a valid shift count, false if not. */
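/* For example, (const_int 3) yields *BASE = NULL_RTX and *OFFSET = 3,
   while (plus (reg 1) (const_int 7)) yields *BASE = (reg 1) and
   *OFFSET = 7; any SUBREGs around the base register are stripped.  */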
2104
2105 bool
2106 s390_decompose_shift_count (rtx op, rtx *base, HOST_WIDE_INT *offset)
2107 {
2108 HOST_WIDE_INT off = 0;
2109
2110 /* We can have an integer constant, an address register,
2111 or a sum of the two. */
2112 if (GET_CODE (op) == CONST_INT)
2113 {
2114 off = INTVAL (op);
2115 op = NULL_RTX;
2116 }
2117 if (op && GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT)
2118 {
2119 off = INTVAL (XEXP (op, 1));
2120 op = XEXP (op, 0);
2121 }
2122 while (op && GET_CODE (op) == SUBREG)
2123 op = SUBREG_REG (op);
2124
2125 if (op && GET_CODE (op) != REG)
2126 return false;
2127
2128 if (offset)
2129 *offset = off;
2130 if (base)
2131 *base = op;
2132
2133 return true;
2134 }
2135
2136
2137 /* Return true if OP is a memory operand whose address has no index register. */
2138
2139 bool
2140 s390_legitimate_address_without_index_p (rtx op)
2141 {
2142 struct s390_address addr;
2143
2144 if (!s390_decompose_address (XEXP (op, 0), &addr))
2145 return false;
2146 if (addr.indx)
2147 return false;
2148
2149 return true;
2150 }
2151
2152
2153 /* Return true if ADDR is of the form SYMBOL_REF or SYMBOL_REF + CONST_INT
2154 and return these parts in SYMREF and ADDEND. You can pass NULL in
2155 SYMREF and/or ADDEND if you are not interested in these values.
2156 Literal pool references are *not* considered symbol references. */
2157
2158 static bool
2159 s390_symref_operand_p (rtx addr, rtx *symref, HOST_WIDE_INT *addend)
2160 {
2161 HOST_WIDE_INT tmpaddend = 0;
2162
2163 if (GET_CODE (addr) == CONST)
2164 addr = XEXP (addr, 0);
2165
2166 if (GET_CODE (addr) == PLUS)
2167 {
2168 if (GET_CODE (XEXP (addr, 0)) == SYMBOL_REF
2169 && !CONSTANT_POOL_ADDRESS_P (XEXP (addr, 0))
2170 && CONST_INT_P (XEXP (addr, 1)))
2171 {
2172 tmpaddend = INTVAL (XEXP (addr, 1));
2173 addr = XEXP (addr, 0);
2174 }
2175 else
2176 return false;
2177 }
2178 else
2179 if (GET_CODE (addr) != SYMBOL_REF || CONSTANT_POOL_ADDRESS_P (addr))
2180 return false;
2181
2182 if (symref)
2183 *symref = addr;
2184 if (addend)
2185 *addend = tmpaddend;
2186
2187 return true;
2188 }
2189
2190
2191 /* Return true if the address in OP is valid for constraint letter C
2192 if wrapped in a MEM rtx. Set LIT_POOL_OK to true if literal
2193 pool MEMs should be accepted. Only the Q, R, S, T constraint
2194 letters are allowed for C. */
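/* Roughly: Q and S match addresses without an index register, R and T
   match addresses that may carry one; Q and R require the displacement to
   fit the short (12-bit unsigned) format, while S and T only match when
   the displacement needs the long-displacement form.  */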
2195
2196 static int
2197 s390_check_qrst_address (char c, rtx op, bool lit_pool_ok)
2198 {
2199 struct s390_address addr;
2200 bool decomposed = false;
2201
2202 /* This check makes sure that no symbolic address (except literal
2203 pool references) is accepted by the R or T constraints. */
2204 if (s390_symref_operand_p (op, NULL, NULL))
2205 return 0;
2206
2207 /* Ensure literal pool references are only accepted if LIT_POOL_OK. */
2208 if (!lit_pool_ok)
2209 {
2210 if (!s390_decompose_address (op, &addr))
2211 return 0;
2212 if (addr.literal_pool)
2213 return 0;
2214 decomposed = true;
2215 }
2216
2217 switch (c)
2218 {
2219 case 'Q': /* no index short displacement */
2220 if (!decomposed && !s390_decompose_address (op, &addr))
2221 return 0;
2222 if (addr.indx)
2223 return 0;
2224 if (!s390_short_displacement (addr.disp))
2225 return 0;
2226 break;
2227
2228 case 'R': /* with index short displacement */
2229 if (TARGET_LONG_DISPLACEMENT)
2230 {
2231 if (!decomposed && !s390_decompose_address (op, &addr))
2232 return 0;
2233 if (!s390_short_displacement (addr.disp))
2234 return 0;
2235 }
2236 /* Any invalid address here will be fixed up by reload,
2237 so accept it for the most generic constraint. */
2238 break;
2239
2240 case 'S': /* no index long displacement */
2241 if (!TARGET_LONG_DISPLACEMENT)
2242 return 0;
2243 if (!decomposed && !s390_decompose_address (op, &addr))
2244 return 0;
2245 if (addr.indx)
2246 return 0;
2247 if (s390_short_displacement (addr.disp))
2248 return 0;
2249 break;
2250
2251 case 'T': /* with index long displacement */
2252 if (!TARGET_LONG_DISPLACEMENT)
2253 return 0;
2254 /* Any invalid address here will be fixed up by reload,
2255 so accept it for the most generic constraint. */
2256 if ((decomposed || s390_decompose_address (op, &addr))
2257 && s390_short_displacement (addr.disp))
2258 return 0;
2259 break;
2260 default:
2261 return 0;
2262 }
2263 return 1;
2264 }
2265
2266
2267 /* Evaluates constraint strings described by the regular expression
2268 ([ABZ](Q|R|S|T))|U|W|Y and returns 1 if OP is a valid operand for
2269 the constraint given in STR, and 0 otherwise. */
2270
2271 int
2272 s390_mem_constraint (const char *str, rtx op)
2273 {
2274 char c = str[0];
2275
2276 switch (c)
2277 {
2278 case 'A':
2279 /* Check for offsettable variants of memory constraints. */
2280 if (!MEM_P (op) || MEM_VOLATILE_P (op))
2281 return 0;
2282 if ((reload_completed || reload_in_progress)
2283 ? !offsettable_memref_p (op) : !offsettable_nonstrict_memref_p (op))
2284 return 0;
2285 return s390_check_qrst_address (str[1], XEXP (op, 0), true);
2286 case 'B':
2287 /* Check for non-literal-pool variants of memory constraints. */
2288 if (!MEM_P (op))
2289 return 0;
2290 return s390_check_qrst_address (str[1], XEXP (op, 0), false);
2291 case 'Q':
2292 case 'R':
2293 case 'S':
2294 case 'T':
2295 if (GET_CODE (op) != MEM)
2296 return 0;
2297 return s390_check_qrst_address (c, XEXP (op, 0), true);
2298 case 'U':
2299 return (s390_check_qrst_address ('Q', op, true)
2300 || s390_check_qrst_address ('R', op, true));
2301 case 'W':
2302 return (s390_check_qrst_address ('S', op, true)
2303 || s390_check_qrst_address ('T', op, true));
2304 case 'Y':
2305 /* Simply check for the basic form of a shift count. Reload will
2306 take care of making sure we have a proper base register. */
2307 if (!s390_decompose_shift_count (op, NULL, NULL))
2308 return 0;
2309 break;
2310 case 'Z':
2311 return s390_check_qrst_address (str[1], op, true);
2312 default:
2313 return 0;
2314 }
2315 return 1;
2316 }
2317
2318
2319 /* Evaluates constraint strings starting with letter O. Input
2320 parameter C is the letter immediately following the "O" in the constraint
2321 string. Returns 1 if VALUE meets the respective constraint and 0
2322 otherwise. */
2323
2324 int
2325 s390_O_constraint_str (const char c, HOST_WIDE_INT value)
2326 {
2327 if (!TARGET_EXTIMM)
2328 return 0;
2329
2330 switch (c)
2331 {
2332 case 's':
2333 return trunc_int_for_mode (value, SImode) == value;
2334
2335 case 'p':
2336 return value == 0
2337 || s390_single_part (GEN_INT (value), DImode, SImode, 0) == 1;
2338
2339 case 'n':
2340 return s390_single_part (GEN_INT (value - 1), DImode, SImode, -1) == 1;
2341
2342 default:
2343 gcc_unreachable ();
2344 }
2345 }
2346
2347
2348 /* Evaluates constraint strings starting with letter N. Parameter STR
2349 contains the letters following letter "N" in the constraint string.
2350 Returns true if VALUE matches the constraint. */
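/* For instance, given the string "xHD0" this returns 1 for any DImode
   VALUE in which exactly one HImode (16-bit) part differs from zero; a
   leading digit instead of 'x' additionally pins down which part that
   must be.  */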
2351
2352 int
2353 s390_N_constraint_str (const char *str, HOST_WIDE_INT value)
2354 {
2355 enum machine_mode mode, part_mode;
2356 int def;
2357 int part, part_goal;
2358
2359
2360 if (str[0] == 'x')
2361 part_goal = -1;
2362 else
2363 part_goal = str[0] - '0';
2364
2365 switch (str[1])
2366 {
2367 case 'Q':
2368 part_mode = QImode;
2369 break;
2370 case 'H':
2371 part_mode = HImode;
2372 break;
2373 case 'S':
2374 part_mode = SImode;
2375 break;
2376 default:
2377 return 0;
2378 }
2379
2380 switch (str[2])
2381 {
2382 case 'H':
2383 mode = HImode;
2384 break;
2385 case 'S':
2386 mode = SImode;
2387 break;
2388 case 'D':
2389 mode = DImode;
2390 break;
2391 default:
2392 return 0;
2393 }
2394
2395 switch (str[3])
2396 {
2397 case '0':
2398 def = 0;
2399 break;
2400 case 'F':
2401 def = -1;
2402 break;
2403 default:
2404 return 0;
2405 }
2406
2407 if (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (part_mode))
2408 return 0;
2409
2410 part = s390_single_part (GEN_INT (value), mode, part_mode, def);
2411 if (part < 0)
2412 return 0;
2413 if (part_goal != -1 && part_goal != part)
2414 return 0;
2415
2416 return 1;
2417 }
2418
2419
2420 /* Returns true if the input parameter VALUE is a float zero. */
2421
2422 int
2423 s390_float_const_zero_p (rtx value)
2424 {
2425 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
2426 && value == CONST0_RTX (GET_MODE (value)));
2427 }
2428
2429 /* Implement TARGET_REGISTER_MOVE_COST. */
2430
2431 static int
2432 s390_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2433 reg_class_t from, reg_class_t to)
2434 {
2435 /* On s390, copy between fprs and gprs is expensive. */
2436 if ((reg_classes_intersect_p (from, GENERAL_REGS)
2437 && reg_classes_intersect_p (to, FP_REGS))
2438 || (reg_classes_intersect_p (from, FP_REGS)
2439 && reg_classes_intersect_p (to, GENERAL_REGS)))
2440 return 10;
2441
2442 return 1;
2443 }
2444
2445 /* Implement TARGET_MEMORY_MOVE_COST. */
2446
2447 static int
2448 s390_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2449 reg_class_t rclass ATTRIBUTE_UNUSED,
2450 bool in ATTRIBUTE_UNUSED)
2451 {
2452 return 1;
2453 }
2454
2455 /* Compute a (partial) cost for rtx X. Return true if the complete
2456 cost has been computed, and false if subexpressions should be
2457 scanned. In either case, *TOTAL contains the cost result.
2458 CODE contains GET_CODE (x), OUTER_CODE contains the code
2459 of the superexpression of x. */
2460
2461 static bool
2462 s390_rtx_costs (rtx x, int code, int outer_code, int *total,
2463 bool speed ATTRIBUTE_UNUSED)
2464 {
2465 switch (code)
2466 {
2467 case CONST:
2468 case CONST_INT:
2469 case LABEL_REF:
2470 case SYMBOL_REF:
2471 case CONST_DOUBLE:
2472 case MEM:
2473 *total = 0;
2474 return true;
2475
2476 case ASHIFT:
2477 case ASHIFTRT:
2478 case LSHIFTRT:
2479 case ROTATE:
2480 case ROTATERT:
2481 case AND:
2482 case IOR:
2483 case XOR:
2484 case NEG:
2485 case NOT:
2486 *total = COSTS_N_INSNS (1);
2487 return false;
2488
2489 case PLUS:
2490 case MINUS:
2491 *total = COSTS_N_INSNS (1);
2492 return false;
2493
2494 case MULT:
2495 switch (GET_MODE (x))
2496 {
2497 case SImode:
2498 {
2499 rtx left = XEXP (x, 0);
2500 rtx right = XEXP (x, 1);
2501 if (GET_CODE (right) == CONST_INT
2502 && CONST_OK_FOR_K (INTVAL (right)))
2503 *total = s390_cost->mhi;
2504 else if (GET_CODE (left) == SIGN_EXTEND)
2505 *total = s390_cost->mh;
2506 else
2507 *total = s390_cost->ms; /* msr, ms, msy */
2508 break;
2509 }
2510 case DImode:
2511 {
2512 rtx left = XEXP (x, 0);
2513 rtx right = XEXP (x, 1);
2514 if (TARGET_ZARCH)
2515 {
2516 if (GET_CODE (right) == CONST_INT
2517 && CONST_OK_FOR_K (INTVAL (right)))
2518 *total = s390_cost->mghi;
2519 else if (GET_CODE (left) == SIGN_EXTEND)
2520 *total = s390_cost->msgf;
2521 else
2522 *total = s390_cost->msg; /* msgr, msg */
2523 }
2524 else /* TARGET_31BIT */
2525 {
2526 if (GET_CODE (left) == SIGN_EXTEND
2527 && GET_CODE (right) == SIGN_EXTEND)
2528 /* mulsidi case: mr, m */
2529 *total = s390_cost->m;
2530 else if (GET_CODE (left) == ZERO_EXTEND
2531 && GET_CODE (right) == ZERO_EXTEND
2532 && TARGET_CPU_ZARCH)
2533 /* umulsidi case: ml, mlr */
2534 *total = s390_cost->ml;
2535 else
2536 /* Complex calculation is required. */
2537 *total = COSTS_N_INSNS (40);
2538 }
2539 break;
2540 }
2541 case SFmode:
2542 case DFmode:
2543 *total = s390_cost->mult_df;
2544 break;
2545 case TFmode:
2546 *total = s390_cost->mxbr;
2547 break;
2548 default:
2549 return false;
2550 }
2551 return false;
2552
2553 case FMA:
2554 switch (GET_MODE (x))
2555 {
2556 case DFmode:
2557 *total = s390_cost->madbr;
2558 break;
2559 case SFmode:
2560 *total = s390_cost->maebr;
2561 break;
2562 default:
2563 return false;
2564 }
2565 /* A negation of the third operand is free: it maps to FMSUB. */
2566 if (GET_CODE (XEXP (x, 2)) == NEG)
2567 {
2568 *total += (rtx_cost (XEXP (x, 0), FMA, speed)
2569 + rtx_cost (XEXP (x, 1), FMA, speed)
2570 + rtx_cost (XEXP (XEXP (x, 2), 0), FMA, speed));
2571 return true;
2572 }
2573 return false;
2574
2575 case UDIV:
2576 case UMOD:
2577 if (GET_MODE (x) == TImode) /* 128 bit division */
2578 *total = s390_cost->dlgr;
2579 else if (GET_MODE (x) == DImode)
2580 {
2581 rtx right = XEXP (x, 1);
2582 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
2583 *total = s390_cost->dlr;
2584 else /* 64 by 64 bit division */
2585 *total = s390_cost->dlgr;
2586 }
2587 else if (GET_MODE (x) == SImode) /* 32 bit division */
2588 *total = s390_cost->dlr;
2589 return false;
2590
2591 case DIV:
2592 case MOD:
2593 if (GET_MODE (x) == DImode)
2594 {
2595 rtx right = XEXP (x, 1);
2596 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
2597 if (TARGET_ZARCH)
2598 *total = s390_cost->dsgfr;
2599 else
2600 *total = s390_cost->dr;
2601 else /* 64 by 64 bit division */
2602 *total = s390_cost->dsgr;
2603 }
2604 else if (GET_MODE (x) == SImode) /* 32 bit division */
2605 *total = s390_cost->dlr;
2606 else if (GET_MODE (x) == SFmode)
2607 {
2608 *total = s390_cost->debr;
2609 }
2610 else if (GET_MODE (x) == DFmode)
2611 {
2612 *total = s390_cost->ddbr;
2613 }
2614 else if (GET_MODE (x) == TFmode)
2615 {
2616 *total = s390_cost->dxbr;
2617 }
2618 return false;
2619
2620 case SQRT:
2621 if (GET_MODE (x) == SFmode)
2622 *total = s390_cost->sqebr;
2623 else if (GET_MODE (x) == DFmode)
2624 *total = s390_cost->sqdbr;
2625 else /* TFmode */
2626 *total = s390_cost->sqxbr;
2627 return false;
2628
2629 case SIGN_EXTEND:
2630 case ZERO_EXTEND:
2631 if (outer_code == MULT || outer_code == DIV || outer_code == MOD
2632 || outer_code == PLUS || outer_code == MINUS
2633 || outer_code == COMPARE)
2634 *total = 0;
2635 return false;
2636
2637 case COMPARE:
2638 *total = COSTS_N_INSNS (1);
2639 if (GET_CODE (XEXP (x, 0)) == AND
2640 && GET_CODE (XEXP (x, 1)) == CONST_INT
2641 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
2642 {
2643 rtx op0 = XEXP (XEXP (x, 0), 0);
2644 rtx op1 = XEXP (XEXP (x, 0), 1);
2645 rtx op2 = XEXP (x, 1);
2646
2647 if (memory_operand (op0, GET_MODE (op0))
2648 && s390_tm_ccmode (op1, op2, 0) != VOIDmode)
2649 return true;
2650 if (register_operand (op0, GET_MODE (op0))
2651 && s390_tm_ccmode (op1, op2, 1) != VOIDmode)
2652 return true;
2653 }
2654 return false;
2655
2656 default:
2657 return false;
2658 }
2659 }
2660
2661 /* Return the cost of an address rtx ADDR. */
2662
2663 static int
2664 s390_address_cost (rtx addr, bool speed ATTRIBUTE_UNUSED)
2665 {
2666 struct s390_address ad;
2667 if (!s390_decompose_address (addr, &ad))
2668 return 1000;
2669
2670 return ad.indx? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (1);
2671 }
2672
2673 /* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
2674 otherwise return 0. */
2675
2676 int
2677 tls_symbolic_operand (rtx op)
2678 {
2679 if (GET_CODE (op) != SYMBOL_REF)
2680 return 0;
2681 return SYMBOL_REF_TLS_MODEL (op);
2682 }
2683 \f
2684 /* Split DImode access register reference REG (on 64-bit) into its constituent
2685 low and high parts, and store them into LO and HI. Note that gen_lowpart/
2686 gen_highpart cannot be used as they assume all registers are word-sized,
2687 while our access registers have only half that size. */
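/* E.g. for the DImode register covering the access-register pair starting
   at %a0, this returns *HI = %a0 (the even, high part) and *LO = %a1.  */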
2688
2689 void
2690 s390_split_access_reg (rtx reg, rtx *lo, rtx *hi)
2691 {
2692 gcc_assert (TARGET_64BIT);
2693 gcc_assert (ACCESS_REG_P (reg));
2694 gcc_assert (GET_MODE (reg) == DImode);
2695 gcc_assert (!(REGNO (reg) & 1));
2696
2697 *lo = gen_rtx_REG (SImode, REGNO (reg) + 1);
2698 *hi = gen_rtx_REG (SImode, REGNO (reg));
2699 }
2700
2701 /* Return true if OP contains a symbol reference. */
2702
2703 bool
2704 symbolic_reference_mentioned_p (rtx op)
2705 {
2706 const char *fmt;
2707 int i;
2708
2709 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
2710 return 1;
2711
2712 fmt = GET_RTX_FORMAT (GET_CODE (op));
2713 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2714 {
2715 if (fmt[i] == 'E')
2716 {
2717 int j;
2718
2719 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2720 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2721 return 1;
2722 }
2723
2724 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
2725 return 1;
2726 }
2727
2728 return 0;
2729 }
2730
2731 /* Return true if OP contains a reference to a thread-local symbol. */
2732
2733 bool
2734 tls_symbolic_reference_mentioned_p (rtx op)
2735 {
2736 const char *fmt;
2737 int i;
2738
2739 if (GET_CODE (op) == SYMBOL_REF)
2740 return tls_symbolic_operand (op);
2741
2742 fmt = GET_RTX_FORMAT (GET_CODE (op));
2743 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2744 {
2745 if (fmt[i] == 'E')
2746 {
2747 int j;
2748
2749 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2750 if (tls_symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2751 return true;
2752 }
2753
2754 else if (fmt[i] == 'e' && tls_symbolic_reference_mentioned_p (XEXP (op, i)))
2755 return true;
2756 }
2757
2758 return false;
2759 }
2760
2761
2762 /* Return true if OP is a legitimate general operand when
2763 generating PIC code. It is given that flag_pic is on
2764 and that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2765
2766 int
2767 legitimate_pic_operand_p (rtx op)
2768 {
2769 /* Accept all non-symbolic constants. */
2770 if (!SYMBOLIC_CONST (op))
2771 return 1;
2772
2773 /* Reject everything else; must be handled
2774 via emit_symbolic_move. */
2775 return 0;
2776 }
2777
2778 /* Returns true if the constant value OP is a legitimate general operand.
2779 It is given that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2780
2781 int
2782 legitimate_constant_p (rtx op)
2783 {
2784 /* Accept all non-symbolic constants. */
2785 if (!SYMBOLIC_CONST (op))
2786 return 1;
2787
2788 /* Accept immediate LARL operands. */
2789 if (TARGET_CPU_ZARCH && larl_operand (op, VOIDmode))
2790 return 1;
2791
2792 /* Thread-local symbols are never legal constants. This is
2793 so that emit_call knows that computing such addresses
2794 might require a function call. */
2795 if (TLS_SYMBOLIC_CONST (op))
2796 return 0;
2797
2798 /* In the PIC case, symbolic constants must *not* be
2799 forced into the literal pool. We accept them here,
2800 so that they will be handled by emit_symbolic_move. */
2801 if (flag_pic)
2802 return 1;
2803
2804 /* All remaining non-PIC symbolic constants are
2805 forced into the literal pool. */
2806 return 0;
2807 }
2808
2809 /* Determine if it's legal to put X into the constant pool. This
2810 is not possible if X contains the address of a symbol that is
2811 not constant (TLS) or not known at final link time (PIC). */
2812
2813 static bool
2814 s390_cannot_force_const_mem (rtx x)
2815 {
2816 switch (GET_CODE (x))
2817 {
2818 case CONST_INT:
2819 case CONST_DOUBLE:
2820 /* Accept all non-symbolic constants. */
2821 return false;
2822
2823 case LABEL_REF:
2824 /* Labels are OK iff we are non-PIC. */
2825 return flag_pic != 0;
2826
2827 case SYMBOL_REF:
2828 /* 'Naked' TLS symbol references are never OK,
2829 non-TLS symbols are OK iff we are non-PIC. */
2830 if (tls_symbolic_operand (x))
2831 return true;
2832 else
2833 return flag_pic != 0;
2834
2835 case CONST:
2836 return s390_cannot_force_const_mem (XEXP (x, 0));
2837 case PLUS:
2838 case MINUS:
2839 return s390_cannot_force_const_mem (XEXP (x, 0))
2840 || s390_cannot_force_const_mem (XEXP (x, 1));
2841
2842 case UNSPEC:
2843 switch (XINT (x, 1))
2844 {
2845 /* Only literal-pool-relative, GOT-relative and TLS-related UNSPECs are OK. */
2846 case UNSPEC_LTREL_OFFSET:
2847 case UNSPEC_GOT:
2848 case UNSPEC_GOTOFF:
2849 case UNSPEC_PLTOFF:
2850 case UNSPEC_TLSGD:
2851 case UNSPEC_TLSLDM:
2852 case UNSPEC_NTPOFF:
2853 case UNSPEC_DTPOFF:
2854 case UNSPEC_GOTNTPOFF:
2855 case UNSPEC_INDNTPOFF:
2856 return false;
2857
2858 /* If the literal pool shares the code section, execute template
2859 placeholders may be put into the pool as well. */
2860 case UNSPEC_INSN:
2861 return TARGET_CPU_ZARCH;
2862
2863 default:
2864 return true;
2865 }
2866 break;
2867
2868 default:
2869 gcc_unreachable ();
2870 }
2871 }
2872
2873 /* Returns true if the constant value OP is a legitimate general
2874 operand during and after reload. The difference from
2875 legitimate_constant_p is that this function will not accept
2876 a constant that would need to be forced to the literal pool
2877 before it can be used as operand.
2878 This function accepts all constants which can be loaded directly
2879 into a GPR. */
2880
2881 bool
2882 legitimate_reload_constant_p (rtx op)
2883 {
2884 /* Accept la(y) operands. */
2885 if (GET_CODE (op) == CONST_INT
2886 && DISP_IN_RANGE (INTVAL (op)))
2887 return true;
2888
2889 /* Accept l(g)hi/l(g)fi operands. */
2890 if (GET_CODE (op) == CONST_INT
2891 && (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_Os (INTVAL (op))))
2892 return true;
2893
2894 /* Accept lliXX operands. */
2895 if (TARGET_ZARCH
2896 && GET_CODE (op) == CONST_INT
2897 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2898 && s390_single_part (op, word_mode, HImode, 0) >= 0)
2899 return true;
2900
2901 if (TARGET_EXTIMM
2902 && GET_CODE (op) == CONST_INT
2903 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2904 && s390_single_part (op, word_mode, SImode, 0) >= 0)
2905 return true;
2906
2907 /* Accept larl operands. */
2908 if (TARGET_CPU_ZARCH
2909 && larl_operand (op, VOIDmode))
2910 return true;
2911
2912 /* Accept floating-point zero operands that fit into a single GPR. */
2913 if (GET_CODE (op) == CONST_DOUBLE
2914 && s390_float_const_zero_p (op)
2915 && GET_MODE_SIZE (GET_MODE (op)) <= UNITS_PER_WORD)
2916 return true;
2917
2918 /* Accept double-word operands that can be split. */
2919 if (GET_CODE (op) == CONST_INT
2920 && trunc_int_for_mode (INTVAL (op), word_mode) != INTVAL (op))
2921 {
2922 enum machine_mode dword_mode = word_mode == SImode ? DImode : TImode;
2923 rtx hi = operand_subword (op, 0, 0, dword_mode);
2924 rtx lo = operand_subword (op, 1, 0, dword_mode);
2925 return legitimate_reload_constant_p (hi)
2926 && legitimate_reload_constant_p (lo);
2927 }
2928
2929 /* Everything else cannot be handled without reload. */
2930 return false;
2931 }
2932
2933 /* Returns true if the constant value OP is a legitimate fp operand
2934 during and after reload.
2935 This function accepts all constants which can be loaded directly
2936 into an FPR. */
2937
2938 static bool
2939 legitimate_reload_fp_constant_p (rtx op)
2940 {
2941 /* Accept floating-point zero operands if the load zero instruction
2942 can be used. */
2943 if (TARGET_Z196
2944 && GET_CODE (op) == CONST_DOUBLE
2945 && s390_float_const_zero_p (op))
2946 return true;
2947
2948 return false;
2949 }
2950
2951 /* Given an rtx OP being reloaded into a reg required to be in class RCLASS,
2952 return the class of reg to actually use. */
2953
2954 static reg_class_t
2955 s390_preferred_reload_class (rtx op, reg_class_t rclass)
2956 {
2957 switch (GET_CODE (op))
2958 {
2959 /* Constants we cannot reload into general registers
2960 must be forced into the literal pool. */
2961 case CONST_DOUBLE:
2962 case CONST_INT:
2963 if (reg_class_subset_p (GENERAL_REGS, rclass)
2964 && legitimate_reload_constant_p (op))
2965 return GENERAL_REGS;
2966 else if (reg_class_subset_p (ADDR_REGS, rclass)
2967 && legitimate_reload_constant_p (op))
2968 return ADDR_REGS;
2969 else if (reg_class_subset_p (FP_REGS, rclass)
2970 && legitimate_reload_fp_constant_p (op))
2971 return FP_REGS;
2972 return NO_REGS;
2973
2974 /* If a symbolic constant or a PLUS is reloaded,
2975 it is most likely being used as an address, so
2976 prefer ADDR_REGS. If RCLASS is not a superset
2977 of ADDR_REGS, e.g. FP_REGS, reject this reload. */
2978 case PLUS:
2979 case LABEL_REF:
2980 case SYMBOL_REF:
2981 case CONST:
2982 if (reg_class_subset_p (ADDR_REGS, rclass))
2983 return ADDR_REGS;
2984 else
2985 return NO_REGS;
2986
2987 default:
2988 break;
2989 }
2990
2991 return rclass;
2992 }
2993
2994 /* Return true if ADDR is SYMBOL_REF + addend with addend being a
2995 multiple of ALIGNMENT and the SYMBOL_REF being naturally
2996 aligned. */
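/* E.g. with ALIGNMENT == 2 this rejects sym + 1 (odd addend) as well as
   any SYMBOL_REF marked as not naturally aligned.  */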
2997
2998 bool
2999 s390_check_symref_alignment (rtx addr, HOST_WIDE_INT alignment)
3000 {
3001 HOST_WIDE_INT addend;
3002 rtx symref;
3003
3004 if (!s390_symref_operand_p (addr, &symref, &addend))
3005 return false;
3006
3007 return (!SYMBOL_REF_NOT_NATURALLY_ALIGNED_P (symref)
3008 && !(addend & (alignment - 1)));
3009 }
3010
3011 /* ADDR is moved into REG using larl. If ADDR isn't a valid larl
3012 operand, SCRATCH is used to load the even part of the address first;
3013 the remaining 1 is then added via la. */
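/* For instance, reloading the (illustrative) operand sym + 5, whose odd
   addend makes it invalid for larl, roughly emits
     larl SCRATCH, sym+4
     la REG, 1(SCRATCH)
   so that the condition code is left untouched.  */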
3014
3015 void
3016 s390_reload_larl_operand (rtx reg, rtx addr, rtx scratch)
3017 {
3018 HOST_WIDE_INT addend;
3019 rtx symref;
3020
3021 if (!s390_symref_operand_p (addr, &symref, &addend))
3022 gcc_unreachable ();
3023
3024 if (!(addend & 1))
3025 /* Easy case. The addend is even so larl will do fine. */
3026 emit_move_insn (reg, addr);
3027 else
3028 {
3029 /* We can leave the scratch register untouched if the target
3030 register is a valid base register. */
3031 if (REGNO (reg) < FIRST_PSEUDO_REGISTER
3032 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS)
3033 scratch = reg;
3034
3035 gcc_assert (REGNO (scratch) < FIRST_PSEUDO_REGISTER);
3036 gcc_assert (REGNO_REG_CLASS (REGNO (scratch)) == ADDR_REGS);
3037
3038 if (addend != 1)
3039 emit_move_insn (scratch,
3040 gen_rtx_CONST (Pmode,
3041 gen_rtx_PLUS (Pmode, symref,
3042 GEN_INT (addend - 1))));
3043 else
3044 emit_move_insn (scratch, symref);
3045
3046 /* Increment the address using la in order to avoid clobbering cc. */
3047 emit_move_insn (reg, gen_rtx_PLUS (Pmode, scratch, const1_rtx));
3048 }
3049 }
3050
3051 /* Generate what is necessary to move between REG and MEM using
3052 SCRATCH. The direction is given by TOMEM. */
3053
3054 void
3055 s390_reload_symref_address (rtx reg, rtx mem, rtx scratch, bool tomem)
3056 {
3057 /* Reload might have pulled a constant out of the literal pool.
3058 Force it back in. */
3059 if (CONST_INT_P (mem) || GET_CODE (mem) == CONST_DOUBLE
3060 || GET_CODE (mem) == CONST)
3061 mem = force_const_mem (GET_MODE (reg), mem);
3062
3063 gcc_assert (MEM_P (mem));
3064
3065 /* For a load from memory we can leave the scratch register
3066 untouched if the target register is a valid base register. */
3067 if (!tomem
3068 && REGNO (reg) < FIRST_PSEUDO_REGISTER
3069 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS
3070 && GET_MODE (reg) == GET_MODE (scratch))
3071 scratch = reg;
3072
3073 /* Load address into scratch register. Since we can't have a
3074 secondary reload for a secondary reload we have to cover the case
3075 where larl would need a secondary reload here as well. */
3076 s390_reload_larl_operand (scratch, XEXP (mem, 0), scratch);
3077
3078 /* Now we can use a standard load/store to do the move. */
3079 if (tomem)
3080 emit_move_insn (replace_equiv_address (mem, scratch), reg);
3081 else
3082 emit_move_insn (reg, replace_equiv_address (mem, scratch));
3083 }
3084
3085 /* Inform reload about cases where moving X with a mode MODE to a register in
3086 RCLASS requires an extra scratch or immediate register. Return the class
3087 needed for the immediate register. */
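/* For example, on z10 a DFmode move between a register and a MEM whose
   address is a plain SYMBOL_REF is routed through the
   reloaddfdi_toreg_z10 or reloaddfdi_tomem_z10 pattern on a 64-bit
   target, which first computes the address in a scratch register.  */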
3088
3089 static reg_class_t
3090 s390_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
3091 enum machine_mode mode, secondary_reload_info *sri)
3092 {
3093 enum reg_class rclass = (enum reg_class) rclass_i;
3094
3095 /* Intermediate register needed. */
3096 if (reg_classes_intersect_p (CC_REGS, rclass))
3097 return GENERAL_REGS;
3098
3099 if (TARGET_Z10)
3100 {
3101 /* On z10 several optimizer steps may generate larl operands with
3102 an odd addend. */
3103 if (in_p
3104 && s390_symref_operand_p (x, NULL, NULL)
3105 && mode == Pmode
3106 && !s390_check_symref_alignment (x, 2))
3107 sri->icode = ((mode == DImode) ? CODE_FOR_reloaddi_larl_odd_addend_z10
3108 : CODE_FOR_reloadsi_larl_odd_addend_z10);
3109
3110 /* On z10 we need a scratch register when moving QI, TI or floating
3111 point mode values (or DI values on 31 bit) from or to a memory
3112 location with a SYMBOL_REF, or if the symref addend of a HI, SI or
3113 DI move is not aligned to the width of the access. */
3114 if (MEM_P (x)
3115 && s390_symref_operand_p (XEXP (x, 0), NULL, NULL)
3116 && (mode == QImode || mode == TImode || FLOAT_MODE_P (mode)
3117 || (!TARGET_ZARCH && mode == DImode)
3118 || ((mode == HImode || mode == SImode || mode == DImode)
3119 && (!s390_check_symref_alignment (XEXP (x, 0),
3120 GET_MODE_SIZE (mode))))))
3121 {
3122 #define __SECONDARY_RELOAD_CASE(M,m) \
3123 case M##mode: \
3124 if (TARGET_64BIT) \
3125 sri->icode = in_p ? CODE_FOR_reload##m##di_toreg_z10 : \
3126 CODE_FOR_reload##m##di_tomem_z10; \
3127 else \
3128 sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 : \
3129 CODE_FOR_reload##m##si_tomem_z10; \
3130 break;
3131
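/* E.g. for a QImode value on a 64-bit target the macro above selects
   CODE_FOR_reloadqidi_toreg_z10 or CODE_FOR_reloadqidi_tomem_z10,
   depending on the direction of the reload.  */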
3132 switch (GET_MODE (x))
3133 {
3134 __SECONDARY_RELOAD_CASE (QI, qi);
3135 __SECONDARY_RELOAD_CASE (HI, hi);
3136 __SECONDARY_RELOAD_CASE (SI, si);
3137 __SECONDARY_RELOAD_CASE (DI, di);
3138 __SECONDARY_RELOAD_CASE (TI, ti);
3139 __SECONDARY_RELOAD_CASE (SF, sf);
3140 __SECONDARY_RELOAD_CASE (DF, df);
3141 __SECONDARY_RELOAD_CASE (TF, tf);
3142 __SECONDARY_RELOAD_CASE (SD, sd);
3143 __SECONDARY_RELOAD_CASE (DD, dd);
3144 __SECONDARY_RELOAD_CASE (TD, td);
3145
3146 default:
3147 gcc_unreachable ();
3148 }
3149 #undef __SECONDARY_RELOAD_CASE
3150 }
3151 }
3152
3153 /* We need a scratch register when loading a PLUS expression which
3154 is not a legitimate operand of the LOAD ADDRESS instruction. */
3155 if (in_p && s390_plus_operand (x, mode))
3156 sri->icode = (TARGET_64BIT ?
3157 CODE_FOR_reloaddi_plus : CODE_FOR_reloadsi_plus);
3158
3159 /* When performing a multiword move from or to memory, we have to make sure
3160 that the second chunk in memory is addressable without causing a
3161 displacement overflow. If it would cause one, we calculate the address
3162 in a scratch register. */
3163 if (MEM_P (x)
3164 && GET_CODE (XEXP (x, 0)) == PLUS
3165 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3166 && !DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1))
3167 + GET_MODE_SIZE (mode) - 1))
3168 {
3169 /* For GENERAL_REGS a displacement overflow is no problem if occurring
3170 in an s_operand address since we may fall back to lm/stm. So we only
3171 have to care about overflows in the b+i+d case. */
3172 if ((reg_classes_intersect_p (GENERAL_REGS, rclass)
3173 && s390_class_max_nregs (GENERAL_REGS, mode) > 1
3174 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
3175 /* For FP_REGS no lm/stm is available so this check is triggered
3176 for displacement overflows in b+i+d and b+d like addresses. */
3177 || (reg_classes_intersect_p (FP_REGS, rclass)
3178 && s390_class_max_nregs (FP_REGS, mode) > 1))
3179 {
3180 if (in_p)
3181 sri->icode = (TARGET_64BIT ?
3182 CODE_FOR_reloaddi_nonoffmem_in :
3183 CODE_FOR_reloadsi_nonoffmem_in);
3184 else
3185 sri->icode = (TARGET_64BIT ?
3186 CODE_FOR_reloaddi_nonoffmem_out :
3187 CODE_FOR_reloadsi_nonoffmem_out);
3188 }
3189 }
3190
3191 /* A scratch address register is needed when a symbolic constant is
3192 copied to r0 when compiling with -fPIC. In other cases the target
3193 register might be used as a temporary (see legitimize_pic_address). */
3194 if (in_p && SYMBOLIC_CONST (x) && flag_pic == 2 && rclass != ADDR_REGS)
3195 sri->icode = (TARGET_64BIT ?
3196 CODE_FOR_reloaddi_PIC_addr :
3197 CODE_FOR_reloadsi_PIC_addr);
3198
3199 /* Either scratch or no register needed. */
3200 return NO_REGS;
3201 }
3202
3203 /* Generate code to load SRC, which is a PLUS that is not a
3204 legitimate operand for the LA instruction, into TARGET.
3205 SCRATCH may be used as scratch register. */
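/* E.g. for SRC = (plus (reg 18) (reg 9)), where hard register 18 is one
   of the FPRs and can never be part of an address, the FPR operand is
   first copied into SCRATCH and LA is then applied to
   (plus SCRATCH (reg 9)) (register numbers chosen for illustration).  */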
3206
3207 void
3208 s390_expand_plus_operand (rtx target, rtx src,
3209 rtx scratch)
3210 {
3211 rtx sum1, sum2;
3212 struct s390_address ad;
3213
3214 /* src must be a PLUS; get its two operands. */
3215 gcc_assert (GET_CODE (src) == PLUS);
3216 gcc_assert (GET_MODE (src) == Pmode);
3217
3218 /* Check if any of the two operands is already scheduled
3219 for replacement by reload. This can happen e.g. when
3220 float registers occur in an address. */
3221 sum1 = find_replacement (&XEXP (src, 0));
3222 sum2 = find_replacement (&XEXP (src, 1));
3223 src = gen_rtx_PLUS (Pmode, sum1, sum2);
3224
3225 /* If the address is already strictly valid, there's nothing to do. */
3226 if (!s390_decompose_address (src, &ad)
3227 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
3228 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
3229 {
3230 /* Otherwise, one of the operands cannot be an address register;
3231 we reload its value into the scratch register. */
3232 if (true_regnum (sum1) < 1 || true_regnum (sum1) > 15)
3233 {
3234 emit_move_insn (scratch, sum1);
3235 sum1 = scratch;
3236 }
3237 if (true_regnum (sum2) < 1 || true_regnum (sum2) > 15)
3238 {
3239 emit_move_insn (scratch, sum2);
3240 sum2 = scratch;
3241 }
3242
3243 /* According to the way these invalid addresses are generated
3244 in reload.c, it should never happen (at least on s390) that
3245 *neither* of the PLUS components, after find_replacements
3246 was applied, is an address register. */
3247 if (sum1 == scratch && sum2 == scratch)
3248 {
3249 debug_rtx (src);
3250 gcc_unreachable ();
3251 }
3252
3253 src = gen_rtx_PLUS (Pmode, sum1, sum2);
3254 }
3255
3256 /* Emit the LOAD ADDRESS pattern. Note that reload of PLUS
3257 is only ever performed on addresses, so we can mark the
3258 sum as legitimate for LA in any case. */
3259 s390_load_address (target, src);
3260 }
3261
3262
3263 /* Return true if ADDR is a valid memory address.
3264 STRICT specifies whether strict register checking applies. */
3265
3266 static bool
3267 s390_legitimate_address_p (enum machine_mode mode, rtx addr, bool strict)
3268 {
3269 struct s390_address ad;
3270
3271 if (TARGET_Z10
3272 && larl_operand (addr, VOIDmode)
3273 && (mode == VOIDmode
3274 || s390_check_symref_alignment (addr, GET_MODE_SIZE (mode))))
3275 return true;
3276
3277 if (!s390_decompose_address (addr, &ad))
3278 return false;
3279
3280 if (strict)
3281 {
3282 if (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
3283 return false;
3284
3285 if (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx)))
3286 return false;
3287 }
3288 else
3289 {
3290 if (ad.base
3291 && !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
3292 || REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
3293 return false;
3294
3295 if (ad.indx
3296 && !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
3297 || REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
3298 return false;
3299 }
3300 return true;
3301 }
3302
3303 /* Return true if OP is a valid operand for the LA instruction.
3304 In 31-bit, we need to prove that the result is used as an
3305 address, as LA performs only a 31-bit addition. */
3306
3307 bool
3308 legitimate_la_operand_p (rtx op)
3309 {
3310 struct s390_address addr;
3311 if (!s390_decompose_address (op, &addr))
3312 return false;
3313
3314 return (TARGET_64BIT || addr.pointer);
3315 }
3316
3317 /* Return true if it is valid *and* preferable to use LA to
3318 compute the sum of OP1 and OP2. */
3319
3320 bool
3321 preferred_la_operand_p (rtx op1, rtx op2)
3322 {
3323 struct s390_address addr;
3324
3325 if (op2 != const0_rtx)
3326 op1 = gen_rtx_PLUS (Pmode, op1, op2);
3327
3328 if (!s390_decompose_address (op1, &addr))
3329 return false;
3330 if (addr.base && !REGNO_OK_FOR_BASE_P (REGNO (addr.base)))
3331 return false;
3332 if (addr.indx && !REGNO_OK_FOR_INDEX_P (REGNO (addr.indx)))
3333 return false;
3334
3335 /* Avoid LA instructions with index register on z196; it is
3336 preferable to use regular add instructions when possible. */
3337 if (addr.indx && s390_tune == PROCESSOR_2817_Z196)
3338 return false;
3339
3340 if (!TARGET_64BIT && !addr.pointer)
3341 return false;
3342
3343 if (addr.pointer)
3344 return true;
3345
3346 if ((addr.base && REG_P (addr.base) && REG_POINTER (addr.base))
3347 || (addr.indx && REG_P (addr.indx) && REG_POINTER (addr.indx)))
3348 return true;
3349
3350 return false;
3351 }
3352
3353 /* Emit a forced load-address operation to load SRC into DST.
3354 This will use the LOAD ADDRESS instruction even in situations
3355 where legitimate_la_operand_p (SRC) returns false. */
3356
3357 void
3358 s390_load_address (rtx dst, rtx src)
3359 {
3360 if (TARGET_64BIT)
3361 emit_move_insn (dst, src);
3362 else
3363 emit_insn (gen_force_la_31 (dst, src));
3364 }
3365
3366 /* Return a legitimate reference for ORIG (an address) using the
3367 register REG. If REG is 0, a new pseudo is generated.
3368
3369 There are two types of references that must be handled:
3370
3371 1. Global data references must load the address from the GOT, via
3372 the PIC reg. An insn is emitted to do this load, and the reg is
3373 returned.
3374
3375 2. Static data references, constant pool addresses, and code labels
3376 compute the address as an offset from the GOT, whose base is in
3377 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
3378 differentiate them from global data objects. The returned
3379 address is the PIC reg + an unspec constant.
3380
3381 TARGET_LEGITIMIZE_ADDRESS_P rejects symbolic references unless the PIC
3382 reg also appears in the address. */
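/* As an illustration, with flag_pic == 1 a global symbol FOO (hypothetical
   name) is accessed through the GOT as
     (mem (plus (reg %r12) (const (unspec [FOO] UNSPEC_GOT))))
   i.e. a single @GOT-relative load relative to the GOT pointer in %r12.  */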
3383
3384 rtx
3385 legitimize_pic_address (rtx orig, rtx reg)
3386 {
3387 rtx addr = orig;
3388 rtx new_rtx = orig;
3389 rtx base;
3390
3391 gcc_assert (!TLS_SYMBOLIC_CONST (addr));
3392
3393 if (GET_CODE (addr) == LABEL_REF
3394 || (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (addr)))
3395 {
3396 /* This is a local symbol. */
3397 if (TARGET_CPU_ZARCH && larl_operand (addr, VOIDmode))
3398 {
3399 /* Access local symbols PC-relative via LARL.
3400 This is the same as in the non-PIC case, so it is
3401 handled automatically ... */
3402 }
3403 else
3404 {
3405 /* Access local symbols relative to the GOT. */
3406
3407 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3408
3409 if (reload_in_progress || reload_completed)
3410 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3411
3412 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
3413 addr = gen_rtx_CONST (Pmode, addr);
3414 addr = force_const_mem (Pmode, addr);
3415 emit_move_insn (temp, addr);
3416
3417 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3418 if (reg != 0)
3419 {
3420 s390_load_address (reg, new_rtx);
3421 new_rtx = reg;
3422 }
3423 }
3424 }
3425 else if (GET_CODE (addr) == SYMBOL_REF)
3426 {
3427 if (reg == 0)
3428 reg = gen_reg_rtx (Pmode);
3429
3430 if (flag_pic == 1)
3431 {
3432 /* Assume GOT offset < 4k. This is handled the same way
3433 in both 31- and 64-bit code (@GOT). */
3434
3435 if (reload_in_progress || reload_completed)
3436 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3437
3438 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3439 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3440 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
3441 new_rtx = gen_const_mem (Pmode, new_rtx);
3442 emit_move_insn (reg, new_rtx);
3443 new_rtx = reg;
3444 }
3445 else if (TARGET_CPU_ZARCH)
3446 {
3447 /* If the GOT offset might be >= 4k, we determine the position
3448 of the GOT entry via a PC-relative LARL (@GOTENT). */
3449
3450 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3451
3452 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
3453 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
3454
3455 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
3456 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3457 emit_move_insn (temp, new_rtx);
3458
3459 new_rtx = gen_const_mem (Pmode, temp);
3460 emit_move_insn (reg, new_rtx);
3461 new_rtx = reg;
3462 }
3463 else
3464 {
3465 /* If the GOT offset might be >= 4k, we have to load it
3466 from the literal pool (@GOT). */
3467
3468 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3469
3470 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
3471 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
3472
3473 if (reload_in_progress || reload_completed)
3474 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3475
3476 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3477 addr = gen_rtx_CONST (Pmode, addr);
3478 addr = force_const_mem (Pmode, addr);
3479 emit_move_insn (temp, addr);
3480
3481 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3482 new_rtx = gen_const_mem (Pmode, new_rtx);
3483 emit_move_insn (reg, new_rtx);
3484 new_rtx = reg;
3485 }
3486 }
3487 else
3488 {
3489 if (GET_CODE (addr) == CONST)
3490 {
3491 addr = XEXP (addr, 0);
3492 if (GET_CODE (addr) == UNSPEC)
3493 {
3494 gcc_assert (XVECLEN (addr, 0) == 1);
3495 switch (XINT (addr, 1))
3496 {
3497 /* If someone moved a GOT-relative UNSPEC
3498 out of the literal pool, force them back in. */
3499 case UNSPEC_GOTOFF:
3500 case UNSPEC_PLTOFF:
3501 new_rtx = force_const_mem (Pmode, orig);
3502 break;
3503
3504 /* @GOT is OK as is if small. */
3505 case UNSPEC_GOT:
3506 if (flag_pic == 2)
3507 new_rtx = force_const_mem (Pmode, orig);
3508 break;
3509
3510 /* @GOTENT is OK as is. */
3511 case UNSPEC_GOTENT:
3512 break;
3513
3514 /* @PLT is OK as is on 64-bit, must be converted to
3515 GOT-relative @PLTOFF on 31-bit. */
3516 case UNSPEC_PLT:
3517 if (!TARGET_CPU_ZARCH)
3518 {
3519 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3520
3521 if (reload_in_progress || reload_completed)
3522 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3523
3524 addr = XVECEXP (addr, 0, 0);
3525 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr),
3526 UNSPEC_PLTOFF);
3527 addr = gen_rtx_CONST (Pmode, addr);
3528 addr = force_const_mem (Pmode, addr);
3529 emit_move_insn (temp, addr);
3530
3531 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3532 if (reg != 0)
3533 {
3534 s390_load_address (reg, new_rtx);
3535 new_rtx = reg;
3536 }
3537 }
3538 break;
3539
3540 /* Everything else cannot happen. */
3541 default:
3542 gcc_unreachable ();
3543 }
3544 }
3545 else
3546 gcc_assert (GET_CODE (addr) == PLUS);
3547 }
3548 if (GET_CODE (addr) == PLUS)
3549 {
3550 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
3551
3552 gcc_assert (!TLS_SYMBOLIC_CONST (op0));
3553 gcc_assert (!TLS_SYMBOLIC_CONST (op1));
3554
3555 /* Check first to see if this is a constant offset
3556 from a local symbol reference. */
3557 if ((GET_CODE (op0) == LABEL_REF
3558 || (GET_CODE (op0) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (op0)))
3559 && GET_CODE (op1) == CONST_INT)
3560 {
3561 if (TARGET_CPU_ZARCH
3562 && larl_operand (op0, VOIDmode)
3563 && INTVAL (op1) < (HOST_WIDE_INT)1 << 31
3564 && INTVAL (op1) >= -((HOST_WIDE_INT)1 << 31))
3565 {
3566 if (INTVAL (op1) & 1)
3567 {
3568 /* LARL can't handle odd offsets, so emit a
3569 pair of LARL and LA. */
3570 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3571
3572 if (!DISP_IN_RANGE (INTVAL (op1)))
3573 {
3574 HOST_WIDE_INT even = INTVAL (op1) - 1;
3575 op0 = gen_rtx_PLUS (Pmode, op0, GEN_INT (even));
3576 op0 = gen_rtx_CONST (Pmode, op0);
3577 op1 = const1_rtx;
3578 }
3579
3580 emit_move_insn (temp, op0);
3581 new_rtx = gen_rtx_PLUS (Pmode, temp, op1);
3582
3583 if (reg != 0)
3584 {
3585 s390_load_address (reg, new_rtx);
3586 new_rtx = reg;
3587 }
3588 }
3589 else
3590 {
3591 /* If the offset is even, we can just use LARL.
3592 This will happen automatically. */
3593 }
3594 }
3595 else
3596 {
3597 /* Access local symbols relative to the GOT. */
3598
3599 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3600
3601 if (reload_in_progress || reload_completed)
3602 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3603
3604 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
3605 UNSPEC_GOTOFF);
3606 addr = gen_rtx_PLUS (Pmode, addr, op1);
3607 addr = gen_rtx_CONST (Pmode, addr);
3608 addr = force_const_mem (Pmode, addr);
3609 emit_move_insn (temp, addr);
3610
3611 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3612 if (reg != 0)
3613 {
3614 s390_load_address (reg, new_rtx);
3615 new_rtx = reg;
3616 }
3617 }
3618 }
3619
3620 /* Now, check whether it is a GOT relative symbol plus offset
3621 that was pulled out of the literal pool. Force it back in. */
3622
3623 else if (GET_CODE (op0) == UNSPEC
3624 && GET_CODE (op1) == CONST_INT
3625 && XINT (op0, 1) == UNSPEC_GOTOFF)
3626 {
3627 gcc_assert (XVECLEN (op0, 0) == 1);
3628
3629 new_rtx = force_const_mem (Pmode, orig);
3630 }
3631
3632 /* Otherwise, compute the sum. */
3633 else
3634 {
3635 base = legitimize_pic_address (XEXP (addr, 0), reg);
3636 new_rtx = legitimize_pic_address (XEXP (addr, 1),
3637 base == reg ? NULL_RTX : reg);
3638 if (GET_CODE (new_rtx) == CONST_INT)
3639 new_rtx = plus_constant (base, INTVAL (new_rtx));
3640 else
3641 {
3642 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
3643 {
3644 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
3645 new_rtx = XEXP (new_rtx, 1);
3646 }
3647 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
3648 }
3649
3650 if (GET_CODE (new_rtx) == CONST)
3651 new_rtx = XEXP (new_rtx, 0);
3652 new_rtx = force_operand (new_rtx, 0);
3653 }
3654 }
3655 }
3656 return new_rtx;
3657 }
3658
3659 /* Load the thread pointer into a register. */
3660
3661 rtx
3662 s390_get_thread_pointer (void)
3663 {
3664 rtx tp = gen_reg_rtx (Pmode);
3665
3666 emit_move_insn (tp, gen_rtx_REG (Pmode, TP_REGNUM));
3667 mark_reg_pointer (tp, BITS_PER_WORD);
3668
3669 return tp;
3670 }
3671
3672 /* Emit a TLS call insn. The call target is the SYMBOL_REF stored
3673 in s390_tls_symbol which always refers to __tls_get_offset.
3674 The returned offset is written to RESULT_REG and a USE rtx is
3675 generated for TLS_CALL. */
3676
3677 static GTY(()) rtx s390_tls_symbol;
3678
3679 static void
3680 s390_emit_tls_call_insn (rtx result_reg, rtx tls_call)
3681 {
3682 rtx insn;
3683
3684 gcc_assert (flag_pic);
3685
3686 if (!s390_tls_symbol)
3687 s390_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_offset");
3688
3689 insn = s390_emit_call (s390_tls_symbol, tls_call, result_reg,
3690 gen_rtx_REG (Pmode, RETURN_REGNUM));
3691
3692 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), result_reg);
3693 RTL_CONST_CALL_P (insn) = 1;
3694 }
3695
3696 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3697 this (thread-local) address. REG may be used as temporary. */
3698
3699 static rtx
3700 legitimize_tls_address (rtx addr, rtx reg)
3701 {
3702 rtx new_rtx, tls_call, temp, base, r2, insn;
3703
3704 if (GET_CODE (addr) == SYMBOL_REF)
3705 switch (tls_symbolic_operand (addr))
3706 {
3707 case TLS_MODEL_GLOBAL_DYNAMIC:
3708 start_sequence ();
3709 r2 = gen_rtx_REG (Pmode, 2);
3710 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_TLSGD);
3711 new_rtx = gen_rtx_CONST (Pmode, tls_call);
3712 new_rtx = force_const_mem (Pmode, new_rtx);
3713 emit_move_insn (r2, new_rtx);
3714 s390_emit_tls_call_insn (r2, tls_call);
3715 insn = get_insns ();
3716 end_sequence ();
3717
3718 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3719 temp = gen_reg_rtx (Pmode);
3720 emit_libcall_block (insn, temp, r2, new_rtx);
3721
3722 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3723 if (reg != 0)
3724 {
3725 s390_load_address (reg, new_rtx);
3726 new_rtx = reg;
3727 }
3728 break;
3729
3730 case TLS_MODEL_LOCAL_DYNAMIC:
3731 start_sequence ();
3732 r2 = gen_rtx_REG (Pmode, 2);
3733 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM);
3734 new_rtx = gen_rtx_CONST (Pmode, tls_call);
3735 new_rtx = force_const_mem (Pmode, new_rtx);
3736 emit_move_insn (r2, new_rtx);
3737 s390_emit_tls_call_insn (r2, tls_call);
3738 insn = get_insns ();
3739 end_sequence ();
3740
3741 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM_NTPOFF);
3742 temp = gen_reg_rtx (Pmode);
3743 emit_libcall_block (insn, temp, r2, new_rtx);
3744
3745 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3746 base = gen_reg_rtx (Pmode);
3747 s390_load_address (base, new_rtx);
3748
3749 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_DTPOFF);
3750 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3751 new_rtx = force_const_mem (Pmode, new_rtx);
3752 temp = gen_reg_rtx (Pmode);
3753 emit_move_insn (temp, new_rtx);
3754
3755 new_rtx = gen_rtx_PLUS (Pmode, base, temp);
3756 if (reg != 0)
3757 {
3758 s390_load_address (reg, new_rtx);
3759 new_rtx = reg;
3760 }
3761 break;
3762
3763 case TLS_MODEL_INITIAL_EXEC:
3764 if (flag_pic == 1)
3765 {
3766 /* Assume GOT offset < 4k. This is handled the same way
3767 in both 31- and 64-bit code. */
3768
3769 if (reload_in_progress || reload_completed)
3770 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3771
3772 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3773 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3774 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
3775 new_rtx = gen_const_mem (Pmode, new_rtx);
3776 temp = gen_reg_rtx (Pmode);
3777 emit_move_insn (temp, new_rtx);
3778 }
3779 else if (TARGET_CPU_ZARCH)
3780 {
3781 /* If the GOT offset might be >= 4k, we determine the position
3782 of the GOT entry via a PC-relative LARL. */
3783
3784 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3785 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3786 temp = gen_reg_rtx (Pmode);
3787 emit_move_insn (temp, new_rtx);
3788
3789 new_rtx = gen_const_mem (Pmode, temp);
3790 temp = gen_reg_rtx (Pmode);
3791 emit_move_insn (temp, new_rtx);
3792 }
3793 else if (flag_pic)
3794 {
3795 /* If the GOT offset might be >= 4k, we have to load it
3796 from the literal pool. */
3797
3798 if (reload_in_progress || reload_completed)
3799 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3800
3801 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3802 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3803 new_rtx = force_const_mem (Pmode, new_rtx);
3804 temp = gen_reg_rtx (Pmode);
3805 emit_move_insn (temp, new_rtx);
3806
3807 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3808 new_rtx = gen_const_mem (Pmode, new_rtx);
3809
3810 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
3811 temp = gen_reg_rtx (Pmode);
3812 emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
3813 }
3814 else
3815 {
3816 /* In position-dependent code, load the absolute address of
3817 the GOT entry from the literal pool. */
3818
3819 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3820 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3821 new_rtx = force_const_mem (Pmode, new_rtx);
3822 temp = gen_reg_rtx (Pmode);
3823 emit_move_insn (temp, new_rtx);
3824
3825 new_rtx = temp;
3826 new_rtx = gen_const_mem (Pmode, new_rtx);
3827 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
3828 temp = gen_reg_rtx (Pmode);
3829 emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
3830 }
3831
3832 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3833 if (reg != 0)
3834 {
3835 s390_load_address (reg, new_rtx);
3836 new_rtx = reg;
3837 }
3838 break;
3839
3840 case TLS_MODEL_LOCAL_EXEC:
3841 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3842 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3843 new_rtx = force_const_mem (Pmode, new_rtx);
3844 temp = gen_reg_rtx (Pmode);
3845 emit_move_insn (temp, new_rtx);
3846
3847 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3848 if (reg != 0)
3849 {
3850 s390_load_address (reg, new_rtx);
3851 new_rtx = reg;
3852 }
3853 break;
3854
3855 default:
3856 gcc_unreachable ();
3857 }
3858
3859 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == UNSPEC)
3860 {
3861 switch (XINT (XEXP (addr, 0), 1))
3862 {
3863 case UNSPEC_INDNTPOFF:
3864 gcc_assert (TARGET_CPU_ZARCH);
3865 new_rtx = addr;
3866 break;
3867
3868 default:
3869 gcc_unreachable ();
3870 }
3871 }
3872
3873 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
3874 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
3875 {
3876 new_rtx = XEXP (XEXP (addr, 0), 0);
3877 if (GET_CODE (new_rtx) != SYMBOL_REF)
3878 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3879
3880 new_rtx = legitimize_tls_address (new_rtx, reg);
3881 new_rtx = plus_constant (new_rtx, INTVAL (XEXP (XEXP (addr, 0), 1)));
3882 new_rtx = force_operand (new_rtx, 0);
3883 }
3884
3885 else
3886 gcc_unreachable (); /* for now ... */
3887
3888 return new_rtx;
3889 }
3890
3891 /* Emit insns making the address in operands[1] valid for a standard
3892 move to operands[0]. operands[1] is replaced by an address which
3893 should be used instead of the former RTX to emit the move
3894 pattern. */
3895
3896 void
3897 emit_symbolic_move (rtx *operands)
3898 {
3899 rtx temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
3900
3901 if (GET_CODE (operands[0]) == MEM)
3902 operands[1] = force_reg (Pmode, operands[1]);
3903 else if (TLS_SYMBOLIC_CONST (operands[1]))
3904 operands[1] = legitimize_tls_address (operands[1], temp);
3905 else if (flag_pic)
3906 operands[1] = legitimize_pic_address (operands[1], temp);
3907 }
3908
3909 /* Try machine-dependent ways of modifying an illegitimate address X
3910 to be legitimate. If we find one, return the new, valid address.
3911
3912 OLDX is the address as it was before break_out_memory_refs was called.
3913 In some cases it is useful to look at this to decide what needs to be done.
3914
3915 MODE is the mode of the operand pointed to by X.
3916
3917 When -fpic is used, special handling is needed for symbolic references.
3918 See comments by legitimize_pic_address for details. */
3919
3920 static rtx
3921 s390_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
3922 enum machine_mode mode ATTRIBUTE_UNUSED)
3923 {
3924 rtx constant_term = const0_rtx;
3925
3926 if (TLS_SYMBOLIC_CONST (x))
3927 {
3928 x = legitimize_tls_address (x, 0);
3929
3930 if (s390_legitimate_address_p (mode, x, FALSE))
3931 return x;
3932 }
3933 else if (GET_CODE (x) == PLUS
3934 && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
3935 || TLS_SYMBOLIC_CONST (XEXP (x, 1))))
3936 {
3937 return x;
3938 }
3939 else if (flag_pic)
3940 {
3941 if (SYMBOLIC_CONST (x)
3942 || (GET_CODE (x) == PLUS
3943 && (SYMBOLIC_CONST (XEXP (x, 0))
3944 || SYMBOLIC_CONST (XEXP (x, 1)))))
3945 x = legitimize_pic_address (x, 0);
3946
3947 if (s390_legitimate_address_p (mode, x, FALSE))
3948 return x;
3949 }
3950
3951 x = eliminate_constant_term (x, &constant_term);
3952
3953 /* Optimize loading of large displacements by splitting them
3954 into the multiple of 4K and the rest; this allows the
3955 former to be CSE'd if possible.
3956
3957 Don't do this if the displacement is added to a register
3958 pointing into the stack frame, as the offsets will
3959 change later anyway. */
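  /* For instance, a constant term of 0x12345 is split into
     lower = 0x12345 & 0xfff = 0x345 and upper = 0x12345 ^ 0x345 = 0x12000;
     the 0x12000 part is loaded into a register (and can be CSE'd across
     references), while 0x345 remains as an in-range displacement.  */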
3960
3961 if (GET_CODE (constant_term) == CONST_INT
3962 && !TARGET_LONG_DISPLACEMENT
3963 && !DISP_IN_RANGE (INTVAL (constant_term))
3964 && !(REG_P (x) && REGNO_PTR_FRAME_P (REGNO (x))))
3965 {
3966 HOST_WIDE_INT lower = INTVAL (constant_term) & 0xfff;
3967 HOST_WIDE_INT upper = INTVAL (constant_term) ^ lower;
3968
3969 rtx temp = gen_reg_rtx (Pmode);
3970 rtx val = force_operand (GEN_INT (upper), temp);
3971 if (val != temp)
3972 emit_move_insn (temp, val);
3973
3974 x = gen_rtx_PLUS (Pmode, x, temp);
3975 constant_term = GEN_INT (lower);
3976 }
3977
3978 if (GET_CODE (x) == PLUS)
3979 {
3980 if (GET_CODE (XEXP (x, 0)) == REG)
3981 {
3982 rtx temp = gen_reg_rtx (Pmode);
3983 rtx val = force_operand (XEXP (x, 1), temp);
3984 if (val != temp)
3985 emit_move_insn (temp, val);
3986
3987 x = gen_rtx_PLUS (Pmode, XEXP (x, 0), temp);
3988 }
3989
3990 else if (GET_CODE (XEXP (x, 1)) == REG)
3991 {
3992 rtx temp = gen_reg_rtx (Pmode);
3993 rtx val = force_operand (XEXP (x, 0), temp);
3994 if (val != temp)
3995 emit_move_insn (temp, val);
3996
3997 x = gen_rtx_PLUS (Pmode, temp, XEXP (x, 1));
3998 }
3999 }
4000
4001 if (constant_term != const0_rtx)
4002 x = gen_rtx_PLUS (Pmode, x, constant_term);
4003
4004 return x;
4005 }
4006
4007 /* Try a machine-dependent way of reloading an illegitimate address AD
4008 operand. If we find one, push the reload and return the new address.
4009
4010 MODE is the mode of the enclosing MEM. OPNUM is the operand number
4011 and TYPE is the reload type of the current reload. */
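/* As an illustration: given the address (plus (reg X) (const_int 0x2345))
   and no long displacements, the constant is split into 0x2000 and 0x345;
   the 0x2000 part is pushed as a reload into an address register, and the
   returned address is the sum of the original base, that register, and
   the in-range displacement 0x345.  */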
4012
4013 rtx
4014 legitimize_reload_address (rtx ad, enum machine_mode mode ATTRIBUTE_UNUSED,
4015 int opnum, int type)
4016 {
4017 if (!optimize || TARGET_LONG_DISPLACEMENT)
4018 return NULL_RTX;
4019
4020 if (GET_CODE (ad) == PLUS)
4021 {
4022 rtx tem = simplify_binary_operation (PLUS, Pmode,
4023 XEXP (ad, 0), XEXP (ad, 1));
4024 if (tem)
4025 ad = tem;
4026 }
4027
4028 if (GET_CODE (ad) == PLUS
4029 && GET_CODE (XEXP (ad, 0)) == REG
4030 && GET_CODE (XEXP (ad, 1)) == CONST_INT
4031 && !DISP_IN_RANGE (INTVAL (XEXP (ad, 1))))
4032 {
4033 HOST_WIDE_INT lower = INTVAL (XEXP (ad, 1)) & 0xfff;
4034 HOST_WIDE_INT upper = INTVAL (XEXP (ad, 1)) ^ lower;
4035 rtx cst, tem, new_rtx;
4036
4037 cst = GEN_INT (upper);
4038 if (!legitimate_reload_constant_p (cst))
4039 cst = force_const_mem (Pmode, cst);
4040
4041 tem = gen_rtx_PLUS (Pmode, XEXP (ad, 0), cst);
4042 new_rtx = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));
4043
4044 push_reload (XEXP (tem, 1), 0, &XEXP (tem, 1), 0,
4045 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
4046 opnum, (enum reload_type) type);
4047 return new_rtx;
4048 }
4049
4050 return NULL_RTX;
4051 }
4052
4053 /* Emit code to move LEN bytes from SRC to DST. */
4054
4055 void
4056 s390_expand_movmem (rtx dst, rtx src, rtx len)
4057 {
4058 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
4059 {
4060 if (INTVAL (len) > 0)
4061 emit_insn (gen_movmem_short (dst, src, GEN_INT (INTVAL (len) - 1)));
4062 }
4063
4064 else if (TARGET_MVCLE)
4065 {
4066 emit_insn (gen_movmem_long (dst, src, convert_to_mode (Pmode, len, 1)));
4067 }
4068
4069 else
4070 {
4071 rtx dst_addr, src_addr, count, blocks, temp;
4072 rtx loop_start_label = gen_label_rtx ();
4073 rtx loop_end_label = gen_label_rtx ();
4074 rtx end_label = gen_label_rtx ();
4075 enum machine_mode mode;
4076
4077 mode = GET_MODE (len);
4078 if (mode == VOIDmode)
4079 mode = Pmode;
4080
4081 dst_addr = gen_reg_rtx (Pmode);
4082 src_addr = gen_reg_rtx (Pmode);
4083 count = gen_reg_rtx (mode);
4084 blocks = gen_reg_rtx (mode);
4085
4086 convert_move (count, len, 1);
4087 emit_cmp_and_jump_insns (count, const0_rtx,
4088 EQ, NULL_RTX, mode, 1, end_label);
4089
4090 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
4091 emit_move_insn (src_addr, force_operand (XEXP (src, 0), NULL_RTX));
4092 dst = change_address (dst, VOIDmode, dst_addr);
4093 src = change_address (src, VOIDmode, src_addr);
4094
4095 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4096 OPTAB_DIRECT);
4097 if (temp != count)
4098 emit_move_insn (count, temp);
4099
4100 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4101 OPTAB_DIRECT);
4102 if (temp != blocks)
4103 emit_move_insn (blocks, temp);
4104
4105 emit_cmp_and_jump_insns (blocks, const0_rtx,
4106 EQ, NULL_RTX, mode, 1, loop_end_label);
4107
4108 emit_label (loop_start_label);
4109
4110 if (TARGET_Z10
4111 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 768))
4112 {
4113 rtx prefetch;
4114
4115 /* Issue a read prefetch for the +3 cache line. */
4116 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, src_addr, GEN_INT (768)),
4117 const0_rtx, const0_rtx);
4118 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4119 emit_insn (prefetch);
4120
4121 /* Issue a write prefetch for the +3 cache line. */
4122 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (768)),
4123 const1_rtx, const0_rtx);
4124 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4125 emit_insn (prefetch);
4126 }
4127
4128 emit_insn (gen_movmem_short (dst, src, GEN_INT (255)));
4129 s390_load_address (dst_addr,
4130 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
4131 s390_load_address (src_addr,
4132 gen_rtx_PLUS (Pmode, src_addr, GEN_INT (256)));
4133
4134 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4135 OPTAB_DIRECT);
4136 if (temp != blocks)
4137 emit_move_insn (blocks, temp);
4138
4139 emit_cmp_and_jump_insns (blocks, const0_rtx,
4140 EQ, NULL_RTX, mode, 1, loop_end_label);
4141
4142 emit_jump (loop_start_label);
4143 emit_label (loop_end_label);
4144
4145 emit_insn (gen_movmem_short (dst, src,
4146 convert_to_mode (Pmode, count, 1)));
4147 emit_label (end_label);
4148 }
4149 }
4150
4151 /* Emit code to set LEN bytes at DST to VAL.
4152 Make use of clrmem if VAL is zero. */
4153
4154 void
4155 s390_expand_setmem (rtx dst, rtx len, rtx val)
4156 {
4157 if (GET_CODE (len) == CONST_INT && INTVAL (len) == 0)
4158 return;
4159
4160 gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);
4161
4162 if (GET_CODE (len) == CONST_INT && INTVAL (len) > 0 && INTVAL (len) <= 257)
4163 {
4164 if (val == const0_rtx && INTVAL (len) <= 256)
4165 emit_insn (gen_clrmem_short (dst, GEN_INT (INTVAL (len) - 1)));
4166 else
4167 {
4168 /* Initialize memory by storing the first byte. */
4169 emit_move_insn (adjust_address (dst, QImode, 0), val);
4170
4171 if (INTVAL (len) > 1)
4172 {
4173 /* Initiate 1 byte overlap move.
4174 The first byte of DST is propagated through DSTP1.
4175 Prepare a movmem for: DST+1 = DST (length = LEN - 1).
4176 DST is set to size 1 so the rest of the memory location
4177 does not count as source operand. */
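	  /* For example, with LEN = 4: VAL is first stored at DST[0];
	     the MVC of length 3 then copies DST[0..2] to DST[1..3]
	     one byte at a time, left to right, so the stored byte
	     ripples through the whole block.  */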
4178 rtx dstp1 = adjust_address (dst, VOIDmode, 1);
4179 set_mem_size (dst, const1_rtx);
4180
4181 emit_insn (gen_movmem_short (dstp1, dst,
4182 GEN_INT (INTVAL (len) - 2)));
4183 }
4184 }
4185 }
4186
4187 else if (TARGET_MVCLE)
4188 {
4189 val = force_not_mem (convert_modes (Pmode, QImode, val, 1));
4190 emit_insn (gen_setmem_long (dst, convert_to_mode (Pmode, len, 1), val));
4191 }
4192
4193 else
4194 {
4195 rtx dst_addr, count, blocks, temp, dstp1 = NULL_RTX;
4196 rtx loop_start_label = gen_label_rtx ();
4197 rtx loop_end_label = gen_label_rtx ();
4198 rtx end_label = gen_label_rtx ();
4199 enum machine_mode mode;
4200
4201 mode = GET_MODE (len);
4202 if (mode == VOIDmode)
4203 mode = Pmode;
4204
4205 dst_addr = gen_reg_rtx (Pmode);
4206 count = gen_reg_rtx (mode);
4207 blocks = gen_reg_rtx (mode);
4208
4209 convert_move (count, len, 1);
4210 emit_cmp_and_jump_insns (count, const0_rtx,
4211 EQ, NULL_RTX, mode, 1, end_label);
4212
4213 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
4214 dst = change_address (dst, VOIDmode, dst_addr);
4215
4216 if (val == const0_rtx)
4217 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4218 OPTAB_DIRECT);
4219 else
4220 {
4221 dstp1 = adjust_address (dst, VOIDmode, 1);
4222 set_mem_size (dst, const1_rtx);
4223
4224 /* Initialize memory by storing the first byte. */
4225 emit_move_insn (adjust_address (dst, QImode, 0), val);
4226
4227 /* If count is 1 we are done. */
4228 emit_cmp_and_jump_insns (count, const1_rtx,
4229 EQ, NULL_RTX, mode, 1, end_label);
4230
4231 temp = expand_binop (mode, add_optab, count, GEN_INT (-2), count, 1,
4232 OPTAB_DIRECT);
4233 }
4234 if (temp != count)
4235 emit_move_insn (count, temp);
4236
4237 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4238 OPTAB_DIRECT);
4239 if (temp != blocks)
4240 emit_move_insn (blocks, temp);
4241
4242 emit_cmp_and_jump_insns (blocks, const0_rtx,
4243 EQ, NULL_RTX, mode, 1, loop_end_label);
4244
4245 emit_label (loop_start_label);
4246
4247 if (TARGET_Z10
4248 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 1024))
4249 {
4250 /* Issue a write prefetch for the +4 cache line. */
4251 rtx prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr,
4252 GEN_INT (1024)),
4253 const1_rtx, const0_rtx);
4254 emit_insn (prefetch);
4255 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4256 }
4257
4258 if (val == const0_rtx)
4259 emit_insn (gen_clrmem_short (dst, GEN_INT (255)));
4260 else
4261 emit_insn (gen_movmem_short (dstp1, dst, GEN_INT (255)));
4262 s390_load_address (dst_addr,
4263 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
4264
4265 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4266 OPTAB_DIRECT);
4267 if (temp != blocks)
4268 emit_move_insn (blocks, temp);
4269
4270 emit_cmp_and_jump_insns (blocks, const0_rtx,
4271 EQ, NULL_RTX, mode, 1, loop_end_label);
4272
4273 emit_jump (loop_start_label);
4274 emit_label (loop_end_label);
4275
4276 if (val == const0_rtx)
4277 emit_insn (gen_clrmem_short (dst, convert_to_mode (Pmode, count, 1)));
4278 else
4279 emit_insn (gen_movmem_short (dstp1, dst, convert_to_mode (Pmode, count, 1)));
4280 emit_label (end_label);
4281 }
4282 }
4283
4284 /* Emit code to compare LEN bytes at OP0 with those at OP1,
4285 and return the result in TARGET. */
4286
4287 void
4288 s390_expand_cmpmem (rtx target, rtx op0, rtx op1, rtx len)
4289 {
4290 rtx ccreg = gen_rtx_REG (CCUmode, CC_REGNUM);
4291 rtx tmp;
4292
4293 /* As the result of CMPINT is inverted compared to what we need,
4294 we have to swap the operands. */
4295 tmp = op0; op0 = op1; op1 = tmp;
4296
4297 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
4298 {
4299 if (INTVAL (len) > 0)
4300 {
4301 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (INTVAL (len) - 1)));
4302 emit_insn (gen_cmpint (target, ccreg));
4303 }
4304 else
4305 emit_move_insn (target, const0_rtx);
4306 }
4307 else if (TARGET_MVCLE)
4308 {
4309 emit_insn (gen_cmpmem_long (op0, op1, convert_to_mode (Pmode, len, 1)));
4310 emit_insn (gen_cmpint (target, ccreg));
4311 }
4312 else
4313 {
4314 rtx addr0, addr1, count, blocks, temp;
4315 rtx loop_start_label = gen_label_rtx ();
4316 rtx loop_end_label = gen_label_rtx ();
4317 rtx end_label = gen_label_rtx ();
4318 enum machine_mode mode;
4319
4320 mode = GET_MODE (len);
4321 if (mode == VOIDmode)
4322 mode = Pmode;
4323
4324 addr0 = gen_reg_rtx (Pmode);
4325 addr1 = gen_reg_rtx (Pmode);
4326 count = gen_reg_rtx (mode);
4327 blocks = gen_reg_rtx (mode);
4328
4329 convert_move (count, len, 1);
4330 emit_cmp_and_jump_insns (count, const0_rtx,
4331 EQ, NULL_RTX, mode, 1, end_label);
4332
4333 emit_move_insn (addr0, force_operand (XEXP (op0, 0), NULL_RTX));
4334 emit_move_insn (addr1, force_operand (XEXP (op1, 0), NULL_RTX));
4335 op0 = change_address (op0, VOIDmode, addr0);
4336 op1 = change_address (op1, VOIDmode, addr1);
4337
4338 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4339 OPTAB_DIRECT);
4340 if (temp != count)
4341 emit_move_insn (count, temp);
4342
4343 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4344 OPTAB_DIRECT);
4345 if (temp != blocks)
4346 emit_move_insn (blocks, temp);
4347
4348 emit_cmp_and_jump_insns (blocks, const0_rtx,
4349 EQ, NULL_RTX, mode, 1, loop_end_label);
4350
4351 emit_label (loop_start_label);
4352
4353 if (TARGET_Z10
4354 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 512))
4355 {
4356 rtx prefetch;
4357
4358 /* Issue a read prefetch for the +2 cache line of operand 1. */
4359 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr0, GEN_INT (512)),
4360 const0_rtx, const0_rtx);
4361 emit_insn (prefetch);
4362 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4363
4364 /* Issue a read prefetch for the +2 cache line of operand 2. */
4365 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr1, GEN_INT (512)),
4366 const0_rtx, const0_rtx);
4367 emit_insn (prefetch);
4368 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4369 }
4370
4371 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (255)));
4372 temp = gen_rtx_NE (VOIDmode, ccreg, const0_rtx);
4373 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
4374 gen_rtx_LABEL_REF (VOIDmode, end_label), pc_rtx);
4375 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
4376 emit_jump_insn (temp);
4377
4378 s390_load_address (addr0,
4379 gen_rtx_PLUS (Pmode, addr0, GEN_INT (256)));
4380 s390_load_address (addr1,
4381 gen_rtx_PLUS (Pmode, addr1, GEN_INT (256)));
4382
4383 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4384 OPTAB_DIRECT);
4385 if (temp != blocks)
4386 emit_move_insn (blocks, temp);
4387
4388 emit_cmp_and_jump_insns (blocks, const0_rtx,
4389 EQ, NULL_RTX, mode, 1, loop_end_label);
4390
4391 emit_jump (loop_start_label);
4392 emit_label (loop_end_label);
4393
4394 emit_insn (gen_cmpmem_short (op0, op1,
4395 convert_to_mode (Pmode, count, 1)));
4396 emit_label (end_label);
4397
4398 emit_insn (gen_cmpint (target, ccreg));
4399 }
4400 }
4401
4402
4403 /* Expand conditional increment or decrement using alc/slb instructions.
4404 Should generate code setting DST to either SRC or SRC + INCREMENT,
4405 depending on the result of the comparison CMP_OP0 CMP_CODE CMP_OP1.
4406 Returns true if successful, false otherwise.
4407
4408 That makes it possible to implement some if-constructs without jumps e.g.:
4409 (borrow = CC0 | CC1 and carry = CC2 | CC3)
4410 unsigned int a, b, c;
4411 if (a < b) c++; -> CCU b > a -> CC2; c += carry;
4412 if (a < b) c--; -> CCL3 a - b -> borrow; c -= borrow;
4413 if (a <= b) c++; -> CCL3 b - a -> borrow; c += carry;
4414 if (a <= b) c--; -> CCU a <= b -> borrow; c -= borrow;
4415
4416 Checks for EQ and NE with a nonzero value need an additional xor e.g.:
4417 if (a == b) c++; -> CCL3 a ^= b; 0 - a -> borrow; c += carry;
4418 if (a == b) c--; -> CCU a ^= b; a <= 0 -> CC0 | CC1; c -= borrow;
4419 if (a != b) c++; -> CCU a ^= b; a > 0 -> CC2; c += carry;
4420 if (a != b) c--; -> CCL3 a ^= b; 0 - a -> borrow; c -= borrow; */
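/* For instance, in the ALC case with INCREMENT == const1_rtx and a nonzero
   SRC, the expansion below roughly emits (modes omitted):

     (set (reg CC) (compare cmp_op0 cmp_op1))
     (parallel [(set dst (plus (plus (gtu (reg CC) (const_int 0)) src)
			       (const_int 0)))
		(clobber (reg CC))])  */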
4421
4422 bool
4423 s390_expand_addcc (enum rtx_code cmp_code, rtx cmp_op0, rtx cmp_op1,
4424 rtx dst, rtx src, rtx increment)
4425 {
4426 enum machine_mode cmp_mode;
4427 enum machine_mode cc_mode;
4428 rtx op_res;
4429 rtx insn;
4430 rtvec p;
4431 int ret;
4432
4433 if ((GET_MODE (cmp_op0) == SImode || GET_MODE (cmp_op0) == VOIDmode)
4434 && (GET_MODE (cmp_op1) == SImode || GET_MODE (cmp_op1) == VOIDmode))
4435 cmp_mode = SImode;
4436 else if ((GET_MODE (cmp_op0) == DImode || GET_MODE (cmp_op0) == VOIDmode)
4437 && (GET_MODE (cmp_op1) == DImode || GET_MODE (cmp_op1) == VOIDmode))
4438 cmp_mode = DImode;
4439 else
4440 return false;
4441
4442 /* Try ADD LOGICAL WITH CARRY. */
4443 if (increment == const1_rtx)
4444 {
4445 /* Determine CC mode to use. */
4446 if (cmp_code == EQ || cmp_code == NE)
4447 {
4448 if (cmp_op1 != const0_rtx)
4449 {
4450 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
4451 NULL_RTX, 0, OPTAB_WIDEN);
4452 cmp_op1 = const0_rtx;
4453 }
4454
4455 cmp_code = cmp_code == EQ ? LEU : GTU;
4456 }
4457
4458 if (cmp_code == LTU || cmp_code == LEU)
4459 {
4460 rtx tem = cmp_op0;
4461 cmp_op0 = cmp_op1;
4462 cmp_op1 = tem;
4463 cmp_code = swap_condition (cmp_code);
4464 }
4465
4466 switch (cmp_code)
4467 {
4468 case GTU:
4469 cc_mode = CCUmode;
4470 break;
4471
4472 case GEU:
4473 cc_mode = CCL3mode;
4474 break;
4475
4476 default:
4477 return false;
4478 }
4479
4480 /* Emit comparison instruction pattern. */
4481 if (!register_operand (cmp_op0, cmp_mode))
4482 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4483
4484 insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
4485 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4486 /* We use insn_invalid_p here to add clobbers if required. */
4487 ret = insn_invalid_p (emit_insn (insn));
4488 gcc_assert (!ret);
4489
4490 /* Emit ALC instruction pattern. */
4491 op_res = gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4492 gen_rtx_REG (cc_mode, CC_REGNUM),
4493 const0_rtx);
4494
4495 if (src != const0_rtx)
4496 {
4497 if (!register_operand (src, GET_MODE (dst)))
4498 src = force_reg (GET_MODE (dst), src);
4499
4500 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, src);
4501 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, const0_rtx);
4502 }
4503
4504 p = rtvec_alloc (2);
4505 RTVEC_ELT (p, 0) =
4506 gen_rtx_SET (VOIDmode, dst, op_res);
4507 RTVEC_ELT (p, 1) =
4508 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4509 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
4510
4511 return true;
4512 }
4513
4514 /* Try SUBTRACT LOGICAL WITH BORROW. */
4515 if (increment == constm1_rtx)
4516 {
4517 /* Determine CC mode to use. */
4518 if (cmp_code == EQ || cmp_code == NE)
4519 {
4520 if (cmp_op1 != const0_rtx)
4521 {
4522 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
4523 NULL_RTX, 0, OPTAB_WIDEN);
4524 cmp_op1 = const0_rtx;
4525 }
4526
4527 cmp_code = cmp_code == EQ ? LEU : GTU;
4528 }
4529
4530 if (cmp_code == GTU || cmp_code == GEU)
4531 {
4532 rtx tem = cmp_op0;
4533 cmp_op0 = cmp_op1;
4534 cmp_op1 = tem;
4535 cmp_code = swap_condition (cmp_code);
4536 }
4537
4538 switch (cmp_code)
4539 {
4540 case LEU:
4541 cc_mode = CCUmode;
4542 break;
4543
4544 case LTU:
4545 cc_mode = CCL3mode;
4546 break;
4547
4548 default:
4549 return false;
4550 }
4551
4552 /* Emit comparison instruction pattern. */
4553 if (!register_operand (cmp_op0, cmp_mode))
4554 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4555
4556 insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
4557 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4558 /* We use insn_invalid_p here to add clobbers if required. */
4559 ret = insn_invalid_p (emit_insn (insn));
4560 gcc_assert (!ret);
4561
4562 /* Emit SLB instruction pattern. */
4563 if (!register_operand (src, GET_MODE (dst)))
4564 src = force_reg (GET_MODE (dst), src);
4565
4566 op_res = gen_rtx_MINUS (GET_MODE (dst),
4567 gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
4568 gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4569 gen_rtx_REG (cc_mode, CC_REGNUM),
4570 const0_rtx));
4571 p = rtvec_alloc (2);
4572 RTVEC_ELT (p, 0) =
4573 gen_rtx_SET (VOIDmode, dst, op_res);
4574 RTVEC_ELT (p, 1) =
4575 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4576 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
4577
4578 return true;
4579 }
4580
4581 return false;
4582 }
4583
4584 /* Expand code for the insv template. Return true if successful. */
4585
4586 bool
4587 s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
4588 {
4589 int bitsize = INTVAL (op1);
4590 int bitpos = INTVAL (op2);
4591
4592 /* On z10 we can use the risbg instruction to implement insv. */
4593 if (TARGET_Z10
4594 && ((GET_MODE (dest) == DImode && GET_MODE (src) == DImode)
4595 || (GET_MODE (dest) == SImode && GET_MODE (src) == SImode)))
4596 {
4597 rtx op;
4598 rtx clobber;
4599
4600 op = gen_rtx_SET (GET_MODE(src),
4601 gen_rtx_ZERO_EXTRACT (GET_MODE (dest), dest, op1, op2),
4602 src);
4603 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4604 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
4605
4606 return true;
4607 }
4608
4609 /* We need byte alignment. */
4610 if (bitsize % BITS_PER_UNIT)
4611 return false;
4612
4613 if (bitpos == 0
4614 && memory_operand (dest, VOIDmode)
4615 && (register_operand (src, word_mode)
4616 || const_int_operand (src, VOIDmode)))
4617 {
4618 /* Emit standard pattern if possible. */
4619 enum machine_mode mode = smallest_mode_for_size (bitsize, MODE_INT);
4620 if (GET_MODE_BITSIZE (mode) == bitsize)
4621 emit_move_insn (adjust_address (dest, mode, 0), gen_lowpart (mode, src));
4622
4623 /* (set (ze (mem)) (const_int)). */
4624 else if (const_int_operand (src, VOIDmode))
4625 {
4626 int size = bitsize / BITS_PER_UNIT;
4627 rtx src_mem = adjust_address (force_const_mem (word_mode, src), BLKmode,
4628 GET_MODE_SIZE (word_mode) - size);
4629
4630 dest = adjust_address (dest, BLKmode, 0);
4631 set_mem_size (dest, GEN_INT (size));
4632 s390_expand_movmem (dest, src_mem, GEN_INT (size));
4633 }
4634
4635 /* (set (ze (mem)) (reg)). */
4636 else if (register_operand (src, word_mode))
4637 {
4638 if (bitsize <= GET_MODE_BITSIZE (SImode))
4639 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, op1,
4640 const0_rtx), src);
4641 else
4642 {
4643 /* Emit st,stcmh sequence. */
4644 int stcmh_width = bitsize - GET_MODE_BITSIZE (SImode);
4645 int size = stcmh_width / BITS_PER_UNIT;
4646
4647 emit_move_insn (adjust_address (dest, SImode, size),
4648 gen_lowpart (SImode, src));
4649 set_mem_size (dest, GEN_INT (size));
4650 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, GEN_INT
4651 (stcmh_width), const0_rtx),
4652 gen_rtx_LSHIFTRT (word_mode, src, GEN_INT
4653 (GET_MODE_BITSIZE (SImode))));
4654 }
4655 }
4656 else
4657 return false;
4658
4659 return true;
4660 }
4661
4662 /* (set (ze (reg)) (const_int)). */
4663 if (TARGET_ZARCH
4664 && register_operand (dest, word_mode)
4665 && (bitpos % 16) == 0
4666 && (bitsize % 16) == 0
4667 && const_int_operand (src, VOIDmode))
4668 {
4669 HOST_WIDE_INT val = INTVAL (src);
4670 int regpos = bitpos + bitsize;
4671
4672 while (regpos > bitpos)
4673 {
4674 enum machine_mode putmode;
4675 int putsize;
4676
4677 if (TARGET_EXTIMM && (regpos % 32 == 0) && (regpos >= bitpos + 32))
4678 putmode = SImode;
4679 else
4680 putmode = HImode;
4681
4682 putsize = GET_MODE_BITSIZE (putmode);
4683 regpos -= putsize;
4684 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
4685 GEN_INT (putsize),
4686 GEN_INT (regpos)),
4687 gen_int_mode (val, putmode));
4688 val >>= putsize;
4689 }
4690 gcc_assert (regpos == bitpos);
4691 return true;
4692 }
4693
4694 return false;
4695 }
4696
4697 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic which returns a
4698 register that holds VAL of mode MODE shifted by COUNT bits. */
4699
4700 static inline rtx
4701 s390_expand_mask_and_shift (rtx val, enum machine_mode mode, rtx count)
4702 {
4703 val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
4704 NULL_RTX, 1, OPTAB_DIRECT);
4705 return expand_simple_binop (SImode, ASHIFT, val, count,
4706 NULL_RTX, 1, OPTAB_DIRECT);
4707 }
4708
4709 /* Structure to hold the initial parameters for a compare_and_swap operation
4710 in HImode and QImode. */
4711
4712 struct alignment_context
4713 {
4714 rtx memsi; /* SI aligned memory location. */
4715 rtx shift; /* Bit offset with regard to lsb. */
4716 rtx modemask; /* Mask of the HQImode shifted by SHIFT bits. */
4717 rtx modemaski; /* ~modemask */
4718 bool aligned; /* True if memory is aligned, false else. */
4719 };
4720
4721 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize
4722 structure AC for transparent simplification, if the memory alignment is known
4723 to be at least 32 bits. MEM is the memory location for the actual operation
4724 and MODE its mode. */
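/* For example, a QImode MEM whose address has byte offset 1 within its
   containing word (and unknown alignment) ends up with SHIFT
   = (3 - 1) * 8 = 16 and MODEMASK = 0xff << 16 = 0x00ff0000, i.e. the
   big-endian position of that byte within the SImode word.  */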
4725
4726 static void
4727 init_alignment_context (struct alignment_context *ac, rtx mem,
4728 enum machine_mode mode)
4729 {
4730 ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
4731 ac->aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));
4732
4733 if (ac->aligned)
4734 ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned. */
4735 else
4736 {
4737 /* Alignment is unknown. */
4738 rtx byteoffset, addr, align;
4739
4740 /* Force the address into a register. */
4741 addr = force_reg (Pmode, XEXP (mem, 0));
4742
4743 /* Align it to SImode. */
4744 align = expand_simple_binop (Pmode, AND, addr,
4745 GEN_INT (-GET_MODE_SIZE (SImode)),
4746 NULL_RTX, 1, OPTAB_DIRECT);
4747 /* Generate MEM. */
4748 ac->memsi = gen_rtx_MEM (SImode, align);
4749 MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
4750 set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
4751 set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));
4752
4753 /* Calculate shiftcount. */
4754 byteoffset = expand_simple_binop (Pmode, AND, addr,
4755 GEN_INT (GET_MODE_SIZE (SImode) - 1),
4756 NULL_RTX, 1, OPTAB_DIRECT);
4757 /* As we already have some offset, evaluate the remaining distance. */
4758 ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
4759 NULL_RTX, 1, OPTAB_DIRECT);
4760
4761 }
4762 /* Shift is the byte count, but we need the bitcount. */
4763 ac->shift = expand_simple_binop (SImode, MULT, ac->shift, GEN_INT (BITS_PER_UNIT),
4764 NULL_RTX, 1, OPTAB_DIRECT);
4765 /* Calculate masks. */
4766 ac->modemask = expand_simple_binop (SImode, ASHIFT,
4767 GEN_INT (GET_MODE_MASK (mode)), ac->shift,
4768 NULL_RTX, 1, OPTAB_DIRECT);
4769 ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask, NULL_RTX, 1);
4770 }
4771
4772 /* Expand an atomic compare and swap operation for HImode and QImode. MEM is
4773 the memory location, CMP the old value to compare MEM with, and NEW_RTX the value
4774 to set if CMP == MEM.
4775 CMP is never in memory for compare_and_swap_cc because
4776 expand_bool_compare_and_swap puts it into a register for later compare. */
4777
4778 void
4779 s390_expand_cs_hqi (enum machine_mode mode, rtx target, rtx mem, rtx cmp, rtx new_rtx)
4780 {
4781 struct alignment_context ac;
4782 rtx cmpv, newv, val, resv, cc;
4783 rtx res = gen_reg_rtx (SImode);
4784 rtx csloop = gen_label_rtx ();
4785 rtx csend = gen_label_rtx ();
4786
4787 gcc_assert (register_operand (target, VOIDmode));
4788 gcc_assert (MEM_P (mem));
4789
4790 init_alignment_context (&ac, mem, mode);
4791
4792 /* Shift the values to the correct bit positions. */
4793 if (!(ac.aligned && MEM_P (cmp)))
4794 cmp = s390_expand_mask_and_shift (cmp, mode, ac.shift);
4795 if (!(ac.aligned && MEM_P (new_rtx)))
4796 new_rtx = s390_expand_mask_and_shift (new_rtx, mode, ac.shift);
4797
4798 /* Load full word. Subsequent loads are performed by CS. */
4799 val = expand_simple_binop (SImode, AND, ac.memsi, ac.modemaski,
4800 NULL_RTX, 1, OPTAB_DIRECT);
4801
4802 /* Start CS loop. */
4803 emit_label (csloop);
4804 /* val = "<mem>00..0<mem>"
4805 * cmp = "00..0<cmp>00..0"
4806 * new = "00..0<new>00..0"
4807 */
4808
4809 /* Patch cmp and new with val at correct position. */
4810 if (ac.aligned && MEM_P (cmp))
4811 {
4812 cmpv = force_reg (SImode, val);
4813 store_bit_field (cmpv, GET_MODE_BITSIZE (mode), 0, SImode, cmp);
4814 }
4815 else
4816 cmpv = force_reg (SImode, expand_simple_binop (SImode, IOR, cmp, val,
4817 NULL_RTX, 1, OPTAB_DIRECT));
4818 if (ac.aligned && MEM_P (new_rtx))
4819 {
4820 newv = force_reg (SImode, val);
4821 store_bit_field (newv, GET_MODE_BITSIZE (mode), 0, SImode, new_rtx);
4822 }
4823 else
4824 newv = force_reg (SImode, expand_simple_binop (SImode, IOR, new_rtx, val,
4825 NULL_RTX, 1, OPTAB_DIRECT));
4826
4827 /* Jump to end if we're done (likely?). */
4828 s390_emit_jump (csend, s390_emit_compare_and_swap (EQ, res, ac.memsi,
4829 cmpv, newv));
4830
4831 /* Check for changes outside mode. */
4832 resv = expand_simple_binop (SImode, AND, res, ac.modemaski,
4833 NULL_RTX, 1, OPTAB_DIRECT);
4834 cc = s390_emit_compare (NE, resv, val);
4835 emit_move_insn (val, resv);
4836 /* Loop internal if so. */
4837 s390_emit_jump (csloop, cc);
4838
4839 emit_label (csend);
4840
4841 /* Return the correct part of the bitfield. */
4842 convert_move (target, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
4843 NULL_RTX, 1, OPTAB_DIRECT), 1);
4844 }
4845
4846 /* Expand an atomic operation CODE of mode MODE. MEM is the memory location
4847 and VAL the value to play with. If AFTER is true then store the value
4848 MEM holds after the operation, if AFTER is false then store the value MEM
4849 holds before the operation. If TARGET is zero then discard that value, else
4850 store it to TARGET. */
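/* For instance, CODE == MULT implements an atomic NAND on the selected
   field: within the CS loop the field bits of NEW_RTX become
   ~(old & VAL), while the bytes outside the field are left unchanged.  */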
4851
4852 void
4853 s390_expand_atomic (enum machine_mode mode, enum rtx_code code,
4854 rtx target, rtx mem, rtx val, bool after)
4855 {
4856 struct alignment_context ac;
4857 rtx cmp;
4858 rtx new_rtx = gen_reg_rtx (SImode);
4859 rtx orig = gen_reg_rtx (SImode);
4860 rtx csloop = gen_label_rtx ();
4861
4862 gcc_assert (!target || register_operand (target, VOIDmode));
4863 gcc_assert (MEM_P (mem));
4864
4865 init_alignment_context (&ac, mem, mode);
4866
4867 /* Shift val to the correct bit positions.
4868 Preserve "icm", but prevent "ex icm". */
4869 if (!(ac.aligned && code == SET && MEM_P (val)))
4870 val = s390_expand_mask_and_shift (val, mode, ac.shift);
4871
4872 /* Further preparation insns. */
4873 if (code == PLUS || code == MINUS)
4874 emit_move_insn (orig, val);
4875 else if (code == MULT || code == AND) /* val = "11..1<val>11..1" */
4876 val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
4877 NULL_RTX, 1, OPTAB_DIRECT);
4878
4879 /* Load full word. Subsequent loads are performed by CS. */
4880 cmp = force_reg (SImode, ac.memsi);
4881
4882 /* Start CS loop. */
4883 emit_label (csloop);
4884 emit_move_insn (new_rtx, cmp);
4885
4886 /* Patch new with val at correct position. */
4887 switch (code)
4888 {
4889 case PLUS:
4890 case MINUS:
4891 val = expand_simple_binop (SImode, code, new_rtx, orig,
4892 NULL_RTX, 1, OPTAB_DIRECT);
4893 val = expand_simple_binop (SImode, AND, val, ac.modemask,
4894 NULL_RTX, 1, OPTAB_DIRECT);
4895 /* FALLTHRU */
4896 case SET:
4897 if (ac.aligned && MEM_P (val))
4898 store_bit_field (new_rtx, GET_MODE_BITSIZE (mode), 0, SImode, val);
4899 else
4900 {
4901 new_rtx = expand_simple_binop (SImode, AND, new_rtx, ac.modemaski,
4902 NULL_RTX, 1, OPTAB_DIRECT);
4903 new_rtx = expand_simple_binop (SImode, IOR, new_rtx, val,
4904 NULL_RTX, 1, OPTAB_DIRECT);
4905 }
4906 break;
4907 case AND:
4908 case IOR:
4909 case XOR:
4910 new_rtx = expand_simple_binop (SImode, code, new_rtx, val,
4911 NULL_RTX, 1, OPTAB_DIRECT);
4912 break;
4913 case MULT: /* NAND */
4914 new_rtx = expand_simple_binop (SImode, AND, new_rtx, val,
4915 NULL_RTX, 1, OPTAB_DIRECT);
4916 new_rtx = expand_simple_binop (SImode, XOR, new_rtx, ac.modemask,
4917 NULL_RTX, 1, OPTAB_DIRECT);
4918 break;
4919 default:
4920 gcc_unreachable ();
4921 }
4922
4923 s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, cmp,
4924 ac.memsi, cmp, new_rtx));
4925
4926 /* Return the correct part of the bitfield. */
4927 if (target)
4928 convert_move (target, expand_simple_binop (SImode, LSHIFTRT,
4929 after ? new_rtx : cmp, ac.shift,
4930 NULL_RTX, 1, OPTAB_DIRECT), 1);
4931 }
4932
4933 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
4934 We need to emit DTP-relative relocations. */
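/* For instance, for SIZE == 8 and X == (symbol_ref "foo") this emits
   "\t.quad\tfoo@DTPOFF".  */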
4935
4936 static void s390_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
4937
4938 static void
4939 s390_output_dwarf_dtprel (FILE *file, int size, rtx x)
4940 {
4941 switch (size)
4942 {
4943 case 4:
4944 fputs ("\t.long\t", file);
4945 break;
4946 case 8:
4947 fputs ("\t.quad\t", file);
4948 break;
4949 default:
4950 gcc_unreachable ();
4951 }
4952 output_addr_const (file, x);
4953 fputs ("@DTPOFF", file);
4954 }
4955
4956 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
4957 /* Implement TARGET_MANGLE_TYPE. */
4958
4959 static const char *
4960 s390_mangle_type (const_tree type)
4961 {
4962 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
4963 && TARGET_LONG_DOUBLE_128)
4964 return "g";
4965
4966 /* For all other types, use normal C++ mangling. */
4967 return NULL;
4968 }
4969 #endif
4970
4971 /* In the name of slightly smaller debug output, and to cater to
4972 general assembler lossage, recognize various UNSPEC sequences
4973 and turn them back into a direct symbol reference. */
4974
4975 static rtx
4976 s390_delegitimize_address (rtx orig_x)
4977 {
4978 rtx x, y;
4979
4980 orig_x = delegitimize_mem_from_attrs (orig_x);
4981 x = orig_x;
4982 if (GET_CODE (x) != MEM)
4983 return orig_x;
4984
4985 x = XEXP (x, 0);
4986 if (GET_CODE (x) == PLUS
4987 && GET_CODE (XEXP (x, 1)) == CONST
4988 && GET_CODE (XEXP (x, 0)) == REG
4989 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
4990 {
4991 y = XEXP (XEXP (x, 1), 0);
4992 if (GET_CODE (y) == UNSPEC
4993 && XINT (y, 1) == UNSPEC_GOT)
4994 return XVECEXP (y, 0, 0);
4995 return orig_x;
4996 }
4997
4998 if (GET_CODE (x) == CONST)
4999 {
5000 y = XEXP (x, 0);
5001 if (GET_CODE (y) == UNSPEC
5002 && XINT (y, 1) == UNSPEC_GOTENT)
5003 return XVECEXP (y, 0, 0);
5004 return orig_x;
5005 }
5006
5007 return orig_x;
5008 }
5009
5010 /* Output operand OP to stdio stream FILE.
5011 OP is an address (register + offset) which is not used to address data;
5012 instead the rightmost bits are interpreted as the value. */
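/* E.g. an operand of the form (plus (reg %r1) (const_int 46)) would be
   printed as "46(%r1)"; only the low twelve bits of the offset are
   emitted.  */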
5013
5014 static void
5015 print_shift_count_operand (FILE *file, rtx op)
5016 {
5017 HOST_WIDE_INT offset;
5018 rtx base;
5019
5020 /* Extract base register and offset. */
5021 if (!s390_decompose_shift_count (op, &base, &offset))
5022 gcc_unreachable ();
5023
5024 /* Sanity check. */
5025 if (base)
5026 {
5027 gcc_assert (GET_CODE (base) == REG);
5028 gcc_assert (REGNO (base) < FIRST_PSEUDO_REGISTER);
5029 gcc_assert (REGNO_REG_CLASS (REGNO (base)) == ADDR_REGS);
5030 }
5031
5032 /* Offsets are restricted to twelve bits. */
5033 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset & ((1 << 12) - 1));
5034 if (base)
5035 fprintf (file, "(%s)", reg_names[REGNO (base)]);
5036 }
5037
5038 /* See 'get_some_local_dynamic_name'. */
5039
5040 static int
5041 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
5042 {
5043 rtx x = *px;
5044
5045 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
5046 {
5047 x = get_pool_constant (x);
5048 return for_each_rtx (&x, get_some_local_dynamic_name_1, 0);
5049 }
5050
5051 if (GET_CODE (x) == SYMBOL_REF
5052 && tls_symbolic_operand (x) == TLS_MODEL_LOCAL_DYNAMIC)
5053 {
5054 cfun->machine->some_ld_name = XSTR (x, 0);
5055 return 1;
5056 }
5057
5058 return 0;
5059 }
5060
5061 /* Locate some local-dynamic symbol still in use by this function
5062 so that we can print its name in local-dynamic base patterns. */
5063
5064 static const char *
5065 get_some_local_dynamic_name (void)
5066 {
5067 rtx insn;
5068
5069 if (cfun->machine->some_ld_name)
5070 return cfun->machine->some_ld_name;
5071
5072 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
5073 if (INSN_P (insn)
5074 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
5075 return cfun->machine->some_ld_name;
5076
5077 gcc_unreachable ();
5078 }
5079
5080 /* Output machine-dependent UNSPECs occurring in address constant X
5081 in assembler syntax to stdio stream FILE. Returns true if the
5082 constant X could be recognized, false otherwise. */
5083
5084 static bool
5085 s390_output_addr_const_extra (FILE *file, rtx x)
5086 {
5087 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
5088 switch (XINT (x, 1))
5089 {
5090 case UNSPEC_GOTENT:
5091 output_addr_const (file, XVECEXP (x, 0, 0));
5092 fprintf (file, "@GOTENT");
5093 return true;
5094 case UNSPEC_GOT:
5095 output_addr_const (file, XVECEXP (x, 0, 0));
5096 fprintf (file, "@GOT");
5097 return true;
5098 case UNSPEC_GOTOFF:
5099 output_addr_const (file, XVECEXP (x, 0, 0));
5100 fprintf (file, "@GOTOFF");
5101 return true;
5102 case UNSPEC_PLT:
5103 output_addr_const (file, XVECEXP (x, 0, 0));
5104 fprintf (file, "@PLT");
5105 return true;
5106 case UNSPEC_PLTOFF:
5107 output_addr_const (file, XVECEXP (x, 0, 0));
5108 fprintf (file, "@PLTOFF");
5109 return true;
5110 case UNSPEC_TLSGD:
5111 output_addr_const (file, XVECEXP (x, 0, 0));
5112 fprintf (file, "@TLSGD");
5113 return true;
5114 case UNSPEC_TLSLDM:
5115 assemble_name (file, get_some_local_dynamic_name ());
5116 fprintf (file, "@TLSLDM");
5117 return true;
5118 case UNSPEC_DTPOFF:
5119 output_addr_const (file, XVECEXP (x, 0, 0));
5120 fprintf (file, "@DTPOFF");
5121 return true;
5122 case UNSPEC_NTPOFF:
5123 output_addr_const (file, XVECEXP (x, 0, 0));
5124 fprintf (file, "@NTPOFF");
5125 return true;
5126 case UNSPEC_GOTNTPOFF:
5127 output_addr_const (file, XVECEXP (x, 0, 0));
5128 fprintf (file, "@GOTNTPOFF");
5129 return true;
5130 case UNSPEC_INDNTPOFF:
5131 output_addr_const (file, XVECEXP (x, 0, 0));
5132 fprintf (file, "@INDNTPOFF");
5133 return true;
5134 }
5135
5136 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 2)
5137 switch (XINT (x, 1))
5138 {
5139 case UNSPEC_POOL_OFFSET:
5140 x = gen_rtx_MINUS (GET_MODE (x), XVECEXP (x, 0, 0), XVECEXP (x, 0, 1));
5141 output_addr_const (file, x);
5142 return true;
5143 }
5144 return false;
5145 }
5146
5147 /* Output address operand ADDR in assembler syntax to
5148 stdio stream FILE. */
5149
5150 void
5151 print_operand_address (FILE *file, rtx addr)
5152 {
5153 struct s390_address ad;
5154
5155 if (s390_symref_operand_p (addr, NULL, NULL))
5156 {
5157 if (!TARGET_Z10)
5158 {
5159 output_operand_lossage ("symbolic memory references are "
5160 "only supported on z10 or later");
5161 return;
5162 }
5163 output_addr_const (file, addr);
5164 return;
5165 }
5166
5167 if (!s390_decompose_address (addr, &ad)
5168 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5169 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
5170 output_operand_lossage ("cannot decompose address");
5171
5172 if (ad.disp)
5173 output_addr_const (file, ad.disp);
5174 else
5175 fprintf (file, "0");
5176
5177 if (ad.base && ad.indx)
5178 fprintf (file, "(%s,%s)", reg_names[REGNO (ad.indx)],
5179 reg_names[REGNO (ad.base)]);
5180 else if (ad.base)
5181 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
5182 }
5183
5184 /* Output operand X in assembler syntax to stdio stream FILE.
5185 CODE specified the format flag. The following format flags
5186 are recognized:
5187
5188 'C': print opcode suffix for branch condition.
5189 'D': print opcode suffix for inverse branch condition.
5190 'E': print opcode suffix for branch on index instruction.
5191 'J': print tls_load/tls_gdcall/tls_ldcall suffix.
5192 'G': print the size of the operand in bytes.
5193 'O': print only the displacement of a memory reference.
5194 'R': print only the base register of a memory reference.
5195 'S': print S-type memory reference (base+displacement).
5196 'N': print the second word of a DImode operand.
5197 'M': print the second word of a TImode operand.
5198 'Y': print shift count operand.
5199
5200 'b': print integer X as if it's an unsigned byte.
5201 'c': print integer X as if it's a signed byte.
5202 'x': print integer X as if it's an unsigned halfword.
5203 'h': print integer X as if it's a signed halfword.
5204 'i': print the first nonzero HImode part of X.
5205 'j': print the first HImode part unequal to -1 of X.
5206 'k': print the first nonzero SImode part of X.
5207 'm': print the first SImode part unequal to -1 of X.
5208 'o': print integer X as if it's an unsigned 32bit word. */
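/* For instance, for X == (const_int -1): 'b' prints 255, 'c' prints -1,
   'x' prints 65535, and 'h' prints -1.  */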
5209
5210 void
5211 print_operand (FILE *file, rtx x, int code)
5212 {
5213 switch (code)
5214 {
5215 case 'C':
5216 fprintf (file, s390_branch_condition_mnemonic (x, FALSE));
5217 return;
5218
5219 case 'D':
5220 fprintf (file, s390_branch_condition_mnemonic (x, TRUE));
5221 return;
5222
5223 case 'E':
5224 if (GET_CODE (x) == LE)
5225 fprintf (file, "l");
5226 else if (GET_CODE (x) == GT)
5227 fprintf (file, "h");
5228 else
5229 output_operand_lossage ("invalid comparison operator "
5230 "for 'E' output modifier");
5231 return;
5232
5233 case 'J':
5234 if (GET_CODE (x) == SYMBOL_REF)
5235 {
5236 fprintf (file, "%s", ":tls_load:");
5237 output_addr_const (file, x);
5238 }
5239 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
5240 {
5241 fprintf (file, "%s", ":tls_gdcall:");
5242 output_addr_const (file, XVECEXP (x, 0, 0));
5243 }
5244 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM)
5245 {
5246 fprintf (file, "%s", ":tls_ldcall:");
5247 assemble_name (file, get_some_local_dynamic_name ());
5248 }
5249 else
5250 output_operand_lossage ("invalid reference for 'J' output modifier");
5251 return;
5252
5253 case 'G':
5254 fprintf (file, "%u", GET_MODE_SIZE (GET_MODE (x)));
5255 return;
5256
5257 case 'O':
5258 {
5259 struct s390_address ad;
5260 int ret;
5261
5262 if (!MEM_P (x))
5263 {
5264 output_operand_lossage ("memory reference expected for "
5265 "'O' output modifier");
5266 return;
5267 }
5268
5269 ret = s390_decompose_address (XEXP (x, 0), &ad);
5270
5271 if (!ret
5272 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5273 || ad.indx)
5274 {
5275 output_operand_lossage ("invalid address for 'O' output modifier");
5276 return;
5277 }
5278
5279 if (ad.disp)
5280 output_addr_const (file, ad.disp);
5281 else
5282 fprintf (file, "0");
5283 }
5284 return;
5285
5286 case 'R':
5287 {
5288 struct s390_address ad;
5289 int ret;
5290
5291 if (!MEM_P (x))
5292 {
5293 output_operand_lossage ("memory reference expected for "
5294 "'R' output modifier");
5295 return;
5296 }
5297
5298 ret = s390_decompose_address (XEXP (x, 0), &ad);
5299
5300 if (!ret
5301 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5302 || ad.indx)
5303 {
5304 output_operand_lossage ("invalid address for 'R' output modifier");
5305 return;
5306 }
5307
5308 if (ad.base)
5309 fprintf (file, "%s", reg_names[REGNO (ad.base)]);
5310 else
5311 fprintf (file, "0");
5312 }
5313 return;
5314
5315 case 'S':
5316 {
5317 struct s390_address ad;
5318 int ret;
5319
5320 if (!MEM_P (x))
5321 {
5322 output_operand_lossage ("memory reference expected for "
5323 "'S' output modifier");
5324 return;
5325 }
5326 ret = s390_decompose_address (XEXP (x, 0), &ad);
5327
5328 if (!ret
5329 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5330 || ad.indx)
5331 {
5332 output_operand_lossage ("invalid address for 'S' output modifier");
5333 return;
5334 }
5335
5336 if (ad.disp)
5337 output_addr_const (file, ad.disp);
5338 else
5339 fprintf (file, "0");
5340
5341 if (ad.base)
5342 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
5343 }
5344 return;
5345
5346 case 'N':
5347 if (GET_CODE (x) == REG)
5348 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
5349 else if (GET_CODE (x) == MEM)
5350 x = change_address (x, VOIDmode, plus_constant (XEXP (x, 0), 4));
5351 else
5352 output_operand_lossage ("register or memory expression expected "
5353 "for 'N' output modifier");
5354 break;
5355
5356 case 'M':
5357 if (GET_CODE (x) == REG)
5358 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
5359 else if (GET_CODE (x) == MEM)
5360 x = change_address (x, VOIDmode, plus_constant (XEXP (x, 0), 8));
5361 else
5362 output_operand_lossage ("register or memory expression expected "
5363 "for 'M' output modifier");
5364 break;
5365
5366 case 'Y':
5367 print_shift_count_operand (file, x);
5368 return;
5369 }
5370
5371 switch (GET_CODE (x))
5372 {
5373 case REG:
5374 fprintf (file, "%s", reg_names[REGNO (x)]);
5375 break;
5376
5377 case MEM:
5378 output_address (XEXP (x, 0));
5379 break;
5380
5381 case CONST:
5382 case CODE_LABEL:
5383 case LABEL_REF:
5384 case SYMBOL_REF:
5385 output_addr_const (file, x);
5386 break;
5387
5388 case CONST_INT:
5389 if (code == 'b')
5390 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xff);
5391 else if (code == 'c')
5392 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ((INTVAL (x) & 0xff) ^ 0x80) - 0x80);
5393 else if (code == 'x')
5394 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xffff);
5395 else if (code == 'h')
5396 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
5397 else if (code == 'i')
5398 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5399 s390_extract_part (x, HImode, 0));
5400 else if (code == 'j')
5401 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5402 s390_extract_part (x, HImode, -1));
5403 else if (code == 'k')
5404 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5405 s390_extract_part (x, SImode, 0));
5406 else if (code == 'm')
5407 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5408 s390_extract_part (x, SImode, -1));
5409 else if (code == 'o')
5410 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xffffffff);
5411 else
5412 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
5413 break;
5414
5415 case CONST_DOUBLE:
5416 gcc_assert (GET_MODE (x) == VOIDmode);
5417 if (code == 'b')
5418 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xff);
5419 else if (code == 'x')
5420 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xffff);
5421 else if (code == 'h')
5422 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5423 ((CONST_DOUBLE_LOW (x) & 0xffff) ^ 0x8000) - 0x8000);
5424 else
5425 {
5426 if (code == 0)
5427 output_operand_lossage ("invalid constant - try using "
5428 "an output modifier");
5429 else
5430 output_operand_lossage ("invalid constant for output modifier '%c'",
5431 code);
5432 }
5433 break;
5434
5435 default:
5436 if (code == 0)
5437 output_operand_lossage ("invalid expression - try using "
5438 "an output modifier");
5439 else
5440 output_operand_lossage ("invalid expression for output "
5441 "modifier '%c'", code);
5442 break;
5443 }
5444 }
5445
5446 /* Target hook for assembling integer objects. We need to define it
5447 here to work around a bug in some versions of GAS, which couldn't
5448 handle values smaller than INT_MIN when printed in decimal. */
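/* E.g. an aligned 8-byte CONST_INT of -0x80000001 is emitted as
   ".quad 0xffffffff7fffffff" instead of in decimal.  */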
5449
5450 static bool
5451 s390_assemble_integer (rtx x, unsigned int size, int aligned_p)
5452 {
5453 if (size == 8 && aligned_p
5454 && GET_CODE (x) == CONST_INT && INTVAL (x) < INT_MIN)
5455 {
5456 fprintf (asm_out_file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n",
5457 INTVAL (x));
5458 return true;
5459 }
5460 return default_assemble_integer (x, size, aligned_p);
5461 }
5462
5463 /* Returns true if register REGNO is used for forming
5464 a memory address in expression X. */
5465
5466 static bool
5467 reg_used_in_mem_p (int regno, rtx x)
5468 {
5469 enum rtx_code code = GET_CODE (x);
5470 int i, j;
5471 const char *fmt;
5472
5473 if (code == MEM)
5474 {
5475 if (refers_to_regno_p (regno, regno+1,
5476 XEXP (x, 0), 0))
5477 return true;
5478 }
5479 else if (code == SET
5480 && GET_CODE (SET_DEST (x)) == PC)
5481 {
5482 if (refers_to_regno_p (regno, regno+1,
5483 SET_SRC (x), 0))
5484 return true;
5485 }
5486
5487 fmt = GET_RTX_FORMAT (code);
5488 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5489 {
5490 if (fmt[i] == 'e'
5491 && reg_used_in_mem_p (regno, XEXP (x, i)))
5492 return true;
5493
5494 else if (fmt[i] == 'E')
5495 for (j = 0; j < XVECLEN (x, i); j++)
5496 if (reg_used_in_mem_p (regno, XVECEXP (x, i, j)))
5497 return true;
5498 }
5499 return false;
5500 }
5501
5502 /* Returns true if expression DEP_RTX sets an address register
5503 used by instruction INSN to address memory. */
5504
5505 static bool
5506 addr_generation_dependency_p (rtx dep_rtx, rtx insn)
5507 {
5508 rtx target, pat;
5509
5510 if (GET_CODE (dep_rtx) == INSN)
5511 dep_rtx = PATTERN (dep_rtx);
5512
5513 if (GET_CODE (dep_rtx) == SET)
5514 {
5515 target = SET_DEST (dep_rtx);
5516 if (GET_CODE (target) == STRICT_LOW_PART)
5517 target = XEXP (target, 0);
5518 while (GET_CODE (target) == SUBREG)
5519 target = SUBREG_REG (target);
5520
5521 if (GET_CODE (target) == REG)
5522 {
5523 int regno = REGNO (target);
5524
5525 if (s390_safe_attr_type (insn) == TYPE_LA)
5526 {
5527 pat = PATTERN (insn);
5528 if (GET_CODE (pat) == PARALLEL)
5529 {
5530 gcc_assert (XVECLEN (pat, 0) == 2);
5531 pat = XVECEXP (pat, 0, 0);
5532 }
5533 gcc_assert (GET_CODE (pat) == SET);
5534 return refers_to_regno_p (regno, regno+1, SET_SRC (pat), 0);
5535 }
5536 else if (get_attr_atype (insn) == ATYPE_AGEN)
5537 return reg_used_in_mem_p (regno, PATTERN (insn));
5538 }
5539 }
5540 return false;
5541 }
5542
5543 /* Return 1 if DEP_INSN sets a register used by INSN in the agen unit. */
5544
5545 int
5546 s390_agen_dep_p (rtx dep_insn, rtx insn)
5547 {
5548 rtx dep_rtx = PATTERN (dep_insn);
5549 int i;
5550
5551 if (GET_CODE (dep_rtx) == SET
5552 && addr_generation_dependency_p (dep_rtx, insn))
5553 return 1;
5554 else if (GET_CODE (dep_rtx) == PARALLEL)
5555 {
5556 for (i = 0; i < XVECLEN (dep_rtx, 0); i++)
5557 {
5558 if (addr_generation_dependency_p (XVECEXP (dep_rtx, 0, i), insn))
5559 return 1;
5560 }
5561 }
5562 return 0;
5563 }
5564
5565
5566 /* A C statement (sans semicolon) to update the integer scheduling priority
5567 INSN_PRIORITY (INSN). Increase the priority to execute the INSN earlier,
5568 reduce the priority to execute INSN later. Do not define this macro if
5569 you do not need to adjust the scheduling priorities of insns.
5570
5571 A STD instruction should be scheduled earlier,
5572 in order to use the bypass. */
5573 static int
5574 s390_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
5575 {
5576 if (! INSN_P (insn))
5577 return priority;
5578
5579 if (s390_tune != PROCESSOR_2084_Z990
5580 && s390_tune != PROCESSOR_2094_Z9_109
5581 && s390_tune != PROCESSOR_2097_Z10
5582 && s390_tune != PROCESSOR_2817_Z196)
5583 return priority;
5584
5585 switch (s390_safe_attr_type (insn))
5586 {
5587 case TYPE_FSTOREDF:
5588 case TYPE_FSTORESF:
5589 priority = priority << 3;
5590 break;
5591 case TYPE_STORE:
5592 case TYPE_STM:
5593 priority = priority << 1;
5594 break;
5595 default:
5596 break;
5597 }
5598 return priority;
5599 }
5600
5601
5602 /* The number of instructions that can be issued per cycle. */
5603
5604 static int
5605 s390_issue_rate (void)
5606 {
5607 switch (s390_tune)
5608 {
5609 case PROCESSOR_2084_Z990:
5610 case PROCESSOR_2094_Z9_109:
5611 case PROCESSOR_2817_Z196:
5612 return 3;
5613 case PROCESSOR_2097_Z10:
5614 return 2;
5615 default:
5616 return 1;
5617 }
5618 }
5619
5620 static int
5621 s390_first_cycle_multipass_dfa_lookahead (void)
5622 {
5623 return 4;
5624 }
5625
5626 /* Annotate every literal pool reference in X by an UNSPEC_LTREF expression.
5627 Fix up MEMs as required. */
5628
5629 static void
5630 annotate_constant_pool_refs (rtx *x)
5631 {
5632 int i, j;
5633 const char *fmt;
5634
5635 gcc_assert (GET_CODE (*x) != SYMBOL_REF
5636 || !CONSTANT_POOL_ADDRESS_P (*x));
5637
5638 /* Literal pool references can only occur inside a MEM ... */
5639 if (GET_CODE (*x) == MEM)
5640 {
5641 rtx memref = XEXP (*x, 0);
5642
5643 if (GET_CODE (memref) == SYMBOL_REF
5644 && CONSTANT_POOL_ADDRESS_P (memref))
5645 {
5646 rtx base = cfun->machine->base_reg;
5647 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, memref, base),
5648 UNSPEC_LTREF);
5649
5650 *x = replace_equiv_address (*x, addr);
5651 return;
5652 }
5653
5654 if (GET_CODE (memref) == CONST
5655 && GET_CODE (XEXP (memref, 0)) == PLUS
5656 && GET_CODE (XEXP (XEXP (memref, 0), 1)) == CONST_INT
5657 && GET_CODE (XEXP (XEXP (memref, 0), 0)) == SYMBOL_REF
5658 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (memref, 0), 0)))
5659 {
5660 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (memref, 0), 1));
5661 rtx sym = XEXP (XEXP (memref, 0), 0);
5662 rtx base = cfun->machine->base_reg;
5663 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5664 UNSPEC_LTREF);
5665
5666 *x = replace_equiv_address (*x, plus_constant (addr, off));
5667 return;
5668 }
5669 }
5670
5671 /* ... or a load-address type pattern. */
5672 if (GET_CODE (*x) == SET)
5673 {
5674 rtx addrref = SET_SRC (*x);
5675
5676 if (GET_CODE (addrref) == SYMBOL_REF
5677 && CONSTANT_POOL_ADDRESS_P (addrref))
5678 {
5679 rtx base = cfun->machine->base_reg;
5680 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addrref, base),
5681 UNSPEC_LTREF);
5682
5683 SET_SRC (*x) = addr;
5684 return;
5685 }
5686
5687 if (GET_CODE (addrref) == CONST
5688 && GET_CODE (XEXP (addrref, 0)) == PLUS
5689 && GET_CODE (XEXP (XEXP (addrref, 0), 1)) == CONST_INT
5690 && GET_CODE (XEXP (XEXP (addrref, 0), 0)) == SYMBOL_REF
5691 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (addrref, 0), 0)))
5692 {
5693 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (addrref, 0), 1));
5694 rtx sym = XEXP (XEXP (addrref, 0), 0);
5695 rtx base = cfun->machine->base_reg;
5696 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5697 UNSPEC_LTREF);
5698
5699 SET_SRC (*x) = plus_constant (addr, off);
5700 return;
5701 }
5702 }
5703
5704 /* Annotate LTREL_BASE as well. */
5705 if (GET_CODE (*x) == UNSPEC
5706 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
5707 {
5708 rtx base = cfun->machine->base_reg;
5709 *x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XVECEXP (*x, 0, 0), base),
5710 UNSPEC_LTREL_BASE);
5711 return;
5712 }
5713
5714 fmt = GET_RTX_FORMAT (GET_CODE (*x));
5715 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
5716 {
5717 if (fmt[i] == 'e')
5718 {
5719 annotate_constant_pool_refs (&XEXP (*x, i));
5720 }
5721 else if (fmt[i] == 'E')
5722 {
5723 for (j = 0; j < XVECLEN (*x, i); j++)
5724 annotate_constant_pool_refs (&XVECEXP (*x, i, j));
5725 }
5726 }
5727 }
5728
5729 /* Split all branches that exceed the maximum distance.
5730 Returns true if this created a new literal pool entry. */
5731
5732 static int
5733 s390_split_branches (void)
5734 {
5735 rtx temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
5736 int new_literal = 0, ret;
5737 rtx insn, pat, tmp, target;
5738 rtx *label;
5739
5740 /* We need correct insn addresses. */
5741
5742 shorten_branches (get_insns ());
5743
5744 /* Find all branches that exceed 64KB, and split them. */
5745
5746 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5747 {
5748 if (GET_CODE (insn) != JUMP_INSN)
5749 continue;
5750
5751 pat = PATTERN (insn);
5752 if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 2)
5753 pat = XVECEXP (pat, 0, 0);
5754 if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
5755 continue;
5756
5757 if (GET_CODE (SET_SRC (pat)) == LABEL_REF)
5758 {
5759 label = &SET_SRC (pat);
5760 }
5761 else if (GET_CODE (SET_SRC (pat)) == IF_THEN_ELSE)
5762 {
5763 if (GET_CODE (XEXP (SET_SRC (pat), 1)) == LABEL_REF)
5764 label = &XEXP (SET_SRC (pat), 1);
5765 else if (GET_CODE (XEXP (SET_SRC (pat), 2)) == LABEL_REF)
5766 label = &XEXP (SET_SRC (pat), 2);
5767 else
5768 continue;
5769 }
5770 else
5771 continue;
5772
5773 if (get_attr_length (insn) <= 4)
5774 continue;
5775
5776 /* We are going to use the return register as a scratch register;
5777 make sure it will be saved/restored by the prologue/epilogue. */
5778 cfun_frame_layout.save_return_addr_p = 1;
5779
5780 if (!flag_pic)
5781 {
5782 new_literal = 1;
5783 tmp = force_const_mem (Pmode, *label);
5784 tmp = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, tmp), insn);
5785 INSN_ADDRESSES_NEW (tmp, -1);
5786 annotate_constant_pool_refs (&PATTERN (tmp));
5787
5788 target = temp_reg;
5789 }
5790 else
5791 {
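/* With PIC, the pool entry holds the target's offset from the literal
   pool base (UNSPEC_LTREL_OFFSET); load it and add the pool base
   (UNSPEC_LTREL_BASE) to form the branch target. */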
5792 new_literal = 1;
5793 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, *label),
5794 UNSPEC_LTREL_OFFSET);
5795 target = gen_rtx_CONST (Pmode, target);
5796 target = force_const_mem (Pmode, target);
5797 tmp = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, target), insn);
5798 INSN_ADDRESSES_NEW (tmp, -1);
5799 annotate_constant_pool_refs (&PATTERN (tmp));
5800
5801 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XEXP (target, 0),
5802 cfun->machine->base_reg),
5803 UNSPEC_LTREL_BASE);
5804 target = gen_rtx_PLUS (Pmode, temp_reg, target);
5805 }
5806
5807 ret = validate_change (insn, label, target, 0);
5808 gcc_assert (ret);
5809 }
5810
5811 return new_literal;
5812 }
5813
5814
5815 /* Find an annotated literal pool symbol referenced in RTX X,
5816 and store it at REF. Will abort if X contains references to
5817 more than one such pool symbol; multiple references to the same
5818 symbol are allowed, however.
5819
5820 The rtx pointed to by REF must be initialized to NULL_RTX
5821 by the caller before calling this routine. */
5822
5823 static void
5824 find_constant_pool_ref (rtx x, rtx *ref)
5825 {
5826 int i, j;
5827 const char *fmt;
5828
5829 /* Ignore LTREL_BASE references. */
5830 if (GET_CODE (x) == UNSPEC
5831 && XINT (x, 1) == UNSPEC_LTREL_BASE)
5832 return;
5833 /* Likewise POOL_ENTRY insns. */
5834 if (GET_CODE (x) == UNSPEC_VOLATILE
5835 && XINT (x, 1) == UNSPECV_POOL_ENTRY)
5836 return;
5837
5838 gcc_assert (GET_CODE (x) != SYMBOL_REF
5839 || !CONSTANT_POOL_ADDRESS_P (x));
5840
5841 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_LTREF)
5842 {
5843 rtx sym = XVECEXP (x, 0, 0);
5844 gcc_assert (GET_CODE (sym) == SYMBOL_REF
5845 && CONSTANT_POOL_ADDRESS_P (sym));
5846
5847 if (*ref == NULL_RTX)
5848 *ref = sym;
5849 else
5850 gcc_assert (*ref == sym);
5851
5852 return;
5853 }
5854
5855 fmt = GET_RTX_FORMAT (GET_CODE (x));
5856 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5857 {
5858 if (fmt[i] == 'e')
5859 {
5860 find_constant_pool_ref (XEXP (x, i), ref);
5861 }
5862 else if (fmt[i] == 'E')
5863 {
5864 for (j = 0; j < XVECLEN (x, i); j++)
5865 find_constant_pool_ref (XVECEXP (x, i, j), ref);
5866 }
5867 }
5868 }
5869
5870 /* Replace every reference to the annotated literal pool
5871 symbol REF in X by its base plus OFFSET. */
5872
5873 static void
5874 replace_constant_pool_ref (rtx *x, rtx ref, rtx offset)
5875 {
5876 int i, j;
5877 const char *fmt;
5878
5879 gcc_assert (*x != ref);
5880
5881 if (GET_CODE (*x) == UNSPEC
5882 && XINT (*x, 1) == UNSPEC_LTREF
5883 && XVECEXP (*x, 0, 0) == ref)
5884 {
5885 *x = gen_rtx_PLUS (Pmode, XVECEXP (*x, 0, 1), offset);
5886 return;
5887 }
5888
5889 if (GET_CODE (*x) == PLUS
5890 && GET_CODE (XEXP (*x, 1)) == CONST_INT
5891 && GET_CODE (XEXP (*x, 0)) == UNSPEC
5892 && XINT (XEXP (*x, 0), 1) == UNSPEC_LTREF
5893 && XVECEXP (XEXP (*x, 0), 0, 0) == ref)
5894 {
5895 rtx addr = gen_rtx_PLUS (Pmode, XVECEXP (XEXP (*x, 0), 0, 1), offset);
5896 *x = plus_constant (addr, INTVAL (XEXP (*x, 1)));
5897 return;
5898 }
5899
5900 fmt = GET_RTX_FORMAT (GET_CODE (*x));
5901 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
5902 {
5903 if (fmt[i] == 'e')
5904 {
5905 replace_constant_pool_ref (&XEXP (*x, i), ref, offset);
5906 }
5907 else if (fmt[i] == 'E')
5908 {
5909 for (j = 0; j < XVECLEN (*x, i); j++)
5910 replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, offset);
5911 }
5912 }
5913 }
5914
5915 /* Check whether X contains an UNSPEC_LTREL_BASE.
5916 Return its constant pool symbol if found, NULL_RTX otherwise. */
5917
5918 static rtx
5919 find_ltrel_base (rtx x)
5920 {
5921 int i, j;
5922 const char *fmt;
5923
5924 if (GET_CODE (x) == UNSPEC
5925 && XINT (x, 1) == UNSPEC_LTREL_BASE)
5926 return XVECEXP (x, 0, 0);
5927
5928 fmt = GET_RTX_FORMAT (GET_CODE (x));
5929 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5930 {
5931 if (fmt[i] == 'e')
5932 {
5933 rtx fnd = find_ltrel_base (XEXP (x, i));
5934 if (fnd)
5935 return fnd;
5936 }
5937 else if (fmt[i] == 'E')
5938 {
5939 for (j = 0; j < XVECLEN (x, i); j++)
5940 {
5941 rtx fnd = find_ltrel_base (XVECEXP (x, i, j));
5942 if (fnd)
5943 return fnd;
5944 }
5945 }
5946 }
5947
5948 return NULL_RTX;
5949 }
5950
5951 /* Replace any occurrence of UNSPEC_LTREL_BASE in X with its base. */
5952
5953 static void
5954 replace_ltrel_base (rtx *x)
5955 {
5956 int i, j;
5957 const char *fmt;
5958
5959 if (GET_CODE (*x) == UNSPEC
5960 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
5961 {
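/* Operand 1 is the base register that annotate_constant_pool_refs
   attached to the UNSPEC_LTREL_BASE. */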
5962 *x = XVECEXP (*x, 0, 1);
5963 return;
5964 }
5965
5966 fmt = GET_RTX_FORMAT (GET_CODE (*x));
5967 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
5968 {
5969 if (fmt[i] == 'e')
5970 {
5971 replace_ltrel_base (&XEXP (*x, i));
5972 }
5973 else if (fmt[i] == 'E')
5974 {
5975 for (j = 0; j < XVECLEN (*x, i); j++)
5976 replace_ltrel_base (&XVECEXP (*x, i, j));
5977 }
5978 }
5979 }
5980
5981
5982 /* We keep a list of constants which we have to add to internal
5983 constant tables in the middle of large functions. */
5984
5985 #define NR_C_MODES 11
5986 enum machine_mode constant_modes[NR_C_MODES] =
5987 {
5988 TFmode, TImode, TDmode,
5989 DFmode, DImode, DDmode,
5990 SFmode, SImode, SDmode,
5991 HImode,
5992 QImode
5993 };
5994
5995 struct constant
5996 {
5997 struct constant *next;
5998 rtx value;
5999 rtx label;
6000 };
6001
6002 struct constant_pool
6003 {
6004 struct constant_pool *next;
6005 rtx first_insn;
6006 rtx pool_insn;
6007 bitmap insns;
6008 rtx emit_pool_after;
6009
6010 struct constant *constants[NR_C_MODES];
6011 struct constant *execute;
6012 rtx label;
6013 int size;
6014 };
6015
6016 /* Allocate new constant_pool structure. */
6017
6018 static struct constant_pool *
6019 s390_alloc_pool (void)
6020 {
6021 struct constant_pool *pool;
6022 int i;
6023
6024 pool = (struct constant_pool *) xmalloc (sizeof *pool);
6025 pool->next = NULL;
6026 for (i = 0; i < NR_C_MODES; i++)
6027 pool->constants[i] = NULL;
6028
6029 pool->execute = NULL;
6030 pool->label = gen_label_rtx ();
6031 pool->first_insn = NULL_RTX;
6032 pool->pool_insn = NULL_RTX;
6033 pool->insns = BITMAP_ALLOC (NULL);
6034 pool->size = 0;
6035 pool->emit_pool_after = NULL_RTX;
6036
6037 return pool;
6038 }
6039
6040 /* Create new constant pool covering instructions starting at INSN
6041 and chain it to the end of POOL_LIST. */
6042
6043 static struct constant_pool *
6044 s390_start_pool (struct constant_pool **pool_list, rtx insn)
6045 {
6046 struct constant_pool *pool, **prev;
6047
6048 pool = s390_alloc_pool ();
6049 pool->first_insn = insn;
6050
6051 for (prev = pool_list; *prev; prev = &(*prev)->next)
6052 ;
6053 *prev = pool;
6054
6055 return pool;
6056 }
6057
6058 /* End range of instructions covered by POOL at INSN and emit
6059 placeholder insn representing the pool. */
6060
6061 static void
6062 s390_end_pool (struct constant_pool *pool, rtx insn)
6063 {
6064 rtx pool_size = GEN_INT (pool->size + 8 /* alignment slop */);
6065
6066 if (!insn)
6067 insn = get_last_insn ();
6068
6069 pool->pool_insn = emit_insn_after (gen_pool (pool_size), insn);
6070 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6071 }
6072
6073 /* Add INSN to the list of insns covered by POOL. */
6074
6075 static void
6076 s390_add_pool_insn (struct constant_pool *pool, rtx insn)
6077 {
6078 bitmap_set_bit (pool->insns, INSN_UID (insn));
6079 }
6080
6081 /* Return pool out of POOL_LIST that covers INSN. */
6082
6083 static struct constant_pool *
6084 s390_find_pool (struct constant_pool *pool_list, rtx insn)
6085 {
6086 struct constant_pool *pool;
6087
6088 for (pool = pool_list; pool; pool = pool->next)
6089 if (bitmap_bit_p (pool->insns, INSN_UID (insn)))
6090 break;
6091
6092 return pool;
6093 }
6094
6095 /* Add constant VAL of mode MODE to the constant pool POOL. */
6096
6097 static void
6098 s390_add_constant (struct constant_pool *pool, rtx val, enum machine_mode mode)
6099 {
6100 struct constant *c;
6101 int i;
6102
6103 for (i = 0; i < NR_C_MODES; i++)
6104 if (constant_modes[i] == mode)
6105 break;
6106 gcc_assert (i != NR_C_MODES);
6107
6108 for (c = pool->constants[i]; c != NULL; c = c->next)
6109 if (rtx_equal_p (val, c->value))
6110 break;
6111
6112 if (c == NULL)
6113 {
6114 c = (struct constant *) xmalloc (sizeof *c);
6115 c->value = val;
6116 c->label = gen_label_rtx ();
6117 c->next = pool->constants[i];
6118 pool->constants[i] = c;
6119 pool->size += GET_MODE_SIZE (mode);
6120 }
6121 }
6122
6123 /* Return an rtx that represents the offset of X from the start of
6124 pool POOL. */
6125
6126 static rtx
6127 s390_pool_offset (struct constant_pool *pool, rtx x)
6128 {
6129 rtx label;
6130
6131 label = gen_rtx_LABEL_REF (GET_MODE (x), pool->label);
6132 x = gen_rtx_UNSPEC (GET_MODE (x), gen_rtvec (2, x, label),
6133 UNSPEC_POOL_OFFSET);
6134 return gen_rtx_CONST (GET_MODE (x), x);
6135 }
6136
6137 /* Find constant VAL of mode MODE in the constant pool POOL.
6138 Return an RTX describing the distance from the start of
6139 the pool to the location of the new constant. */
6140
6141 static rtx
6142 s390_find_constant (struct constant_pool *pool, rtx val,
6143 enum machine_mode mode)
6144 {
6145 struct constant *c;
6146 int i;
6147
6148 for (i = 0; i < NR_C_MODES; i++)
6149 if (constant_modes[i] == mode)
6150 break;
6151 gcc_assert (i != NR_C_MODES);
6152
6153 for (c = pool->constants[i]; c != NULL; c = c->next)
6154 if (rtx_equal_p (val, c->value))
6155 break;
6156
6157 gcc_assert (c);
6158
6159 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
6160 }
6161
6162 /* Check whether INSN is an execute. Return the label_ref to its
6163 execute target template if so, NULL_RTX otherwise. */
6164
6165 static rtx
6166 s390_execute_label (rtx insn)
6167 {
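/* An execute insn is a PARALLEL whose first element is an UNSPEC_EXECUTE;
   operand 2 of that unspec is the label_ref of the target template. */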
6168 if (GET_CODE (insn) == INSN
6169 && GET_CODE (PATTERN (insn)) == PARALLEL
6170 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == UNSPEC
6171 && XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_EXECUTE)
6172 return XVECEXP (XVECEXP (PATTERN (insn), 0, 0), 0, 2);
6173
6174 return NULL_RTX;
6175 }
6176
6177 /* Add execute target for INSN to the constant pool POOL. */
6178
6179 static void
6180 s390_add_execute (struct constant_pool *pool, rtx insn)
6181 {
6182 struct constant *c;
6183
6184 for (c = pool->execute; c != NULL; c = c->next)
6185 if (INSN_UID (insn) == INSN_UID (c->value))
6186 break;
6187
6188 if (c == NULL)
6189 {
6190 c = (struct constant *) xmalloc (sizeof *c);
6191 c->value = insn;
6192 c->label = gen_label_rtx ();
6193 c->next = pool->execute;
6194 pool->execute = c;
6195 pool->size += 6;
6196 }
6197 }
6198
6199 /* Find execute target for INSN in the constant pool POOL.
6200 Return an RTX describing the distance from the start of
6201 the pool to the location of the execute target. */
6202
6203 static rtx
6204 s390_find_execute (struct constant_pool *pool, rtx insn)
6205 {
6206 struct constant *c;
6207
6208 for (c = pool->execute; c != NULL; c = c->next)
6209 if (INSN_UID (insn) == INSN_UID (c->value))
6210 break;
6211
6212 gcc_assert (c);
6213
6214 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
6215 }
6216
6217 /* For an execute INSN, extract the execute target template. */
6218
6219 static rtx
6220 s390_execute_target (rtx insn)
6221 {
6222 rtx pattern = PATTERN (insn);
6223 gcc_assert (s390_execute_label (insn));
6224
6225 if (XVECLEN (pattern, 0) == 2)
6226 {
6227 pattern = copy_rtx (XVECEXP (pattern, 0, 1));
6228 }
6229 else
6230 {
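/* Copy all pattern elements except the leading UNSPEC_EXECUTE into
   a new PARALLEL. */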
6231 rtvec vec = rtvec_alloc (XVECLEN (pattern, 0) - 1);
6232 int i;
6233
6234 for (i = 0; i < XVECLEN (pattern, 0) - 1; i++)
6235 RTVEC_ELT (vec, i) = copy_rtx (XVECEXP (pattern, 0, i + 1));
6236
6237 pattern = gen_rtx_PARALLEL (VOIDmode, vec);
6238 }
6239
6240 return pattern;
6241 }
6242
6243 /* Indicate that INSN cannot be duplicated. This is the case for
6244 execute insns that carry a unique label. */
6245
6246 static bool
6247 s390_cannot_copy_insn_p (rtx insn)
6248 {
6249 rtx label = s390_execute_label (insn);
6250 return label && label != const0_rtx;
6251 }
6252
6253 /* Dump out the constants in POOL. If REMOTE_LABEL is true,
6254 do not emit the pool base label. */
6255
6256 static void
6257 s390_dump_pool (struct constant_pool *pool, bool remote_label)
6258 {
6259 struct constant *c;
6260 rtx insn = pool->pool_insn;
6261 int i;
6262
6263 /* Switch to rodata section. */
6264 if (TARGET_CPU_ZARCH)
6265 {
6266 insn = emit_insn_after (gen_pool_section_start (), insn);
6267 INSN_ADDRESSES_NEW (insn, -1);
6268 }
6269
6270 /* Ensure minimum pool alignment. */
6271 if (TARGET_CPU_ZARCH)
6272 insn = emit_insn_after (gen_pool_align (GEN_INT (8)), insn);
6273 else
6274 insn = emit_insn_after (gen_pool_align (GEN_INT (4)), insn);
6275 INSN_ADDRESSES_NEW (insn, -1);
6276
6277 /* Emit pool base label. */
6278 if (!remote_label)
6279 {
6280 insn = emit_label_after (pool->label, insn);
6281 INSN_ADDRESSES_NEW (insn, -1);
6282 }
6283
6284 /* Dump constants in descending alignment requirement order,
6285 ensuring proper alignment for every constant. */
6286 for (i = 0; i < NR_C_MODES; i++)
6287 for (c = pool->constants[i]; c; c = c->next)
6288 {
6289 /* Convert UNSPEC_LTREL_OFFSET unspecs to pool-relative references. */
6290 rtx value = copy_rtx (c->value);
6291 if (GET_CODE (value) == CONST
6292 && GET_CODE (XEXP (value, 0)) == UNSPEC
6293 && XINT (XEXP (value, 0), 1) == UNSPEC_LTREL_OFFSET
6294 && XVECLEN (XEXP (value, 0), 0) == 1)
6295 value = s390_pool_offset (pool, XVECEXP (XEXP (value, 0), 0, 0));
6296
6297 insn = emit_label_after (c->label, insn);
6298 INSN_ADDRESSES_NEW (insn, -1);
6299
6300 value = gen_rtx_UNSPEC_VOLATILE (constant_modes[i],
6301 gen_rtvec (1, value),
6302 UNSPECV_POOL_ENTRY);
6303 insn = emit_insn_after (value, insn);
6304 INSN_ADDRESSES_NEW (insn, -1);
6305 }
6306
6307 /* Ensure minimum alignment for instructions. */
6308 insn = emit_insn_after (gen_pool_align (GEN_INT (2)), insn);
6309 INSN_ADDRESSES_NEW (insn, -1);
6310
6311 /* Output in-pool execute template insns. */
6312 for (c = pool->execute; c; c = c->next)
6313 {
6314 insn = emit_label_after (c->label, insn);
6315 INSN_ADDRESSES_NEW (insn, -1);
6316
6317 insn = emit_insn_after (s390_execute_target (c->value), insn);
6318 INSN_ADDRESSES_NEW (insn, -1);
6319 }
6320
6321 /* Switch back to previous section. */
6322 if (TARGET_CPU_ZARCH)
6323 {
6324 insn = emit_insn_after (gen_pool_section_end (), insn);
6325 INSN_ADDRESSES_NEW (insn, -1);
6326 }
6327
6328 insn = emit_barrier_after (insn);
6329 INSN_ADDRESSES_NEW (insn, -1);
6330
6331 /* Remove placeholder insn. */
6332 remove_insn (pool->pool_insn);
6333 }
6334
6335 /* Free all memory used by POOL. */
6336
6337 static void
6338 s390_free_pool (struct constant_pool *pool)
6339 {
6340 struct constant *c, *next;
6341 int i;
6342
6343 for (i = 0; i < NR_C_MODES; i++)
6344 for (c = pool->constants[i]; c; c = next)
6345 {
6346 next = c->next;
6347 free (c);
6348 }
6349
6350 for (c = pool->execute; c; c = next)
6351 {
6352 next = c->next;
6353 free (c);
6354 }
6355
6356 BITMAP_FREE (pool->insns);
6357 free (pool);
6358 }
6359
6360
6361 /* Collect main literal pool. Return NULL on overflow. */
6362
6363 static struct constant_pool *
6364 s390_mainpool_start (void)
6365 {
6366 struct constant_pool *pool;
6367 rtx insn;
6368
6369 pool = s390_alloc_pool ();
6370
6371 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6372 {
6373 if (GET_CODE (insn) == INSN
6374 && GET_CODE (PATTERN (insn)) == SET
6375 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC_VOLATILE
6376 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPECV_MAIN_POOL)
6377 {
6378 gcc_assert (!pool->pool_insn);
6379 pool->pool_insn = insn;
6380 }
6381
6382 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
6383 {
6384 s390_add_execute (pool, insn);
6385 }
6386 else if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6387 {
6388 rtx pool_ref = NULL_RTX;
6389 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6390 if (pool_ref)
6391 {
6392 rtx constant = get_pool_constant (pool_ref);
6393 enum machine_mode mode = get_pool_mode (pool_ref);
6394 s390_add_constant (pool, constant, mode);
6395 }
6396 }
6397
6398 /* If hot/cold partitioning is enabled, we have to make sure that
6399 the literal pool is emitted in the same section where the
6400 initialization of the literal pool base pointer takes place.
6401 emit_pool_after is only used in the non-overflow case on
6402 non-zSeries CPUs, where we can emit the literal pool at the end
6403 of the function body within the text section. */
6404 if (NOTE_P (insn)
6405 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS
6406 && !pool->emit_pool_after)
6407 pool->emit_pool_after = PREV_INSN (insn);
6408 }
6409
6410 gcc_assert (pool->pool_insn || pool->size == 0);
6411
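/* Base + displacement addressing only provides a 12-bit displacement
   (0..4095 bytes), so a pool of 4096 bytes or more cannot be addressed
   in full from a single base register. */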
6412 if (pool->size >= 4096)
6413 {
6414 /* We're going to chunkify the pool, so remove the main
6415 pool placeholder insn. */
6416 remove_insn (pool->pool_insn);
6417
6418 s390_free_pool (pool);
6419 pool = NULL;
6420 }
6421
6422 /* If the function ends with the section where the literal pool
6423 should be emitted, set the marker to its end. */
6424 if (pool && !pool->emit_pool_after)
6425 pool->emit_pool_after = get_last_insn ();
6426
6427 return pool;
6428 }
6429
6430 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6431 Modify the current function to output the pool constants as well as
6432 the pool register setup instruction. */
6433
6434 static void
6435 s390_mainpool_finish (struct constant_pool *pool)
6436 {
6437 rtx base_reg = cfun->machine->base_reg;
6438 rtx insn;
6439
6440 /* If the pool is empty, we're done. */
6441 if (pool->size == 0)
6442 {
6443 /* We don't actually need a base register after all. */
6444 cfun->machine->base_reg = NULL_RTX;
6445
6446 if (pool->pool_insn)
6447 remove_insn (pool->pool_insn);
6448 s390_free_pool (pool);
6449 return;
6450 }
6451
6452 /* We need correct insn addresses. */
6453 shorten_branches (get_insns ());
6454
6455 /* On zSeries, we use a LARL to load the pool register. The pool is
6456 located in the .rodata section, so we emit it after the function. */
6457 if (TARGET_CPU_ZARCH)
6458 {
6459 insn = gen_main_base_64 (base_reg, pool->label);
6460 insn = emit_insn_after (insn, pool->pool_insn);
6461 INSN_ADDRESSES_NEW (insn, -1);
6462 remove_insn (pool->pool_insn);
6463
6464 insn = get_last_insn ();
6465 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6466 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6467
6468 s390_dump_pool (pool, 0);
6469 }
6470
6471 /* On S/390, if the total size of the function's code plus literal pool
6472 does not exceed 4096 bytes, we use BASR to set up a function base
6473 pointer, and emit the literal pool at the end of the function. */
6474 else if (INSN_ADDRESSES (INSN_UID (pool->emit_pool_after))
6475 + pool->size + 8 /* alignment slop */ < 4096)
6476 {
6477 insn = gen_main_base_31_small (base_reg, pool->label);
6478 insn = emit_insn_after (insn, pool->pool_insn);
6479 INSN_ADDRESSES_NEW (insn, -1);
6480 remove_insn (pool->pool_insn);
6481
6482 insn = emit_label_after (pool->label, insn);
6483 INSN_ADDRESSES_NEW (insn, -1);
6484
6485 /* emit_pool_after will be set by s390_mainpool_start to the
6486 last insn of the section where the literal pool should be
6487 emitted. */
6488 insn = pool->emit_pool_after;
6489
6490 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6491 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6492
6493 s390_dump_pool (pool, 1);
6494 }
6495
6496 /* Otherwise, we emit an inline literal pool and use BASR to branch
6497 over it, setting up the pool register at the same time. */
6498 else
6499 {
6500 rtx pool_end = gen_label_rtx ();
6501
6502 insn = gen_main_base_31_large (base_reg, pool->label, pool_end);
6503 insn = emit_insn_after (insn, pool->pool_insn);
6504 INSN_ADDRESSES_NEW (insn, -1);
6505 remove_insn (pool->pool_insn);
6506
6507 insn = emit_label_after (pool->label, insn);
6508 INSN_ADDRESSES_NEW (insn, -1);
6509
6510 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6511 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6512
6513 insn = emit_label_after (pool_end, pool->pool_insn);
6514 INSN_ADDRESSES_NEW (insn, -1);
6515
6516 s390_dump_pool (pool, 1);
6517 }
6518
6519
6520 /* Replace all literal pool references. */
6521
6522 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6523 {
6524 if (INSN_P (insn))
6525 replace_ltrel_base (&PATTERN (insn));
6526
6527 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6528 {
6529 rtx addr, pool_ref = NULL_RTX;
6530 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6531 if (pool_ref)
6532 {
6533 if (s390_execute_label (insn))
6534 addr = s390_find_execute (pool, insn);
6535 else
6536 addr = s390_find_constant (pool, get_pool_constant (pool_ref),
6537 get_pool_mode (pool_ref));
6538
6539 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
6540 INSN_CODE (insn) = -1;
6541 }
6542 }
6543 }
6544
6545
6546 /* Free the pool. */
6547 s390_free_pool (pool);
6548 }
6549
6550 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6551 We have decided we cannot use this pool, so revert all changes
6552 to the current function that were done by s390_mainpool_start. */
6553 static void
6554 s390_mainpool_cancel (struct constant_pool *pool)
6555 {
6556 /* We didn't actually change the instruction stream, so simply
6557 free the pool memory. */
6558 s390_free_pool (pool);
6559 }
6560
6561
6562 /* Chunkify the literal pool. */
6563
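/* Lower and upper size targets for a single pool chunk (3 KB and 3.5 KB),
   both kept below the 4 KB displacement range of base + displacement
   addressing. */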
6564 #define S390_POOL_CHUNK_MIN 0xc00
6565 #define S390_POOL_CHUNK_MAX 0xe00
6566
6567 static struct constant_pool *
6568 s390_chunkify_start (void)
6569 {
6570 struct constant_pool *curr_pool = NULL, *pool_list = NULL;
6571 int extra_size = 0;
6572 bitmap far_labels;
6573 rtx pending_ltrel = NULL_RTX;
6574 rtx insn;
6575
6576 rtx (*gen_reload_base) (rtx, rtx) =
6577 TARGET_CPU_ZARCH? gen_reload_base_64 : gen_reload_base_31;
6578
6579
6580 /* We need correct insn addresses. */
6581
6582 shorten_branches (get_insns ());
6583
6584 /* Scan all insns and move literals to pool chunks. */
6585
6586 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6587 {
6588 bool section_switch_p = false;
6589
6590 /* Check for pending LTREL_BASE. */
6591 if (INSN_P (insn))
6592 {
6593 rtx ltrel_base = find_ltrel_base (PATTERN (insn));
6594 if (ltrel_base)
6595 {
6596 gcc_assert (ltrel_base == pending_ltrel);
6597 pending_ltrel = NULL_RTX;
6598 }
6599 }
6600
6601 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
6602 {
6603 if (!curr_pool)
6604 curr_pool = s390_start_pool (&pool_list, insn);
6605
6606 s390_add_execute (curr_pool, insn);
6607 s390_add_pool_insn (curr_pool, insn);
6608 }
6609 else if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6610 {
6611 rtx pool_ref = NULL_RTX;
6612 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6613 if (pool_ref)
6614 {
6615 rtx constant = get_pool_constant (pool_ref);
6616 enum machine_mode mode = get_pool_mode (pool_ref);
6617
6618 if (!curr_pool)
6619 curr_pool = s390_start_pool (&pool_list, insn);
6620
6621 s390_add_constant (curr_pool, constant, mode);
6622 s390_add_pool_insn (curr_pool, insn);
6623
6624 /* Don't split the pool chunk between a LTREL_OFFSET load
6625 and the corresponding LTREL_BASE. */
6626 if (GET_CODE (constant) == CONST
6627 && GET_CODE (XEXP (constant, 0)) == UNSPEC
6628 && XINT (XEXP (constant, 0), 1) == UNSPEC_LTREL_OFFSET)
6629 {
6630 gcc_assert (!pending_ltrel);
6631 pending_ltrel = pool_ref;
6632 }
6633 }
6634 }
6635
6636 if (GET_CODE (insn) == JUMP_INSN || GET_CODE (insn) == CODE_LABEL)
6637 {
6638 if (curr_pool)
6639 s390_add_pool_insn (curr_pool, insn);
6640 /* An LTREL_BASE must follow within the same basic block. */
6641 gcc_assert (!pending_ltrel);
6642 }
6643
6644 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
6645 section_switch_p = true;
6646
6647 if (!curr_pool
6648 || INSN_ADDRESSES_SIZE () <= (size_t) INSN_UID (insn)
6649 || INSN_ADDRESSES (INSN_UID (insn)) == -1)
6650 continue;
6651
6652 if (TARGET_CPU_ZARCH)
6653 {
6654 if (curr_pool->size < S390_POOL_CHUNK_MAX)
6655 continue;
6656
6657 s390_end_pool (curr_pool, NULL_RTX);
6658 curr_pool = NULL;
6659 }
6660 else
6661 {
6662 int chunk_size = INSN_ADDRESSES (INSN_UID (insn))
6663 - INSN_ADDRESSES (INSN_UID (curr_pool->first_insn))
6664 + extra_size;
6665
6666 /* We will later have to insert base register reload insns.
6667 Those will have an effect on code size, which we need to
6668 consider here. This calculation makes rather pessimistic
6669 worst-case assumptions. */
6670 if (GET_CODE (insn) == CODE_LABEL)
6671 extra_size += 6;
6672
6673 if (chunk_size < S390_POOL_CHUNK_MIN
6674 && curr_pool->size < S390_POOL_CHUNK_MIN
6675 && !section_switch_p)
6676 continue;
6677
6678 /* Pool chunks can only be inserted after BARRIERs ... */
6679 if (GET_CODE (insn) == BARRIER)
6680 {
6681 s390_end_pool (curr_pool, insn);
6682 curr_pool = NULL;
6683 extra_size = 0;
6684 }
6685
6686 /* ... so if we don't find one in time, create one. */
6687 else if (chunk_size > S390_POOL_CHUNK_MAX
6688 || curr_pool->size > S390_POOL_CHUNK_MAX
6689 || section_switch_p)
6690 {
6691 rtx label, jump, barrier;
6692
6693 if (!section_switch_p)
6694 {
6695 /* We can insert the barrier only after a 'real' insn. */
6696 if (GET_CODE (insn) != INSN && GET_CODE (insn) != CALL_INSN)
6697 continue;
6698 if (get_attr_length (insn) == 0)
6699 continue;
6700 /* Don't separate LTREL_BASE from the corresponding
6701 LTREL_OFFSET load. */
6702 if (pending_ltrel)
6703 continue;
6704 }
6705 else
6706 {
6707 gcc_assert (!pending_ltrel);
6708
6709 /* The old pool has to end before the section switch
6710 note in order to make it part of the current
6711 section. */
6712 insn = PREV_INSN (insn);
6713 }
6714
6715 label = gen_label_rtx ();
6716 jump = emit_jump_insn_after (gen_jump (label), insn);
6717 barrier = emit_barrier_after (jump);
6718 insn = emit_label_after (label, barrier);
6719 JUMP_LABEL (jump) = label;
6720 LABEL_NUSES (label) = 1;
6721
6722 INSN_ADDRESSES_NEW (jump, -1);
6723 INSN_ADDRESSES_NEW (barrier, -1);
6724 INSN_ADDRESSES_NEW (insn, -1);
6725
6726 s390_end_pool (curr_pool, barrier);
6727 curr_pool = NULL;
6728 extra_size = 0;
6729 }
6730 }
6731 }
6732
6733 if (curr_pool)
6734 s390_end_pool (curr_pool, NULL_RTX);
6735 gcc_assert (!pending_ltrel);
6736
6737 /* Find all labels that are branched into
6738 from an insn belonging to a different chunk. */
6739
6740 far_labels = BITMAP_ALLOC (NULL);
6741
6742 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6743 {
6744 /* Labels marked with LABEL_PRESERVE_P can be the target
6745 of non-local jumps, so we have to mark them.
6746 The same holds for named labels.
6747
6748 Don't do that, however, if it is the label before
6749 a jump table. */
6750
6751 if (GET_CODE (insn) == CODE_LABEL
6752 && (LABEL_PRESERVE_P (insn) || LABEL_NAME (insn)))
6753 {
6754 rtx vec_insn = next_real_insn (insn);
6755 rtx vec_pat = vec_insn && GET_CODE (vec_insn) == JUMP_INSN ?
6756 PATTERN (vec_insn) : NULL_RTX;
6757 if (!vec_pat
6758 || !(GET_CODE (vec_pat) == ADDR_VEC
6759 || GET_CODE (vec_pat) == ADDR_DIFF_VEC))
6760 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (insn));
6761 }
6762
6763 /* If we have a direct jump (conditional or unconditional)
6764 or a casesi jump, check all potential targets. */
6765 else if (GET_CODE (insn) == JUMP_INSN)
6766 {
6767 rtx pat = PATTERN (insn);
6768 if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 2)
6769 pat = XVECEXP (pat, 0, 0);
6770
6771 if (GET_CODE (pat) == SET)
6772 {
6773 rtx label = JUMP_LABEL (insn);
6774 if (label)
6775 {
6776 if (s390_find_pool (pool_list, label)
6777 != s390_find_pool (pool_list, insn))
6778 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
6779 }
6780 }
6781 else if (GET_CODE (pat) == PARALLEL
6782 && XVECLEN (pat, 0) == 2
6783 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
6784 && GET_CODE (XVECEXP (pat, 0, 1)) == USE
6785 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == LABEL_REF)
6786 {
6787 /* Find the jump table used by this casesi jump. */
6788 rtx vec_label = XEXP (XEXP (XVECEXP (pat, 0, 1), 0), 0);
6789 rtx vec_insn = next_real_insn (vec_label);
6790 rtx vec_pat = vec_insn && GET_CODE (vec_insn) == JUMP_INSN ?
6791 PATTERN (vec_insn) : NULL_RTX;
6792 if (vec_pat
6793 && (GET_CODE (vec_pat) == ADDR_VEC
6794 || GET_CODE (vec_pat) == ADDR_DIFF_VEC))
6795 {
6796 int i, diff_p = GET_CODE (vec_pat) == ADDR_DIFF_VEC;
6797
6798 for (i = 0; i < XVECLEN (vec_pat, diff_p); i++)
6799 {
6800 rtx label = XEXP (XVECEXP (vec_pat, diff_p, i), 0);
6801
6802 if (s390_find_pool (pool_list, label)
6803 != s390_find_pool (pool_list, insn))
6804 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
6805 }
6806 }
6807 }
6808 }
6809 }
6810
6811 /* Insert base register reload insns before every pool. */
6812
6813 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
6814 {
6815 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
6816 curr_pool->label);
6817 rtx insn = curr_pool->first_insn;
6818 INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
6819 }
6820
6821 /* Insert base register reload insns at every far label. */
6822
6823 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6824 if (GET_CODE (insn) == CODE_LABEL
6825 && bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
6826 {
6827 struct constant_pool *pool = s390_find_pool (pool_list, insn);
6828 if (pool)
6829 {
6830 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
6831 pool->label);
6832 INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
6833 }
6834 }
6835
6836
6837 BITMAP_FREE (far_labels);
6838
6839
6840 /* Recompute insn addresses. */
6841
6842 init_insn_lengths ();
6843 shorten_branches (get_insns ());
6844
6845 return pool_list;
6846 }
6847
6848 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
6849 After we have decided to use this list, finish implementing
6850 all changes to the current function as required. */
6851
6852 static void
6853 s390_chunkify_finish (struct constant_pool *pool_list)
6854 {
6855 struct constant_pool *curr_pool = NULL;
6856 rtx insn;
6857
6858
6859 /* Replace all literal pool references. */
6860
6861 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6862 {
6863 if (INSN_P (insn))
6864 replace_ltrel_base (&PATTERN (insn));
6865
6866 curr_pool = s390_find_pool (pool_list, insn);
6867 if (!curr_pool)
6868 continue;
6869
6870 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6871 {
6872 rtx addr, pool_ref = NULL_RTX;
6873 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6874 if (pool_ref)
6875 {
6876 if (s390_execute_label (insn))
6877 addr = s390_find_execute (curr_pool, insn);
6878 else
6879 addr = s390_find_constant (curr_pool,
6880 get_pool_constant (pool_ref),
6881 get_pool_mode (pool_ref));
6882
6883 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
6884 INSN_CODE (insn) = -1;
6885 }
6886 }
6887 }
6888
6889 /* Dump out all literal pools. */
6890
6891 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
6892 s390_dump_pool (curr_pool, 0);
6893
6894 /* Free pool list. */
6895
6896 while (pool_list)
6897 {
6898 struct constant_pool *next = pool_list->next;
6899 s390_free_pool (pool_list);
6900 pool_list = next;
6901 }
6902 }
6903
6904 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
6905 We have decided we cannot use this list, so revert all changes
6906 to the current function that were done by s390_chunkify_start. */
6907
6908 static void
6909 s390_chunkify_cancel (struct constant_pool *pool_list)
6910 {
6911 struct constant_pool *curr_pool = NULL;
6912 rtx insn;
6913
6914 /* Remove all pool placeholder insns. */
6915
6916 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
6917 {
6918 /* Did we insert an extra barrier? Remove it. */
6919 rtx barrier = PREV_INSN (curr_pool->pool_insn);
6920 rtx jump = barrier? PREV_INSN (barrier) : NULL_RTX;
6921 rtx label = NEXT_INSN (curr_pool->pool_insn);
6922
6923 if (jump && GET_CODE (jump) == JUMP_INSN
6924 && barrier && GET_CODE (barrier) == BARRIER
6925 && label && GET_CODE (label) == CODE_LABEL
6926 && GET_CODE (PATTERN (jump)) == SET
6927 && SET_DEST (PATTERN (jump)) == pc_rtx
6928 && GET_CODE (SET_SRC (PATTERN (jump))) == LABEL_REF
6929 && XEXP (SET_SRC (PATTERN (jump)), 0) == label)
6930 {
6931 remove_insn (jump);
6932 remove_insn (barrier);
6933 remove_insn (label);
6934 }
6935
6936 remove_insn (curr_pool->pool_insn);
6937 }
6938
6939 /* Remove all base register reload insns. */
6940
6941 for (insn = get_insns (); insn; )
6942 {
6943 rtx next_insn = NEXT_INSN (insn);
6944
6945 if (GET_CODE (insn) == INSN
6946 && GET_CODE (PATTERN (insn)) == SET
6947 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
6948 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_RELOAD_BASE)
6949 remove_insn (insn);
6950
6951 insn = next_insn;
6952 }
6953
6954 /* Free pool list. */
6955
6956 while (pool_list)
6957 {
6958 struct constant_pool *next = pool_list->next;
6959 s390_free_pool (pool_list);
6960 pool_list = next;
6961 }
6962 }
6963
6964 /* Output the constant pool entry EXP in mode MODE with alignment ALIGN. */
6965
6966 void
6967 s390_output_pool_entry (rtx exp, enum machine_mode mode, unsigned int align)
6968 {
6969 REAL_VALUE_TYPE r;
6970
6971 switch (GET_MODE_CLASS (mode))
6972 {
6973 case MODE_FLOAT:
6974 case MODE_DECIMAL_FLOAT:
6975 gcc_assert (GET_CODE (exp) == CONST_DOUBLE);
6976
6977 REAL_VALUE_FROM_CONST_DOUBLE (r, exp);
6978 assemble_real (r, mode, align);
6979 break;
6980
6981 case MODE_INT:
6982 assemble_integer (exp, GET_MODE_SIZE (mode), align, 1);
6983 mark_symbol_refs_as_used (exp);
6984 break;
6985
6986 default:
6987 gcc_unreachable ();
6988 }
6989 }
6990
6991
6992 /* Return an RTL expression representing the value of the return address
6993 for the frame COUNT steps up from the current frame. FRAME is the
6994 frame pointer of that frame. */
6995
6996 rtx
6997 s390_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
6998 {
6999 int offset;
7000 rtx addr;
7001
7002 /* Without backchain, we fail for all but the current frame. */
7003
7004 if (!TARGET_BACKCHAIN && count > 0)
7005 return NULL_RTX;
7006
7007 /* For the current frame, we need to make sure the initial
7008 value of RETURN_REGNUM is actually saved. */
7009
7010 if (count == 0)
7011 {
7012 /* On non-z architectures branch splitting could overwrite r14. */
7013 if (TARGET_CPU_ZARCH)
7014 return get_hard_reg_initial_val (Pmode, RETURN_REGNUM);
7015 else
7016 {
7017 cfun_frame_layout.save_return_addr_p = true;
7018 return gen_rtx_MEM (Pmode, return_address_pointer_rtx);
7019 }
7020 }
7021
7022 if (TARGET_PACKED_STACK)
7023 offset = -2 * UNITS_PER_LONG;
7024 else
7025 offset = RETURN_REGNUM * UNITS_PER_LONG;
7026
7027 addr = plus_constant (frame, offset);
7028 addr = memory_address (Pmode, addr);
7029 return gen_rtx_MEM (Pmode, addr);
7030 }
7031
7032 /* Return an RTL expression representing the back chain stored in
7033 the current stack frame. */
7034
7035 rtx
7036 s390_back_chain_rtx (void)
7037 {
7038 rtx chain;
7039
7040 gcc_assert (TARGET_BACKCHAIN);
7041
7042 if (TARGET_PACKED_STACK)
7043 chain = plus_constant (stack_pointer_rtx,
7044 STACK_POINTER_OFFSET - UNITS_PER_LONG);
7045 else
7046 chain = stack_pointer_rtx;
7047
7048 chain = gen_rtx_MEM (Pmode, chain);
7049 return chain;
7050 }
7051
7052 /* Find the first call-clobbered register unused in a function.
7053 This could be used as a base register in a leaf function
7054 or for holding the return address before the epilogue. */
7055
7056 static int
7057 find_unused_clobbered_reg (void)
7058 {
7059 int i;
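/* GPRs 0 through 5 are the call-clobbered argument and scratch registers;
   return the first one that is never used in this function. */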
7060 for (i = 0; i < 6; i++)
7061 if (!df_regs_ever_live_p (i))
7062 return i;
7063 return 0;
7064 }
7065
7066
7067 /* Helper function for s390_regs_ever_clobbered. Sets the fields in DATA for all
7068 clobbered hard regs in SETREG. */
7069
7070 static void
7071 s390_reg_clobbered_rtx (rtx setreg, const_rtx set_insn ATTRIBUTE_UNUSED, void *data)
7072 {
7073 int *regs_ever_clobbered = (int *)data;
7074 unsigned int i, regno;
7075 enum machine_mode mode = GET_MODE (setreg);
7076
7077 if (GET_CODE (setreg) == SUBREG)
7078 {
7079 rtx inner = SUBREG_REG (setreg);
7080 if (!GENERAL_REG_P (inner))
7081 return;
7082 regno = subreg_regno (setreg);
7083 }
7084 else if (GENERAL_REG_P (setreg))
7085 regno = REGNO (setreg);
7086 else
7087 return;
7088
7089 for (i = regno;
7090 i < regno + HARD_REGNO_NREGS (regno, mode);
7091 i++)
7092 regs_ever_clobbered[i] = 1;
7093 }
7094
7095 /* Walks through all basic blocks of the current function looking
7096 for clobbered hard regs using s390_reg_clobbered_rtx. The fields
7097 of the passed integer array REGS_EVER_CLOBBERED are set to one for
7098 each of those regs. */
7099
7100 static void
7101 s390_regs_ever_clobbered (int *regs_ever_clobbered)
7102 {
7103 basic_block cur_bb;
7104 rtx cur_insn;
7105 unsigned int i;
7106
7107 memset (regs_ever_clobbered, 0, 16 * sizeof (int));
7108
7109 /* For non-leaf functions we have to consider all call-clobbered regs to be
7110 clobbered. */
7111 if (!current_function_is_leaf)
7112 {
7113 for (i = 0; i < 16; i++)
7114 regs_ever_clobbered[i] = call_really_used_regs[i];
7115 }
7116
7117 /* Make the "magic" eh_return registers live if necessary. For regs_ever_live
7118 this work is done by liveness analysis (mark_regs_live_at_end).
7119 Special care is needed for functions containing landing pads. Landing pads
7120 may use the eh registers, but the code which sets these registers is not
7121 contained in that function. Hence s390_regs_ever_clobbered is not able to
7122 deal with this automatically. */
7123 if (crtl->calls_eh_return || cfun->machine->has_landing_pad_p)
7124 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM ; i++)
7125 if (crtl->calls_eh_return
7126 || (cfun->machine->has_landing_pad_p
7127 && df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
7128 regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;
7129
7130 /* For nonlocal gotos all call-saved registers have to be saved.
7131 This flag is also set for the unwinding code in libgcc.
7132 See expand_builtin_unwind_init. For regs_ever_live this is done by
7133 reload. */
7134 if (cfun->has_nonlocal_label)
7135 for (i = 0; i < 16; i++)
7136 if (!call_really_used_regs[i])
7137 regs_ever_clobbered[i] = 1;
7138
7139 FOR_EACH_BB (cur_bb)
7140 {
7141 FOR_BB_INSNS (cur_bb, cur_insn)
7142 {
7143 if (INSN_P (cur_insn))
7144 note_stores (PATTERN (cur_insn),
7145 s390_reg_clobbered_rtx,
7146 regs_ever_clobbered);
7147 }
7148 }
7149 }
7150
7151 /* Determine the frame area which actually has to be accessed
7152 in the function epilogue. The values are stored at the
7153 given pointers AREA_BOTTOM (the lowest used stack address) and
7154 AREA_TOP (the address of the first item which does not belong
7155 to the stack frame). */
7156
7157 static void
7158 s390_frame_area (int *area_bottom, int *area_top)
7159 {
7160 int b, t;
7161 int i;
7162
7163 b = INT_MAX;
7164 t = INT_MIN;
7165
7166 if (cfun_frame_layout.first_restore_gpr != -1)
7167 {
7168 b = (cfun_frame_layout.gprs_offset
7169 + cfun_frame_layout.first_restore_gpr * UNITS_PER_LONG);
7170 t = b + (cfun_frame_layout.last_restore_gpr
7171 - cfun_frame_layout.first_restore_gpr + 1) * UNITS_PER_LONG;
7172 }
7173
7174 if (TARGET_64BIT && cfun_save_high_fprs_p)
7175 {
7176 b = MIN (b, cfun_frame_layout.f8_offset);
7177 t = MAX (t, (cfun_frame_layout.f8_offset
7178 + cfun_frame_layout.high_fprs * 8));
7179 }
7180
7181 if (!TARGET_64BIT)
7182 for (i = 2; i < 4; i++)
7183 if (cfun_fpr_bit_p (i))
7184 {
7185 b = MIN (b, cfun_frame_layout.f4_offset + (i - 2) * 8);
7186 t = MAX (t, cfun_frame_layout.f4_offset + (i - 1) * 8);
7187 }
7188
7189 *area_bottom = b;
7190 *area_top = t;
7191 }
7192
7193 /* Fill cfun->machine with info about register usage of current function.
7194 Return in CLOBBERED_REGS which GPRs are currently considered set. */
7195
7196 static void
7197 s390_register_info (int clobbered_regs[])
7198 {
7199 int i, j;
7200
7201 /* fprs 8 - 15 are call-saved in the 64-bit ABI. */
7202 cfun_frame_layout.fpr_bitmap = 0;
7203 cfun_frame_layout.high_fprs = 0;
7204 if (TARGET_64BIT)
7205 for (i = 24; i < 32; i++)
7206 if (df_regs_ever_live_p (i) && !global_regs[i])
7207 {
7208 cfun_set_fpr_bit (i - 16);
7209 cfun_frame_layout.high_fprs++;
7210 }
7211
7212 /* Find first and last gpr to be saved. We trust regs_ever_live
7213 data, except that we don't save and restore global registers.
7214
7215 Also, all registers with special meaning to the compiler need
7216 extra handling. */
7217
7218 s390_regs_ever_clobbered (clobbered_regs);
7219
7220 for (i = 0; i < 16; i++)
7221 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i] && !fixed_regs[i];
7222
7223 if (frame_pointer_needed)
7224 clobbered_regs[HARD_FRAME_POINTER_REGNUM] = 1;
7225
7226 if (flag_pic)
7227 clobbered_regs[PIC_OFFSET_TABLE_REGNUM]
7228 |= df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM);
7229
7230 clobbered_regs[BASE_REGNUM]
7231 |= (cfun->machine->base_reg
7232 && REGNO (cfun->machine->base_reg) == BASE_REGNUM);
7233
7234 clobbered_regs[RETURN_REGNUM]
7235 |= (!current_function_is_leaf
7236 || TARGET_TPF_PROFILING
7237 || cfun->machine->split_branches_pending_p
7238 || cfun_frame_layout.save_return_addr_p
7239 || crtl->calls_eh_return
7240 || cfun->stdarg);
7241
7242 clobbered_regs[STACK_POINTER_REGNUM]
7243 |= (!current_function_is_leaf
7244 || TARGET_TPF_PROFILING
7245 || cfun_save_high_fprs_p
7246 || get_frame_size () > 0
7247 || cfun->calls_alloca
7248 || cfun->stdarg);
7249
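/* Find the first and last register in the range 6..15 that is live or
   clobbered; the argument registers 2-5 are added later for varargs
   functions if needed. */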
7250 for (i = 6; i < 16; i++)
7251 if (df_regs_ever_live_p (i) || clobbered_regs[i])
7252 break;
7253 for (j = 15; j > i; j--)
7254 if (df_regs_ever_live_p (j) || clobbered_regs[j])
7255 break;
7256
7257 if (i == 16)
7258 {
7259 /* Nothing to save/restore. */
7260 cfun_frame_layout.first_save_gpr_slot = -1;
7261 cfun_frame_layout.last_save_gpr_slot = -1;
7262 cfun_frame_layout.first_save_gpr = -1;
7263 cfun_frame_layout.first_restore_gpr = -1;
7264 cfun_frame_layout.last_save_gpr = -1;
7265 cfun_frame_layout.last_restore_gpr = -1;
7266 }
7267 else
7268 {
7269 /* Save slots for gprs from i to j. */
7270 cfun_frame_layout.first_save_gpr_slot = i;
7271 cfun_frame_layout.last_save_gpr_slot = j;
7272
7273 for (i = cfun_frame_layout.first_save_gpr_slot;
7274 i < cfun_frame_layout.last_save_gpr_slot + 1;
7275 i++)
7276 if (clobbered_regs[i])
7277 break;
7278
7279 for (j = cfun_frame_layout.last_save_gpr_slot; j > i; j--)
7280 if (clobbered_regs[j])
7281 break;
7282
7283 if (i == cfun_frame_layout.last_save_gpr_slot + 1)
7284 {
7285 /* Nothing to save/restore. */
7286 cfun_frame_layout.first_save_gpr = -1;
7287 cfun_frame_layout.first_restore_gpr = -1;
7288 cfun_frame_layout.last_save_gpr = -1;
7289 cfun_frame_layout.last_restore_gpr = -1;
7290 }
7291 else
7292 {
7293 /* Save / Restore from gpr i to j. */
7294 cfun_frame_layout.first_save_gpr = i;
7295 cfun_frame_layout.first_restore_gpr = i;
7296 cfun_frame_layout.last_save_gpr = j;
7297 cfun_frame_layout.last_restore_gpr = j;
7298 }
7299 }
7300
7301 if (cfun->stdarg)
7302 {
7303 /* Varargs functions need to save gprs 2 to 6. */
7304 if (cfun->va_list_gpr_size
7305 && crtl->args.info.gprs < GP_ARG_NUM_REG)
7306 {
7307 int min_gpr = crtl->args.info.gprs;
7308 int max_gpr = min_gpr + cfun->va_list_gpr_size;
7309 if (max_gpr > GP_ARG_NUM_REG)
7310 max_gpr = GP_ARG_NUM_REG;
7311
7312 if (cfun_frame_layout.first_save_gpr == -1
7313 || cfun_frame_layout.first_save_gpr > 2 + min_gpr)
7314 {
7315 cfun_frame_layout.first_save_gpr = 2 + min_gpr;
7316 cfun_frame_layout.first_save_gpr_slot = 2 + min_gpr;
7317 }
7318
7319 if (cfun_frame_layout.last_save_gpr == -1
7320 || cfun_frame_layout.last_save_gpr < 2 + max_gpr - 1)
7321 {
7322 cfun_frame_layout.last_save_gpr = 2 + max_gpr - 1;
7323 cfun_frame_layout.last_save_gpr_slot = 2 + max_gpr - 1;
7324 }
7325 }
7326
7327 /* Mark f0, f2 for 31 bit and f0-f4 for 64 bit to be saved. */
7328 if (TARGET_HARD_FLOAT && cfun->va_list_fpr_size
7329 && crtl->args.info.fprs < FP_ARG_NUM_REG)
7330 {
7331 int min_fpr = crtl->args.info.fprs;
7332 int max_fpr = min_fpr + cfun->va_list_fpr_size;
7333 if (max_fpr > FP_ARG_NUM_REG)
7334 max_fpr = FP_ARG_NUM_REG;
7335
7336 /* ??? This is currently required to ensure proper location
7337 of the fpr save slots within the va_list save area. */
7338 if (TARGET_PACKED_STACK)
7339 min_fpr = 0;
7340
7341 for (i = min_fpr; i < max_fpr; i++)
7342 cfun_set_fpr_bit (i);
7343 }
7344 }
7345
7346 if (!TARGET_64BIT)
7347 for (i = 2; i < 4; i++)
7348 if (df_regs_ever_live_p (i + 16) && !global_regs[i + 16])
7349 cfun_set_fpr_bit (i);
7350 }
7351
7352 /* Fill cfun->machine with info about frame of current function. */
7353
7354 static void
7355 s390_frame_info (void)
7356 {
7357 int i;
7358
7359 cfun_frame_layout.frame_size = get_frame_size ();
7360 if (!TARGET_64BIT && cfun_frame_layout.frame_size > 0x7fff0000)
7361 fatal_error ("total size of local variables exceeds architecture limit");
7362
7363 if (!TARGET_PACKED_STACK)
7364 {
7365 cfun_frame_layout.backchain_offset = 0;
7366 cfun_frame_layout.f0_offset = 16 * UNITS_PER_LONG;
7367 cfun_frame_layout.f4_offset = cfun_frame_layout.f0_offset + 2 * 8;
7368 cfun_frame_layout.f8_offset = -cfun_frame_layout.high_fprs * 8;
7369 cfun_frame_layout.gprs_offset = (cfun_frame_layout.first_save_gpr_slot
7370 * UNITS_PER_LONG);
7371 }
7372 else if (TARGET_BACKCHAIN) /* kernel stack layout */
7373 {
7374 cfun_frame_layout.backchain_offset = (STACK_POINTER_OFFSET
7375 - UNITS_PER_LONG);
7376 cfun_frame_layout.gprs_offset
7377 = (cfun_frame_layout.backchain_offset
7378 - (STACK_POINTER_REGNUM - cfun_frame_layout.first_save_gpr_slot + 1)
7379 * UNITS_PER_LONG);
7380
7381 if (TARGET_64BIT)
7382 {
7383 cfun_frame_layout.f4_offset
7384 = (cfun_frame_layout.gprs_offset
7385 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7386
7387 cfun_frame_layout.f0_offset
7388 = (cfun_frame_layout.f4_offset
7389 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7390 }
7391 else
7392 {
7393 /* On 31 bit we have to take care of the alignment of the
7394 floating point register save area to provide the fastest access. */
7395 cfun_frame_layout.f0_offset
7396 = ((cfun_frame_layout.gprs_offset
7397 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1))
7398 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7399
7400 cfun_frame_layout.f4_offset
7401 = (cfun_frame_layout.f0_offset
7402 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7403 }
7404 }
7405 else /* no backchain */
7406 {
7407 cfun_frame_layout.f4_offset
7408 = (STACK_POINTER_OFFSET
7409 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7410
7411 cfun_frame_layout.f0_offset
7412 = (cfun_frame_layout.f4_offset
7413 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7414
7415 cfun_frame_layout.gprs_offset
7416 = cfun_frame_layout.f0_offset - cfun_gprs_save_area_size;
7417 }
7418
7419 if (current_function_is_leaf
7420 && !TARGET_TPF_PROFILING
7421 && cfun_frame_layout.frame_size == 0
7422 && !cfun_save_high_fprs_p
7423 && !cfun->calls_alloca
7424 && !cfun->stdarg)
7425 return;
7426
7427 if (!TARGET_PACKED_STACK)
7428 cfun_frame_layout.frame_size += (STACK_POINTER_OFFSET
7429 + crtl->outgoing_args_size
7430 + cfun_frame_layout.high_fprs * 8);
7431 else
7432 {
7433 if (TARGET_BACKCHAIN)
7434 cfun_frame_layout.frame_size += UNITS_PER_LONG;
7435
7436 /* No alignment trouble here because f8-f15 are only saved under
7437 64 bit. */
7438 cfun_frame_layout.f8_offset = (MIN (MIN (cfun_frame_layout.f0_offset,
7439 cfun_frame_layout.f4_offset),
7440 cfun_frame_layout.gprs_offset)
7441 - cfun_frame_layout.high_fprs * 8);
7442
7443 cfun_frame_layout.frame_size += cfun_frame_layout.high_fprs * 8;
7444
7445 for (i = 0; i < 8; i++)
7446 if (cfun_fpr_bit_p (i))
7447 cfun_frame_layout.frame_size += 8;
7448
7449 cfun_frame_layout.frame_size += cfun_gprs_save_area_size;
7450
7451 /* If, under 31 bit, an odd number of gprs has to be saved, we have to
7452 adjust the frame size to maintain the 8 byte alignment of stack frames. */
7453 cfun_frame_layout.frame_size = ((cfun_frame_layout.frame_size +
7454 STACK_BOUNDARY / BITS_PER_UNIT - 1)
7455 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1));
7456
7457 cfun_frame_layout.frame_size += crtl->outgoing_args_size;
7458 }
7459 }
7460
7461 /* Generate frame layout. Fills in register and frame data for the current
7462 function in cfun->machine. This routine can be called multiple times;
7463 it will re-do the complete frame layout every time. */
7464
7465 static void
7466 s390_init_frame_layout (void)
7467 {
7468 HOST_WIDE_INT frame_size;
7469 int base_used;
7470 int clobbered_regs[16];
7471
7472 /* On S/390 machines, we may need to perform branch splitting, which
7473 will require both the base and the return address register. We have
7474 no choice but to assume we're going to need them until right at the
7475 end of the machine-dependent reorg phase. */
7476 if (!TARGET_CPU_ZARCH)
7477 cfun->machine->split_branches_pending_p = true;
7478
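/* Iterate until the frame size stabilizes: whether the base register is
   needed depends on the frame size, and reserving the base register in
   turn changes the register save area and thus the frame size. */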
7479 do
7480 {
7481 frame_size = cfun_frame_layout.frame_size;
7482
7483 /* Try to predict whether we'll need the base register. */
7484 base_used = cfun->machine->split_branches_pending_p
7485 || crtl->uses_const_pool
7486 || (!DISP_IN_RANGE (frame_size)
7487 && !CONST_OK_FOR_K (frame_size));
7488
7489 /* Decide which register to use as literal pool base. In small
7490 leaf functions, try to use an unused call-clobbered register
7491 as base register to avoid save/restore overhead. */
7492 if (!base_used)
7493 cfun->machine->base_reg = NULL_RTX;
7494 else if (current_function_is_leaf && !df_regs_ever_live_p (5))
7495 cfun->machine->base_reg = gen_rtx_REG (Pmode, 5);
7496 else
7497 cfun->machine->base_reg = gen_rtx_REG (Pmode, BASE_REGNUM);
7498
7499 s390_register_info (clobbered_regs);
7500 s390_frame_info ();
7501 }
7502 while (frame_size != cfun_frame_layout.frame_size);
7503 }
7504
7505 /* Update frame layout. Recompute actual register save data based on
7506 current info and update regs_ever_live for the special registers.
7507 May be called multiple times, but may never cause *more* registers
7508 to be saved than s390_init_frame_layout allocated room for. */
7509
7510 static void
7511 s390_update_frame_layout (void)
7512 {
7513 int clobbered_regs[16];
7514
7515 s390_register_info (clobbered_regs);
7516
7517 df_set_regs_ever_live (BASE_REGNUM,
7518 clobbered_regs[BASE_REGNUM] ? true : false);
7519 df_set_regs_ever_live (RETURN_REGNUM,
7520 clobbered_regs[RETURN_REGNUM] ? true : false);
7521 df_set_regs_ever_live (STACK_POINTER_REGNUM,
7522 clobbered_regs[STACK_POINTER_REGNUM] ? true : false);
7523
7524 if (cfun->machine->base_reg)
7525 df_set_regs_ever_live (REGNO (cfun->machine->base_reg), true);
7526 }
7527
7528 /* Return true if it is legal to put a value with MODE into REGNO. */
7529
7530 bool
7531 s390_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
7532 {
7533 switch (REGNO_REG_CLASS (regno))
7534 {
7535 case FP_REGS:
7536 if (REGNO_PAIR_OK (regno, mode))
7537 {
7538 if (mode == SImode || mode == DImode)
7539 return true;
7540
7541 if (FLOAT_MODE_P (mode) && GET_MODE_CLASS (mode) != MODE_VECTOR_FLOAT)
7542 return true;
7543 }
7544 break;
7545 case ADDR_REGS:
7546 if (FRAME_REGNO_P (regno) && mode == Pmode)
7547 return true;
7548
7549 /* fallthrough */
7550 case GENERAL_REGS:
7551 if (REGNO_PAIR_OK (regno, mode))
7552 {
7553 if (TARGET_ZARCH
7554 || (mode != TFmode && mode != TCmode && mode != TDmode))
7555 return true;
7556 }
7557 break;
7558 case CC_REGS:
7559 if (GET_MODE_CLASS (mode) == MODE_CC)
7560 return true;
7561 break;
7562 case ACCESS_REGS:
7563 if (REGNO_PAIR_OK (regno, mode))
7564 {
7565 if (mode == SImode || mode == Pmode)
7566 return true;
7567 }
7568 break;
7569 default:
7570 return false;
7571 }
7572
7573 return false;
7574 }
7575
7576 /* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */
7577
7578 bool
7579 s390_hard_regno_rename_ok (unsigned int old_reg, unsigned int new_reg)
7580 {
7581 /* Once we've decided upon a register to use as base register, it must
7582 no longer be used for any other purpose. */
7583 if (cfun->machine->base_reg)
7584 if (REGNO (cfun->machine->base_reg) == old_reg
7585 || REGNO (cfun->machine->base_reg) == new_reg)
7586 return false;
7587
7588 return true;
7589 }
7590
7591 /* Maximum number of registers to represent a value of mode MODE
7592 in a register of class RCLASS. */
7593
7594 int
7595 s390_class_max_nregs (enum reg_class rclass, enum machine_mode mode)
7596 {
7597 switch (rclass)
7598 {
7599 case FP_REGS:
7600 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
7601 return 2 * ((GET_MODE_SIZE (mode) / 2 + 8 - 1) / 8);
7602 else
7603 return (GET_MODE_SIZE (mode) + 8 - 1) / 8;
7604 case ACCESS_REGS:
7605 return (GET_MODE_SIZE (mode) + 4 - 1) / 4;
7606 default:
7607 break;
7608 }
7609 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
7610 }
7611
7612 /* Return true if register FROM can be eliminated via register TO. */
7613
7614 static bool
7615 s390_can_eliminate (const int from, const int to)
7616 {
7617 /* On zSeries machines, we have not marked the base register as fixed.
7618 Instead, we have an elimination rule BASE_REGNUM -> BASE_REGNUM.
7619 If a function requires the base register, we say here that this
7620 elimination cannot be performed. This will cause reload to free
7621 up the base register (as if it were fixed). On the other hand,
7622 if the current function does *not* require the base register, we
7623 say here the elimination succeeds, which in turn allows reload
7624 to allocate the base register for any other purpose. */
7625 if (from == BASE_REGNUM && to == BASE_REGNUM)
7626 {
7627 if (TARGET_CPU_ZARCH)
7628 {
7629 s390_init_frame_layout ();
7630 return cfun->machine->base_reg == NULL_RTX;
7631 }
7632
7633 return false;
7634 }
7635
7636 /* Everything else must point into the stack frame. */
7637 gcc_assert (to == STACK_POINTER_REGNUM
7638 || to == HARD_FRAME_POINTER_REGNUM);
7639
7640 gcc_assert (from == FRAME_POINTER_REGNUM
7641 || from == ARG_POINTER_REGNUM
7642 || from == RETURN_ADDRESS_POINTER_REGNUM);
7643
7644 /* Make sure we actually saved the return address. */
7645 if (from == RETURN_ADDRESS_POINTER_REGNUM)
7646 if (!crtl->calls_eh_return
7647 && !cfun->stdarg
7648 && !cfun_frame_layout.save_return_addr_p)
7649 return false;
7650
7651 return true;
7652 }
7653
7654 /* Return the offset between registers FROM and TO initially after the prologue. */
7655
7656 HOST_WIDE_INT
7657 s390_initial_elimination_offset (int from, int to)
7658 {
7659 HOST_WIDE_INT offset;
7660 int index;
7661
7662 /* ??? Why are we called for non-eliminable pairs? */
7663 if (!s390_can_eliminate (from, to))
7664 return 0;
7665
7666 switch (from)
7667 {
7668 case FRAME_POINTER_REGNUM:
7669 offset = (get_frame_size()
7670 + STACK_POINTER_OFFSET
7671 + crtl->outgoing_args_size);
7672 break;
7673
7674 case ARG_POINTER_REGNUM:
7675 s390_init_frame_layout ();
7676 offset = cfun_frame_layout.frame_size + STACK_POINTER_OFFSET;
7677 break;
7678
7679 case RETURN_ADDRESS_POINTER_REGNUM:
7680 s390_init_frame_layout ();
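/* The return address is kept in the GPR save area, in the slot
   allocated for RETURN_REGNUM. */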
7681 index = RETURN_REGNUM - cfun_frame_layout.first_save_gpr_slot;
7682 gcc_assert (index >= 0);
7683 offset = cfun_frame_layout.frame_size + cfun_frame_layout.gprs_offset;
7684 offset += index * UNITS_PER_LONG;
7685 break;
7686
7687 case BASE_REGNUM:
7688 offset = 0;
7689 break;
7690
7691 default:
7692 gcc_unreachable ();
7693 }
7694
7695 return offset;
7696 }
7697
7698 /* Emit insn to save fpr REGNUM at offset OFFSET relative
7699 to register BASE. Return generated insn. */
7700
7701 static rtx
7702 save_fpr (rtx base, int offset, int regnum)
7703 {
7704 rtx addr;
7705 addr = gen_rtx_MEM (DFmode, plus_constant (base, offset));
7706
7707 if (regnum >= 16 && regnum <= (16 + FP_ARG_NUM_REG))
7708 set_mem_alias_set (addr, get_varargs_alias_set ());
7709 else
7710 set_mem_alias_set (addr, get_frame_alias_set ());
7711
7712 return emit_move_insn (addr, gen_rtx_REG (DFmode, regnum));
7713 }
7714
7715 /* Emit insn to restore fpr REGNUM from offset OFFSET relative
7716 to register BASE. Return generated insn. */
7717
7718 static rtx
7719 restore_fpr (rtx base, int offset, int regnum)
7720 {
7721 rtx addr;
7722 addr = gen_rtx_MEM (DFmode, plus_constant (base, offset));
7723 set_mem_alias_set (addr, get_frame_alias_set ());
7724
7725 return emit_move_insn (gen_rtx_REG (DFmode, regnum), addr);
7726 }
7727
7728 /* Return true if REGNO is a global register, but not one
7729 of the special ones that need to be saved/restored anyway. */
7730
7731 static inline bool
7732 global_not_special_regno_p (int regno)
7733 {
7734 return (global_regs[regno]
7735 /* These registers are special and need to be
7736 restored in any case. */
7737 && !(regno == STACK_POINTER_REGNUM
7738 || regno == RETURN_REGNUM
7739 || regno == BASE_REGNUM
7740 || (flag_pic && regno == (int)PIC_OFFSET_TABLE_REGNUM)));
7741 }
7742
7743 /* Generate insn to save registers FIRST to LAST into
7744 the register save area located at offset OFFSET
7745 relative to register BASE. */
7746
7747 static rtx
7748 save_gprs (rtx base, int offset, int first, int last)
7749 {
7750 rtx addr, insn, note;
7751 int i;
7752
7753 addr = plus_constant (base, offset);
7754 addr = gen_rtx_MEM (Pmode, addr);
7755
7756 set_mem_alias_set (addr, get_frame_alias_set ());
7757
7758 /* Special-case single register. */
7759 if (first == last)
7760 {
7761 if (TARGET_64BIT)
7762 insn = gen_movdi (addr, gen_rtx_REG (Pmode, first));
7763 else
7764 insn = gen_movsi (addr, gen_rtx_REG (Pmode, first));
7765
7766 if (!global_not_special_regno_p (first))
7767 RTX_FRAME_RELATED_P (insn) = 1;
7768 return insn;
7769 }
7770
7771
7772 insn = gen_store_multiple (addr,
7773 gen_rtx_REG (Pmode, first),
7774 GEN_INT (last - first + 1));
7775
7776 if (first <= 6 && cfun->stdarg)
7777 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
7778 {
7779 rtx mem = XEXP (XVECEXP (PATTERN (insn), 0, i), 0);
7780
7781 if (first + i <= 6)
7782 set_mem_alias_set (mem, get_varargs_alias_set ());
7783 }
7784
7785 /* We need to set the FRAME_RELATED flag on all SETs
7786 inside the store-multiple pattern.
7787
7788 However, we must not emit DWARF records for registers 2..5
7789 if they are stored for use by variable arguments ...
7790
7791 ??? Unfortunately, it is not enough to simply not set the
7792 FRAME_RELATED flags for those SETs, because the first SET
7793 of the PARALLEL is always treated as if it had the flag
7794 set, even if it does not. Therefore we emit a new pattern
7795 without those registers as REG_FRAME_RELATED_EXPR note. */
7796
7797 if (first >= 6 && !global_not_special_regno_p (first))
7798 {
7799 rtx pat = PATTERN (insn);
7800
7801 for (i = 0; i < XVECLEN (pat, 0); i++)
7802 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
7803 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (pat,
7804 0, i)))))
7805 RTX_FRAME_RELATED_P (XVECEXP (pat, 0, i)) = 1;
7806
7807 RTX_FRAME_RELATED_P (insn) = 1;
7808 }
7809 else if (last >= 6)
7810 {
7811 int start;
7812
7813 for (start = first >= 6 ? first : 6; start <= last; start++)
7814 if (!global_not_special_regno_p (start))
7815 break;
7816
7817 if (start > last)
7818 return insn;
7819
7820 addr = plus_constant (base, offset + (start - first) * UNITS_PER_LONG);
7821 note = gen_store_multiple (gen_rtx_MEM (Pmode, addr),
7822 gen_rtx_REG (Pmode, start),
7823 GEN_INT (last - start + 1));
7824 note = PATTERN (note);
7825
7826 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
7827
7828 for (i = 0; i < XVECLEN (note, 0); i++)
7829 if (GET_CODE (XVECEXP (note, 0, i)) == SET
7830 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (note,
7831 0, i)))))
7832 RTX_FRAME_RELATED_P (XVECEXP (note, 0, i)) = 1;
7833
7834 RTX_FRAME_RELATED_P (insn) = 1;
7835 }
7836
7837 return insn;
7838 }
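/* Illustrative only, not literal output of this function: for the common
   64-bit prologue that saves %r6..%r15 into the standard register save
   area, the store-multiple pattern built above corresponds to a single
   "stmg %r6,%r15,48(%r15)" instruction; the code above then decides which
   of its SETs are marked frame-related for DWARF purposes.  */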
7839
7840 /* Generate insn to restore registers FIRST to LAST from
7841 the register save area located at offset OFFSET
7842 relative to register BASE. */
7843
7844 static rtx
7845 restore_gprs (rtx base, int offset, int first, int last)
7846 {
7847 rtx addr, insn;
7848
7849 addr = plus_constant (base, offset);
7850 addr = gen_rtx_MEM (Pmode, addr);
7851 set_mem_alias_set (addr, get_frame_alias_set ());
7852
7853 /* Special-case single register. */
7854 if (first == last)
7855 {
7856 if (TARGET_64BIT)
7857 insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
7858 else
7859 insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);
7860
7861 return insn;
7862 }
7863
7864 insn = gen_load_multiple (gen_rtx_REG (Pmode, first),
7865 addr,
7866 GEN_INT (last - first + 1));
7867 return insn;
7868 }
7869
7870 /* Return insn sequence to load the GOT register. */
7871
7872 static GTY(()) rtx got_symbol;
7873 rtx
7874 s390_load_got (void)
7875 {
7876 rtx insns;
7877
7878 if (!got_symbol)
7879 {
7880 got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
7881 SYMBOL_REF_FLAGS (got_symbol) = SYMBOL_FLAG_LOCAL;
7882 }
7883
7884 start_sequence ();
7885
7886 if (TARGET_CPU_ZARCH)
7887 {
7888 emit_move_insn (pic_offset_table_rtx, got_symbol);
7889 }
7890 else
7891 {
7892 rtx offset;
7893
7894 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got_symbol),
7895 UNSPEC_LTREL_OFFSET);
7896 offset = gen_rtx_CONST (Pmode, offset);
7897 offset = force_const_mem (Pmode, offset);
7898
7899 emit_move_insn (pic_offset_table_rtx, offset);
7900
7901 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (offset, 0)),
7902 UNSPEC_LTREL_BASE);
7903 offset = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, offset);
7904
7905 emit_move_insn (pic_offset_table_rtx, offset);
7906 }
7907
7908 insns = get_insns ();
7909 end_sequence ();
7910 return insns;
7911 }
7912
7913 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
7914 and the change to the stack pointer. */
7915
7916 static void
7917 s390_emit_stack_tie (void)
7918 {
7919 rtx mem = gen_frame_mem (BLKmode,
7920 gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
7921
7922 emit_insn (gen_stack_tie (mem));
7923 }
7924
7925 /* Expand the prologue into a bunch of separate insns. */
7926
7927 void
7928 s390_emit_prologue (void)
7929 {
7930 rtx insn, addr;
7931 rtx temp_reg;
7932 int i;
7933 int offset;
7934 int next_fpr = 0;
7935
7936 /* Complete frame layout. */
7937
7938 s390_update_frame_layout ();
7939
7940 /* Annotate all constant pool references to let the scheduler know
7941 they implicitly use the base register. */
7942
7943 push_topmost_sequence ();
7944
7945 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7946 if (INSN_P (insn))
7947 {
7948 annotate_constant_pool_refs (&PATTERN (insn));
7949 df_insn_rescan (insn);
7950 }
7951
7952 pop_topmost_sequence ();
7953
7954 /* Choose the best register to use for temporaries within the prologue.
7955 See below for why TPF must use register 1. */
7956
7957 if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
7958 && !current_function_is_leaf
7959 && !TARGET_TPF_PROFILING)
7960 temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
7961 else
7962 temp_reg = gen_rtx_REG (Pmode, 1);
7963
7964 /* Save call saved gprs. */
7965 if (cfun_frame_layout.first_save_gpr != -1)
7966 {
7967 insn = save_gprs (stack_pointer_rtx,
7968 cfun_frame_layout.gprs_offset +
7969 UNITS_PER_LONG * (cfun_frame_layout.first_save_gpr
7970 - cfun_frame_layout.first_save_gpr_slot),
7971 cfun_frame_layout.first_save_gpr,
7972 cfun_frame_layout.last_save_gpr);
7973 emit_insn (insn);
7974 }
7975
7976 /* Dummy insn to mark literal pool slot. */
7977
7978 if (cfun->machine->base_reg)
7979 emit_insn (gen_main_pool (cfun->machine->base_reg));
7980
7981 offset = cfun_frame_layout.f0_offset;
7982
7983 /* Save f0 and f2. */
7984 for (i = 0; i < 2; i++)
7985 {
7986 if (cfun_fpr_bit_p (i))
7987 {
7988 save_fpr (stack_pointer_rtx, offset, i + 16);
7989 offset += 8;
7990 }
7991 else if (!TARGET_PACKED_STACK)
7992 offset += 8;
7993 }
7994
7995 /* Save f4 and f6. */
7996 offset = cfun_frame_layout.f4_offset;
7997 for (i = 2; i < 4; i++)
7998 {
7999 if (cfun_fpr_bit_p (i))
8000 {
8001 insn = save_fpr (stack_pointer_rtx, offset, i + 16);
8002 offset += 8;
8003
8004 /* If f4 and f6 are call clobbered, they are saved due to stdarg and
8005 therefore are not frame related. */
8006 if (!call_really_used_regs[i + 16])
8007 RTX_FRAME_RELATED_P (insn) = 1;
8008 }
8009 else if (!TARGET_PACKED_STACK)
8010 offset += 8;
8011 }
8012
8013 if (TARGET_PACKED_STACK
8014 && cfun_save_high_fprs_p
8015 && cfun_frame_layout.f8_offset + cfun_frame_layout.high_fprs * 8 > 0)
8016 {
8017 offset = (cfun_frame_layout.f8_offset
8018 + (cfun_frame_layout.high_fprs - 1) * 8);
8019
8020 for (i = 15; i > 7 && offset >= 0; i--)
8021 if (cfun_fpr_bit_p (i))
8022 {
8023 insn = save_fpr (stack_pointer_rtx, offset, i + 16);
8024
8025 RTX_FRAME_RELATED_P (insn) = 1;
8026 offset -= 8;
8027 }
8028 if (offset >= cfun_frame_layout.f8_offset)
8029 next_fpr = i + 16;
8030 }
8031
8032 if (!TARGET_PACKED_STACK)
8033 next_fpr = cfun_save_high_fprs_p ? 31 : 0;
8034
8035 if (flag_stack_usage)
8036 current_function_static_stack_size = cfun_frame_layout.frame_size;
8037
8038 /* Decrement stack pointer. */
8039
8040 if (cfun_frame_layout.frame_size > 0)
8041 {
8042 rtx frame_off = GEN_INT (-cfun_frame_layout.frame_size);
8043 rtx real_frame_off;
8044
8045 if (s390_stack_size)
8046 {
8047 HOST_WIDE_INT stack_guard;
8048
8049 if (s390_stack_guard)
8050 stack_guard = s390_stack_guard;
8051 else
8052 {
8053 /* If no value for the stack guard is provided, the smallest power of 2
8054 no smaller than the current frame size is chosen. */
8055 stack_guard = 1;
8056 while (stack_guard < cfun_frame_layout.frame_size)
8057 stack_guard <<= 1;
8058 }
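/* Worked example (illustrative numbers): with a frame size of 12000 bytes
   and no -mstack-guard value, the loop above yields stack_guard = 16384,
   the smallest power of two not below the frame size.  */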
8059
8060 if (cfun_frame_layout.frame_size >= s390_stack_size)
8061 {
8062 warning (0, "frame size of function %qs is "
8063 HOST_WIDE_INT_PRINT_DEC
8064 " bytes exceeding user provided stack limit of "
8065 HOST_WIDE_INT_PRINT_DEC " bytes. "
8066 "An unconditional trap is added.",
8067 current_function_name(), cfun_frame_layout.frame_size,
8068 s390_stack_size);
8069 emit_insn (gen_trap ());
8070 }
8071 else
8072 {
8073 /* stack_guard has to be smaller than s390_stack_size.
8074 Otherwise we would emit an AND with zero which would
8075 not match the test under mask pattern. */
8076 if (stack_guard >= s390_stack_size)
8077 {
8078 warning (0, "frame size of function %qs is "
8079 HOST_WIDE_INT_PRINT_DEC
8080 " bytes which is more than half the stack size. "
8081 "The dynamic check would not be reliable. "
8082 "No check emitted for this function.",
8083 current_function_name(),
8084 cfun_frame_layout.frame_size);
8085 }
8086 else
8087 {
8088 HOST_WIDE_INT stack_check_mask = ((s390_stack_size - 1)
8089 & ~(stack_guard - 1));
8090
8091 rtx t = gen_rtx_AND (Pmode, stack_pointer_rtx,
8092 GEN_INT (stack_check_mask));
8093 if (TARGET_64BIT)
8094 emit_insn (gen_ctrapdi4 (gen_rtx_EQ (VOIDmode,
8095 t, const0_rtx),
8096 t, const0_rtx, const0_rtx));
8097 else
8098 emit_insn (gen_ctrapsi4 (gen_rtx_EQ (VOIDmode,
8099 t, const0_rtx),
8100 t, const0_rtx, const0_rtx));
8101 }
8102 }
8103 }
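/* Illustrative numbers for the check above: with -mstack-size=65536 and
   -mstack-guard=4096, stack_check_mask becomes (65536 - 1) & ~(4096 - 1)
   = 0xf000, and the conditional trap fires once all of those stack
   pointer bits are zero, i.e. once the stack pointer lies within the
   lowest stack-guard-sized slice of a stack-size-aligned region.  */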
8104
8105 if (s390_warn_framesize > 0
8106 && cfun_frame_layout.frame_size >= s390_warn_framesize)
8107 warning (0, "frame size of %qs is " HOST_WIDE_INT_PRINT_DEC " bytes",
8108 current_function_name (), cfun_frame_layout.frame_size);
8109
8110 if (s390_warn_dynamicstack_p && cfun->calls_alloca)
8111 warning (0, "%qs uses dynamic stack allocation", current_function_name ());
8112
8113 /* Save incoming stack pointer into temp reg. */
8114 if (TARGET_BACKCHAIN || next_fpr)
8115 insn = emit_insn (gen_move_insn (temp_reg, stack_pointer_rtx));
8116
8117 /* Subtract frame size from stack pointer. */
8118
8119 if (DISP_IN_RANGE (INTVAL (frame_off)))
8120 {
8121 insn = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8122 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8123 frame_off));
8124 insn = emit_insn (insn);
8125 }
8126 else
8127 {
8128 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
8129 frame_off = force_const_mem (Pmode, frame_off);
8130
8131 insn = emit_insn (gen_add2_insn (stack_pointer_rtx, frame_off));
8132 annotate_constant_pool_refs (&PATTERN (insn));
8133 }
8134
8135 RTX_FRAME_RELATED_P (insn) = 1;
8136 real_frame_off = GEN_INT (-cfun_frame_layout.frame_size);
8137 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
8138 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8139 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8140 real_frame_off)));
8141
8142 /* Set backchain. */
8143
8144 if (TARGET_BACKCHAIN)
8145 {
8146 if (cfun_frame_layout.backchain_offset)
8147 addr = gen_rtx_MEM (Pmode,
8148 plus_constant (stack_pointer_rtx,
8149 cfun_frame_layout.backchain_offset));
8150 else
8151 addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
8152 set_mem_alias_set (addr, get_frame_alias_set ());
8153 insn = emit_insn (gen_move_insn (addr, temp_reg));
8154 }
8155
8156 /* If we support non-call exceptions (e.g. for Java),
8157 we need to make sure the backchain pointer is set up
8158 before any possibly trapping memory access. */
8159 if (TARGET_BACKCHAIN && cfun->can_throw_non_call_exceptions)
8160 {
8161 addr = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode));
8162 emit_clobber (addr);
8163 }
8164 }
8165
8166 /* Save fprs 8 - 15 (64 bit ABI). */
8167
8168 if (cfun_save_high_fprs_p && next_fpr)
8169 {
8170 /* If the stack might be accessed through a different register
8171 we have to make sure that the stack pointer decrement is not
8172 moved below the use of the stack slots. */
8173 s390_emit_stack_tie ();
8174
8175 insn = emit_insn (gen_add2_insn (temp_reg,
8176 GEN_INT (cfun_frame_layout.f8_offset)));
8177
8178 offset = 0;
8179
8180 for (i = 24; i <= next_fpr; i++)
8181 if (cfun_fpr_bit_p (i - 16))
8182 {
8183 rtx addr = plus_constant (stack_pointer_rtx,
8184 cfun_frame_layout.frame_size
8185 + cfun_frame_layout.f8_offset
8186 + offset);
8187
8188 insn = save_fpr (temp_reg, offset, i);
8189 offset += 8;
8190 RTX_FRAME_RELATED_P (insn) = 1;
8191 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
8192 gen_rtx_SET (VOIDmode,
8193 gen_rtx_MEM (DFmode, addr),
8194 gen_rtx_REG (DFmode, i)));
8195 }
8196 }
8197
8198 /* Set frame pointer, if needed. */
8199
8200 if (frame_pointer_needed)
8201 {
8202 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
8203 RTX_FRAME_RELATED_P (insn) = 1;
8204 }
8205
8206 /* Set up got pointer, if needed. */
8207
8208 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
8209 {
8210 rtx insns = s390_load_got ();
8211
8212 for (insn = insns; insn; insn = NEXT_INSN (insn))
8213 annotate_constant_pool_refs (&PATTERN (insn));
8214
8215 emit_insn (insns);
8216 }
8217
8218 if (TARGET_TPF_PROFILING)
8219 {
8220 /* Generate a BAS instruction to serve as a function
8221 entry intercept to facilitate the use of tracing
8222 algorithms located at the branch target. */
8223 emit_insn (gen_prologue_tpf ());
8224
8225 /* Emit a blockage here so that all code
8226 lies between the profiling mechanisms. */
8227 emit_insn (gen_blockage ());
8228 }
8229 }
8230
8231 /* Expand the epilogue into a bunch of separate insns. */
8232
8233 void
8234 s390_emit_epilogue (bool sibcall)
8235 {
8236 rtx frame_pointer, return_reg, cfa_restores = NULL_RTX;
8237 int area_bottom, area_top, offset = 0;
8238 int next_offset;
8239 rtvec p;
8240 int i;
8241
8242 if (TARGET_TPF_PROFILING)
8243 {
8244
8245 /* Generate a BAS instruction to serve as a function
8246 entry intercept to facilitate the use of tracing
8247 algorithms located at the branch target. */
8248
8249 /* Emit a blockage here so that all code
8250 lies between the profiling mechanisms. */
8251 emit_insn (gen_blockage ());
8252
8253 emit_insn (gen_epilogue_tpf ());
8254 }
8255
8256 /* Check whether to use frame or stack pointer for restore. */
8257
8258 frame_pointer = (frame_pointer_needed
8259 ? hard_frame_pointer_rtx : stack_pointer_rtx);
8260
8261 s390_frame_area (&area_bottom, &area_top);
8262
8263 /* Check whether we can access the register save area.
8264 If not, increment the frame pointer as required. */
8265
8266 if (area_top <= area_bottom)
8267 {
8268 /* Nothing to restore. */
8269 }
8270 else if (DISP_IN_RANGE (cfun_frame_layout.frame_size + area_bottom)
8271 && DISP_IN_RANGE (cfun_frame_layout.frame_size + area_top - 1))
8272 {
8273 /* Area is in range. */
8274 offset = cfun_frame_layout.frame_size;
8275 }
8276 else
8277 {
8278 rtx insn, frame_off, cfa;
8279
8280 offset = area_bottom < 0 ? -area_bottom : 0;
8281 frame_off = GEN_INT (cfun_frame_layout.frame_size - offset);
8282
8283 cfa = gen_rtx_SET (VOIDmode, frame_pointer,
8284 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
8285 if (DISP_IN_RANGE (INTVAL (frame_off)))
8286 {
8287 insn = gen_rtx_SET (VOIDmode, frame_pointer,
8288 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
8289 insn = emit_insn (insn);
8290 }
8291 else
8292 {
8293 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
8294 frame_off = force_const_mem (Pmode, frame_off);
8295
8296 insn = emit_insn (gen_add2_insn (frame_pointer, frame_off));
8297 annotate_constant_pool_refs (&PATTERN (insn));
8298 }
8299 add_reg_note (insn, REG_CFA_ADJUST_CFA, cfa);
8300 RTX_FRAME_RELATED_P (insn) = 1;
8301 }
8302
8303 /* Restore call saved fprs. */
8304
8305 if (TARGET_64BIT)
8306 {
8307 if (cfun_save_high_fprs_p)
8308 {
8309 next_offset = cfun_frame_layout.f8_offset;
8310 for (i = 24; i < 32; i++)
8311 {
8312 if (cfun_fpr_bit_p (i - 16))
8313 {
8314 restore_fpr (frame_pointer,
8315 offset + next_offset, i);
8316 cfa_restores
8317 = alloc_reg_note (REG_CFA_RESTORE,
8318 gen_rtx_REG (DFmode, i), cfa_restores);
8319 next_offset += 8;
8320 }
8321 }
8322 }
8323
8324 }
8325 else
8326 {
8327 next_offset = cfun_frame_layout.f4_offset;
8328 for (i = 18; i < 20; i++)
8329 {
8330 if (cfun_fpr_bit_p (i - 16))
8331 {
8332 restore_fpr (frame_pointer,
8333 offset + next_offset, i);
8334 cfa_restores
8335 = alloc_reg_note (REG_CFA_RESTORE,
8336 gen_rtx_REG (DFmode, i), cfa_restores);
8337 next_offset += 8;
8338 }
8339 else if (!TARGET_PACKED_STACK)
8340 next_offset += 8;
8341 }
8342
8343 }
8344
8345 /* Return register. */
8346
8347 return_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
8348
8349 /* Restore call saved gprs. */
8350
8351 if (cfun_frame_layout.first_restore_gpr != -1)
8352 {
8353 rtx insn, addr;
8354 int i;
8355
8356 /* Check for global registers and save their current values into the
8357 stack slots from which they will subsequently be restored. */
8358
8359 for (i = cfun_frame_layout.first_restore_gpr;
8360 i <= cfun_frame_layout.last_restore_gpr;
8361 i++)
8362 {
8363 if (global_not_special_regno_p (i))
8364 {
8365 addr = plus_constant (frame_pointer,
8366 offset + cfun_frame_layout.gprs_offset
8367 + (i - cfun_frame_layout.first_save_gpr_slot)
8368 * UNITS_PER_LONG);
8369 addr = gen_rtx_MEM (Pmode, addr);
8370 set_mem_alias_set (addr, get_frame_alias_set ());
8371 emit_move_insn (addr, gen_rtx_REG (Pmode, i));
8372 }
8373 else
8374 cfa_restores
8375 = alloc_reg_note (REG_CFA_RESTORE,
8376 gen_rtx_REG (Pmode, i), cfa_restores);
8377 }
8378
8379 if (! sibcall)
8380 {
8381 /* Fetch the return address from the stack before the load multiple;
8382 this helps scheduling. */
8383
8384 if (cfun_frame_layout.save_return_addr_p
8385 || (cfun_frame_layout.first_restore_gpr < BASE_REGNUM
8386 && cfun_frame_layout.last_restore_gpr > RETURN_REGNUM))
8387 {
8388 int return_regnum = find_unused_clobbered_reg();
8389 if (!return_regnum)
8390 return_regnum = 4;
8391 return_reg = gen_rtx_REG (Pmode, return_regnum);
8392
8393 addr = plus_constant (frame_pointer,
8394 offset + cfun_frame_layout.gprs_offset
8395 + (RETURN_REGNUM
8396 - cfun_frame_layout.first_save_gpr_slot)
8397 * UNITS_PER_LONG);
8398 addr = gen_rtx_MEM (Pmode, addr);
8399 set_mem_alias_set (addr, get_frame_alias_set ());
8400 emit_move_insn (return_reg, addr);
8401 }
8402 }
8403
8404 insn = restore_gprs (frame_pointer,
8405 offset + cfun_frame_layout.gprs_offset
8406 + (cfun_frame_layout.first_restore_gpr
8407 - cfun_frame_layout.first_save_gpr_slot)
8408 * UNITS_PER_LONG,
8409 cfun_frame_layout.first_restore_gpr,
8410 cfun_frame_layout.last_restore_gpr);
8411 insn = emit_insn (insn);
8412 REG_NOTES (insn) = cfa_restores;
8413 add_reg_note (insn, REG_CFA_DEF_CFA,
8414 plus_constant (stack_pointer_rtx, STACK_POINTER_OFFSET));
8415 RTX_FRAME_RELATED_P (insn) = 1;
8416 }
8417
8418 if (! sibcall)
8419 {
8420
8421 /* Return to caller. */
8422
8423 p = rtvec_alloc (2);
8424
8425 RTVEC_ELT (p, 0) = gen_rtx_RETURN (VOIDmode);
8426 RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode, return_reg);
8427 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
8428 }
8429 }
8430
8431
8432 /* Return the size in bytes of a function argument of
8433 type TYPE and/or mode MODE. At least one of TYPE or
8434 MODE must be specified. */
8435
8436 static int
8437 s390_function_arg_size (enum machine_mode mode, const_tree type)
8438 {
8439 if (type)
8440 return int_size_in_bytes (type);
8441
8442 /* No type info available for some library calls ... */
8443 if (mode != BLKmode)
8444 return GET_MODE_SIZE (mode);
8445
8446 /* If we have neither type nor mode, abort. */
8447 gcc_unreachable ();
8448 }
8449
8450 /* Return true if a function argument of type TYPE and mode MODE
8451 is to be passed in a floating-point register, if available. */
8452
8453 static bool
8454 s390_function_arg_float (enum machine_mode mode, const_tree type)
8455 {
8456 int size = s390_function_arg_size (mode, type);
8457 if (size > 8)
8458 return false;
8459
8460 /* Soft-float changes the ABI: no floating-point registers are used. */
8461 if (TARGET_SOFT_FLOAT)
8462 return false;
8463
8464 /* No type info available for some library calls ... */
8465 if (!type)
8466 return mode == SFmode || mode == DFmode || mode == SDmode || mode == DDmode;
8467
8468 /* The ABI says that record types with a single member are treated
8469 just like that member would be. */
8470 while (TREE_CODE (type) == RECORD_TYPE)
8471 {
8472 tree field, single = NULL_TREE;
8473
8474 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
8475 {
8476 if (TREE_CODE (field) != FIELD_DECL)
8477 continue;
8478
8479 if (single == NULL_TREE)
8480 single = TREE_TYPE (field);
8481 else
8482 return false;
8483 }
8484
8485 if (single == NULL_TREE)
8486 return false;
8487 else
8488 type = single;
8489 }
8490
8491 return TREE_CODE (type) == REAL_TYPE;
8492 }
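/* For illustration of the single-member rule above: an argument of type
   struct { double d; } is passed exactly like a plain double, i.e. in a
   floating-point register when one is available, whereas a struct
   { double d; int i; } has two members and therefore falls through to
   the integer/by-reference handling.  */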
8493
8494 /* Return true if a function argument of type TYPE and mode MODE
8495 is to be passed in an integer register, or a pair of integer
8496 registers, if available. */
8497
8498 static bool
8499 s390_function_arg_integer (enum machine_mode mode, const_tree type)
8500 {
8501 int size = s390_function_arg_size (mode, type);
8502 if (size > 8)
8503 return false;
8504
8505 /* No type info available for some library calls ... */
8506 if (!type)
8507 return GET_MODE_CLASS (mode) == MODE_INT
8508 || (TARGET_SOFT_FLOAT && SCALAR_FLOAT_MODE_P (mode));
8509
8510 /* We accept small integral (and similar) types. */
8511 if (INTEGRAL_TYPE_P (type)
8512 || POINTER_TYPE_P (type)
8513 || TREE_CODE (type) == NULLPTR_TYPE
8514 || TREE_CODE (type) == OFFSET_TYPE
8515 || (TARGET_SOFT_FLOAT && TREE_CODE (type) == REAL_TYPE))
8516 return true;
8517
8518 /* We also accept structs of size 1, 2, 4, 8 that are not
8519 passed in floating-point registers. */
8520 if (AGGREGATE_TYPE_P (type)
8521 && exact_log2 (size) >= 0
8522 && !s390_function_arg_float (mode, type))
8523 return true;
8524
8525 return false;
8526 }
8527
8528 /* Return 1 if a function argument of type TYPE and mode MODE
8529 is to be passed by reference. The ABI specifies that only
8530 structures of size 1, 2, 4, or 8 bytes are passed by value,
8531 all other structures (and complex numbers) are passed by
8532 reference. */
8533
8534 static bool
8535 s390_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
8536 enum machine_mode mode, const_tree type,
8537 bool named ATTRIBUTE_UNUSED)
8538 {
8539 int size = s390_function_arg_size (mode, type);
8540 if (size > 8)
8541 return true;
8542
8543 if (type)
8544 {
8545 if (AGGREGATE_TYPE_P (type) && exact_log2 (size) < 0)
8546 return 1;
8547
8548 if (TREE_CODE (type) == COMPLEX_TYPE
8549 || TREE_CODE (type) == VECTOR_TYPE)
8550 return 1;
8551 }
8552
8553 return 0;
8554 }
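/* Illustrative examples of the rule above: struct { int a, b; } (size 8,
   a power of two) is passed by value, while struct { char c[3]; } (size 3)
   and struct { long x[3]; } (size 24) are passed by reference, as are all
   complex and vector arguments.  */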
8555
8556 /* Update the data in CUM to advance over an argument of mode MODE and
8557 data type TYPE. (TYPE is null for libcalls where that information
8558 may not be available.) The boolean NAMED specifies whether the
8559 argument is a named argument (as opposed to an unnamed argument
8560 matching an ellipsis). */
8561
8562 static void
8563 s390_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
8564 const_tree type, bool named ATTRIBUTE_UNUSED)
8565 {
8566 if (s390_function_arg_float (mode, type))
8567 {
8568 cum->fprs += 1;
8569 }
8570 else if (s390_function_arg_integer (mode, type))
8571 {
8572 int size = s390_function_arg_size (mode, type);
8573 cum->gprs += ((size + UNITS_PER_LONG - 1) / UNITS_PER_LONG);
8574 }
8575 else
8576 gcc_unreachable ();
8577 }
8578
8579 /* Define where to put the arguments to a function.
8580 Value is zero to push the argument on the stack,
8581 or a hard register in which to store the argument.
8582
8583 MODE is the argument's machine mode.
8584 TYPE is the data type of the argument (as a tree).
8585 This is null for libcalls where that information may
8586 not be available.
8587 CUM is a variable of type CUMULATIVE_ARGS which gives info about
8588 the preceding args and about the function being called.
8589 NAMED is nonzero if this argument is a named parameter
8590 (otherwise it is an extra parameter matching an ellipsis).
8591
8592 On S/390, we use general purpose registers 2 through 6 to
8593 pass integer, pointer, and certain structure arguments, and
8594 floating point registers 0 and 2 (0, 2, 4, and 6 on 64-bit)
8595 to pass floating point arguments. All remaining arguments
8596 are pushed to the stack. */
8597
8598 static rtx
8599 s390_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
8600 const_tree type, bool named ATTRIBUTE_UNUSED)
8601 {
8602 if (s390_function_arg_float (mode, type))
8603 {
8604 if (cum->fprs + 1 > FP_ARG_NUM_REG)
8605 return 0;
8606 else
8607 return gen_rtx_REG (mode, cum->fprs + 16);
8608 }
8609 else if (s390_function_arg_integer (mode, type))
8610 {
8611 int size = s390_function_arg_size (mode, type);
8612 int n_gprs = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
8613
8614 if (cum->gprs + n_gprs > GP_ARG_NUM_REG)
8615 return 0;
8616 else if (n_gprs == 1 || UNITS_PER_WORD == UNITS_PER_LONG)
8617 return gen_rtx_REG (mode, cum->gprs + 2);
8618 else if (n_gprs == 2)
8619 {
8620 rtvec p = rtvec_alloc (2);
8621
8622 RTVEC_ELT (p, 0)
8623 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 2),
8624 const0_rtx);
8625 RTVEC_ELT (p, 1)
8626 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 3),
8627 GEN_INT (4));
8628
8629 return gen_rtx_PARALLEL (mode, p);
8630 }
8631 }
8632
8633 /* After the real arguments, expand_call calls us once again
8634 with a void_type_node type. Whatever we return here is
8635 passed as operand 2 to the call expanders.
8636
8637 We don't need this feature ... */
8638 else if (type == void_type_node)
8639 return const0_rtx;
8640
8641 gcc_unreachable ();
8642 }
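/* A purely illustrative mapping (64-bit ABI with hardware floating point):
   for a call such as

       void f (int a, double b, long c, double d);

   the code above assigns a -> %r2, b -> %f0, c -> %r3 and d -> %f2; the
   GPR and FPR counters in CUM advance independently of each other.  */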
8643
8644 /* Return true if return values of type TYPE should be returned
8645 in a memory buffer whose address is passed by the caller as
8646 hidden first argument. */
8647
8648 static bool
8649 s390_return_in_memory (const_tree type, const_tree fundecl ATTRIBUTE_UNUSED)
8650 {
8651 /* We accept small integral (and similar) types. */
8652 if (INTEGRAL_TYPE_P (type)
8653 || POINTER_TYPE_P (type)
8654 || TREE_CODE (type) == OFFSET_TYPE
8655 || TREE_CODE (type) == REAL_TYPE)
8656 return int_size_in_bytes (type) > 8;
8657
8658 /* Aggregates and similar constructs are always returned
8659 in memory. */
8660 if (AGGREGATE_TYPE_P (type)
8661 || TREE_CODE (type) == COMPLEX_TYPE
8662 || TREE_CODE (type) == VECTOR_TYPE)
8663 return true;
8664
8665 /* ??? We get called on all sorts of random stuff from
8666 aggregate_value_p. We can't abort, but it's not clear
8667 what's safe to return. Pretend it's a struct I guess. */
8668 return true;
8669 }
8670
8671 /* Function arguments and return values are promoted to word size. */
8672
8673 static enum machine_mode
8674 s390_promote_function_mode (const_tree type, enum machine_mode mode,
8675 int *punsignedp,
8676 const_tree fntype ATTRIBUTE_UNUSED,
8677 int for_return ATTRIBUTE_UNUSED)
8678 {
8679 if (INTEGRAL_MODE_P (mode)
8680 && GET_MODE_SIZE (mode) < UNITS_PER_LONG)
8681 {
8682 if (POINTER_TYPE_P (type))
8683 *punsignedp = POINTERS_EXTEND_UNSIGNED;
8684 return Pmode;
8685 }
8686
8687 return mode;
8688 }
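/* E.g. (illustrative): on a 64-bit target a 'short' or 'int' argument or
   return value is widened here to a full 64-bit register value (Pmode)
   before being passed or returned.  */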
8689
8690 /* Define where to return a (scalar) value of type TYPE.
8691 If TYPE is null, define where to return a (scalar)
8692 value of mode MODE from a libcall. */
8693
8694 rtx
8695 s390_function_value (const_tree type, const_tree fn, enum machine_mode mode)
8696 {
8697 if (type)
8698 {
8699 int unsignedp = TYPE_UNSIGNED (type);
8700 mode = promote_function_mode (type, TYPE_MODE (type), &unsignedp, fn, 1);
8701 }
8702
8703 gcc_assert (GET_MODE_CLASS (mode) == MODE_INT || SCALAR_FLOAT_MODE_P (mode));
8704 gcc_assert (GET_MODE_SIZE (mode) <= 8);
8705
8706 if (TARGET_HARD_FLOAT && SCALAR_FLOAT_MODE_P (mode))
8707 return gen_rtx_REG (mode, 16);
8708 else if (GET_MODE_SIZE (mode) <= UNITS_PER_LONG
8709 || UNITS_PER_LONG == UNITS_PER_WORD)
8710 return gen_rtx_REG (mode, 2);
8711 else if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_LONG)
8712 {
8713 rtvec p = rtvec_alloc (2);
8714
8715 RTVEC_ELT (p, 0)
8716 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 2), const0_rtx);
8717 RTVEC_ELT (p, 1)
8718 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 3), GEN_INT (4));
8719
8720 return gen_rtx_PARALLEL (mode, p);
8721 }
8722
8723 gcc_unreachable ();
8724 }
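/* Illustrative examples of the mapping above (with hardware floating
   point): a float or double result is returned in %f0 (hard register 16),
   an int or long result in %r2, and an 8-byte scalar result on a 31-bit
   target is split across the %r2/%r3 pair.  */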
8725
8726
8727 /* Create and return the va_list datatype.
8728
8729 On S/390, va_list is an array type equivalent to
8730
8731 typedef struct __va_list_tag
8732 {
8733 long __gpr;
8734 long __fpr;
8735 void *__overflow_arg_area;
8736 void *__reg_save_area;
8737 } va_list[1];
8738
8739 where __gpr and __fpr hold the number of general purpose
8740 or floating point arguments used up to now, respectively,
8741 __overflow_arg_area points to the stack location of the
8742 next argument passed on the stack, and __reg_save_area
8743 always points to the start of the register area in the
8744 call frame of the current function. The function prologue
8745 saves all registers used for argument passing into this
8746 area if the function uses variable arguments. */
8747
8748 static tree
8749 s390_build_builtin_va_list (void)
8750 {
8751 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
8752
8753 record = lang_hooks.types.make_type (RECORD_TYPE);
8754
8755 type_decl =
8756 build_decl (BUILTINS_LOCATION,
8757 TYPE_DECL, get_identifier ("__va_list_tag"), record);
8758
8759 f_gpr = build_decl (BUILTINS_LOCATION,
8760 FIELD_DECL, get_identifier ("__gpr"),
8761 long_integer_type_node);
8762 f_fpr = build_decl (BUILTINS_LOCATION,
8763 FIELD_DECL, get_identifier ("__fpr"),
8764 long_integer_type_node);
8765 f_ovf = build_decl (BUILTINS_LOCATION,
8766 FIELD_DECL, get_identifier ("__overflow_arg_area"),
8767 ptr_type_node);
8768 f_sav = build_decl (BUILTINS_LOCATION,
8769 FIELD_DECL, get_identifier ("__reg_save_area"),
8770 ptr_type_node);
8771
8772 va_list_gpr_counter_field = f_gpr;
8773 va_list_fpr_counter_field = f_fpr;
8774
8775 DECL_FIELD_CONTEXT (f_gpr) = record;
8776 DECL_FIELD_CONTEXT (f_fpr) = record;
8777 DECL_FIELD_CONTEXT (f_ovf) = record;
8778 DECL_FIELD_CONTEXT (f_sav) = record;
8779
8780 TYPE_STUB_DECL (record) = type_decl;
8781 TYPE_NAME (record) = type_decl;
8782 TYPE_FIELDS (record) = f_gpr;
8783 DECL_CHAIN (f_gpr) = f_fpr;
8784 DECL_CHAIN (f_fpr) = f_ovf;
8785 DECL_CHAIN (f_ovf) = f_sav;
8786
8787 layout_type (record);
8788
8789 /* The correct type is an array type of one element. */
8790 return build_array_type (record, build_index_type (size_zero_node));
8791 }
8792
8793 /* Implement va_start by filling the va_list structure VALIST.
8794 STDARG_P is always true, and ignored.
8795 NEXTARG points to the first anonymous stack argument.
8796
8797 The following global variables are used to initialize
8798 the va_list structure:
8799
8800 crtl->args.info:
8801 holds number of gprs and fprs used for named arguments.
8802 crtl->args.arg_offset_rtx:
8803 holds the offset of the first anonymous stack argument
8804 (relative to the virtual arg pointer). */
8805
8806 static void
8807 s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
8808 {
8809 HOST_WIDE_INT n_gpr, n_fpr;
8810 int off;
8811 tree f_gpr, f_fpr, f_ovf, f_sav;
8812 tree gpr, fpr, ovf, sav, t;
8813
8814 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
8815 f_fpr = DECL_CHAIN (f_gpr);
8816 f_ovf = DECL_CHAIN (f_fpr);
8817 f_sav = DECL_CHAIN (f_ovf);
8818
8819 valist = build_simple_mem_ref (valist);
8820 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
8821 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
8822 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
8823 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
8824
8825 /* Count number of gp and fp argument registers used. */
8826
8827 n_gpr = crtl->args.info.gprs;
8828 n_fpr = crtl->args.info.fprs;
8829
8830 if (cfun->va_list_gpr_size)
8831 {
8832 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
8833 build_int_cst (NULL_TREE, n_gpr));
8834 TREE_SIDE_EFFECTS (t) = 1;
8835 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8836 }
8837
8838 if (cfun->va_list_fpr_size)
8839 {
8840 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
8841 build_int_cst (NULL_TREE, n_fpr));
8842 TREE_SIDE_EFFECTS (t) = 1;
8843 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8844 }
8845
8846 /* Find the overflow area. */
8847 if (n_gpr + cfun->va_list_gpr_size > GP_ARG_NUM_REG
8848 || n_fpr + cfun->va_list_fpr_size > FP_ARG_NUM_REG)
8849 {
8850 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
8851
8852 off = INTVAL (crtl->args.arg_offset_rtx);
8853 off = off < 0 ? 0 : off;
8854 if (TARGET_DEBUG_ARG)
8855 fprintf (stderr, "va_start: n_gpr = %d, n_fpr = %d off %d\n",
8856 (int)n_gpr, (int)n_fpr, off);
8857
8858 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovf), t, size_int (off));
8859
8860 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
8861 TREE_SIDE_EFFECTS (t) = 1;
8862 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8863 }
8864
8865 /* Find the register save area. */
8866 if ((cfun->va_list_gpr_size && n_gpr < GP_ARG_NUM_REG)
8867 || (cfun->va_list_fpr_size && n_fpr < FP_ARG_NUM_REG))
8868 {
8869 t = make_tree (TREE_TYPE (sav), return_address_pointer_rtx);
8870 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (sav), t,
8871 size_int (-RETURN_REGNUM * UNITS_PER_LONG));
8872
8873 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
8874 TREE_SIDE_EFFECTS (t) = 1;
8875 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8876 }
8877 }
8878
8879 /* Implement va_arg by updating the va_list structure
8880 VALIST as required to retrieve an argument of type
8881 TYPE, and returning that argument.
8882
8883 Generates code equivalent to:
8884
8885 if (integral value) {
8886 if (size <= 4 && args.gpr < 5 ||
8887 size > 4 && args.gpr < 4 )
8888 ret = args.reg_save_area[args.gpr+8]
8889 else
8890 ret = *args.overflow_arg_area++;
8891 } else if (float value) {
8892 if (args.fpr < 2)
8893 ret = args.reg_save_area[args.fpr+64]
8894 else
8895 ret = *args.overflow_arg_area++;
8896 } else if (aggregate value) {
8897 if (args.gpr < 5)
8898 ret = *args.reg_save_area[args.gpr]
8899 else
8900 ret = **args.overflow_arg_area++;
8901 } */
8902
8903 static tree
8904 s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
8905 gimple_seq *post_p ATTRIBUTE_UNUSED)
8906 {
8907 tree f_gpr, f_fpr, f_ovf, f_sav;
8908 tree gpr, fpr, ovf, sav, reg, t, u;
8909 int indirect_p, size, n_reg, sav_ofs, sav_scale, max_reg;
8910 tree lab_false, lab_over, addr;
8911
8912 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
8913 f_fpr = DECL_CHAIN (f_gpr);
8914 f_ovf = DECL_CHAIN (f_fpr);
8915 f_sav = DECL_CHAIN (f_ovf);
8916
8917 valist = build_va_arg_indirect_ref (valist);
8918 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
8919 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
8920 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
8921
8922 /* The tree for args* cannot be shared between gpr/fpr and ovf since
8923 both appear on a lhs. */
8924 valist = unshare_expr (valist);
8925 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
8926
8927 size = int_size_in_bytes (type);
8928
8929 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
8930 {
8931 if (TARGET_DEBUG_ARG)
8932 {
8933 fprintf (stderr, "va_arg: aggregate type");
8934 debug_tree (type);
8935 }
8936
8937 /* Aggregates are passed by reference. */
8938 indirect_p = 1;
8939 reg = gpr;
8940 n_reg = 1;
8941
8942 /* Kernel stack layout on 31 bit: it is assumed here that no padding
8943 will be added by s390_frame_info because for va_args an even number
8944 of gprs always has to be saved (r15-r2 = 14 regs). */
8945 sav_ofs = 2 * UNITS_PER_LONG;
8946 sav_scale = UNITS_PER_LONG;
8947 size = UNITS_PER_LONG;
8948 max_reg = GP_ARG_NUM_REG - n_reg;
8949 }
8950 else if (s390_function_arg_float (TYPE_MODE (type), type))
8951 {
8952 if (TARGET_DEBUG_ARG)
8953 {
8954 fprintf (stderr, "va_arg: float type");
8955 debug_tree (type);
8956 }
8957
8958 /* FP args go in FP registers, if present. */
8959 indirect_p = 0;
8960 reg = fpr;
8961 n_reg = 1;
8962 sav_ofs = 16 * UNITS_PER_LONG;
8963 sav_scale = 8;
8964 max_reg = FP_ARG_NUM_REG - n_reg;
8965 }
8966 else
8967 {
8968 if (TARGET_DEBUG_ARG)
8969 {
8970 fprintf (stderr, "va_arg: other type");
8971 debug_tree (type);
8972 }
8973
8974 /* Otherwise into GP registers. */
8975 indirect_p = 0;
8976 reg = gpr;
8977 n_reg = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
8978
8979 /* Kernel stack layout on 31 bit: it is assumed here that no padding
8980 will be added by s390_frame_info because for va_args an even number
8981 of gprs always has to be saved (r15-r2 = 14 regs). */
8982 sav_ofs = 2 * UNITS_PER_LONG;
8983
8984 if (size < UNITS_PER_LONG)
8985 sav_ofs += UNITS_PER_LONG - size;
8986
8987 sav_scale = UNITS_PER_LONG;
8988 max_reg = GP_ARG_NUM_REG - n_reg;
8989 }
8990
8991 /* Pull the value out of the saved registers ... */
8992
8993 lab_false = create_artificial_label (UNKNOWN_LOCATION);
8994 lab_over = create_artificial_label (UNKNOWN_LOCATION);
8995 addr = create_tmp_var (ptr_type_node, "addr");
8996
8997 t = fold_convert (TREE_TYPE (reg), size_int (max_reg));
8998 t = build2 (GT_EXPR, boolean_type_node, reg, t);
8999 u = build1 (GOTO_EXPR, void_type_node, lab_false);
9000 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
9001 gimplify_and_add (t, pre_p);
9002
9003 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav,
9004 size_int (sav_ofs));
9005 u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
9006 fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
9007 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t, fold_convert (sizetype, u));
9008
9009 gimplify_assign (addr, t, pre_p);
9010
9011 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
9012
9013 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
9014
9015
9016 /* ... Otherwise out of the overflow area. */
9017
9018 t = ovf;
9019 if (size < UNITS_PER_LONG)
9020 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
9021 size_int (UNITS_PER_LONG - size));
9022
9023 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
9024
9025 gimplify_assign (addr, t, pre_p);
9026
9027 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
9028 size_int (size));
9029 gimplify_assign (ovf, t, pre_p);
9030
9031 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
9032
9033
9034 /* Increment register save count. */
9035
9036 u = build2 (PREINCREMENT_EXPR, TREE_TYPE (reg), reg,
9037 fold_convert (TREE_TYPE (reg), size_int (n_reg)));
9038 gimplify_and_add (u, pre_p);
9039
9040 if (indirect_p)
9041 {
9042 t = build_pointer_type_for_mode (build_pointer_type (type),
9043 ptr_mode, true);
9044 addr = fold_convert (t, addr);
9045 addr = build_va_arg_indirect_ref (addr);
9046 }
9047 else
9048 {
9049 t = build_pointer_type_for_mode (type, ptr_mode, true);
9050 addr = fold_convert (t, addr);
9051 }
9052
9053 return build_va_arg_indirect_ref (addr);
9054 }
9055
9056
9057 /* Builtins. */
9058
9059 enum s390_builtin
9060 {
9061 S390_BUILTIN_THREAD_POINTER,
9062 S390_BUILTIN_SET_THREAD_POINTER,
9063
9064 S390_BUILTIN_max
9065 };
9066
9067 static enum insn_code const code_for_builtin_64[S390_BUILTIN_max] = {
9068 CODE_FOR_get_tp_64,
9069 CODE_FOR_set_tp_64
9070 };
9071
9072 static enum insn_code const code_for_builtin_31[S390_BUILTIN_max] = {
9073 CODE_FOR_get_tp_31,
9074 CODE_FOR_set_tp_31
9075 };
9076
9077 static void
9078 s390_init_builtins (void)
9079 {
9080 tree ftype;
9081
9082 ftype = build_function_type (ptr_type_node, void_list_node);
9083 add_builtin_function ("__builtin_thread_pointer", ftype,
9084 S390_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
9085 NULL, NULL_TREE);
9086
9087 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
9088 add_builtin_function ("__builtin_set_thread_pointer", ftype,
9089 S390_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
9090 NULL, NULL_TREE);
9091 }
9092
9093 /* Expand an expression EXP that calls a built-in function,
9094 with result going to TARGET if that's convenient
9095 (and in mode MODE if that's convenient).
9096 SUBTARGET may be used as the target for computing one of EXP's operands.
9097 IGNORE is nonzero if the value is to be ignored. */
9098
9099 static rtx
9100 s390_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
9101 enum machine_mode mode ATTRIBUTE_UNUSED,
9102 int ignore ATTRIBUTE_UNUSED)
9103 {
9104 #define MAX_ARGS 2
9105
9106 enum insn_code const *code_for_builtin =
9107 TARGET_64BIT ? code_for_builtin_64 : code_for_builtin_31;
9108
9109 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
9110 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
9111 enum insn_code icode;
9112 rtx op[MAX_ARGS], pat;
9113 int arity;
9114 bool nonvoid;
9115 tree arg;
9116 call_expr_arg_iterator iter;
9117
9118 if (fcode >= S390_BUILTIN_max)
9119 internal_error ("bad builtin fcode");
9120 icode = code_for_builtin[fcode];
9121 if (icode == 0)
9122 internal_error ("bad builtin fcode");
9123
9124 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
9125
9126 arity = 0;
9127 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
9128 {
9129 const struct insn_operand_data *insn_op;
9130
9131 if (arg == error_mark_node)
9132 return NULL_RTX;
9133 if (arity > MAX_ARGS)
9134 return NULL_RTX;
9135
9136 insn_op = &insn_data[icode].operand[arity + nonvoid];
9137
9138 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
9139
9140 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
9141 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
9142 arity++;
9143 }
9144
9145 if (nonvoid)
9146 {
9147 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9148 if (!target
9149 || GET_MODE (target) != tmode
9150 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
9151 target = gen_reg_rtx (tmode);
9152 }
9153
9154 switch (arity)
9155 {
9156 case 0:
9157 pat = GEN_FCN (icode) (target);
9158 break;
9159 case 1:
9160 if (nonvoid)
9161 pat = GEN_FCN (icode) (target, op[0]);
9162 else
9163 pat = GEN_FCN (icode) (op[0]);
9164 break;
9165 case 2:
9166 pat = GEN_FCN (icode) (target, op[0], op[1]);
9167 break;
9168 default:
9169 gcc_unreachable ();
9170 }
9171 if (!pat)
9172 return NULL_RTX;
9173 emit_insn (pat);
9174
9175 if (nonvoid)
9176 return target;
9177 else
9178 return const0_rtx;
9179 }
9180
9181
9182 /* Output assembly code for the trampoline template to
9183 stdio stream FILE.
9184
9185 On S/390, we use gpr 1 internally in the trampoline code;
9186 gpr 0 is used to hold the static chain. */
9187
9188 static void
9189 s390_asm_trampoline_template (FILE *file)
9190 {
9191 rtx op[2];
9192 op[0] = gen_rtx_REG (Pmode, 0);
9193 op[1] = gen_rtx_REG (Pmode, 1);
9194
9195 if (TARGET_64BIT)
9196 {
9197 output_asm_insn ("basr\t%1,0", op);
9198 output_asm_insn ("lmg\t%0,%1,14(%1)", op);
9199 output_asm_insn ("br\t%1", op);
9200 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 10));
9201 }
9202 else
9203 {
9204 output_asm_insn ("basr\t%1,0", op);
9205 output_asm_insn ("lm\t%0,%1,6(%1)", op);
9206 output_asm_insn ("br\t%1", op);
9207 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 8));
9208 }
9209 }
9210
9211 /* Emit RTL insns to initialize the variable parts of a trampoline.
9212 FNADDR is an RTX for the address of the function's pure code.
9213 CXT is an RTX for the static chain value for the function. */
9214
9215 static void
9216 s390_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
9217 {
9218 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
9219 rtx mem;
9220
9221 emit_block_move (m_tramp, assemble_trampoline_template (),
9222 GEN_INT (2*UNITS_PER_WORD), BLOCK_OP_NORMAL);
9223
9224 mem = adjust_address (m_tramp, Pmode, 2*UNITS_PER_WORD);
9225 emit_move_insn (mem, cxt);
9226 mem = adjust_address (m_tramp, Pmode, 3*UNITS_PER_WORD);
9227 emit_move_insn (mem, fnaddr);
9228 }
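/* An illustrative walk-through of the resulting 64-bit trampoline (not
   literal output of this function): "basr %r1,0" places the address of
   the following instruction in %r1, "lmg %r0,%r1,14(%r1)" then loads the
   static chain and the target address stored above at offsets 16 and 24
   of the trampoline, and "br %r1" branches to the target with the static
   chain in %r0.  */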
9229
9230 /* Output assembler code to FILE to increment profiler label # LABELNO
9231 for profiling a function entry. */
9232
9233 void
9234 s390_function_profiler (FILE *file, int labelno)
9235 {
9236 rtx op[7];
9237
9238 char label[128];
9239 ASM_GENERATE_INTERNAL_LABEL (label, "LP", labelno);
9240
9241 fprintf (file, "# function profiler \n");
9242
9243 op[0] = gen_rtx_REG (Pmode, RETURN_REGNUM);
9244 op[1] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
9245 op[1] = gen_rtx_MEM (Pmode, plus_constant (op[1], UNITS_PER_LONG));
9246
9247 op[2] = gen_rtx_REG (Pmode, 1);
9248 op[3] = gen_rtx_SYMBOL_REF (Pmode, label);
9249 SYMBOL_REF_FLAGS (op[3]) = SYMBOL_FLAG_LOCAL;
9250
9251 op[4] = gen_rtx_SYMBOL_REF (Pmode, "_mcount");
9252 if (flag_pic)
9253 {
9254 op[4] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[4]), UNSPEC_PLT);
9255 op[4] = gen_rtx_CONST (Pmode, op[4]);
9256 }
9257
9258 if (TARGET_64BIT)
9259 {
9260 output_asm_insn ("stg\t%0,%1", op);
9261 output_asm_insn ("larl\t%2,%3", op);
9262 output_asm_insn ("brasl\t%0,%4", op);
9263 output_asm_insn ("lg\t%0,%1", op);
9264 }
9265 else if (!flag_pic)
9266 {
9267 op[6] = gen_label_rtx ();
9268
9269 output_asm_insn ("st\t%0,%1", op);
9270 output_asm_insn ("bras\t%2,%l6", op);
9271 output_asm_insn (".long\t%4", op);
9272 output_asm_insn (".long\t%3", op);
9273 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
9274 output_asm_insn ("l\t%0,0(%2)", op);
9275 output_asm_insn ("l\t%2,4(%2)", op);
9276 output_asm_insn ("basr\t%0,%0", op);
9277 output_asm_insn ("l\t%0,%1", op);
9278 }
9279 else
9280 {
9281 op[5] = gen_label_rtx ();
9282 op[6] = gen_label_rtx ();
9283
9284 output_asm_insn ("st\t%0,%1", op);
9285 output_asm_insn ("bras\t%2,%l6", op);
9286 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[5]));
9287 output_asm_insn (".long\t%4-%l5", op);
9288 output_asm_insn (".long\t%3-%l5", op);
9289 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
9290 output_asm_insn ("lr\t%0,%2", op);
9291 output_asm_insn ("a\t%0,0(%2)", op);
9292 output_asm_insn ("a\t%2,4(%2)", op);
9293 output_asm_insn ("basr\t%0,%0", op);
9294 output_asm_insn ("l\t%0,%1", op);
9295 }
9296 }
9297
9298 /* Encode symbol attributes (local vs. global, tls model) of a SYMBOL_REF
9299 into its SYMBOL_REF_FLAGS. */
9300
9301 static void
9302 s390_encode_section_info (tree decl, rtx rtl, int first)
9303 {
9304 default_encode_section_info (decl, rtl, first);
9305
9306 if (TREE_CODE (decl) == VAR_DECL)
9307 {
9308 /* If a variable has a forced alignment to < 2 bytes, mark it
9309 with SYMBOL_FLAG_ALIGN1 to prevent it from being used as LARL
9310 operand. */
9311 if (DECL_USER_ALIGN (decl) && DECL_ALIGN (decl) < 16)
9312 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_ALIGN1;
9313 if (!DECL_SIZE (decl)
9314 || !DECL_ALIGN (decl)
9315 || !host_integerp (DECL_SIZE (decl), 0)
9316 || (DECL_ALIGN (decl) <= 64
9317 && DECL_ALIGN (decl) != tree_low_cst (DECL_SIZE (decl), 0)))
9318 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
9319 }
9320
9321 /* Literal pool references don't have a decl so they are handled
9322 differently here. We rely on the information in the MEM_ALIGN
9323 entry to decide upon natural alignment. */
9324 if (MEM_P (rtl)
9325 && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
9326 && TREE_CONSTANT_POOL_ADDRESS_P (XEXP (rtl, 0))
9327 && (MEM_ALIGN (rtl) == 0
9328 || GET_MODE_BITSIZE (GET_MODE (rtl)) == 0
9329 || MEM_ALIGN (rtl) < GET_MODE_BITSIZE (GET_MODE (rtl))))
9330 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
9331 }
9332
9333 /* Output thunk to FILE that implements a C++ virtual function call (with
9334 multiple inheritance) to FUNCTION. The thunk adjusts the this pointer
9335 by DELTA, and unless VCALL_OFFSET is zero, applies an additional adjustment
9336 stored at VCALL_OFFSET in the vtable whose address is located at offset 0
9337 relative to the resulting this pointer. */
9338
9339 static void
9340 s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
9341 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
9342 tree function)
9343 {
9344 rtx op[10];
9345 int nonlocal = 0;
9346
9347 /* Make sure unwind info is emitted for the thunk if needed. */
9348 final_start_function (emit_barrier (), file, 1);
9349
9350 /* Operand 0 is the target function. */
9351 op[0] = XEXP (DECL_RTL (function), 0);
9352 if (flag_pic && !SYMBOL_REF_LOCAL_P (op[0]))
9353 {
9354 nonlocal = 1;
9355 op[0] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[0]),
9356 TARGET_64BIT ? UNSPEC_PLT : UNSPEC_GOT);
9357 op[0] = gen_rtx_CONST (Pmode, op[0]);
9358 }
9359
9360 /* Operand 1 is the 'this' pointer. */
9361 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
9362 op[1] = gen_rtx_REG (Pmode, 3);
9363 else
9364 op[1] = gen_rtx_REG (Pmode, 2);
9365
9366 /* Operand 2 is the delta. */
9367 op[2] = GEN_INT (delta);
9368
9369 /* Operand 3 is the vcall_offset. */
9370 op[3] = GEN_INT (vcall_offset);
9371
9372 /* Operand 4 is the temporary register. */
9373 op[4] = gen_rtx_REG (Pmode, 1);
9374
9375 /* Operands 5 to 8 can be used as labels. */
9376 op[5] = NULL_RTX;
9377 op[6] = NULL_RTX;
9378 op[7] = NULL_RTX;
9379 op[8] = NULL_RTX;
9380
9381 /* Operand 9 can be used for temporary register. */
9382 op[9] = NULL_RTX;
9383
9384 /* Generate code. */
9385 if (TARGET_64BIT)
9386 {
9387 /* Set up the literal pool pointer if required. */
9388 if ((!DISP_IN_RANGE (delta)
9389 && !CONST_OK_FOR_K (delta)
9390 && !CONST_OK_FOR_Os (delta))
9391 || (!DISP_IN_RANGE (vcall_offset)
9392 && !CONST_OK_FOR_K (vcall_offset)
9393 && !CONST_OK_FOR_Os (vcall_offset)))
9394 {
9395 op[5] = gen_label_rtx ();
9396 output_asm_insn ("larl\t%4,%5", op);
9397 }
9398
9399 /* Add DELTA to this pointer. */
9400 if (delta)
9401 {
9402 if (CONST_OK_FOR_J (delta))
9403 output_asm_insn ("la\t%1,%2(%1)", op);
9404 else if (DISP_IN_RANGE (delta))
9405 output_asm_insn ("lay\t%1,%2(%1)", op);
9406 else if (CONST_OK_FOR_K (delta))
9407 output_asm_insn ("aghi\t%1,%2", op);
9408 else if (CONST_OK_FOR_Os (delta))
9409 output_asm_insn ("agfi\t%1,%2", op);
9410 else
9411 {
9412 op[6] = gen_label_rtx ();
9413 output_asm_insn ("agf\t%1,%6-%5(%4)", op);
9414 }
9415 }
9416
9417 /* Perform vcall adjustment. */
9418 if (vcall_offset)
9419 {
9420 if (DISP_IN_RANGE (vcall_offset))
9421 {
9422 output_asm_insn ("lg\t%4,0(%1)", op);
9423 output_asm_insn ("ag\t%1,%3(%4)", op);
9424 }
9425 else if (CONST_OK_FOR_K (vcall_offset))
9426 {
9427 output_asm_insn ("lghi\t%4,%3", op);
9428 output_asm_insn ("ag\t%4,0(%1)", op);
9429 output_asm_insn ("ag\t%1,0(%4)", op);
9430 }
9431 else if (CONST_OK_FOR_Os (vcall_offset))
9432 {
9433 output_asm_insn ("lgfi\t%4,%3", op);
9434 output_asm_insn ("ag\t%4,0(%1)", op);
9435 output_asm_insn ("ag\t%1,0(%4)", op);
9436 }
9437 else
9438 {
9439 op[7] = gen_label_rtx ();
9440 output_asm_insn ("llgf\t%4,%7-%5(%4)", op);
9441 output_asm_insn ("ag\t%4,0(%1)", op);
9442 output_asm_insn ("ag\t%1,0(%4)", op);
9443 }
9444 }
9445
9446 /* Jump to target. */
9447 output_asm_insn ("jg\t%0", op);
9448
9449 /* Output literal pool if required. */
9450 if (op[5])
9451 {
9452 output_asm_insn (".align\t4", op);
9453 targetm.asm_out.internal_label (file, "L",
9454 CODE_LABEL_NUMBER (op[5]));
9455 }
9456 if (op[6])
9457 {
9458 targetm.asm_out.internal_label (file, "L",
9459 CODE_LABEL_NUMBER (op[6]));
9460 output_asm_insn (".long\t%2", op);
9461 }
9462 if (op[7])
9463 {
9464 targetm.asm_out.internal_label (file, "L",
9465 CODE_LABEL_NUMBER (op[7]));
9466 output_asm_insn (".long\t%3", op);
9467 }
9468 }
9469 else
9470 {
9471 /* Set up the base pointer if required. */
9472 if (!vcall_offset
9473 || (!DISP_IN_RANGE (delta)
9474 && !CONST_OK_FOR_K (delta)
9475 && !CONST_OK_FOR_Os (delta))
9476 || (!DISP_IN_RANGE (delta)
9477 && !CONST_OK_FOR_K (vcall_offset)
9478 && !CONST_OK_FOR_Os (vcall_offset)))
9479 {
9480 op[5] = gen_label_rtx ();
9481 output_asm_insn ("basr\t%4,0", op);
9482 targetm.asm_out.internal_label (file, "L",
9483 CODE_LABEL_NUMBER (op[5]));
9484 }
9485
9486 /* Add DELTA to this pointer. */
9487 if (delta)
9488 {
9489 if (CONST_OK_FOR_J (delta))
9490 output_asm_insn ("la\t%1,%2(%1)", op);
9491 else if (DISP_IN_RANGE (delta))
9492 output_asm_insn ("lay\t%1,%2(%1)", op);
9493 else if (CONST_OK_FOR_K (delta))
9494 output_asm_insn ("ahi\t%1,%2", op);
9495 else if (CONST_OK_FOR_Os (delta))
9496 output_asm_insn ("afi\t%1,%2", op);
9497 else
9498 {
9499 op[6] = gen_label_rtx ();
9500 output_asm_insn ("a\t%1,%6-%5(%4)", op);
9501 }
9502 }
9503
9504 /* Perform vcall adjustment. */
9505 if (vcall_offset)
9506 {
9507 if (CONST_OK_FOR_J (vcall_offset))
9508 {
9509 output_asm_insn ("l\t%4,0(%1)", op);
9510 output_asm_insn ("a\t%1,%3(%4)", op);
9511 }
9512 else if (DISP_IN_RANGE (vcall_offset))
9513 {
9514 output_asm_insn ("l\t%4,0(%1)", op);
9515 output_asm_insn ("ay\t%1,%3(%4)", op);
9516 }
9517 else if (CONST_OK_FOR_K (vcall_offset))
9518 {
9519 output_asm_insn ("lhi\t%4,%3", op);
9520 output_asm_insn ("a\t%4,0(%1)", op);
9521 output_asm_insn ("a\t%1,0(%4)", op);
9522 }
9523 else if (CONST_OK_FOR_Os (vcall_offset))
9524 {
9525 output_asm_insn ("iilf\t%4,%3", op);
9526 output_asm_insn ("a\t%4,0(%1)", op);
9527 output_asm_insn ("a\t%1,0(%4)", op);
9528 }
9529 else
9530 {
9531 op[7] = gen_label_rtx ();
9532 output_asm_insn ("l\t%4,%7-%5(%4)", op);
9533 output_asm_insn ("a\t%4,0(%1)", op);
9534 output_asm_insn ("a\t%1,0(%4)", op);
9535 }
9536
9537 /* We had to clobber the base pointer register.
9538 Set up the base pointer again (with a different base). */
9539 op[5] = gen_label_rtx ();
9540 output_asm_insn ("basr\t%4,0", op);
9541 targetm.asm_out.internal_label (file, "L",
9542 CODE_LABEL_NUMBER (op[5]));
9543 }
9544
9545 /* Jump to target. */
9546 op[8] = gen_label_rtx ();
9547
9548 if (!flag_pic)
9549 output_asm_insn ("l\t%4,%8-%5(%4)", op);
9550 else if (!nonlocal)
9551 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9552 /* We cannot call through .plt, since .plt requires %r12 loaded. */
9553 else if (flag_pic == 1)
9554 {
9555 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9556 output_asm_insn ("l\t%4,%0(%4)", op);
9557 }
9558 else if (flag_pic == 2)
9559 {
9560 op[9] = gen_rtx_REG (Pmode, 0);
9561 output_asm_insn ("l\t%9,%8-4-%5(%4)", op);
9562 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9563 output_asm_insn ("ar\t%4,%9", op);
9564 output_asm_insn ("l\t%4,0(%4)", op);
9565 }
9566
9567 output_asm_insn ("br\t%4", op);
9568
9569 /* Output literal pool. */
9570 output_asm_insn (".align\t4", op);
9571
9572 if (nonlocal && flag_pic == 2)
9573 output_asm_insn (".long\t%0", op);
9574 if (nonlocal)
9575 {
9576 op[0] = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
9577 SYMBOL_REF_FLAGS (op[0]) = SYMBOL_FLAG_LOCAL;
9578 }
9579
9580 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[8]));
9581 if (!flag_pic)
9582 output_asm_insn (".long\t%0", op);
9583 else
9584 output_asm_insn (".long\t%0-%5", op);
9585
9586 if (op[6])
9587 {
9588 targetm.asm_out.internal_label (file, "L",
9589 CODE_LABEL_NUMBER (op[6]));
9590 output_asm_insn (".long\t%2", op);
9591 }
9592 if (op[7])
9593 {
9594 targetm.asm_out.internal_label (file, "L",
9595 CODE_LABEL_NUMBER (op[7]));
9596 output_asm_insn (".long\t%3", op);
9597 }
9598 }
9599 final_end_function ();
9600 }
9601
9602 static bool
9603 s390_valid_pointer_mode (enum machine_mode mode)
9604 {
9605 return (mode == SImode || (TARGET_64BIT && mode == DImode));
9606 }
9607
9608 /* Checks whether the given CALL_EXPR would use a caller
9609 saved register. This is used to decide whether sibling call
9610 optimization could be performed on the respective function
9611 call. */
9612
9613 static bool
9614 s390_call_saved_register_used (tree call_expr)
9615 {
9616 CUMULATIVE_ARGS cum;
9617 tree parameter;
9618 enum machine_mode mode;
9619 tree type;
9620 rtx parm_rtx;
9621 int reg, i;
9622
9623 INIT_CUMULATIVE_ARGS (cum, NULL, NULL, 0, 0);
9624
9625 for (i = 0; i < call_expr_nargs (call_expr); i++)
9626 {
9627 parameter = CALL_EXPR_ARG (call_expr, i);
9628 gcc_assert (parameter);
9629
9630 /* For an undeclared variable passed as parameter we will get
9631 an ERROR_MARK node here. */
9632 if (TREE_CODE (parameter) == ERROR_MARK)
9633 return true;
9634
9635 type = TREE_TYPE (parameter);
9636 gcc_assert (type);
9637
9638 mode = TYPE_MODE (type);
9639 gcc_assert (mode);
9640
9641 if (pass_by_reference (&cum, mode, type, true))
9642 {
9643 mode = Pmode;
9644 type = build_pointer_type (type);
9645 }
9646
9647 parm_rtx = s390_function_arg (&cum, mode, type, 0);
9648
9649 s390_function_arg_advance (&cum, mode, type, 0);
9650
9651 if (!parm_rtx)
9652 continue;
9653
9654 if (REG_P (parm_rtx))
9655 {
9656 for (reg = 0;
9657 reg < HARD_REGNO_NREGS (REGNO (parm_rtx), GET_MODE (parm_rtx));
9658 reg++)
9659 if (!call_used_regs[reg + REGNO (parm_rtx)])
9660 return true;
9661 }
9662
9663 if (GET_CODE (parm_rtx) == PARALLEL)
9664 {
9665 int i;
9666
9667 for (i = 0; i < XVECLEN (parm_rtx, 0); i++)
9668 {
9669 rtx r = XEXP (XVECEXP (parm_rtx, 0, i), 0);
9670
9671 gcc_assert (REG_P (r));
9672
9673 for (reg = 0;
9674 reg < HARD_REGNO_NREGS (REGNO (r), GET_MODE (r));
9675 reg++)
9676 if (!call_used_regs[reg + REGNO (r)])
9677 return true;
9678 }
9679 }
9680
9681 }
9682 return false;
9683 }
9684
9685 /* Return true if the given call expression can be
9686 turned into a sibling call.
9687 DECL holds the declaration of the function to be called whereas
9688 EXP is the call expression itself. */
9689
9690 static bool
9691 s390_function_ok_for_sibcall (tree decl, tree exp)
9692 {
9693 /* The TPF epilogue uses register 1. */
9694 if (TARGET_TPF_PROFILING)
9695 return false;
9696
9697 /* The 31 bit PLT code uses register 12 (GOT pointer - caller saved)
9698 which would have to be restored before the sibcall. */
9699 if (!TARGET_64BIT && flag_pic && decl && !targetm.binds_local_p (decl))
9700 return false;
9701
9702 /* Register 6 on s390 is available as an argument register but unfortunately
9703 call-saved. This makes functions needing this register for arguments
9704 not suitable for sibcalls. */
9705 return !s390_call_saved_register_used (exp);
9706 }
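/* Illustrative sketch (assuming the usual S/390 ELF ABI argument passing,
   where the first word-sized integer arguments go into %r2 .. %r6 and
   %r6 is call-saved): a call such as

       bar (a, b, c, d, e);

   with five word-sized arguments needs %r6 for 'e', so
   s390_call_saved_register_used returns true and the call is not
   turned into a sibling call.  */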
9707
9708 /* Return the fixed registers used for condition codes. */
9709
9710 static bool
9711 s390_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
9712 {
9713 *p1 = CC_REGNUM;
9714 *p2 = INVALID_REGNUM;
9715
9716 return true;
9717 }
9718
9719 /* This function is used by the call expanders of the machine description.
9720 It emits the call insn itself together with the necessary operations
9721 to adjust the target address and returns the emitted insn.
9722 ADDR_LOCATION is the target address rtx
9723 TLS_CALL the location of the thread-local symbol
9724 RESULT_REG the register where the result of the call should be stored
9725 RETADDR_REG the register where the return address should be stored
9726 If this parameter is NULL_RTX the call is considered
9727 to be a sibling call. */
9728
9729 rtx
9730 s390_emit_call (rtx addr_location, rtx tls_call, rtx result_reg,
9731 rtx retaddr_reg)
9732 {
9733 bool plt_call = false;
9734 rtx insn;
9735 rtx call;
9736 rtx clobber;
9737 rtvec vec;
9738
9739 /* Direct function calls need special treatment. */
9740 if (GET_CODE (addr_location) == SYMBOL_REF)
9741 {
9742 /* When calling a global routine in PIC mode, we must
9743 replace the symbol itself with the PLT stub. */
9744 if (flag_pic && !SYMBOL_REF_LOCAL_P (addr_location))
9745 {
9746 if (retaddr_reg != NULL_RTX)
9747 {
9748 addr_location = gen_rtx_UNSPEC (Pmode,
9749 gen_rtvec (1, addr_location),
9750 UNSPEC_PLT);
9751 addr_location = gen_rtx_CONST (Pmode, addr_location);
9752 plt_call = true;
9753 }
9754 else
9755 /* For -fpic code the PLT entries might use r12 which is
9756 call-saved. Therefore we cannot do a sibcall when
9757 calling directly using a symbol ref. When reaching
9758 this point we decided (in s390_function_ok_for_sibcall)
9759 to do a sibcall for a function pointer but one of the
9760 optimizers was able to get rid of the function pointer
9761 by propagating the symbol ref into the call. This
9762 optimization is illegal for S/390 so we turn the direct
9763 call into an indirect call again. */
9764 addr_location = force_reg (Pmode, addr_location);
9765 }
9766
9767 /* Unless we can use the bras(l) insn, force the
9768 routine address into a register. */
9769 if (!TARGET_SMALL_EXEC && !TARGET_CPU_ZARCH)
9770 {
9771 if (flag_pic)
9772 addr_location = legitimize_pic_address (addr_location, 0);
9773 else
9774 addr_location = force_reg (Pmode, addr_location);
9775 }
9776 }
9777
9778 /* If it is already an indirect call or the code above moved the
9779 SYMBOL_REF to somewhere else, make sure the address can be found in
9780 register 1. */
9781 if (retaddr_reg == NULL_RTX
9782 && GET_CODE (addr_location) != SYMBOL_REF
9783 && !plt_call)
9784 {
9785 emit_move_insn (gen_rtx_REG (Pmode, SIBCALL_REGNUM), addr_location);
9786 addr_location = gen_rtx_REG (Pmode, SIBCALL_REGNUM);
9787 }
9788
9789 addr_location = gen_rtx_MEM (QImode, addr_location);
9790 call = gen_rtx_CALL (VOIDmode, addr_location, const0_rtx);
9791
9792 if (result_reg != NULL_RTX)
9793 call = gen_rtx_SET (VOIDmode, result_reg, call);
9794
9795 if (retaddr_reg != NULL_RTX)
9796 {
9797 clobber = gen_rtx_CLOBBER (VOIDmode, retaddr_reg);
9798
9799 if (tls_call != NULL_RTX)
9800 vec = gen_rtvec (3, call, clobber,
9801 gen_rtx_USE (VOIDmode, tls_call));
9802 else
9803 vec = gen_rtvec (2, call, clobber);
9804
9805 call = gen_rtx_PARALLEL (VOIDmode, vec);
9806 }
9807
9808 insn = emit_call_insn (call);
9809
9810 /* 31-bit PLT stubs and tls calls use the GOT register implicitly. */
9811 if ((!TARGET_64BIT && plt_call) || tls_call != NULL_RTX)
9812 {
9813 /* s390_function_ok_for_sibcall should
9814 have denied sibcalls in this case. */
9815 gcc_assert (retaddr_reg != NULL_RTX);
9816
9817 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
9818 }
9819 return insn;
9820 }
9821
9822 /* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
9823
9824 static void
9825 s390_conditional_register_usage (void)
9826 {
9827 int i;
9828
9829 if (flag_pic)
9830 {
9831 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
9832 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
9833 }
9834 if (TARGET_CPU_ZARCH)
9835 {
9836 fixed_regs[BASE_REGNUM] = 0;
9837 call_used_regs[BASE_REGNUM] = 0;
9838 fixed_regs[RETURN_REGNUM] = 0;
9839 call_used_regs[RETURN_REGNUM] = 0;
9840 }
9841 if (TARGET_64BIT)
9842 {
9843 for (i = 24; i < 32; i++)
9844 call_used_regs[i] = call_really_used_regs[i] = 0;
9845 }
9846 else
9847 {
9848 for (i = 18; i < 20; i++)
9849 call_used_regs[i] = call_really_used_regs[i] = 0;
9850 }
9851
9852 if (TARGET_SOFT_FLOAT)
9853 {
9854 for (i = 16; i < 32; i++)
9855 call_used_regs[i] = fixed_regs[i] = 1;
9856 }
9857 }
9858
9859 /* Function corresponding to the eh_return expander. */
9860
9861 static GTY(()) rtx s390_tpf_eh_return_symbol;
9862 void
9863 s390_emit_tpf_eh_return (rtx target)
9864 {
9865 rtx insn, reg;
9866
9867 if (!s390_tpf_eh_return_symbol)
9868 s390_tpf_eh_return_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tpf_eh_return");
9869
9870 reg = gen_rtx_REG (Pmode, 2);
9871
9872 emit_move_insn (reg, target);
9873 insn = s390_emit_call (s390_tpf_eh_return_symbol, NULL_RTX, reg,
9874 gen_rtx_REG (Pmode, RETURN_REGNUM));
9875 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
9876
9877 emit_move_insn (EH_RETURN_HANDLER_RTX, reg);
9878 }
9879
9880 /* Rework the prologue/epilogue to avoid saving/restoring
9881 registers unnecessarily. */
9882
9883 static void
9884 s390_optimize_prologue (void)
9885 {
9886 rtx insn, new_insn, next_insn;
9887
9888 /* Do a final recompute of the frame-related data. */
9889
9890 s390_update_frame_layout ();
9891
9892 /* If all special registers are in fact used, there's nothing we
9893 can do, so no point in walking the insn list. */
9894
9895 if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
9896 && cfun_frame_layout.last_save_gpr >= BASE_REGNUM
9897 && (TARGET_CPU_ZARCH
9898 || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
9899 && cfun_frame_layout.last_save_gpr >= RETURN_REGNUM)))
9900 return;
9901
9902 /* Search for prologue/epilogue insns and replace them. */
9903
9904 for (insn = get_insns (); insn; insn = next_insn)
9905 {
9906 int first, last, off;
9907 rtx set, base, offset;
9908
9909 next_insn = NEXT_INSN (insn);
9910
9911 if (GET_CODE (insn) != INSN)
9912 continue;
9913
9914 if (GET_CODE (PATTERN (insn)) == PARALLEL
9915 && store_multiple_operation (PATTERN (insn), VOIDmode))
9916 {
9917 set = XVECEXP (PATTERN (insn), 0, 0);
9918 first = REGNO (SET_SRC (set));
9919 last = first + XVECLEN (PATTERN (insn), 0) - 1;
9920 offset = const0_rtx;
9921 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
9922 off = INTVAL (offset);
9923
9924 if (GET_CODE (base) != REG || off < 0)
9925 continue;
9926 if (cfun_frame_layout.first_save_gpr != -1
9927 && (cfun_frame_layout.first_save_gpr < first
9928 || cfun_frame_layout.last_save_gpr > last))
9929 continue;
9930 if (REGNO (base) != STACK_POINTER_REGNUM
9931 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
9932 continue;
9933 if (first > BASE_REGNUM || last < BASE_REGNUM)
9934 continue;
9935
9936 if (cfun_frame_layout.first_save_gpr != -1)
9937 {
9938 new_insn = save_gprs (base,
9939 off + (cfun_frame_layout.first_save_gpr
9940 - first) * UNITS_PER_LONG,
9941 cfun_frame_layout.first_save_gpr,
9942 cfun_frame_layout.last_save_gpr);
9943 new_insn = emit_insn_before (new_insn, insn);
9944 INSN_ADDRESSES_NEW (new_insn, -1);
9945 }
9946
9947 remove_insn (insn);
9948 continue;
9949 }
9950
9951 if (cfun_frame_layout.first_save_gpr == -1
9952 && GET_CODE (PATTERN (insn)) == SET
9953 && GET_CODE (SET_SRC (PATTERN (insn))) == REG
9954 && (REGNO (SET_SRC (PATTERN (insn))) == BASE_REGNUM
9955 || (!TARGET_CPU_ZARCH
9956 && REGNO (SET_SRC (PATTERN (insn))) == RETURN_REGNUM))
9957 && GET_CODE (SET_DEST (PATTERN (insn))) == MEM)
9958 {
9959 set = PATTERN (insn);
9960 first = REGNO (SET_SRC (set));
9961 offset = const0_rtx;
9962 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
9963 off = INTVAL (offset);
9964
9965 if (GET_CODE (base) != REG || off < 0)
9966 continue;
9967 if (REGNO (base) != STACK_POINTER_REGNUM
9968 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
9969 continue;
9970
9971 remove_insn (insn);
9972 continue;
9973 }
9974
9975 if (GET_CODE (PATTERN (insn)) == PARALLEL
9976 && load_multiple_operation (PATTERN (insn), VOIDmode))
9977 {
9978 set = XVECEXP (PATTERN (insn), 0, 0);
9979 first = REGNO (SET_DEST (set));
9980 last = first + XVECLEN (PATTERN (insn), 0) - 1;
9981 offset = const0_rtx;
9982 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
9983 off = INTVAL (offset);
9984
9985 if (GET_CODE (base) != REG || off < 0)
9986 continue;
9987 if (cfun_frame_layout.first_restore_gpr != -1
9988 && (cfun_frame_layout.first_restore_gpr < first
9989 || cfun_frame_layout.last_restore_gpr > last))
9990 continue;
9991 if (REGNO (base) != STACK_POINTER_REGNUM
9992 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
9993 continue;
9994 if (first > BASE_REGNUM || last < BASE_REGNUM)
9995 continue;
9996
9997 if (cfun_frame_layout.first_restore_gpr != -1)
9998 {
9999 new_insn = restore_gprs (base,
10000 off + (cfun_frame_layout.first_restore_gpr
10001 - first) * UNITS_PER_LONG,
10002 cfun_frame_layout.first_restore_gpr,
10003 cfun_frame_layout.last_restore_gpr);
10004 new_insn = emit_insn_before (new_insn, insn);
10005 INSN_ADDRESSES_NEW (new_insn, -1);
10006 }
10007
10008 remove_insn (insn);
10009 continue;
10010 }
10011
10012 if (cfun_frame_layout.first_restore_gpr == -1
10013 && GET_CODE (PATTERN (insn)) == SET
10014 && GET_CODE (SET_DEST (PATTERN (insn))) == REG
10015 && (REGNO (SET_DEST (PATTERN (insn))) == BASE_REGNUM
10016 || (!TARGET_CPU_ZARCH
10017 && REGNO (SET_DEST (PATTERN (insn))) == RETURN_REGNUM))
10018 && GET_CODE (SET_SRC (PATTERN (insn))) == MEM)
10019 {
10020 set = PATTERN (insn);
10021 first = REGNO (SET_DEST (set));
10022 offset = const0_rtx;
10023 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
10024 off = INTVAL (offset);
10025
10026 if (GET_CODE (base) != REG || off < 0)
10027 continue;
10028 if (REGNO (base) != STACK_POINTER_REGNUM
10029 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10030 continue;
10031
10032 remove_insn (insn);
10033 continue;
10034 }
10035 }
10036 }
10037
10038 /* On z10 and later the dynamic branch prediction must see the
10039 backward jump within a certain window. If not, it falls back to
10040 the static prediction. This function rearranges the loop backward
10041 branch in a way which makes the static prediction always correct.
10042 The function returns true if it added an instruction. */
10043 static bool
10044 s390_fix_long_loop_prediction (rtx insn)
10045 {
10046 rtx set = single_set (insn);
10047 rtx code_label, label_ref, new_label;
10048 rtx uncond_jump;
10049 rtx cur_insn;
10050 rtx tmp;
10051 int distance;
10052
10053 /* This will exclude branch on count and branch on index patterns
10054 since these are correctly statically predicted. */
10055 if (!set
10056 || SET_DEST (set) != pc_rtx
10057 || GET_CODE (SET_SRC(set)) != IF_THEN_ELSE)
10058 return false;
10059
10060 label_ref = (GET_CODE (XEXP (SET_SRC (set), 1)) == LABEL_REF ?
10061 XEXP (SET_SRC (set), 1) : XEXP (SET_SRC (set), 2));
10062
10063 gcc_assert (GET_CODE (label_ref) == LABEL_REF);
10064
10065 code_label = XEXP (label_ref, 0);
10066
10067 if (INSN_ADDRESSES (INSN_UID (code_label)) == -1
10068 || INSN_ADDRESSES (INSN_UID (insn)) == -1
10069 || (INSN_ADDRESSES (INSN_UID (insn))
10070 - INSN_ADDRESSES (INSN_UID (code_label)) < PREDICT_DISTANCE))
10071 return false;
10072
10073 for (distance = 0, cur_insn = PREV_INSN (insn);
10074 distance < PREDICT_DISTANCE - 6;
10075 distance += get_attr_length (cur_insn), cur_insn = PREV_INSN (cur_insn))
10076 if (!cur_insn || JUMP_P (cur_insn) || LABEL_P (cur_insn))
10077 return false;
10078
10079 new_label = gen_label_rtx ();
10080 uncond_jump = emit_jump_insn_after (
10081 gen_rtx_SET (VOIDmode, pc_rtx,
10082 gen_rtx_LABEL_REF (VOIDmode, code_label)),
10083 insn);
10084 emit_label_after (new_label, uncond_jump);
10085
10086 tmp = XEXP (SET_SRC (set), 1);
10087 XEXP (SET_SRC (set), 1) = XEXP (SET_SRC (set), 2);
10088 XEXP (SET_SRC (set), 2) = tmp;
10089 INSN_CODE (insn) = -1;
10090
10091 XEXP (label_ref, 0) = new_label;
10092 JUMP_LABEL (insn) = new_label;
10093 JUMP_LABEL (uncond_jump) = code_label;
10094
10095 return true;
10096 }
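/* Illustrative sketch of the rewrite performed above for a loop-closing
   branch whose target is too far away for the dynamic predictor
   (mnemonics are illustrative only):

       before:                         after:
         jCC   .Lloop_head               jNCC  .Lnew_label
         (fall through)                  j     .Lloop_head
                                       .Lnew_label:
                                         (fall through)

   The backward branch to the loop head becomes unconditional and is
   therefore always predicted correctly by the static predictor.  */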
10097
10098 /* Returns 1 if INSN reads the value of REG for purposes not related
10099 to addressing of memory, and 0 otherwise. */
10100 static int
10101 s390_non_addr_reg_read_p (rtx reg, rtx insn)
10102 {
10103 return reg_referenced_p (reg, PATTERN (insn))
10104 && !reg_used_in_mem_p (REGNO (reg), PATTERN (insn));
10105 }
10106
10107 /* Starting from INSN find_cond_jump looks downwards in the insn
10108 stream for a single jump insn which is the last user of the
10109 condition code set in INSN. */
10110 static rtx
10111 find_cond_jump (rtx insn)
10112 {
10113 for (; insn; insn = NEXT_INSN (insn))
10114 {
10115 rtx ite, cc;
10116
10117 if (LABEL_P (insn))
10118 break;
10119
10120 if (!JUMP_P (insn))
10121 {
10122 if (reg_mentioned_p (gen_rtx_REG (CCmode, CC_REGNUM), insn))
10123 break;
10124 continue;
10125 }
10126
10127 /* This will be triggered by a return. */
10128 if (GET_CODE (PATTERN (insn)) != SET)
10129 break;
10130
10131 gcc_assert (SET_DEST (PATTERN (insn)) == pc_rtx);
10132 ite = SET_SRC (PATTERN (insn));
10133
10134 if (GET_CODE (ite) != IF_THEN_ELSE)
10135 break;
10136
10137 cc = XEXP (XEXP (ite, 0), 0);
10138 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc)))
10139 break;
10140
10141 if (find_reg_note (insn, REG_DEAD, cc))
10142 return insn;
10143 break;
10144 }
10145
10146 return NULL_RTX;
10147 }
10148
10149 /* Swap the condition in COND and the operands in OP0 and OP1 so that
10150 the semantics does not change. If NULL_RTX is passed as COND the
10151 function tries to find the conditional jump starting with INSN. */
10152 static void
10153 s390_swap_cmp (rtx cond, rtx *op0, rtx *op1, rtx insn)
10154 {
10155 rtx tmp = *op0;
10156
10157 if (cond == NULL_RTX)
10158 {
10159 rtx jump = find_cond_jump (NEXT_INSN (insn));
10160 jump = jump ? single_set (jump) : NULL_RTX;
10161
10162 if (jump == NULL_RTX)
10163 return;
10164
10165 cond = XEXP (XEXP (jump, 1), 0);
10166 }
10167
10168 *op0 = *op1;
10169 *op1 = tmp;
10170 PUT_CODE (cond, swap_condition (GET_CODE (cond)));
10171 }
10172
10173 /* On z10, instructions of the compare-and-branch family have the
10174 property of accessing the register occurring as second operand with
10175 its bits complemented. If such a compare is grouped with a second
10176 instruction that accesses the same register non-complemented, and
10177 if that register's value is delivered via a bypass, then the
10178 pipeline recycles, thereby causing significant performance decline.
10179 This function locates such situations and exchanges the two
10180 operands of the compare. The function returns true whenever it
10181 added an insn. */
10182 static bool
10183 s390_z10_optimize_cmp (rtx insn)
10184 {
10185 rtx prev_insn, next_insn;
10186 bool insn_added_p = false;
10187 rtx cond, *op0, *op1;
10188
10189 if (GET_CODE (PATTERN (insn)) == PARALLEL)
10190 {
10191 /* Handle compare and branch and branch on count
10192 instructions. */
10193 rtx pattern = single_set (insn);
10194
10195 if (!pattern
10196 || SET_DEST (pattern) != pc_rtx
10197 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE)
10198 return false;
10199
10200 cond = XEXP (SET_SRC (pattern), 0);
10201 op0 = &XEXP (cond, 0);
10202 op1 = &XEXP (cond, 1);
10203 }
10204 else if (GET_CODE (PATTERN (insn)) == SET)
10205 {
10206 rtx src, dest;
10207
10208 /* Handle normal compare instructions. */
10209 src = SET_SRC (PATTERN (insn));
10210 dest = SET_DEST (PATTERN (insn));
10211
10212 if (!REG_P (dest)
10213 || !CC_REGNO_P (REGNO (dest))
10214 || GET_CODE (src) != COMPARE)
10215 return false;
10216
10217 /* s390_swap_cmp will try to find the conditional
10218 jump when passing NULL_RTX as condition. */
10219 cond = NULL_RTX;
10220 op0 = &XEXP (src, 0);
10221 op1 = &XEXP (src, 1);
10222 }
10223 else
10224 return false;
10225
10226 if (!REG_P (*op0) || !REG_P (*op1))
10227 return false;
10228
10229 if (GET_MODE_CLASS (GET_MODE (*op0)) != MODE_INT)
10230 return false;
10231
10232 /* Swap the COMPARE arguments and its mask if there is a
10233 conflicting access in the previous insn. */
10234 prev_insn = prev_active_insn (insn);
10235 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
10236 && reg_referenced_p (*op1, PATTERN (prev_insn)))
10237 s390_swap_cmp (cond, op0, op1, insn);
10238
10239 /* Check if there is a conflict with the next insn. If there
10240 was no conflict with the previous insn, then swap the
10241 COMPARE arguments and its mask. If we already swapped
10242 the operands, or if swapping them would cause a conflict
10243 with the previous insn, issue a NOP after the COMPARE in
10244 order to separate the two instructions. */
10245 next_insn = next_active_insn (insn);
10246 if (next_insn != NULL_RTX && INSN_P (next_insn)
10247 && s390_non_addr_reg_read_p (*op1, next_insn))
10248 {
10249 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
10250 && s390_non_addr_reg_read_p (*op0, prev_insn))
10251 {
10252 if (REGNO (*op1) == 0)
10253 emit_insn_after (gen_nop1 (), insn);
10254 else
10255 emit_insn_after (gen_nop (), insn);
10256 insn_added_p = true;
10257 }
10258 else
10259 s390_swap_cmp (cond, op0, op1, insn);
10260 }
10261 return insn_added_p;
10262 }
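/* Illustrative sketch of the hazard handled above (the mnemonic is an
   example only): a z10 compare-and-branch such as

       crj   %r2,%r3,...     # second operand %r3 is accessed complemented

   grouped with a neighbouring instruction that reads %r3 normally and
   receives %r3 via a bypass causes the pipeline to recycle.  The code
   above either swaps %r2 and %r3 in the compare (adjusting the
   condition accordingly) or emits a NOP to separate the two insns.  */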
10263
10264 /* Perform machine-dependent processing. */
10265
10266 static void
10267 s390_reorg (void)
10268 {
10269 bool pool_overflow = false;
10270
10271 /* Make sure all splits have been performed; splits after
10272 machine_dependent_reorg might confuse insn length counts. */
10273 split_all_insns_noflow ();
10274
10275 /* Install the main literal pool and the associated base
10276 register load insns.
10277
10278 In addition, there are two problematic situations we need
10279 to correct:
10280
10281 - the literal pool might be > 4096 bytes in size, so that
10282 some of its elements cannot be directly accessed
10283
10284 - a branch target might be > 64K away from the branch, so that
10285 it is not possible to use a PC-relative instruction.
10286
10287 To fix those, we split the single literal pool into multiple
10288 pool chunks, reloading the pool base register at various
10289 points throughout the function to ensure it always points to
10290 the pool chunk the following code expects, and / or replace
10291 PC-relative branches by absolute branches.
10292
10293 However, the two problems are interdependent: splitting the
10294 literal pool can move a branch further away from its target,
10295 causing the 64K limit to overflow, and on the other hand,
10296 replacing a PC-relative branch by an absolute branch means
10297 we need to put the branch target address into the literal
10298 pool, possibly causing it to overflow.
10299
10300 So, we loop trying to fix up both problems until we manage
10301 to satisfy both conditions at the same time. Note that the
10302 loop is guaranteed to terminate as every pass of the loop
10303 strictly decreases the total number of PC-relative branches
10304 in the function. (This is not completely true as there
10305 might be branch-over-pool insns introduced by chunkify_start.
10306 Those never need to be split however.) */
10307
10308 for (;;)
10309 {
10310 struct constant_pool *pool = NULL;
10311
10312 /* Collect the literal pool. */
10313 if (!pool_overflow)
10314 {
10315 pool = s390_mainpool_start ();
10316 if (!pool)
10317 pool_overflow = true;
10318 }
10319
10320 /* If literal pool overflowed, start to chunkify it. */
10321 if (pool_overflow)
10322 pool = s390_chunkify_start ();
10323
10324 /* Split out-of-range branches. If this has created new
10325 literal pool entries, cancel current chunk list and
10326 recompute it. zSeries machines have large branch
10327 instructions, so we never need to split a branch. */
10328 if (!TARGET_CPU_ZARCH && s390_split_branches ())
10329 {
10330 if (pool_overflow)
10331 s390_chunkify_cancel (pool);
10332 else
10333 s390_mainpool_cancel (pool);
10334
10335 continue;
10336 }
10337
10338 /* If we made it up to here, both conditions are satisfied.
10339 Finish up literal pool related changes. */
10340 if (pool_overflow)
10341 s390_chunkify_finish (pool);
10342 else
10343 s390_mainpool_finish (pool);
10344
10345 /* We're done splitting branches. */
10346 cfun->machine->split_branches_pending_p = false;
10347 break;
10348 }
10349
10350 /* Generate out-of-pool execute target insns. */
10351 if (TARGET_CPU_ZARCH)
10352 {
10353 rtx insn, label, target;
10354
10355 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10356 {
10357 label = s390_execute_label (insn);
10358 if (!label)
10359 continue;
10360
10361 gcc_assert (label != const0_rtx);
10362
10363 target = emit_label (XEXP (label, 0));
10364 INSN_ADDRESSES_NEW (target, -1);
10365
10366 target = emit_insn (s390_execute_target (insn));
10367 INSN_ADDRESSES_NEW (target, -1);
10368 }
10369 }
10370
10371 /* Try to optimize prologue and epilogue further. */
10372 s390_optimize_prologue ();
10373
10374 /* Walk over the insns and do some >=z10 specific changes. */
10375 if (s390_tune == PROCESSOR_2097_Z10
10376 || s390_tune == PROCESSOR_2817_Z196)
10377 {
10378 rtx insn;
10379 bool insn_added_p = false;
10380
10381 /* The insn lengths and addresses have to be up to date for the
10382 following manipulations. */
10383 shorten_branches (get_insns ());
10384
10385 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10386 {
10387 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
10388 continue;
10389
10390 if (JUMP_P (insn))
10391 insn_added_p |= s390_fix_long_loop_prediction (insn);
10392
10393 if ((GET_CODE (PATTERN (insn)) == PARALLEL
10394 || GET_CODE (PATTERN (insn)) == SET)
10395 && s390_tune == PROCESSOR_2097_Z10)
10396 insn_added_p |= s390_z10_optimize_cmp (insn);
10397 }
10398
10399 /* Adjust branches if we added new instructions. */
10400 if (insn_added_p)
10401 shorten_branches (get_insns ());
10402 }
10403 }
10404
10405 /* Return true if INSN is a fp load insn writing register REGNO. */
10406 static inline bool
10407 s390_fpload_toreg (rtx insn, unsigned int regno)
10408 {
10409 rtx set;
10410 enum attr_type flag = s390_safe_attr_type (insn);
10411
10412 if (flag != TYPE_FLOADSF && flag != TYPE_FLOADDF)
10413 return false;
10414
10415 set = single_set (insn);
10416
10417 if (set == NULL_RTX)
10418 return false;
10419
10420 if (!REG_P (SET_DEST (set)) || !MEM_P (SET_SRC (set)))
10421 return false;
10422
10423 if (REGNO (SET_DEST (set)) != regno)
10424 return false;
10425
10426 return true;
10427 }
10428
10429 /* This value describes the distance to be avoided between an
10430 arithmetic fp instruction and an fp load writing the same register.
10431 Z10_EARLYLOAD_DISTANCE - 1 as well as Z10_EARLYLOAD_DISTANCE + 1 is
10432 fine but the exact value has to be avoided. Otherwise the FP
10433 pipeline will throw an exception causing a major penalty. */
10434 #define Z10_EARLYLOAD_DISTANCE 7
10435
10436 /* Rearrange the ready list in order to avoid the situation described
10437 for Z10_EARLYLOAD_DISTANCE. A problematic load instruction is
10438 moved to the very end of the ready list. */
10439 static void
10440 s390_z10_prevent_earlyload_conflicts (rtx *ready, int *nready_p)
10441 {
10442 unsigned int regno;
10443 int nready = *nready_p;
10444 rtx tmp;
10445 int i;
10446 rtx insn;
10447 rtx set;
10448 enum attr_type flag;
10449 int distance;
10450
10451 /* Skip DISTANCE - 1 active insns. */
10452 for (insn = last_scheduled_insn, distance = Z10_EARLYLOAD_DISTANCE - 1;
10453 distance > 0 && insn != NULL_RTX;
10454 distance--, insn = prev_active_insn (insn))
10455 if (CALL_P (insn) || JUMP_P (insn))
10456 return;
10457
10458 if (insn == NULL_RTX)
10459 return;
10460
10461 set = single_set (insn);
10462
10463 if (set == NULL_RTX || !REG_P (SET_DEST (set))
10464 || GET_MODE_CLASS (GET_MODE (SET_DEST (set))) != MODE_FLOAT)
10465 return;
10466
10467 flag = s390_safe_attr_type (insn);
10468
10469 if (flag == TYPE_FLOADSF || flag == TYPE_FLOADDF)
10470 return;
10471
10472 regno = REGNO (SET_DEST (set));
10473 i = nready - 1;
10474
10475 while (!s390_fpload_toreg (ready[i], regno) && i > 0)
10476 i--;
10477
10478 if (!i)
10479 return;
10480
10481 tmp = ready[i];
10482 memmove (&ready[1], &ready[0], sizeof (rtx) * i);
10483 ready[0] = tmp;
10484 }
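/* Note on the reordering above, assuming the usual scheduler convention
   that READY is ordered from lowest to highest priority and the next
   insn to issue is taken from its last element: placing the problematic
   fp load at ready[0] gives it the lowest issue priority in this cycle,
   which is what "moved to the very end of the ready list" refers to.  */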
10485
10486 /* This function is called via hook TARGET_SCHED_REORDER before
10487 issuing one insn from list READY which contains *NREADYP entries.
10488 For target z10 it reorders load instructions to avoid early load
10489 conflicts in the floating point pipeline. */
10490 static int
10491 s390_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
10492 rtx *ready, int *nreadyp, int clock ATTRIBUTE_UNUSED)
10493 {
10494 if (s390_tune == PROCESSOR_2097_Z10)
10495 if (reload_completed && *nreadyp > 1)
10496 s390_z10_prevent_earlyload_conflicts (ready, nreadyp);
10497
10498 return s390_issue_rate ();
10499 }
10500
10501 /* This function is called via hook TARGET_SCHED_VARIABLE_ISSUE after
10502 the scheduler has issued INSN. It stores the last issued insn into
10503 last_scheduled_insn in order to make it available for
10504 s390_sched_reorder. */
10505 static int
10506 s390_sched_variable_issue (FILE *file ATTRIBUTE_UNUSED,
10507 int verbose ATTRIBUTE_UNUSED,
10508 rtx insn, int more)
10509 {
10510 last_scheduled_insn = insn;
10511
10512 if (GET_CODE (PATTERN (insn)) != USE
10513 && GET_CODE (PATTERN (insn)) != CLOBBER)
10514 return more - 1;
10515 else
10516 return more;
10517 }
10518
10519 static void
10520 s390_sched_init (FILE *file ATTRIBUTE_UNUSED,
10521 int verbose ATTRIBUTE_UNUSED,
10522 int max_ready ATTRIBUTE_UNUSED)
10523 {
10524 last_scheduled_insn = NULL_RTX;
10525 }
10526
10527 /* This function checks the whole of insn X for memory references. The
10528 function always returns zero because the framework it is called
10529 from would stop recursively analyzing the insn upon a return value
10530 other than zero. The real result of this function is updating
10531 counter variable MEM_COUNT. */
10532 static int
10533 check_dpu (rtx *x, unsigned *mem_count)
10534 {
10535 if (*x != NULL_RTX && MEM_P (*x))
10536 (*mem_count)++;
10537 return 0;
10538 }
10539
10540 /* This target hook implementation for TARGET_LOOP_UNROLL_ADJUST calculates
10541 a new number of times struct loop *loop should be unrolled when tuning for
10542 cpus with a built-in stride prefetcher.
10543 The loop is analyzed for memory accesses by calling check_dpu for
10544 each rtx of the loop. Depending on the loop_depth and the number of
10545 memory accesses a new number <= nunroll is returned to improve the
10546 behaviour of the hardware prefetch unit. */
10547 static unsigned
10548 s390_loop_unroll_adjust (unsigned nunroll, struct loop *loop)
10549 {
10550 basic_block *bbs;
10551 rtx insn;
10552 unsigned i;
10553 unsigned mem_count = 0;
10554
10555 if (s390_tune != PROCESSOR_2097_Z10 && s390_tune != PROCESSOR_2817_Z196)
10556 return nunroll;
10557
10558 /* Count the number of memory references within the loop body. */
10559 bbs = get_loop_body (loop);
10560 for (i = 0; i < loop->num_nodes; i++)
10561 {
10562 for (insn = BB_HEAD (bbs[i]); insn != BB_END (bbs[i]); insn = NEXT_INSN (insn))
10563 if (INSN_P (insn) && INSN_CODE (insn) != -1)
10564 for_each_rtx (&insn, (rtx_function) check_dpu, &mem_count);
10565 }
10566 free (bbs);
10567
10568 /* Prevent division by zero; if there are no memory references we do not need to adjust nunroll anyway. */
10569 if (mem_count == 0)
10570 return nunroll;
10571
10572 switch (loop_depth(loop))
10573 {
10574 case 1:
10575 return MIN (nunroll, 28 / mem_count);
10576 case 2:
10577 return MIN (nunroll, 22 / mem_count);
10578 default:
10579 return MIN (nunroll, 16 / mem_count);
10580 }
10581 }
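/* Worked example for the heuristic above (illustrative numbers only):
   a loop at depth 1 whose body contains 7 memory references is limited
   to MIN (nunroll, 28 / 7) = MIN (nunroll, 4) copies, so any requested
   unroll factor larger than 4 is reduced to 4.  */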
10582
10583 /* Initialize GCC target structure. */
10584
10585 #undef TARGET_ASM_ALIGNED_HI_OP
10586 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
10587 #undef TARGET_ASM_ALIGNED_DI_OP
10588 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
10589 #undef TARGET_ASM_INTEGER
10590 #define TARGET_ASM_INTEGER s390_assemble_integer
10591
10592 #undef TARGET_ASM_OPEN_PAREN
10593 #define TARGET_ASM_OPEN_PAREN ""
10594
10595 #undef TARGET_ASM_CLOSE_PAREN
10596 #define TARGET_ASM_CLOSE_PAREN ""
10597
10598 #undef TARGET_DEFAULT_TARGET_FLAGS
10599 #define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT)
10600
10601 #undef TARGET_HANDLE_OPTION
10602 #define TARGET_HANDLE_OPTION s390_handle_option
10603
10604 #undef TARGET_OPTION_OVERRIDE
10605 #define TARGET_OPTION_OVERRIDE s390_option_override
10606
10607 #undef TARGET_OPTION_OPTIMIZATION_TABLE
10608 #define TARGET_OPTION_OPTIMIZATION_TABLE s390_option_optimization_table
10609
10610 #undef TARGET_OPTION_INIT_STRUCT
10611 #define TARGET_OPTION_INIT_STRUCT s390_option_init_struct
10612
10613 #undef TARGET_ENCODE_SECTION_INFO
10614 #define TARGET_ENCODE_SECTION_INFO s390_encode_section_info
10615
10616 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10617 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
10618
10619 #ifdef HAVE_AS_TLS
10620 #undef TARGET_HAVE_TLS
10621 #define TARGET_HAVE_TLS true
10622 #endif
10623 #undef TARGET_CANNOT_FORCE_CONST_MEM
10624 #define TARGET_CANNOT_FORCE_CONST_MEM s390_cannot_force_const_mem
10625
10626 #undef TARGET_DELEGITIMIZE_ADDRESS
10627 #define TARGET_DELEGITIMIZE_ADDRESS s390_delegitimize_address
10628
10629 #undef TARGET_LEGITIMIZE_ADDRESS
10630 #define TARGET_LEGITIMIZE_ADDRESS s390_legitimize_address
10631
10632 #undef TARGET_RETURN_IN_MEMORY
10633 #define TARGET_RETURN_IN_MEMORY s390_return_in_memory
10634
10635 #undef TARGET_INIT_BUILTINS
10636 #define TARGET_INIT_BUILTINS s390_init_builtins
10637 #undef TARGET_EXPAND_BUILTIN
10638 #define TARGET_EXPAND_BUILTIN s390_expand_builtin
10639
10640 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
10641 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA s390_output_addr_const_extra
10642
10643 #undef TARGET_ASM_OUTPUT_MI_THUNK
10644 #define TARGET_ASM_OUTPUT_MI_THUNK s390_output_mi_thunk
10645 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
10646 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
10647
10648 #undef TARGET_SCHED_ADJUST_PRIORITY
10649 #define TARGET_SCHED_ADJUST_PRIORITY s390_adjust_priority
10650 #undef TARGET_SCHED_ISSUE_RATE
10651 #define TARGET_SCHED_ISSUE_RATE s390_issue_rate
10652 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
10653 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD s390_first_cycle_multipass_dfa_lookahead
10654
10655 #undef TARGET_SCHED_VARIABLE_ISSUE
10656 #define TARGET_SCHED_VARIABLE_ISSUE s390_sched_variable_issue
10657 #undef TARGET_SCHED_REORDER
10658 #define TARGET_SCHED_REORDER s390_sched_reorder
10659 #undef TARGET_SCHED_INIT
10660 #define TARGET_SCHED_INIT s390_sched_init
10661
10662 #undef TARGET_CANNOT_COPY_INSN_P
10663 #define TARGET_CANNOT_COPY_INSN_P s390_cannot_copy_insn_p
10664 #undef TARGET_RTX_COSTS
10665 #define TARGET_RTX_COSTS s390_rtx_costs
10666 #undef TARGET_ADDRESS_COST
10667 #define TARGET_ADDRESS_COST s390_address_cost
10668 #undef TARGET_REGISTER_MOVE_COST
10669 #define TARGET_REGISTER_MOVE_COST s390_register_move_cost
10670 #undef TARGET_MEMORY_MOVE_COST
10671 #define TARGET_MEMORY_MOVE_COST s390_memory_move_cost
10672
10673 #undef TARGET_MACHINE_DEPENDENT_REORG
10674 #define TARGET_MACHINE_DEPENDENT_REORG s390_reorg
10675
10676 #undef TARGET_VALID_POINTER_MODE
10677 #define TARGET_VALID_POINTER_MODE s390_valid_pointer_mode
10678
10679 #undef TARGET_BUILD_BUILTIN_VA_LIST
10680 #define TARGET_BUILD_BUILTIN_VA_LIST s390_build_builtin_va_list
10681 #undef TARGET_EXPAND_BUILTIN_VA_START
10682 #define TARGET_EXPAND_BUILTIN_VA_START s390_va_start
10683 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
10684 #define TARGET_GIMPLIFY_VA_ARG_EXPR s390_gimplify_va_arg
10685
10686 #undef TARGET_PROMOTE_FUNCTION_MODE
10687 #define TARGET_PROMOTE_FUNCTION_MODE s390_promote_function_mode
10688 #undef TARGET_PASS_BY_REFERENCE
10689 #define TARGET_PASS_BY_REFERENCE s390_pass_by_reference
10690
10691 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
10692 #define TARGET_FUNCTION_OK_FOR_SIBCALL s390_function_ok_for_sibcall
10693 #undef TARGET_FUNCTION_ARG
10694 #define TARGET_FUNCTION_ARG s390_function_arg
10695 #undef TARGET_FUNCTION_ARG_ADVANCE
10696 #define TARGET_FUNCTION_ARG_ADVANCE s390_function_arg_advance
10697
10698 #undef TARGET_FIXED_CONDITION_CODE_REGS
10699 #define TARGET_FIXED_CONDITION_CODE_REGS s390_fixed_condition_code_regs
10700
10701 #undef TARGET_CC_MODES_COMPATIBLE
10702 #define TARGET_CC_MODES_COMPATIBLE s390_cc_modes_compatible
10703
10704 #undef TARGET_INVALID_WITHIN_DOLOOP
10705 #define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_null
10706
10707 #ifdef HAVE_AS_TLS
10708 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
10709 #define TARGET_ASM_OUTPUT_DWARF_DTPREL s390_output_dwarf_dtprel
10710 #endif
10711
10712 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
10713 #undef TARGET_MANGLE_TYPE
10714 #define TARGET_MANGLE_TYPE s390_mangle_type
10715 #endif
10716
10717 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10718 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
10719
10720 #undef TARGET_PREFERRED_RELOAD_CLASS
10721 #define TARGET_PREFERRED_RELOAD_CLASS s390_preferred_reload_class
10722
10723 #undef TARGET_SECONDARY_RELOAD
10724 #define TARGET_SECONDARY_RELOAD s390_secondary_reload
10725
10726 #undef TARGET_LIBGCC_CMP_RETURN_MODE
10727 #define TARGET_LIBGCC_CMP_RETURN_MODE s390_libgcc_cmp_return_mode
10728
10729 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
10730 #define TARGET_LIBGCC_SHIFT_COUNT_MODE s390_libgcc_shift_count_mode
10731
10732 #undef TARGET_LEGITIMATE_ADDRESS_P
10733 #define TARGET_LEGITIMATE_ADDRESS_P s390_legitimate_address_p
10734
10735 #undef TARGET_CAN_ELIMINATE
10736 #define TARGET_CAN_ELIMINATE s390_can_eliminate
10737
10738 #undef TARGET_CONDITIONAL_REGISTER_USAGE
10739 #define TARGET_CONDITIONAL_REGISTER_USAGE s390_conditional_register_usage
10740
10741 #undef TARGET_LOOP_UNROLL_ADJUST
10742 #define TARGET_LOOP_UNROLL_ADJUST s390_loop_unroll_adjust
10743
10744 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
10745 #define TARGET_ASM_TRAMPOLINE_TEMPLATE s390_asm_trampoline_template
10746 #undef TARGET_TRAMPOLINE_INIT
10747 #define TARGET_TRAMPOLINE_INIT s390_trampoline_init
10748
10749 #undef TARGET_UNWIND_WORD_MODE
10750 #define TARGET_UNWIND_WORD_MODE s390_unwind_word_mode
10751
10752 struct gcc_target targetm = TARGET_INITIALIZER;
10753
10754 #include "gt-s390.h"