s390.c (s390_delegitimize_address): Handle GOTOFF unspecs.
gcc/config/s390/s390.c
1 /* Subroutines used for code generation on IBM S/390 and zSeries
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006,
3 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
4 Contributed by Hartmut Penner (hpenner@de.ibm.com) and
5 Ulrich Weigand (uweigand@de.ibm.com) and
6 Andreas Krebbel (Andreas.Krebbel@de.ibm.com).
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "tm_p.h"
31 #include "regs.h"
32 #include "hard-reg-set.h"
33 #include "insn-config.h"
34 #include "conditions.h"
35 #include "output.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "except.h"
39 #include "function.h"
40 #include "recog.h"
41 #include "expr.h"
42 #include "reload.h"
43 #include "diagnostic-core.h"
44 #include "basic-block.h"
45 #include "integrate.h"
46 #include "ggc.h"
47 #include "target.h"
48 #include "target-def.h"
49 #include "debug.h"
50 #include "langhooks.h"
51 #include "optabs.h"
52 #include "gimple.h"
53 #include "df.h"
54 #include "params.h"
55 #include "cfgloop.h"
56
57
58 /* Define the specific costs for a given cpu. */
59
60 struct processor_costs
61 {
62 /* multiplication */
63 const int m; /* cost of an M instruction. */
64 const int mghi; /* cost of an MGHI instruction. */
65 const int mh; /* cost of an MH instruction. */
66 const int mhi; /* cost of an MHI instruction. */
67 const int ml; /* cost of an ML instruction. */
68 const int mr; /* cost of an MR instruction. */
69 const int ms; /* cost of an MS instruction. */
70 const int msg; /* cost of an MSG instruction. */
71 const int msgf; /* cost of an MSGF instruction. */
72 const int msgfr; /* cost of an MSGFR instruction. */
73 const int msgr; /* cost of an MSGR instruction. */
74 const int msr; /* cost of an MSR instruction. */
75 const int mult_df; /* cost of multiplication in DFmode. */
76 const int mxbr;
77 /* square root */
78 const int sqxbr; /* cost of square root in TFmode. */
79 const int sqdbr; /* cost of square root in DFmode. */
80 const int sqebr; /* cost of square root in SFmode. */
81 /* multiply and add */
82 const int madbr; /* cost of multiply and add in DFmode. */
83 const int maebr; /* cost of multiply and add in SFmode. */
84 /* division */
85 const int dxbr;
86 const int ddbr;
87 const int debr;
88 const int dlgr;
89 const int dlr;
90 const int dr;
91 const int dsgfr;
92 const int dsgr;
93 };
94
95 const struct processor_costs *s390_cost;
96
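/* Cost tables for the supported CPU levels.  s390_option_override selects
   one of them via s390_cost according to s390_tune; all costs are expressed
   with COSTS_N_INSNS, i.e. relative to a single simple instruction.  */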
97 static const
98 struct processor_costs z900_cost =
99 {
100 COSTS_N_INSNS (5), /* M */
101 COSTS_N_INSNS (10), /* MGHI */
102 COSTS_N_INSNS (5), /* MH */
103 COSTS_N_INSNS (4), /* MHI */
104 COSTS_N_INSNS (5), /* ML */
105 COSTS_N_INSNS (5), /* MR */
106 COSTS_N_INSNS (4), /* MS */
107 COSTS_N_INSNS (15), /* MSG */
108 COSTS_N_INSNS (7), /* MSGF */
109 COSTS_N_INSNS (7), /* MSGFR */
110 COSTS_N_INSNS (10), /* MSGR */
111 COSTS_N_INSNS (4), /* MSR */
112 COSTS_N_INSNS (7), /* multiplication in DFmode */
113 COSTS_N_INSNS (13), /* MXBR */
114 COSTS_N_INSNS (136), /* SQXBR */
115 COSTS_N_INSNS (44), /* SQDBR */
116 COSTS_N_INSNS (35), /* SQEBR */
117 COSTS_N_INSNS (18), /* MADBR */
118 COSTS_N_INSNS (13), /* MAEBR */
119 COSTS_N_INSNS (134), /* DXBR */
120 COSTS_N_INSNS (30), /* DDBR */
121 COSTS_N_INSNS (27), /* DEBR */
122 COSTS_N_INSNS (220), /* DLGR */
123 COSTS_N_INSNS (34), /* DLR */
124 COSTS_N_INSNS (34), /* DR */
125 COSTS_N_INSNS (32), /* DSGFR */
126 COSTS_N_INSNS (32), /* DSGR */
127 };
128
129 static const
130 struct processor_costs z990_cost =
131 {
132 COSTS_N_INSNS (4), /* M */
133 COSTS_N_INSNS (2), /* MGHI */
134 COSTS_N_INSNS (2), /* MH */
135 COSTS_N_INSNS (2), /* MHI */
136 COSTS_N_INSNS (4), /* ML */
137 COSTS_N_INSNS (4), /* MR */
138 COSTS_N_INSNS (5), /* MS */
139 COSTS_N_INSNS (6), /* MSG */
140 COSTS_N_INSNS (4), /* MSGF */
141 COSTS_N_INSNS (4), /* MSGFR */
142 COSTS_N_INSNS (4), /* MSGR */
143 COSTS_N_INSNS (4), /* MSR */
144 COSTS_N_INSNS (1), /* multiplication in DFmode */
145 COSTS_N_INSNS (28), /* MXBR */
146 COSTS_N_INSNS (130), /* SQXBR */
147 COSTS_N_INSNS (66), /* SQDBR */
148 COSTS_N_INSNS (38), /* SQEBR */
149 COSTS_N_INSNS (1), /* MADBR */
150 COSTS_N_INSNS (1), /* MAEBR */
151 COSTS_N_INSNS (60), /* DXBR */
152 COSTS_N_INSNS (40), /* DDBR */
153 COSTS_N_INSNS (26), /* DEBR */
154 COSTS_N_INSNS (176), /* DLGR */
155 COSTS_N_INSNS (31), /* DLR */
156 COSTS_N_INSNS (31), /* DR */
157 COSTS_N_INSNS (31), /* DSGFR */
158 COSTS_N_INSNS (31), /* DSGR */
159 };
160
161 static const
162 struct processor_costs z9_109_cost =
163 {
164 COSTS_N_INSNS (4), /* M */
165 COSTS_N_INSNS (2), /* MGHI */
166 COSTS_N_INSNS (2), /* MH */
167 COSTS_N_INSNS (2), /* MHI */
168 COSTS_N_INSNS (4), /* ML */
169 COSTS_N_INSNS (4), /* MR */
170 COSTS_N_INSNS (5), /* MS */
171 COSTS_N_INSNS (6), /* MSG */
172 COSTS_N_INSNS (4), /* MSGF */
173 COSTS_N_INSNS (4), /* MSGFR */
174 COSTS_N_INSNS (4), /* MSGR */
175 COSTS_N_INSNS (4), /* MSR */
176 COSTS_N_INSNS (1), /* multiplication in DFmode */
177 COSTS_N_INSNS (28), /* MXBR */
178 COSTS_N_INSNS (130), /* SQXBR */
179 COSTS_N_INSNS (66), /* SQDBR */
180 COSTS_N_INSNS (38), /* SQEBR */
181 COSTS_N_INSNS (1), /* MADBR */
182 COSTS_N_INSNS (1), /* MAEBR */
183 COSTS_N_INSNS (60), /* DXBR */
184 COSTS_N_INSNS (40), /* DDBR */
185 COSTS_N_INSNS (26), /* DEBR */
186 COSTS_N_INSNS (30), /* DLGR */
187 COSTS_N_INSNS (23), /* DLR */
188 COSTS_N_INSNS (23), /* DR */
189 COSTS_N_INSNS (24), /* DSGFR */
190 COSTS_N_INSNS (24), /* DSGR */
191 };
192
193 static const
194 struct processor_costs z10_cost =
195 {
196 COSTS_N_INSNS (10), /* M */
197 COSTS_N_INSNS (10), /* MGHI */
198 COSTS_N_INSNS (10), /* MH */
199 COSTS_N_INSNS (10), /* MHI */
200 COSTS_N_INSNS (10), /* ML */
201 COSTS_N_INSNS (10), /* MR */
202 COSTS_N_INSNS (10), /* MS */
203 COSTS_N_INSNS (10), /* MSG */
204 COSTS_N_INSNS (10), /* MSGF */
205 COSTS_N_INSNS (10), /* MSGFR */
206 COSTS_N_INSNS (10), /* MSGR */
207 COSTS_N_INSNS (10), /* MSR */
208 COSTS_N_INSNS (1) , /* multiplication in DFmode */
209 COSTS_N_INSNS (50), /* MXBR */
210 COSTS_N_INSNS (120), /* SQXBR */
211 COSTS_N_INSNS (52), /* SQDBR */
212 COSTS_N_INSNS (38), /* SQEBR */
213 COSTS_N_INSNS (1), /* MADBR */
214 COSTS_N_INSNS (1), /* MAEBR */
215 COSTS_N_INSNS (111), /* DXBR */
216 COSTS_N_INSNS (39), /* DDBR */
217 COSTS_N_INSNS (32), /* DEBR */
218 COSTS_N_INSNS (160), /* DLGR */
219 COSTS_N_INSNS (71), /* DLR */
220 COSTS_N_INSNS (71), /* DR */
221 COSTS_N_INSNS (71), /* DSGFR */
222 COSTS_N_INSNS (71), /* DSGR */
223 };
224
225 static const
226 struct processor_costs z196_cost =
227 {
228 COSTS_N_INSNS (7), /* M */
229 COSTS_N_INSNS (5), /* MGHI */
230 COSTS_N_INSNS (5), /* MH */
231 COSTS_N_INSNS (5), /* MHI */
232 COSTS_N_INSNS (7), /* ML */
233 COSTS_N_INSNS (7), /* MR */
234 COSTS_N_INSNS (6), /* MS */
235 COSTS_N_INSNS (8), /* MSG */
236 COSTS_N_INSNS (6), /* MSGF */
237 COSTS_N_INSNS (6), /* MSGFR */
238 COSTS_N_INSNS (8), /* MSGR */
239 COSTS_N_INSNS (6), /* MSR */
240 COSTS_N_INSNS (1) , /* multiplication in DFmode */
241 COSTS_N_INSNS (40), /* MXBR B+40 */
242 COSTS_N_INSNS (100), /* SQXBR B+100 */
243 COSTS_N_INSNS (42), /* SQDBR B+42 */
244 COSTS_N_INSNS (28), /* SQEBR B+28 */
245 COSTS_N_INSNS (1), /* MADBR B */
246 COSTS_N_INSNS (1), /* MAEBR B */
247 COSTS_N_INSNS (101), /* DXBR B+101 */
248 COSTS_N_INSNS (29), /* DDBR */
249 COSTS_N_INSNS (22), /* DEBR */
250 COSTS_N_INSNS (160), /* DLGR cracked */
251 COSTS_N_INSNS (160), /* DLR cracked */
252 COSTS_N_INSNS (160), /* DR expanded */
253 COSTS_N_INSNS (160), /* DSGFR cracked */
254 COSTS_N_INSNS (160), /* DSGR cracked */
255 };
256
257 extern int reload_completed;
258
259 /* Kept up to date using the SCHED_VARIABLE_ISSUE hook. */
260 static rtx last_scheduled_insn;
261
262 /* Structure used to hold the components of an S/390 memory
263 address. A legitimate address on S/390 is of the general
264 form
265 base + index + displacement
266 where any of the components is optional.
267
268 base and index are registers of the class ADDR_REGS,
269 displacement is an unsigned 12-bit immediate constant (or a signed 20-bit immediate with the long-displacement facility). */
270
271 struct s390_address
272 {
273 rtx base;
274 rtx indx;
275 rtx disp;
276 bool pointer;
277 bool literal_pool;
278 };
279
280 /* Which cpu are we tuning for. */
281 enum processor_type s390_tune = PROCESSOR_max;
282 int s390_tune_flags;
283 /* Which instruction set architecture to use. */
284 enum processor_type s390_arch;
285 int s390_arch_flags;
286
287 HOST_WIDE_INT s390_warn_framesize = 0;
288 HOST_WIDE_INT s390_stack_size = 0;
289 HOST_WIDE_INT s390_stack_guard = 0;
290
291 /* The following structure is embedded in the machine
292 specific part of struct function. */
293
294 struct GTY (()) s390_frame_layout
295 {
296 /* Offset within stack frame. */
297 HOST_WIDE_INT gprs_offset;
298 HOST_WIDE_INT f0_offset;
299 HOST_WIDE_INT f4_offset;
300 HOST_WIDE_INT f8_offset;
301 HOST_WIDE_INT backchain_offset;
302
303 /* Numbers of the first and last GPRs for which slots in the
304 register save area are reserved. */
305 int first_save_gpr_slot;
306 int last_save_gpr_slot;
307
308 /* Number of first and last gpr to be saved, restored. */
309 int first_save_gpr;
310 int first_restore_gpr;
311 int last_save_gpr;
312 int last_restore_gpr;
313
314 /* Bits standing for floating point registers. Set, if the
315 respective register has to be saved. Starting with reg 16 (f0)
316 at the rightmost bit.
317 Bit 15 - 8 7 6 5 4 3 2 1 0
318 fpr 15 - 8 7 5 3 1 6 4 2 0
319 reg 31 - 24 23 22 21 20 19 18 17 16 */
320 unsigned int fpr_bitmap;
321
322 /* Number of floating point registers f8-f15 which must be saved. */
323 int high_fprs;
324
325 /* Set if return address needs to be saved.
326 This flag is set by s390_return_addr_rtx if it could not use
327 the initial value of r14 and therefore depends on r14 saved
328 to the stack. */
329 bool save_return_addr_p;
330
331 /* Size of stack frame. */
332 HOST_WIDE_INT frame_size;
333 };
334
335 /* Define the structure for the machine field in struct function. */
336
337 struct GTY(()) machine_function
338 {
339 struct s390_frame_layout frame_layout;
340
341 /* Literal pool base register. */
342 rtx base_reg;
343
344 /* True if we may need to perform branch splitting. */
345 bool split_branches_pending_p;
346
347 /* Some local-dynamic TLS symbol name. */
348 const char *some_ld_name;
349
350 bool has_landing_pad_p;
351 };
352
353 /* A few accessor macros for struct cfun->machine->s390_frame_layout. */
354
355 #define cfun_frame_layout (cfun->machine->frame_layout)
356 #define cfun_save_high_fprs_p (!!cfun_frame_layout.high_fprs)
357 #define cfun_gprs_save_area_size ((cfun_frame_layout.last_save_gpr_slot - \
358 cfun_frame_layout.first_save_gpr_slot + 1) * UNITS_PER_LONG)
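/* For instance, reserving slots for r6 through r15 gives
   (15 - 6 + 1) * UNITS_PER_LONG bytes.  */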
359 #define cfun_set_fpr_bit(BITNUM) (cfun->machine->frame_layout.fpr_bitmap |= \
360 (1 << (BITNUM)))
361 #define cfun_fpr_bit_p(BITNUM) (!!(cfun->machine->frame_layout.fpr_bitmap & \
362 (1 << (BITNUM))))
363
364 /* Number of GPRs and FPRs used for argument passing. */
365 #define GP_ARG_NUM_REG 5
366 #define FP_ARG_NUM_REG (TARGET_64BIT ? 4 : 2)
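/* These correspond to GPRs r2-r6 and, under the s390 ELF ABIs, FPRs
   f0/f2 in 31-bit mode or f0/f2/f4/f6 in 64-bit mode.  */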
367
368 /* A couple of shortcuts. */
369 #define CONST_OK_FOR_J(x) \
370 CONST_OK_FOR_CONSTRAINT_P((x), 'J', "J")
371 #define CONST_OK_FOR_K(x) \
372 CONST_OK_FOR_CONSTRAINT_P((x), 'K', "K")
373 #define CONST_OK_FOR_Os(x) \
374 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
375 #define CONST_OK_FOR_Op(x) \
376 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
377 #define CONST_OK_FOR_On(x) \
378 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")
379
380 #define REGNO_PAIR_OK(REGNO, MODE) \
381 (HARD_REGNO_NREGS ((REGNO), (MODE)) == 1 || !((REGNO) & 1))
382
383 /* That's the read ahead of the dynamic branch prediction unit in
384 bytes on a z10 (or higher) CPU. */
385 #define PREDICT_DISTANCE (TARGET_Z10 ? 384 : 2048)
386
387 /* Return the alignment for LABEL. We default to the -falign-labels
388 value except for the literal pool base label. */
389 int
390 s390_label_align (rtx label)
391 {
392 rtx prev_insn = prev_active_insn (label);
393
394 if (prev_insn == NULL_RTX)
395 goto old;
396
397 prev_insn = single_set (prev_insn);
398
399 if (prev_insn == NULL_RTX)
400 goto old;
401
402 prev_insn = SET_SRC (prev_insn);
403
404 /* Don't align literal pool base labels. */
405 if (GET_CODE (prev_insn) == UNSPEC
406 && XINT (prev_insn, 1) == UNSPEC_MAIN_BASE)
407 return 0;
408
409 old:
410 return align_labels_log;
411 }
412
413 static enum machine_mode
414 s390_libgcc_cmp_return_mode (void)
415 {
416 return TARGET_64BIT ? DImode : SImode;
417 }
418
419 static enum machine_mode
420 s390_libgcc_shift_count_mode (void)
421 {
422 return TARGET_64BIT ? DImode : SImode;
423 }
424
425 static enum machine_mode
426 s390_unwind_word_mode (void)
427 {
428 return TARGET_64BIT ? DImode : SImode;
429 }
430
431 /* Return true if the back end supports mode MODE. */
432 static bool
433 s390_scalar_mode_supported_p (enum machine_mode mode)
434 {
435 /* In contrast to the default implementation, reject TImode constants on
436 31-bit TARGET_ZARCH for ABI compliance. */
437 if (!TARGET_64BIT && TARGET_ZARCH && mode == TImode)
438 return false;
439
440 if (DECIMAL_FLOAT_MODE_P (mode))
441 return default_decimal_float_supported_p ();
442
443 return default_scalar_mode_supported_p (mode);
444 }
445
446 /* Set the has_landing_pad_p flag in struct machine_function to VALUE. */
447
448 void
449 s390_set_has_landing_pad_p (bool value)
450 {
451 cfun->machine->has_landing_pad_p = value;
452 }
453
454 /* If two condition code modes are compatible, return a condition code
455 mode which is compatible with both. Otherwise, return
456 VOIDmode. */
457
458 static enum machine_mode
459 s390_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
460 {
461 if (m1 == m2)
462 return m1;
463
464 switch (m1)
465 {
466 case CCZmode:
467 if (m2 == CCUmode || m2 == CCTmode || m2 == CCZ1mode
468 || m2 == CCSmode || m2 == CCSRmode || m2 == CCURmode)
469 return m2;
470 return VOIDmode;
471
472 case CCSmode:
473 case CCUmode:
474 case CCTmode:
475 case CCSRmode:
476 case CCURmode:
477 case CCZ1mode:
478 if (m2 == CCZmode)
479 return m1;
480
481 return VOIDmode;
482
483 default:
484 return VOIDmode;
485 }
486 return VOIDmode;
487 }
488
489 /* Return true if SET either doesn't set the CC register, or else
490 the source and destination have matching CC modes and that
491 CC mode is at least as constrained as REQ_MODE. */
492
493 static bool
494 s390_match_ccmode_set (rtx set, enum machine_mode req_mode)
495 {
496 enum machine_mode set_mode;
497
498 gcc_assert (GET_CODE (set) == SET);
499
500 if (GET_CODE (SET_DEST (set)) != REG || !CC_REGNO_P (REGNO (SET_DEST (set))))
501 return 1;
502
503 set_mode = GET_MODE (SET_DEST (set));
504 switch (set_mode)
505 {
506 case CCSmode:
507 case CCSRmode:
508 case CCUmode:
509 case CCURmode:
510 case CCLmode:
511 case CCL1mode:
512 case CCL2mode:
513 case CCL3mode:
514 case CCT1mode:
515 case CCT2mode:
516 case CCT3mode:
517 if (req_mode != set_mode)
518 return 0;
519 break;
520
521 case CCZmode:
522 if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode
523 && req_mode != CCSRmode && req_mode != CCURmode)
524 return 0;
525 break;
526
527 case CCAPmode:
528 case CCANmode:
529 if (req_mode != CCAmode)
530 return 0;
531 break;
532
533 default:
534 gcc_unreachable ();
535 }
536
537 return (GET_MODE (SET_SRC (set)) == set_mode);
538 }
539
540 /* Return true if every SET in INSN that sets the CC register
541 has source and destination with matching CC modes and that
542 CC mode is at least as constrained as REQ_MODE.
543 If REQ_MODE is VOIDmode, always return false. */
544
545 bool
546 s390_match_ccmode (rtx insn, enum machine_mode req_mode)
547 {
548 int i;
549
550 /* s390_tm_ccmode returns VOIDmode to indicate failure. */
551 if (req_mode == VOIDmode)
552 return false;
553
554 if (GET_CODE (PATTERN (insn)) == SET)
555 return s390_match_ccmode_set (PATTERN (insn), req_mode);
556
557 if (GET_CODE (PATTERN (insn)) == PARALLEL)
558 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
559 {
560 rtx set = XVECEXP (PATTERN (insn), 0, i);
561 if (GET_CODE (set) == SET)
562 if (!s390_match_ccmode_set (set, req_mode))
563 return false;
564 }
565
566 return true;
567 }
568
569 /* If a test-under-mask instruction can be used to implement
570 (compare (and ... OP1) OP2), return the CC mode required
571 to do that. Otherwise, return VOIDmode.
572 MIXED is true if the instruction can distinguish between
573 CC1 and CC2 for mixed selected bits (TMxx); it is false
574 if the instruction cannot (TM). */
575
576 enum machine_mode
577 s390_tm_ccmode (rtx op1, rtx op2, bool mixed)
578 {
579 int bit0, bit1;
580
581 /* ??? Fixme: should work on CONST_DOUBLE as well. */
582 if (GET_CODE (op1) != CONST_INT || GET_CODE (op2) != CONST_INT)
583 return VOIDmode;
584
585 /* Selected bits all zero: CC0.
586 e.g.: int a; if ((a & (16 + 128)) == 0) */
587 if (INTVAL (op2) == 0)
588 return CCTmode;
589
590 /* Selected bits all one: CC3.
591 e.g.: int a; if ((a & (16 + 128)) == 16 + 128) */
592 if (INTVAL (op2) == INTVAL (op1))
593 return CCT3mode;
594
595 /* Exactly two bits selected, mixed zeroes and ones: CC1 or CC2. e.g.:
596 int a;
597 if ((a & (16 + 128)) == 16) -> CCT1
598 if ((a & (16 + 128)) == 128) -> CCT2 */
599 if (mixed)
600 {
601 bit1 = exact_log2 (INTVAL (op2));
602 bit0 = exact_log2 (INTVAL (op1) ^ INTVAL (op2));
603 if (bit0 != -1 && bit1 != -1)
604 return bit0 > bit1 ? CCT1mode : CCT2mode;
605 }
606
607 return VOIDmode;
608 }
609
610 /* Given a comparison code OP (EQ, NE, etc.) and the operands
611 OP0 and OP1 of a COMPARE, return the mode to be used for the
612 comparison. */
613
614 enum machine_mode
615 s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
616 {
617 switch (code)
618 {
619 case EQ:
620 case NE:
621 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
622 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
623 return CCAPmode;
624 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
625 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
626 return CCAPmode;
627 if ((GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
628 || GET_CODE (op1) == NEG)
629 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
630 return CCLmode;
631
632 if (GET_CODE (op0) == AND)
633 {
634 /* Check whether we can potentially do it via TM. */
635 enum machine_mode ccmode;
636 ccmode = s390_tm_ccmode (XEXP (op0, 1), op1, 1);
637 if (ccmode != VOIDmode)
638 {
639 /* Relax CCTmode to CCZmode to allow fall-back to AND
640 if that turns out to be beneficial. */
641 return ccmode == CCTmode ? CCZmode : ccmode;
642 }
643 }
644
645 if (register_operand (op0, HImode)
646 && GET_CODE (op1) == CONST_INT
647 && (INTVAL (op1) == -1 || INTVAL (op1) == 65535))
648 return CCT3mode;
649 if (register_operand (op0, QImode)
650 && GET_CODE (op1) == CONST_INT
651 && (INTVAL (op1) == -1 || INTVAL (op1) == 255))
652 return CCT3mode;
653
654 return CCZmode;
655
656 case LE:
657 case LT:
658 case GE:
659 case GT:
660 /* The only overflow condition of NEG and ABS happens when
661 the operand is INT_MIN: the mathematically positive result
662 overflows and wraps back to a negative value. Using CCAP
663 mode the resulting cc can be used for comparisons. */
664 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
665 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
666 return CCAPmode;
667
668 /* If a constant is involved in an add instruction, it is possible to use
669 the resulting cc for comparisons with zero. Knowing the sign of the
670 constant makes the overflow behavior predictable. e.g.:
671 int a, b; if ((b = a + c) > 0)
672 with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP */
673 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
674 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
675 {
676 if (INTVAL (XEXP((op0), 1)) < 0)
677 return CCANmode;
678 else
679 return CCAPmode;
680 }
681 /* Fall through. */
682 case UNORDERED:
683 case ORDERED:
684 case UNEQ:
685 case UNLE:
686 case UNLT:
687 case UNGE:
688 case UNGT:
689 case LTGT:
690 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
691 && GET_CODE (op1) != CONST_INT)
692 return CCSRmode;
693 return CCSmode;
694
695 case LTU:
696 case GEU:
697 if (GET_CODE (op0) == PLUS
698 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
699 return CCL1mode;
700
701 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
702 && GET_CODE (op1) != CONST_INT)
703 return CCURmode;
704 return CCUmode;
705
706 case LEU:
707 case GTU:
708 if (GET_CODE (op0) == MINUS
709 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
710 return CCL2mode;
711
712 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
713 && GET_CODE (op1) != CONST_INT)
714 return CCURmode;
715 return CCUmode;
716
717 default:
718 gcc_unreachable ();
719 }
720 }
721
722 /* Replace the comparison OP0 CODE OP1 by a semantically equivalent one
723 that we can implement more efficiently. */
724
725 void
726 s390_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1)
727 {
728 /* Convert ZERO_EXTRACT back to AND to enable TM patterns. */
729 if ((*code == EQ || *code == NE)
730 && *op1 == const0_rtx
731 && GET_CODE (*op0) == ZERO_EXTRACT
732 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
733 && GET_CODE (XEXP (*op0, 2)) == CONST_INT
734 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
735 {
736 rtx inner = XEXP (*op0, 0);
737 HOST_WIDE_INT modesize = GET_MODE_BITSIZE (GET_MODE (inner));
738 HOST_WIDE_INT len = INTVAL (XEXP (*op0, 1));
739 HOST_WIDE_INT pos = INTVAL (XEXP (*op0, 2));
740
741 if (len > 0 && len < modesize
742 && pos >= 0 && pos + len <= modesize
743 && modesize <= HOST_BITS_PER_WIDE_INT)
744 {
745 unsigned HOST_WIDE_INT block;
746 block = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
747 block <<= modesize - pos - len;
748
749 *op0 = gen_rtx_AND (GET_MODE (inner), inner,
750 gen_int_mode (block, GET_MODE (inner)));
751 }
752 }
753
754 /* Narrow AND of memory against immediate to enable TM. */
755 if ((*code == EQ || *code == NE)
756 && *op1 == const0_rtx
757 && GET_CODE (*op0) == AND
758 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
759 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
760 {
761 rtx inner = XEXP (*op0, 0);
762 rtx mask = XEXP (*op0, 1);
763
764 /* Ignore paradoxical SUBREGs if all extra bits are masked out. */
765 if (GET_CODE (inner) == SUBREG
766 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (inner)))
767 && (GET_MODE_SIZE (GET_MODE (inner))
768 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
769 && ((INTVAL (mask)
770 & GET_MODE_MASK (GET_MODE (inner))
771 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
772 == 0))
773 inner = SUBREG_REG (inner);
774
775 /* Do not change volatile MEMs. */
776 if (MEM_P (inner) && !MEM_VOLATILE_P (inner))
777 {
778 int part = s390_single_part (XEXP (*op0, 1),
779 GET_MODE (inner), QImode, 0);
780 if (part >= 0)
781 {
782 mask = gen_int_mode (s390_extract_part (mask, QImode, 0), QImode);
783 inner = adjust_address_nv (inner, QImode, part);
784 *op0 = gen_rtx_AND (QImode, inner, mask);
785 }
786 }
787 }
788
789 /* Narrow comparisons against 0xffff to HImode if possible. */
790 if ((*code == EQ || *code == NE)
791 && GET_CODE (*op1) == CONST_INT
792 && INTVAL (*op1) == 0xffff
793 && SCALAR_INT_MODE_P (GET_MODE (*op0))
794 && (nonzero_bits (*op0, GET_MODE (*op0))
795 & ~(unsigned HOST_WIDE_INT) 0xffff) == 0)
796 {
797 *op0 = gen_lowpart (HImode, *op0);
798 *op1 = constm1_rtx;
799 }
800
801 /* Remove redundant UNSPEC_CCU_TO_INT conversions if possible. */
802 if (GET_CODE (*op0) == UNSPEC
803 && XINT (*op0, 1) == UNSPEC_CCU_TO_INT
804 && XVECLEN (*op0, 0) == 1
805 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCUmode
806 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
807 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
808 && *op1 == const0_rtx)
809 {
810 enum rtx_code new_code = UNKNOWN;
811 switch (*code)
812 {
813 case EQ: new_code = EQ; break;
814 case NE: new_code = NE; break;
815 case LT: new_code = GTU; break;
816 case GT: new_code = LTU; break;
817 case LE: new_code = GEU; break;
818 case GE: new_code = LEU; break;
819 default: break;
820 }
821
822 if (new_code != UNKNOWN)
823 {
824 *op0 = XVECEXP (*op0, 0, 0);
825 *code = new_code;
826 }
827 }
828
829 /* Remove redundant UNSPEC_CCZ_TO_INT conversions if possible. */
830 if (GET_CODE (*op0) == UNSPEC
831 && XINT (*op0, 1) == UNSPEC_CCZ_TO_INT
832 && XVECLEN (*op0, 0) == 1
833 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCZmode
834 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
835 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
836 && *op1 == const0_rtx)
837 {
838 enum rtx_code new_code = UNKNOWN;
839 switch (*code)
840 {
841 case EQ: new_code = EQ; break;
842 case NE: new_code = NE; break;
843 default: break;
844 }
845
846 if (new_code != UNKNOWN)
847 {
848 *op0 = XVECEXP (*op0, 0, 0);
849 *code = new_code;
850 }
851 }
852
853 /* Simplify cascaded EQ, NE with const0_rtx. */
854 if ((*code == NE || *code == EQ)
855 && (GET_CODE (*op0) == EQ || GET_CODE (*op0) == NE)
856 && GET_MODE (*op0) == SImode
857 && GET_MODE (XEXP (*op0, 0)) == CCZ1mode
858 && REG_P (XEXP (*op0, 0))
859 && XEXP (*op0, 1) == const0_rtx
860 && *op1 == const0_rtx)
861 {
862 if ((*code == EQ && GET_CODE (*op0) == NE)
863 || (*code == NE && GET_CODE (*op0) == EQ))
864 *code = EQ;
865 else
866 *code = NE;
867 *op0 = XEXP (*op0, 0);
868 }
869
870 /* Prefer register over memory as first operand. */
871 if (MEM_P (*op0) && REG_P (*op1))
872 {
873 rtx tem = *op0; *op0 = *op1; *op1 = tem;
874 *code = swap_condition (*code);
875 }
876 }
877
878 /* Emit a compare instruction suitable to implement the comparison
879 OP0 CODE OP1. Return the correct condition RTL to be placed in
880 the IF_THEN_ELSE of the conditional branch testing the result. */
881
882 rtx
883 s390_emit_compare (enum rtx_code code, rtx op0, rtx op1)
884 {
885 enum machine_mode mode = s390_select_ccmode (code, op0, op1);
886 rtx cc;
887
888 /* Do not output a redundant compare instruction if a compare_and_swap
889 pattern already computed the result and the machine modes are compatible. */
890 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
891 {
892 gcc_assert (s390_cc_modes_compatible (GET_MODE (op0), mode)
893 == GET_MODE (op0));
894 cc = op0;
895 }
896 else
897 {
898 cc = gen_rtx_REG (mode, CC_REGNUM);
899 emit_insn (gen_rtx_SET (VOIDmode, cc, gen_rtx_COMPARE (mode, op0, op1)));
900 }
901
902 return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
903 }
904
905 /* Emit a SImode compare and swap instruction setting MEM to NEW_RTX if OLD
906 matches CMP.
907 Return the correct condition RTL to be placed in the IF_THEN_ELSE of the
908 conditional branch testing the result. */
909
910 static rtx
911 s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem, rtx cmp, rtx new_rtx)
912 {
913 emit_insn (gen_sync_compare_and_swapsi (old, mem, cmp, new_rtx));
914 return s390_emit_compare (code, gen_rtx_REG (CCZ1mode, CC_REGNUM), const0_rtx);
915 }
916
917 /* Emit a jump instruction to TARGET. If COND is NULL_RTX, emit an
918 unconditional jump, else a conditional jump under condition COND. */
919
920 void
921 s390_emit_jump (rtx target, rtx cond)
922 {
923 rtx insn;
924
925 target = gen_rtx_LABEL_REF (VOIDmode, target);
926 if (cond)
927 target = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, target, pc_rtx);
928
929 insn = gen_rtx_SET (VOIDmode, pc_rtx, target);
930 emit_jump_insn (insn);
931 }
932
933 /* Return branch condition mask to implement a branch
934 specified by CODE. Return -1 for invalid comparisons. */
935
936 int
937 s390_branch_condition_mask (rtx code)
938 {
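  /* The four bits of the branch condition mask returned below; the bit
     for CC0 is the most significant one.  */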
939 const int CC0 = 1 << 3;
940 const int CC1 = 1 << 2;
941 const int CC2 = 1 << 1;
942 const int CC3 = 1 << 0;
943
944 gcc_assert (GET_CODE (XEXP (code, 0)) == REG);
945 gcc_assert (REGNO (XEXP (code, 0)) == CC_REGNUM);
946 gcc_assert (XEXP (code, 1) == const0_rtx);
947
948 switch (GET_MODE (XEXP (code, 0)))
949 {
950 case CCZmode:
951 case CCZ1mode:
952 switch (GET_CODE (code))
953 {
954 case EQ: return CC0;
955 case NE: return CC1 | CC2 | CC3;
956 default: return -1;
957 }
958 break;
959
960 case CCT1mode:
961 switch (GET_CODE (code))
962 {
963 case EQ: return CC1;
964 case NE: return CC0 | CC2 | CC3;
965 default: return -1;
966 }
967 break;
968
969 case CCT2mode:
970 switch (GET_CODE (code))
971 {
972 case EQ: return CC2;
973 case NE: return CC0 | CC1 | CC3;
974 default: return -1;
975 }
976 break;
977
978 case CCT3mode:
979 switch (GET_CODE (code))
980 {
981 case EQ: return CC3;
982 case NE: return CC0 | CC1 | CC2;
983 default: return -1;
984 }
985 break;
986
987 case CCLmode:
988 switch (GET_CODE (code))
989 {
990 case EQ: return CC0 | CC2;
991 case NE: return CC1 | CC3;
992 default: return -1;
993 }
994 break;
995
996 case CCL1mode:
997 switch (GET_CODE (code))
998 {
999 case LTU: return CC2 | CC3; /* carry */
1000 case GEU: return CC0 | CC1; /* no carry */
1001 default: return -1;
1002 }
1003 break;
1004
1005 case CCL2mode:
1006 switch (GET_CODE (code))
1007 {
1008 case GTU: return CC0 | CC1; /* borrow */
1009 case LEU: return CC2 | CC3; /* no borrow */
1010 default: return -1;
1011 }
1012 break;
1013
1014 case CCL3mode:
1015 switch (GET_CODE (code))
1016 {
1017 case EQ: return CC0 | CC2;
1018 case NE: return CC1 | CC3;
1019 case LTU: return CC1;
1020 case GTU: return CC3;
1021 case LEU: return CC1 | CC2;
1022 case GEU: return CC2 | CC3;
1023 default: return -1;
1024 }
1025
1026 case CCUmode:
1027 switch (GET_CODE (code))
1028 {
1029 case EQ: return CC0;
1030 case NE: return CC1 | CC2 | CC3;
1031 case LTU: return CC1;
1032 case GTU: return CC2;
1033 case LEU: return CC0 | CC1;
1034 case GEU: return CC0 | CC2;
1035 default: return -1;
1036 }
1037 break;
1038
1039 case CCURmode:
1040 switch (GET_CODE (code))
1041 {
1042 case EQ: return CC0;
1043 case NE: return CC2 | CC1 | CC3;
1044 case LTU: return CC2;
1045 case GTU: return CC1;
1046 case LEU: return CC0 | CC2;
1047 case GEU: return CC0 | CC1;
1048 default: return -1;
1049 }
1050 break;
1051
1052 case CCAPmode:
1053 switch (GET_CODE (code))
1054 {
1055 case EQ: return CC0;
1056 case NE: return CC1 | CC2 | CC3;
1057 case LT: return CC1 | CC3;
1058 case GT: return CC2;
1059 case LE: return CC0 | CC1 | CC3;
1060 case GE: return CC0 | CC2;
1061 default: return -1;
1062 }
1063 break;
1064
1065 case CCANmode:
1066 switch (GET_CODE (code))
1067 {
1068 case EQ: return CC0;
1069 case NE: return CC1 | CC2 | CC3;
1070 case LT: return CC1;
1071 case GT: return CC2 | CC3;
1072 case LE: return CC0 | CC1;
1073 case GE: return CC0 | CC2 | CC3;
1074 default: return -1;
1075 }
1076 break;
1077
1078 case CCSmode:
1079 switch (GET_CODE (code))
1080 {
1081 case EQ: return CC0;
1082 case NE: return CC1 | CC2 | CC3;
1083 case LT: return CC1;
1084 case GT: return CC2;
1085 case LE: return CC0 | CC1;
1086 case GE: return CC0 | CC2;
1087 case UNORDERED: return CC3;
1088 case ORDERED: return CC0 | CC1 | CC2;
1089 case UNEQ: return CC0 | CC3;
1090 case UNLT: return CC1 | CC3;
1091 case UNGT: return CC2 | CC3;
1092 case UNLE: return CC0 | CC1 | CC3;
1093 case UNGE: return CC0 | CC2 | CC3;
1094 case LTGT: return CC1 | CC2;
1095 default: return -1;
1096 }
1097 break;
1098
1099 case CCSRmode:
1100 switch (GET_CODE (code))
1101 {
1102 case EQ: return CC0;
1103 case NE: return CC2 | CC1 | CC3;
1104 case LT: return CC2;
1105 case GT: return CC1;
1106 case LE: return CC0 | CC2;
1107 case GE: return CC0 | CC1;
1108 case UNORDERED: return CC3;
1109 case ORDERED: return CC0 | CC2 | CC1;
1110 case UNEQ: return CC0 | CC3;
1111 case UNLT: return CC2 | CC3;
1112 case UNGT: return CC1 | CC3;
1113 case UNLE: return CC0 | CC2 | CC3;
1114 case UNGE: return CC0 | CC1 | CC3;
1115 case LTGT: return CC2 | CC1;
1116 default: return -1;
1117 }
1118 break;
1119
1120 default:
1121 return -1;
1122 }
1123 }
1124
1125
1126 /* Return branch condition mask to implement a compare and branch
1127 specified by CODE. Return -1 for invalid comparisons. */
1128
1129 int
1130 s390_compare_and_branch_condition_mask (rtx code)
1131 {
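  /* After an integer compare, CC0 means the operands are equal, CC1 the
     first operand is low, and CC2 the first operand is high; CC3 cannot
     occur, so no mask bit is defined for it.  */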
1132 const int CC0 = 1 << 3;
1133 const int CC1 = 1 << 2;
1134 const int CC2 = 1 << 1;
1135
1136 switch (GET_CODE (code))
1137 {
1138 case EQ:
1139 return CC0;
1140 case NE:
1141 return CC1 | CC2;
1142 case LT:
1143 case LTU:
1144 return CC1;
1145 case GT:
1146 case GTU:
1147 return CC2;
1148 case LE:
1149 case LEU:
1150 return CC0 | CC1;
1151 case GE:
1152 case GEU:
1153 return CC0 | CC2;
1154 default:
1155 gcc_unreachable ();
1156 }
1157 return -1;
1158 }
1159
1160 /* If INV is false, return assembler mnemonic string to implement
1161 a branch specified by CODE. If INV is true, return mnemonic
1162 for the corresponding inverted branch. */
1163
1164 static const char *
1165 s390_branch_condition_mnemonic (rtx code, int inv)
1166 {
1167 int mask;
1168
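  /* Extended branch mnemonics indexed by the 4-bit condition mask.
     Index 0 (branch never) and index 15 (branch always) are unused here,
     as the assertion below guarantees.  */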
1169 static const char *const mnemonic[16] =
1170 {
1171 NULL, "o", "h", "nle",
1172 "l", "nhe", "lh", "ne",
1173 "e", "nlh", "he", "nl",
1174 "le", "nh", "no", NULL
1175 };
1176
1177 if (GET_CODE (XEXP (code, 0)) == REG
1178 && REGNO (XEXP (code, 0)) == CC_REGNUM
1179 && XEXP (code, 1) == const0_rtx)
1180 mask = s390_branch_condition_mask (code);
1181 else
1182 mask = s390_compare_and_branch_condition_mask (code);
1183
1184 gcc_assert (mask >= 0);
1185
1186 if (inv)
1187 mask ^= 15;
1188
1189 gcc_assert (mask >= 1 && mask <= 14);
1190
1191 return mnemonic[mask];
1192 }
1193
1194 /* Return the part of op which has a value different from def.
1195 The size of the part is determined by mode.
1196 Use this function only if you already know that op really
1197 contains such a part. */
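/* For example (illustrative): OP = 0x12340000 with MODE = HImode and
   DEF = 0 returns 0x1234, the lowest-order halfword part that differs
   from DEF.  */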
1198
1199 unsigned HOST_WIDE_INT
1200 s390_extract_part (rtx op, enum machine_mode mode, int def)
1201 {
1202 unsigned HOST_WIDE_INT value = 0;
1203 int max_parts = HOST_BITS_PER_WIDE_INT / GET_MODE_BITSIZE (mode);
1204 int part_bits = GET_MODE_BITSIZE (mode);
1205 unsigned HOST_WIDE_INT part_mask
1206 = ((unsigned HOST_WIDE_INT)1 << part_bits) - 1;
1207 int i;
1208
1209 for (i = 0; i < max_parts; i++)
1210 {
1211 if (i == 0)
1212 value = (unsigned HOST_WIDE_INT) INTVAL (op);
1213 else
1214 value >>= part_bits;
1215
1216 if ((value & part_mask) != (def & part_mask))
1217 return value & part_mask;
1218 }
1219
1220 gcc_unreachable ();
1221 }
1222
1223 /* If OP is an integer constant of mode MODE with exactly one
1224 part of mode PART_MODE unequal to DEF, return the number of that
1225 part. Otherwise, return -1. */
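/* For example (illustrative): OP = 0x12340000 in SImode with PART_MODE
   HImode and DEF = 0 returns 0; the returned part numbers count from the
   most significant part.  */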
1226
1227 int
1228 s390_single_part (rtx op,
1229 enum machine_mode mode,
1230 enum machine_mode part_mode,
1231 int def)
1232 {
1233 unsigned HOST_WIDE_INT value = 0;
1234 int n_parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (part_mode);
1235 unsigned HOST_WIDE_INT part_mask
1236 = ((unsigned HOST_WIDE_INT)1 << GET_MODE_BITSIZE (part_mode)) - 1;
1237 int i, part = -1;
1238
1239 if (GET_CODE (op) != CONST_INT)
1240 return -1;
1241
1242 for (i = 0; i < n_parts; i++)
1243 {
1244 if (i == 0)
1245 value = (unsigned HOST_WIDE_INT) INTVAL (op);
1246 else
1247 value >>= GET_MODE_BITSIZE (part_mode);
1248
1249 if ((value & part_mask) != (def & part_mask))
1250 {
1251 if (part != -1)
1252 return -1;
1253 else
1254 part = i;
1255 }
1256 }
1257 return part == -1 ? -1 : n_parts - 1 - part;
1258 }
1259
1260 /* Return true if IN contains a contiguous bitfield in the lower SIZE
1261 bits and no other bits are set in IN. POS and LENGTH can be used
1262 to obtain the start position and the length of the bitfield.
1263
1264 POS gives the position of the first bit of the bitfield counting
1265 from the lowest order bit starting with zero. In order to use this
1266 value for S/390 instructions this has to be converted to "bits big
1267 endian" style. */
1268
1269 bool
1270 s390_contiguous_bitmask_p (unsigned HOST_WIDE_INT in, int size,
1271 int *pos, int *length)
1272 {
1273 int tmp_pos = 0;
1274 int tmp_length = 0;
1275 int i;
1276 unsigned HOST_WIDE_INT mask = 1ULL;
1277 bool contiguous = false;
1278
1279 for (i = 0; i < size; mask <<= 1, i++)
1280 {
1281 if (contiguous)
1282 {
1283 if (mask & in)
1284 tmp_length++;
1285 else
1286 break;
1287 }
1288 else
1289 {
1290 if (mask & in)
1291 {
1292 contiguous = true;
1293 tmp_length++;
1294 }
1295 else
1296 tmp_pos++;
1297 }
1298 }
1299
1300 if (!tmp_length)
1301 return false;
1302
1303 /* Calculate a mask for all bits beyond the contiguous bits. */
1304 mask = (-1LL & ~(((1ULL << (tmp_length + tmp_pos - 1)) << 1) - 1));
1305
1306 if (mask & in)
1307 return false;
1308
1309 if (tmp_length + tmp_pos - 1 > size)
1310 return false;
1311
1312 if (length)
1313 *length = tmp_length;
1314
1315 if (pos)
1316 *pos = tmp_pos;
1317
1318 return true;
1319 }
1320
1321 /* Check whether we can (and want to) split a double-word
1322 move in mode MODE from SRC to DST into two single-word
1323 moves, moving the subword FIRST_SUBWORD first. */
1324
1325 bool
1326 s390_split_ok_p (rtx dst, rtx src, enum machine_mode mode, int first_subword)
1327 {
1328 /* Floating point registers cannot be split. */
1329 if (FP_REG_P (src) || FP_REG_P (dst))
1330 return false;
1331
1332 /* We don't need to split if operands are directly accessible. */
1333 if (s_operand (src, mode) || s_operand (dst, mode))
1334 return false;
1335
1336 /* Non-offsettable memory references cannot be split. */
1337 if ((GET_CODE (src) == MEM && !offsettable_memref_p (src))
1338 || (GET_CODE (dst) == MEM && !offsettable_memref_p (dst)))
1339 return false;
1340
1341 /* Moving the first subword must not clobber a register
1342 needed to move the second subword. */
1343 if (register_operand (dst, mode))
1344 {
1345 rtx subreg = operand_subword (dst, first_subword, 0, mode);
1346 if (reg_overlap_mentioned_p (subreg, src))
1347 return false;
1348 }
1349
1350 return true;
1351 }
1352
1353 /* Return true if it can be proven that [MEM1, MEM1 + SIZE]
1354 and [MEM2, MEM2 + SIZE] do overlap and false
1355 otherwise. */
1356
1357 bool
1358 s390_overlap_p (rtx mem1, rtx mem2, HOST_WIDE_INT size)
1359 {
1360 rtx addr1, addr2, addr_delta;
1361 HOST_WIDE_INT delta;
1362
1363 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
1364 return true;
1365
1366 if (size == 0)
1367 return false;
1368
1369 addr1 = XEXP (mem1, 0);
1370 addr2 = XEXP (mem2, 0);
1371
1372 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
1373
1374 /* This overlapping check is used by peepholes merging memory block operations.
1375 Overlapping operations would otherwise be recognized by the S/390 hardware
1376 and would fall back to a slower implementation. Allowing overlapping
1377 operations would lead to slow code but not to wrong code. Therefore we are
1378 somewhat optimistic if we cannot prove that the memory blocks are
1379 overlapping.
1380 That's why we return false here although this may accept operations on
1381 overlapping memory areas. */
1382 if (!addr_delta || GET_CODE (addr_delta) != CONST_INT)
1383 return false;
1384
1385 delta = INTVAL (addr_delta);
1386
1387 if (delta == 0
1388 || (delta > 0 && delta < size)
1389 || (delta < 0 && -delta < size))
1390 return true;
1391
1392 return false;
1393 }
1394
1395 /* Check whether the address of memory reference MEM2 equals exactly
1396 the address of memory reference MEM1 plus DELTA. Return true if
1397 we can prove this to be the case, false otherwise. */
1398
1399 bool
1400 s390_offset_p (rtx mem1, rtx mem2, rtx delta)
1401 {
1402 rtx addr1, addr2, addr_delta;
1403
1404 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
1405 return false;
1406
1407 addr1 = XEXP (mem1, 0);
1408 addr2 = XEXP (mem2, 0);
1409
1410 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
1411 if (!addr_delta || !rtx_equal_p (addr_delta, delta))
1412 return false;
1413
1414 return true;
1415 }
1416
1417 /* Expand logical operator CODE in mode MODE with operands OPERANDS. */
1418
1419 void
1420 s390_expand_logical_operator (enum rtx_code code, enum machine_mode mode,
1421 rtx *operands)
1422 {
1423 enum machine_mode wmode = mode;
1424 rtx dst = operands[0];
1425 rtx src1 = operands[1];
1426 rtx src2 = operands[2];
1427 rtx op, clob, tem;
1428
1429 /* If we cannot handle the operation directly, use a temp register. */
1430 if (!s390_logical_operator_ok_p (operands))
1431 dst = gen_reg_rtx (mode);
1432
1433 /* QImode and HImode patterns make sense only if we have a destination
1434 in memory. Otherwise perform the operation in SImode. */
1435 if ((mode == QImode || mode == HImode) && GET_CODE (dst) != MEM)
1436 wmode = SImode;
1437
1438 /* Widen operands if required. */
1439 if (mode != wmode)
1440 {
1441 if (GET_CODE (dst) == SUBREG
1442 && (tem = simplify_subreg (wmode, dst, mode, 0)) != 0)
1443 dst = tem;
1444 else if (REG_P (dst))
1445 dst = gen_rtx_SUBREG (wmode, dst, 0);
1446 else
1447 dst = gen_reg_rtx (wmode);
1448
1449 if (GET_CODE (src1) == SUBREG
1450 && (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)
1451 src1 = tem;
1452 else if (GET_MODE (src1) != VOIDmode)
1453 src1 = gen_rtx_SUBREG (wmode, force_reg (mode, src1), 0);
1454
1455 if (GET_CODE (src2) == SUBREG
1456 && (tem = simplify_subreg (wmode, src2, mode, 0)) != 0)
1457 src2 = tem;
1458 else if (GET_MODE (src2) != VOIDmode)
1459 src2 = gen_rtx_SUBREG (wmode, force_reg (mode, src2), 0);
1460 }
1461
1462 /* Emit the instruction. */
1463 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, wmode, src1, src2));
1464 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
1465 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
1466
1467 /* Fix up the destination if needed. */
1468 if (dst != operands[0])
1469 emit_move_insn (operands[0], gen_lowpart (mode, dst));
1470 }
1471
1472 /* Check whether OPERANDS are OK for a logical operation (AND, IOR, XOR). */
1473
1474 bool
1475 s390_logical_operator_ok_p (rtx *operands)
1476 {
1477 /* If the destination operand is in memory, it needs to coincide
1478 with one of the source operands. After reload, it has to be
1479 the first source operand. */
1480 if (GET_CODE (operands[0]) == MEM)
1481 return rtx_equal_p (operands[0], operands[1])
1482 || (!reload_completed && rtx_equal_p (operands[0], operands[2]));
1483
1484 return true;
1485 }
1486
1487 /* Narrow logical operation CODE of memory operand MEMOP with immediate
1488 operand IMMOP to switch from SS to SI type instructions. */
1489
1490 void
1491 s390_narrow_logical_operator (enum rtx_code code, rtx *memop, rtx *immop)
1492 {
1493 int def = code == AND ? -1 : 0;
1494 HOST_WIDE_INT mask;
1495 int part;
1496
1497 gcc_assert (GET_CODE (*memop) == MEM);
1498 gcc_assert (!MEM_VOLATILE_P (*memop));
1499
1500 mask = s390_extract_part (*immop, QImode, def);
1501 part = s390_single_part (*immop, GET_MODE (*memop), QImode, def);
1502 gcc_assert (part >= 0);
1503
1504 *memop = adjust_address (*memop, QImode, part);
1505 *immop = gen_int_mode (mask, QImode);
1506 }
1507
1508
1509 /* How to allocate a 'struct machine_function'. */
1510
1511 static struct machine_function *
1512 s390_init_machine_status (void)
1513 {
1514 return ggc_alloc_cleared_machine_function ();
1515 }
1516
1517 /* Change optimizations to be performed, depending on the
1518 optimization level. */
1519
1520 static const struct default_options s390_option_optimization_table[] =
1521 {
1522 { OPT_LEVELS_1_PLUS, OPT_fomit_frame_pointer, NULL, 1 },
1523
1524 /* ??? There are apparently still problems with -fcaller-saves. */
1525 { OPT_LEVELS_ALL, OPT_fcaller_saves, NULL, 0 },
1526
1527 /* Use MVCLE instructions to decrease code size if requested. */
1528 { OPT_LEVELS_SIZE, OPT_mmvcle, NULL, 1 },
1529
1530 { OPT_LEVELS_NONE, 0, NULL, 0 }
1531 };
1532
1533 /* Implement TARGET_OPTION_INIT_STRUCT. */
1534
1535 static void
1536 s390_option_init_struct (struct gcc_options *opts)
1537 {
1538 /* By default, always emit DWARF-2 unwind info. This allows debugging
1539 without maintaining a stack frame back-chain. */
1540 opts->x_flag_asynchronous_unwind_tables = 1;
1541 }
1542
1543 /* Return true if ARG is the name of a processor. Set *TYPE and *FLAGS
1544 to the associated processor_type and processor_flags if so. */
1545
1546 static bool
1547 s390_handle_arch_option (const char *arg,
1548 enum processor_type *type,
1549 int *flags)
1550 {
1551 static struct pta
1552 {
1553 const char *const name; /* processor name or nickname. */
1554 const enum processor_type processor;
1555 const int flags; /* From enum processor_flags. */
1556 }
1557 const processor_alias_table[] =
1558 {
1559 {"g5", PROCESSOR_9672_G5, PF_IEEE_FLOAT},
1560 {"g6", PROCESSOR_9672_G6, PF_IEEE_FLOAT},
1561 {"z900", PROCESSOR_2064_Z900, PF_IEEE_FLOAT | PF_ZARCH},
1562 {"z990", PROCESSOR_2084_Z990, PF_IEEE_FLOAT | PF_ZARCH
1563 | PF_LONG_DISPLACEMENT},
1564 {"z9-109", PROCESSOR_2094_Z9_109, PF_IEEE_FLOAT | PF_ZARCH
1565 | PF_LONG_DISPLACEMENT | PF_EXTIMM},
1566 {"z9-ec", PROCESSOR_2094_Z9_109, PF_IEEE_FLOAT | PF_ZARCH
1567 | PF_LONG_DISPLACEMENT | PF_EXTIMM | PF_DFP },
1568 {"z10", PROCESSOR_2097_Z10, PF_IEEE_FLOAT | PF_ZARCH
1569 | PF_LONG_DISPLACEMENT | PF_EXTIMM | PF_DFP | PF_Z10},
1570 {"z196", PROCESSOR_2817_Z196, PF_IEEE_FLOAT | PF_ZARCH
1571 | PF_LONG_DISPLACEMENT | PF_EXTIMM | PF_DFP | PF_Z10 | PF_Z196 },
1572 };
1573 size_t i;
1574
1575 for (i = 0; i < ARRAY_SIZE (processor_alias_table); i++)
1576 if (strcmp (arg, processor_alias_table[i].name) == 0)
1577 {
1578 *type = processor_alias_table[i].processor;
1579 *flags = processor_alias_table[i].flags;
1580 return true;
1581 }
1582
1583 *type = PROCESSOR_max;
1584 *flags = 0;
1585 return false;
1586 }
1587
1588 /* Implement TARGET_HANDLE_OPTION. */
1589
1590 static bool
1591 s390_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
1592 {
1593 switch (code)
1594 {
1595 case OPT_march_:
1596 return s390_handle_arch_option (arg, &s390_arch, &s390_arch_flags);
1597
1598 case OPT_mstack_guard_:
1599 if (sscanf (arg, HOST_WIDE_INT_PRINT_DEC, &s390_stack_guard) != 1)
1600 return false;
1601 if (exact_log2 (s390_stack_guard) == -1)
1602 error ("stack guard value must be an exact power of 2");
1603 return true;
1604
1605 case OPT_mstack_size_:
1606 if (sscanf (arg, HOST_WIDE_INT_PRINT_DEC, &s390_stack_size) != 1)
1607 return false;
1608 if (exact_log2 (s390_stack_size) == -1)
1609 error ("stack size must be an exact power of 2");
1610 return true;
1611
1612 case OPT_mtune_:
1613 return s390_handle_arch_option (arg, &s390_tune, &s390_tune_flags);
1614
1615 case OPT_mwarn_framesize_:
1616 return sscanf (arg, HOST_WIDE_INT_PRINT_DEC, &s390_warn_framesize) == 1;
1617
1618 default:
1619 return true;
1620 }
1621 }
1622
1623 static void
1624 s390_option_override (void)
1625 {
1626 /* Set up function hooks. */
1627 init_machine_status = s390_init_machine_status;
1628
1629 /* Architecture mode defaults according to ABI. */
1630 if (!(target_flags_explicit & MASK_ZARCH))
1631 {
1632 if (TARGET_64BIT)
1633 target_flags |= MASK_ZARCH;
1634 else
1635 target_flags &= ~MASK_ZARCH;
1636 }
1637
1638 /* Determine processor architectural level. */
1639 if (!s390_arch_string)
1640 {
1641 s390_arch_string = TARGET_ZARCH ? "z900" : "g5";
1642 s390_handle_arch_option (s390_arch_string, &s390_arch, &s390_arch_flags);
1643 }
1644
1645 /* This check is triggered when the user specified a wrong -march=
1646 string and prevents subsequent error messages from being
1647 issued. */
1648 if (s390_arch == PROCESSOR_max)
1649 return;
1650
1651 /* Determine processor to tune for. */
1652 if (s390_tune == PROCESSOR_max)
1653 {
1654 s390_tune = s390_arch;
1655 s390_tune_flags = s390_arch_flags;
1656 }
1657
1658 /* Sanity checks. */
1659 if (TARGET_ZARCH && !TARGET_CPU_ZARCH)
1660 error ("z/Architecture mode not supported on %s", s390_arch_string);
1661 if (TARGET_64BIT && !TARGET_ZARCH)
1662 error ("64-bit ABI not supported in ESA/390 mode");
1663
1664 if (TARGET_HARD_DFP && !TARGET_DFP)
1665 {
1666 if (target_flags_explicit & MASK_HARD_DFP)
1667 {
1668 if (!TARGET_CPU_DFP)
1669 error ("hardware decimal floating point instructions"
1670 " not available on %s", s390_arch_string);
1671 if (!TARGET_ZARCH)
1672 error ("hardware decimal floating point instructions"
1673 " not available in ESA/390 mode");
1674 }
1675 else
1676 target_flags &= ~MASK_HARD_DFP;
1677 }
1678
1679 if ((target_flags_explicit & MASK_SOFT_FLOAT) && TARGET_SOFT_FLOAT)
1680 {
1681 if ((target_flags_explicit & MASK_HARD_DFP) && TARGET_HARD_DFP)
1682 error ("-mhard-dfp can%'t be used in conjunction with -msoft-float");
1683
1684 target_flags &= ~MASK_HARD_DFP;
1685 }
1686
1687 /* Set processor cost function. */
1688 switch (s390_tune)
1689 {
1690 case PROCESSOR_2084_Z990:
1691 s390_cost = &z990_cost;
1692 break;
1693 case PROCESSOR_2094_Z9_109:
1694 s390_cost = &z9_109_cost;
1695 break;
1696 case PROCESSOR_2097_Z10:
1697 s390_cost = &z10_cost;
break;
1698 case PROCESSOR_2817_Z196:
1699 s390_cost = &z196_cost;
1700 break;
1701 default:
1702 s390_cost = &z900_cost;
1703 }
1704
1705 if (TARGET_BACKCHAIN && TARGET_PACKED_STACK && TARGET_HARD_FLOAT)
1706 error ("-mbackchain -mpacked-stack -mhard-float are not supported "
1707 "in combination");
1708
1709 if (s390_stack_size)
1710 {
1711 if (s390_stack_guard >= s390_stack_size)
1712 error ("stack size must be greater than the stack guard value");
1713 else if (s390_stack_size > 1 << 16)
1714 error ("stack size must not be greater than 64k");
1715 }
1716 else if (s390_stack_guard)
1717 error ("-mstack-guard implies use of -mstack-size");
1718
1719 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
1720 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
1721 target_flags |= MASK_LONG_DOUBLE_128;
1722 #endif
1723
1724 if (s390_tune == PROCESSOR_2097_Z10
1725 || s390_tune == PROCESSOR_2817_Z196)
1726 {
1727 maybe_set_param_value (PARAM_MAX_UNROLLED_INSNS, 100,
1728 global_options.x_param_values,
1729 global_options_set.x_param_values);
1730 maybe_set_param_value (PARAM_MAX_UNROLL_TIMES, 32,
1731 global_options.x_param_values,
1732 global_options_set.x_param_values);
1733 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 2000,
1734 global_options.x_param_values,
1735 global_options_set.x_param_values);
1736 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEEL_TIMES, 64,
1737 global_options.x_param_values,
1738 global_options_set.x_param_values);
1739 }
1740
1741 maybe_set_param_value (PARAM_MAX_PENDING_LIST_LENGTH, 256,
1742 global_options.x_param_values,
1743 global_options_set.x_param_values);
1744 /* Values for loop prefetching. */
1745 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, 256,
1746 global_options.x_param_values,
1747 global_options_set.x_param_values);
1748 maybe_set_param_value (PARAM_L1_CACHE_SIZE, 128,
1749 global_options.x_param_values,
1750 global_options_set.x_param_values);
1751 /* s390 has more than 2 cache levels and their sizes are much larger.
1752 Since we are always running virtualized, assume that we only get a
1753 small part of the caches above L1. */
1754 maybe_set_param_value (PARAM_L2_CACHE_SIZE, 1500,
1755 global_options.x_param_values,
1756 global_options_set.x_param_values);
1757 maybe_set_param_value (PARAM_PREFETCH_MIN_INSN_TO_MEM_RATIO, 2,
1758 global_options.x_param_values,
1759 global_options_set.x_param_values);
1760 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6,
1761 global_options.x_param_values,
1762 global_options_set.x_param_values);
1763
1764 /* This cannot reside in s390_option_optimization_table since HAVE_prefetch
1765 requires the arch flags to be evaluated already. Since prefetching
1766 is beneficial on s390, we enable it if available. */
1767 if (flag_prefetch_loop_arrays < 0 && HAVE_prefetch && optimize >= 3)
1768 flag_prefetch_loop_arrays = 1;
1769 }
1770
1771 /* Map for smallest class containing reg regno. */
1772
1773 const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
1774 { GENERAL_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1775 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1776 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1777 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1778 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1779 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1780 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1781 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1782 ADDR_REGS, CC_REGS, ADDR_REGS, ADDR_REGS,
1783 ACCESS_REGS, ACCESS_REGS
1784 };
1785
1786 /* Return attribute type of insn. */
1787
1788 static enum attr_type
1789 s390_safe_attr_type (rtx insn)
1790 {
1791 if (recog_memoized (insn) >= 0)
1792 return get_attr_type (insn);
1793 else
1794 return TYPE_NONE;
1795 }
1796
1797 /* Return true if DISP is a valid short displacement. */
1798
1799 static bool
1800 s390_short_displacement (rtx disp)
1801 {
1802 /* No displacement is OK. */
1803 if (!disp)
1804 return true;
1805
1806 /* Without the long displacement facility we don't need to
1807 distinguish between long and short displacement. */
1808 if (!TARGET_LONG_DISPLACEMENT)
1809 return true;
1810
1811 /* Integer displacement in range. */
1812 if (GET_CODE (disp) == CONST_INT)
1813 return INTVAL (disp) >= 0 && INTVAL (disp) < 4096;
1814
1815 /* GOT offset is not OK, the GOT can be large. */
1816 if (GET_CODE (disp) == CONST
1817 && GET_CODE (XEXP (disp, 0)) == UNSPEC
1818 && (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
1819 || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
1820 return false;
1821
1822 /* All other symbolic constants are literal pool references,
1823 which are OK as the literal pool must be small. */
1824 if (GET_CODE (disp) == CONST)
1825 return true;
1826
1827 return false;
1828 }
1829
1830 /* Decompose a RTL expression ADDR for a memory address into
1831 its components, returned in OUT.
1832
1833 Returns false if ADDR is not a valid memory address, true
1834 otherwise. If OUT is NULL, don't return the components,
1835 but check for validity only.
1836
1837 Note: Only addresses in canonical form are recognized.
1838 LEGITIMIZE_ADDRESS should convert non-canonical forms to the
1839 canonical form so that they will be recognized. */
1840
1841 static int
1842 s390_decompose_address (rtx addr, struct s390_address *out)
1843 {
1844 HOST_WIDE_INT offset = 0;
1845 rtx base = NULL_RTX;
1846 rtx indx = NULL_RTX;
1847 rtx disp = NULL_RTX;
1848 rtx orig_disp;
1849 bool pointer = false;
1850 bool base_ptr = false;
1851 bool indx_ptr = false;
1852 bool literal_pool = false;
1853
1854 /* We may need to substitute the literal pool base register into the address
1855 below. However, at this point we do not know which register is going to
1856 be used as base, so we substitute the arg pointer register. This is going
1857 to be treated as holding a pointer below -- it shouldn't be used for any
1858 other purpose. */
1859 rtx fake_pool_base = gen_rtx_REG (Pmode, ARG_POINTER_REGNUM);
1860
1861 /* Decompose address into base + index + displacement. */
1862
1863 if (GET_CODE (addr) == REG || GET_CODE (addr) == UNSPEC)
1864 base = addr;
1865
1866 else if (GET_CODE (addr) == PLUS)
1867 {
1868 rtx op0 = XEXP (addr, 0);
1869 rtx op1 = XEXP (addr, 1);
1870 enum rtx_code code0 = GET_CODE (op0);
1871 enum rtx_code code1 = GET_CODE (op1);
1872
1873 if (code0 == REG || code0 == UNSPEC)
1874 {
1875 if (code1 == REG || code1 == UNSPEC)
1876 {
1877 indx = op0; /* index + base */
1878 base = op1;
1879 }
1880
1881 else
1882 {
1883 base = op0; /* base + displacement */
1884 disp = op1;
1885 }
1886 }
1887
1888 else if (code0 == PLUS)
1889 {
1890 indx = XEXP (op0, 0); /* index + base + disp */
1891 base = XEXP (op0, 1);
1892 disp = op1;
1893 }
1894
1895 else
1896 {
1897 return false;
1898 }
1899 }
1900
1901 else
1902 disp = addr; /* displacement */
1903
1904 /* Extract integer part of displacement. */
1905 orig_disp = disp;
1906 if (disp)
1907 {
1908 if (GET_CODE (disp) == CONST_INT)
1909 {
1910 offset = INTVAL (disp);
1911 disp = NULL_RTX;
1912 }
1913 else if (GET_CODE (disp) == CONST
1914 && GET_CODE (XEXP (disp, 0)) == PLUS
1915 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
1916 {
1917 offset = INTVAL (XEXP (XEXP (disp, 0), 1));
1918 disp = XEXP (XEXP (disp, 0), 0);
1919 }
1920 }
1921
1922 /* Strip off CONST here to avoid special case tests later. */
1923 if (disp && GET_CODE (disp) == CONST)
1924 disp = XEXP (disp, 0);
1925
1926 /* We can convert literal pool addresses to
1927 displacements by basing them off the base register. */
1928 if (disp && GET_CODE (disp) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (disp))
1929 {
1930 /* Either base or index must be free to hold the base register. */
1931 if (!base)
1932 base = fake_pool_base, literal_pool = true;
1933 else if (!indx)
1934 indx = fake_pool_base, literal_pool = true;
1935 else
1936 return false;
1937
1938 /* Mark up the displacement. */
1939 disp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, disp),
1940 UNSPEC_LTREL_OFFSET);
1941 }
1942
1943 /* Validate base register. */
1944 if (base)
1945 {
1946 if (GET_CODE (base) == UNSPEC)
1947 switch (XINT (base, 1))
1948 {
1949 case UNSPEC_LTREF:
1950 if (!disp)
1951 disp = gen_rtx_UNSPEC (Pmode,
1952 gen_rtvec (1, XVECEXP (base, 0, 0)),
1953 UNSPEC_LTREL_OFFSET);
1954 else
1955 return false;
1956
1957 base = XVECEXP (base, 0, 1);
1958 break;
1959
1960 case UNSPEC_LTREL_BASE:
1961 if (XVECLEN (base, 0) == 1)
1962 base = fake_pool_base, literal_pool = true;
1963 else
1964 base = XVECEXP (base, 0, 1);
1965 break;
1966
1967 default:
1968 return false;
1969 }
1970
1971 if (!REG_P (base)
1972 || (GET_MODE (base) != SImode
1973 && GET_MODE (base) != Pmode))
1974 return false;
1975
1976 if (REGNO (base) == STACK_POINTER_REGNUM
1977 || REGNO (base) == FRAME_POINTER_REGNUM
1978 || ((reload_completed || reload_in_progress)
1979 && frame_pointer_needed
1980 && REGNO (base) == HARD_FRAME_POINTER_REGNUM)
1981 || REGNO (base) == ARG_POINTER_REGNUM
1982 || (flag_pic
1983 && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
1984 pointer = base_ptr = true;
1985
1986 if ((reload_completed || reload_in_progress)
1987 && base == cfun->machine->base_reg)
1988 pointer = base_ptr = literal_pool = true;
1989 }
1990
1991 /* Validate index register. */
1992 if (indx)
1993 {
1994 if (GET_CODE (indx) == UNSPEC)
1995 switch (XINT (indx, 1))
1996 {
1997 case UNSPEC_LTREF:
1998 if (!disp)
1999 disp = gen_rtx_UNSPEC (Pmode,
2000 gen_rtvec (1, XVECEXP (indx, 0, 0)),
2001 UNSPEC_LTREL_OFFSET);
2002 else
2003 return false;
2004
2005 indx = XVECEXP (indx, 0, 1);
2006 break;
2007
2008 case UNSPEC_LTREL_BASE:
2009 if (XVECLEN (indx, 0) == 1)
2010 indx = fake_pool_base, literal_pool = true;
2011 else
2012 indx = XVECEXP (indx, 0, 1);
2013 break;
2014
2015 default:
2016 return false;
2017 }
2018
2019 if (!REG_P (indx)
2020 || (GET_MODE (indx) != SImode
2021 && GET_MODE (indx) != Pmode))
2022 return false;
2023
2024 if (REGNO (indx) == STACK_POINTER_REGNUM
2025 || REGNO (indx) == FRAME_POINTER_REGNUM
2026 || ((reload_completed || reload_in_progress)
2027 && frame_pointer_needed
2028 && REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
2029 || REGNO (indx) == ARG_POINTER_REGNUM
2030 || (flag_pic
2031 && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
2032 pointer = indx_ptr = true;
2033
2034 if ((reload_completed || reload_in_progress)
2035 && indx == cfun->machine->base_reg)
2036 pointer = indx_ptr = literal_pool = true;
2037 }
2038
2039 /* Prefer to use pointer as base, not index. */
2040 if (base && indx && !base_ptr
2041 && (indx_ptr || (!REG_POINTER (base) && REG_POINTER (indx))))
2042 {
2043 rtx tmp = base;
2044 base = indx;
2045 indx = tmp;
2046 }
2047
2048 /* Validate displacement. */
2049 if (!disp)
2050 {
2051 /* If virtual registers are involved, the displacement will change later
2052 anyway as the virtual registers get eliminated. This could make a
2053 valid displacement invalid, but it is more likely to make an invalid
2054 displacement valid, because we sometimes access the register save area
2055 via negative offsets to one of those registers.
2056 Thus we don't check the displacement for validity here. If after
2057 elimination the displacement turns out to be invalid after all,
2058 this is fixed up by reload in any case. */
2059 if (base != arg_pointer_rtx
2060 && indx != arg_pointer_rtx
2061 && base != return_address_pointer_rtx
2062 && indx != return_address_pointer_rtx
2063 && base != frame_pointer_rtx
2064 && indx != frame_pointer_rtx
2065 && base != virtual_stack_vars_rtx
2066 && indx != virtual_stack_vars_rtx)
2067 if (!DISP_IN_RANGE (offset))
2068 return false;
2069 }
2070 else
2071 {
2072 /* All the special cases are pointers. */
2073 pointer = true;
2074
2075 /* In the small-PIC case, the linker converts @GOT
2076 and @GOTNTPOFF offsets to possible displacements. */
2077 if (GET_CODE (disp) == UNSPEC
2078 && (XINT (disp, 1) == UNSPEC_GOT
2079 || XINT (disp, 1) == UNSPEC_GOTNTPOFF)
2080 && flag_pic == 1)
2081 {
2082 ;
2083 }
2084
2085 /* Accept pool label offsets. */
2086 else if (GET_CODE (disp) == UNSPEC
2087 && XINT (disp, 1) == UNSPEC_POOL_OFFSET)
2088 ;
2089
2090 /* Accept literal pool references. */
2091 else if (GET_CODE (disp) == UNSPEC
2092 && XINT (disp, 1) == UNSPEC_LTREL_OFFSET)
2093 {
2094 /* In case CSE pulled a non-literal-pool reference out of
2095 the pool we have to reject the address. This is
2096 especially important when loading the GOT pointer on
2097 non-zarch CPUs. In this case the literal pool contains an
2098 lt-relative offset to the _GLOBAL_OFFSET_TABLE_ label which
2099 will most likely exceed the displacement. */
2100 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
2101 || !CONSTANT_POOL_ADDRESS_P (XVECEXP (disp, 0, 0)))
2102 return false;
2103
2104 orig_disp = gen_rtx_CONST (Pmode, disp);
2105 if (offset)
2106 {
2107 /* If we have an offset, make sure it does not
2108 exceed the size of the constant pool entry. */
2109 rtx sym = XVECEXP (disp, 0, 0);
2110 if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))
2111 return false;
2112
2113 orig_disp = plus_constant (orig_disp, offset);
2114 }
2115 }
2116
2117 else
2118 return false;
2119 }
2120
2121 if (!base && !indx)
2122 pointer = true;
2123
2124 if (out)
2125 {
2126 out->base = base;
2127 out->indx = indx;
2128 out->disp = orig_disp;
2129 out->pointer = pointer;
2130 out->literal_pool = literal_pool;
2131 }
2132
2133 return true;
2134 }
2135
2136 /* Decompose an RTL expression OP for a shift count into its components,
2137 and return the base register in BASE and the offset in OFFSET.
2138
2139 Return true if OP is a valid shift count, false if not. */
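/* Illustration (not part of the original sources): for OP equal to
   (plus (reg R) (const_int 7)) this returns true with *BASE = (reg R)
   and *OFFSET = 7; a bare (const_int 12) yields *BASE = NULL_RTX and
   *OFFSET = 12.  */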
2140
2141 bool
2142 s390_decompose_shift_count (rtx op, rtx *base, HOST_WIDE_INT *offset)
2143 {
2144 HOST_WIDE_INT off = 0;
2145
2146 /* We can have an integer constant, an address register,
2147 or a sum of the two. */
2148 if (GET_CODE (op) == CONST_INT)
2149 {
2150 off = INTVAL (op);
2151 op = NULL_RTX;
2152 }
2153 if (op && GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT)
2154 {
2155 off = INTVAL (XEXP (op, 1));
2156 op = XEXP (op, 0);
2157 }
2158 while (op && GET_CODE (op) == SUBREG)
2159 op = SUBREG_REG (op);
2160
2161 if (op && GET_CODE (op) != REG)
2162 return false;
2163
2164 if (offset)
2165 *offset = off;
2166 if (base)
2167 *base = op;
2168
2169 return true;
2170 }
2171
2172
2173 /* Return true if OP is a MEM with a legitimate address that uses no index register. */
2174
2175 bool
2176 s390_legitimate_address_without_index_p (rtx op)
2177 {
2178 struct s390_address addr;
2179
2180 if (!s390_decompose_address (XEXP (op, 0), &addr))
2181 return false;
2182 if (addr.indx)
2183 return false;
2184
2185 return true;
2186 }
2187
2188
2189 /* Return true if ADDR is of kind symbol_ref or symbol_ref + const_int
2190 and return these parts in SYMREF and ADDEND. You can pass NULL in
2191 SYMREF and/or ADDEND if you are not interested in these values.
2192 Literal pool references are *not* considered symbol references. */
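/* Illustration (not part of the original sources):
   (const (plus (symbol_ref "foo") (const_int 12))) returns true with
   *SYMREF = (symbol_ref "foo") and *ADDEND = 12, provided "foo" is not
   a constant pool entry.  */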
2193
2194 static bool
2195 s390_symref_operand_p (rtx addr, rtx *symref, HOST_WIDE_INT *addend)
2196 {
2197 HOST_WIDE_INT tmpaddend = 0;
2198
2199 if (GET_CODE (addr) == CONST)
2200 addr = XEXP (addr, 0);
2201
2202 if (GET_CODE (addr) == PLUS)
2203 {
2204 if (GET_CODE (XEXP (addr, 0)) == SYMBOL_REF
2205 && !CONSTANT_POOL_ADDRESS_P (XEXP (addr, 0))
2206 && CONST_INT_P (XEXP (addr, 1)))
2207 {
2208 tmpaddend = INTVAL (XEXP (addr, 1));
2209 addr = XEXP (addr, 0);
2210 }
2211 else
2212 return false;
2213 }
2214 else
2215 if (GET_CODE (addr) != SYMBOL_REF || CONSTANT_POOL_ADDRESS_P (addr))
2216 return false;
2217
2218 if (symref)
2219 *symref = addr;
2220 if (addend)
2221 *addend = tmpaddend;
2222
2223 return true;
2224 }
2225
2226
2227 /* Return true if the address in OP is valid for constraint letter C
2228 if wrapped in a MEM rtx. LIT_POOL_OK specifies whether literal
2229 pool MEMs should be accepted. Only the Q, R, S, T constraint
2230 letters are allowed for C. */
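/* Roughly (see the cases below): 'Q' and 'S' forbid an index register,
   'R' and 'T' allow one.  'Q' and 'R' want a short displacement (the
   12-bit unsigned range checked by s390_short_displacement), 'S' and
   'T' want a displacement that requires the long-displacement
   facility.  The 'R' and 'T' checks are relaxed where reload can fix
   up the address anyway.  */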
2231
2232 static int
2233 s390_check_qrst_address (char c, rtx op, bool lit_pool_ok)
2234 {
2235 struct s390_address addr;
2236 bool decomposed = false;
2237
2238 /* This check makes sure that no symbolic addresses (except literal
2239 pool references) are accepted by the R or T constraints. */
2240 if (s390_symref_operand_p (op, NULL, NULL))
2241 return 0;
2242
2243 /* Ensure literal pool references are only accepted if LIT_POOL_OK. */
2244 if (!lit_pool_ok)
2245 {
2246 if (!s390_decompose_address (op, &addr))
2247 return 0;
2248 if (addr.literal_pool)
2249 return 0;
2250 decomposed = true;
2251 }
2252
2253 switch (c)
2254 {
2255 case 'Q': /* no index short displacement */
2256 if (!decomposed && !s390_decompose_address (op, &addr))
2257 return 0;
2258 if (addr.indx)
2259 return 0;
2260 if (!s390_short_displacement (addr.disp))
2261 return 0;
2262 break;
2263
2264 case 'R': /* with index short displacement */
2265 if (TARGET_LONG_DISPLACEMENT)
2266 {
2267 if (!decomposed && !s390_decompose_address (op, &addr))
2268 return 0;
2269 if (!s390_short_displacement (addr.disp))
2270 return 0;
2271 }
2272 /* Any invalid address here will be fixed up by reload,
2273 so accept it for the most generic constraint. */
2274 break;
2275
2276 case 'S': /* no index long displacement */
2277 if (!TARGET_LONG_DISPLACEMENT)
2278 return 0;
2279 if (!decomposed && !s390_decompose_address (op, &addr))
2280 return 0;
2281 if (addr.indx)
2282 return 0;
2283 if (s390_short_displacement (addr.disp))
2284 return 0;
2285 break;
2286
2287 case 'T': /* with index long displacement */
2288 if (!TARGET_LONG_DISPLACEMENT)
2289 return 0;
2290 /* Any invalid address here will be fixed up by reload,
2291 so accept it for the most generic constraint. */
2292 if ((decomposed || s390_decompose_address (op, &addr))
2293 && s390_short_displacement (addr.disp))
2294 return 0;
2295 break;
2296 default:
2297 return 0;
2298 }
2299 return 1;
2300 }
2301
2302
2303 /* Evaluates constraint strings described by the regular expression
2304 ([ABZ](Q|R|S|T))|U|W|Y and returns 1 if OP is a valid operand for
2305 the constraint given in STR, and 0 otherwise. */
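/* Illustration (not part of the original sources): the two-letter
   constraint "AQ" accepts an offsettable, non-volatile MEM whose
   address passes the 'Q' check above, while "BR" accepts a MEM whose
   address passes 'R' but does not reference the literal pool.  */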
2306
2307 int
2308 s390_mem_constraint (const char *str, rtx op)
2309 {
2310 char c = str[0];
2311
2312 switch (c)
2313 {
2314 case 'A':
2315 /* Check for offsettable variants of memory constraints. */
2316 if (!MEM_P (op) || MEM_VOLATILE_P (op))
2317 return 0;
2318 if ((reload_completed || reload_in_progress)
2319 ? !offsettable_memref_p (op) : !offsettable_nonstrict_memref_p (op))
2320 return 0;
2321 return s390_check_qrst_address (str[1], XEXP (op, 0), true);
2322 case 'B':
2323 /* Check for non-literal-pool variants of memory constraints. */
2324 if (!MEM_P (op))
2325 return 0;
2326 return s390_check_qrst_address (str[1], XEXP (op, 0), false);
2327 case 'Q':
2328 case 'R':
2329 case 'S':
2330 case 'T':
2331 if (GET_CODE (op) != MEM)
2332 return 0;
2333 return s390_check_qrst_address (c, XEXP (op, 0), true);
2334 case 'U':
2335 return (s390_check_qrst_address ('Q', op, true)
2336 || s390_check_qrst_address ('R', op, true));
2337 case 'W':
2338 return (s390_check_qrst_address ('S', op, true)
2339 || s390_check_qrst_address ('T', op, true));
2340 case 'Y':
2341 /* Simply check for the basic form of a shift count. Reload will
2342 take care of making sure we have a proper base register. */
2343 if (!s390_decompose_shift_count (op, NULL, NULL))
2344 return 0;
2345 break;
2346 case 'Z':
2347 return s390_check_qrst_address (str[1], op, true);
2348 default:
2349 return 0;
2350 }
2351 return 1;
2352 }
2353
2354
2355 /* Evaluates constraint strings starting with letter O. Input
2356 parameter C is the letter immediately following the "O" in the constraint
2357 string. Returns 1 if VALUE meets the respective constraint and 0
2358 otherwise. */
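/* Sketch of the cases below: 'O'+'s' accepts any VALUE that survives
   truncation to SImode, i.e. fits a signed 32-bit immediate; 'O'+'p'
   accepts zero or a value with exactly one "interesting" 32-bit part
   as determined by s390_single_part; 'O'+'n' applies the same
   single-part test to VALUE - 1.  All cases require TARGET_EXTIMM.  */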
2359
2360 int
2361 s390_O_constraint_str (const char c, HOST_WIDE_INT value)
2362 {
2363 if (!TARGET_EXTIMM)
2364 return 0;
2365
2366 switch (c)
2367 {
2368 case 's':
2369 return trunc_int_for_mode (value, SImode) == value;
2370
2371 case 'p':
2372 return value == 0
2373 || s390_single_part (GEN_INT (value), DImode, SImode, 0) == 1;
2374
2375 case 'n':
2376 return s390_single_part (GEN_INT (value - 1), DImode, SImode, -1) == 1;
2377
2378 default:
2379 gcc_unreachable ();
2380 }
2381 }
2382
2383
2384 /* Evaluates constraint strings starting with letter N. Parameter STR
2385 contains the letters following letter "N" in the constraint string.
2386 Returns true if VALUE matches the constraint. */
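/* The characters of STR are interpreted as follows (see the code
   below): STR[0] is the required part number, or 'x' for "any part";
   STR[1] selects the part mode (Q, H or S); STR[2] selects the mode of
   VALUE (H, S or D); STR[3] gives the value the remaining parts must
   have ('0', or 'F' for all-ones).  For example, "xHD0" accepts a
   DImode value in which exactly one 16-bit part is nonzero.  */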
2387
2388 int
2389 s390_N_constraint_str (const char *str, HOST_WIDE_INT value)
2390 {
2391 enum machine_mode mode, part_mode;
2392 int def;
2393 int part, part_goal;
2394
2395
2396 if (str[0] == 'x')
2397 part_goal = -1;
2398 else
2399 part_goal = str[0] - '0';
2400
2401 switch (str[1])
2402 {
2403 case 'Q':
2404 part_mode = QImode;
2405 break;
2406 case 'H':
2407 part_mode = HImode;
2408 break;
2409 case 'S':
2410 part_mode = SImode;
2411 break;
2412 default:
2413 return 0;
2414 }
2415
2416 switch (str[2])
2417 {
2418 case 'H':
2419 mode = HImode;
2420 break;
2421 case 'S':
2422 mode = SImode;
2423 break;
2424 case 'D':
2425 mode = DImode;
2426 break;
2427 default:
2428 return 0;
2429 }
2430
2431 switch (str[3])
2432 {
2433 case '0':
2434 def = 0;
2435 break;
2436 case 'F':
2437 def = -1;
2438 break;
2439 default:
2440 return 0;
2441 }
2442
2443 if (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (part_mode))
2444 return 0;
2445
2446 part = s390_single_part (GEN_INT (value), mode, part_mode, def);
2447 if (part < 0)
2448 return 0;
2449 if (part_goal != -1 && part_goal != part)
2450 return 0;
2451
2452 return 1;
2453 }
2454
2455
2456 /* Returns true if the input parameter VALUE is a float zero. */
2457
2458 int
2459 s390_float_const_zero_p (rtx value)
2460 {
2461 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
2462 && value == CONST0_RTX (GET_MODE (value)));
2463 }
2464
2465 /* Implement TARGET_REGISTER_MOVE_COST. */
2466
2467 static int
2468 s390_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2469 reg_class_t from, reg_class_t to)
2470 {
2471 /* On s390, copy between fprs and gprs is expensive. */
2472 if ((reg_classes_intersect_p (from, GENERAL_REGS)
2473 && reg_classes_intersect_p (to, FP_REGS))
2474 || (reg_classes_intersect_p (from, FP_REGS)
2475 && reg_classes_intersect_p (to, GENERAL_REGS)))
2476 return 10;
2477
2478 return 1;
2479 }
2480
2481 /* Implement TARGET_MEMORY_MOVE_COST. */
2482
2483 static int
2484 s390_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2485 reg_class_t rclass ATTRIBUTE_UNUSED,
2486 bool in ATTRIBUTE_UNUSED)
2487 {
2488 return 1;
2489 }
2490
2491 /* Compute a (partial) cost for rtx X. Return true if the complete
2492 cost has been computed, and false if subexpressions should be
2493 scanned. In either case, *TOTAL contains the cost result.
2494 CODE contains GET_CODE (x), OUTER_CODE contains the code
2495 of the superexpression of x. */
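/* Illustration (not part of the original sources): a SImode MULT by a
   constant satisfying CONST_OK_FOR_K is costed as an MHI instruction,
   and a DImode DIV on z/Architecture is costed as DSGFR or DSGR
   depending on whether the divisor is zero-extended from SImode.  */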
2496
2497 static bool
2498 s390_rtx_costs (rtx x, int code, int outer_code, int *total,
2499 bool speed ATTRIBUTE_UNUSED)
2500 {
2501 switch (code)
2502 {
2503 case CONST:
2504 case CONST_INT:
2505 case LABEL_REF:
2506 case SYMBOL_REF:
2507 case CONST_DOUBLE:
2508 case MEM:
2509 *total = 0;
2510 return true;
2511
2512 case ASHIFT:
2513 case ASHIFTRT:
2514 case LSHIFTRT:
2515 case ROTATE:
2516 case ROTATERT:
2517 case AND:
2518 case IOR:
2519 case XOR:
2520 case NEG:
2521 case NOT:
2522 *total = COSTS_N_INSNS (1);
2523 return false;
2524
2525 case PLUS:
2526 case MINUS:
2527 *total = COSTS_N_INSNS (1);
2528 return false;
2529
2530 case MULT:
2531 switch (GET_MODE (x))
2532 {
2533 case SImode:
2534 {
2535 rtx left = XEXP (x, 0);
2536 rtx right = XEXP (x, 1);
2537 if (GET_CODE (right) == CONST_INT
2538 && CONST_OK_FOR_K (INTVAL (right)))
2539 *total = s390_cost->mhi;
2540 else if (GET_CODE (left) == SIGN_EXTEND)
2541 *total = s390_cost->mh;
2542 else
2543 *total = s390_cost->ms; /* msr, ms, msy */
2544 break;
2545 }
2546 case DImode:
2547 {
2548 rtx left = XEXP (x, 0);
2549 rtx right = XEXP (x, 1);
2550 if (TARGET_ZARCH)
2551 {
2552 if (GET_CODE (right) == CONST_INT
2553 && CONST_OK_FOR_K (INTVAL (right)))
2554 *total = s390_cost->mghi;
2555 else if (GET_CODE (left) == SIGN_EXTEND)
2556 *total = s390_cost->msgf;
2557 else
2558 *total = s390_cost->msg; /* msgr, msg */
2559 }
2560 else /* TARGET_31BIT */
2561 {
2562 if (GET_CODE (left) == SIGN_EXTEND
2563 && GET_CODE (right) == SIGN_EXTEND)
2564 /* mulsidi case: mr, m */
2565 *total = s390_cost->m;
2566 else if (GET_CODE (left) == ZERO_EXTEND
2567 && GET_CODE (right) == ZERO_EXTEND
2568 && TARGET_CPU_ZARCH)
2569 /* umulsidi case: ml, mlr */
2570 *total = s390_cost->ml;
2571 else
2572 /* Complex calculation is required. */
2573 *total = COSTS_N_INSNS (40);
2574 }
2575 break;
2576 }
2577 case SFmode:
2578 case DFmode:
2579 *total = s390_cost->mult_df;
2580 break;
2581 case TFmode:
2582 *total = s390_cost->mxbr;
2583 break;
2584 default:
2585 return false;
2586 }
2587 return false;
2588
2589 case FMA:
2590 switch (GET_MODE (x))
2591 {
2592 case DFmode:
2593 *total = s390_cost->madbr;
2594 break;
2595 case SFmode:
2596 *total = s390_cost->maebr;
2597 break;
2598 default:
2599 return false;
2600 }
2601 /* A negation (NEG) of the third operand is free: FMSUB. */
2602 if (GET_CODE (XEXP (x, 2)) == NEG)
2603 {
2604 *total += (rtx_cost (XEXP (x, 0), FMA, speed)
2605 + rtx_cost (XEXP (x, 1), FMA, speed)
2606 + rtx_cost (XEXP (XEXP (x, 2), 0), FMA, speed));
2607 return true;
2608 }
2609 return false;
2610
2611 case UDIV:
2612 case UMOD:
2613 if (GET_MODE (x) == TImode) /* 128 bit division */
2614 *total = s390_cost->dlgr;
2615 else if (GET_MODE (x) == DImode)
2616 {
2617 rtx right = XEXP (x, 1);
2618 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
2619 *total = s390_cost->dlr;
2620 else /* 64 by 64 bit division */
2621 *total = s390_cost->dlgr;
2622 }
2623 else if (GET_MODE (x) == SImode) /* 32 bit division */
2624 *total = s390_cost->dlr;
2625 return false;
2626
2627 case DIV:
2628 case MOD:
2629 if (GET_MODE (x) == DImode)
2630 {
2631 rtx right = XEXP (x, 1);
2632 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
2633 if (TARGET_ZARCH)
2634 *total = s390_cost->dsgfr;
2635 else
2636 *total = s390_cost->dr;
2637 else /* 64 by 64 bit division */
2638 *total = s390_cost->dsgr;
2639 }
2640 else if (GET_MODE (x) == SImode) /* 32 bit division */
2641 *total = s390_cost->dlr;
2642 else if (GET_MODE (x) == SFmode)
2643 {
2644 *total = s390_cost->debr;
2645 }
2646 else if (GET_MODE (x) == DFmode)
2647 {
2648 *total = s390_cost->ddbr;
2649 }
2650 else if (GET_MODE (x) == TFmode)
2651 {
2652 *total = s390_cost->dxbr;
2653 }
2654 return false;
2655
2656 case SQRT:
2657 if (GET_MODE (x) == SFmode)
2658 *total = s390_cost->sqebr;
2659 else if (GET_MODE (x) == DFmode)
2660 *total = s390_cost->sqdbr;
2661 else /* TFmode */
2662 *total = s390_cost->sqxbr;
2663 return false;
2664
2665 case SIGN_EXTEND:
2666 case ZERO_EXTEND:
2667 if (outer_code == MULT || outer_code == DIV || outer_code == MOD
2668 || outer_code == PLUS || outer_code == MINUS
2669 || outer_code == COMPARE)
2670 *total = 0;
2671 return false;
2672
2673 case COMPARE:
2674 *total = COSTS_N_INSNS (1);
2675 if (GET_CODE (XEXP (x, 0)) == AND
2676 && GET_CODE (XEXP (x, 1)) == CONST_INT
2677 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
2678 {
2679 rtx op0 = XEXP (XEXP (x, 0), 0);
2680 rtx op1 = XEXP (XEXP (x, 0), 1);
2681 rtx op2 = XEXP (x, 1);
2682
2683 if (memory_operand (op0, GET_MODE (op0))
2684 && s390_tm_ccmode (op1, op2, 0) != VOIDmode)
2685 return true;
2686 if (register_operand (op0, GET_MODE (op0))
2687 && s390_tm_ccmode (op1, op2, 1) != VOIDmode)
2688 return true;
2689 }
2690 return false;
2691
2692 default:
2693 return false;
2694 }
2695 }
2696
2697 /* Return the cost of an address rtx ADDR. */
2698
2699 static int
2700 s390_address_cost (rtx addr, bool speed ATTRIBUTE_UNUSED)
2701 {
2702 struct s390_address ad;
2703 if (!s390_decompose_address (addr, &ad))
2704 return 1000;
2705
2706 return ad.indx? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (1);
2707 }
2708
2709 /* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
2710 otherwise return 0. */
2711
2712 int
2713 tls_symbolic_operand (rtx op)
2714 {
2715 if (GET_CODE (op) != SYMBOL_REF)
2716 return 0;
2717 return SYMBOL_REF_TLS_MODEL (op);
2718 }
2719 \f
2720 /* Split DImode access register reference REG (on 64-bit) into its constituent
2721 low and high parts, and store them into LO and HI. Note that gen_lowpart/
2722 gen_highpart cannot be used as they assume all registers are word-sized,
2723 while our access registers have only half that size. */
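/* Illustration: for a DImode access register pair starting at an even
   REGNO, *HI receives the even (first) register and *LO the following
   odd register, both as SImode regs.  */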
2724
2725 void
2726 s390_split_access_reg (rtx reg, rtx *lo, rtx *hi)
2727 {
2728 gcc_assert (TARGET_64BIT);
2729 gcc_assert (ACCESS_REG_P (reg));
2730 gcc_assert (GET_MODE (reg) == DImode);
2731 gcc_assert (!(REGNO (reg) & 1));
2732
2733 *lo = gen_rtx_REG (SImode, REGNO (reg) + 1);
2734 *hi = gen_rtx_REG (SImode, REGNO (reg));
2735 }
2736
2737 /* Return true if OP contains a symbol reference. */
2738
2739 bool
2740 symbolic_reference_mentioned_p (rtx op)
2741 {
2742 const char *fmt;
2743 int i;
2744
2745 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
2746 return 1;
2747
2748 fmt = GET_RTX_FORMAT (GET_CODE (op));
2749 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2750 {
2751 if (fmt[i] == 'E')
2752 {
2753 int j;
2754
2755 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2756 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2757 return 1;
2758 }
2759
2760 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
2761 return 1;
2762 }
2763
2764 return 0;
2765 }
2766
2767 /* Return true if OP contains a reference to a thread-local symbol. */
2768
2769 bool
2770 tls_symbolic_reference_mentioned_p (rtx op)
2771 {
2772 const char *fmt;
2773 int i;
2774
2775 if (GET_CODE (op) == SYMBOL_REF)
2776 return tls_symbolic_operand (op);
2777
2778 fmt = GET_RTX_FORMAT (GET_CODE (op));
2779 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2780 {
2781 if (fmt[i] == 'E')
2782 {
2783 int j;
2784
2785 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2786 if (tls_symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2787 return true;
2788 }
2789
2790 else if (fmt[i] == 'e' && tls_symbolic_reference_mentioned_p (XEXP (op, i)))
2791 return true;
2792 }
2793
2794 return false;
2795 }
2796
2797
2798 /* Return true if OP is a legitimate general operand when
2799 generating PIC code. It is given that flag_pic is on
2800 and that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2801
2802 int
2803 legitimate_pic_operand_p (rtx op)
2804 {
2805 /* Accept all non-symbolic constants. */
2806 if (!SYMBOLIC_CONST (op))
2807 return 1;
2808
2809 /* Reject everything else; must be handled
2810 via emit_symbolic_move. */
2811 return 0;
2812 }
2813
2814 /* Returns true if the constant value OP is a legitimate general operand.
2815 It is given that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2816
2817 int
2818 legitimate_constant_p (rtx op)
2819 {
2820 /* Accept all non-symbolic constants. */
2821 if (!SYMBOLIC_CONST (op))
2822 return 1;
2823
2824 /* Accept immediate LARL operands. */
2825 if (TARGET_CPU_ZARCH && larl_operand (op, VOIDmode))
2826 return 1;
2827
2828 /* Thread-local symbols are never legal constants. This is
2829 so that emit_call knows that computing such addresses
2830 might require a function call. */
2831 if (TLS_SYMBOLIC_CONST (op))
2832 return 0;
2833
2834 /* In the PIC case, symbolic constants must *not* be
2835 forced into the literal pool. We accept them here,
2836 so that they will be handled by emit_symbolic_move. */
2837 if (flag_pic)
2838 return 1;
2839
2840 /* All remaining non-PIC symbolic constants are
2841 forced into the literal pool. */
2842 return 0;
2843 }
2844
2845 /* Determine if it's legal to put X into the constant pool. This
2846 is not possible if X contains the address of a symbol that is
2847 not constant (TLS) or not known at final link time (PIC). */
2848
2849 static bool
2850 s390_cannot_force_const_mem (rtx x)
2851 {
2852 switch (GET_CODE (x))
2853 {
2854 case CONST_INT:
2855 case CONST_DOUBLE:
2856 /* Accept all non-symbolic constants. */
2857 return false;
2858
2859 case LABEL_REF:
2860 /* Labels are OK iff we are non-PIC. */
2861 return flag_pic != 0;
2862
2863 case SYMBOL_REF:
2864 /* 'Naked' TLS symbol references are never OK,
2865 non-TLS symbols are OK iff we are non-PIC. */
2866 if (tls_symbolic_operand (x))
2867 return true;
2868 else
2869 return flag_pic != 0;
2870
2871 case CONST:
2872 return s390_cannot_force_const_mem (XEXP (x, 0));
2873 case PLUS:
2874 case MINUS:
2875 return s390_cannot_force_const_mem (XEXP (x, 0))
2876 || s390_cannot_force_const_mem (XEXP (x, 1));
2877
2878 case UNSPEC:
2879 switch (XINT (x, 1))
2880 {
2881 /* Only lt-relative or GOT-relative UNSPECs are OK. */
2882 case UNSPEC_LTREL_OFFSET:
2883 case UNSPEC_GOT:
2884 case UNSPEC_GOTOFF:
2885 case UNSPEC_PLTOFF:
2886 case UNSPEC_TLSGD:
2887 case UNSPEC_TLSLDM:
2888 case UNSPEC_NTPOFF:
2889 case UNSPEC_DTPOFF:
2890 case UNSPEC_GOTNTPOFF:
2891 case UNSPEC_INDNTPOFF:
2892 return false;
2893
2894 /* If the literal pool shares the code section, execute
2895 template placeholders may be put into the pool as well. */
2896 case UNSPEC_INSN:
2897 return TARGET_CPU_ZARCH;
2898
2899 default:
2900 return true;
2901 }
2902 break;
2903
2904 default:
2905 gcc_unreachable ();
2906 }
2907 }
2908
2909 /* Returns true if the constant value OP is a legitimate general
2910 operand during and after reload. The difference to
2911 legitimate_constant_p is that this function will not accept
2912 a constant that would need to be forced to the literal pool
2913 before it can be used as operand.
2914 This function accepts all constants which can be loaded directly
2915 into a GPR. */
2916
2917 bool
2918 legitimate_reload_constant_p (rtx op)
2919 {
2920 /* Accept la(y) operands. */
2921 if (GET_CODE (op) == CONST_INT
2922 && DISP_IN_RANGE (INTVAL (op)))
2923 return true;
2924
2925 /* Accept l(g)hi/l(g)fi operands. */
2926 if (GET_CODE (op) == CONST_INT
2927 && (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_Os (INTVAL (op))))
2928 return true;
2929
2930 /* Accept lliXX operands. */
2931 if (TARGET_ZARCH
2932 && GET_CODE (op) == CONST_INT
2933 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2934 && s390_single_part (op, word_mode, HImode, 0) >= 0)
2935 return true;
2936
2937 if (TARGET_EXTIMM
2938 && GET_CODE (op) == CONST_INT
2939 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2940 && s390_single_part (op, word_mode, SImode, 0) >= 0)
2941 return true;
2942
2943 /* Accept larl operands. */
2944 if (TARGET_CPU_ZARCH
2945 && larl_operand (op, VOIDmode))
2946 return true;
2947
2948 /* Accept floating-point zero operands that fit into a single GPR. */
2949 if (GET_CODE (op) == CONST_DOUBLE
2950 && s390_float_const_zero_p (op)
2951 && GET_MODE_SIZE (GET_MODE (op)) <= UNITS_PER_WORD)
2952 return true;
2953
2954 /* Accept double-word operands that can be split. */
2955 if (GET_CODE (op) == CONST_INT
2956 && trunc_int_for_mode (INTVAL (op), word_mode) != INTVAL (op))
2957 {
2958 enum machine_mode dword_mode = word_mode == SImode ? DImode : TImode;
2959 rtx hi = operand_subword (op, 0, 0, dword_mode);
2960 rtx lo = operand_subword (op, 1, 0, dword_mode);
2961 return legitimate_reload_constant_p (hi)
2962 && legitimate_reload_constant_p (lo);
2963 }
2964
2965 /* Everything else cannot be handled without reload. */
2966 return false;
2967 }
2968
2969 /* Returns true if the constant value OP is a legitimate fp operand
2970 during and after reload.
2971 This function accepts all constants which can be loaded directly
2972 into an FPR. */
2973
2974 static bool
2975 legitimate_reload_fp_constant_p (rtx op)
2976 {
2977 /* Accept floating-point zero operands if the load zero instruction
2978 can be used. */
2979 if (TARGET_Z196
2980 && GET_CODE (op) == CONST_DOUBLE
2981 && s390_float_const_zero_p (op))
2982 return true;
2983
2984 return false;
2985 }
2986
2987 /* Given an rtx OP being reloaded into a reg required to be in class RCLASS,
2988 return the class of reg to actually use. */
2989
2990 static reg_class_t
2991 s390_preferred_reload_class (rtx op, reg_class_t rclass)
2992 {
2993 switch (GET_CODE (op))
2994 {
2995 /* Constants we cannot reload into general registers
2996 must be forced into the literal pool. */
2997 case CONST_DOUBLE:
2998 case CONST_INT:
2999 if (reg_class_subset_p (GENERAL_REGS, rclass)
3000 && legitimate_reload_constant_p (op))
3001 return GENERAL_REGS;
3002 else if (reg_class_subset_p (ADDR_REGS, rclass)
3003 && legitimate_reload_constant_p (op))
3004 return ADDR_REGS;
3005 else if (reg_class_subset_p (FP_REGS, rclass)
3006 && legitimate_reload_fp_constant_p (op))
3007 return FP_REGS;
3008 return NO_REGS;
3009
3010 /* If a symbolic constant or a PLUS is reloaded,
3011 it is most likely being used as an address, so
3012 prefer ADDR_REGS. If 'class' is not a superset
3013 of ADDR_REGS, e.g. FP_REGS, reject this reload. */
3014 case PLUS:
3015 case LABEL_REF:
3016 case SYMBOL_REF:
3017 case CONST:
3018 if (reg_class_subset_p (ADDR_REGS, rclass))
3019 return ADDR_REGS;
3020 else
3021 return NO_REGS;
3022
3023 default:
3024 break;
3025 }
3026
3027 return rclass;
3028 }
3029
3030 /* Return true if ADDR is SYMBOL_REF + addend with addend being a
3031 multiple of ALIGNMENT and the SYMBOL_REF being naturally
3032 aligned. */
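/* Illustration (not part of the original sources): with ALIGNMENT == 2
   this accepts symbol + even addend for a naturally aligned symbol and
   rejects any odd addend; the larl-related reloads below rely on
   this.  */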
3033
3034 bool
3035 s390_check_symref_alignment (rtx addr, HOST_WIDE_INT alignment)
3036 {
3037 HOST_WIDE_INT addend;
3038 rtx symref;
3039
3040 if (!s390_symref_operand_p (addr, &symref, &addend))
3041 return false;
3042
3043 return (!SYMBOL_REF_NOT_NATURALLY_ALIGNED_P (symref)
3044 && !(addend & (alignment - 1)));
3045 }
3046
3047 /* ADDR is moved into REG using larl. If ADDR isn't a valid larl
3048 operand, SCRATCH is used to load the even part of the address;
3049 one is then added to form the final value. */
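/* Illustration (not part of the original sources): for ADDR = sym + 5
   this emits roughly "larl SCRATCH, sym+4" followed by
   "la REG, 1(SCRATCH)", since larl itself can only produce even
   (halfword-aligned) addresses.  */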
3050
3051 void
3052 s390_reload_larl_operand (rtx reg, rtx addr, rtx scratch)
3053 {
3054 HOST_WIDE_INT addend;
3055 rtx symref;
3056
3057 if (!s390_symref_operand_p (addr, &symref, &addend))
3058 gcc_unreachable ();
3059
3060 if (!(addend & 1))
3061 /* Easy case. The addend is even so larl will do fine. */
3062 emit_move_insn (reg, addr);
3063 else
3064 {
3065 /* We can leave the scratch register untouched if the target
3066 register is a valid base register. */
3067 if (REGNO (reg) < FIRST_PSEUDO_REGISTER
3068 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS)
3069 scratch = reg;
3070
3071 gcc_assert (REGNO (scratch) < FIRST_PSEUDO_REGISTER);
3072 gcc_assert (REGNO_REG_CLASS (REGNO (scratch)) == ADDR_REGS);
3073
3074 if (addend != 1)
3075 emit_move_insn (scratch,
3076 gen_rtx_CONST (Pmode,
3077 gen_rtx_PLUS (Pmode, symref,
3078 GEN_INT (addend - 1))));
3079 else
3080 emit_move_insn (scratch, symref);
3081
3082 /* Increment the address using la in order to avoid clobbering cc. */
3083 emit_move_insn (reg, gen_rtx_PLUS (Pmode, scratch, const1_rtx));
3084 }
3085 }
3086
3087 /* Generate what is necessary to move between REG and MEM using
3088 SCRATCH. The direction is given by TOMEM. */
3089
3090 void
3091 s390_reload_symref_address (rtx reg, rtx mem, rtx scratch, bool tomem)
3092 {
3093 /* Reload might have pulled a constant out of the literal pool.
3094 Force it back in. */
3095 if (CONST_INT_P (mem) || GET_CODE (mem) == CONST_DOUBLE
3096 || GET_CODE (mem) == CONST)
3097 mem = force_const_mem (GET_MODE (reg), mem);
3098
3099 gcc_assert (MEM_P (mem));
3100
3101 /* For a load from memory we can leave the scratch register
3102 untouched if the target register is a valid base register. */
3103 if (!tomem
3104 && REGNO (reg) < FIRST_PSEUDO_REGISTER
3105 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS
3106 && GET_MODE (reg) == GET_MODE (scratch))
3107 scratch = reg;
3108
3109 /* Load address into scratch register. Since we can't have a
3110 secondary reload for a secondary reload we have to cover the case
3111 where larl would need a secondary reload here as well. */
3112 s390_reload_larl_operand (scratch, XEXP (mem, 0), scratch);
3113
3114 /* Now we can use a standard load/store to do the move. */
3115 if (tomem)
3116 emit_move_insn (replace_equiv_address (mem, scratch), reg);
3117 else
3118 emit_move_insn (reg, replace_equiv_address (mem, scratch));
3119 }
3120
3121 /* Inform reload about cases where moving X with a mode MODE to a register in
3122 RCLASS requires an extra scratch or immediate register. Return the class
3123 needed for the immediate register. */
3124
3125 static reg_class_t
3126 s390_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
3127 enum machine_mode mode, secondary_reload_info *sri)
3128 {
3129 enum reg_class rclass = (enum reg_class) rclass_i;
3130
3131 /* Intermediate register needed. */
3132 if (reg_classes_intersect_p (CC_REGS, rclass))
3133 return GENERAL_REGS;
3134
3135 if (TARGET_Z10)
3136 {
3137 /* On z10 several optimizer steps may generate larl operands with
3138 an odd addend. */
3139 if (in_p
3140 && s390_symref_operand_p (x, NULL, NULL)
3141 && mode == Pmode
3142 && !s390_check_symref_alignment (x, 2))
3143 sri->icode = ((mode == DImode) ? CODE_FOR_reloaddi_larl_odd_addend_z10
3144 : CODE_FOR_reloadsi_larl_odd_addend_z10);
3145
3146 /* On z10 we need a scratch register when moving QI, TI or floating
3147 point mode values from or to a memory location with a SYMBOL_REF
3148 or if the symref addend of an HI, SI or DI move is not aligned to the
3149 width of the access. */
3150 if (MEM_P (x)
3151 && s390_symref_operand_p (XEXP (x, 0), NULL, NULL)
3152 && (mode == QImode || mode == TImode || FLOAT_MODE_P (mode)
3153 || (!TARGET_ZARCH && mode == DImode)
3154 || ((mode == HImode || mode == SImode || mode == DImode)
3155 && (!s390_check_symref_alignment (XEXP (x, 0),
3156 GET_MODE_SIZE (mode))))))
3157 {
3158 #define __SECONDARY_RELOAD_CASE(M,m) \
3159 case M##mode: \
3160 if (TARGET_64BIT) \
3161 sri->icode = in_p ? CODE_FOR_reload##m##di_toreg_z10 : \
3162 CODE_FOR_reload##m##di_tomem_z10; \
3163 else \
3164 sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 : \
3165 CODE_FOR_reload##m##si_tomem_z10; \
3166 break;
3167
3168 switch (GET_MODE (x))
3169 {
3170 __SECONDARY_RELOAD_CASE (QI, qi);
3171 __SECONDARY_RELOAD_CASE (HI, hi);
3172 __SECONDARY_RELOAD_CASE (SI, si);
3173 __SECONDARY_RELOAD_CASE (DI, di);
3174 __SECONDARY_RELOAD_CASE (TI, ti);
3175 __SECONDARY_RELOAD_CASE (SF, sf);
3176 __SECONDARY_RELOAD_CASE (DF, df);
3177 __SECONDARY_RELOAD_CASE (TF, tf);
3178 __SECONDARY_RELOAD_CASE (SD, sd);
3179 __SECONDARY_RELOAD_CASE (DD, dd);
3180 __SECONDARY_RELOAD_CASE (TD, td);
3181
3182 default:
3183 gcc_unreachable ();
3184 }
3185 #undef __SECONDARY_RELOAD_CASE
3186 }
3187 }
3188
3189 /* We need a scratch register when loading a PLUS expression which
3190 is not a legitimate operand of the LOAD ADDRESS instruction. */
3191 if (in_p && s390_plus_operand (x, mode))
3192 sri->icode = (TARGET_64BIT ?
3193 CODE_FOR_reloaddi_plus : CODE_FOR_reloadsi_plus);
3194
3195 /* Performing a multiword move from or to memory we have to make sure the
3196 second chunk in memory is addressable without causing a displacement
3197 overflow. If that would be the case we calculate the address in
3198 a scratch register. */
3199 if (MEM_P (x)
3200 && GET_CODE (XEXP (x, 0)) == PLUS
3201 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3202 && !DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1))
3203 + GET_MODE_SIZE (mode) - 1))
3204 {
3205 /* For GENERAL_REGS a displacement overflow is no problem if occurring
3206 in an s_operand address since we may fall back to lm/stm. So we only
3207 have to care about overflows in the b+i+d case. */
3208 if ((reg_classes_intersect_p (GENERAL_REGS, rclass)
3209 && s390_class_max_nregs (GENERAL_REGS, mode) > 1
3210 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
3211 /* For FP_REGS no lm/stm is available so this check is triggered
3212 for displacement overflows in b+i+d and b+d like addresses. */
3213 || (reg_classes_intersect_p (FP_REGS, rclass)
3214 && s390_class_max_nregs (FP_REGS, mode) > 1))
3215 {
3216 if (in_p)
3217 sri->icode = (TARGET_64BIT ?
3218 CODE_FOR_reloaddi_nonoffmem_in :
3219 CODE_FOR_reloadsi_nonoffmem_in);
3220 else
3221 sri->icode = (TARGET_64BIT ?
3222 CODE_FOR_reloaddi_nonoffmem_out :
3223 CODE_FOR_reloadsi_nonoffmem_out);
3224 }
3225 }
3226
3227 /* A scratch address register is needed when a symbolic constant is
3228 copied to r0 when compiling with -fPIC. In other cases the target
3229 register might be used as temporary (see legitimize_pic_address). */
3230 if (in_p && SYMBOLIC_CONST (x) && flag_pic == 2 && rclass != ADDR_REGS)
3231 sri->icode = (TARGET_64BIT ?
3232 CODE_FOR_reloaddi_PIC_addr :
3233 CODE_FOR_reloadsi_PIC_addr);
3234
3235 /* Either scratch or no register needed. */
3236 return NO_REGS;
3237 }
3238
3239 /* Generate code to load SRC, which is a PLUS that is not a
3240 legitimate operand for the LA instruction, into TARGET.
3241 SCRATCH may be used as scratch register. */
3242
3243 void
3244 s390_expand_plus_operand (rtx target, rtx src,
3245 rtx scratch)
3246 {
3247 rtx sum1, sum2;
3248 struct s390_address ad;
3249
3250 /* src must be a PLUS; get its two operands. */
3251 gcc_assert (GET_CODE (src) == PLUS);
3252 gcc_assert (GET_MODE (src) == Pmode);
3253
3254 /* Check if any of the two operands is already scheduled
3255 for replacement by reload. This can happen e.g. when
3256 float registers occur in an address. */
3257 sum1 = find_replacement (&XEXP (src, 0));
3258 sum2 = find_replacement (&XEXP (src, 1));
3259 src = gen_rtx_PLUS (Pmode, sum1, sum2);
3260
3261 /* If the address is already strictly valid, there's nothing to do. */
3262 if (!s390_decompose_address (src, &ad)
3263 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
3264 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
3265 {
3266 /* Otherwise, one of the operands cannot be an address register;
3267 we reload its value into the scratch register. */
3268 if (true_regnum (sum1) < 1 || true_regnum (sum1) > 15)
3269 {
3270 emit_move_insn (scratch, sum1);
3271 sum1 = scratch;
3272 }
3273 if (true_regnum (sum2) < 1 || true_regnum (sum2) > 15)
3274 {
3275 emit_move_insn (scratch, sum2);
3276 sum2 = scratch;
3277 }
3278
3279 /* According to the way these invalid addresses are generated
3280 in reload.c, it should never happen (at least on s390) that
3281 *neither* of the PLUS components, after find_replacements
3282 was applied, is an address register. */
3283 if (sum1 == scratch && sum2 == scratch)
3284 {
3285 debug_rtx (src);
3286 gcc_unreachable ();
3287 }
3288
3289 src = gen_rtx_PLUS (Pmode, sum1, sum2);
3290 }
3291
3292 /* Emit the LOAD ADDRESS pattern. Note that reload of PLUS
3293 is only ever performed on addresses, so we can mark the
3294 sum as legitimate for LA in any case. */
3295 s390_load_address (target, src);
3296 }
3297
3298
3299 /* Return true if ADDR is a valid memory address.
3300 STRICT specifies whether strict register checking applies. */
3301
3302 static bool
3303 s390_legitimate_address_p (enum machine_mode mode, rtx addr, bool strict)
3304 {
3305 struct s390_address ad;
3306
3307 if (TARGET_Z10
3308 && larl_operand (addr, VOIDmode)
3309 && (mode == VOIDmode
3310 || s390_check_symref_alignment (addr, GET_MODE_SIZE (mode))))
3311 return true;
3312
3313 if (!s390_decompose_address (addr, &ad))
3314 return false;
3315
3316 if (strict)
3317 {
3318 if (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
3319 return false;
3320
3321 if (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx)))
3322 return false;
3323 }
3324 else
3325 {
3326 if (ad.base
3327 && !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
3328 || REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
3329 return false;
3330
3331 if (ad.indx
3332 && !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
3333 || REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
3334 return false;
3335 }
3336 return true;
3337 }
3338
3339 /* Return true if OP is a valid operand for the LA instruction.
3340 In 31-bit, we need to prove that the result is used as an
3341 address, as LA performs only a 31-bit addition. */
3342
3343 bool
3344 legitimate_la_operand_p (rtx op)
3345 {
3346 struct s390_address addr;
3347 if (!s390_decompose_address (op, &addr))
3348 return false;
3349
3350 return (TARGET_64BIT || addr.pointer);
3351 }
3352
3353 /* Return true if it is valid *and* preferable to use LA to
3354 compute the sum of OP1 and OP2. */
3355
3356 bool
3357 preferred_la_operand_p (rtx op1, rtx op2)
3358 {
3359 struct s390_address addr;
3360
3361 if (op2 != const0_rtx)
3362 op1 = gen_rtx_PLUS (Pmode, op1, op2);
3363
3364 if (!s390_decompose_address (op1, &addr))
3365 return false;
3366 if (addr.base && !REGNO_OK_FOR_BASE_P (REGNO (addr.base)))
3367 return false;
3368 if (addr.indx && !REGNO_OK_FOR_INDEX_P (REGNO (addr.indx)))
3369 return false;
3370
3371 /* Avoid LA instructions with index register on z196; it is
3372 preferable to use regular add instructions when possible. */
3373 if (addr.indx && s390_tune == PROCESSOR_2817_Z196)
3374 return false;
3375
3376 if (!TARGET_64BIT && !addr.pointer)
3377 return false;
3378
3379 if (addr.pointer)
3380 return true;
3381
3382 if ((addr.base && REG_P (addr.base) && REG_POINTER (addr.base))
3383 || (addr.indx && REG_P (addr.indx) && REG_POINTER (addr.indx)))
3384 return true;
3385
3386 return false;
3387 }
3388
3389 /* Emit a forced load-address operation to load SRC into DST.
3390 This will use the LOAD ADDRESS instruction even in situations
3391 where legitimate_la_operand_p (SRC) returns false. */
3392
3393 void
3394 s390_load_address (rtx dst, rtx src)
3395 {
3396 if (TARGET_64BIT)
3397 emit_move_insn (dst, src);
3398 else
3399 emit_insn (gen_force_la_31 (dst, src));
3400 }
3401
3402 /* Return a legitimate reference for ORIG (an address) using the
3403 register REG. If REG is 0, a new pseudo is generated.
3404
3405 There are two types of references that must be handled:
3406
3407 1. Global data references must load the address from the GOT, via
3408 the PIC reg. An insn is emitted to do this load, and the reg is
3409 returned.
3410
3411 2. Static data references, constant pool addresses, and code labels
3412 compute the address as an offset from the GOT, whose base is in
3413 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
3414 differentiate them from global data objects. The returned
3415 address is the PIC reg + an unspec constant.
3416
3417 TARGET_LEGITIMIZE_ADDRESS_P rejects symbolic references unless the PIC
3418 reg also appears in the address. */
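/* Sketch of the common cases (not part of the original sources): with
   flag_pic == 1 a global SYMBOL_REF becomes a load from its GOT slot,
   i.e. a MEM of (plus pic_offset_table_rtx
   (const (unspec [sym] UNSPEC_GOT))); local symbols are either reached
   PC-relatively via LARL or through a GOTOFF constant placed in the
   literal pool.  */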
3419
3420 rtx
3421 legitimize_pic_address (rtx orig, rtx reg)
3422 {
3423 rtx addr = orig;
3424 rtx new_rtx = orig;
3425 rtx base;
3426
3427 gcc_assert (!TLS_SYMBOLIC_CONST (addr));
3428
3429 if (GET_CODE (addr) == LABEL_REF
3430 || (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (addr)))
3431 {
3432 /* This is a local symbol. */
3433 if (TARGET_CPU_ZARCH && larl_operand (addr, VOIDmode))
3434 {
3435 /* Access local symbols PC-relative via LARL.
3436 This is the same as in the non-PIC case, so it is
3437 handled automatically ... */
3438 }
3439 else
3440 {
3441 /* Access local symbols relative to the GOT. */
3442
3443 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3444
3445 if (reload_in_progress || reload_completed)
3446 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3447
3448 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
3449 addr = gen_rtx_CONST (Pmode, addr);
3450 addr = force_const_mem (Pmode, addr);
3451 emit_move_insn (temp, addr);
3452
3453 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3454 if (reg != 0)
3455 {
3456 s390_load_address (reg, new_rtx);
3457 new_rtx = reg;
3458 }
3459 }
3460 }
3461 else if (GET_CODE (addr) == SYMBOL_REF)
3462 {
3463 if (reg == 0)
3464 reg = gen_reg_rtx (Pmode);
3465
3466 if (flag_pic == 1)
3467 {
3468 /* Assume GOT offset < 4k. This is handled the same way
3469 in both 31- and 64-bit code (@GOT). */
3470
3471 if (reload_in_progress || reload_completed)
3472 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3473
3474 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3475 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3476 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
3477 new_rtx = gen_const_mem (Pmode, new_rtx);
3478 emit_move_insn (reg, new_rtx);
3479 new_rtx = reg;
3480 }
3481 else if (TARGET_CPU_ZARCH)
3482 {
3483 /* If the GOT offset might be >= 4k, we determine the position
3484 of the GOT entry via a PC-relative LARL (@GOTENT). */
3485
3486 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3487
3488 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
3489 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
3490
3491 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
3492 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3493 emit_move_insn (temp, new_rtx);
3494
3495 new_rtx = gen_const_mem (Pmode, temp);
3496 emit_move_insn (reg, new_rtx);
3497 new_rtx = reg;
3498 }
3499 else
3500 {
3501 /* If the GOT offset might be >= 4k, we have to load it
3502 from the literal pool (@GOT). */
3503
3504 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3505
3506 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
3507 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
3508
3509 if (reload_in_progress || reload_completed)
3510 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3511
3512 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3513 addr = gen_rtx_CONST (Pmode, addr);
3514 addr = force_const_mem (Pmode, addr);
3515 emit_move_insn (temp, addr);
3516
3517 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3518 new_rtx = gen_const_mem (Pmode, new_rtx);
3519 emit_move_insn (reg, new_rtx);
3520 new_rtx = reg;
3521 }
3522 }
3523 else
3524 {
3525 if (GET_CODE (addr) == CONST)
3526 {
3527 addr = XEXP (addr, 0);
3528 if (GET_CODE (addr) == UNSPEC)
3529 {
3530 gcc_assert (XVECLEN (addr, 0) == 1);
3531 switch (XINT (addr, 1))
3532 {
3533 /* If someone moved a GOT-relative UNSPEC
3534 out of the literal pool, force them back in. */
3535 case UNSPEC_GOTOFF:
3536 case UNSPEC_PLTOFF:
3537 new_rtx = force_const_mem (Pmode, orig);
3538 break;
3539
3540 /* @GOT is OK as is if small. */
3541 case UNSPEC_GOT:
3542 if (flag_pic == 2)
3543 new_rtx = force_const_mem (Pmode, orig);
3544 break;
3545
3546 /* @GOTENT is OK as is. */
3547 case UNSPEC_GOTENT:
3548 break;
3549
3550 /* @PLT is OK as is on 64-bit, must be converted to
3551 GOT-relative @PLTOFF on 31-bit. */
3552 case UNSPEC_PLT:
3553 if (!TARGET_CPU_ZARCH)
3554 {
3555 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3556
3557 if (reload_in_progress || reload_completed)
3558 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3559
3560 addr = XVECEXP (addr, 0, 0);
3561 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr),
3562 UNSPEC_PLTOFF);
3563 addr = gen_rtx_CONST (Pmode, addr);
3564 addr = force_const_mem (Pmode, addr);
3565 emit_move_insn (temp, addr);
3566
3567 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3568 if (reg != 0)
3569 {
3570 s390_load_address (reg, new_rtx);
3571 new_rtx = reg;
3572 }
3573 }
3574 break;
3575
3576 /* Everything else cannot happen. */
3577 default:
3578 gcc_unreachable ();
3579 }
3580 }
3581 else
3582 gcc_assert (GET_CODE (addr) == PLUS);
3583 }
3584 if (GET_CODE (addr) == PLUS)
3585 {
3586 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
3587
3588 gcc_assert (!TLS_SYMBOLIC_CONST (op0));
3589 gcc_assert (!TLS_SYMBOLIC_CONST (op1));
3590
3591 /* Check first to see if this is a constant offset
3592 from a local symbol reference. */
3593 if ((GET_CODE (op0) == LABEL_REF
3594 || (GET_CODE (op0) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (op0)))
3595 && GET_CODE (op1) == CONST_INT)
3596 {
3597 if (TARGET_CPU_ZARCH
3598 && larl_operand (op0, VOIDmode)
3599 && INTVAL (op1) < (HOST_WIDE_INT)1 << 31
3600 && INTVAL (op1) >= -((HOST_WIDE_INT)1 << 31))
3601 {
3602 if (INTVAL (op1) & 1)
3603 {
3604 /* LARL can't handle odd offsets, so emit a
3605 pair of LARL and LA. */
3606 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3607
3608 if (!DISP_IN_RANGE (INTVAL (op1)))
3609 {
3610 HOST_WIDE_INT even = INTVAL (op1) - 1;
3611 op0 = gen_rtx_PLUS (Pmode, op0, GEN_INT (even));
3612 op0 = gen_rtx_CONST (Pmode, op0);
3613 op1 = const1_rtx;
3614 }
3615
3616 emit_move_insn (temp, op0);
3617 new_rtx = gen_rtx_PLUS (Pmode, temp, op1);
3618
3619 if (reg != 0)
3620 {
3621 s390_load_address (reg, new_rtx);
3622 new_rtx = reg;
3623 }
3624 }
3625 else
3626 {
3627 /* If the offset is even, we can just use LARL.
3628 This will happen automatically. */
3629 }
3630 }
3631 else
3632 {
3633 /* Access local symbols relative to the GOT. */
3634
3635 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3636
3637 if (reload_in_progress || reload_completed)
3638 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3639
3640 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
3641 UNSPEC_GOTOFF);
3642 addr = gen_rtx_PLUS (Pmode, addr, op1);
3643 addr = gen_rtx_CONST (Pmode, addr);
3644 addr = force_const_mem (Pmode, addr);
3645 emit_move_insn (temp, addr);
3646
3647 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3648 if (reg != 0)
3649 {
3650 s390_load_address (reg, new_rtx);
3651 new_rtx = reg;
3652 }
3653 }
3654 }
3655
3656 /* Now, check whether it is a GOT relative symbol plus offset
3657 that was pulled out of the literal pool. Force it back in. */
3658
3659 else if (GET_CODE (op0) == UNSPEC
3660 && GET_CODE (op1) == CONST_INT
3661 && XINT (op0, 1) == UNSPEC_GOTOFF)
3662 {
3663 gcc_assert (XVECLEN (op0, 0) == 1);
3664
3665 new_rtx = force_const_mem (Pmode, orig);
3666 }
3667
3668 /* Otherwise, compute the sum. */
3669 else
3670 {
3671 base = legitimize_pic_address (XEXP (addr, 0), reg);
3672 new_rtx = legitimize_pic_address (XEXP (addr, 1),
3673 base == reg ? NULL_RTX : reg);
3674 if (GET_CODE (new_rtx) == CONST_INT)
3675 new_rtx = plus_constant (base, INTVAL (new_rtx));
3676 else
3677 {
3678 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
3679 {
3680 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
3681 new_rtx = XEXP (new_rtx, 1);
3682 }
3683 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
3684 }
3685
3686 if (GET_CODE (new_rtx) == CONST)
3687 new_rtx = XEXP (new_rtx, 0);
3688 new_rtx = force_operand (new_rtx, 0);
3689 }
3690 }
3691 }
3692 return new_rtx;
3693 }
3694
3695 /* Load the thread pointer into a register. */
3696
3697 rtx
3698 s390_get_thread_pointer (void)
3699 {
3700 rtx tp = gen_reg_rtx (Pmode);
3701
3702 emit_move_insn (tp, gen_rtx_REG (Pmode, TP_REGNUM));
3703 mark_reg_pointer (tp, BITS_PER_WORD);
3704
3705 return tp;
3706 }
3707
3708 /* Emit a TLS call insn. The call target is the SYMBOL_REF stored
3709 in s390_tls_symbol, which always refers to __tls_get_offset.
3710 The returned offset is written to RESULT_REG and a USE rtx is
3711 generated for TLS_CALL. */
3712
3713 static GTY(()) rtx s390_tls_symbol;
3714
3715 static void
3716 s390_emit_tls_call_insn (rtx result_reg, rtx tls_call)
3717 {
3718 rtx insn;
3719
3720 gcc_assert (flag_pic);
3721
3722 if (!s390_tls_symbol)
3723 s390_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_offset");
3724
3725 insn = s390_emit_call (s390_tls_symbol, tls_call, result_reg,
3726 gen_rtx_REG (Pmode, RETURN_REGNUM));
3727
3728 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), result_reg);
3729 RTL_CONST_CALL_P (insn) = 1;
3730 }
3731
3732 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3733 this (thread-local) address. REG may be used as temporary. */
3734
3735 static rtx
3736 legitimize_tls_address (rtx addr, rtx reg)
3737 {
3738 rtx new_rtx, tls_call, temp, base, r2, insn;
3739
3740 if (GET_CODE (addr) == SYMBOL_REF)
3741 switch (tls_symbolic_operand (addr))
3742 {
3743 case TLS_MODEL_GLOBAL_DYNAMIC:
3744 start_sequence ();
3745 r2 = gen_rtx_REG (Pmode, 2);
3746 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_TLSGD);
3747 new_rtx = gen_rtx_CONST (Pmode, tls_call);
3748 new_rtx = force_const_mem (Pmode, new_rtx);
3749 emit_move_insn (r2, new_rtx);
3750 s390_emit_tls_call_insn (r2, tls_call);
3751 insn = get_insns ();
3752 end_sequence ();
3753
3754 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3755 temp = gen_reg_rtx (Pmode);
3756 emit_libcall_block (insn, temp, r2, new_rtx);
3757
3758 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3759 if (reg != 0)
3760 {
3761 s390_load_address (reg, new_rtx);
3762 new_rtx = reg;
3763 }
3764 break;
3765
3766 case TLS_MODEL_LOCAL_DYNAMIC:
3767 start_sequence ();
3768 r2 = gen_rtx_REG (Pmode, 2);
3769 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM);
3770 new_rtx = gen_rtx_CONST (Pmode, tls_call);
3771 new_rtx = force_const_mem (Pmode, new_rtx);
3772 emit_move_insn (r2, new_rtx);
3773 s390_emit_tls_call_insn (r2, tls_call);
3774 insn = get_insns ();
3775 end_sequence ();
3776
3777 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM_NTPOFF);
3778 temp = gen_reg_rtx (Pmode);
3779 emit_libcall_block (insn, temp, r2, new_rtx);
3780
3781 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3782 base = gen_reg_rtx (Pmode);
3783 s390_load_address (base, new_rtx);
3784
3785 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_DTPOFF);
3786 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3787 new_rtx = force_const_mem (Pmode, new_rtx);
3788 temp = gen_reg_rtx (Pmode);
3789 emit_move_insn (temp, new_rtx);
3790
3791 new_rtx = gen_rtx_PLUS (Pmode, base, temp);
3792 if (reg != 0)
3793 {
3794 s390_load_address (reg, new_rtx);
3795 new_rtx = reg;
3796 }
3797 break;
3798
3799 case TLS_MODEL_INITIAL_EXEC:
3800 if (flag_pic == 1)
3801 {
3802 /* Assume GOT offset < 4k. This is handled the same way
3803 in both 31- and 64-bit code. */
3804
3805 if (reload_in_progress || reload_completed)
3806 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3807
3808 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3809 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3810 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
3811 new_rtx = gen_const_mem (Pmode, new_rtx);
3812 temp = gen_reg_rtx (Pmode);
3813 emit_move_insn (temp, new_rtx);
3814 }
3815 else if (TARGET_CPU_ZARCH)
3816 {
3817 /* If the GOT offset might be >= 4k, we determine the position
3818 of the GOT entry via a PC-relative LARL. */
3819
3820 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3821 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3822 temp = gen_reg_rtx (Pmode);
3823 emit_move_insn (temp, new_rtx);
3824
3825 new_rtx = gen_const_mem (Pmode, temp);
3826 temp = gen_reg_rtx (Pmode);
3827 emit_move_insn (temp, new_rtx);
3828 }
3829 else if (flag_pic)
3830 {
3831 /* If the GOT offset might be >= 4k, we have to load it
3832 from the literal pool. */
3833
3834 if (reload_in_progress || reload_completed)
3835 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3836
3837 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3838 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3839 new_rtx = force_const_mem (Pmode, new_rtx);
3840 temp = gen_reg_rtx (Pmode);
3841 emit_move_insn (temp, new_rtx);
3842
3843 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3844 new_rtx = gen_const_mem (Pmode, new_rtx);
3845
3846 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
3847 temp = gen_reg_rtx (Pmode);
3848 emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
3849 }
3850 else
3851 {
3852 /* In position-dependent code, load the absolute address of
3853 the GOT entry from the literal pool. */
3854
3855 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3856 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3857 new_rtx = force_const_mem (Pmode, new_rtx);
3858 temp = gen_reg_rtx (Pmode);
3859 emit_move_insn (temp, new_rtx);
3860
3861 new_rtx = temp;
3862 new_rtx = gen_const_mem (Pmode, new_rtx);
3863 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
3864 temp = gen_reg_rtx (Pmode);
3865 emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
3866 }
3867
3868 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3869 if (reg != 0)
3870 {
3871 s390_load_address (reg, new_rtx);
3872 new_rtx = reg;
3873 }
3874 break;
3875
3876 case TLS_MODEL_LOCAL_EXEC:
3877 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3878 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3879 new_rtx = force_const_mem (Pmode, new_rtx);
3880 temp = gen_reg_rtx (Pmode);
3881 emit_move_insn (temp, new_rtx);
3882
3883 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3884 if (reg != 0)
3885 {
3886 s390_load_address (reg, new_rtx);
3887 new_rtx = reg;
3888 }
3889 break;
3890
3891 default:
3892 gcc_unreachable ();
3893 }
3894
3895 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == UNSPEC)
3896 {
3897 switch (XINT (XEXP (addr, 0), 1))
3898 {
3899 case UNSPEC_INDNTPOFF:
3900 gcc_assert (TARGET_CPU_ZARCH);
3901 new_rtx = addr;
3902 break;
3903
3904 default:
3905 gcc_unreachable ();
3906 }
3907 }
3908
3909 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
3910 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
3911 {
3912 new_rtx = XEXP (XEXP (addr, 0), 0);
3913 if (GET_CODE (new_rtx) != SYMBOL_REF)
3914 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3915
3916 new_rtx = legitimize_tls_address (new_rtx, reg);
3917 new_rtx = plus_constant (new_rtx, INTVAL (XEXP (XEXP (addr, 0), 1)));
3918 new_rtx = force_operand (new_rtx, 0);
3919 }
3920
3921 else
3922 gcc_unreachable (); /* for now ... */
3923
3924 return new_rtx;
3925 }
3926
3927 /* Emit insns making the address in operands[1] valid for a standard
3928 move to operands[0]. operands[1] is replaced by an address which
3929 should be used instead of the former RTX to emit the move
3930 pattern. */
3931
3932 void
3933 emit_symbolic_move (rtx *operands)
3934 {
3935 rtx temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
3936
3937 if (GET_CODE (operands[0]) == MEM)
3938 operands[1] = force_reg (Pmode, operands[1]);
3939 else if (TLS_SYMBOLIC_CONST (operands[1]))
3940 operands[1] = legitimize_tls_address (operands[1], temp);
3941 else if (flag_pic)
3942 operands[1] = legitimize_pic_address (operands[1], temp);
3943 }
3944
3945 /* Try machine-dependent ways of modifying an illegitimate address X
3946 to be legitimate. If we find one, return the new, valid address.
3947
3948 OLDX is the address as it was before break_out_memory_refs was called.
3949 In some cases it is useful to look at this to decide what needs to be done.
3950
3951 MODE is the mode of the operand pointed to by X.
3952
3953 When -fpic is used, special handling is needed for symbolic references.
3954 See comments by legitimize_pic_address for details. */
3955
3956 static rtx
3957 s390_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
3958 enum machine_mode mode ATTRIBUTE_UNUSED)
3959 {
3960 rtx constant_term = const0_rtx;
3961
3962 if (TLS_SYMBOLIC_CONST (x))
3963 {
3964 x = legitimize_tls_address (x, 0);
3965
3966 if (s390_legitimate_address_p (mode, x, FALSE))
3967 return x;
3968 }
3969 else if (GET_CODE (x) == PLUS
3970 && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
3971 || TLS_SYMBOLIC_CONST (XEXP (x, 1))))
3972 {
3973 return x;
3974 }
3975 else if (flag_pic)
3976 {
3977 if (SYMBOLIC_CONST (x)
3978 || (GET_CODE (x) == PLUS
3979 && (SYMBOLIC_CONST (XEXP (x, 0))
3980 || SYMBOLIC_CONST (XEXP (x, 1)))))
3981 x = legitimize_pic_address (x, 0);
3982
3983 if (s390_legitimate_address_p (mode, x, FALSE))
3984 return x;
3985 }
3986
3987 x = eliminate_constant_term (x, &constant_term);
3988
3989 /* Optimize loading of large displacements by splitting them
3990 into the multiple of 4K and the rest; this allows the
3991 former to be CSE'd if possible.
3992
3993 Don't do this if the displacement is added to a register
3994 pointing into the stack frame, as the offsets will
3995 change later anyway. */
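/* For instance, a constant term of 0x12345 is split below into
   upper = 0x12000, which is loaded into a register (and can therefore
   be CSE'd), and lower = 0x345, which stays as the in-range displacement.  */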
3996
3997 if (GET_CODE (constant_term) == CONST_INT
3998 && !TARGET_LONG_DISPLACEMENT
3999 && !DISP_IN_RANGE (INTVAL (constant_term))
4000 && !(REG_P (x) && REGNO_PTR_FRAME_P (REGNO (x))))
4001 {
4002 HOST_WIDE_INT lower = INTVAL (constant_term) & 0xfff;
4003 HOST_WIDE_INT upper = INTVAL (constant_term) ^ lower;
4004
4005 rtx temp = gen_reg_rtx (Pmode);
4006 rtx val = force_operand (GEN_INT (upper), temp);
4007 if (val != temp)
4008 emit_move_insn (temp, val);
4009
4010 x = gen_rtx_PLUS (Pmode, x, temp);
4011 constant_term = GEN_INT (lower);
4012 }
4013
4014 if (GET_CODE (x) == PLUS)
4015 {
4016 if (GET_CODE (XEXP (x, 0)) == REG)
4017 {
4018 rtx temp = gen_reg_rtx (Pmode);
4019 rtx val = force_operand (XEXP (x, 1), temp);
4020 if (val != temp)
4021 emit_move_insn (temp, val);
4022
4023 x = gen_rtx_PLUS (Pmode, XEXP (x, 0), temp);
4024 }
4025
4026 else if (GET_CODE (XEXP (x, 1)) == REG)
4027 {
4028 rtx temp = gen_reg_rtx (Pmode);
4029 rtx val = force_operand (XEXP (x, 0), temp);
4030 if (val != temp)
4031 emit_move_insn (temp, val);
4032
4033 x = gen_rtx_PLUS (Pmode, temp, XEXP (x, 1));
4034 }
4035 }
4036
4037 if (constant_term != const0_rtx)
4038 x = gen_rtx_PLUS (Pmode, x, constant_term);
4039
4040 return x;
4041 }
4042
4043 /* Try a machine-dependent way of reloading an illegitimate address AD
4044 operand. If we find one, push the reload and return the new address.
4045
4046 MODE is the mode of the enclosing MEM. OPNUM is the operand number
4047 and TYPE is the reload type of the current reload. */
4048
4049 rtx
4050 legitimize_reload_address (rtx ad, enum machine_mode mode ATTRIBUTE_UNUSED,
4051 int opnum, int type)
4052 {
4053 if (!optimize || TARGET_LONG_DISPLACEMENT)
4054 return NULL_RTX;
4055
4056 if (GET_CODE (ad) == PLUS)
4057 {
4058 rtx tem = simplify_binary_operation (PLUS, Pmode,
4059 XEXP (ad, 0), XEXP (ad, 1));
4060 if (tem)
4061 ad = tem;
4062 }
4063
4064 if (GET_CODE (ad) == PLUS
4065 && GET_CODE (XEXP (ad, 0)) == REG
4066 && GET_CODE (XEXP (ad, 1)) == CONST_INT
4067 && !DISP_IN_RANGE (INTVAL (XEXP (ad, 1))))
4068 {
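/* Split the oversized displacement as in s390_legitimize_address: the
   4K-aligned part (pushed to the literal pool if it is not a legitimate
   reload constant) is reloaded into an address register, and only the
   low 12 bits remain as the displacement.  */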
4069 HOST_WIDE_INT lower = INTVAL (XEXP (ad, 1)) & 0xfff;
4070 HOST_WIDE_INT upper = INTVAL (XEXP (ad, 1)) ^ lower;
4071 rtx cst, tem, new_rtx;
4072
4073 cst = GEN_INT (upper);
4074 if (!legitimate_reload_constant_p (cst))
4075 cst = force_const_mem (Pmode, cst);
4076
4077 tem = gen_rtx_PLUS (Pmode, XEXP (ad, 0), cst);
4078 new_rtx = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));
4079
4080 push_reload (XEXP (tem, 1), 0, &XEXP (tem, 1), 0,
4081 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
4082 opnum, (enum reload_type) type);
4083 return new_rtx;
4084 }
4085
4086 return NULL_RTX;
4087 }
4088
4089 /* Emit code to move LEN bytes from SRC to DST. */
4090
4091 void
4092 s390_expand_movmem (rtx dst, rtx src, rtx len)
4093 {
4094 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
4095 {
4096 if (INTVAL (len) > 0)
4097 emit_insn (gen_movmem_short (dst, src, GEN_INT (INTVAL (len) - 1)));
4098 }
4099
4100 else if (TARGET_MVCLE)
4101 {
4102 emit_insn (gen_movmem_long (dst, src, convert_to_mode (Pmode, len, 1)));
4103 }
4104
4105 else
4106 {
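/* General case: copy in a loop of 256-byte blocks.  COUNT is set to
   LEN - 1; BLOCKS (COUNT >> 8) gives the number of full blocks moved
   in the loop, and the trailing partial block is moved by the final
   movmem_short after the loop.  */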
4107 rtx dst_addr, src_addr, count, blocks, temp;
4108 rtx loop_start_label = gen_label_rtx ();
4109 rtx loop_end_label = gen_label_rtx ();
4110 rtx end_label = gen_label_rtx ();
4111 enum machine_mode mode;
4112
4113 mode = GET_MODE (len);
4114 if (mode == VOIDmode)
4115 mode = Pmode;
4116
4117 dst_addr = gen_reg_rtx (Pmode);
4118 src_addr = gen_reg_rtx (Pmode);
4119 count = gen_reg_rtx (mode);
4120 blocks = gen_reg_rtx (mode);
4121
4122 convert_move (count, len, 1);
4123 emit_cmp_and_jump_insns (count, const0_rtx,
4124 EQ, NULL_RTX, mode, 1, end_label);
4125
4126 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
4127 emit_move_insn (src_addr, force_operand (XEXP (src, 0), NULL_RTX));
4128 dst = change_address (dst, VOIDmode, dst_addr);
4129 src = change_address (src, VOIDmode, src_addr);
4130
4131 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4132 OPTAB_DIRECT);
4133 if (temp != count)
4134 emit_move_insn (count, temp);
4135
4136 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4137 OPTAB_DIRECT);
4138 if (temp != blocks)
4139 emit_move_insn (blocks, temp);
4140
4141 emit_cmp_and_jump_insns (blocks, const0_rtx,
4142 EQ, NULL_RTX, mode, 1, loop_end_label);
4143
4144 emit_label (loop_start_label);
4145
4146 if (TARGET_Z10
4147 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 768))
4148 {
4149 rtx prefetch;
4150
4151 /* Issue a read prefetch for the +3 cache line. */
4152 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, src_addr, GEN_INT (768)),
4153 const0_rtx, const0_rtx);
4154 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4155 emit_insn (prefetch);
4156
4157 /* Issue a write prefetch for the +3 cache line. */
4158 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (768)),
4159 const1_rtx, const0_rtx);
4160 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4161 emit_insn (prefetch);
4162 }
4163
4164 emit_insn (gen_movmem_short (dst, src, GEN_INT (255)));
4165 s390_load_address (dst_addr,
4166 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
4167 s390_load_address (src_addr,
4168 gen_rtx_PLUS (Pmode, src_addr, GEN_INT (256)));
4169
4170 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4171 OPTAB_DIRECT);
4172 if (temp != blocks)
4173 emit_move_insn (blocks, temp);
4174
4175 emit_cmp_and_jump_insns (blocks, const0_rtx,
4176 EQ, NULL_RTX, mode, 1, loop_end_label);
4177
4178 emit_jump (loop_start_label);
4179 emit_label (loop_end_label);
4180
4181 emit_insn (gen_movmem_short (dst, src,
4182 convert_to_mode (Pmode, count, 1)));
4183 emit_label (end_label);
4184 }
4185 }
4186
4187 /* Emit code to set LEN bytes at DST to VAL.
4188 Make use of clrmem if VAL is zero. */
4189
4190 void
4191 s390_expand_setmem (rtx dst, rtx len, rtx val)
4192 {
4193 if (GET_CODE (len) == CONST_INT && INTVAL (len) == 0)
4194 return;
4195
4196 gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);
4197
4198 if (GET_CODE (len) == CONST_INT && INTVAL (len) > 0 && INTVAL (len) <= 257)
4199 {
4200 if (val == const0_rtx && INTVAL (len) <= 256)
4201 emit_insn (gen_clrmem_short (dst, GEN_INT (INTVAL (len) - 1)));
4202 else
4203 {
4204 /* Initialize memory by storing the first byte. */
4205 emit_move_insn (adjust_address (dst, QImode, 0), val);
4206
4207 if (INTVAL (len) > 1)
4208 {
4209 /* Initiate 1 byte overlap move.
4210 The first byte of DST is propagated through DSTP1.
4211 Prepare a movmem for: DST+1 = DST (length = LEN - 1).
4212 DST is set to size 1 so the rest of the memory location
4213 does not count as source operand. */
4214 rtx dstp1 = adjust_address (dst, VOIDmode, 1);
4215 set_mem_size (dst, const1_rtx);
4216
4217 emit_insn (gen_movmem_short (dstp1, dst,
4218 GEN_INT (INTVAL (len) - 2)));
4219 }
4220 }
4221 }
4222
4223 else if (TARGET_MVCLE)
4224 {
4225 val = force_not_mem (convert_modes (Pmode, QImode, val, 1));
4226 emit_insn (gen_setmem_long (dst, convert_to_mode (Pmode, len, 1), val));
4227 }
4228
4229 else
4230 {
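/* General case: set the memory in a loop of 256-byte blocks.  With
   VAL == 0 each block is cleared via clrmem; otherwise the first byte
   is stored once and then propagated forward by overlapping moves from
   DST to DST + 1.  */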
4231 rtx dst_addr, count, blocks, temp, dstp1 = NULL_RTX;
4232 rtx loop_start_label = gen_label_rtx ();
4233 rtx loop_end_label = gen_label_rtx ();
4234 rtx end_label = gen_label_rtx ();
4235 enum machine_mode mode;
4236
4237 mode = GET_MODE (len);
4238 if (mode == VOIDmode)
4239 mode = Pmode;
4240
4241 dst_addr = gen_reg_rtx (Pmode);
4242 count = gen_reg_rtx (mode);
4243 blocks = gen_reg_rtx (mode);
4244
4245 convert_move (count, len, 1);
4246 emit_cmp_and_jump_insns (count, const0_rtx,
4247 EQ, NULL_RTX, mode, 1, end_label);
4248
4249 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
4250 dst = change_address (dst, VOIDmode, dst_addr);
4251
4252 if (val == const0_rtx)
4253 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4254 OPTAB_DIRECT);
4255 else
4256 {
4257 dstp1 = adjust_address (dst, VOIDmode, 1);
4258 set_mem_size (dst, const1_rtx);
4259
4260 /* Initialize memory by storing the first byte. */
4261 emit_move_insn (adjust_address (dst, QImode, 0), val);
4262
4263 /* If count is 1 we are done. */
4264 emit_cmp_and_jump_insns (count, const1_rtx,
4265 EQ, NULL_RTX, mode, 1, end_label);
4266
4267 temp = expand_binop (mode, add_optab, count, GEN_INT (-2), count, 1,
4268 OPTAB_DIRECT);
4269 }
4270 if (temp != count)
4271 emit_move_insn (count, temp);
4272
4273 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4274 OPTAB_DIRECT);
4275 if (temp != blocks)
4276 emit_move_insn (blocks, temp);
4277
4278 emit_cmp_and_jump_insns (blocks, const0_rtx,
4279 EQ, NULL_RTX, mode, 1, loop_end_label);
4280
4281 emit_label (loop_start_label);
4282
4283 if (TARGET_Z10
4284 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 1024))
4285 {
4286 /* Issue a write prefetch for the +4 cache line. */
4287 rtx prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr,
4288 GEN_INT (1024)),
4289 const1_rtx, const0_rtx);
4290 emit_insn (prefetch);
4291 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4292 }
4293
4294 if (val == const0_rtx)
4295 emit_insn (gen_clrmem_short (dst, GEN_INT (255)));
4296 else
4297 emit_insn (gen_movmem_short (dstp1, dst, GEN_INT (255)));
4298 s390_load_address (dst_addr,
4299 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
4300
4301 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4302 OPTAB_DIRECT);
4303 if (temp != blocks)
4304 emit_move_insn (blocks, temp);
4305
4306 emit_cmp_and_jump_insns (blocks, const0_rtx,
4307 EQ, NULL_RTX, mode, 1, loop_end_label);
4308
4309 emit_jump (loop_start_label);
4310 emit_label (loop_end_label);
4311
4312 if (val == const0_rtx)
4313 emit_insn (gen_clrmem_short (dst, convert_to_mode (Pmode, count, 1)));
4314 else
4315 emit_insn (gen_movmem_short (dstp1, dst, convert_to_mode (Pmode, count, 1)));
4316 emit_label (end_label);
4317 }
4318 }
4319
4320 /* Emit code to compare LEN bytes at OP0 with those at OP1,
4321 and return the result in TARGET. */
4322
4323 void
4324 s390_expand_cmpmem (rtx target, rtx op0, rtx op1, rtx len)
4325 {
4326 rtx ccreg = gen_rtx_REG (CCUmode, CC_REGNUM);
4327 rtx tmp;
4328
4329 /* As the result of CMPINT is inverted compared to what we need,
4330 we have to swap the operands. */
4331 tmp = op0; op0 = op1; op1 = tmp;
4332
4333 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
4334 {
4335 if (INTVAL (len) > 0)
4336 {
4337 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (INTVAL (len) - 1)));
4338 emit_insn (gen_cmpint (target, ccreg));
4339 }
4340 else
4341 emit_move_insn (target, const0_rtx);
4342 }
4343 else if (TARGET_MVCLE)
4344 {
4345 emit_insn (gen_cmpmem_long (op0, op1, convert_to_mode (Pmode, len, 1)));
4346 emit_insn (gen_cmpint (target, ccreg));
4347 }
4348 else
4349 {
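/* General case: compare in a loop of 256-byte blocks, leaving the loop
   early as soon as a block compares unequal; the final CMPINT turns the
   resulting condition code into the integer result in TARGET.  */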
4350 rtx addr0, addr1, count, blocks, temp;
4351 rtx loop_start_label = gen_label_rtx ();
4352 rtx loop_end_label = gen_label_rtx ();
4353 rtx end_label = gen_label_rtx ();
4354 enum machine_mode mode;
4355
4356 mode = GET_MODE (len);
4357 if (mode == VOIDmode)
4358 mode = Pmode;
4359
4360 addr0 = gen_reg_rtx (Pmode);
4361 addr1 = gen_reg_rtx (Pmode);
4362 count = gen_reg_rtx (mode);
4363 blocks = gen_reg_rtx (mode);
4364
4365 convert_move (count, len, 1);
4366 emit_cmp_and_jump_insns (count, const0_rtx,
4367 EQ, NULL_RTX, mode, 1, end_label);
4368
4369 emit_move_insn (addr0, force_operand (XEXP (op0, 0), NULL_RTX));
4370 emit_move_insn (addr1, force_operand (XEXP (op1, 0), NULL_RTX));
4371 op0 = change_address (op0, VOIDmode, addr0);
4372 op1 = change_address (op1, VOIDmode, addr1);
4373
4374 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4375 OPTAB_DIRECT);
4376 if (temp != count)
4377 emit_move_insn (count, temp);
4378
4379 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4380 OPTAB_DIRECT);
4381 if (temp != blocks)
4382 emit_move_insn (blocks, temp);
4383
4384 emit_cmp_and_jump_insns (blocks, const0_rtx,
4385 EQ, NULL_RTX, mode, 1, loop_end_label);
4386
4387 emit_label (loop_start_label);
4388
4389 if (TARGET_Z10
4390 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 512))
4391 {
4392 rtx prefetch;
4393
4394 /* Issue a read prefetch for the +2 cache line of operand 1. */
4395 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr0, GEN_INT (512)),
4396 const0_rtx, const0_rtx);
4397 emit_insn (prefetch);
4398 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4399
4400 /* Issue a read prefetch for the +2 cache line of operand 2. */
4401 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr1, GEN_INT (512)),
4402 const0_rtx, const0_rtx);
4403 emit_insn (prefetch);
4404 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4405 }
4406
4407 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (255)));
4408 temp = gen_rtx_NE (VOIDmode, ccreg, const0_rtx);
4409 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
4410 gen_rtx_LABEL_REF (VOIDmode, end_label), pc_rtx);
4411 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
4412 emit_jump_insn (temp);
4413
4414 s390_load_address (addr0,
4415 gen_rtx_PLUS (Pmode, addr0, GEN_INT (256)));
4416 s390_load_address (addr1,
4417 gen_rtx_PLUS (Pmode, addr1, GEN_INT (256)));
4418
4419 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4420 OPTAB_DIRECT);
4421 if (temp != blocks)
4422 emit_move_insn (blocks, temp);
4423
4424 emit_cmp_and_jump_insns (blocks, const0_rtx,
4425 EQ, NULL_RTX, mode, 1, loop_end_label);
4426
4427 emit_jump (loop_start_label);
4428 emit_label (loop_end_label);
4429
4430 emit_insn (gen_cmpmem_short (op0, op1,
4431 convert_to_mode (Pmode, count, 1)));
4432 emit_label (end_label);
4433
4434 emit_insn (gen_cmpint (target, ccreg));
4435 }
4436 }
4437
4438
4439 /* Expand conditional increment or decrement using alc/slb instructions.
4440 Should generate code setting DST to either SRC or SRC + INCREMENT,
4441 depending on the result of the comparison CMP_OP0 CMP_CODE CMP_OP1.
4442 Returns true if successful, false otherwise.
4443
4444 That makes it possible to implement some if-constructs without jumps e.g.:
4445 (borrow = CC0 | CC1 and carry = CC2 | CC3)
4446 unsigned int a, b, c;
4447 if (a < b) c++; -> CCU b > a -> CC2; c += carry;
4448 if (a < b) c--; -> CCL3 a - b -> borrow; c -= borrow;
4449 if (a <= b) c++; -> CCL3 b - a -> borrow; c += carry;
4450 if (a <= b) c--; -> CCU a <= b -> borrow; c -= borrow;
4451
4452 Checks for EQ and NE with a nonzero value need an additional xor e.g.:
4453 if (a == b) c++; -> CCL3 a ^= b; 0 - a -> borrow; c += carry;
4454 if (a == b) c--; -> CCU a ^= b; a <= 0 -> CC0 | CC1; c -= borrow;
4455 if (a != b) c++; -> CCU a ^= b; a > 0 -> CC2; c += carry;
4456 if (a != b) c--; -> CCL3 a ^= b; 0 - a -> borrow; c -= borrow; */
4457
4458 bool
4459 s390_expand_addcc (enum rtx_code cmp_code, rtx cmp_op0, rtx cmp_op1,
4460 rtx dst, rtx src, rtx increment)
4461 {
4462 enum machine_mode cmp_mode;
4463 enum machine_mode cc_mode;
4464 rtx op_res;
4465 rtx insn;
4466 rtvec p;
4467 int ret;
4468
4469 if ((GET_MODE (cmp_op0) == SImode || GET_MODE (cmp_op0) == VOIDmode)
4470 && (GET_MODE (cmp_op1) == SImode || GET_MODE (cmp_op1) == VOIDmode))
4471 cmp_mode = SImode;
4472 else if ((GET_MODE (cmp_op0) == DImode || GET_MODE (cmp_op0) == VOIDmode)
4473 && (GET_MODE (cmp_op1) == DImode || GET_MODE (cmp_op1) == VOIDmode))
4474 cmp_mode = DImode;
4475 else
4476 return false;
4477
4478 /* Try ADD LOGICAL WITH CARRY. */
4479 if (increment == const1_rtx)
4480 {
4481 /* Determine CC mode to use. */
4482 if (cmp_code == EQ || cmp_code == NE)
4483 {
4484 if (cmp_op1 != const0_rtx)
4485 {
4486 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
4487 NULL_RTX, 0, OPTAB_WIDEN);
4488 cmp_op1 = const0_rtx;
4489 }
4490
4491 cmp_code = cmp_code == EQ ? LEU : GTU;
4492 }
4493
4494 if (cmp_code == LTU || cmp_code == LEU)
4495 {
4496 rtx tem = cmp_op0;
4497 cmp_op0 = cmp_op1;
4498 cmp_op1 = tem;
4499 cmp_code = swap_condition (cmp_code);
4500 }
4501
4502 switch (cmp_code)
4503 {
4504 case GTU:
4505 cc_mode = CCUmode;
4506 break;
4507
4508 case GEU:
4509 cc_mode = CCL3mode;
4510 break;
4511
4512 default:
4513 return false;
4514 }
4515
4516 /* Emit comparison instruction pattern. */
4517 if (!register_operand (cmp_op0, cmp_mode))
4518 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4519
4520 insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
4521 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4522 /* We use insn_invalid_p here to add clobbers if required. */
4523 ret = insn_invalid_p (emit_insn (insn));
4524 gcc_assert (!ret);
4525
4526 /* Emit ALC instruction pattern. */
4527 op_res = gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4528 gen_rtx_REG (cc_mode, CC_REGNUM),
4529 const0_rtx);
4530
4531 if (src != const0_rtx)
4532 {
4533 if (!register_operand (src, GET_MODE (dst)))
4534 src = force_reg (GET_MODE (dst), src);
4535
4536 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, src);
4537 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, const0_rtx);
4538 }
4539
4540 p = rtvec_alloc (2);
4541 RTVEC_ELT (p, 0) =
4542 gen_rtx_SET (VOIDmode, dst, op_res);
4543 RTVEC_ELT (p, 1) =
4544 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4545 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
4546
4547 return true;
4548 }
4549
4550 /* Try SUBTRACT LOGICAL WITH BORROW. */
4551 if (increment == constm1_rtx)
4552 {
4553 /* Determine CC mode to use. */
4554 if (cmp_code == EQ || cmp_code == NE)
4555 {
4556 if (cmp_op1 != const0_rtx)
4557 {
4558 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
4559 NULL_RTX, 0, OPTAB_WIDEN);
4560 cmp_op1 = const0_rtx;
4561 }
4562
4563 cmp_code = cmp_code == EQ ? LEU : GTU;
4564 }
4565
4566 if (cmp_code == GTU || cmp_code == GEU)
4567 {
4568 rtx tem = cmp_op0;
4569 cmp_op0 = cmp_op1;
4570 cmp_op1 = tem;
4571 cmp_code = swap_condition (cmp_code);
4572 }
4573
4574 switch (cmp_code)
4575 {
4576 case LEU:
4577 cc_mode = CCUmode;
4578 break;
4579
4580 case LTU:
4581 cc_mode = CCL3mode;
4582 break;
4583
4584 default:
4585 return false;
4586 }
4587
4588 /* Emit comparison instruction pattern. */
4589 if (!register_operand (cmp_op0, cmp_mode))
4590 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4591
4592 insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
4593 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4594 /* We use insn_invalid_p here to add clobbers if required. */
4595 ret = insn_invalid_p (emit_insn (insn));
4596 gcc_assert (!ret);
4597
4598 /* Emit SLB instruction pattern. */
4599 if (!register_operand (src, GET_MODE (dst)))
4600 src = force_reg (GET_MODE (dst), src);
4601
4602 op_res = gen_rtx_MINUS (GET_MODE (dst),
4603 gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
4604 gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4605 gen_rtx_REG (cc_mode, CC_REGNUM),
4606 const0_rtx));
4607 p = rtvec_alloc (2);
4608 RTVEC_ELT (p, 0) =
4609 gen_rtx_SET (VOIDmode, dst, op_res);
4610 RTVEC_ELT (p, 1) =
4611 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4612 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
4613
4614 return true;
4615 }
4616
4617 return false;
4618 }
4619
4620 /* Expand code for the insv template. Return true if successful. */
4621
4622 bool
4623 s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
4624 {
4625 int bitsize = INTVAL (op1);
4626 int bitpos = INTVAL (op2);
4627
4628 /* On z10 we can use the risbg instruction to implement insv. */
4629 if (TARGET_Z10
4630 && ((GET_MODE (dest) == DImode && GET_MODE (src) == DImode)
4631 || (GET_MODE (dest) == SImode && GET_MODE (src) == SImode)))
4632 {
4633 rtx op;
4634 rtx clobber;
4635
4636 op = gen_rtx_SET (GET_MODE(src),
4637 gen_rtx_ZERO_EXTRACT (GET_MODE (dest), dest, op1, op2),
4638 src);
4639 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4640 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
4641
4642 return true;
4643 }
4644
4645 /* We need byte alignment. */
4646 if (bitsize % BITS_PER_UNIT)
4647 return false;
4648
4649 if (bitpos == 0
4650 && memory_operand (dest, VOIDmode)
4651 && (register_operand (src, word_mode)
4652 || const_int_operand (src, VOIDmode)))
4653 {
4654 /* Emit standard pattern if possible. */
4655 enum machine_mode mode = smallest_mode_for_size (bitsize, MODE_INT);
4656 if (GET_MODE_BITSIZE (mode) == bitsize)
4657 emit_move_insn (adjust_address (dest, mode, 0), gen_lowpart (mode, src));
4658
4659 /* (set (ze (mem)) (const_int)). */
4660 else if (const_int_operand (src, VOIDmode))
4661 {
4662 int size = bitsize / BITS_PER_UNIT;
4663 rtx src_mem = adjust_address (force_const_mem (word_mode, src), BLKmode,
4664 GET_MODE_SIZE (word_mode) - size);
4665
4666 dest = adjust_address (dest, BLKmode, 0);
4667 set_mem_size (dest, GEN_INT (size));
4668 s390_expand_movmem (dest, src_mem, GEN_INT (size));
4669 }
4670
4671 /* (set (ze (mem)) (reg)). */
4672 else if (register_operand (src, word_mode))
4673 {
4674 if (bitsize <= GET_MODE_BITSIZE (SImode))
4675 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, op1,
4676 const0_rtx), src);
4677 else
4678 {
4679 /* Emit st,stcmh sequence. */
4680 int stcmh_width = bitsize - GET_MODE_BITSIZE (SImode);
4681 int size = stcmh_width / BITS_PER_UNIT;
4682
4683 emit_move_insn (adjust_address (dest, SImode, size),
4684 gen_lowpart (SImode, src));
4685 set_mem_size (dest, GEN_INT (size));
4686 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, GEN_INT
4687 (stcmh_width), const0_rtx),
4688 gen_rtx_LSHIFTRT (word_mode, src, GEN_INT
4689 (GET_MODE_BITSIZE (SImode))));
4690 }
4691 }
4692 else
4693 return false;
4694
4695 return true;
4696 }
4697
4698 /* (set (ze (reg)) (const_int)). */
4699 if (TARGET_ZARCH
4700 && register_operand (dest, word_mode)
4701 && (bitpos % 16) == 0
4702 && (bitsize % 16) == 0
4703 && const_int_operand (src, VOIDmode))
4704 {
4705 HOST_WIDE_INT val = INTVAL (src);
4706 int regpos = bitpos + bitsize;
4707
4708 while (regpos > bitpos)
4709 {
4710 enum machine_mode putmode;
4711 int putsize;
4712
4713 if (TARGET_EXTIMM && (regpos % 32 == 0) && (regpos >= bitpos + 32))
4714 putmode = SImode;
4715 else
4716 putmode = HImode;
4717
4718 putsize = GET_MODE_BITSIZE (putmode);
4719 regpos -= putsize;
4720 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
4721 GEN_INT (putsize),
4722 GEN_INT (regpos)),
4723 gen_int_mode (val, putmode));
4724 val >>= putsize;
4725 }
4726 gcc_assert (regpos == bitpos);
4727 return true;
4728 }
4729
4730 return false;
4731 }
4732
4733 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic which returns a
4734 register that holds VAL of mode MODE shifted by COUNT bits. */
4735
4736 static inline rtx
4737 s390_expand_mask_and_shift (rtx val, enum machine_mode mode, rtx count)
4738 {
4739 val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
4740 NULL_RTX, 1, OPTAB_DIRECT);
4741 return expand_simple_binop (SImode, ASHIFT, val, count,
4742 NULL_RTX, 1, OPTAB_DIRECT);
4743 }
4744
4745 /* Structure to hold the initial parameters for a compare_and_swap operation
4746 in HImode and QImode. */
4747
4748 struct alignment_context
4749 {
4750 rtx memsi; /* SI aligned memory location. */
4751 rtx shift; /* Bit offset with regard to lsb. */
4752 rtx modemask; /* Mask of the HQImode shifted by SHIFT bits. */
4753 rtx modemaski; /* ~modemask */
4754 bool aligned; /* True if memory is aligned, false otherwise. */
4755 };
4756
4757 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize
4758 the alignment context structure AC, simplifying it transparently when the
4759 memory alignment is known to be at least 32 bits. MEM is the memory location
4760 for the actual operation and MODE its mode. */
4761
4762 static void
4763 init_alignment_context (struct alignment_context *ac, rtx mem,
4764 enum machine_mode mode)
4765 {
4766 ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
4767 ac->aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));
4768
4769 if (ac->aligned)
4770 ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned. */
4771 else
4772 {
4773 /* Alignment is unknown. */
4774 rtx byteoffset, addr, align;
4775
4776 /* Force the address into a register. */
4777 addr = force_reg (Pmode, XEXP (mem, 0));
4778
4779 /* Align it to SImode. */
4780 align = expand_simple_binop (Pmode, AND, addr,
4781 GEN_INT (-GET_MODE_SIZE (SImode)),
4782 NULL_RTX, 1, OPTAB_DIRECT);
4783 /* Generate MEM. */
4784 ac->memsi = gen_rtx_MEM (SImode, align);
4785 MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
4786 set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
4787 set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));
4788
4789 /* Calculate shiftcount. */
4790 byteoffset = expand_simple_binop (Pmode, AND, addr,
4791 GEN_INT (GET_MODE_SIZE (SImode) - 1),
4792 NULL_RTX, 1, OPTAB_DIRECT);
4793 /* As we already have some offset, evaluate the remaining distance. */
4794 ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
4795 NULL_RTX, 1, OPTAB_DIRECT);
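/* E.g. for a QImode operand at byte offset 1 within the SImode word
   this yields 4 - 1 - 1 = 2 bytes, i.e. bit position 16 once converted
   to a bit count below (the target is big-endian).  */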
4796
4797 }
4798 /* Shift is the byte count, but we need the bitcount. */
4799 ac->shift = expand_simple_binop (SImode, MULT, ac->shift, GEN_INT (BITS_PER_UNIT),
4800 NULL_RTX, 1, OPTAB_DIRECT);
4801 /* Calculate masks. */
4802 ac->modemask = expand_simple_binop (SImode, ASHIFT,
4803 GEN_INT (GET_MODE_MASK (mode)), ac->shift,
4804 NULL_RTX, 1, OPTAB_DIRECT);
4805 ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask, NULL_RTX, 1);
4806 }
4807
4808 /* Expand an atomic compare and swap operation for HImode and QImode. MEM is
4809 the memory location, CMP the old value to compare MEM with and NEW_RTX the value
4810 to set if CMP == MEM.
4811 CMP is never in memory for compare_and_swap_cc because
4812 expand_bool_compare_and_swap puts it into a register for later compare. */
4813
4814 void
4815 s390_expand_cs_hqi (enum machine_mode mode, rtx target, rtx mem, rtx cmp, rtx new_rtx)
4816 {
4817 struct alignment_context ac;
4818 rtx cmpv, newv, val, resv, cc;
4819 rtx res = gen_reg_rtx (SImode);
4820 rtx csloop = gen_label_rtx ();
4821 rtx csend = gen_label_rtx ();
4822
4823 gcc_assert (register_operand (target, VOIDmode));
4824 gcc_assert (MEM_P (mem));
4825
4826 init_alignment_context (&ac, mem, mode);
4827
4828 /* Shift the values to the correct bit positions. */
4829 if (!(ac.aligned && MEM_P (cmp)))
4830 cmp = s390_expand_mask_and_shift (cmp, mode, ac.shift);
4831 if (!(ac.aligned && MEM_P (new_rtx)))
4832 new_rtx = s390_expand_mask_and_shift (new_rtx, mode, ac.shift);
4833
4834 /* Load full word. Subsequent loads are performed by CS. */
4835 val = expand_simple_binop (SImode, AND, ac.memsi, ac.modemaski,
4836 NULL_RTX, 1, OPTAB_DIRECT);
4837
4838 /* Start CS loop. */
4839 emit_label (csloop);
4840 /* val = "<mem>00..0<mem>"
4841 * cmp = "00..0<cmp>00..0"
4842 * new = "00..0<new>00..0"
4843 */
4844
4845 /* Patch cmp and new with val at correct position. */
4846 if (ac.aligned && MEM_P (cmp))
4847 {
4848 cmpv = force_reg (SImode, val);
4849 store_bit_field (cmpv, GET_MODE_BITSIZE (mode), 0, SImode, cmp);
4850 }
4851 else
4852 cmpv = force_reg (SImode, expand_simple_binop (SImode, IOR, cmp, val,
4853 NULL_RTX, 1, OPTAB_DIRECT));
4854 if (ac.aligned && MEM_P (new_rtx))
4855 {
4856 newv = force_reg (SImode, val);
4857 store_bit_field (newv, GET_MODE_BITSIZE (mode), 0, SImode, new_rtx);
4858 }
4859 else
4860 newv = force_reg (SImode, expand_simple_binop (SImode, IOR, new_rtx, val,
4861 NULL_RTX, 1, OPTAB_DIRECT));
4862
4863 /* Jump to end if we're done (likely?). */
4864 s390_emit_jump (csend, s390_emit_compare_and_swap (EQ, res, ac.memsi,
4865 cmpv, newv));
4866
4867 /* Check for changes outside mode. */
4868 resv = expand_simple_binop (SImode, AND, res, ac.modemaski,
4869 NULL_RTX, 1, OPTAB_DIRECT);
4870 cc = s390_emit_compare (NE, resv, val);
4871 emit_move_insn (val, resv);
4872 /* Loop back if so. */
4873 s390_emit_jump (csloop, cc);
4874
4875 emit_label (csend);
4876
4877 /* Return the correct part of the bitfield. */
4878 convert_move (target, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
4879 NULL_RTX, 1, OPTAB_DIRECT), 1);
4880 }
4881
4882 /* Expand an atomic operation CODE of mode MODE. MEM is the memory location
4883 and VAL the value to play with. If AFTER is true then store the value
4884 MEM holds after the operation, if AFTER is false then store the value MEM
4885 holds before the operation. If TARGET is zero then discard that value, else
4886 store it to TARGET. */
4887
4888 void
4889 s390_expand_atomic (enum machine_mode mode, enum rtx_code code,
4890 rtx target, rtx mem, rtx val, bool after)
4891 {
4892 struct alignment_context ac;
4893 rtx cmp;
4894 rtx new_rtx = gen_reg_rtx (SImode);
4895 rtx orig = gen_reg_rtx (SImode);
4896 rtx csloop = gen_label_rtx ();
4897
4898 gcc_assert (!target || register_operand (target, VOIDmode));
4899 gcc_assert (MEM_P (mem));
4900
4901 init_alignment_context (&ac, mem, mode);
4902
4903 /* Shift val to the correct bit positions.
4904 Preserve "icm", but prevent "ex icm". */
4905 if (!(ac.aligned && code == SET && MEM_P (val)))
4906 val = s390_expand_mask_and_shift (val, mode, ac.shift);
4907
4908 /* Further preparation insns. */
4909 if (code == PLUS || code == MINUS)
4910 emit_move_insn (orig, val);
4911 else if (code == MULT || code == AND) /* val = "11..1<val>11..1" */
4912 val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
4913 NULL_RTX, 1, OPTAB_DIRECT);
4914
4915 /* Load full word. Subsequent loads are performed by CS. */
4916 cmp = force_reg (SImode, ac.memsi);
4917
4918 /* Start CS loop. */
4919 emit_label (csloop);
4920 emit_move_insn (new_rtx, cmp);
4921
4922 /* Patch new with val at correct position. */
4923 switch (code)
4924 {
4925 case PLUS:
4926 case MINUS:
4927 val = expand_simple_binop (SImode, code, new_rtx, orig,
4928 NULL_RTX, 1, OPTAB_DIRECT);
4929 val = expand_simple_binop (SImode, AND, val, ac.modemask,
4930 NULL_RTX, 1, OPTAB_DIRECT);
4931 /* FALLTHRU */
4932 case SET:
4933 if (ac.aligned && MEM_P (val))
4934 store_bit_field (new_rtx, GET_MODE_BITSIZE (mode), 0, SImode, val);
4935 else
4936 {
4937 new_rtx = expand_simple_binop (SImode, AND, new_rtx, ac.modemaski,
4938 NULL_RTX, 1, OPTAB_DIRECT);
4939 new_rtx = expand_simple_binop (SImode, IOR, new_rtx, val,
4940 NULL_RTX, 1, OPTAB_DIRECT);
4941 }
4942 break;
4943 case AND:
4944 case IOR:
4945 case XOR:
4946 new_rtx = expand_simple_binop (SImode, code, new_rtx, val,
4947 NULL_RTX, 1, OPTAB_DIRECT);
4948 break;
4949 case MULT: /* NAND */
4950 new_rtx = expand_simple_binop (SImode, AND, new_rtx, val,
4951 NULL_RTX, 1, OPTAB_DIRECT);
4952 new_rtx = expand_simple_binop (SImode, XOR, new_rtx, ac.modemask,
4953 NULL_RTX, 1, OPTAB_DIRECT);
4954 break;
4955 default:
4956 gcc_unreachable ();
4957 }
4958
4959 s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, cmp,
4960 ac.memsi, cmp, new_rtx));
4961
4962 /* Return the correct part of the bitfield. */
4963 if (target)
4964 convert_move (target, expand_simple_binop (SImode, LSHIFTRT,
4965 after ? new_rtx : cmp, ac.shift,
4966 NULL_RTX, 1, OPTAB_DIRECT), 1);
4967 }
4968
4969 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
4970 We need to emit DTP-relative relocations. */
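/* For SIZE == 8 this emits, for example, "\t.quad\t<x>@DTPOFF".  */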
4971
4972 static void s390_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
4973
4974 static void
4975 s390_output_dwarf_dtprel (FILE *file, int size, rtx x)
4976 {
4977 switch (size)
4978 {
4979 case 4:
4980 fputs ("\t.long\t", file);
4981 break;
4982 case 8:
4983 fputs ("\t.quad\t", file);
4984 break;
4985 default:
4986 gcc_unreachable ();
4987 }
4988 output_addr_const (file, x);
4989 fputs ("@DTPOFF", file);
4990 }
4991
4992 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
4993 /* Implement TARGET_MANGLE_TYPE. */
4994
4995 static const char *
4996 s390_mangle_type (const_tree type)
4997 {
4998 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
4999 && TARGET_LONG_DOUBLE_128)
5000 return "g";
5001
5002 /* For all other types, use normal C++ mangling. */
5003 return NULL;
5004 }
5005 #endif
5006
5007 /* In the name of slightly smaller debug output, and to cater to
5008 general assembler lossage, recognize various UNSPEC sequences
5009 and turn them back into a direct symbol reference. */
5010
5011 static rtx
5012 s390_delegitimize_address (rtx orig_x)
5013 {
5014 rtx x, y;
5015
5016 orig_x = delegitimize_mem_from_attrs (orig_x);
5017 x = orig_x;
5018
5019 /* Extract the symbol ref from:
5020 (plus:SI (reg:SI 12 %r12)
5021 (const:SI (unspec:SI [(symbol_ref/f:SI ("*.LC0"))]
5022 UNSPEC_GOTOFF))) */
5023 if (GET_CODE (x) == PLUS
5024 && REG_P (XEXP (x, 0))
5025 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
5026 && GET_CODE (XEXP (x, 1)) == CONST)
5027 {
5028 /* The const operand. */
5029 y = XEXP (XEXP (x, 1), 0);
5030 if (GET_CODE (y) == UNSPEC
5031 && XINT (y, 1) == UNSPEC_GOTOFF)
5032 return XVECEXP (y, 0, 0);
5033 }
5034
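/* Likewise strip a (mem (plus %r12 (const (unspec [sym] UNSPEC_GOT))))
   GOT load, or a (mem (const (unspec [sym] UNSPEC_GOTENT))) one, down
   to the symbol itself.  */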
5035 if (GET_CODE (x) != MEM)
5036 return orig_x;
5037
5038 x = XEXP (x, 0);
5039 if (GET_CODE (x) == PLUS
5040 && GET_CODE (XEXP (x, 1)) == CONST
5041 && GET_CODE (XEXP (x, 0)) == REG
5042 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
5043 {
5044 y = XEXP (XEXP (x, 1), 0);
5045 if (GET_CODE (y) == UNSPEC
5046 && XINT (y, 1) == UNSPEC_GOT)
5047 y = XVECEXP (y, 0, 0);
5048 else
5049 return orig_x;
5050 }
5051 else if (GET_CODE (x) == CONST)
5052 {
5053 y = XEXP (x, 0);
5054 if (GET_CODE (y) == UNSPEC
5055 && XINT (y, 1) == UNSPEC_GOTENT)
5056 y = XVECEXP (y, 0, 0);
5057 else
5058 return orig_x;
5059 }
5060 else
5061 return orig_x;
5062
5063 if (GET_MODE (orig_x) != Pmode)
5064 {
5065 y = lowpart_subreg (GET_MODE (orig_x), y, Pmode);
5066 if (y == NULL_RTX)
5067 return orig_x;
5068 }
5069 return y;
5070 }
5071
5072 /* Output operand OP to stdio stream FILE.
5073 OP is an address (register + offset) which is not used to address data;
5074 instead the rightmost bits are interpreted as the value. */
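/* For instance, (plus (reg %r3) (const_int 46)) is printed as "46(%r3)";
   only the low 12 bits of the offset are significant.  */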
5075
5076 static void
5077 print_shift_count_operand (FILE *file, rtx op)
5078 {
5079 HOST_WIDE_INT offset;
5080 rtx base;
5081
5082 /* Extract base register and offset. */
5083 if (!s390_decompose_shift_count (op, &base, &offset))
5084 gcc_unreachable ();
5085
5086 /* Sanity check. */
5087 if (base)
5088 {
5089 gcc_assert (GET_CODE (base) == REG);
5090 gcc_assert (REGNO (base) < FIRST_PSEUDO_REGISTER);
5091 gcc_assert (REGNO_REG_CLASS (REGNO (base)) == ADDR_REGS);
5092 }
5093
5094 /* Offsets are restricted to twelve bits. */
5095 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset & ((1 << 12) - 1));
5096 if (base)
5097 fprintf (file, "(%s)", reg_names[REGNO (base)]);
5098 }
5099
5100 /* See 'get_some_local_dynamic_name'. */
5101
5102 static int
5103 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
5104 {
5105 rtx x = *px;
5106
5107 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
5108 {
5109 x = get_pool_constant (x);
5110 return for_each_rtx (&x, get_some_local_dynamic_name_1, 0);
5111 }
5112
5113 if (GET_CODE (x) == SYMBOL_REF
5114 && tls_symbolic_operand (x) == TLS_MODEL_LOCAL_DYNAMIC)
5115 {
5116 cfun->machine->some_ld_name = XSTR (x, 0);
5117 return 1;
5118 }
5119
5120 return 0;
5121 }
5122
5123 /* Locate some local-dynamic symbol still in use by this function
5124 so that we can print its name in local-dynamic base patterns. */
5125
5126 static const char *
5127 get_some_local_dynamic_name (void)
5128 {
5129 rtx insn;
5130
5131 if (cfun->machine->some_ld_name)
5132 return cfun->machine->some_ld_name;
5133
5134 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
5135 if (INSN_P (insn)
5136 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
5137 return cfun->machine->some_ld_name;
5138
5139 gcc_unreachable ();
5140 }
5141
5142 /* Output machine-dependent UNSPECs occurring in address constant X
5143 in assembler syntax to stdio stream FILE. Returns true if the
5144 constant X could be recognized, false otherwise. */
5145
5146 static bool
5147 s390_output_addr_const_extra (FILE *file, rtx x)
5148 {
5149 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
5150 switch (XINT (x, 1))
5151 {
5152 case UNSPEC_GOTENT:
5153 output_addr_const (file, XVECEXP (x, 0, 0));
5154 fprintf (file, "@GOTENT");
5155 return true;
5156 case UNSPEC_GOT:
5157 output_addr_const (file, XVECEXP (x, 0, 0));
5158 fprintf (file, "@GOT");
5159 return true;
5160 case UNSPEC_GOTOFF:
5161 output_addr_const (file, XVECEXP (x, 0, 0));
5162 fprintf (file, "@GOTOFF");
5163 return true;
5164 case UNSPEC_PLT:
5165 output_addr_const (file, XVECEXP (x, 0, 0));
5166 fprintf (file, "@PLT");
5167 return true;
5168 case UNSPEC_PLTOFF:
5169 output_addr_const (file, XVECEXP (x, 0, 0));
5170 fprintf (file, "@PLTOFF");
5171 return true;
5172 case UNSPEC_TLSGD:
5173 output_addr_const (file, XVECEXP (x, 0, 0));
5174 fprintf (file, "@TLSGD");
5175 return true;
5176 case UNSPEC_TLSLDM:
5177 assemble_name (file, get_some_local_dynamic_name ());
5178 fprintf (file, "@TLSLDM");
5179 return true;
5180 case UNSPEC_DTPOFF:
5181 output_addr_const (file, XVECEXP (x, 0, 0));
5182 fprintf (file, "@DTPOFF");
5183 return true;
5184 case UNSPEC_NTPOFF:
5185 output_addr_const (file, XVECEXP (x, 0, 0));
5186 fprintf (file, "@NTPOFF");
5187 return true;
5188 case UNSPEC_GOTNTPOFF:
5189 output_addr_const (file, XVECEXP (x, 0, 0));
5190 fprintf (file, "@GOTNTPOFF");
5191 return true;
5192 case UNSPEC_INDNTPOFF:
5193 output_addr_const (file, XVECEXP (x, 0, 0));
5194 fprintf (file, "@INDNTPOFF");
5195 return true;
5196 }
5197
5198 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 2)
5199 switch (XINT (x, 1))
5200 {
5201 case UNSPEC_POOL_OFFSET:
5202 x = gen_rtx_MINUS (GET_MODE (x), XVECEXP (x, 0, 0), XVECEXP (x, 0, 1));
5203 output_addr_const (file, x);
5204 return true;
5205 }
5206 return false;
5207 }
5208
5209 /* Output address operand ADDR in assembler syntax to
5210 stdio stream FILE. */
5211
5212 void
5213 print_operand_address (FILE *file, rtx addr)
5214 {
5215 struct s390_address ad;
5216
5217 if (s390_symref_operand_p (addr, NULL, NULL))
5218 {
5219 if (!TARGET_Z10)
5220 {
5221 output_operand_lossage ("symbolic memory references are "
5222 "only supported on z10 or later");
5223 return;
5224 }
5225 output_addr_const (file, addr);
5226 return;
5227 }
5228
5229 if (!s390_decompose_address (addr, &ad)
5230 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5231 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
5232 output_operand_lossage ("cannot decompose address");
5233
5234 if (ad.disp)
5235 output_addr_const (file, ad.disp);
5236 else
5237 fprintf (file, "0");
5238
5239 if (ad.base && ad.indx)
5240 fprintf (file, "(%s,%s)", reg_names[REGNO (ad.indx)],
5241 reg_names[REGNO (ad.base)]);
5242 else if (ad.base)
5243 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
5244 }
5245
5246 /* Output operand X in assembler syntax to stdio stream FILE.
5247 CODE specifies the format flag. The following format flags
5248 are recognized:
5249
5250 'C': print opcode suffix for branch condition.
5251 'D': print opcode suffix for inverse branch condition.
5252 'E': print opcode suffix for branch on index instruction.
5253 'J': print tls_load/tls_gdcall/tls_ldcall suffix
5254 'G': print the size of the operand in bytes.
5255 'O': print only the displacement of a memory reference.
5256 'R': print only the base register of a memory reference.
5257 'S': print S-type memory reference (base+displacement).
5258 'N': print the second word of a DImode operand.
5259 'M': print the second word of a TImode operand.
5260 'Y': print shift count operand.
5261
5262 'b': print integer X as if it's an unsigned byte.
5263 'c': print integer X as if it's a signed byte.
5264 'x': print integer X as if it's an unsigned halfword.
5265 'h': print integer X as if it's a signed halfword.
5266 'i': print the first nonzero HImode part of X.
5267 'j': print the first HImode part unequal to -1 of X.
5268 'k': print the first nonzero SImode part of X.
5269 'm': print the first SImode part unequal to -1 of X.
5270 'o': print integer X as if it's an unsigned 32-bit word. */
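/* For example, with X = (const_int -1) the 'x' modifier prints 65535
   while 'h' prints -1.  */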
5271
5272 void
5273 print_operand (FILE *file, rtx x, int code)
5274 {
5275 switch (code)
5276 {
5277 case 'C':
5278 fprintf (file, s390_branch_condition_mnemonic (x, FALSE));
5279 return;
5280
5281 case 'D':
5282 fprintf (file, s390_branch_condition_mnemonic (x, TRUE));
5283 return;
5284
5285 case 'E':
5286 if (GET_CODE (x) == LE)
5287 fprintf (file, "l");
5288 else if (GET_CODE (x) == GT)
5289 fprintf (file, "h");
5290 else
5291 output_operand_lossage ("invalid comparison operator "
5292 "for 'E' output modifier");
5293 return;
5294
5295 case 'J':
5296 if (GET_CODE (x) == SYMBOL_REF)
5297 {
5298 fprintf (file, "%s", ":tls_load:");
5299 output_addr_const (file, x);
5300 }
5301 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
5302 {
5303 fprintf (file, "%s", ":tls_gdcall:");
5304 output_addr_const (file, XVECEXP (x, 0, 0));
5305 }
5306 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM)
5307 {
5308 fprintf (file, "%s", ":tls_ldcall:");
5309 assemble_name (file, get_some_local_dynamic_name ());
5310 }
5311 else
5312 output_operand_lossage ("invalid reference for 'J' output modifier");
5313 return;
5314
5315 case 'G':
5316 fprintf (file, "%u", GET_MODE_SIZE (GET_MODE (x)));
5317 return;
5318
5319 case 'O':
5320 {
5321 struct s390_address ad;
5322 int ret;
5323
5324 if (!MEM_P (x))
5325 {
5326 output_operand_lossage ("memory reference expected for "
5327 "'O' output modifier");
5328 return;
5329 }
5330
5331 ret = s390_decompose_address (XEXP (x, 0), &ad);
5332
5333 if (!ret
5334 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5335 || ad.indx)
5336 {
5337 output_operand_lossage ("invalid address for 'O' output modifier");
5338 return;
5339 }
5340
5341 if (ad.disp)
5342 output_addr_const (file, ad.disp);
5343 else
5344 fprintf (file, "0");
5345 }
5346 return;
5347
5348 case 'R':
5349 {
5350 struct s390_address ad;
5351 int ret;
5352
5353 if (!MEM_P (x))
5354 {
5355 output_operand_lossage ("memory reference expected for "
5356 "'R' output modifier");
5357 return;
5358 }
5359
5360 ret = s390_decompose_address (XEXP (x, 0), &ad);
5361
5362 if (!ret
5363 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5364 || ad.indx)
5365 {
5366 output_operand_lossage ("invalid address for 'R' output modifier");
5367 return;
5368 }
5369
5370 if (ad.base)
5371 fprintf (file, "%s", reg_names[REGNO (ad.base)]);
5372 else
5373 fprintf (file, "0");
5374 }
5375 return;
5376
5377 case 'S':
5378 {
5379 struct s390_address ad;
5380 int ret;
5381
5382 if (!MEM_P (x))
5383 {
5384 output_operand_lossage ("memory reference expected for "
5385 "'S' output modifier");
5386 return;
5387 }
5388 ret = s390_decompose_address (XEXP (x, 0), &ad);
5389
5390 if (!ret
5391 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5392 || ad.indx)
5393 {
5394 output_operand_lossage ("invalid address for 'S' output modifier");
5395 return;
5396 }
5397
5398 if (ad.disp)
5399 output_addr_const (file, ad.disp);
5400 else
5401 fprintf (file, "0");
5402
5403 if (ad.base)
5404 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
5405 }
5406 return;
5407
5408 case 'N':
5409 if (GET_CODE (x) == REG)
5410 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
5411 else if (GET_CODE (x) == MEM)
5412 x = change_address (x, VOIDmode, plus_constant (XEXP (x, 0), 4));
5413 else
5414 output_operand_lossage ("register or memory expression expected "
5415 "for 'N' output modifier");
5416 break;
5417
5418 case 'M':
5419 if (GET_CODE (x) == REG)
5420 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
5421 else if (GET_CODE (x) == MEM)
5422 x = change_address (x, VOIDmode, plus_constant (XEXP (x, 0), 8));
5423 else
5424 output_operand_lossage ("register or memory expression expected "
5425 "for 'M' output modifier");
5426 break;
5427
5428 case 'Y':
5429 print_shift_count_operand (file, x);
5430 return;
5431 }
5432
5433 switch (GET_CODE (x))
5434 {
5435 case REG:
5436 fprintf (file, "%s", reg_names[REGNO (x)]);
5437 break;
5438
5439 case MEM:
5440 output_address (XEXP (x, 0));
5441 break;
5442
5443 case CONST:
5444 case CODE_LABEL:
5445 case LABEL_REF:
5446 case SYMBOL_REF:
5447 output_addr_const (file, x);
5448 break;
5449
5450 case CONST_INT:
5451 if (code == 'b')
5452 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xff);
5453 else if (code == 'c')
5454 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ((INTVAL (x) & 0xff) ^ 0x80) - 0x80);
5455 else if (code == 'x')
5456 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xffff);
5457 else if (code == 'h')
5458 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
5459 else if (code == 'i')
5460 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5461 s390_extract_part (x, HImode, 0));
5462 else if (code == 'j')
5463 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5464 s390_extract_part (x, HImode, -1));
5465 else if (code == 'k')
5466 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5467 s390_extract_part (x, SImode, 0));
5468 else if (code == 'm')
5469 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5470 s390_extract_part (x, SImode, -1));
5471 else if (code == 'o')
5472 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xffffffff);
5473 else
5474 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
5475 break;
5476
5477 case CONST_DOUBLE:
5478 gcc_assert (GET_MODE (x) == VOIDmode);
5479 if (code == 'b')
5480 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xff);
5481 else if (code == 'x')
5482 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xffff);
5483 else if (code == 'h')
5484 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5485 ((CONST_DOUBLE_LOW (x) & 0xffff) ^ 0x8000) - 0x8000);
5486 else
5487 {
5488 if (code == 0)
5489 output_operand_lossage ("invalid constant - try using "
5490 "an output modifier");
5491 else
5492 output_operand_lossage ("invalid constant for output modifier '%c'",
5493 code);
5494 }
5495 break;
5496
5497 default:
5498 if (code == 0)
5499 output_operand_lossage ("invalid expression - try using "
5500 "an output modifier");
5501 else
5502 output_operand_lossage ("invalid expression for output "
5503 "modifier '%c'", code);
5504 break;
5505 }
5506 }
5507
5508 /* Target hook for assembling integer objects. We need to define it
5509 here to work around a bug in some versions of GAS, which couldn't
5510 handle values smaller than INT_MIN when printed in decimal. */
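/* Such values are therefore printed in hexadecimal instead; e.g.
   (const_int -2147483649) is emitted as "\t.quad\t0xffffffff7fffffff".  */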
5511
5512 static bool
5513 s390_assemble_integer (rtx x, unsigned int size, int aligned_p)
5514 {
5515 if (size == 8 && aligned_p
5516 && GET_CODE (x) == CONST_INT && INTVAL (x) < INT_MIN)
5517 {
5518 fprintf (asm_out_file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n",
5519 INTVAL (x));
5520 return true;
5521 }
5522 return default_assemble_integer (x, size, aligned_p);
5523 }
5524
5525 /* Returns true if register REGNO is used for forming
5526 a memory address in expression X. */
5527
5528 static bool
5529 reg_used_in_mem_p (int regno, rtx x)
5530 {
5531 enum rtx_code code = GET_CODE (x);
5532 int i, j;
5533 const char *fmt;
5534
5535 if (code == MEM)
5536 {
5537 if (refers_to_regno_p (regno, regno+1,
5538 XEXP (x, 0), 0))
5539 return true;
5540 }
5541 else if (code == SET
5542 && GET_CODE (SET_DEST (x)) == PC)
5543 {
5544 if (refers_to_regno_p (regno, regno+1,
5545 SET_SRC (x), 0))
5546 return true;
5547 }
5548
5549 fmt = GET_RTX_FORMAT (code);
5550 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5551 {
5552 if (fmt[i] == 'e'
5553 && reg_used_in_mem_p (regno, XEXP (x, i)))
5554 return true;
5555
5556 else if (fmt[i] == 'E')
5557 for (j = 0; j < XVECLEN (x, i); j++)
5558 if (reg_used_in_mem_p (regno, XVECEXP (x, i, j)))
5559 return true;
5560 }
5561 return false;
5562 }
5563
5564 /* Returns true if expression DEP_RTX sets an address register
5565 used by instruction INSN to address memory. */
5566
5567 static bool
5568 addr_generation_dependency_p (rtx dep_rtx, rtx insn)
5569 {
5570 rtx target, pat;
5571
5572 if (GET_CODE (dep_rtx) == INSN)
5573 dep_rtx = PATTERN (dep_rtx);
5574
5575 if (GET_CODE (dep_rtx) == SET)
5576 {
5577 target = SET_DEST (dep_rtx);
5578 if (GET_CODE (target) == STRICT_LOW_PART)
5579 target = XEXP (target, 0);
5580 while (GET_CODE (target) == SUBREG)
5581 target = SUBREG_REG (target);
5582
5583 if (GET_CODE (target) == REG)
5584 {
5585 int regno = REGNO (target);
5586
5587 if (s390_safe_attr_type (insn) == TYPE_LA)
5588 {
5589 pat = PATTERN (insn);
5590 if (GET_CODE (pat) == PARALLEL)
5591 {
5592 gcc_assert (XVECLEN (pat, 0) == 2);
5593 pat = XVECEXP (pat, 0, 0);
5594 }
5595 gcc_assert (GET_CODE (pat) == SET);
5596 return refers_to_regno_p (regno, regno+1, SET_SRC (pat), 0);
5597 }
5598 else if (get_attr_atype (insn) == ATYPE_AGEN)
5599 return reg_used_in_mem_p (regno, PATTERN (insn));
5600 }
5601 }
5602 return false;
5603 }
5604
5605 /* Return 1 if DEP_INSN sets a register that INSN uses in the agen unit. */
5606
5607 int
5608 s390_agen_dep_p (rtx dep_insn, rtx insn)
5609 {
5610 rtx dep_rtx = PATTERN (dep_insn);
5611 int i;
5612
5613 if (GET_CODE (dep_rtx) == SET
5614 && addr_generation_dependency_p (dep_rtx, insn))
5615 return 1;
5616 else if (GET_CODE (dep_rtx) == PARALLEL)
5617 {
5618 for (i = 0; i < XVECLEN (dep_rtx, 0); i++)
5619 {
5620 if (addr_generation_dependency_p (XVECEXP (dep_rtx, 0, i), insn))
5621 return 1;
5622 }
5623 }
5624 return 0;
5625 }
5626
5627
5628 /* A C statement (sans semicolon) to update the integer scheduling priority
5629 INSN_PRIORITY (INSN). Increase the priority to execute the INSN earlier,
5630 reduce the priority to execute INSN later. Do not define this macro if
5631 you do not need to adjust the scheduling priorities of insns.
5632
5633 A STD instruction should be scheduled earlier,
5634 in order to use the bypass. */
5635 static int
5636 s390_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
5637 {
5638 if (! INSN_P (insn))
5639 return priority;
5640
5641 if (s390_tune != PROCESSOR_2084_Z990
5642 && s390_tune != PROCESSOR_2094_Z9_109
5643 && s390_tune != PROCESSOR_2097_Z10
5644 && s390_tune != PROCESSOR_2817_Z196)
5645 return priority;
5646
5647 switch (s390_safe_attr_type (insn))
5648 {
5649 case TYPE_FSTOREDF:
5650 case TYPE_FSTORESF:
5651 priority = priority << 3;
5652 break;
5653 case TYPE_STORE:
5654 case TYPE_STM:
5655 priority = priority << 1;
5656 break;
5657 default:
5658 break;
5659 }
5660 return priority;
5661 }
5662
5663
5664 /* The number of instructions that can be issued per cycle. */
5665
5666 static int
5667 s390_issue_rate (void)
5668 {
5669 switch (s390_tune)
5670 {
5671 case PROCESSOR_2084_Z990:
5672 case PROCESSOR_2094_Z9_109:
5673 case PROCESSOR_2817_Z196:
5674 return 3;
5675 case PROCESSOR_2097_Z10:
5676 return 2;
5677 default:
5678 return 1;
5679 }
5680 }
5681
5682 static int
5683 s390_first_cycle_multipass_dfa_lookahead (void)
5684 {
5685 return 4;
5686 }
5687
5688 /* Annotate every literal pool reference in X by an UNSPEC_LTREF expression.
5689 Fix up MEMs as required. */
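/* For example, a (mem (symbol_ref <pool label>)) reference becomes
   (mem (unspec [<pool label>, base register] UNSPEC_LTREF)), which keeps
   the use of the literal pool base register explicit in the RTL.  */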
5690
5691 static void
5692 annotate_constant_pool_refs (rtx *x)
5693 {
5694 int i, j;
5695 const char *fmt;
5696
5697 gcc_assert (GET_CODE (*x) != SYMBOL_REF
5698 || !CONSTANT_POOL_ADDRESS_P (*x));
5699
5700 /* Literal pool references can only occur inside a MEM ... */
5701 if (GET_CODE (*x) == MEM)
5702 {
5703 rtx memref = XEXP (*x, 0);
5704
5705 if (GET_CODE (memref) == SYMBOL_REF
5706 && CONSTANT_POOL_ADDRESS_P (memref))
5707 {
5708 rtx base = cfun->machine->base_reg;
5709 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, memref, base),
5710 UNSPEC_LTREF);
5711
5712 *x = replace_equiv_address (*x, addr);
5713 return;
5714 }
5715
5716 if (GET_CODE (memref) == CONST
5717 && GET_CODE (XEXP (memref, 0)) == PLUS
5718 && GET_CODE (XEXP (XEXP (memref, 0), 1)) == CONST_INT
5719 && GET_CODE (XEXP (XEXP (memref, 0), 0)) == SYMBOL_REF
5720 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (memref, 0), 0)))
5721 {
5722 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (memref, 0), 1));
5723 rtx sym = XEXP (XEXP (memref, 0), 0);
5724 rtx base = cfun->machine->base_reg;
5725 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5726 UNSPEC_LTREF);
5727
5728 *x = replace_equiv_address (*x, plus_constant (addr, off));
5729 return;
5730 }
5731 }
5732
5733 /* ... or a load-address type pattern. */
5734 if (GET_CODE (*x) == SET)
5735 {
5736 rtx addrref = SET_SRC (*x);
5737
5738 if (GET_CODE (addrref) == SYMBOL_REF
5739 && CONSTANT_POOL_ADDRESS_P (addrref))
5740 {
5741 rtx base = cfun->machine->base_reg;
5742 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addrref, base),
5743 UNSPEC_LTREF);
5744
5745 SET_SRC (*x) = addr;
5746 return;
5747 }
5748
5749 if (GET_CODE (addrref) == CONST
5750 && GET_CODE (XEXP (addrref, 0)) == PLUS
5751 && GET_CODE (XEXP (XEXP (addrref, 0), 1)) == CONST_INT
5752 && GET_CODE (XEXP (XEXP (addrref, 0), 0)) == SYMBOL_REF
5753 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (addrref, 0), 0)))
5754 {
5755 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (addrref, 0), 1));
5756 rtx sym = XEXP (XEXP (addrref, 0), 0);
5757 rtx base = cfun->machine->base_reg;
5758 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5759 UNSPEC_LTREF);
5760
5761 SET_SRC (*x) = plus_constant (addr, off);
5762 return;
5763 }
5764 }
5765
5766 /* Annotate LTREL_BASE as well. */
5767 if (GET_CODE (*x) == UNSPEC
5768 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
5769 {
5770 rtx base = cfun->machine->base_reg;
5771 *x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XVECEXP (*x, 0, 0), base),
5772 UNSPEC_LTREL_BASE);
5773 return;
5774 }
5775
5776 fmt = GET_RTX_FORMAT (GET_CODE (*x));
5777 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
5778 {
5779 if (fmt[i] == 'e')
5780 {
5781 annotate_constant_pool_refs (&XEXP (*x, i));
5782 }
5783 else if (fmt[i] == 'E')
5784 {
5785 for (j = 0; j < XVECLEN (*x, i); j++)
5786 annotate_constant_pool_refs (&XVECEXP (*x, i, j));
5787 }
5788 }
5789 }
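/* For illustration, the effect of the annotation above: a pool reference

     (mem (symbol_ref SYM))   or   (set (reg R) (symbol_ref SYM))

   where SYM satisfies CONSTANT_POOL_ADDRESS_P is rewritten into

     (mem (unspec [SYM, BASE] UNSPEC_LTREF))
     (set (reg R) (unspec [SYM, BASE] UNSPEC_LTREF))

   with BASE being cfun->machine->base_reg, so that later passes see the
   pool symbol and the register used to address the pool together.  An
   optional CONST_INT offset is re-applied around the unspec via
   plus_constant.  */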
5790
5791 /* Split all branches that exceed the maximum distance.
5792 Returns true if this created a new literal pool entry. */
5793
5794 static int
5795 s390_split_branches (void)
5796 {
5797 rtx temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
5798 int new_literal = 0, ret;
5799 rtx insn, pat, tmp, target;
5800 rtx *label;
5801
5802 /* We need correct insn addresses. */
5803
5804 shorten_branches (get_insns ());
5805
5806 /* Find all branches that exceed 64KB, and split them. */
5807
5808 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5809 {
5810 if (GET_CODE (insn) != JUMP_INSN)
5811 continue;
5812
5813 pat = PATTERN (insn);
5814 if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 2)
5815 pat = XVECEXP (pat, 0, 0);
5816 if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
5817 continue;
5818
5819 if (GET_CODE (SET_SRC (pat)) == LABEL_REF)
5820 {
5821 label = &SET_SRC (pat);
5822 }
5823 else if (GET_CODE (SET_SRC (pat)) == IF_THEN_ELSE)
5824 {
5825 if (GET_CODE (XEXP (SET_SRC (pat), 1)) == LABEL_REF)
5826 label = &XEXP (SET_SRC (pat), 1);
5827 else if (GET_CODE (XEXP (SET_SRC (pat), 2)) == LABEL_REF)
5828 label = &XEXP (SET_SRC (pat), 2);
5829 else
5830 continue;
5831 }
5832 else
5833 continue;
5834
5835 if (get_attr_length (insn) <= 4)
5836 continue;
5837
5838 /* We are going to use the return register as a scratch register;
5839 make sure it will be saved/restored by the prologue/epilogue. */
5840 cfun_frame_layout.save_return_addr_p = 1;
5841
5842 if (!flag_pic)
5843 {
5844 new_literal = 1;
5845 tmp = force_const_mem (Pmode, *label);
5846 tmp = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, tmp), insn);
5847 INSN_ADDRESSES_NEW (tmp, -1);
5848 annotate_constant_pool_refs (&PATTERN (tmp));
5849
5850 target = temp_reg;
5851 }
5852 else
5853 {
5854 new_literal = 1;
5855 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, *label),
5856 UNSPEC_LTREL_OFFSET);
5857 target = gen_rtx_CONST (Pmode, target);
5858 target = force_const_mem (Pmode, target);
5859 tmp = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, target), insn);
5860 INSN_ADDRESSES_NEW (tmp, -1);
5861 annotate_constant_pool_refs (&PATTERN (tmp));
5862
5863 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XEXP (target, 0),
5864 cfun->machine->base_reg),
5865 UNSPEC_LTREL_BASE);
5866 target = gen_rtx_PLUS (Pmode, temp_reg, target);
5867 }
5868
5869 ret = validate_change (insn, label, target, 0);
5870 gcc_assert (ret);
5871 }
5872
5873 return new_literal;
5874 }
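/* For illustration, how a too-long branch is rewritten above: the branch
   target LABEL is forced into the literal pool, loaded into the return
   register, and the branch is redirected through that register:

     non-PIC:  r14 <- pool entry holding LABEL;  jump via r14
     PIC:      r14 <- pool entry holding
                      (unspec [LABEL] UNSPEC_LTREL_OFFSET);
               jump via r14 + (unspec [...] UNSPEC_LTREL_BASE)

   This is why save_return_addr_p is forced whenever a branch is split.  */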
5875
5876
5877 /* Find an annotated literal pool symbol referenced in RTX X,
5878 and store it at REF. Will abort if X contains references to
5879 more than one such pool symbol; multiple references to the same
5880 symbol are allowed, however.
5881
5882 The rtx pointed to by REF must be initialized to NULL_RTX
5883 by the caller before calling this routine. */
5884
5885 static void
5886 find_constant_pool_ref (rtx x, rtx *ref)
5887 {
5888 int i, j;
5889 const char *fmt;
5890
5891 /* Ignore LTREL_BASE references. */
5892 if (GET_CODE (x) == UNSPEC
5893 && XINT (x, 1) == UNSPEC_LTREL_BASE)
5894 return;
5895 /* Likewise POOL_ENTRY insns. */
5896 if (GET_CODE (x) == UNSPEC_VOLATILE
5897 && XINT (x, 1) == UNSPECV_POOL_ENTRY)
5898 return;
5899
5900 gcc_assert (GET_CODE (x) != SYMBOL_REF
5901 || !CONSTANT_POOL_ADDRESS_P (x));
5902
5903 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_LTREF)
5904 {
5905 rtx sym = XVECEXP (x, 0, 0);
5906 gcc_assert (GET_CODE (sym) == SYMBOL_REF
5907 && CONSTANT_POOL_ADDRESS_P (sym));
5908
5909 if (*ref == NULL_RTX)
5910 *ref = sym;
5911 else
5912 gcc_assert (*ref == sym);
5913
5914 return;
5915 }
5916
5917 fmt = GET_RTX_FORMAT (GET_CODE (x));
5918 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5919 {
5920 if (fmt[i] == 'e')
5921 {
5922 find_constant_pool_ref (XEXP (x, i), ref);
5923 }
5924 else if (fmt[i] == 'E')
5925 {
5926 for (j = 0; j < XVECLEN (x, i); j++)
5927 find_constant_pool_ref (XVECEXP (x, i, j), ref);
5928 }
5929 }
5930 }
5931
5932 /* Replace every reference to the annotated literal pool
5933 symbol REF in X by its base plus OFFSET. */
5934
5935 static void
5936 replace_constant_pool_ref (rtx *x, rtx ref, rtx offset)
5937 {
5938 int i, j;
5939 const char *fmt;
5940
5941 gcc_assert (*x != ref);
5942
5943 if (GET_CODE (*x) == UNSPEC
5944 && XINT (*x, 1) == UNSPEC_LTREF
5945 && XVECEXP (*x, 0, 0) == ref)
5946 {
5947 *x = gen_rtx_PLUS (Pmode, XVECEXP (*x, 0, 1), offset);
5948 return;
5949 }
5950
5951 if (GET_CODE (*x) == PLUS
5952 && GET_CODE (XEXP (*x, 1)) == CONST_INT
5953 && GET_CODE (XEXP (*x, 0)) == UNSPEC
5954 && XINT (XEXP (*x, 0), 1) == UNSPEC_LTREF
5955 && XVECEXP (XEXP (*x, 0), 0, 0) == ref)
5956 {
5957 rtx addr = gen_rtx_PLUS (Pmode, XVECEXP (XEXP (*x, 0), 0, 1), offset);
5958 *x = plus_constant (addr, INTVAL (XEXP (*x, 1)));
5959 return;
5960 }
5961
5962 fmt = GET_RTX_FORMAT (GET_CODE (*x));
5963 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
5964 {
5965 if (fmt[i] == 'e')
5966 {
5967 replace_constant_pool_ref (&XEXP (*x, i), ref, offset);
5968 }
5969 else if (fmt[i] == 'E')
5970 {
5971 for (j = 0; j < XVECLEN (*x, i); j++)
5972 replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, offset);
5973 }
5974 }
5975 }
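/* For illustration, the replacement performed above once the pool layout
   is fixed:

     (unspec [REF, BASE] UNSPEC_LTREF)            ->  (plus BASE OFFSET)
     (plus (unspec [REF, BASE] UNSPEC_LTREF) N)   ->  (plus BASE OFFSET+N)

   where OFFSET is the pool-relative position delivered by
   s390_find_constant or s390_find_execute.  */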
5976
5977 /* Check whether X contains an UNSPEC_LTREL_BASE.
5978 Return its constant pool symbol if found, NULL_RTX otherwise. */
5979
5980 static rtx
5981 find_ltrel_base (rtx x)
5982 {
5983 int i, j;
5984 const char *fmt;
5985
5986 if (GET_CODE (x) == UNSPEC
5987 && XINT (x, 1) == UNSPEC_LTREL_BASE)
5988 return XVECEXP (x, 0, 0);
5989
5990 fmt = GET_RTX_FORMAT (GET_CODE (x));
5991 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5992 {
5993 if (fmt[i] == 'e')
5994 {
5995 rtx fnd = find_ltrel_base (XEXP (x, i));
5996 if (fnd)
5997 return fnd;
5998 }
5999 else if (fmt[i] == 'E')
6000 {
6001 for (j = 0; j < XVECLEN (x, i); j++)
6002 {
6003 rtx fnd = find_ltrel_base (XVECEXP (x, i, j));
6004 if (fnd)
6005 return fnd;
6006 }
6007 }
6008 }
6009
6010 return NULL_RTX;
6011 }
6012
6013 /* Replace any occurrence of UNSPEC_LTREL_BASE in X with its base. */
6014
6015 static void
6016 replace_ltrel_base (rtx *x)
6017 {
6018 int i, j;
6019 const char *fmt;
6020
6021 if (GET_CODE (*x) == UNSPEC
6022 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
6023 {
6024 *x = XVECEXP (*x, 0, 1);
6025 return;
6026 }
6027
6028 fmt = GET_RTX_FORMAT (GET_CODE (*x));
6029 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
6030 {
6031 if (fmt[i] == 'e')
6032 {
6033 replace_ltrel_base (&XEXP (*x, i));
6034 }
6035 else if (fmt[i] == 'E')
6036 {
6037 for (j = 0; j < XVECLEN (*x, i); j++)
6038 replace_ltrel_base (&XVECEXP (*x, i, j));
6039 }
6040 }
6041 }
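/* For illustration, the LTREL pairing handled by the two routines above:
   an UNSPEC_LTREL_OFFSET pool entry holds the offset of some label from
   the pool base, and the matching UNSPEC_LTREL_BASE marks the place where
   that base has to be added back in.  find_ltrel_base locates such a use,
   and replace_ltrel_base finally substitutes the base register, which is
   carried as the second operand of the unspec.  */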
6042
6043
6044 /* We keep a list of constants which we have to add to internal
6045 constant tables in the middle of large functions. */
6046
6047 #define NR_C_MODES 11
6048 enum machine_mode constant_modes[NR_C_MODES] =
6049 {
6050 TFmode, TImode, TDmode,
6051 DFmode, DImode, DDmode,
6052 SFmode, SImode, SDmode,
6053 HImode,
6054 QImode
6055 };
6056
6057 struct constant
6058 {
6059 struct constant *next;
6060 rtx value;
6061 rtx label;
6062 };
6063
6064 struct constant_pool
6065 {
6066 struct constant_pool *next;
6067 rtx first_insn;
6068 rtx pool_insn;
6069 bitmap insns;
6070 rtx emit_pool_after;
6071
6072 struct constant *constants[NR_C_MODES];
6073 struct constant *execute;
6074 rtx label;
6075 int size;
6076 };
6077
6078 /* Allocate new constant_pool structure. */
6079
6080 static struct constant_pool *
6081 s390_alloc_pool (void)
6082 {
6083 struct constant_pool *pool;
6084 int i;
6085
6086 pool = (struct constant_pool *) xmalloc (sizeof *pool);
6087 pool->next = NULL;
6088 for (i = 0; i < NR_C_MODES; i++)
6089 pool->constants[i] = NULL;
6090
6091 pool->execute = NULL;
6092 pool->label = gen_label_rtx ();
6093 pool->first_insn = NULL_RTX;
6094 pool->pool_insn = NULL_RTX;
6095 pool->insns = BITMAP_ALLOC (NULL);
6096 pool->size = 0;
6097 pool->emit_pool_after = NULL_RTX;
6098
6099 return pool;
6100 }
6101
6102 /* Create new constant pool covering instructions starting at INSN
6103 and chain it to the end of POOL_LIST. */
6104
6105 static struct constant_pool *
6106 s390_start_pool (struct constant_pool **pool_list, rtx insn)
6107 {
6108 struct constant_pool *pool, **prev;
6109
6110 pool = s390_alloc_pool ();
6111 pool->first_insn = insn;
6112
6113 for (prev = pool_list; *prev; prev = &(*prev)->next)
6114 ;
6115 *prev = pool;
6116
6117 return pool;
6118 }
6119
6120 /* End range of instructions covered by POOL at INSN and emit
6121 placeholder insn representing the pool. */
6122
6123 static void
6124 s390_end_pool (struct constant_pool *pool, rtx insn)
6125 {
6126 rtx pool_size = GEN_INT (pool->size + 8 /* alignment slop */);
6127
6128 if (!insn)
6129 insn = get_last_insn ();
6130
6131 pool->pool_insn = emit_insn_after (gen_pool (pool_size), insn);
6132 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6133 }
6134
6135 /* Add INSN to the list of insns covered by POOL. */
6136
6137 static void
6138 s390_add_pool_insn (struct constant_pool *pool, rtx insn)
6139 {
6140 bitmap_set_bit (pool->insns, INSN_UID (insn));
6141 }
6142
6143 /* Return pool out of POOL_LIST that covers INSN. */
6144
6145 static struct constant_pool *
6146 s390_find_pool (struct constant_pool *pool_list, rtx insn)
6147 {
6148 struct constant_pool *pool;
6149
6150 for (pool = pool_list; pool; pool = pool->next)
6151 if (bitmap_bit_p (pool->insns, INSN_UID (insn)))
6152 break;
6153
6154 return pool;
6155 }
6156
6157 /* Add constant VAL of mode MODE to the constant pool POOL. */
6158
6159 static void
6160 s390_add_constant (struct constant_pool *pool, rtx val, enum machine_mode mode)
6161 {
6162 struct constant *c;
6163 int i;
6164
6165 for (i = 0; i < NR_C_MODES; i++)
6166 if (constant_modes[i] == mode)
6167 break;
6168 gcc_assert (i != NR_C_MODES);
6169
6170 for (c = pool->constants[i]; c != NULL; c = c->next)
6171 if (rtx_equal_p (val, c->value))
6172 break;
6173
6174 if (c == NULL)
6175 {
6176 c = (struct constant *) xmalloc (sizeof *c);
6177 c->value = val;
6178 c->label = gen_label_rtx ();
6179 c->next = pool->constants[i];
6180 pool->constants[i] = c;
6181 pool->size += GET_MODE_SIZE (mode);
6182 }
6183 }
6184
6185 /* Return an rtx that represents the offset of X from the start of
6186 pool POOL. */
6187
6188 static rtx
6189 s390_pool_offset (struct constant_pool *pool, rtx x)
6190 {
6191 rtx label;
6192
6193 label = gen_rtx_LABEL_REF (GET_MODE (x), pool->label);
6194 x = gen_rtx_UNSPEC (GET_MODE (x), gen_rtvec (2, x, label),
6195 UNSPEC_POOL_OFFSET);
6196 return gen_rtx_CONST (GET_MODE (x), x);
6197 }
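/* For illustration, s390_pool_offset wraps X as

     (const (unspec [X, (label_ref POOL_LABEL)] UNSPEC_POOL_OFFSET))

   i.e. an assemble-time constant standing for the distance of X from the
   pool base label, which is exactly the displacement needed relative to
   the pool base register.  */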
6198
6199 /* Find constant VAL of mode MODE in the constant pool POOL.
6200 Return an RTX describing the distance from the start of
6201 the pool to the location of the new constant. */
6202
6203 static rtx
6204 s390_find_constant (struct constant_pool *pool, rtx val,
6205 enum machine_mode mode)
6206 {
6207 struct constant *c;
6208 int i;
6209
6210 for (i = 0; i < NR_C_MODES; i++)
6211 if (constant_modes[i] == mode)
6212 break;
6213 gcc_assert (i != NR_C_MODES);
6214
6215 for (c = pool->constants[i]; c != NULL; c = c->next)
6216 if (rtx_equal_p (val, c->value))
6217 break;
6218
6219 gcc_assert (c);
6220
6221 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
6222 }
6223
6224 /* Check whether INSN is an execute. Return the label_ref to its
6225 execute target template if so, NULL_RTX otherwise. */
6226
6227 static rtx
6228 s390_execute_label (rtx insn)
6229 {
6230 if (GET_CODE (insn) == INSN
6231 && GET_CODE (PATTERN (insn)) == PARALLEL
6232 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == UNSPEC
6233 && XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_EXECUTE)
6234 return XVECEXP (XVECEXP (PATTERN (insn), 0, 0), 0, 2);
6235
6236 return NULL_RTX;
6237 }
6238
6239 /* Add execute target for INSN to the constant pool POOL. */
6240
6241 static void
6242 s390_add_execute (struct constant_pool *pool, rtx insn)
6243 {
6244 struct constant *c;
6245
6246 for (c = pool->execute; c != NULL; c = c->next)
6247 if (INSN_UID (insn) == INSN_UID (c->value))
6248 break;
6249
6250 if (c == NULL)
6251 {
6252 c = (struct constant *) xmalloc (sizeof *c);
6253 c->value = insn;
6254 c->label = gen_label_rtx ();
6255 c->next = pool->execute;
6256 pool->execute = c;
6257 pool->size += 6;
6258 }
6259 }
6260
6261 /* Find execute target for INSN in the constant pool POOL.
6262 Return an RTX describing the distance from the start of
6263 the pool to the location of the execute target. */
6264
6265 static rtx
6266 s390_find_execute (struct constant_pool *pool, rtx insn)
6267 {
6268 struct constant *c;
6269
6270 for (c = pool->execute; c != NULL; c = c->next)
6271 if (INSN_UID (insn) == INSN_UID (c->value))
6272 break;
6273
6274 gcc_assert (c);
6275
6276 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
6277 }
6278
6279 /* For an execute INSN, extract the execute target template. */
6280
6281 static rtx
6282 s390_execute_target (rtx insn)
6283 {
6284 rtx pattern = PATTERN (insn);
6285 gcc_assert (s390_execute_label (insn));
6286
6287 if (XVECLEN (pattern, 0) == 2)
6288 {
6289 pattern = copy_rtx (XVECEXP (pattern, 0, 1));
6290 }
6291 else
6292 {
6293 rtvec vec = rtvec_alloc (XVECLEN (pattern, 0) - 1);
6294 int i;
6295
6296 for (i = 0; i < XVECLEN (pattern, 0) - 1; i++)
6297 RTVEC_ELT (vec, i) = copy_rtx (XVECEXP (pattern, 0, i + 1));
6298
6299 pattern = gen_rtx_PARALLEL (VOIDmode, vec);
6300 }
6301
6302 return pattern;
6303 }
6304
6305 /* Indicate that INSN cannot be duplicated. This is the case for
6306 execute insns that carry a unique label. */
6307
6308 static bool
6309 s390_cannot_copy_insn_p (rtx insn)
6310 {
6311 rtx label = s390_execute_label (insn);
6312 return label && label != const0_rtx;
6313 }
6314
6315 /* Dump out the constants in POOL. If REMOTE_LABEL is true,
6316 do not emit the pool base label. */
6317
6318 static void
6319 s390_dump_pool (struct constant_pool *pool, bool remote_label)
6320 {
6321 struct constant *c;
6322 rtx insn = pool->pool_insn;
6323 int i;
6324
6325 /* Switch to rodata section. */
6326 if (TARGET_CPU_ZARCH)
6327 {
6328 insn = emit_insn_after (gen_pool_section_start (), insn);
6329 INSN_ADDRESSES_NEW (insn, -1);
6330 }
6331
6332 /* Ensure minimum pool alignment. */
6333 if (TARGET_CPU_ZARCH)
6334 insn = emit_insn_after (gen_pool_align (GEN_INT (8)), insn);
6335 else
6336 insn = emit_insn_after (gen_pool_align (GEN_INT (4)), insn);
6337 INSN_ADDRESSES_NEW (insn, -1);
6338
6339 /* Emit pool base label. */
6340 if (!remote_label)
6341 {
6342 insn = emit_label_after (pool->label, insn);
6343 INSN_ADDRESSES_NEW (insn, -1);
6344 }
6345
6346 /* Dump constants in descending alignment requirement order,
6347 ensuring proper alignment for every constant. */
6348 for (i = 0; i < NR_C_MODES; i++)
6349 for (c = pool->constants[i]; c; c = c->next)
6350 {
6351 /* Convert UNSPEC_LTREL_OFFSET unspecs to pool-relative references. */
6352 rtx value = copy_rtx (c->value);
6353 if (GET_CODE (value) == CONST
6354 && GET_CODE (XEXP (value, 0)) == UNSPEC
6355 && XINT (XEXP (value, 0), 1) == UNSPEC_LTREL_OFFSET
6356 && XVECLEN (XEXP (value, 0), 0) == 1)
6357 value = s390_pool_offset (pool, XVECEXP (XEXP (value, 0), 0, 0));
6358
6359 insn = emit_label_after (c->label, insn);
6360 INSN_ADDRESSES_NEW (insn, -1);
6361
6362 value = gen_rtx_UNSPEC_VOLATILE (constant_modes[i],
6363 gen_rtvec (1, value),
6364 UNSPECV_POOL_ENTRY);
6365 insn = emit_insn_after (value, insn);
6366 INSN_ADDRESSES_NEW (insn, -1);
6367 }
6368
6369 /* Ensure minimum alignment for instructions. */
6370 insn = emit_insn_after (gen_pool_align (GEN_INT (2)), insn);
6371 INSN_ADDRESSES_NEW (insn, -1);
6372
6373 /* Output in-pool execute template insns. */
6374 for (c = pool->execute; c; c = c->next)
6375 {
6376 insn = emit_label_after (c->label, insn);
6377 INSN_ADDRESSES_NEW (insn, -1);
6378
6379 insn = emit_insn_after (s390_execute_target (c->value), insn);
6380 INSN_ADDRESSES_NEW (insn, -1);
6381 }
6382
6383 /* Switch back to previous section. */
6384 if (TARGET_CPU_ZARCH)
6385 {
6386 insn = emit_insn_after (gen_pool_section_end (), insn);
6387 INSN_ADDRESSES_NEW (insn, -1);
6388 }
6389
6390 insn = emit_barrier_after (insn);
6391 INSN_ADDRESSES_NEW (insn, -1);
6392
6393 /* Remove placeholder insn. */
6394 remove_insn (pool->pool_insn);
6395 }
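/* For illustration, the sequence emitted by s390_dump_pool in place of the
   gen_pool placeholder is roughly:

     pool_section_start            (zarch only: switch to the rodata section)
     pool_align 8 (zarch) / 4
     pool base label               (unless REMOTE_LABEL)
     for each constant:       its label, then an UNSPECV_POOL_ENTRY insn
     pool_align 2
     for each execute target: its label, then the target template insn
     pool_section_end              (zarch only)
     barrier  */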
6396
6397 /* Free all memory used by POOL. */
6398
6399 static void
6400 s390_free_pool (struct constant_pool *pool)
6401 {
6402 struct constant *c, *next;
6403 int i;
6404
6405 for (i = 0; i < NR_C_MODES; i++)
6406 for (c = pool->constants[i]; c; c = next)
6407 {
6408 next = c->next;
6409 free (c);
6410 }
6411
6412 for (c = pool->execute; c; c = next)
6413 {
6414 next = c->next;
6415 free (c);
6416 }
6417
6418 BITMAP_FREE (pool->insns);
6419 free (pool);
6420 }
6421
6422
6423 /* Collect main literal pool. Return NULL on overflow. */
6424
6425 static struct constant_pool *
6426 s390_mainpool_start (void)
6427 {
6428 struct constant_pool *pool;
6429 rtx insn;
6430
6431 pool = s390_alloc_pool ();
6432
6433 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6434 {
6435 if (GET_CODE (insn) == INSN
6436 && GET_CODE (PATTERN (insn)) == SET
6437 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC_VOLATILE
6438 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPECV_MAIN_POOL)
6439 {
6440 gcc_assert (!pool->pool_insn);
6441 pool->pool_insn = insn;
6442 }
6443
6444 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
6445 {
6446 s390_add_execute (pool, insn);
6447 }
6448 else if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6449 {
6450 rtx pool_ref = NULL_RTX;
6451 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6452 if (pool_ref)
6453 {
6454 rtx constant = get_pool_constant (pool_ref);
6455 enum machine_mode mode = get_pool_mode (pool_ref);
6456 s390_add_constant (pool, constant, mode);
6457 }
6458 }
6459
6460 /* If hot/cold partitioning is enabled, we have to make sure that
6461 the literal pool is emitted in the same section where the
6462 initialization of the literal pool base pointer takes place.
6463 emit_pool_after is only used in the non-overflow case on
6464 non-Z CPUs, where we can emit the literal pool at the end of the
6465 function body within the text section. */
6466 if (NOTE_P (insn)
6467 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS
6468 && !pool->emit_pool_after)
6469 pool->emit_pool_after = PREV_INSN (insn);
6470 }
6471
6472 gcc_assert (pool->pool_insn || pool->size == 0);
6473
6474 if (pool->size >= 4096)
6475 {
6476 /* We're going to chunkify the pool, so remove the main
6477 pool placeholder insn. */
6478 remove_insn (pool->pool_insn);
6479
6480 s390_free_pool (pool);
6481 pool = NULL;
6482 }
6483
6484 /* If the function ends with the section where the literal pool
6485 should be emitted, set the marker to its end. */
6486 if (pool && !pool->emit_pool_after)
6487 pool->emit_pool_after = get_last_insn ();
6488
6489 return pool;
6490 }
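/* A note on the 4096 limit above: pool constants are reached via a base
   register plus a 12-bit unsigned displacement, so a single pool must stay
   below 4 KB.  Anything larger falls back to the chunkified scheme in
   s390_chunkify_start below.  */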
6491
6492 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6493 Modify the current function to output the pool constants as well as
6494 the pool register setup instruction. */
6495
6496 static void
6497 s390_mainpool_finish (struct constant_pool *pool)
6498 {
6499 rtx base_reg = cfun->machine->base_reg;
6500 rtx insn;
6501
6502 /* If the pool is empty, we're done. */
6503 if (pool->size == 0)
6504 {
6505 /* We don't actually need a base register after all. */
6506 cfun->machine->base_reg = NULL_RTX;
6507
6508 if (pool->pool_insn)
6509 remove_insn (pool->pool_insn);
6510 s390_free_pool (pool);
6511 return;
6512 }
6513
6514 /* We need correct insn addresses. */
6515 shorten_branches (get_insns ());
6516
6517 /* On zSeries, we use a LARL to load the pool register. The pool is
6518 located in the .rodata section, so we emit it after the function. */
6519 if (TARGET_CPU_ZARCH)
6520 {
6521 insn = gen_main_base_64 (base_reg, pool->label);
6522 insn = emit_insn_after (insn, pool->pool_insn);
6523 INSN_ADDRESSES_NEW (insn, -1);
6524 remove_insn (pool->pool_insn);
6525
6526 insn = get_last_insn ();
6527 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6528 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6529
6530 s390_dump_pool (pool, 0);
6531 }
6532
6533 /* On S/390, if the total size of the function's code plus literal pool
6534 does not exceed 4096 bytes, we use BASR to set up a function base
6535 pointer, and emit the literal pool at the end of the function. */
6536 else if (INSN_ADDRESSES (INSN_UID (pool->emit_pool_after))
6537 + pool->size + 8 /* alignment slop */ < 4096)
6538 {
6539 insn = gen_main_base_31_small (base_reg, pool->label);
6540 insn = emit_insn_after (insn, pool->pool_insn);
6541 INSN_ADDRESSES_NEW (insn, -1);
6542 remove_insn (pool->pool_insn);
6543
6544 insn = emit_label_after (pool->label, insn);
6545 INSN_ADDRESSES_NEW (insn, -1);
6546
6547 /* emit_pool_after will be set by s390_mainpool_start to the
6548 last insn of the section where the literal pool should be
6549 emitted. */
6550 insn = pool->emit_pool_after;
6551
6552 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6553 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6554
6555 s390_dump_pool (pool, 1);
6556 }
6557
6558 /* Otherwise, we emit an inline literal pool and use BASR to branch
6559 over it, setting up the pool register at the same time. */
6560 else
6561 {
6562 rtx pool_end = gen_label_rtx ();
6563
6564 insn = gen_main_base_31_large (base_reg, pool->label, pool_end);
6565 insn = emit_insn_after (insn, pool->pool_insn);
6566 INSN_ADDRESSES_NEW (insn, -1);
6567 remove_insn (pool->pool_insn);
6568
6569 insn = emit_label_after (pool->label, insn);
6570 INSN_ADDRESSES_NEW (insn, -1);
6571
6572 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6573 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6574
6575 insn = emit_label_after (pool_end, pool->pool_insn);
6576 INSN_ADDRESSES_NEW (insn, -1);
6577
6578 s390_dump_pool (pool, 1);
6579 }
6580
6581
6582 /* Replace all literal pool references. */
6583
6584 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6585 {
6586 if (INSN_P (insn))
6587 replace_ltrel_base (&PATTERN (insn));
6588
6589 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6590 {
6591 rtx addr, pool_ref = NULL_RTX;
6592 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6593 if (pool_ref)
6594 {
6595 if (s390_execute_label (insn))
6596 addr = s390_find_execute (pool, insn);
6597 else
6598 addr = s390_find_constant (pool, get_pool_constant (pool_ref),
6599 get_pool_mode (pool_ref));
6600
6601 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
6602 INSN_CODE (insn) = -1;
6603 }
6604 }
6605 }
6606
6607
6608 /* Free the pool. */
6609 s390_free_pool (pool);
6610 }
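/* Summary of the three layouts chosen above:

   1. TARGET_CPU_ZARCH: a LARL (main_base_64) loads the pool base and the
      pool is dumped after the last insn, ending up in rodata via
      s390_dump_pool.

   2. 31 bit, code plus pool smaller than 4096 bytes: main_base_31_small
      sets up the base via BASR and the pool is placed at the end of the
      section recorded in emit_pool_after.

   3. 31 bit otherwise: main_base_31_large emits the pool inline and
      branches over it while setting up the base register.  */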
6611
6612 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6613 We have decided we cannot use this pool, so revert all changes
6614 to the current function that were done by s390_mainpool_start. */
6615 static void
6616 s390_mainpool_cancel (struct constant_pool *pool)
6617 {
6618 /* We didn't actually change the instruction stream, so simply
6619 free the pool memory. */
6620 s390_free_pool (pool);
6621 }
6622
6623
6624 /* Chunkify the literal pool. */
6625
6626 #define S390_POOL_CHUNK_MIN 0xc00
6627 #define S390_POOL_CHUNK_MAX 0xe00
6628
6629 static struct constant_pool *
6630 s390_chunkify_start (void)
6631 {
6632 struct constant_pool *curr_pool = NULL, *pool_list = NULL;
6633 int extra_size = 0;
6634 bitmap far_labels;
6635 rtx pending_ltrel = NULL_RTX;
6636 rtx insn;
6637
6638 rtx (*gen_reload_base) (rtx, rtx) =
6639 TARGET_CPU_ZARCH? gen_reload_base_64 : gen_reload_base_31;
6640
6641
6642 /* We need correct insn addresses. */
6643
6644 shorten_branches (get_insns ());
6645
6646 /* Scan all insns and move literals to pool chunks. */
6647
6648 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6649 {
6650 bool section_switch_p = false;
6651
6652 /* Check for pending LTREL_BASE. */
6653 if (INSN_P (insn))
6654 {
6655 rtx ltrel_base = find_ltrel_base (PATTERN (insn));
6656 if (ltrel_base)
6657 {
6658 gcc_assert (ltrel_base == pending_ltrel);
6659 pending_ltrel = NULL_RTX;
6660 }
6661 }
6662
6663 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
6664 {
6665 if (!curr_pool)
6666 curr_pool = s390_start_pool (&pool_list, insn);
6667
6668 s390_add_execute (curr_pool, insn);
6669 s390_add_pool_insn (curr_pool, insn);
6670 }
6671 else if (GET_CODE (insn) == INSN || CALL_P (insn))
6672 {
6673 rtx pool_ref = NULL_RTX;
6674 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6675 if (pool_ref)
6676 {
6677 rtx constant = get_pool_constant (pool_ref);
6678 enum machine_mode mode = get_pool_mode (pool_ref);
6679
6680 if (!curr_pool)
6681 curr_pool = s390_start_pool (&pool_list, insn);
6682
6683 s390_add_constant (curr_pool, constant, mode);
6684 s390_add_pool_insn (curr_pool, insn);
6685
6686 /* Don't split the pool chunk between a LTREL_OFFSET load
6687 and the corresponding LTREL_BASE. */
6688 if (GET_CODE (constant) == CONST
6689 && GET_CODE (XEXP (constant, 0)) == UNSPEC
6690 && XINT (XEXP (constant, 0), 1) == UNSPEC_LTREL_OFFSET)
6691 {
6692 gcc_assert (!pending_ltrel);
6693 pending_ltrel = pool_ref;
6694 }
6695 }
6696 /* Make sure we do not split between a call and its
6697 corresponding CALL_ARG_LOCATION note. */
6698 if (CALL_P (insn))
6699 {
6700 rtx next = NEXT_INSN (insn);
6701 if (next && NOTE_P (next)
6702 && NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION)
6703 continue;
6704 }
6705 }
6706
6707 if (GET_CODE (insn) == JUMP_INSN || GET_CODE (insn) == CODE_LABEL)
6708 {
6709 if (curr_pool)
6710 s390_add_pool_insn (curr_pool, insn);
6711 /* An LTREL_BASE must follow within the same basic block. */
6712 gcc_assert (!pending_ltrel);
6713 }
6714
6715 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
6716 section_switch_p = true;
6717
6718 if (!curr_pool
6719 || INSN_ADDRESSES_SIZE () <= (size_t) INSN_UID (insn)
6720 || INSN_ADDRESSES (INSN_UID (insn)) == -1)
6721 continue;
6722
6723 if (TARGET_CPU_ZARCH)
6724 {
6725 if (curr_pool->size < S390_POOL_CHUNK_MAX)
6726 continue;
6727
6728 s390_end_pool (curr_pool, NULL_RTX);
6729 curr_pool = NULL;
6730 }
6731 else
6732 {
6733 int chunk_size = INSN_ADDRESSES (INSN_UID (insn))
6734 - INSN_ADDRESSES (INSN_UID (curr_pool->first_insn))
6735 + extra_size;
6736
6737 /* We will later have to insert base register reload insns.
6738 Those will have an effect on code size, which we need to
6739 consider here. This calculation makes rather pessimistic
6740 worst-case assumptions. */
6741 if (GET_CODE (insn) == CODE_LABEL)
6742 extra_size += 6;
6743
6744 if (chunk_size < S390_POOL_CHUNK_MIN
6745 && curr_pool->size < S390_POOL_CHUNK_MIN
6746 && !section_switch_p)
6747 continue;
6748
6749 /* Pool chunks can only be inserted after BARRIERs ... */
6750 if (GET_CODE (insn) == BARRIER)
6751 {
6752 s390_end_pool (curr_pool, insn);
6753 curr_pool = NULL;
6754 extra_size = 0;
6755 }
6756
6757 /* ... so if we don't find one in time, create one. */
6758 else if (chunk_size > S390_POOL_CHUNK_MAX
6759 || curr_pool->size > S390_POOL_CHUNK_MAX
6760 || section_switch_p)
6761 {
6762 rtx label, jump, barrier;
6763
6764 if (!section_switch_p)
6765 {
6766 /* We can insert the barrier only after a 'real' insn. */
6767 if (GET_CODE (insn) != INSN && GET_CODE (insn) != CALL_INSN)
6768 continue;
6769 if (get_attr_length (insn) == 0)
6770 continue;
6771 /* Don't separate LTREL_BASE from the corresponding
6772 LTREL_OFFSET load. */
6773 if (pending_ltrel)
6774 continue;
6775 }
6776 else
6777 {
6778 gcc_assert (!pending_ltrel);
6779
6780 /* The old pool has to end before the section switch
6781 note in order to make it part of the current
6782 section. */
6783 insn = PREV_INSN (insn);
6784 }
6785
6786 label = gen_label_rtx ();
6787 jump = emit_jump_insn_after (gen_jump (label), insn);
6788 barrier = emit_barrier_after (jump);
6789 insn = emit_label_after (label, barrier);
6790 JUMP_LABEL (jump) = label;
6791 LABEL_NUSES (label) = 1;
6792
6793 INSN_ADDRESSES_NEW (jump, -1);
6794 INSN_ADDRESSES_NEW (barrier, -1);
6795 INSN_ADDRESSES_NEW (insn, -1);
6796
6797 s390_end_pool (curr_pool, barrier);
6798 curr_pool = NULL;
6799 extra_size = 0;
6800 }
6801 }
6802 }
6803
6804 if (curr_pool)
6805 s390_end_pool (curr_pool, NULL_RTX);
6806 gcc_assert (!pending_ltrel);
6807
6808 /* Find all labels that are branched into
6809 from an insn belonging to a different chunk. */
6810
6811 far_labels = BITMAP_ALLOC (NULL);
6812
6813 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6814 {
6815 /* Labels marked with LABEL_PRESERVE_P can be the target
6816 of non-local jumps, so we have to mark them.
6817 The same holds for named labels.
6818
6819 Don't do that, however, if it is the label before
6820 a jump table. */
6821
6822 if (GET_CODE (insn) == CODE_LABEL
6823 && (LABEL_PRESERVE_P (insn) || LABEL_NAME (insn)))
6824 {
6825 rtx vec_insn = next_real_insn (insn);
6826 rtx vec_pat = vec_insn && GET_CODE (vec_insn) == JUMP_INSN ?
6827 PATTERN (vec_insn) : NULL_RTX;
6828 if (!vec_pat
6829 || !(GET_CODE (vec_pat) == ADDR_VEC
6830 || GET_CODE (vec_pat) == ADDR_DIFF_VEC))
6831 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (insn));
6832 }
6833
6834 /* If we have a direct jump (conditional or unconditional)
6835 or a casesi jump, check all potential targets. */
6836 else if (GET_CODE (insn) == JUMP_INSN)
6837 {
6838 rtx pat = PATTERN (insn);
6839 if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 2)
6840 pat = XVECEXP (pat, 0, 0);
6841
6842 if (GET_CODE (pat) == SET)
6843 {
6844 rtx label = JUMP_LABEL (insn);
6845 if (label)
6846 {
6847 if (s390_find_pool (pool_list, label)
6848 != s390_find_pool (pool_list, insn))
6849 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
6850 }
6851 }
6852 else if (GET_CODE (pat) == PARALLEL
6853 && XVECLEN (pat, 0) == 2
6854 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
6855 && GET_CODE (XVECEXP (pat, 0, 1)) == USE
6856 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == LABEL_REF)
6857 {
6858 /* Find the jump table used by this casesi jump. */
6859 rtx vec_label = XEXP (XEXP (XVECEXP (pat, 0, 1), 0), 0);
6860 rtx vec_insn = next_real_insn (vec_label);
6861 rtx vec_pat = vec_insn && GET_CODE (vec_insn) == JUMP_INSN ?
6862 PATTERN (vec_insn) : NULL_RTX;
6863 if (vec_pat
6864 && (GET_CODE (vec_pat) == ADDR_VEC
6865 || GET_CODE (vec_pat) == ADDR_DIFF_VEC))
6866 {
6867 int i, diff_p = GET_CODE (vec_pat) == ADDR_DIFF_VEC;
6868
6869 for (i = 0; i < XVECLEN (vec_pat, diff_p); i++)
6870 {
6871 rtx label = XEXP (XVECEXP (vec_pat, diff_p, i), 0);
6872
6873 if (s390_find_pool (pool_list, label)
6874 != s390_find_pool (pool_list, insn))
6875 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
6876 }
6877 }
6878 }
6879 }
6880 }
6881
6882 /* Insert base register reload insns before every pool. */
6883
6884 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
6885 {
6886 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
6887 curr_pool->label);
6888 rtx insn = curr_pool->first_insn;
6889 INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
6890 }
6891
6892 /* Insert base register reload insns at every far label. */
6893
6894 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6895 if (GET_CODE (insn) == CODE_LABEL
6896 && bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
6897 {
6898 struct constant_pool *pool = s390_find_pool (pool_list, insn);
6899 if (pool)
6900 {
6901 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
6902 pool->label);
6903 INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
6904 }
6905 }
6906
6907
6908 BITMAP_FREE (far_labels);
6909
6910
6911 /* Recompute insn addresses. */
6912
6913 init_insn_lengths ();
6914 shorten_branches (get_insns ());
6915
6916 return pool_list;
6917 }
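/* A note on the chunk thresholds used above: S390_POOL_CHUNK_MIN is 0xc00
   (3072) and S390_POOL_CHUNK_MAX is 0xe00 (3584).  On zarch a chunk is
   simply ended once its pool size reaches the maximum.  On 31 bit a chunk
   is ended at the next BARRIER once the covered code size (including the
   pessimistic extra_size for base reloads) or the pool size reaches the
   minimum, and a jump/barrier pair is inserted to end it forcibly once the
   maximum is exceeded, keeping each chunk safely within the 4096-byte
   displacement range.  */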
6918
6919 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
6920 After we have decided to use this list, finish implementing
6921 all changes to the current function as required. */
6922
6923 static void
6924 s390_chunkify_finish (struct constant_pool *pool_list)
6925 {
6926 struct constant_pool *curr_pool = NULL;
6927 rtx insn;
6928
6929
6930 /* Replace all literal pool references. */
6931
6932 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6933 {
6934 if (INSN_P (insn))
6935 replace_ltrel_base (&PATTERN (insn));
6936
6937 curr_pool = s390_find_pool (pool_list, insn);
6938 if (!curr_pool)
6939 continue;
6940
6941 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6942 {
6943 rtx addr, pool_ref = NULL_RTX;
6944 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6945 if (pool_ref)
6946 {
6947 if (s390_execute_label (insn))
6948 addr = s390_find_execute (curr_pool, insn);
6949 else
6950 addr = s390_find_constant (curr_pool,
6951 get_pool_constant (pool_ref),
6952 get_pool_mode (pool_ref));
6953
6954 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
6955 INSN_CODE (insn) = -1;
6956 }
6957 }
6958 }
6959
6960 /* Dump out all literal pools. */
6961
6962 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
6963 s390_dump_pool (curr_pool, 0);
6964
6965 /* Free pool list. */
6966
6967 while (pool_list)
6968 {
6969 struct constant_pool *next = pool_list->next;
6970 s390_free_pool (pool_list);
6971 pool_list = next;
6972 }
6973 }
6974
6975 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
6976 We have decided we cannot use this list, so revert all changes
6977 to the current function that were done by s390_chunkify_start. */
6978
6979 static void
6980 s390_chunkify_cancel (struct constant_pool *pool_list)
6981 {
6982 struct constant_pool *curr_pool = NULL;
6983 rtx insn;
6984
6985 /* Remove all pool placeholder insns. */
6986
6987 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
6988 {
6989 /* Did we insert an extra barrier? Remove it. */
6990 rtx barrier = PREV_INSN (curr_pool->pool_insn);
6991 rtx jump = barrier? PREV_INSN (barrier) : NULL_RTX;
6992 rtx label = NEXT_INSN (curr_pool->pool_insn);
6993
6994 if (jump && GET_CODE (jump) == JUMP_INSN
6995 && barrier && GET_CODE (barrier) == BARRIER
6996 && label && GET_CODE (label) == CODE_LABEL
6997 && GET_CODE (PATTERN (jump)) == SET
6998 && SET_DEST (PATTERN (jump)) == pc_rtx
6999 && GET_CODE (SET_SRC (PATTERN (jump))) == LABEL_REF
7000 && XEXP (SET_SRC (PATTERN (jump)), 0) == label)
7001 {
7002 remove_insn (jump);
7003 remove_insn (barrier);
7004 remove_insn (label);
7005 }
7006
7007 remove_insn (curr_pool->pool_insn);
7008 }
7009
7010 /* Remove all base register reload insns. */
7011
7012 for (insn = get_insns (); insn; )
7013 {
7014 rtx next_insn = NEXT_INSN (insn);
7015
7016 if (GET_CODE (insn) == INSN
7017 && GET_CODE (PATTERN (insn)) == SET
7018 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
7019 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_RELOAD_BASE)
7020 remove_insn (insn);
7021
7022 insn = next_insn;
7023 }
7024
7025 /* Free pool list. */
7026
7027 while (pool_list)
7028 {
7029 struct constant_pool *next = pool_list->next;
7030 s390_free_pool (pool_list);
7031 pool_list = next;
7032 }
7033 }
7034
7035 /* Output the constant pool entry EXP in mode MODE with alignment ALIGN. */
7036
7037 void
7038 s390_output_pool_entry (rtx exp, enum machine_mode mode, unsigned int align)
7039 {
7040 REAL_VALUE_TYPE r;
7041
7042 switch (GET_MODE_CLASS (mode))
7043 {
7044 case MODE_FLOAT:
7045 case MODE_DECIMAL_FLOAT:
7046 gcc_assert (GET_CODE (exp) == CONST_DOUBLE);
7047
7048 REAL_VALUE_FROM_CONST_DOUBLE (r, exp);
7049 assemble_real (r, mode, align);
7050 break;
7051
7052 case MODE_INT:
7053 assemble_integer (exp, GET_MODE_SIZE (mode), align, 1);
7054 mark_symbol_refs_as_used (exp);
7055 break;
7056
7057 default:
7058 gcc_unreachable ();
7059 }
7060 }
7061
7062
7063 /* Return an RTL expression representing the value of the return address
7064 for the frame COUNT steps up from the current frame. FRAME is the
7065 frame pointer of that frame. */
7066
7067 rtx
7068 s390_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
7069 {
7070 int offset;
7071 rtx addr;
7072
7073 /* Without backchain, we fail for all but the current frame. */
7074
7075 if (!TARGET_BACKCHAIN && count > 0)
7076 return NULL_RTX;
7077
7078 /* For the current frame, we need to make sure the initial
7079 value of RETURN_REGNUM is actually saved. */
7080
7081 if (count == 0)
7082 {
7083 /* On non-z architectures branch splitting could overwrite r14. */
7084 if (TARGET_CPU_ZARCH)
7085 return get_hard_reg_initial_val (Pmode, RETURN_REGNUM);
7086 else
7087 {
7088 cfun_frame_layout.save_return_addr_p = true;
7089 return gen_rtx_MEM (Pmode, return_address_pointer_rtx);
7090 }
7091 }
7092
7093 if (TARGET_PACKED_STACK)
7094 offset = -2 * UNITS_PER_LONG;
7095 else
7096 offset = RETURN_REGNUM * UNITS_PER_LONG;
7097
7098 addr = plus_constant (frame, offset);
7099 addr = memory_address (Pmode, addr);
7100 return gen_rtx_MEM (Pmode, addr);
7101 }
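/* A worked example of the offset computed above, assuming the usual
   definitions RETURN_REGNUM == 14 and UNITS_PER_LONG == 8 (64 bit) resp. 4
   (31 bit): without -mpacked-stack the r14 slot is at 14 * 8 == 112 resp.
   14 * 4 == 56 bytes above FRAME; with -mpacked-stack it sits at
   -2 * UNITS_PER_LONG, i.e. 16 resp. 8 bytes below FRAME.  */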
7102
7103 /* Return an RTL expression representing the back chain stored in
7104 the current stack frame. */
7105
7106 rtx
7107 s390_back_chain_rtx (void)
7108 {
7109 rtx chain;
7110
7111 gcc_assert (TARGET_BACKCHAIN);
7112
7113 if (TARGET_PACKED_STACK)
7114 chain = plus_constant (stack_pointer_rtx,
7115 STACK_POINTER_OFFSET - UNITS_PER_LONG);
7116 else
7117 chain = stack_pointer_rtx;
7118
7119 chain = gen_rtx_MEM (Pmode, chain);
7120 return chain;
7121 }
7122
7123 /* Find the first call-clobbered register unused in a function.
7124 This could be used as a base register in a leaf function
7125 or for holding the return address before the epilogue. */
7126
7127 static int
7128 find_unused_clobbered_reg (void)
7129 {
7130 int i;
7131 for (i = 0; i < 6; i++)
7132 if (!df_regs_ever_live_p (i))
7133 return i;
7134 return 0;
7135 }
7136
7137
7138 /* Helper function for s390_regs_ever_clobbered. Sets the fields in DATA for all
7139 clobbered hard regs in SETREG. */
7140
7141 static void
7142 s390_reg_clobbered_rtx (rtx setreg, const_rtx set_insn ATTRIBUTE_UNUSED, void *data)
7143 {
7144 int *regs_ever_clobbered = (int *)data;
7145 unsigned int i, regno;
7146 enum machine_mode mode = GET_MODE (setreg);
7147
7148 if (GET_CODE (setreg) == SUBREG)
7149 {
7150 rtx inner = SUBREG_REG (setreg);
7151 if (!GENERAL_REG_P (inner))
7152 return;
7153 regno = subreg_regno (setreg);
7154 }
7155 else if (GENERAL_REG_P (setreg))
7156 regno = REGNO (setreg);
7157 else
7158 return;
7159
7160 for (i = regno;
7161 i < regno + HARD_REGNO_NREGS (regno, mode);
7162 i++)
7163 regs_ever_clobbered[i] = 1;
7164 }
7165
7166 /* Walks through all basic blocks of the current function looking
7167 for clobbered hard regs using s390_reg_clobbered_rtx. The fields
7168 of the passed integer array REGS_EVER_CLOBBERED are set to one for
7169 each of those regs. */
7170
7171 static void
7172 s390_regs_ever_clobbered (int *regs_ever_clobbered)
7173 {
7174 basic_block cur_bb;
7175 rtx cur_insn;
7176 unsigned int i;
7177
7178 memset (regs_ever_clobbered, 0, 16 * sizeof (int));
7179
7180 /* For non-leaf functions we have to consider all call clobbered regs to be
7181 clobbered. */
7182 if (!current_function_is_leaf)
7183 {
7184 for (i = 0; i < 16; i++)
7185 regs_ever_clobbered[i] = call_really_used_regs[i];
7186 }
7187
7188 /* Make the "magic" eh_return registers live if necessary. For regs_ever_live
7189 this work is done by liveness analysis (mark_regs_live_at_end).
7190 Special care is needed for functions containing landing pads. Landing pads
7191 may use the eh registers, but the code which sets these registers is not
7192 contained in that function. Hence s390_regs_ever_clobbered is not able to
7193 deal with this automatically. */
7194 if (crtl->calls_eh_return || cfun->machine->has_landing_pad_p)
7195 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM ; i++)
7196 if (crtl->calls_eh_return
7197 || (cfun->machine->has_landing_pad_p
7198 && df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
7199 regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;
7200
7201 /* For nonlocal gotos all call-saved registers have to be saved.
7202 This flag is also set for the unwinding code in libgcc.
7203 See expand_builtin_unwind_init. For regs_ever_live this is done by
7204 reload. */
7205 if (cfun->has_nonlocal_label)
7206 for (i = 0; i < 16; i++)
7207 if (!call_really_used_regs[i])
7208 regs_ever_clobbered[i] = 1;
7209
7210 FOR_EACH_BB (cur_bb)
7211 {
7212 FOR_BB_INSNS (cur_bb, cur_insn)
7213 {
7214 if (INSN_P (cur_insn))
7215 note_stores (PATTERN (cur_insn),
7216 s390_reg_clobbered_rtx,
7217 regs_ever_clobbered);
7218 }
7219 }
7220 }
7221
7222 /* Determine the frame area which actually has to be accessed
7223 in the function epilogue. The values are stored at the
7224 given pointers AREA_BOTTOM (address of the lowest used stack
7225 address) and AREA_TOP (address of the first item which does
7226 not belong to the stack frame). */
7227
7228 static void
7229 s390_frame_area (int *area_bottom, int *area_top)
7230 {
7231 int b, t;
7232 int i;
7233
7234 b = INT_MAX;
7235 t = INT_MIN;
7236
7237 if (cfun_frame_layout.first_restore_gpr != -1)
7238 {
7239 b = (cfun_frame_layout.gprs_offset
7240 + cfun_frame_layout.first_restore_gpr * UNITS_PER_LONG);
7241 t = b + (cfun_frame_layout.last_restore_gpr
7242 - cfun_frame_layout.first_restore_gpr + 1) * UNITS_PER_LONG;
7243 }
7244
7245 if (TARGET_64BIT && cfun_save_high_fprs_p)
7246 {
7247 b = MIN (b, cfun_frame_layout.f8_offset);
7248 t = MAX (t, (cfun_frame_layout.f8_offset
7249 + cfun_frame_layout.high_fprs * 8));
7250 }
7251
7252 if (!TARGET_64BIT)
7253 for (i = 2; i < 4; i++)
7254 if (cfun_fpr_bit_p (i))
7255 {
7256 b = MIN (b, cfun_frame_layout.f4_offset + (i - 2) * 8);
7257 t = MAX (t, cfun_frame_layout.f4_offset + (i - 1) * 8);
7258 }
7259
7260 *area_bottom = b;
7261 *area_top = t;
7262 }
7263
7264 /* Fill cfun->machine with info about register usage of current function.
7265 Return in CLOBBERED_REGS which GPRs are currently considered set. */
7266
7267 static void
7268 s390_register_info (int clobbered_regs[])
7269 {
7270 int i, j;
7271
7272 /* fprs 8-15 are call-saved in the 64-bit ABI. */
7273 cfun_frame_layout.fpr_bitmap = 0;
7274 cfun_frame_layout.high_fprs = 0;
7275 if (TARGET_64BIT)
7276 for (i = 24; i < 32; i++)
7277 if (df_regs_ever_live_p (i) && !global_regs[i])
7278 {
7279 cfun_set_fpr_bit (i - 16);
7280 cfun_frame_layout.high_fprs++;
7281 }
7282
7283 /* Find first and last gpr to be saved. We trust regs_ever_live
7284 data, except that we don't save and restore global registers.
7285
7286 Also, all registers with special meaning to the compiler need
7287 to be handled separately. */
7288
7289 s390_regs_ever_clobbered (clobbered_regs);
7290
7291 for (i = 0; i < 16; i++)
7292 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i] && !fixed_regs[i];
7293
7294 if (frame_pointer_needed)
7295 clobbered_regs[HARD_FRAME_POINTER_REGNUM] = 1;
7296
7297 if (flag_pic)
7298 clobbered_regs[PIC_OFFSET_TABLE_REGNUM]
7299 |= df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM);
7300
7301 clobbered_regs[BASE_REGNUM]
7302 |= (cfun->machine->base_reg
7303 && REGNO (cfun->machine->base_reg) == BASE_REGNUM);
7304
7305 clobbered_regs[RETURN_REGNUM]
7306 |= (!current_function_is_leaf
7307 || TARGET_TPF_PROFILING
7308 || cfun->machine->split_branches_pending_p
7309 || cfun_frame_layout.save_return_addr_p
7310 || crtl->calls_eh_return
7311 || cfun->stdarg);
7312
7313 clobbered_regs[STACK_POINTER_REGNUM]
7314 |= (!current_function_is_leaf
7315 || TARGET_TPF_PROFILING
7316 || cfun_save_high_fprs_p
7317 || get_frame_size () > 0
7318 || cfun->calls_alloca
7319 || cfun->stdarg);
7320
7321 for (i = 6; i < 16; i++)
7322 if (df_regs_ever_live_p (i) || clobbered_regs[i])
7323 break;
7324 for (j = 15; j > i; j--)
7325 if (df_regs_ever_live_p (j) || clobbered_regs[j])
7326 break;
7327
7328 if (i == 16)
7329 {
7330 /* Nothing to save/restore. */
7331 cfun_frame_layout.first_save_gpr_slot = -1;
7332 cfun_frame_layout.last_save_gpr_slot = -1;
7333 cfun_frame_layout.first_save_gpr = -1;
7334 cfun_frame_layout.first_restore_gpr = -1;
7335 cfun_frame_layout.last_save_gpr = -1;
7336 cfun_frame_layout.last_restore_gpr = -1;
7337 }
7338 else
7339 {
7340 /* Save slots for gprs from i to j. */
7341 cfun_frame_layout.first_save_gpr_slot = i;
7342 cfun_frame_layout.last_save_gpr_slot = j;
7343
7344 for (i = cfun_frame_layout.first_save_gpr_slot;
7345 i < cfun_frame_layout.last_save_gpr_slot + 1;
7346 i++)
7347 if (clobbered_regs[i])
7348 break;
7349
7350 for (j = cfun_frame_layout.last_save_gpr_slot; j > i; j--)
7351 if (clobbered_regs[j])
7352 break;
7353
7354 if (i == cfun_frame_layout.last_save_gpr_slot + 1)
7355 {
7356 /* Nothing to save/restore. */
7357 cfun_frame_layout.first_save_gpr = -1;
7358 cfun_frame_layout.first_restore_gpr = -1;
7359 cfun_frame_layout.last_save_gpr = -1;
7360 cfun_frame_layout.last_restore_gpr = -1;
7361 }
7362 else
7363 {
7364 /* Save / Restore from gpr i to j. */
7365 cfun_frame_layout.first_save_gpr = i;
7366 cfun_frame_layout.first_restore_gpr = i;
7367 cfun_frame_layout.last_save_gpr = j;
7368 cfun_frame_layout.last_restore_gpr = j;
7369 }
7370 }
7371
7372 if (cfun->stdarg)
7373 {
7374 /* Varargs functions need to save gprs 2 to 6. */
7375 if (cfun->va_list_gpr_size
7376 && crtl->args.info.gprs < GP_ARG_NUM_REG)
7377 {
7378 int min_gpr = crtl->args.info.gprs;
7379 int max_gpr = min_gpr + cfun->va_list_gpr_size;
7380 if (max_gpr > GP_ARG_NUM_REG)
7381 max_gpr = GP_ARG_NUM_REG;
7382
7383 if (cfun_frame_layout.first_save_gpr == -1
7384 || cfun_frame_layout.first_save_gpr > 2 + min_gpr)
7385 {
7386 cfun_frame_layout.first_save_gpr = 2 + min_gpr;
7387 cfun_frame_layout.first_save_gpr_slot = 2 + min_gpr;
7388 }
7389
7390 if (cfun_frame_layout.last_save_gpr == -1
7391 || cfun_frame_layout.last_save_gpr < 2 + max_gpr - 1)
7392 {
7393 cfun_frame_layout.last_save_gpr = 2 + max_gpr - 1;
7394 cfun_frame_layout.last_save_gpr_slot = 2 + max_gpr - 1;
7395 }
7396 }
7397
7398 /* Mark f0, f2 for 31 bit and f0-f4 for 64 bit to be saved. */
7399 if (TARGET_HARD_FLOAT && cfun->va_list_fpr_size
7400 && crtl->args.info.fprs < FP_ARG_NUM_REG)
7401 {
7402 int min_fpr = crtl->args.info.fprs;
7403 int max_fpr = min_fpr + cfun->va_list_fpr_size;
7404 if (max_fpr > FP_ARG_NUM_REG)
7405 max_fpr = FP_ARG_NUM_REG;
7406
7407 /* ??? This is currently required to ensure proper location
7408 of the fpr save slots within the va_list save area. */
7409 if (TARGET_PACKED_STACK)
7410 min_fpr = 0;
7411
7412 for (i = min_fpr; i < max_fpr; i++)
7413 cfun_set_fpr_bit (i);
7414 }
7415 }
7416
7417 if (!TARGET_64BIT)
7418 for (i = 2; i < 4; i++)
7419 if (df_regs_ever_live_p (i + 16) && !global_regs[i + 16])
7420 cfun_set_fpr_bit (i);
7421 }
7422
7423 /* Fill cfun->machine with info about frame of current function. */
7424
7425 static void
7426 s390_frame_info (void)
7427 {
7428 int i;
7429
7430 cfun_frame_layout.frame_size = get_frame_size ();
7431 if (!TARGET_64BIT && cfun_frame_layout.frame_size > 0x7fff0000)
7432 fatal_error ("total size of local variables exceeds architecture limit");
7433
7434 if (!TARGET_PACKED_STACK)
7435 {
7436 cfun_frame_layout.backchain_offset = 0;
7437 cfun_frame_layout.f0_offset = 16 * UNITS_PER_LONG;
7438 cfun_frame_layout.f4_offset = cfun_frame_layout.f0_offset + 2 * 8;
7439 cfun_frame_layout.f8_offset = -cfun_frame_layout.high_fprs * 8;
7440 cfun_frame_layout.gprs_offset = (cfun_frame_layout.first_save_gpr_slot
7441 * UNITS_PER_LONG);
7442 }
7443 else if (TARGET_BACKCHAIN) /* kernel stack layout */
7444 {
7445 cfun_frame_layout.backchain_offset = (STACK_POINTER_OFFSET
7446 - UNITS_PER_LONG);
7447 cfun_frame_layout.gprs_offset
7448 = (cfun_frame_layout.backchain_offset
7449 - (STACK_POINTER_REGNUM - cfun_frame_layout.first_save_gpr_slot + 1)
7450 * UNITS_PER_LONG);
7451
7452 if (TARGET_64BIT)
7453 {
7454 cfun_frame_layout.f4_offset
7455 = (cfun_frame_layout.gprs_offset
7456 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7457
7458 cfun_frame_layout.f0_offset
7459 = (cfun_frame_layout.f4_offset
7460 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7461 }
7462 else
7463 {
7464 /* On 31 bit we have to take care of the alignment of the
7465 floating point regs to provide the fastest access. */
7466 cfun_frame_layout.f0_offset
7467 = ((cfun_frame_layout.gprs_offset
7468 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1))
7469 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7470
7471 cfun_frame_layout.f4_offset
7472 = (cfun_frame_layout.f0_offset
7473 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7474 }
7475 }
7476 else /* no backchain */
7477 {
7478 cfun_frame_layout.f4_offset
7479 = (STACK_POINTER_OFFSET
7480 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7481
7482 cfun_frame_layout.f0_offset
7483 = (cfun_frame_layout.f4_offset
7484 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7485
7486 cfun_frame_layout.gprs_offset
7487 = cfun_frame_layout.f0_offset - cfun_gprs_save_area_size;
7488 }
7489
7490 if (current_function_is_leaf
7491 && !TARGET_TPF_PROFILING
7492 && cfun_frame_layout.frame_size == 0
7493 && !cfun_save_high_fprs_p
7494 && !cfun->calls_alloca
7495 && !cfun->stdarg)
7496 return;
7497
7498 if (!TARGET_PACKED_STACK)
7499 cfun_frame_layout.frame_size += (STACK_POINTER_OFFSET
7500 + crtl->outgoing_args_size
7501 + cfun_frame_layout.high_fprs * 8);
7502 else
7503 {
7504 if (TARGET_BACKCHAIN)
7505 cfun_frame_layout.frame_size += UNITS_PER_LONG;
7506
7507 /* No alignment trouble here because f8-f15 are only saved under
7508 64 bit. */
7509 cfun_frame_layout.f8_offset = (MIN (MIN (cfun_frame_layout.f0_offset,
7510 cfun_frame_layout.f4_offset),
7511 cfun_frame_layout.gprs_offset)
7512 - cfun_frame_layout.high_fprs * 8);
7513
7514 cfun_frame_layout.frame_size += cfun_frame_layout.high_fprs * 8;
7515
7516 for (i = 0; i < 8; i++)
7517 if (cfun_fpr_bit_p (i))
7518 cfun_frame_layout.frame_size += 8;
7519
7520 cfun_frame_layout.frame_size += cfun_gprs_save_area_size;
7521
7522 /* If an odd number of gprs has to be saved under 31 bit, we have to adjust
7523 the frame size to sustain 8-byte alignment of stack frames. */
7524 cfun_frame_layout.frame_size = ((cfun_frame_layout.frame_size +
7525 STACK_BOUNDARY / BITS_PER_UNIT - 1)
7526 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1));
7527
7528 cfun_frame_layout.frame_size += crtl->outgoing_args_size;
7529 }
7530 }
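/* A worked example of the non-packed layout computed above, assuming a
   64-bit target (UNITS_PER_LONG == 8): gprs_offset is
   first_save_gpr_slot * 8, f0_offset is 16 * 8 == 128 (slots for f0/f2),
   f4_offset is 128 + 16 == 144 (slots for f4/f6), and the call-saved
   f8-f15 end up below that at the negative f8_offset, paid for later when
   frame_size is increased by high_fprs * 8.  */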
7531
7532 /* Generate frame layout. Fills in register and frame data for the current
7533 function in cfun->machine. This routine can be called multiple times;
7534 it will re-do the complete frame layout every time. */
7535
7536 static void
7537 s390_init_frame_layout (void)
7538 {
7539 HOST_WIDE_INT frame_size;
7540 int base_used;
7541 int clobbered_regs[16];
7542
7543 /* On S/390 machines, we may need to perform branch splitting, which
7544 will require both the base and the return address register. We have
7545 no choice but to assume we're going to need them until right at the
7546 end of the machine-dependent reorg phase. */
7547 if (!TARGET_CPU_ZARCH)
7548 cfun->machine->split_branches_pending_p = true;
7549
7550 do
7551 {
7552 frame_size = cfun_frame_layout.frame_size;
7553
7554 /* Try to predict whether we'll need the base register. */
7555 base_used = cfun->machine->split_branches_pending_p
7556 || crtl->uses_const_pool
7557 || (!DISP_IN_RANGE (frame_size)
7558 && !CONST_OK_FOR_K (frame_size));
7559
7560 /* Decide which register to use as literal pool base. In small
7561 leaf functions, try to use an unused call-clobbered register
7562 as base register to avoid save/restore overhead. */
7563 if (!base_used)
7564 cfun->machine->base_reg = NULL_RTX;
7565 else if (current_function_is_leaf && !df_regs_ever_live_p (5))
7566 cfun->machine->base_reg = gen_rtx_REG (Pmode, 5);
7567 else
7568 cfun->machine->base_reg = gen_rtx_REG (Pmode, BASE_REGNUM);
7569
7570 s390_register_info (clobbered_regs);
7571 s390_frame_info ();
7572 }
7573 while (frame_size != cfun_frame_layout.frame_size);
7574 }
7575
7576 /* Update frame layout. Recompute actual register save data based on
7577 current info and update regs_ever_live for the special registers.
7578 May be called multiple times, but may never cause *more* registers
7579 to be saved than s390_init_frame_layout allocated room for. */
7580
7581 static void
7582 s390_update_frame_layout (void)
7583 {
7584 int clobbered_regs[16];
7585
7586 s390_register_info (clobbered_regs);
7587
7588 df_set_regs_ever_live (BASE_REGNUM,
7589 clobbered_regs[BASE_REGNUM] ? true : false);
7590 df_set_regs_ever_live (RETURN_REGNUM,
7591 clobbered_regs[RETURN_REGNUM] ? true : false);
7592 df_set_regs_ever_live (STACK_POINTER_REGNUM,
7593 clobbered_regs[STACK_POINTER_REGNUM] ? true : false);
7594
7595 if (cfun->machine->base_reg)
7596 df_set_regs_ever_live (REGNO (cfun->machine->base_reg), true);
7597 }
7598
7599 /* Return true if it is legal to put a value with MODE into REGNO. */
7600
7601 bool
7602 s390_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
7603 {
7604 switch (REGNO_REG_CLASS (regno))
7605 {
7606 case FP_REGS:
7607 if (REGNO_PAIR_OK (regno, mode))
7608 {
7609 if (mode == SImode || mode == DImode)
7610 return true;
7611
7612 if (FLOAT_MODE_P (mode) && GET_MODE_CLASS (mode) != MODE_VECTOR_FLOAT)
7613 return true;
7614 }
7615 break;
7616 case ADDR_REGS:
7617 if (FRAME_REGNO_P (regno) && mode == Pmode)
7618 return true;
7619
7620 /* fallthrough */
7621 case GENERAL_REGS:
7622 if (REGNO_PAIR_OK (regno, mode))
7623 {
7624 if (TARGET_ZARCH
7625 || (mode != TFmode && mode != TCmode && mode != TDmode))
7626 return true;
7627 }
7628 break;
7629 case CC_REGS:
7630 if (GET_MODE_CLASS (mode) == MODE_CC)
7631 return true;
7632 break;
7633 case ACCESS_REGS:
7634 if (REGNO_PAIR_OK (regno, mode))
7635 {
7636 if (mode == SImode || mode == Pmode)
7637 return true;
7638 }
7639 break;
7640 default:
7641 return false;
7642 }
7643
7644 return false;
7645 }
7646
7647 /* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */
7648
7649 bool
7650 s390_hard_regno_rename_ok (unsigned int old_reg, unsigned int new_reg)
7651 {
7652 /* Once we've decided upon a register to use as base register, it must
7653 no longer be used for any other purpose. */
7654 if (cfun->machine->base_reg)
7655 if (REGNO (cfun->machine->base_reg) == old_reg
7656 || REGNO (cfun->machine->base_reg) == new_reg)
7657 return false;
7658
7659 return true;
7660 }
7661
7662 /* Maximum number of registers to represent a value of mode MODE
7663 in a register of class RCLASS. */
7664
7665 int
7666 s390_class_max_nregs (enum reg_class rclass, enum machine_mode mode)
7667 {
7668 switch (rclass)
7669 {
7670 case FP_REGS:
7671 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
7672 return 2 * ((GET_MODE_SIZE (mode) / 2 + 8 - 1) / 8);
7673 else
7674 return (GET_MODE_SIZE (mode) + 8 - 1) / 8;
7675 case ACCESS_REGS:
7676 return (GET_MODE_SIZE (mode) + 4 - 1) / 4;
7677 default:
7678 break;
7679 }
7680 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
7681 }
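/* Worked examples of the formulas above: a TFmode value (16 bytes) in
   FP_REGS needs (16 + 7) / 8 == 2 registers, a complex DFmode value
   (2 * 8 bytes) needs 2 * ((8 + 7) / 8) == 2, and an SImode value in
   ACCESS_REGS needs (4 + 3) / 4 == 1.  */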
7682
7683 /* Return true if register FROM can be eliminated via register TO. */
7684
7685 static bool
7686 s390_can_eliminate (const int from, const int to)
7687 {
7688 /* On zSeries machines, we have not marked the base register as fixed.
7689 Instead, we have an elimination rule BASE_REGNUM -> BASE_REGNUM.
7690 If a function requires the base register, we say here that this
7691 elimination cannot be performed. This will cause reload to free
7692 up the base register (as if it were fixed). On the other hand,
7693 if the current function does *not* require the base register, we
7694 say here the elimination succeeds, which in turn allows reload
7695 to allocate the base register for any other purpose. */
7696 if (from == BASE_REGNUM && to == BASE_REGNUM)
7697 {
7698 if (TARGET_CPU_ZARCH)
7699 {
7700 s390_init_frame_layout ();
7701 return cfun->machine->base_reg == NULL_RTX;
7702 }
7703
7704 return false;
7705 }
7706
7707 /* Everything else must point into the stack frame. */
7708 gcc_assert (to == STACK_POINTER_REGNUM
7709 || to == HARD_FRAME_POINTER_REGNUM);
7710
7711 gcc_assert (from == FRAME_POINTER_REGNUM
7712 || from == ARG_POINTER_REGNUM
7713 || from == RETURN_ADDRESS_POINTER_REGNUM);
7714
7715 /* Make sure we actually saved the return address. */
7716 if (from == RETURN_ADDRESS_POINTER_REGNUM)
7717 if (!crtl->calls_eh_return
7718 && !cfun->stdarg
7719 && !cfun_frame_layout.save_return_addr_p)
7720 return false;
7721
7722 return true;
7723 }
7724
7725 /* Return the offset between registers FROM and TO just after the prologue. */
7726
7727 HOST_WIDE_INT
7728 s390_initial_elimination_offset (int from, int to)
7729 {
7730 HOST_WIDE_INT offset;
7731 int index;
7732
7733 /* ??? Why are we called for non-eliminable pairs? */
7734 if (!s390_can_eliminate (from, to))
7735 return 0;
7736
7737 switch (from)
7738 {
7739 case FRAME_POINTER_REGNUM:
7740 offset = (get_frame_size()
7741 + STACK_POINTER_OFFSET
7742 + crtl->outgoing_args_size);
7743 break;
7744
7745 case ARG_POINTER_REGNUM:
7746 s390_init_frame_layout ();
7747 offset = cfun_frame_layout.frame_size + STACK_POINTER_OFFSET;
7748 break;
7749
7750 case RETURN_ADDRESS_POINTER_REGNUM:
7751 s390_init_frame_layout ();
7752 index = RETURN_REGNUM - cfun_frame_layout.first_save_gpr_slot;
7753 gcc_assert (index >= 0);
7754 offset = cfun_frame_layout.frame_size + cfun_frame_layout.gprs_offset;
7755 offset += index * UNITS_PER_LONG;
7756 break;
7757
7758 case BASE_REGNUM:
7759 offset = 0;
7760 break;
7761
7762 default:
7763 gcc_unreachable ();
7764 }
7765
7766 return offset;
7767 }
7768
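/* A small worked example for the ARG_POINTER case above, assuming the
   64-bit ABI where STACK_POINTER_OFFSET is 160: a function with a
   200-byte frame reports an elimination offset of 200 + 160 = 360,
   i.e. the eliminated argument pointer ends up 360 bytes above the
   new stack pointer.  */
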
7769 /* Emit insn to save fpr REGNUM at offset OFFSET relative
7770 to register BASE. Return generated insn. */
7771
7772 static rtx
7773 save_fpr (rtx base, int offset, int regnum)
7774 {
7775 rtx addr;
7776 addr = gen_rtx_MEM (DFmode, plus_constant (base, offset));
7777
7778 if (regnum >= 16 && regnum <= (16 + FP_ARG_NUM_REG))
7779 set_mem_alias_set (addr, get_varargs_alias_set ());
7780 else
7781 set_mem_alias_set (addr, get_frame_alias_set ());
7782
7783 return emit_move_insn (addr, gen_rtx_REG (DFmode, regnum));
7784 }
7785
7786 /* Emit insn to restore fpr REGNUM from offset OFFSET relative
7787 to register BASE. Return generated insn. */
7788
7789 static rtx
7790 restore_fpr (rtx base, int offset, int regnum)
7791 {
7792 rtx addr;
7793 addr = gen_rtx_MEM (DFmode, plus_constant (base, offset));
7794 set_mem_alias_set (addr, get_frame_alias_set ());
7795
7796 return emit_move_insn (gen_rtx_REG (DFmode, regnum), addr);
7797 }
7798
7799 /* Return true if REGNO is a global register, but not one
7800 of the special ones that need to be saved/restored anyway. */
7801
7802 static inline bool
7803 global_not_special_regno_p (int regno)
7804 {
7805 return (global_regs[regno]
7806 /* These registers are special and need to be
7807 restored in any case. */
7808 && !(regno == STACK_POINTER_REGNUM
7809 || regno == RETURN_REGNUM
7810 || regno == BASE_REGNUM
7811 || (flag_pic && regno == (int)PIC_OFFSET_TABLE_REGNUM)));
7812 }
7813
7814 /* Generate insn to save registers FIRST to LAST into
7815 the register save area located at offset OFFSET
7816 relative to register BASE. */
7817
7818 static rtx
7819 save_gprs (rtx base, int offset, int first, int last)
7820 {
7821 rtx addr, insn, note;
7822 int i;
7823
7824 addr = plus_constant (base, offset);
7825 addr = gen_rtx_MEM (Pmode, addr);
7826
7827 set_mem_alias_set (addr, get_frame_alias_set ());
7828
7829 /* Special-case single register. */
7830 if (first == last)
7831 {
7832 if (TARGET_64BIT)
7833 insn = gen_movdi (addr, gen_rtx_REG (Pmode, first));
7834 else
7835 insn = gen_movsi (addr, gen_rtx_REG (Pmode, first));
7836
7837 if (!global_not_special_regno_p (first))
7838 RTX_FRAME_RELATED_P (insn) = 1;
7839 return insn;
7840 }
7841
7842
7843 insn = gen_store_multiple (addr,
7844 gen_rtx_REG (Pmode, first),
7845 GEN_INT (last - first + 1));
7846
7847 if (first <= 6 && cfun->stdarg)
7848 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
7849 {
7850 rtx mem = XEXP (XVECEXP (PATTERN (insn), 0, i), 0);
7851
7852 if (first + i <= 6)
7853 set_mem_alias_set (mem, get_varargs_alias_set ());
7854 }
7855
7856 /* We need to set the FRAME_RELATED flag on all SETs
7857 inside the store-multiple pattern.
7858
7859 However, we must not emit DWARF records for registers 2..5
7860 if they are stored for use by variable arguments ...
7861
7862 ??? Unfortunately, it is not enough to simply not set the
7863 FRAME_RELATED flags for those SETs, because the first SET
7864 of the PARALLEL is always treated as if it had the flag
7865 set, even if it does not. Therefore we emit a new pattern
7866 without those registers as REG_FRAME_RELATED_EXPR note. */
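
/* As a rough sketch of the effect: when a stdarg function stores
   r2-r15 with a single store-multiple, the REG_FRAME_RELATED_EXPR
   attached below only covers r6-r15, e.g.

     (parallel [(set (mem:DI ...) (reg:DI 6))
                ...
                (set (mem:DI ...) (reg:DI 15))])

   so no DWARF save records are emitted for the vararg registers
   r2-r5.  */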
7867
7868 if (first >= 6 && !global_not_special_regno_p (first))
7869 {
7870 rtx pat = PATTERN (insn);
7871
7872 for (i = 0; i < XVECLEN (pat, 0); i++)
7873 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
7874 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (pat,
7875 0, i)))))
7876 RTX_FRAME_RELATED_P (XVECEXP (pat, 0, i)) = 1;
7877
7878 RTX_FRAME_RELATED_P (insn) = 1;
7879 }
7880 else if (last >= 6)
7881 {
7882 int start;
7883
7884 for (start = first >= 6 ? first : 6; start <= last; start++)
7885 if (!global_not_special_regno_p (start))
7886 break;
7887
7888 if (start > last)
7889 return insn;
7890
7891 addr = plus_constant (base, offset + (start - first) * UNITS_PER_LONG);
7892 note = gen_store_multiple (gen_rtx_MEM (Pmode, addr),
7893 gen_rtx_REG (Pmode, start),
7894 GEN_INT (last - start + 1));
7895 note = PATTERN (note);
7896
7897 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
7898
7899 for (i = 0; i < XVECLEN (note, 0); i++)
7900 if (GET_CODE (XVECEXP (note, 0, i)) == SET
7901 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (note,
7902 0, i)))))
7903 RTX_FRAME_RELATED_P (XVECEXP (note, 0, i)) = 1;
7904
7905 RTX_FRAME_RELATED_P (insn) = 1;
7906 }
7907
7908 return insn;
7909 }
7910
7911 /* Generate insn to restore registers FIRST to LAST from
7912 the register save area located at offset OFFSET
7913 relative to register BASE. */
7914
7915 static rtx
7916 restore_gprs (rtx base, int offset, int first, int last)
7917 {
7918 rtx addr, insn;
7919
7920 addr = plus_constant (base, offset);
7921 addr = gen_rtx_MEM (Pmode, addr);
7922 set_mem_alias_set (addr, get_frame_alias_set ());
7923
7924 /* Special-case single register. */
7925 if (first == last)
7926 {
7927 if (TARGET_64BIT)
7928 insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
7929 else
7930 insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);
7931
7932 return insn;
7933 }
7934
7935 insn = gen_load_multiple (gen_rtx_REG (Pmode, first),
7936 addr,
7937 GEN_INT (last - first + 1));
7938 return insn;
7939 }
7940
7941 /* Return insn sequence to load the GOT register. */
7942
7943 static GTY(()) rtx got_symbol;
7944 rtx
7945 s390_load_got (void)
7946 {
7947 rtx insns;
7948
7949 if (!got_symbol)
7950 {
7951 got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
7952 SYMBOL_REF_FLAGS (got_symbol) = SYMBOL_FLAG_LOCAL;
7953 }
7954
7955 start_sequence ();
7956
7957 if (TARGET_CPU_ZARCH)
7958 {
7959 emit_move_insn (pic_offset_table_rtx, got_symbol);
7960 }
7961 else
7962 {
7963 rtx offset;
7964
7965 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got_symbol),
7966 UNSPEC_LTREL_OFFSET);
7967 offset = gen_rtx_CONST (Pmode, offset);
7968 offset = force_const_mem (Pmode, offset);
7969
7970 emit_move_insn (pic_offset_table_rtx, offset);
7971
7972 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (offset, 0)),
7973 UNSPEC_LTREL_BASE);
7974 offset = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, offset);
7975
7976 emit_move_insn (pic_offset_table_rtx, offset);
7977 }
7978
7979 insns = get_insns ();
7980 end_sequence ();
7981 return insns;
7982 }
7983
7984 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
7985 and the change to the stack pointer. */
7986
7987 static void
7988 s390_emit_stack_tie (void)
7989 {
7990 rtx mem = gen_frame_mem (BLKmode,
7991 gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
7992
7993 emit_insn (gen_stack_tie (mem));
7994 }
7995
7996 /* Expand the prologue into a bunch of separate insns. */
7997
7998 void
7999 s390_emit_prologue (void)
8000 {
8001 rtx insn, addr;
8002 rtx temp_reg;
8003 int i;
8004 int offset;
8005 int next_fpr = 0;
8006
8007 /* Complete frame layout. */
8008
8009 s390_update_frame_layout ();
8010
8011 /* Annotate all constant pool references to let the scheduler know
8012 they implicitly use the base register. */
8013
8014 push_topmost_sequence ();
8015
8016 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8017 if (INSN_P (insn))
8018 {
8019 annotate_constant_pool_refs (&PATTERN (insn));
8020 df_insn_rescan (insn);
8021 }
8022
8023 pop_topmost_sequence ();
8024
8025 /* Choose the best register to use as a temporary within the prologue.
8026 See below for why TPF must use register 1. */
8027
8028 if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
8029 && !current_function_is_leaf
8030 && !TARGET_TPF_PROFILING)
8031 temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
8032 else
8033 temp_reg = gen_rtx_REG (Pmode, 1);
8034
8035 /* Save call saved gprs. */
8036 if (cfun_frame_layout.first_save_gpr != -1)
8037 {
8038 insn = save_gprs (stack_pointer_rtx,
8039 cfun_frame_layout.gprs_offset +
8040 UNITS_PER_LONG * (cfun_frame_layout.first_save_gpr
8041 - cfun_frame_layout.first_save_gpr_slot),
8042 cfun_frame_layout.first_save_gpr,
8043 cfun_frame_layout.last_save_gpr);
8044 emit_insn (insn);
8045 }
8046
8047 /* Dummy insn to mark literal pool slot. */
8048
8049 if (cfun->machine->base_reg)
8050 emit_insn (gen_main_pool (cfun->machine->base_reg));
8051
8052 offset = cfun_frame_layout.f0_offset;
8053
8054 /* Save f0 and f2. */
8055 for (i = 0; i < 2; i++)
8056 {
8057 if (cfun_fpr_bit_p (i))
8058 {
8059 save_fpr (stack_pointer_rtx, offset, i + 16);
8060 offset += 8;
8061 }
8062 else if (!TARGET_PACKED_STACK)
8063 offset += 8;
8064 }
8065
8066 /* Save f4 and f6. */
8067 offset = cfun_frame_layout.f4_offset;
8068 for (i = 2; i < 4; i++)
8069 {
8070 if (cfun_fpr_bit_p (i))
8071 {
8072 insn = save_fpr (stack_pointer_rtx, offset, i + 16);
8073 offset += 8;
8074
8075 /* If f4 and f6 are call-clobbered, they are saved due to stdargs and
8076 therefore are not frame related. */
8077 if (!call_really_used_regs[i + 16])
8078 RTX_FRAME_RELATED_P (insn) = 1;
8079 }
8080 else if (!TARGET_PACKED_STACK)
8081 offset += 8;
8082 }
8083
8084 if (TARGET_PACKED_STACK
8085 && cfun_save_high_fprs_p
8086 && cfun_frame_layout.f8_offset + cfun_frame_layout.high_fprs * 8 > 0)
8087 {
8088 offset = (cfun_frame_layout.f8_offset
8089 + (cfun_frame_layout.high_fprs - 1) * 8);
8090
8091 for (i = 15; i > 7 && offset >= 0; i--)
8092 if (cfun_fpr_bit_p (i))
8093 {
8094 insn = save_fpr (stack_pointer_rtx, offset, i + 16);
8095
8096 RTX_FRAME_RELATED_P (insn) = 1;
8097 offset -= 8;
8098 }
8099 if (offset >= cfun_frame_layout.f8_offset)
8100 next_fpr = i + 16;
8101 }
8102
8103 if (!TARGET_PACKED_STACK)
8104 next_fpr = cfun_save_high_fprs_p ? 31 : 0;
8105
8106 if (flag_stack_usage)
8107 current_function_static_stack_size = cfun_frame_layout.frame_size;
8108
8109 /* Decrement stack pointer. */
8110
8111 if (cfun_frame_layout.frame_size > 0)
8112 {
8113 rtx frame_off = GEN_INT (-cfun_frame_layout.frame_size);
8114 rtx real_frame_off;
8115
8116 if (s390_stack_size)
8117 {
8118 HOST_WIDE_INT stack_guard;
8119
8120 if (s390_stack_guard)
8121 stack_guard = s390_stack_guard;
8122 else
8123 {
8124 /* If no value for the stack guard is provided, the smallest power of 2
8125 larger than the current frame size is chosen. */
8126 stack_guard = 1;
8127 while (stack_guard < cfun_frame_layout.frame_size)
8128 stack_guard <<= 1;
8129 }
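
/* E.g. a frame size of 4552 bytes yields a stack guard of 8192,
   the smallest power of 2 that is not below the frame size.  */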
8130
8131 if (cfun_frame_layout.frame_size >= s390_stack_size)
8132 {
8133 warning (0, "frame size of function %qs is "
8134 HOST_WIDE_INT_PRINT_DEC
8135 " bytes exceeding user provided stack limit of "
8136 HOST_WIDE_INT_PRINT_DEC " bytes. "
8137 "An unconditional trap is added.",
8138 current_function_name(), cfun_frame_layout.frame_size,
8139 s390_stack_size);
8140 emit_insn (gen_trap ());
8141 }
8142 else
8143 {
8144 /* stack_guard has to be smaller than s390_stack_size.
8145 Otherwise we would emit an AND with zero which would
8146 not match the test under mask pattern. */
8147 if (stack_guard >= s390_stack_size)
8148 {
8149 warning (0, "frame size of function %qs is "
8150 HOST_WIDE_INT_PRINT_DEC
8151 " bytes which is more than half the stack size. "
8152 "The dynamic check would not be reliable. "
8153 "No check emitted for this function.",
8154 current_function_name(),
8155 cfun_frame_layout.frame_size);
8156 }
8157 else
8158 {
8159 HOST_WIDE_INT stack_check_mask = ((s390_stack_size - 1)
8160 & ~(stack_guard - 1));
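
/* For instance, with s390_stack_size == 65536 and stack_guard == 4096
   the mask is 0xffff & ~0xfff = 0xf000; the compare-and-trap below
   then fires when the masked stack pointer becomes zero, i.e.
   roughly once fewer than stack_guard bytes of the configured
   stack area remain.  */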
8161
8162 rtx t = gen_rtx_AND (Pmode, stack_pointer_rtx,
8163 GEN_INT (stack_check_mask));
8164 if (TARGET_64BIT)
8165 emit_insn (gen_ctrapdi4 (gen_rtx_EQ (VOIDmode,
8166 t, const0_rtx),
8167 t, const0_rtx, const0_rtx));
8168 else
8169 emit_insn (gen_ctrapsi4 (gen_rtx_EQ (VOIDmode,
8170 t, const0_rtx),
8171 t, const0_rtx, const0_rtx));
8172 }
8173 }
8174 }
8175
8176 if (s390_warn_framesize > 0
8177 && cfun_frame_layout.frame_size >= s390_warn_framesize)
8178 warning (0, "frame size of %qs is " HOST_WIDE_INT_PRINT_DEC " bytes",
8179 current_function_name (), cfun_frame_layout.frame_size);
8180
8181 if (s390_warn_dynamicstack_p && cfun->calls_alloca)
8182 warning (0, "%qs uses dynamic stack allocation", current_function_name ());
8183
8184 /* Save incoming stack pointer into temp reg. */
8185 if (TARGET_BACKCHAIN || next_fpr)
8186 insn = emit_insn (gen_move_insn (temp_reg, stack_pointer_rtx));
8187
8188 /* Subtract frame size from stack pointer. */
8189
8190 if (DISP_IN_RANGE (INTVAL (frame_off)))
8191 {
8192 insn = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8193 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8194 frame_off));
8195 insn = emit_insn (insn);
8196 }
8197 else
8198 {
8199 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
8200 frame_off = force_const_mem (Pmode, frame_off);
8201
8202 insn = emit_insn (gen_add2_insn (stack_pointer_rtx, frame_off));
8203 annotate_constant_pool_refs (&PATTERN (insn));
8204 }
8205
8206 RTX_FRAME_RELATED_P (insn) = 1;
8207 real_frame_off = GEN_INT (-cfun_frame_layout.frame_size);
8208 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
8209 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8210 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8211 real_frame_off)));
8212
8213 /* Set backchain. */
8214
8215 if (TARGET_BACKCHAIN)
8216 {
8217 if (cfun_frame_layout.backchain_offset)
8218 addr = gen_rtx_MEM (Pmode,
8219 plus_constant (stack_pointer_rtx,
8220 cfun_frame_layout.backchain_offset));
8221 else
8222 addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
8223 set_mem_alias_set (addr, get_frame_alias_set ());
8224 insn = emit_insn (gen_move_insn (addr, temp_reg));
8225 }
8226
8227 /* If we support non-call exceptions (e.g. for Java),
8228 we need to make sure the backchain pointer is set up
8229 before any possibly trapping memory access. */
8230 if (TARGET_BACKCHAIN && cfun->can_throw_non_call_exceptions)
8231 {
8232 addr = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode));
8233 emit_clobber (addr);
8234 }
8235 }
8236
8237 /* Save fprs 8 - 15 (64 bit ABI). */
8238
8239 if (cfun_save_high_fprs_p && next_fpr)
8240 {
8241 /* If the stack might be accessed through a different register
8242 we have to make sure that the stack pointer decrement is not
8243 moved below the use of the stack slots. */
8244 s390_emit_stack_tie ();
8245
8246 insn = emit_insn (gen_add2_insn (temp_reg,
8247 GEN_INT (cfun_frame_layout.f8_offset)));
8248
8249 offset = 0;
8250
8251 for (i = 24; i <= next_fpr; i++)
8252 if (cfun_fpr_bit_p (i - 16))
8253 {
8254 rtx addr = plus_constant (stack_pointer_rtx,
8255 cfun_frame_layout.frame_size
8256 + cfun_frame_layout.f8_offset
8257 + offset);
8258
8259 insn = save_fpr (temp_reg, offset, i);
8260 offset += 8;
8261 RTX_FRAME_RELATED_P (insn) = 1;
8262 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
8263 gen_rtx_SET (VOIDmode,
8264 gen_rtx_MEM (DFmode, addr),
8265 gen_rtx_REG (DFmode, i)));
8266 }
8267 }
8268
8269 /* Set frame pointer, if needed. */
8270
8271 if (frame_pointer_needed)
8272 {
8273 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
8274 RTX_FRAME_RELATED_P (insn) = 1;
8275 }
8276
8277 /* Set up got pointer, if needed. */
8278
8279 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
8280 {
8281 rtx insns = s390_load_got ();
8282
8283 for (insn = insns; insn; insn = NEXT_INSN (insn))
8284 annotate_constant_pool_refs (&PATTERN (insn));
8285
8286 emit_insn (insns);
8287 }
8288
8289 if (TARGET_TPF_PROFILING)
8290 {
8291 /* Generate a BAS instruction to serve as a function
8292 entry intercept to facilitate the use of tracing
8293 algorithms located at the branch target. */
8294 emit_insn (gen_prologue_tpf ());
8295
8296 /* Emit a blockage here so that all code
8297 lies between the profiling mechanisms. */
8298 emit_insn (gen_blockage ());
8299 }
8300 }
8301
8302 /* Expand the epilogue into a bunch of separate insns. */
8303
8304 void
8305 s390_emit_epilogue (bool sibcall)
8306 {
8307 rtx frame_pointer, return_reg, cfa_restores = NULL_RTX;
8308 int area_bottom, area_top, offset = 0;
8309 int next_offset;
8310 rtvec p;
8311 int i;
8312
8313 if (TARGET_TPF_PROFILING)
8314 {
8315
8316 /* Generate a BAS instruction to serve as a function
8317 exit intercept to facilitate the use of tracing
8318 algorithms located at the branch target. */
8319
8320 /* Emit a blockage here so that all code
8321 lies between the profiling mechanisms. */
8322 emit_insn (gen_blockage ());
8323
8324 emit_insn (gen_epilogue_tpf ());
8325 }
8326
8327 /* Check whether to use frame or stack pointer for restore. */
8328
8329 frame_pointer = (frame_pointer_needed
8330 ? hard_frame_pointer_rtx : stack_pointer_rtx);
8331
8332 s390_frame_area (&area_bottom, &area_top);
8333
8334 /* Check whether we can access the register save area.
8335 If not, increment the frame pointer as required. */
8336
8337 if (area_top <= area_bottom)
8338 {
8339 /* Nothing to restore. */
8340 }
8341 else if (DISP_IN_RANGE (cfun_frame_layout.frame_size + area_bottom)
8342 && DISP_IN_RANGE (cfun_frame_layout.frame_size + area_top - 1))
8343 {
8344 /* Area is in range. */
8345 offset = cfun_frame_layout.frame_size;
8346 }
8347 else
8348 {
8349 rtx insn, frame_off, cfa;
8350
8351 offset = area_bottom < 0 ? -area_bottom : 0;
8352 frame_off = GEN_INT (cfun_frame_layout.frame_size - offset);
8353
8354 cfa = gen_rtx_SET (VOIDmode, frame_pointer,
8355 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
8356 if (DISP_IN_RANGE (INTVAL (frame_off)))
8357 {
8358 insn = gen_rtx_SET (VOIDmode, frame_pointer,
8359 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
8360 insn = emit_insn (insn);
8361 }
8362 else
8363 {
8364 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
8365 frame_off = force_const_mem (Pmode, frame_off);
8366
8367 insn = emit_insn (gen_add2_insn (frame_pointer, frame_off));
8368 annotate_constant_pool_refs (&PATTERN (insn));
8369 }
8370 add_reg_note (insn, REG_CFA_ADJUST_CFA, cfa);
8371 RTX_FRAME_RELATED_P (insn) = 1;
8372 }
8373
8374 /* Restore call saved fprs. */
8375
8376 if (TARGET_64BIT)
8377 {
8378 if (cfun_save_high_fprs_p)
8379 {
8380 next_offset = cfun_frame_layout.f8_offset;
8381 for (i = 24; i < 32; i++)
8382 {
8383 if (cfun_fpr_bit_p (i - 16))
8384 {
8385 restore_fpr (frame_pointer,
8386 offset + next_offset, i);
8387 cfa_restores
8388 = alloc_reg_note (REG_CFA_RESTORE,
8389 gen_rtx_REG (DFmode, i), cfa_restores);
8390 next_offset += 8;
8391 }
8392 }
8393 }
8394
8395 }
8396 else
8397 {
8398 next_offset = cfun_frame_layout.f4_offset;
8399 for (i = 18; i < 20; i++)
8400 {
8401 if (cfun_fpr_bit_p (i - 16))
8402 {
8403 restore_fpr (frame_pointer,
8404 offset + next_offset, i);
8405 cfa_restores
8406 = alloc_reg_note (REG_CFA_RESTORE,
8407 gen_rtx_REG (DFmode, i), cfa_restores);
8408 next_offset += 8;
8409 }
8410 else if (!TARGET_PACKED_STACK)
8411 next_offset += 8;
8412 }
8413
8414 }
8415
8416 /* Return register. */
8417
8418 return_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
8419
8420 /* Restore call saved gprs. */
8421
8422 if (cfun_frame_layout.first_restore_gpr != -1)
8423 {
8424 rtx insn, addr;
8425 int i;
8426
8427 /* Check for global registers and save them
8428 to the stack locations from where they get restored. */
8429
8430 for (i = cfun_frame_layout.first_restore_gpr;
8431 i <= cfun_frame_layout.last_restore_gpr;
8432 i++)
8433 {
8434 if (global_not_special_regno_p (i))
8435 {
8436 addr = plus_constant (frame_pointer,
8437 offset + cfun_frame_layout.gprs_offset
8438 + (i - cfun_frame_layout.first_save_gpr_slot)
8439 * UNITS_PER_LONG);
8440 addr = gen_rtx_MEM (Pmode, addr);
8441 set_mem_alias_set (addr, get_frame_alias_set ());
8442 emit_move_insn (addr, gen_rtx_REG (Pmode, i));
8443 }
8444 else
8445 cfa_restores
8446 = alloc_reg_note (REG_CFA_RESTORE,
8447 gen_rtx_REG (Pmode, i), cfa_restores);
8448 }
8449
8450 if (! sibcall)
8451 {
8452 /* Fetch the return address from the stack before the load multiple;
8453 this helps scheduling. */
8454
8455 if (cfun_frame_layout.save_return_addr_p
8456 || (cfun_frame_layout.first_restore_gpr < BASE_REGNUM
8457 && cfun_frame_layout.last_restore_gpr > RETURN_REGNUM))
8458 {
8459 int return_regnum = find_unused_clobbered_reg();
8460 if (!return_regnum)
8461 return_regnum = 4;
8462 return_reg = gen_rtx_REG (Pmode, return_regnum);
8463
8464 addr = plus_constant (frame_pointer,
8465 offset + cfun_frame_layout.gprs_offset
8466 + (RETURN_REGNUM
8467 - cfun_frame_layout.first_save_gpr_slot)
8468 * UNITS_PER_LONG);
8469 addr = gen_rtx_MEM (Pmode, addr);
8470 set_mem_alias_set (addr, get_frame_alias_set ());
8471 emit_move_insn (return_reg, addr);
8472 }
8473 }
8474
8475 insn = restore_gprs (frame_pointer,
8476 offset + cfun_frame_layout.gprs_offset
8477 + (cfun_frame_layout.first_restore_gpr
8478 - cfun_frame_layout.first_save_gpr_slot)
8479 * UNITS_PER_LONG,
8480 cfun_frame_layout.first_restore_gpr,
8481 cfun_frame_layout.last_restore_gpr);
8482 insn = emit_insn (insn);
8483 REG_NOTES (insn) = cfa_restores;
8484 add_reg_note (insn, REG_CFA_DEF_CFA,
8485 plus_constant (stack_pointer_rtx, STACK_POINTER_OFFSET));
8486 RTX_FRAME_RELATED_P (insn) = 1;
8487 }
8488
8489 if (! sibcall)
8490 {
8491
8492 /* Return to caller. */
8493
8494 p = rtvec_alloc (2);
8495
8496 RTVEC_ELT (p, 0) = gen_rtx_RETURN (VOIDmode);
8497 RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode, return_reg);
8498 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
8499 }
8500 }
8501
8502
8503 /* Return the size in bytes of a function argument of
8504 type TYPE and/or mode MODE. At least one of TYPE or
8505 MODE must be specified. */
8506
8507 static int
8508 s390_function_arg_size (enum machine_mode mode, const_tree type)
8509 {
8510 if (type)
8511 return int_size_in_bytes (type);
8512
8513 /* No type info available for some library calls ... */
8514 if (mode != BLKmode)
8515 return GET_MODE_SIZE (mode);
8516
8517 /* If we have neither type nor mode, abort. */
8518 gcc_unreachable ();
8519 }
8520
8521 /* Return true if a function argument of type TYPE and mode MODE
8522 is to be passed in a floating-point register, if available. */
8523
8524 static bool
8525 s390_function_arg_float (enum machine_mode mode, const_tree type)
8526 {
8527 int size = s390_function_arg_size (mode, type);
8528 if (size > 8)
8529 return false;
8530
8531 /* Soft-float changes the ABI: no floating-point registers are used. */
8532 if (TARGET_SOFT_FLOAT)
8533 return false;
8534
8535 /* No type info available for some library calls ... */
8536 if (!type)
8537 return mode == SFmode || mode == DFmode || mode == SDmode || mode == DDmode;
8538
8539 /* The ABI says that record types with a single member are treated
8540 just like that member would be. */
8541 while (TREE_CODE (type) == RECORD_TYPE)
8542 {
8543 tree field, single = NULL_TREE;
8544
8545 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
8546 {
8547 if (TREE_CODE (field) != FIELD_DECL)
8548 continue;
8549
8550 if (single == NULL_TREE)
8551 single = TREE_TYPE (field);
8552 else
8553 return false;
8554 }
8555
8556 if (single == NULL_TREE)
8557 return false;
8558 else
8559 type = single;
8560 }
8561
8562 return TREE_CODE (type) == REAL_TYPE;
8563 }
8564
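/* Illustrative examples of the single-member rule above, assuming
   hard float and members that fit into 8 bytes:

     struct s1 { double d; };         passed like a plain double
     struct s2 { struct s1 x; };      unwrapped recursively, also a double
     struct s3 { double d; int i; };  two members, not a float argument  */
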
8565 /* Return true if a function argument of type TYPE and mode MODE
8566 is to be passed in an integer register, or a pair of integer
8567 registers, if available. */
8568
8569 static bool
8570 s390_function_arg_integer (enum machine_mode mode, const_tree type)
8571 {
8572 int size = s390_function_arg_size (mode, type);
8573 if (size > 8)
8574 return false;
8575
8576 /* No type info available for some library calls ... */
8577 if (!type)
8578 return GET_MODE_CLASS (mode) == MODE_INT
8579 || (TARGET_SOFT_FLOAT && SCALAR_FLOAT_MODE_P (mode));
8580
8581 /* We accept small integral (and similar) types. */
8582 if (INTEGRAL_TYPE_P (type)
8583 || POINTER_TYPE_P (type)
8584 || TREE_CODE (type) == NULLPTR_TYPE
8585 || TREE_CODE (type) == OFFSET_TYPE
8586 || (TARGET_SOFT_FLOAT && TREE_CODE (type) == REAL_TYPE))
8587 return true;
8588
8589 /* We also accept structs of size 1, 2, 4, 8 that are not
8590 passed in floating-point registers. */
8591 if (AGGREGATE_TYPE_P (type)
8592 && exact_log2 (size) >= 0
8593 && !s390_function_arg_float (mode, type))
8594 return true;
8595
8596 return false;
8597 }
8598
8599 /* Return 1 if a function argument of type TYPE and mode MODE
8600 is to be passed by reference. The ABI specifies that only
8601 structures of size 1, 2, 4, or 8 bytes are passed by value,
8602 all other structures (and complex numbers) are passed by
8603 reference. */
8604
8605 static bool
8606 s390_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
8607 enum machine_mode mode, const_tree type,
8608 bool named ATTRIBUTE_UNUSED)
8609 {
8610 int size = s390_function_arg_size (mode, type);
8611 if (size > 8)
8612 return true;
8613
8614 if (type)
8615 {
8616 if (AGGREGATE_TYPE_P (type) && exact_log2 (size) < 0)
8617 return 1;
8618
8619 if (TREE_CODE (type) == COMPLEX_TYPE
8620 || TREE_CODE (type) == VECTOR_TYPE)
8621 return 1;
8622 }
8623
8624 return 0;
8625 }
8626
8627 /* Update the data in CUM to advance over an argument of mode MODE and
8628 data type TYPE. (TYPE is null for libcalls where that information
8629 may not be available.) The boolean NAMED specifies whether the
8630 argument is a named argument (as opposed to an unnamed argument
8631 matching an ellipsis). */
8632
8633 static void
8634 s390_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
8635 const_tree type, bool named ATTRIBUTE_UNUSED)
8636 {
8637 if (s390_function_arg_float (mode, type))
8638 {
8639 cum->fprs += 1;
8640 }
8641 else if (s390_function_arg_integer (mode, type))
8642 {
8643 int size = s390_function_arg_size (mode, type);
8644 cum->gprs += ((size + UNITS_PER_LONG - 1) / UNITS_PER_LONG);
8645 }
8646 else
8647 gcc_unreachable ();
8648 }
8649
8650 /* Define where to put the arguments to a function.
8651 Value is zero to push the argument on the stack,
8652 or a hard register in which to store the argument.
8653
8654 MODE is the argument's machine mode.
8655 TYPE is the data type of the argument (as a tree).
8656 This is null for libcalls where that information may
8657 not be available.
8658 CUM is a variable of type CUMULATIVE_ARGS which gives info about
8659 the preceding args and about the function being called.
8660 NAMED is nonzero if this argument is a named parameter
8661 (otherwise it is an extra parameter matching an ellipsis).
8662
8663 On S/390, we use general purpose registers 2 through 6 to
8664 pass integer, pointer, and certain structure arguments, and
8665 floating point registers 0 and 2 (0, 2, 4, and 6 on 64-bit)
8666 to pass floating point arguments. All remaining arguments
8667 are pushed to the stack. */
8668
8669 static rtx
8670 s390_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
8671 const_tree type, bool named ATTRIBUTE_UNUSED)
8672 {
8673 if (s390_function_arg_float (mode, type))
8674 {
8675 if (cum->fprs + 1 > FP_ARG_NUM_REG)
8676 return 0;
8677 else
8678 return gen_rtx_REG (mode, cum->fprs + 16);
8679 }
8680 else if (s390_function_arg_integer (mode, type))
8681 {
8682 int size = s390_function_arg_size (mode, type);
8683 int n_gprs = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
8684
8685 if (cum->gprs + n_gprs > GP_ARG_NUM_REG)
8686 return 0;
8687 else if (n_gprs == 1 || UNITS_PER_WORD == UNITS_PER_LONG)
8688 return gen_rtx_REG (mode, cum->gprs + 2);
8689 else if (n_gprs == 2)
8690 {
8691 rtvec p = rtvec_alloc (2);
8692
8693 RTVEC_ELT (p, 0)
8694 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 2),
8695 const0_rtx);
8696 RTVEC_ELT (p, 1)
8697 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 3),
8698 GEN_INT (4));
8699
8700 return gen_rtx_PARALLEL (mode, p);
8701 }
8702 }
8703
8704 /* After the real arguments, expand_call calls us once again
8705 with a void_type_node type. Whatever we return here is
8706 passed as operand 2 to the call expanders.
8707
8708 We don't need this feature ... */
8709 else if (type == void_type_node)
8710 return const0_rtx;
8711
8712 gcc_unreachable ();
8713 }
8714
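/* A small sketch of the n_gprs == 2 case above: on a 31-bit target a
   'long long' argument arriving with cum->gprs == 2 is described as

     (parallel [(expr_list (reg:SI 4) (const_int 0))
                (expr_list (reg:SI 5) (const_int 4))])

   i.e. the value is split across the register pair r4/r5.  */
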
8715 /* Return true if return values of type TYPE should be returned
8716 in a memory buffer whose address is passed by the caller as
8717 hidden first argument. */
8718
8719 static bool
8720 s390_return_in_memory (const_tree type, const_tree fundecl ATTRIBUTE_UNUSED)
8721 {
8722 /* We accept small integral (and similar) types. */
8723 if (INTEGRAL_TYPE_P (type)
8724 || POINTER_TYPE_P (type)
8725 || TREE_CODE (type) == OFFSET_TYPE
8726 || TREE_CODE (type) == REAL_TYPE)
8727 return int_size_in_bytes (type) > 8;
8728
8729 /* Aggregates and similar constructs are always returned
8730 in memory. */
8731 if (AGGREGATE_TYPE_P (type)
8732 || TREE_CODE (type) == COMPLEX_TYPE
8733 || TREE_CODE (type) == VECTOR_TYPE)
8734 return true;
8735
8736 /* ??? We get called on all sorts of random stuff from
8737 aggregate_value_p. We can't abort, but it's not clear
8738 what's safe to return. Pretend it's a struct I guess. */
8739 return true;
8740 }
8741
8742 /* Function arguments and return values are promoted to word size. */
8743
8744 static enum machine_mode
8745 s390_promote_function_mode (const_tree type, enum machine_mode mode,
8746 int *punsignedp,
8747 const_tree fntype ATTRIBUTE_UNUSED,
8748 int for_return ATTRIBUTE_UNUSED)
8749 {
8750 if (INTEGRAL_MODE_P (mode)
8751 && GET_MODE_SIZE (mode) < UNITS_PER_LONG)
8752 {
8753 if (POINTER_TYPE_P (type))
8754 *punsignedp = POINTERS_EXTEND_UNSIGNED;
8755 return Pmode;
8756 }
8757
8758 return mode;
8759 }
8760
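/* E.g. a 'short' or 'int' argument or return value on a 64-bit target
   is widened to Pmode (DImode) here, and a promoted pointer keeps the
   signedness given by POINTERS_EXTEND_UNSIGNED.  */
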
8761 /* Define where to return a (scalar) value of type RET_TYPE.
8762 If RET_TYPE is null, define where to return a (scalar)
8763 value of mode MODE from a libcall. */
8764
8765 static rtx
8766 s390_function_and_libcall_value (enum machine_mode mode,
8767 const_tree ret_type,
8768 const_tree fntype_or_decl,
8769 bool outgoing ATTRIBUTE_UNUSED)
8770 {
8771 /* For normal functions perform the promotion as
8772 promote_function_mode would do. */
8773 if (ret_type)
8774 {
8775 int unsignedp = TYPE_UNSIGNED (ret_type);
8776 mode = promote_function_mode (ret_type, mode, &unsignedp,
8777 fntype_or_decl, 1);
8778 }
8779
8780 gcc_assert (GET_MODE_CLASS (mode) == MODE_INT || SCALAR_FLOAT_MODE_P (mode));
8781 gcc_assert (GET_MODE_SIZE (mode) <= 8);
8782
8783 if (TARGET_HARD_FLOAT && SCALAR_FLOAT_MODE_P (mode))
8784 return gen_rtx_REG (mode, 16);
8785 else if (GET_MODE_SIZE (mode) <= UNITS_PER_LONG
8786 || UNITS_PER_LONG == UNITS_PER_WORD)
8787 return gen_rtx_REG (mode, 2);
8788 else if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_LONG)
8789 {
8790 /* This case is triggered when returning a 64 bit value with
8791 -m31 -mzarch. Although the value would fit into a single
8792 register it has to be forced into a 32 bit register pair in
8793 order to match the ABI. */
8794 rtvec p = rtvec_alloc (2);
8795
8796 RTVEC_ELT (p, 0)
8797 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 2), const0_rtx);
8798 RTVEC_ELT (p, 1)
8799 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 3), GEN_INT (4));
8800
8801 return gen_rtx_PARALLEL (mode, p);
8802 }
8803
8804 gcc_unreachable ();
8805 }
8806
8807 /* Define where to return a scalar return value of type RET_TYPE. */
8808
8809 static rtx
8810 s390_function_value (const_tree ret_type, const_tree fn_decl_or_type,
8811 bool outgoing)
8812 {
8813 return s390_function_and_libcall_value (TYPE_MODE (ret_type), ret_type,
8814 fn_decl_or_type, outgoing);
8815 }
8816
8817 /* Define where to return a scalar libcall return value of mode
8818 MODE. */
8819
8820 static rtx
8821 s390_libcall_value (enum machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
8822 {
8823 return s390_function_and_libcall_value (mode, NULL_TREE,
8824 NULL_TREE, true);
8825 }
8826
8827
8828 /* Create and return the va_list datatype.
8829
8830 On S/390, va_list is an array type equivalent to
8831
8832 typedef struct __va_list_tag
8833 {
8834 long __gpr;
8835 long __fpr;
8836 void *__overflow_arg_area;
8837 void *__reg_save_area;
8838 } va_list[1];
8839
8840 where __gpr and __fpr hold the number of general purpose
8841 or floating point arguments used up to now, respectively,
8842 __overflow_arg_area points to the stack location of the
8843 next argument passed on the stack, and __reg_save_area
8844 always points to the start of the register area in the
8845 call frame of the current function. The function prologue
8846 saves all registers used for argument passing into this
8847 area if the function uses variable arguments. */
8848
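/* As a rough illustration: for

     int sum (int n, ...);

   __gpr is initialized to 1 (the named argument N consumed one general
   purpose register), __fpr to 0, and __overflow_arg_area to the stack
   slot of the first anonymous argument passed on the stack.  */
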
8849 static tree
8850 s390_build_builtin_va_list (void)
8851 {
8852 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
8853
8854 record = lang_hooks.types.make_type (RECORD_TYPE);
8855
8856 type_decl =
8857 build_decl (BUILTINS_LOCATION,
8858 TYPE_DECL, get_identifier ("__va_list_tag"), record);
8859
8860 f_gpr = build_decl (BUILTINS_LOCATION,
8861 FIELD_DECL, get_identifier ("__gpr"),
8862 long_integer_type_node);
8863 f_fpr = build_decl (BUILTINS_LOCATION,
8864 FIELD_DECL, get_identifier ("__fpr"),
8865 long_integer_type_node);
8866 f_ovf = build_decl (BUILTINS_LOCATION,
8867 FIELD_DECL, get_identifier ("__overflow_arg_area"),
8868 ptr_type_node);
8869 f_sav = build_decl (BUILTINS_LOCATION,
8870 FIELD_DECL, get_identifier ("__reg_save_area"),
8871 ptr_type_node);
8872
8873 va_list_gpr_counter_field = f_gpr;
8874 va_list_fpr_counter_field = f_fpr;
8875
8876 DECL_FIELD_CONTEXT (f_gpr) = record;
8877 DECL_FIELD_CONTEXT (f_fpr) = record;
8878 DECL_FIELD_CONTEXT (f_ovf) = record;
8879 DECL_FIELD_CONTEXT (f_sav) = record;
8880
8881 TYPE_STUB_DECL (record) = type_decl;
8882 TYPE_NAME (record) = type_decl;
8883 TYPE_FIELDS (record) = f_gpr;
8884 DECL_CHAIN (f_gpr) = f_fpr;
8885 DECL_CHAIN (f_fpr) = f_ovf;
8886 DECL_CHAIN (f_ovf) = f_sav;
8887
8888 layout_type (record);
8889
8890 /* The correct type is an array type of one element. */
8891 return build_array_type (record, build_index_type (size_zero_node));
8892 }
8893
8894 /* Implement va_start by filling the va_list structure VALIST.
8895 STDARG_P is always true, and ignored.
8896 NEXTARG points to the first anonymous stack argument.
8897
8898 The following global variables are used to initialize
8899 the va_list structure:
8900
8901 crtl->args.info:
8902 holds number of gprs and fprs used for named arguments.
8903 crtl->args.arg_offset_rtx:
8904 holds the offset of the first anonymous stack argument
8905 (relative to the virtual arg pointer). */
8906
8907 static void
8908 s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
8909 {
8910 HOST_WIDE_INT n_gpr, n_fpr;
8911 int off;
8912 tree f_gpr, f_fpr, f_ovf, f_sav;
8913 tree gpr, fpr, ovf, sav, t;
8914
8915 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
8916 f_fpr = DECL_CHAIN (f_gpr);
8917 f_ovf = DECL_CHAIN (f_fpr);
8918 f_sav = DECL_CHAIN (f_ovf);
8919
8920 valist = build_simple_mem_ref (valist);
8921 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
8922 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
8923 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
8924 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
8925
8926 /* Count number of gp and fp argument registers used. */
8927
8928 n_gpr = crtl->args.info.gprs;
8929 n_fpr = crtl->args.info.fprs;
8930
8931 if (cfun->va_list_gpr_size)
8932 {
8933 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
8934 build_int_cst (NULL_TREE, n_gpr));
8935 TREE_SIDE_EFFECTS (t) = 1;
8936 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8937 }
8938
8939 if (cfun->va_list_fpr_size)
8940 {
8941 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
8942 build_int_cst (NULL_TREE, n_fpr));
8943 TREE_SIDE_EFFECTS (t) = 1;
8944 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8945 }
8946
8947 /* Find the overflow area. */
8948 if (n_gpr + cfun->va_list_gpr_size > GP_ARG_NUM_REG
8949 || n_fpr + cfun->va_list_fpr_size > FP_ARG_NUM_REG)
8950 {
8951 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
8952
8953 off = INTVAL (crtl->args.arg_offset_rtx);
8954 off = off < 0 ? 0 : off;
8955 if (TARGET_DEBUG_ARG)
8956 fprintf (stderr, "va_start: n_gpr = %d, n_fpr = %d off %d\n",
8957 (int)n_gpr, (int)n_fpr, off);
8958
8959 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovf), t, size_int (off));
8960
8961 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
8962 TREE_SIDE_EFFECTS (t) = 1;
8963 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8964 }
8965
8966 /* Find the register save area. */
8967 if ((cfun->va_list_gpr_size && n_gpr < GP_ARG_NUM_REG)
8968 || (cfun->va_list_fpr_size && n_fpr < FP_ARG_NUM_REG))
8969 {
8970 t = make_tree (TREE_TYPE (sav), return_address_pointer_rtx);
8971 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (sav), t,
8972 size_int (-RETURN_REGNUM * UNITS_PER_LONG));
8973
8974 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
8975 TREE_SIDE_EFFECTS (t) = 1;
8976 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8977 }
8978 }
8979
8980 /* Implement va_arg by updating the va_list structure
8981 VALIST as required to retrieve an argument of type
8982 TYPE, and returning that argument.
8983
8984 Generates code equivalent to:
8985
8986 if (integral value) {
8987 if (size <= 4 && args.gpr < 5 ||
8988 size > 4 && args.gpr < 4 )
8989 ret = args.reg_save_area[args.gpr+8]
8990 else
8991 ret = *args.overflow_arg_area++;
8992 } else if (float value) {
8993 if (args.fgpr < 2)
8994 ret = args.reg_save_area[args.fpr+64]
8995 else
8996 ret = *args.overflow_arg_area++;
8997 } else if (aggregate value) {
8998 if (args.gpr < 5)
8999 ret = *args.reg_save_area[args.gpr]
9000 else
9001 ret = **args.overflow_arg_area++;
9002 } */
9003
9004 static tree
9005 s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
9006 gimple_seq *post_p ATTRIBUTE_UNUSED)
9007 {
9008 tree f_gpr, f_fpr, f_ovf, f_sav;
9009 tree gpr, fpr, ovf, sav, reg, t, u;
9010 int indirect_p, size, n_reg, sav_ofs, sav_scale, max_reg;
9011 tree lab_false, lab_over, addr;
9012
9013 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
9014 f_fpr = DECL_CHAIN (f_gpr);
9015 f_ovf = DECL_CHAIN (f_fpr);
9016 f_sav = DECL_CHAIN (f_ovf);
9017
9018 valist = build_va_arg_indirect_ref (valist);
9019 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
9020 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
9021 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
9022
9023 /* The tree for args* cannot be shared between gpr/fpr and ovf since
9024 both appear on a lhs. */
9025 valist = unshare_expr (valist);
9026 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
9027
9028 size = int_size_in_bytes (type);
9029
9030 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
9031 {
9032 if (TARGET_DEBUG_ARG)
9033 {
9034 fprintf (stderr, "va_arg: aggregate type");
9035 debug_tree (type);
9036 }
9037
9038 /* Aggregates are passed by reference. */
9039 indirect_p = 1;
9040 reg = gpr;
9041 n_reg = 1;
9042
9043 /* kernel stack layout on 31 bit: It is assumed here that no padding
9044 will be added by s390_frame_info because for va_args an even
9045 number of gprs always has to be saved (r15-r2 = 14 regs). */
9046 sav_ofs = 2 * UNITS_PER_LONG;
9047 sav_scale = UNITS_PER_LONG;
9048 size = UNITS_PER_LONG;
9049 max_reg = GP_ARG_NUM_REG - n_reg;
9050 }
9051 else if (s390_function_arg_float (TYPE_MODE (type), type))
9052 {
9053 if (TARGET_DEBUG_ARG)
9054 {
9055 fprintf (stderr, "va_arg: float type");
9056 debug_tree (type);
9057 }
9058
9059 /* FP args go in FP registers, if present. */
9060 indirect_p = 0;
9061 reg = fpr;
9062 n_reg = 1;
9063 sav_ofs = 16 * UNITS_PER_LONG;
9064 sav_scale = 8;
9065 max_reg = FP_ARG_NUM_REG - n_reg;
9066 }
9067 else
9068 {
9069 if (TARGET_DEBUG_ARG)
9070 {
9071 fprintf (stderr, "va_arg: other type");
9072 debug_tree (type);
9073 }
9074
9075 /* Otherwise into GP registers. */
9076 indirect_p = 0;
9077 reg = gpr;
9078 n_reg = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
9079
9080 /* kernel stack layout on 31 bit: It is assumed here that no padding
9081 will be added by s390_frame_info because for va_args an even
9082 number of gprs always has to be saved (r15-r2 = 14 regs). */
9083 sav_ofs = 2 * UNITS_PER_LONG;
9084
9085 if (size < UNITS_PER_LONG)
9086 sav_ofs += UNITS_PER_LONG - size;
9087
9088 sav_scale = UNITS_PER_LONG;
9089 max_reg = GP_ARG_NUM_REG - n_reg;
9090 }
9091
9092 /* Pull the value out of the saved registers ... */
9093
9094 lab_false = create_artificial_label (UNKNOWN_LOCATION);
9095 lab_over = create_artificial_label (UNKNOWN_LOCATION);
9096 addr = create_tmp_var (ptr_type_node, "addr");
9097
9098 t = fold_convert (TREE_TYPE (reg), size_int (max_reg));
9099 t = build2 (GT_EXPR, boolean_type_node, reg, t);
9100 u = build1 (GOTO_EXPR, void_type_node, lab_false);
9101 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
9102 gimplify_and_add (t, pre_p);
9103
9104 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav,
9105 size_int (sav_ofs));
9106 u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
9107 fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
9108 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t, fold_convert (sizetype, u));
9109
9110 gimplify_assign (addr, t, pre_p);
9111
9112 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
9113
9114 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
9115
9116
9117 /* ... Otherwise out of the overflow area. */
9118
9119 t = ovf;
9120 if (size < UNITS_PER_LONG)
9121 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
9122 size_int (UNITS_PER_LONG - size));
9123
9124 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
9125
9126 gimplify_assign (addr, t, pre_p);
9127
9128 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
9129 size_int (size));
9130 gimplify_assign (ovf, t, pre_p);
9131
9132 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
9133
9134
9135 /* Increment register save count. */
9136
9137 u = build2 (PREINCREMENT_EXPR, TREE_TYPE (reg), reg,
9138 fold_convert (TREE_TYPE (reg), size_int (n_reg)));
9139 gimplify_and_add (u, pre_p);
9140
9141 if (indirect_p)
9142 {
9143 t = build_pointer_type_for_mode (build_pointer_type (type),
9144 ptr_mode, true);
9145 addr = fold_convert (t, addr);
9146 addr = build_va_arg_indirect_ref (addr);
9147 }
9148 else
9149 {
9150 t = build_pointer_type_for_mode (type, ptr_mode, true);
9151 addr = fold_convert (t, addr);
9152 }
9153
9154 return build_va_arg_indirect_ref (addr);
9155 }
9156
9157
9158 /* Builtins. */
9159
9160 enum s390_builtin
9161 {
9162 S390_BUILTIN_THREAD_POINTER,
9163 S390_BUILTIN_SET_THREAD_POINTER,
9164
9165 S390_BUILTIN_max
9166 };
9167
9168 static enum insn_code const code_for_builtin_64[S390_BUILTIN_max] = {
9169 CODE_FOR_get_tp_64,
9170 CODE_FOR_set_tp_64
9171 };
9172
9173 static enum insn_code const code_for_builtin_31[S390_BUILTIN_max] = {
9174 CODE_FOR_get_tp_31,
9175 CODE_FOR_set_tp_31
9176 };
9177
9178 static void
9179 s390_init_builtins (void)
9180 {
9181 tree ftype;
9182
9183 ftype = build_function_type (ptr_type_node, void_list_node);
9184 add_builtin_function ("__builtin_thread_pointer", ftype,
9185 S390_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
9186 NULL, NULL_TREE);
9187
9188 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
9189 add_builtin_function ("__builtin_set_thread_pointer", ftype,
9190 S390_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
9191 NULL, NULL_TREE);
9192 }
9193
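/* A minimal usage sketch for the two builtins registered above:

     void *tp = __builtin_thread_pointer ();
     __builtin_set_thread_pointer (tp);

   The first expands through CODE_FOR_get_tp_64 / CODE_FOR_get_tp_31,
   the second through CODE_FOR_set_tp_64 / CODE_FOR_set_tp_31,
   depending on TARGET_64BIT.  */
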
9194 /* Expand an expression EXP that calls a built-in function,
9195 with result going to TARGET if that's convenient
9196 (and in mode MODE if that's convenient).
9197 SUBTARGET may be used as the target for computing one of EXP's operands.
9198 IGNORE is nonzero if the value is to be ignored. */
9199
9200 static rtx
9201 s390_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
9202 enum machine_mode mode ATTRIBUTE_UNUSED,
9203 int ignore ATTRIBUTE_UNUSED)
9204 {
9205 #define MAX_ARGS 2
9206
9207 enum insn_code const *code_for_builtin =
9208 TARGET_64BIT ? code_for_builtin_64 : code_for_builtin_31;
9209
9210 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
9211 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
9212 enum insn_code icode;
9213 rtx op[MAX_ARGS], pat;
9214 int arity;
9215 bool nonvoid;
9216 tree arg;
9217 call_expr_arg_iterator iter;
9218
9219 if (fcode >= S390_BUILTIN_max)
9220 internal_error ("bad builtin fcode");
9221 icode = code_for_builtin[fcode];
9222 if (icode == 0)
9223 internal_error ("bad builtin fcode");
9224
9225 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
9226
9227 arity = 0;
9228 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
9229 {
9230 const struct insn_operand_data *insn_op;
9231
9232 if (arg == error_mark_node)
9233 return NULL_RTX;
9234 if (arity > MAX_ARGS)
9235 return NULL_RTX;
9236
9237 insn_op = &insn_data[icode].operand[arity + nonvoid];
9238
9239 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
9240
9241 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
9242 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
9243 arity++;
9244 }
9245
9246 if (nonvoid)
9247 {
9248 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9249 if (!target
9250 || GET_MODE (target) != tmode
9251 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
9252 target = gen_reg_rtx (tmode);
9253 }
9254
9255 switch (arity)
9256 {
9257 case 0:
9258 pat = GEN_FCN (icode) (target);
9259 break;
9260 case 1:
9261 if (nonvoid)
9262 pat = GEN_FCN (icode) (target, op[0]);
9263 else
9264 pat = GEN_FCN (icode) (op[0]);
9265 break;
9266 case 2:
9267 pat = GEN_FCN (icode) (target, op[0], op[1]);
9268 break;
9269 default:
9270 gcc_unreachable ();
9271 }
9272 if (!pat)
9273 return NULL_RTX;
9274 emit_insn (pat);
9275
9276 if (nonvoid)
9277 return target;
9278 else
9279 return const0_rtx;
9280 }
9281
9282
9283 /* Output assembly code for the trampoline template to
9284 stdio stream FILE.
9285
9286 On S/390, we use gpr 1 internally in the trampoline code;
9287 gpr 0 is used to hold the static chain. */
9288
9289 static void
9290 s390_asm_trampoline_template (FILE *file)
9291 {
9292 rtx op[2];
9293 op[0] = gen_rtx_REG (Pmode, 0);
9294 op[1] = gen_rtx_REG (Pmode, 1);
9295
9296 if (TARGET_64BIT)
9297 {
9298 output_asm_insn ("basr\t%1,0", op);
9299 output_asm_insn ("lmg\t%0,%1,14(%1)", op);
9300 output_asm_insn ("br\t%1", op);
9301 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 10));
9302 }
9303 else
9304 {
9305 output_asm_insn ("basr\t%1,0", op);
9306 output_asm_insn ("lm\t%0,%1,6(%1)", op);
9307 output_asm_insn ("br\t%1", op);
9308 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 8));
9309 }
9310 }
9311
9312 /* Emit RTL insns to initialize the variable parts of a trampoline.
9313 FNADDR is an RTX for the address of the function's pure code.
9314 CXT is an RTX for the static chain value for the function. */
9315
9316 static void
9317 s390_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
9318 {
9319 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
9320 rtx mem;
9321
9322 emit_block_move (m_tramp, assemble_trampoline_template (),
9323 GEN_INT (2*UNITS_PER_WORD), BLOCK_OP_NORMAL);
9324
9325 mem = adjust_address (m_tramp, Pmode, 2*UNITS_PER_WORD);
9326 emit_move_insn (mem, cxt);
9327 mem = adjust_address (m_tramp, Pmode, 3*UNITS_PER_WORD);
9328 emit_move_insn (mem, fnaddr);
9329 }
9330
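/* For illustration: a trampoline is instantiated e.g. when the address
   of a GNU C nested function escapes, as in

     int f (int x)
     {
       int g (int y) { return x + y; }   // needs the static chain
       int (*fp) (int) = g;              // forces a trampoline
       return fp (1);
     }

   The template emitted above is copied into writable memory (normally
   the stack) and the two words stored here supply the static chain
   value and the target function address.  */
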
9331 /* Output assembler code to FILE to increment profiler label # LABELNO
9332 for profiling a function entry. */
9333
9334 void
9335 s390_function_profiler (FILE *file, int labelno)
9336 {
9337 rtx op[7];
9338
9339 char label[128];
9340 ASM_GENERATE_INTERNAL_LABEL (label, "LP", labelno);
9341
9342 fprintf (file, "# function profiler \n");
9343
9344 op[0] = gen_rtx_REG (Pmode, RETURN_REGNUM);
9345 op[1] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
9346 op[1] = gen_rtx_MEM (Pmode, plus_constant (op[1], UNITS_PER_LONG));
9347
9348 op[2] = gen_rtx_REG (Pmode, 1);
9349 op[3] = gen_rtx_SYMBOL_REF (Pmode, label);
9350 SYMBOL_REF_FLAGS (op[3]) = SYMBOL_FLAG_LOCAL;
9351
9352 op[4] = gen_rtx_SYMBOL_REF (Pmode, "_mcount");
9353 if (flag_pic)
9354 {
9355 op[4] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[4]), UNSPEC_PLT);
9356 op[4] = gen_rtx_CONST (Pmode, op[4]);
9357 }
9358
9359 if (TARGET_64BIT)
9360 {
9361 output_asm_insn ("stg\t%0,%1", op);
9362 output_asm_insn ("larl\t%2,%3", op);
9363 output_asm_insn ("brasl\t%0,%4", op);
9364 output_asm_insn ("lg\t%0,%1", op);
9365 }
9366 else if (!flag_pic)
9367 {
9368 op[6] = gen_label_rtx ();
9369
9370 output_asm_insn ("st\t%0,%1", op);
9371 output_asm_insn ("bras\t%2,%l6", op);
9372 output_asm_insn (".long\t%4", op);
9373 output_asm_insn (".long\t%3", op);
9374 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
9375 output_asm_insn ("l\t%0,0(%2)", op);
9376 output_asm_insn ("l\t%2,4(%2)", op);
9377 output_asm_insn ("basr\t%0,%0", op);
9378 output_asm_insn ("l\t%0,%1", op);
9379 }
9380 else
9381 {
9382 op[5] = gen_label_rtx ();
9383 op[6] = gen_label_rtx ();
9384
9385 output_asm_insn ("st\t%0,%1", op);
9386 output_asm_insn ("bras\t%2,%l6", op);
9387 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[5]));
9388 output_asm_insn (".long\t%4-%l5", op);
9389 output_asm_insn (".long\t%3-%l5", op);
9390 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
9391 output_asm_insn ("lr\t%0,%2", op);
9392 output_asm_insn ("a\t%0,0(%2)", op);
9393 output_asm_insn ("a\t%2,4(%2)", op);
9394 output_asm_insn ("basr\t%0,%0", op);
9395 output_asm_insn ("l\t%0,%1", op);
9396 }
9397 }
9398
9399 /* Encode symbol attributes (local vs. global, tls model) of a SYMBOL_REF
9400 into its SYMBOL_REF_FLAGS. */
9401
9402 static void
9403 s390_encode_section_info (tree decl, rtx rtl, int first)
9404 {
9405 default_encode_section_info (decl, rtl, first);
9406
9407 if (TREE_CODE (decl) == VAR_DECL)
9408 {
9409 /* If a variable has a forced alignment to < 2 bytes, mark it
9410 with SYMBOL_FLAG_ALIGN1 to prevent it from being used as LARL
9411 operand. */
9412 if (DECL_USER_ALIGN (decl) && DECL_ALIGN (decl) < 16)
9413 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_ALIGN1;
9414 if (!DECL_SIZE (decl)
9415 || !DECL_ALIGN (decl)
9416 || !host_integerp (DECL_SIZE (decl), 0)
9417 || (DECL_ALIGN (decl) <= 64
9418 && DECL_ALIGN (decl) != tree_low_cst (DECL_SIZE (decl), 0)))
9419 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
9420 }
9421
9422 /* Literal pool references don't have a decl so they are handled
9423 differently here. We rely on the information in the MEM_ALIGN
9424 entry to decide upon natural alignment. */
9425 if (MEM_P (rtl)
9426 && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
9427 && TREE_CONSTANT_POOL_ADDRESS_P (XEXP (rtl, 0))
9428 && (MEM_ALIGN (rtl) == 0
9429 || GET_MODE_BITSIZE (GET_MODE (rtl)) == 0
9430 || MEM_ALIGN (rtl) < GET_MODE_BITSIZE (GET_MODE (rtl))))
9431 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
9432 }
9433
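/* Illustrative examples of the conditions above:

     short a __attribute__ ((aligned (1)));  forced alignment < 2 bytes,
                                             gets SYMBOL_FLAG_ALIGN1
     char buf[3];                            8-bit alignment != 24-bit size,
                                             gets NOT_NATURALLY_ALIGNED  */
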
9434 /* Output thunk to FILE that implements a C++ virtual function call (with
9435 multiple inheritance) to FUNCTION. The thunk adjusts the this pointer
9436 by DELTA, and unless VCALL_OFFSET is zero, applies an additional adjustment
9437 stored at VCALL_OFFSET in the vtable whose address is located at offset 0
9438 relative to the resulting this pointer. */
9439
9440 static void
9441 s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
9442 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
9443 tree function)
9444 {
9445 rtx op[10];
9446 int nonlocal = 0;
9447
9448 /* Make sure unwind info is emitted for the thunk if needed. */
9449 final_start_function (emit_barrier (), file, 1);
9450
9451 /* Operand 0 is the target function. */
9452 op[0] = XEXP (DECL_RTL (function), 0);
9453 if (flag_pic && !SYMBOL_REF_LOCAL_P (op[0]))
9454 {
9455 nonlocal = 1;
9456 op[0] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[0]),
9457 TARGET_64BIT ? UNSPEC_PLT : UNSPEC_GOT);
9458 op[0] = gen_rtx_CONST (Pmode, op[0]);
9459 }
9460
9461 /* Operand 1 is the 'this' pointer. */
9462 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
9463 op[1] = gen_rtx_REG (Pmode, 3);
9464 else
9465 op[1] = gen_rtx_REG (Pmode, 2);
9466
9467 /* Operand 2 is the delta. */
9468 op[2] = GEN_INT (delta);
9469
9470 /* Operand 3 is the vcall_offset. */
9471 op[3] = GEN_INT (vcall_offset);
9472
9473 /* Operand 4 is the temporary register. */
9474 op[4] = gen_rtx_REG (Pmode, 1);
9475
9476 /* Operands 5 to 8 can be used as labels. */
9477 op[5] = NULL_RTX;
9478 op[6] = NULL_RTX;
9479 op[7] = NULL_RTX;
9480 op[8] = NULL_RTX;
9481
9482 /* Operand 9 can be used for temporary register. */
9483 op[9] = NULL_RTX;
9484
9485 /* Generate code. */
9486 if (TARGET_64BIT)
9487 {
9488 /* Setup literal pool pointer if required. */
9489 if ((!DISP_IN_RANGE (delta)
9490 && !CONST_OK_FOR_K (delta)
9491 && !CONST_OK_FOR_Os (delta))
9492 || (!DISP_IN_RANGE (vcall_offset)
9493 && !CONST_OK_FOR_K (vcall_offset)
9494 && !CONST_OK_FOR_Os (vcall_offset)))
9495 {
9496 op[5] = gen_label_rtx ();
9497 output_asm_insn ("larl\t%4,%5", op);
9498 }
9499
9500 /* Add DELTA to this pointer. */
9501 if (delta)
9502 {
9503 if (CONST_OK_FOR_J (delta))
9504 output_asm_insn ("la\t%1,%2(%1)", op);
9505 else if (DISP_IN_RANGE (delta))
9506 output_asm_insn ("lay\t%1,%2(%1)", op);
9507 else if (CONST_OK_FOR_K (delta))
9508 output_asm_insn ("aghi\t%1,%2", op);
9509 else if (CONST_OK_FOR_Os (delta))
9510 output_asm_insn ("agfi\t%1,%2", op);
9511 else
9512 {
9513 op[6] = gen_label_rtx ();
9514 output_asm_insn ("agf\t%1,%6-%5(%4)", op);
9515 }
9516 }
9517
9518 /* Perform vcall adjustment. */
9519 if (vcall_offset)
9520 {
9521 if (DISP_IN_RANGE (vcall_offset))
9522 {
9523 output_asm_insn ("lg\t%4,0(%1)", op);
9524 output_asm_insn ("ag\t%1,%3(%4)", op);
9525 }
9526 else if (CONST_OK_FOR_K (vcall_offset))
9527 {
9528 output_asm_insn ("lghi\t%4,%3", op);
9529 output_asm_insn ("ag\t%4,0(%1)", op);
9530 output_asm_insn ("ag\t%1,0(%4)", op);
9531 }
9532 else if (CONST_OK_FOR_Os (vcall_offset))
9533 {
9534 output_asm_insn ("lgfi\t%4,%3", op);
9535 output_asm_insn ("ag\t%4,0(%1)", op);
9536 output_asm_insn ("ag\t%1,0(%4)", op);
9537 }
9538 else
9539 {
9540 op[7] = gen_label_rtx ();
9541 output_asm_insn ("llgf\t%4,%7-%5(%4)", op);
9542 output_asm_insn ("ag\t%4,0(%1)", op);
9543 output_asm_insn ("ag\t%1,0(%4)", op);
9544 }
9545 }
9546
9547 /* Jump to target. */
9548 output_asm_insn ("jg\t%0", op);
9549
9550 /* Output literal pool if required. */
9551 if (op[5])
9552 {
9553 output_asm_insn (".align\t4", op);
9554 targetm.asm_out.internal_label (file, "L",
9555 CODE_LABEL_NUMBER (op[5]));
9556 }
9557 if (op[6])
9558 {
9559 targetm.asm_out.internal_label (file, "L",
9560 CODE_LABEL_NUMBER (op[6]));
9561 output_asm_insn (".long\t%2", op);
9562 }
9563 if (op[7])
9564 {
9565 targetm.asm_out.internal_label (file, "L",
9566 CODE_LABEL_NUMBER (op[7]));
9567 output_asm_insn (".long\t%3", op);
9568 }
9569 }
9570 else
9571 {
9572 /* Setup base pointer if required. */
9573 if (!vcall_offset
9574 || (!DISP_IN_RANGE (delta)
9575 && !CONST_OK_FOR_K (delta)
9576 && !CONST_OK_FOR_Os (delta))
9577 || (!DISP_IN_RANGE (delta)
9578 && !CONST_OK_FOR_K (vcall_offset)
9579 && !CONST_OK_FOR_Os (vcall_offset)))
9580 {
9581 op[5] = gen_label_rtx ();
9582 output_asm_insn ("basr\t%4,0", op);
9583 targetm.asm_out.internal_label (file, "L",
9584 CODE_LABEL_NUMBER (op[5]));
9585 }
9586
9587 /* Add DELTA to this pointer. */
9588 if (delta)
9589 {
9590 if (CONST_OK_FOR_J (delta))
9591 output_asm_insn ("la\t%1,%2(%1)", op);
9592 else if (DISP_IN_RANGE (delta))
9593 output_asm_insn ("lay\t%1,%2(%1)", op);
9594 else if (CONST_OK_FOR_K (delta))
9595 output_asm_insn ("ahi\t%1,%2", op);
9596 else if (CONST_OK_FOR_Os (delta))
9597 output_asm_insn ("afi\t%1,%2", op);
9598 else
9599 {
9600 op[6] = gen_label_rtx ();
9601 output_asm_insn ("a\t%1,%6-%5(%4)", op);
9602 }
9603 }
9604
9605 /* Perform vcall adjustment. */
9606 if (vcall_offset)
9607 {
9608 if (CONST_OK_FOR_J (vcall_offset))
9609 {
9610 output_asm_insn ("l\t%4,0(%1)", op);
9611 output_asm_insn ("a\t%1,%3(%4)", op);
9612 }
9613 else if (DISP_IN_RANGE (vcall_offset))
9614 {
9615 output_asm_insn ("l\t%4,0(%1)", op);
9616 output_asm_insn ("ay\t%1,%3(%4)", op);
9617 }
9618 else if (CONST_OK_FOR_K (vcall_offset))
9619 {
9620 output_asm_insn ("lhi\t%4,%3", op);
9621 output_asm_insn ("a\t%4,0(%1)", op);
9622 output_asm_insn ("a\t%1,0(%4)", op);
9623 }
9624 else if (CONST_OK_FOR_Os (vcall_offset))
9625 {
9626 output_asm_insn ("iilf\t%4,%3", op);
9627 output_asm_insn ("a\t%4,0(%1)", op);
9628 output_asm_insn ("a\t%1,0(%4)", op);
9629 }
9630 else
9631 {
9632 op[7] = gen_label_rtx ();
9633 output_asm_insn ("l\t%4,%7-%5(%4)", op);
9634 output_asm_insn ("a\t%4,0(%1)", op);
9635 output_asm_insn ("a\t%1,0(%4)", op);
9636 }
9637
9638 /* We had to clobber the base pointer register.
9639 Re-setup the base pointer (with a different base). */
9640 op[5] = gen_label_rtx ();
9641 output_asm_insn ("basr\t%4,0", op);
9642 targetm.asm_out.internal_label (file, "L",
9643 CODE_LABEL_NUMBER (op[5]));
9644 }
9645
9646 /* Jump to target. */
9647 op[8] = gen_label_rtx ();
9648
9649 if (!flag_pic)
9650 output_asm_insn ("l\t%4,%8-%5(%4)", op);
9651 else if (!nonlocal)
9652 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9653 /* We cannot call through .plt, since .plt requires %r12 loaded. */
9654 else if (flag_pic == 1)
9655 {
9656 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9657 output_asm_insn ("l\t%4,%0(%4)", op);
9658 }
9659 else if (flag_pic == 2)
9660 {
9661 op[9] = gen_rtx_REG (Pmode, 0);
9662 output_asm_insn ("l\t%9,%8-4-%5(%4)", op);
9663 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9664 output_asm_insn ("ar\t%4,%9", op);
9665 output_asm_insn ("l\t%4,0(%4)", op);
9666 }
9667
9668 output_asm_insn ("br\t%4", op);
9669
9670 /* Output literal pool. */
9671 output_asm_insn (".align\t4", op);
9672
9673 if (nonlocal && flag_pic == 2)
9674 output_asm_insn (".long\t%0", op);
9675 if (nonlocal)
9676 {
9677 op[0] = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
9678 SYMBOL_REF_FLAGS (op[0]) = SYMBOL_FLAG_LOCAL;
9679 }
9680
9681 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[8]));
9682 if (!flag_pic)
9683 output_asm_insn (".long\t%0", op);
9684 else
9685 output_asm_insn (".long\t%0-%5", op);
9686
9687 if (op[6])
9688 {
9689 targetm.asm_out.internal_label (file, "L",
9690 CODE_LABEL_NUMBER (op[6]));
9691 output_asm_insn (".long\t%2", op);
9692 }
9693 if (op[7])
9694 {
9695 targetm.asm_out.internal_label (file, "L",
9696 CODE_LABEL_NUMBER (op[7]));
9697 output_asm_insn (".long\t%3", op);
9698 }
9699 }
9700 final_end_function ();
9701 }
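
/* As an illustration (a sketch, not output of any particular build): for a
   64-bit thunk with a small DELTA such as 16, no vcall offset, and the THIS
   pointer arriving in %r2, the code above emits just

	la	%r2,16(%r2)
	jg	<target>

   while out-of-range offsets additionally go through the literal pool that
   is addressed via the label loaded with "larl" at the top.  */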
9702
9703 static bool
9704 s390_valid_pointer_mode (enum machine_mode mode)
9705 {
9706 return (mode == SImode || (TARGET_64BIT && mode == DImode));
9707 }
9708
9709 /* Checks whether the given CALL_EXPR would use a call-saved
9710 register. This is used to decide whether sibling call
9711 optimization could be performed on the respective function
9712 call. */
9713
9714 static bool
9715 s390_call_saved_register_used (tree call_expr)
9716 {
9717 CUMULATIVE_ARGS cum;
9718 tree parameter;
9719 enum machine_mode mode;
9720 tree type;
9721 rtx parm_rtx;
9722 int reg, i;
9723
9724 INIT_CUMULATIVE_ARGS (cum, NULL, NULL, 0, 0);
9725
9726 for (i = 0; i < call_expr_nargs (call_expr); i++)
9727 {
9728 parameter = CALL_EXPR_ARG (call_expr, i);
9729 gcc_assert (parameter);
9730
9731 /* For an undeclared variable passed as parameter we will get
9732 an ERROR_MARK node here. */
9733 if (TREE_CODE (parameter) == ERROR_MARK)
9734 return true;
9735
9736 type = TREE_TYPE (parameter);
9737 gcc_assert (type);
9738
9739 mode = TYPE_MODE (type);
9740 gcc_assert (mode);
9741
9742 if (pass_by_reference (&cum, mode, type, true))
9743 {
9744 mode = Pmode;
9745 type = build_pointer_type (type);
9746 }
9747
9748 parm_rtx = s390_function_arg (&cum, mode, type, 0);
9749
9750 s390_function_arg_advance (&cum, mode, type, 0);
9751
9752 if (!parm_rtx)
9753 continue;
9754
9755 if (REG_P (parm_rtx))
9756 {
9757 for (reg = 0;
9758 reg < HARD_REGNO_NREGS (REGNO (parm_rtx), GET_MODE (parm_rtx));
9759 reg++)
9760 if (!call_used_regs[reg + REGNO (parm_rtx)])
9761 return true;
9762 }
9763
9764 if (GET_CODE (parm_rtx) == PARALLEL)
9765 {
9766 int i;
9767
9768 for (i = 0; i < XVECLEN (parm_rtx, 0); i++)
9769 {
9770 rtx r = XEXP (XVECEXP (parm_rtx, 0, i), 0);
9771
9772 gcc_assert (REG_P (r));
9773
9774 for (reg = 0;
9775 reg < HARD_REGNO_NREGS (REGNO (r), GET_MODE (r));
9776 reg++)
9777 if (!call_used_regs[reg + REGNO (r)])
9778 return true;
9779 }
9780 }
9781
9782 }
9783 return false;
9784 }
9785
9786 /* Return true if the given call expression can be
9787 turned into a sibling call.
9788 DECL holds the declaration of the function to be called whereas
9789 EXP is the call expression itself. */
9790
9791 static bool
9792 s390_function_ok_for_sibcall (tree decl, tree exp)
9793 {
9794 /* The TPF epilogue uses register 1. */
9795 if (TARGET_TPF_PROFILING)
9796 return false;
9797
9798 /* The 31 bit PLT code uses register 12 (GOT pointer - call-saved)
9799 which would have to be restored before the sibcall. */
9800 if (!TARGET_64BIT && flag_pic && decl && !targetm.binds_local_p (decl))
9801 return false;
9802
9803 /* Register 6 on s390 is available as an argument register but is
9804 unfortunately call-saved. This makes functions needing this register
9805 for arguments not suitable for sibcalls. */
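  /* Illustrative example (the declaration below is made up): with the
     standard ABI the first five integer/pointer arguments are passed in
     %r2..%r6, so a call to

	extern long f (long, long, long, long, long);

     places the fifth argument in %r6.  Because %r6 is call-saved, such a
     call is rejected as a sibcall candidate by the check below.  */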
9806 return !s390_call_saved_register_used (exp);
9807 }
9808
9809 /* Return the fixed registers used for condition codes. */
9810
9811 static bool
9812 s390_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
9813 {
9814 *p1 = CC_REGNUM;
9815 *p2 = INVALID_REGNUM;
9816
9817 return true;
9818 }
9819
9820 /* This function is used by the call expanders of the machine description.
9821 It emits the call insn itself together with the necessary operations
9822 to adjust the target address and returns the emitted insn.
9823 ADDR_LOCATION is the target address rtx
9824 TLS_CALL the location of the thread-local symbol
9825 RESULT_REG the register where the result of the call should be stored
9826 RETADDR_REG the register where the return address should be stored
9827 If this parameter is NULL_RTX the call is considered
9828 to be a sibling call. */
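
/* Usage sketch (variable names here are illustrative, not from this file):
   a sibling call can be emitted as

     insn = s390_emit_call (target_addr, NULL_RTX, NULL_RTX, NULL_RTX);

   while a regular call passes a return-address register to be clobbered,
   as the invocation in s390_emit_tpf_eh_return below does with
   gen_rtx_REG (Pmode, RETURN_REGNUM).  */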
9829
9830 rtx
9831 s390_emit_call (rtx addr_location, rtx tls_call, rtx result_reg,
9832 rtx retaddr_reg)
9833 {
9834 bool plt_call = false;
9835 rtx insn;
9836 rtx call;
9837 rtx clobber;
9838 rtvec vec;
9839
9840 /* Direct function calls need special treatment. */
9841 if (GET_CODE (addr_location) == SYMBOL_REF)
9842 {
9843 /* When calling a global routine in PIC mode, we must
9844 replace the symbol itself with the PLT stub. */
9845 if (flag_pic && !SYMBOL_REF_LOCAL_P (addr_location))
9846 {
9847 if (retaddr_reg != NULL_RTX)
9848 {
9849 addr_location = gen_rtx_UNSPEC (Pmode,
9850 gen_rtvec (1, addr_location),
9851 UNSPEC_PLT);
9852 addr_location = gen_rtx_CONST (Pmode, addr_location);
9853 plt_call = true;
9854 }
9855 else
9856 /* For -fpic code the PLT entries might use r12 which is
9857 call-saved. Therefore we cannot do a sibcall when
9858 calling directly using a symbol ref. When reaching
9859 this point we decided (in s390_function_ok_for_sibcall)
9860 to do a sibcall for a function pointer but one of the
9861 optimizers was able to get rid of the function pointer
9862 by propagating the symbol ref into the call. This
9863 optimization is illegal for S/390 so we turn the direct
9864 call into an indirect call again. */
9865 addr_location = force_reg (Pmode, addr_location);
9866 }
9867
9868 /* Unless we can use the bras(l) insn, force the
9869 routine address into a register. */
9870 if (!TARGET_SMALL_EXEC && !TARGET_CPU_ZARCH)
9871 {
9872 if (flag_pic)
9873 addr_location = legitimize_pic_address (addr_location, 0);
9874 else
9875 addr_location = force_reg (Pmode, addr_location);
9876 }
9877 }
9878
9879 /* If it is already an indirect call or the code above moved the
9880 SYMBOL_REF to somewhere else, make sure the address can be found in
9881 register 1. */
9882 if (retaddr_reg == NULL_RTX
9883 && GET_CODE (addr_location) != SYMBOL_REF
9884 && !plt_call)
9885 {
9886 emit_move_insn (gen_rtx_REG (Pmode, SIBCALL_REGNUM), addr_location);
9887 addr_location = gen_rtx_REG (Pmode, SIBCALL_REGNUM);
9888 }
9889
9890 addr_location = gen_rtx_MEM (QImode, addr_location);
9891 call = gen_rtx_CALL (VOIDmode, addr_location, const0_rtx);
9892
9893 if (result_reg != NULL_RTX)
9894 call = gen_rtx_SET (VOIDmode, result_reg, call);
9895
9896 if (retaddr_reg != NULL_RTX)
9897 {
9898 clobber = gen_rtx_CLOBBER (VOIDmode, retaddr_reg);
9899
9900 if (tls_call != NULL_RTX)
9901 vec = gen_rtvec (3, call, clobber,
9902 gen_rtx_USE (VOIDmode, tls_call));
9903 else
9904 vec = gen_rtvec (2, call, clobber);
9905
9906 call = gen_rtx_PARALLEL (VOIDmode, vec);
9907 }
9908
9909 insn = emit_call_insn (call);
9910
9911 /* 31-bit PLT stubs and tls calls use the GOT register implicitly. */
9912 if ((!TARGET_64BIT && plt_call) || tls_call != NULL_RTX)
9913 {
9914 /* s390_function_ok_for_sibcall should
9915 have denied sibcalls in this case. */
9916 gcc_assert (retaddr_reg != NULL_RTX);
9917
9918 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
9919 }
9920 return insn;
9921 }
9922
9923 /* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
9924
9925 static void
9926 s390_conditional_register_usage (void)
9927 {
9928 int i;
9929
9930 if (flag_pic)
9931 {
9932 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
9933 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
9934 }
9935 if (TARGET_CPU_ZARCH)
9936 {
9937 fixed_regs[BASE_REGNUM] = 0;
9938 call_used_regs[BASE_REGNUM] = 0;
9939 fixed_regs[RETURN_REGNUM] = 0;
9940 call_used_regs[RETURN_REGNUM] = 0;
9941 }
9942 if (TARGET_64BIT)
9943 {
9944 for (i = 24; i < 32; i++)
9945 call_used_regs[i] = call_really_used_regs[i] = 0;
9946 }
9947 else
9948 {
9949 for (i = 18; i < 20; i++)
9950 call_used_regs[i] = call_really_used_regs[i] = 0;
9951 }
9952
9953 if (TARGET_SOFT_FLOAT)
9954 {
9955 for (i = 16; i < 32; i++)
9956 call_used_regs[i] = fixed_regs[i] = 1;
9957 }
9958 }
9959
9960 /* Corresponding function to eh_return expander. */
9961
9962 static GTY(()) rtx s390_tpf_eh_return_symbol;
9963 void
9964 s390_emit_tpf_eh_return (rtx target)
9965 {
9966 rtx insn, reg;
9967
9968 if (!s390_tpf_eh_return_symbol)
9969 s390_tpf_eh_return_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tpf_eh_return");
9970
9971 reg = gen_rtx_REG (Pmode, 2);
9972
9973 emit_move_insn (reg, target);
9974 insn = s390_emit_call (s390_tpf_eh_return_symbol, NULL_RTX, reg,
9975 gen_rtx_REG (Pmode, RETURN_REGNUM));
9976 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
9977
9978 emit_move_insn (EH_RETURN_HANDLER_RTX, reg);
9979 }
9980
9981 /* Rework the prologue/epilogue to avoid saving/restoring
9982 registers unnecessarily. */
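
/* For instance (a sketch, register numbers purely illustrative): a prologue
   store-multiple such as

	stmg	%r6,%r15,48(%r15)

   emitted before the final frame layout was known can be narrowed here to

	stmg	%r14,%r15,112(%r15)

   once cfun_frame_layout shows that only %r14 and %r15 actually need saving;
   the corresponding epilogue load-multiple is narrowed the same way.  */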
9983
9984 static void
9985 s390_optimize_prologue (void)
9986 {
9987 rtx insn, new_insn, next_insn;
9988
9989 /* Do a final recompute of the frame-related data. */
9990
9991 s390_update_frame_layout ();
9992
9993 /* If all special registers are in fact used, there's nothing we
9994 can do, so no point in walking the insn list. */
9995
9996 if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
9997 && cfun_frame_layout.last_save_gpr >= BASE_REGNUM
9998 && (TARGET_CPU_ZARCH
9999 || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
10000 && cfun_frame_layout.last_save_gpr >= RETURN_REGNUM)))
10001 return;
10002
10003 /* Search for prologue/epilogue insns and replace them. */
10004
10005 for (insn = get_insns (); insn; insn = next_insn)
10006 {
10007 int first, last, off;
10008 rtx set, base, offset;
10009
10010 next_insn = NEXT_INSN (insn);
10011
10012 if (GET_CODE (insn) != INSN)
10013 continue;
10014
10015 if (GET_CODE (PATTERN (insn)) == PARALLEL
10016 && store_multiple_operation (PATTERN (insn), VOIDmode))
10017 {
10018 set = XVECEXP (PATTERN (insn), 0, 0);
10019 first = REGNO (SET_SRC (set));
10020 last = first + XVECLEN (PATTERN (insn), 0) - 1;
10021 offset = const0_rtx;
10022 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
10023 off = INTVAL (offset);
10024
10025 if (GET_CODE (base) != REG || off < 0)
10026 continue;
10027 if (cfun_frame_layout.first_save_gpr != -1
10028 && (cfun_frame_layout.first_save_gpr < first
10029 || cfun_frame_layout.last_save_gpr > last))
10030 continue;
10031 if (REGNO (base) != STACK_POINTER_REGNUM
10032 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10033 continue;
10034 if (first > BASE_REGNUM || last < BASE_REGNUM)
10035 continue;
10036
10037 if (cfun_frame_layout.first_save_gpr != -1)
10038 {
10039 new_insn = save_gprs (base,
10040 off + (cfun_frame_layout.first_save_gpr
10041 - first) * UNITS_PER_LONG,
10042 cfun_frame_layout.first_save_gpr,
10043 cfun_frame_layout.last_save_gpr);
10044 new_insn = emit_insn_before (new_insn, insn);
10045 INSN_ADDRESSES_NEW (new_insn, -1);
10046 }
10047
10048 remove_insn (insn);
10049 continue;
10050 }
10051
10052 if (cfun_frame_layout.first_save_gpr == -1
10053 && GET_CODE (PATTERN (insn)) == SET
10054 && GET_CODE (SET_SRC (PATTERN (insn))) == REG
10055 && (REGNO (SET_SRC (PATTERN (insn))) == BASE_REGNUM
10056 || (!TARGET_CPU_ZARCH
10057 && REGNO (SET_SRC (PATTERN (insn))) == RETURN_REGNUM))
10058 && GET_CODE (SET_DEST (PATTERN (insn))) == MEM)
10059 {
10060 set = PATTERN (insn);
10061 first = REGNO (SET_SRC (set));
10062 offset = const0_rtx;
10063 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
10064 off = INTVAL (offset);
10065
10066 if (GET_CODE (base) != REG || off < 0)
10067 continue;
10068 if (REGNO (base) != STACK_POINTER_REGNUM
10069 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10070 continue;
10071
10072 remove_insn (insn);
10073 continue;
10074 }
10075
10076 if (GET_CODE (PATTERN (insn)) == PARALLEL
10077 && load_multiple_operation (PATTERN (insn), VOIDmode))
10078 {
10079 set = XVECEXP (PATTERN (insn), 0, 0);
10080 first = REGNO (SET_DEST (set));
10081 last = first + XVECLEN (PATTERN (insn), 0) - 1;
10082 offset = const0_rtx;
10083 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
10084 off = INTVAL (offset);
10085
10086 if (GET_CODE (base) != REG || off < 0)
10087 continue;
10088 if (cfun_frame_layout.first_restore_gpr != -1
10089 && (cfun_frame_layout.first_restore_gpr < first
10090 || cfun_frame_layout.last_restore_gpr > last))
10091 continue;
10092 if (REGNO (base) != STACK_POINTER_REGNUM
10093 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10094 continue;
10095 if (first > BASE_REGNUM || last < BASE_REGNUM)
10096 continue;
10097
10098 if (cfun_frame_layout.first_restore_gpr != -1)
10099 {
10100 new_insn = restore_gprs (base,
10101 off + (cfun_frame_layout.first_restore_gpr
10102 - first) * UNITS_PER_LONG,
10103 cfun_frame_layout.first_restore_gpr,
10104 cfun_frame_layout.last_restore_gpr);
10105 new_insn = emit_insn_before (new_insn, insn);
10106 INSN_ADDRESSES_NEW (new_insn, -1);
10107 }
10108
10109 remove_insn (insn);
10110 continue;
10111 }
10112
10113 if (cfun_frame_layout.first_restore_gpr == -1
10114 && GET_CODE (PATTERN (insn)) == SET
10115 && GET_CODE (SET_DEST (PATTERN (insn))) == REG
10116 && (REGNO (SET_DEST (PATTERN (insn))) == BASE_REGNUM
10117 || (!TARGET_CPU_ZARCH
10118 && REGNO (SET_DEST (PATTERN (insn))) == RETURN_REGNUM))
10119 && GET_CODE (SET_SRC (PATTERN (insn))) == MEM)
10120 {
10121 set = PATTERN (insn);
10122 first = REGNO (SET_DEST (set));
10123 offset = const0_rtx;
10124 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
10125 off = INTVAL (offset);
10126
10127 if (GET_CODE (base) != REG || off < 0)
10128 continue;
10129 if (REGNO (base) != STACK_POINTER_REGNUM
10130 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10131 continue;
10132
10133 remove_insn (insn);
10134 continue;
10135 }
10136 }
10137 }
10138
10139 /* On z10 and later the dynamic branch prediction must see the
10140 backward jump within a certain window. If not, it falls back to
10141 the static prediction. This function rearranges the loop backward
10142 branch in a way which makes the static prediction always correct.
10143 The function returns true if it added an instruction. */
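/* A sketch of the transformation (mnemonics illustrative): a far backward
   conditional branch

	jne	.Lloop_head

   is rewritten into an inverted forward branch over an unconditional
   backward jump, which the static predictor handles correctly:

	je	.Lskip
	j	.Lloop_head
	.Lskip:  */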
10144 static bool
10145 s390_fix_long_loop_prediction (rtx insn)
10146 {
10147 rtx set = single_set (insn);
10148 rtx code_label, label_ref, new_label;
10149 rtx uncond_jump;
10150 rtx cur_insn;
10151 rtx tmp;
10152 int distance;
10153
10154 /* This will exclude branch on count and branch on index patterns
10155 since these are correctly statically predicted. */
10156 if (!set
10157 || SET_DEST (set) != pc_rtx
10158 || GET_CODE (SET_SRC(set)) != IF_THEN_ELSE)
10159 return false;
10160
10161 label_ref = (GET_CODE (XEXP (SET_SRC (set), 1)) == LABEL_REF ?
10162 XEXP (SET_SRC (set), 1) : XEXP (SET_SRC (set), 2));
10163
10164 gcc_assert (GET_CODE (label_ref) == LABEL_REF);
10165
10166 code_label = XEXP (label_ref, 0);
10167
10168 if (INSN_ADDRESSES (INSN_UID (code_label)) == -1
10169 || INSN_ADDRESSES (INSN_UID (insn)) == -1
10170 || (INSN_ADDRESSES (INSN_UID (insn))
10171 - INSN_ADDRESSES (INSN_UID (code_label)) < PREDICT_DISTANCE))
10172 return false;
10173
10174 for (distance = 0, cur_insn = PREV_INSN (insn);
10175 distance < PREDICT_DISTANCE - 6;
10176 distance += get_attr_length (cur_insn), cur_insn = PREV_INSN (cur_insn))
10177 if (!cur_insn || JUMP_P (cur_insn) || LABEL_P (cur_insn))
10178 return false;
10179
10180 new_label = gen_label_rtx ();
10181 uncond_jump = emit_jump_insn_after (
10182 gen_rtx_SET (VOIDmode, pc_rtx,
10183 gen_rtx_LABEL_REF (VOIDmode, code_label)),
10184 insn);
10185 emit_label_after (new_label, uncond_jump);
10186
10187 tmp = XEXP (SET_SRC (set), 1);
10188 XEXP (SET_SRC (set), 1) = XEXP (SET_SRC (set), 2);
10189 XEXP (SET_SRC (set), 2) = tmp;
10190 INSN_CODE (insn) = -1;
10191
10192 XEXP (label_ref, 0) = new_label;
10193 JUMP_LABEL (insn) = new_label;
10194 JUMP_LABEL (uncond_jump) = code_label;
10195
10196 return true;
10197 }
10198
10199 /* Returns 1 if INSN reads the value of REG for purposes not related
10200 to addressing of memory, and 0 otherwise. */
10201 static int
10202 s390_non_addr_reg_read_p (rtx reg, rtx insn)
10203 {
10204 return reg_referenced_p (reg, PATTERN (insn))
10205 && !reg_used_in_mem_p (REGNO (reg), PATTERN (insn));
10206 }
10207
10208 /* Starting from INSN find_cond_jump looks downwards in the insn
10209 stream for a single jump insn which is the last user of the
10210 condition code set in INSN. */
10211 static rtx
10212 find_cond_jump (rtx insn)
10213 {
10214 for (; insn; insn = NEXT_INSN (insn))
10215 {
10216 rtx ite, cc;
10217
10218 if (LABEL_P (insn))
10219 break;
10220
10221 if (!JUMP_P (insn))
10222 {
10223 if (reg_mentioned_p (gen_rtx_REG (CCmode, CC_REGNUM), insn))
10224 break;
10225 continue;
10226 }
10227
10228 /* This will be triggered by a return. */
10229 if (GET_CODE (PATTERN (insn)) != SET)
10230 break;
10231
10232 gcc_assert (SET_DEST (PATTERN (insn)) == pc_rtx);
10233 ite = SET_SRC (PATTERN (insn));
10234
10235 if (GET_CODE (ite) != IF_THEN_ELSE)
10236 break;
10237
10238 cc = XEXP (XEXP (ite, 0), 0);
10239 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc)))
10240 break;
10241
10242 if (find_reg_note (insn, REG_DEAD, cc))
10243 return insn;
10244 break;
10245 }
10246
10247 return NULL_RTX;
10248 }
10249
10250 /* Swap the condition in COND and the operands in OP0 and OP1 so that
10251 the semantics does not change. If NULL_RTX is passed as COND the
10252 function tries to find the conditional jump starting with INSN. */
10253 static void
10254 s390_swap_cmp (rtx cond, rtx *op0, rtx *op1, rtx insn)
10255 {
10256 rtx tmp = *op0;
10257
10258 if (cond == NULL_RTX)
10259 {
10260 rtx jump = find_cond_jump (NEXT_INSN (insn));
10261 jump = jump ? single_set (jump) : NULL_RTX;
10262
10263 if (jump == NULL_RTX)
10264 return;
10265
10266 cond = XEXP (XEXP (jump, 1), 0);
10267 }
10268
10269 *op0 = *op1;
10270 *op1 = tmp;
10271 PUT_CODE (cond, swap_condition (GET_CODE (cond)));
10272 }
10273
10274 /* On z10, instructions of the compare-and-branch family have the
10275 property of accessing the register occurring as the second operand with
10276 its bits complemented. If such a compare is grouped with a second
10277 instruction that accesses the same register non-complemented, and
10278 if that register's value is delivered via a bypass, then the
10279 pipeline recycles, thereby causing significant performance decline.
10280 This function locates such situations and exchanges the two
10281 operands of the compare. The function returns true whenever it
10282 added an insn. */
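/* An illustrative grouping (registers and mnemonics only a sketch):

	crj	%r1,%r2,...	# compare and branch, reads %r2 complemented
	lr	%r3,%r2		# reads %r2 normally

   Both insns access %r2, one of them complemented, so the operands of the
   compare are swapped (and its condition adjusted via s390_swap_cmp); if
   swapping would conflict with the previous insn instead, a NOP is emitted
   after the compare.  */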
10283 static bool
10284 s390_z10_optimize_cmp (rtx insn)
10285 {
10286 rtx prev_insn, next_insn;
10287 bool insn_added_p = false;
10288 rtx cond, *op0, *op1;
10289
10290 if (GET_CODE (PATTERN (insn)) == PARALLEL)
10291 {
10292 /* Handle compare and branch and branch on count
10293 instructions. */
10294 rtx pattern = single_set (insn);
10295
10296 if (!pattern
10297 || SET_DEST (pattern) != pc_rtx
10298 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE)
10299 return false;
10300
10301 cond = XEXP (SET_SRC (pattern), 0);
10302 op0 = &XEXP (cond, 0);
10303 op1 = &XEXP (cond, 1);
10304 }
10305 else if (GET_CODE (PATTERN (insn)) == SET)
10306 {
10307 rtx src, dest;
10308
10309 /* Handle normal compare instructions. */
10310 src = SET_SRC (PATTERN (insn));
10311 dest = SET_DEST (PATTERN (insn));
10312
10313 if (!REG_P (dest)
10314 || !CC_REGNO_P (REGNO (dest))
10315 || GET_CODE (src) != COMPARE)
10316 return false;
10317
10318 /* s390_swap_cmp will try to find the conditional
10319 jump when passing NULL_RTX as condition. */
10320 cond = NULL_RTX;
10321 op0 = &XEXP (src, 0);
10322 op1 = &XEXP (src, 1);
10323 }
10324 else
10325 return false;
10326
10327 if (!REG_P (*op0) || !REG_P (*op1))
10328 return false;
10329
10330 if (GET_MODE_CLASS (GET_MODE (*op0)) != MODE_INT)
10331 return false;
10332
10333 /* Swap the COMPARE arguments and its mask if there is a
10334 conflicting access in the previous insn. */
10335 prev_insn = prev_active_insn (insn);
10336 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
10337 && reg_referenced_p (*op1, PATTERN (prev_insn)))
10338 s390_swap_cmp (cond, op0, op1, insn);
10339
10340 /* Check if there is a conflict with the next insn. If there
10341 was no conflict with the previous insn, then swap the
10342 COMPARE arguments and its mask. If we already swapped
10343 the operands, or if swapping them would cause a conflict
10344 with the previous insn, issue a NOP after the COMPARE in
10345 order to separate the two instructions. */
10346 next_insn = next_active_insn (insn);
10347 if (next_insn != NULL_RTX && INSN_P (next_insn)
10348 && s390_non_addr_reg_read_p (*op1, next_insn))
10349 {
10350 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
10351 && s390_non_addr_reg_read_p (*op0, prev_insn))
10352 {
10353 if (REGNO (*op1) == 0)
10354 emit_insn_after (gen_nop1 (), insn);
10355 else
10356 emit_insn_after (gen_nop (), insn);
10357 insn_added_p = true;
10358 }
10359 else
10360 s390_swap_cmp (cond, op0, op1, insn);
10361 }
10362 return insn_added_p;
10363 }
10364
10365 /* Perform machine-dependent processing. */
10366
10367 static void
10368 s390_reorg (void)
10369 {
10370 bool pool_overflow = false;
10371
10372 /* Make sure all splits have been performed; splits after
10373 machine_dependent_reorg might confuse insn length counts. */
10374 split_all_insns_noflow ();
10375
10376 /* Install the main literal pool and the associated base
10377 register load insns.
10378
10379 In addition, there are two problematic situations we need
10380 to correct:
10381
10382 - the literal pool might be > 4096 bytes in size, so that
10383 some of its elements cannot be directly accessed
10384
10385 - a branch target might be > 64K away from the branch, so that
10386 it is not possible to use a PC-relative instruction.
10387
10388 To fix those, we split the single literal pool into multiple
10389 pool chunks, reloading the pool base register at various
10390 points throughout the function to ensure it always points to
10391 the pool chunk the following code expects, and / or replace
10392 PC-relative branches by absolute branches.
10393
10394 However, the two problems are interdependent: splitting the
10395 literal pool can move a branch further away from its target,
10396 causing the 64K limit to overflow, and on the other hand,
10397 replacing a PC-relative branch by an absolute branch means
10398 we need to put the branch target address into the literal
10399 pool, possibly causing it to overflow.
10400
10401 So, we loop trying to fix up both problems until we manage
10402 to satisfy both conditions at the same time. Note that the
10403 loop is guaranteed to terminate as every pass of the loop
10404 strictly decreases the total number of PC-relative branches
10405 in the function. (This is not completely true as there
10406 might be branch-over-pool insns introduced by chunkify_start.
10407 Those never need to be split however.) */
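
      /* A rough illustration (sizes only a sketch): with, say, 6 KB of
	 constants, not every pool entry is reachable through the 12-bit
	 displacement of a single base register, so the pool is chunkified
	 and the base register reloaded between chunks; if that in turn
	 pushes a branch beyond the 64K PC-relative range on a pre-zSeries
	 CPU, the branch is split, possibly adding new pool entries, and
	 the loop below retries until both constraints hold.  */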
10408
10409 for (;;)
10410 {
10411 struct constant_pool *pool = NULL;
10412
10413 /* Collect the literal pool. */
10414 if (!pool_overflow)
10415 {
10416 pool = s390_mainpool_start ();
10417 if (!pool)
10418 pool_overflow = true;
10419 }
10420
10421 /* If literal pool overflowed, start to chunkify it. */
10422 if (pool_overflow)
10423 pool = s390_chunkify_start ();
10424
10425 /* Split out-of-range branches. If this has created new
10426 literal pool entries, cancel current chunk list and
10427 recompute it. zSeries machines have large branch
10428 instructions, so we never need to split a branch. */
10429 if (!TARGET_CPU_ZARCH && s390_split_branches ())
10430 {
10431 if (pool_overflow)
10432 s390_chunkify_cancel (pool);
10433 else
10434 s390_mainpool_cancel (pool);
10435
10436 continue;
10437 }
10438
10439 /* If we made it up to here, both conditions are satisfied.
10440 Finish up literal pool related changes. */
10441 if (pool_overflow)
10442 s390_chunkify_finish (pool);
10443 else
10444 s390_mainpool_finish (pool);
10445
10446 /* We're done splitting branches. */
10447 cfun->machine->split_branches_pending_p = false;
10448 break;
10449 }
10450
10451 /* Generate out-of-pool execute target insns. */
10452 if (TARGET_CPU_ZARCH)
10453 {
10454 rtx insn, label, target;
10455
10456 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10457 {
10458 label = s390_execute_label (insn);
10459 if (!label)
10460 continue;
10461
10462 gcc_assert (label != const0_rtx);
10463
10464 target = emit_label (XEXP (label, 0));
10465 INSN_ADDRESSES_NEW (target, -1);
10466
10467 target = emit_insn (s390_execute_target (insn));
10468 INSN_ADDRESSES_NEW (target, -1);
10469 }
10470 }
10471
10472 /* Try to optimize prologue and epilogue further. */
10473 s390_optimize_prologue ();
10474
10475 /* Walk over the insns and do some >=z10 specific changes. */
10476 if (s390_tune == PROCESSOR_2097_Z10
10477 || s390_tune == PROCESSOR_2817_Z196)
10478 {
10479 rtx insn;
10480 bool insn_added_p = false;
10481
10482 /* The insn lengths and addresses have to be up to date for the
10483 following manipulations. */
10484 shorten_branches (get_insns ());
10485
10486 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10487 {
10488 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
10489 continue;
10490
10491 if (JUMP_P (insn))
10492 insn_added_p |= s390_fix_long_loop_prediction (insn);
10493
10494 if ((GET_CODE (PATTERN (insn)) == PARALLEL
10495 || GET_CODE (PATTERN (insn)) == SET)
10496 && s390_tune == PROCESSOR_2097_Z10)
10497 insn_added_p |= s390_z10_optimize_cmp (insn);
10498 }
10499
10500 /* Adjust branches if we added new instructions. */
10501 if (insn_added_p)
10502 shorten_branches (get_insns ());
10503 }
10504 }
10505
10506 /* Return true if INSN is a fp load insn writing register REGNO. */
10507 static inline bool
10508 s390_fpload_toreg (rtx insn, unsigned int regno)
10509 {
10510 rtx set;
10511 enum attr_type flag = s390_safe_attr_type (insn);
10512
10513 if (flag != TYPE_FLOADSF && flag != TYPE_FLOADDF)
10514 return false;
10515
10516 set = single_set (insn);
10517
10518 if (set == NULL_RTX)
10519 return false;
10520
10521 if (!REG_P (SET_DEST (set)) || !MEM_P (SET_SRC (set)))
10522 return false;
10523
10524 if (REGNO (SET_DEST (set)) != regno)
10525 return false;
10526
10527 return true;
10528 }
10529
10530 /* This value describes the distance to be avoided between an
10531 arithmetic fp instruction and an fp load writing the same register.
10532 Z10_EARLYLOAD_DISTANCE - 1 as well as Z10_EARLYLOAD_DISTANCE + 1 are
10533 fine, but the exact value has to be avoided. Otherwise the FP
10534 pipeline will throw an exception causing a major penalty. */
10535 #define Z10_EARLYLOAD_DISTANCE 7
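
/* Concretely (an illustrative sketch): if an arithmetic fp insn setting %f0
   was issued just far enough back that an fp load of %f0 issued now would
   land exactly Z10_EARLYLOAD_DISTANCE insns away, the reorder hook below
   demotes that load within the ready list; issuing it one insn earlier or
   later would be fine.  */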
10536
10537 /* Rearrange the ready list in order to avoid the situation described
10538 for Z10_EARLYLOAD_DISTANCE. A problematic load instruction is
10539 moved to the very end of the ready list. */
10540 static void
10541 s390_z10_prevent_earlyload_conflicts (rtx *ready, int *nready_p)
10542 {
10543 unsigned int regno;
10544 int nready = *nready_p;
10545 rtx tmp;
10546 int i;
10547 rtx insn;
10548 rtx set;
10549 enum attr_type flag;
10550 int distance;
10551
10552 /* Skip DISTANCE - 1 active insns. */
10553 for (insn = last_scheduled_insn, distance = Z10_EARLYLOAD_DISTANCE - 1;
10554 distance > 0 && insn != NULL_RTX;
10555 distance--, insn = prev_active_insn (insn))
10556 if (CALL_P (insn) || JUMP_P (insn))
10557 return;
10558
10559 if (insn == NULL_RTX)
10560 return;
10561
10562 set = single_set (insn);
10563
10564 if (set == NULL_RTX || !REG_P (SET_DEST (set))
10565 || GET_MODE_CLASS (GET_MODE (SET_DEST (set))) != MODE_FLOAT)
10566 return;
10567
10568 flag = s390_safe_attr_type (insn);
10569
10570 if (flag == TYPE_FLOADSF || flag == TYPE_FLOADDF)
10571 return;
10572
10573 regno = REGNO (SET_DEST (set));
10574 i = nready - 1;
10575
10576 while (!s390_fpload_toreg (ready[i], regno) && i > 0)
10577 i--;
10578
10579 if (!i)
10580 return;
10581
10582 tmp = ready[i];
10583 memmove (&ready[1], &ready[0], sizeof (rtx) * i);
10584 ready[0] = tmp;
10585 }
10586
10587 /* This function is called via hook TARGET_SCHED_REORDER before
10588 issuing one insn from the list READY which contains *NREADYP entries.
10589 For target z10 it reorders load instructions to avoid early load
10590 conflicts in the floating point pipeline. */
10591 static int
10592 s390_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
10593 rtx *ready, int *nreadyp, int clock ATTRIBUTE_UNUSED)
10594 {
10595 if (s390_tune == PROCESSOR_2097_Z10)
10596 if (reload_completed && *nreadyp > 1)
10597 s390_z10_prevent_earlyload_conflicts (ready, nreadyp);
10598
10599 return s390_issue_rate ();
10600 }
10601
10602 /* This function is called via hook TARGET_SCHED_VARIABLE_ISSUE after
10603 the scheduler has issued INSN. It stores the last issued insn into
10604 last_scheduled_insn in order to make it available for
10605 s390_sched_reorder. */
10606 static int
10607 s390_sched_variable_issue (FILE *file ATTRIBUTE_UNUSED,
10608 int verbose ATTRIBUTE_UNUSED,
10609 rtx insn, int more)
10610 {
10611 last_scheduled_insn = insn;
10612
10613 if (GET_CODE (PATTERN (insn)) != USE
10614 && GET_CODE (PATTERN (insn)) != CLOBBER)
10615 return more - 1;
10616 else
10617 return more;
10618 }
10619
10620 static void
10621 s390_sched_init (FILE *file ATTRIBUTE_UNUSED,
10622 int verbose ATTRIBUTE_UNUSED,
10623 int max_ready ATTRIBUTE_UNUSED)
10624 {
10625 last_scheduled_insn = NULL_RTX;
10626 }
10627
10628 /* This function is called for each rtx X of an insn and checks it for memory references. The
10629 function always returns zero because the framework it is called
10630 from would stop recursively analyzing the insn upon a return value
10631 other than zero. The real result of this function is updating
10632 counter variable MEM_COUNT. */
10633 static int
10634 check_dpu (rtx *x, unsigned *mem_count)
10635 {
10636 if (*x != NULL_RTX && MEM_P (*x))
10637 (*mem_count)++;
10638 return 0;
10639 }
10640
10641 /* This target hook implementation for TARGET_LOOP_UNROLL_ADJUST calculates
10642 how many times the loop given by struct loop *LOOP should be unrolled
10643 when tuning for cpus with a built-in stride prefetcher.
10644 The loop is analyzed for memory accesses by calling check_dpu for
10645 each rtx of the loop. Depending on the loop_depth and the amount of
10646 memory accesses a new number <=nunroll is returned to improve the
10647 behaviour of the hardware prefetch unit. */
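
/* Worked example (illustrative numbers): for a depth-1 loop containing 7
   memory references the cap below is 28 / 7 = 4, so a requested nunroll of
   8 is reduced to 4; with only 2 references the cap is 14 and nunroll
   stays at 8.  */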
10648 static unsigned
10649 s390_loop_unroll_adjust (unsigned nunroll, struct loop *loop)
10650 {
10651 basic_block *bbs;
10652 rtx insn;
10653 unsigned i;
10654 unsigned mem_count = 0;
10655
10656 if (s390_tune != PROCESSOR_2097_Z10 && s390_tune != PROCESSOR_2817_Z196)
10657 return nunroll;
10658
10659 /* Count the number of memory references within the loop body. */
10660 bbs = get_loop_body (loop);
10661 for (i = 0; i < loop->num_nodes; i++)
10662 {
10663 for (insn = BB_HEAD (bbs[i]); insn != BB_END (bbs[i]); insn = NEXT_INSN (insn))
10664 if (INSN_P (insn) && INSN_CODE (insn) != -1)
10665 for_each_rtx (&insn, (rtx_function) check_dpu, &mem_count);
10666 }
10667 free (bbs);
10668
10669 /* Prevent division by zero; nunroll needs no adjustment in this case. */
10670 if (mem_count == 0)
10671 return nunroll;
10672
10673 switch (loop_depth(loop))
10674 {
10675 case 1:
10676 return MIN (nunroll, 28 / mem_count);
10677 case 2:
10678 return MIN (nunroll, 22 / mem_count);
10679 default:
10680 return MIN (nunroll, 16 / mem_count);
10681 }
10682 }
10683
10684 /* Initialize GCC target structure. */
10685
10686 #undef TARGET_ASM_ALIGNED_HI_OP
10687 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
10688 #undef TARGET_ASM_ALIGNED_DI_OP
10689 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
10690 #undef TARGET_ASM_INTEGER
10691 #define TARGET_ASM_INTEGER s390_assemble_integer
10692
10693 #undef TARGET_ASM_OPEN_PAREN
10694 #define TARGET_ASM_OPEN_PAREN ""
10695
10696 #undef TARGET_ASM_CLOSE_PAREN
10697 #define TARGET_ASM_CLOSE_PAREN ""
10698
10699 #undef TARGET_DEFAULT_TARGET_FLAGS
10700 #define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT)
10701
10702 #undef TARGET_HANDLE_OPTION
10703 #define TARGET_HANDLE_OPTION s390_handle_option
10704
10705 #undef TARGET_OPTION_OVERRIDE
10706 #define TARGET_OPTION_OVERRIDE s390_option_override
10707
10708 #undef TARGET_OPTION_OPTIMIZATION_TABLE
10709 #define TARGET_OPTION_OPTIMIZATION_TABLE s390_option_optimization_table
10710
10711 #undef TARGET_OPTION_INIT_STRUCT
10712 #define TARGET_OPTION_INIT_STRUCT s390_option_init_struct
10713
10714 #undef TARGET_ENCODE_SECTION_INFO
10715 #define TARGET_ENCODE_SECTION_INFO s390_encode_section_info
10716
10717 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10718 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
10719
10720 #ifdef HAVE_AS_TLS
10721 #undef TARGET_HAVE_TLS
10722 #define TARGET_HAVE_TLS true
10723 #endif
10724 #undef TARGET_CANNOT_FORCE_CONST_MEM
10725 #define TARGET_CANNOT_FORCE_CONST_MEM s390_cannot_force_const_mem
10726
10727 #undef TARGET_DELEGITIMIZE_ADDRESS
10728 #define TARGET_DELEGITIMIZE_ADDRESS s390_delegitimize_address
10729
10730 #undef TARGET_LEGITIMIZE_ADDRESS
10731 #define TARGET_LEGITIMIZE_ADDRESS s390_legitimize_address
10732
10733 #undef TARGET_RETURN_IN_MEMORY
10734 #define TARGET_RETURN_IN_MEMORY s390_return_in_memory
10735
10736 #undef TARGET_INIT_BUILTINS
10737 #define TARGET_INIT_BUILTINS s390_init_builtins
10738 #undef TARGET_EXPAND_BUILTIN
10739 #define TARGET_EXPAND_BUILTIN s390_expand_builtin
10740
10741 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
10742 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA s390_output_addr_const_extra
10743
10744 #undef TARGET_ASM_OUTPUT_MI_THUNK
10745 #define TARGET_ASM_OUTPUT_MI_THUNK s390_output_mi_thunk
10746 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
10747 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
10748
10749 #undef TARGET_SCHED_ADJUST_PRIORITY
10750 #define TARGET_SCHED_ADJUST_PRIORITY s390_adjust_priority
10751 #undef TARGET_SCHED_ISSUE_RATE
10752 #define TARGET_SCHED_ISSUE_RATE s390_issue_rate
10753 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
10754 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD s390_first_cycle_multipass_dfa_lookahead
10755
10756 #undef TARGET_SCHED_VARIABLE_ISSUE
10757 #define TARGET_SCHED_VARIABLE_ISSUE s390_sched_variable_issue
10758 #undef TARGET_SCHED_REORDER
10759 #define TARGET_SCHED_REORDER s390_sched_reorder
10760 #undef TARGET_SCHED_INIT
10761 #define TARGET_SCHED_INIT s390_sched_init
10762
10763 #undef TARGET_CANNOT_COPY_INSN_P
10764 #define TARGET_CANNOT_COPY_INSN_P s390_cannot_copy_insn_p
10765 #undef TARGET_RTX_COSTS
10766 #define TARGET_RTX_COSTS s390_rtx_costs
10767 #undef TARGET_ADDRESS_COST
10768 #define TARGET_ADDRESS_COST s390_address_cost
10769 #undef TARGET_REGISTER_MOVE_COST
10770 #define TARGET_REGISTER_MOVE_COST s390_register_move_cost
10771 #undef TARGET_MEMORY_MOVE_COST
10772 #define TARGET_MEMORY_MOVE_COST s390_memory_move_cost
10773
10774 #undef TARGET_MACHINE_DEPENDENT_REORG
10775 #define TARGET_MACHINE_DEPENDENT_REORG s390_reorg
10776
10777 #undef TARGET_VALID_POINTER_MODE
10778 #define TARGET_VALID_POINTER_MODE s390_valid_pointer_mode
10779
10780 #undef TARGET_BUILD_BUILTIN_VA_LIST
10781 #define TARGET_BUILD_BUILTIN_VA_LIST s390_build_builtin_va_list
10782 #undef TARGET_EXPAND_BUILTIN_VA_START
10783 #define TARGET_EXPAND_BUILTIN_VA_START s390_va_start
10784 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
10785 #define TARGET_GIMPLIFY_VA_ARG_EXPR s390_gimplify_va_arg
10786
10787 #undef TARGET_PROMOTE_FUNCTION_MODE
10788 #define TARGET_PROMOTE_FUNCTION_MODE s390_promote_function_mode
10789 #undef TARGET_PASS_BY_REFERENCE
10790 #define TARGET_PASS_BY_REFERENCE s390_pass_by_reference
10791
10792 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
10793 #define TARGET_FUNCTION_OK_FOR_SIBCALL s390_function_ok_for_sibcall
10794 #undef TARGET_FUNCTION_ARG
10795 #define TARGET_FUNCTION_ARG s390_function_arg
10796 #undef TARGET_FUNCTION_ARG_ADVANCE
10797 #define TARGET_FUNCTION_ARG_ADVANCE s390_function_arg_advance
10798 #undef TARGET_FUNCTION_VALUE
10799 #define TARGET_FUNCTION_VALUE s390_function_value
10800 #undef TARGET_LIBCALL_VALUE
10801 #define TARGET_LIBCALL_VALUE s390_libcall_value
10802
10803 #undef TARGET_FIXED_CONDITION_CODE_REGS
10804 #define TARGET_FIXED_CONDITION_CODE_REGS s390_fixed_condition_code_regs
10805
10806 #undef TARGET_CC_MODES_COMPATIBLE
10807 #define TARGET_CC_MODES_COMPATIBLE s390_cc_modes_compatible
10808
10809 #undef TARGET_INVALID_WITHIN_DOLOOP
10810 #define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_null
10811
10812 #ifdef HAVE_AS_TLS
10813 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
10814 #define TARGET_ASM_OUTPUT_DWARF_DTPREL s390_output_dwarf_dtprel
10815 #endif
10816
10817 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
10818 #undef TARGET_MANGLE_TYPE
10819 #define TARGET_MANGLE_TYPE s390_mangle_type
10820 #endif
10821
10822 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10823 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
10824
10825 #undef TARGET_PREFERRED_RELOAD_CLASS
10826 #define TARGET_PREFERRED_RELOAD_CLASS s390_preferred_reload_class
10827
10828 #undef TARGET_SECONDARY_RELOAD
10829 #define TARGET_SECONDARY_RELOAD s390_secondary_reload
10830
10831 #undef TARGET_LIBGCC_CMP_RETURN_MODE
10832 #define TARGET_LIBGCC_CMP_RETURN_MODE s390_libgcc_cmp_return_mode
10833
10834 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
10835 #define TARGET_LIBGCC_SHIFT_COUNT_MODE s390_libgcc_shift_count_mode
10836
10837 #undef TARGET_LEGITIMATE_ADDRESS_P
10838 #define TARGET_LEGITIMATE_ADDRESS_P s390_legitimate_address_p
10839
10840 #undef TARGET_CAN_ELIMINATE
10841 #define TARGET_CAN_ELIMINATE s390_can_eliminate
10842
10843 #undef TARGET_CONDITIONAL_REGISTER_USAGE
10844 #define TARGET_CONDITIONAL_REGISTER_USAGE s390_conditional_register_usage
10845
10846 #undef TARGET_LOOP_UNROLL_ADJUST
10847 #define TARGET_LOOP_UNROLL_ADJUST s390_loop_unroll_adjust
10848
10849 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
10850 #define TARGET_ASM_TRAMPOLINE_TEMPLATE s390_asm_trampoline_template
10851 #undef TARGET_TRAMPOLINE_INIT
10852 #define TARGET_TRAMPOLINE_INIT s390_trampoline_init
10853
10854 #undef TARGET_UNWIND_WORD_MODE
10855 #define TARGET_UNWIND_WORD_MODE s390_unwind_word_mode
10856
10857 struct gcc_target targetm = TARGET_INITIALIZER;
10858
10859 #include "gt-s390.h"