s390.c (s390_delegitimize_address): Handle PLTOFF and PLT unspecs.
1 /* Subroutines used for code generation on IBM S/390 and zSeries
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006,
3 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
4 Contributed by Hartmut Penner (hpenner@de.ibm.com) and
5 Ulrich Weigand (uweigand@de.ibm.com) and
6 Andreas Krebbel (Andreas.Krebbel@de.ibm.com).
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "tm_p.h"
31 #include "regs.h"
32 #include "hard-reg-set.h"
33 #include "insn-config.h"
34 #include "conditions.h"
35 #include "output.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "except.h"
39 #include "function.h"
40 #include "recog.h"
41 #include "expr.h"
42 #include "reload.h"
43 #include "diagnostic-core.h"
44 #include "basic-block.h"
45 #include "integrate.h"
46 #include "ggc.h"
47 #include "target.h"
48 #include "target-def.h"
49 #include "debug.h"
50 #include "langhooks.h"
51 #include "optabs.h"
52 #include "gimple.h"
53 #include "df.h"
54 #include "params.h"
55 #include "cfgloop.h"
56
57
58 /* Define the specific costs for a given cpu. */
59
60 struct processor_costs
61 {
62 /* multiplication */
63 const int m; /* cost of an M instruction. */
64 const int mghi; /* cost of an MGHI instruction. */
65 const int mh; /* cost of an MH instruction. */
66 const int mhi; /* cost of an MHI instruction. */
67 const int ml; /* cost of an ML instruction. */
68 const int mr; /* cost of an MR instruction. */
69 const int ms; /* cost of an MS instruction. */
70 const int msg; /* cost of an MSG instruction. */
71 const int msgf; /* cost of an MSGF instruction. */
72 const int msgfr; /* cost of an MSGFR instruction. */
73 const int msgr; /* cost of an MSGR instruction. */
74 const int msr; /* cost of an MSR instruction. */
75 const int mult_df; /* cost of multiplication in DFmode. */
76 const int mxbr; /* cost of an MXBR instruction. */
77 /* square root */
78 const int sqxbr; /* cost of square root in TFmode. */
79 const int sqdbr; /* cost of square root in DFmode. */
80 const int sqebr; /* cost of square root in SFmode. */
81 /* multiply and add */
82 const int madbr; /* cost of multiply and add in DFmode. */
83 const int maebr; /* cost of multiply and add in SFmode. */
84 /* division */
85 const int dxbr; /* cost of a DXBR instruction. */
86 const int ddbr; /* cost of a DDBR instruction. */
87 const int debr; /* cost of a DEBR instruction. */
88 const int dlgr; /* cost of a DLGR instruction. */
89 const int dlr; /* cost of a DLR instruction. */
90 const int dr; /* cost of a DR instruction. */
91 const int dsgfr; /* cost of a DSGFR instruction. */
92 const int dsgr; /* cost of a DSGR instruction. */
93 };
94
95 const struct processor_costs *s390_cost;
96
97 static const
98 struct processor_costs z900_cost =
99 {
100 COSTS_N_INSNS (5), /* M */
101 COSTS_N_INSNS (10), /* MGHI */
102 COSTS_N_INSNS (5), /* MH */
103 COSTS_N_INSNS (4), /* MHI */
104 COSTS_N_INSNS (5), /* ML */
105 COSTS_N_INSNS (5), /* MR */
106 COSTS_N_INSNS (4), /* MS */
107 COSTS_N_INSNS (15), /* MSG */
108 COSTS_N_INSNS (7), /* MSGF */
109 COSTS_N_INSNS (7), /* MSGFR */
110 COSTS_N_INSNS (10), /* MSGR */
111 COSTS_N_INSNS (4), /* MSR */
112 COSTS_N_INSNS (7), /* multiplication in DFmode */
113 COSTS_N_INSNS (13), /* MXBR */
114 COSTS_N_INSNS (136), /* SQXBR */
115 COSTS_N_INSNS (44), /* SQDBR */
116 COSTS_N_INSNS (35), /* SQEBR */
117 COSTS_N_INSNS (18), /* MADBR */
118 COSTS_N_INSNS (13), /* MAEBR */
119 COSTS_N_INSNS (134), /* DXBR */
120 COSTS_N_INSNS (30), /* DDBR */
121 COSTS_N_INSNS (27), /* DEBR */
122 COSTS_N_INSNS (220), /* DLGR */
123 COSTS_N_INSNS (34), /* DLR */
124 COSTS_N_INSNS (34), /* DR */
125 COSTS_N_INSNS (32), /* DSGFR */
126 COSTS_N_INSNS (32), /* DSGR */
127 };
128
129 static const
130 struct processor_costs z990_cost =
131 {
132 COSTS_N_INSNS (4), /* M */
133 COSTS_N_INSNS (2), /* MGHI */
134 COSTS_N_INSNS (2), /* MH */
135 COSTS_N_INSNS (2), /* MHI */
136 COSTS_N_INSNS (4), /* ML */
137 COSTS_N_INSNS (4), /* MR */
138 COSTS_N_INSNS (5), /* MS */
139 COSTS_N_INSNS (6), /* MSG */
140 COSTS_N_INSNS (4), /* MSGF */
141 COSTS_N_INSNS (4), /* MSGFR */
142 COSTS_N_INSNS (4), /* MSGR */
143 COSTS_N_INSNS (4), /* MSR */
144 COSTS_N_INSNS (1), /* multiplication in DFmode */
145 COSTS_N_INSNS (28), /* MXBR */
146 COSTS_N_INSNS (130), /* SQXBR */
147 COSTS_N_INSNS (66), /* SQDBR */
148 COSTS_N_INSNS (38), /* SQEBR */
149 COSTS_N_INSNS (1), /* MADBR */
150 COSTS_N_INSNS (1), /* MAEBR */
151 COSTS_N_INSNS (60), /* DXBR */
152 COSTS_N_INSNS (40), /* DDBR */
153 COSTS_N_INSNS (26), /* DEBR */
154 COSTS_N_INSNS (176), /* DLGR */
155 COSTS_N_INSNS (31), /* DLR */
156 COSTS_N_INSNS (31), /* DR */
157 COSTS_N_INSNS (31), /* DSGFR */
158 COSTS_N_INSNS (31), /* DSGR */
159 };
160
161 static const
162 struct processor_costs z9_109_cost =
163 {
164 COSTS_N_INSNS (4), /* M */
165 COSTS_N_INSNS (2), /* MGHI */
166 COSTS_N_INSNS (2), /* MH */
167 COSTS_N_INSNS (2), /* MHI */
168 COSTS_N_INSNS (4), /* ML */
169 COSTS_N_INSNS (4), /* MR */
170 COSTS_N_INSNS (5), /* MS */
171 COSTS_N_INSNS (6), /* MSG */
172 COSTS_N_INSNS (4), /* MSGF */
173 COSTS_N_INSNS (4), /* MSGFR */
174 COSTS_N_INSNS (4), /* MSGR */
175 COSTS_N_INSNS (4), /* MSR */
176 COSTS_N_INSNS (1), /* multiplication in DFmode */
177 COSTS_N_INSNS (28), /* MXBR */
178 COSTS_N_INSNS (130), /* SQXBR */
179 COSTS_N_INSNS (66), /* SQDBR */
180 COSTS_N_INSNS (38), /* SQEBR */
181 COSTS_N_INSNS (1), /* MADBR */
182 COSTS_N_INSNS (1), /* MAEBR */
183 COSTS_N_INSNS (60), /* DXBR */
184 COSTS_N_INSNS (40), /* DDBR */
185 COSTS_N_INSNS (26), /* DEBR */
186 COSTS_N_INSNS (30), /* DLGR */
187 COSTS_N_INSNS (23), /* DLR */
188 COSTS_N_INSNS (23), /* DR */
189 COSTS_N_INSNS (24), /* DSGFR */
190 COSTS_N_INSNS (24), /* DSGR */
191 };
192
193 static const
194 struct processor_costs z10_cost =
195 {
196 COSTS_N_INSNS (10), /* M */
197 COSTS_N_INSNS (10), /* MGHI */
198 COSTS_N_INSNS (10), /* MH */
199 COSTS_N_INSNS (10), /* MHI */
200 COSTS_N_INSNS (10), /* ML */
201 COSTS_N_INSNS (10), /* MR */
202 COSTS_N_INSNS (10), /* MS */
203 COSTS_N_INSNS (10), /* MSG */
204 COSTS_N_INSNS (10), /* MSGF */
205 COSTS_N_INSNS (10), /* MSGFR */
206 COSTS_N_INSNS (10), /* MSGR */
207 COSTS_N_INSNS (10), /* MSR */
208 COSTS_N_INSNS (1) , /* multiplication in DFmode */
209 COSTS_N_INSNS (50), /* MXBR */
210 COSTS_N_INSNS (120), /* SQXBR */
211 COSTS_N_INSNS (52), /* SQDBR */
212 COSTS_N_INSNS (38), /* SQEBR */
213 COSTS_N_INSNS (1), /* MADBR */
214 COSTS_N_INSNS (1), /* MAEBR */
215 COSTS_N_INSNS (111), /* DXBR */
216 COSTS_N_INSNS (39), /* DDBR */
217 COSTS_N_INSNS (32), /* DEBR */
218 COSTS_N_INSNS (160), /* DLGR */
219 COSTS_N_INSNS (71), /* DLR */
220 COSTS_N_INSNS (71), /* DR */
221 COSTS_N_INSNS (71), /* DSGFR */
222 COSTS_N_INSNS (71), /* DSGR */
223 };
224
225 static const
226 struct processor_costs z196_cost =
227 {
228 COSTS_N_INSNS (7), /* M */
229 COSTS_N_INSNS (5), /* MGHI */
230 COSTS_N_INSNS (5), /* MH */
231 COSTS_N_INSNS (5), /* MHI */
232 COSTS_N_INSNS (7), /* ML */
233 COSTS_N_INSNS (7), /* MR */
234 COSTS_N_INSNS (6), /* MS */
235 COSTS_N_INSNS (8), /* MSG */
236 COSTS_N_INSNS (6), /* MSGF */
237 COSTS_N_INSNS (6), /* MSGFR */
238 COSTS_N_INSNS (8), /* MSGR */
239 COSTS_N_INSNS (6), /* MSR */
240 COSTS_N_INSNS (1) , /* multiplication in DFmode */
241 COSTS_N_INSNS (40), /* MXBR B+40 */
242 COSTS_N_INSNS (100), /* SQXBR B+100 */
243 COSTS_N_INSNS (42), /* SQDBR B+42 */
244 COSTS_N_INSNS (28), /* SQEBR B+28 */
245 COSTS_N_INSNS (1), /* MADBR B */
246 COSTS_N_INSNS (1), /* MAEBR B */
247 COSTS_N_INSNS (101), /* DXBR B+101 */
248 COSTS_N_INSNS (29), /* DDBR */
249 COSTS_N_INSNS (22), /* DEBR */
250 COSTS_N_INSNS (160), /* DLGR cracked */
251 COSTS_N_INSNS (160), /* DLR cracked */
252 COSTS_N_INSNS (160), /* DR expanded */
253 COSTS_N_INSNS (160), /* DSGFR cracked */
254 COSTS_N_INSNS (160), /* DSGR cracked */
255 };
256
257 extern int reload_completed;
258
259 /* Kept up to date using the SCHED_VARIABLE_ISSUE hook. */
260 static rtx last_scheduled_insn;
261
262 /* Structure used to hold the components of an S/390 memory
263 address. A legitimate address on S/390 is of the general
264 form
265 base + index + displacement
266 where any of the components is optional.
267
268 base and index are registers of the class ADDR_REGS,
269 displacement is an unsigned 12-bit immediate constant. */
270
271 struct s390_address
272 {
273 rtx base;
274 rtx indx;
275 rtx disp;
276 bool pointer;
277 bool literal_pool;
278 };
279
280 /* Which cpu are we tuning for. */
281 enum processor_type s390_tune = PROCESSOR_max;
282 int s390_tune_flags;
283 /* Which instruction set architecture to use. */
284 enum processor_type s390_arch;
285 int s390_arch_flags;
286
287 HOST_WIDE_INT s390_warn_framesize = 0;
288 HOST_WIDE_INT s390_stack_size = 0;
289 HOST_WIDE_INT s390_stack_guard = 0;
290
291 /* The following structure is embedded in the machine
292 specific part of struct function. */
293
294 struct GTY (()) s390_frame_layout
295 {
296 /* Offset within stack frame. */
297 HOST_WIDE_INT gprs_offset;
298 HOST_WIDE_INT f0_offset;
299 HOST_WIDE_INT f4_offset;
300 HOST_WIDE_INT f8_offset;
301 HOST_WIDE_INT backchain_offset;
302
303 /* Register number of the first and last gpr for which slots
304 in the register save area are reserved. */
305 int first_save_gpr_slot;
306 int last_save_gpr_slot;
307
308 /* Number of first and last gpr to be saved, restored. */
309 int first_save_gpr;
310 int first_restore_gpr;
311 int last_save_gpr;
312 int last_restore_gpr;
313
314 /* Bits standing for floating point registers. Set, if the
315 respective register has to be saved. Starting with reg 16 (f0)
316 at the rightmost bit.
317 Bit 15 - 8 7 6 5 4 3 2 1 0
318 fpr 15 - 8 7 5 3 1 6 4 2 0
319 reg 31 - 24 23 22 21 20 19 18 17 16 */
320 unsigned int fpr_bitmap;
321
322 /* Number of floating point registers f8-f15 which must be saved. */
323 int high_fprs;
324
325 /* Set if return address needs to be saved.
326 This flag is set by s390_return_addr_rtx if it could not use
327 the initial value of r14 and therefore depends on r14 saved
328 to the stack. */
329 bool save_return_addr_p;
330
331 /* Size of stack frame. */
332 HOST_WIDE_INT frame_size;
333 };
334
335 /* Define the structure for the machine field in struct function. */
336
337 struct GTY(()) machine_function
338 {
339 struct s390_frame_layout frame_layout;
340
341 /* Literal pool base register. */
342 rtx base_reg;
343
344 /* True if we may need to perform branch splitting. */
345 bool split_branches_pending_p;
346
347 /* Some local-dynamic TLS symbol name. */
348 const char *some_ld_name;
349
350 bool has_landing_pad_p;
351 };
352
353 /* A few accessor macros for struct cfun->machine->s390_frame_layout. */
354
355 #define cfun_frame_layout (cfun->machine->frame_layout)
356 #define cfun_save_high_fprs_p (!!cfun_frame_layout.high_fprs)
357 #define cfun_gprs_save_area_size ((cfun_frame_layout.last_save_gpr_slot - \
358 cfun_frame_layout.first_save_gpr_slot + 1) * UNITS_PER_LONG)
359 #define cfun_set_fpr_bit(BITNUM) (cfun->machine->frame_layout.fpr_bitmap |= \
360 (1 << (BITNUM)))
361 #define cfun_fpr_bit_p(BITNUM) (!!(cfun->machine->frame_layout.fpr_bitmap & \
362 (1 << (BITNUM))))
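/* BITNUM follows the fpr_bitmap layout documented above, e.g.
   cfun_fpr_bit_p (0) tests whether f0 (hard reg 16) has to be saved. */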
363
364 /* Number of GPRs and FPRs used for argument passing. */
365 #define GP_ARG_NUM_REG 5
366 #define FP_ARG_NUM_REG (TARGET_64BIT? 4 : 2)
367
368 /* A couple of shortcuts. */
369 #define CONST_OK_FOR_J(x) \
370 CONST_OK_FOR_CONSTRAINT_P((x), 'J', "J")
371 #define CONST_OK_FOR_K(x) \
372 CONST_OK_FOR_CONSTRAINT_P((x), 'K', "K")
373 #define CONST_OK_FOR_Os(x) \
374 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
375 #define CONST_OK_FOR_Op(x) \
376 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
377 #define CONST_OK_FOR_On(x) \
378 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")
379
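/* A value needing more than one register must start in an even-numbered
   register; REGNO_PAIR_OK rejects odd starting registers for such modes. */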
380 #define REGNO_PAIR_OK(REGNO, MODE) \
381 (HARD_REGNO_NREGS ((REGNO), (MODE)) == 1 || !((REGNO) & 1))
382
383 /* That's the read ahead of the dynamic branch prediction unit in
384 bytes on a z10 (or higher) CPU. */
385 #define PREDICT_DISTANCE (TARGET_Z10 ? 384 : 2048)
386
387 /* Return the alignment for LABEL. We default to the -falign-labels
388 value except for the literal pool base label. */
389 int
390 s390_label_align (rtx label)
391 {
392 rtx prev_insn = prev_active_insn (label);
393
394 if (prev_insn == NULL_RTX)
395 goto old;
396
397 prev_insn = single_set (prev_insn);
398
399 if (prev_insn == NULL_RTX)
400 goto old;
401
402 prev_insn = SET_SRC (prev_insn);
403
404 /* Don't align literal pool base labels. */
405 if (GET_CODE (prev_insn) == UNSPEC
406 && XINT (prev_insn, 1) == UNSPEC_MAIN_BASE)
407 return 0;
408
409 old:
410 return align_labels_log;
411 }
412
413 static enum machine_mode
414 s390_libgcc_cmp_return_mode (void)
415 {
416 return TARGET_64BIT ? DImode : SImode;
417 }
418
419 static enum machine_mode
420 s390_libgcc_shift_count_mode (void)
421 {
422 return TARGET_64BIT ? DImode : SImode;
423 }
424
425 static enum machine_mode
426 s390_unwind_word_mode (void)
427 {
428 return TARGET_64BIT ? DImode : SImode;
429 }
430
431 /* Return true if the back end supports mode MODE. */
432 static bool
433 s390_scalar_mode_supported_p (enum machine_mode mode)
434 {
435 /* In contrast to the default implementation, reject TImode constants on 31-bit
436 TARGET_ZARCH for ABI compliance. */
437 if (!TARGET_64BIT && TARGET_ZARCH && mode == TImode)
438 return false;
439
440 if (DECIMAL_FLOAT_MODE_P (mode))
441 return default_decimal_float_supported_p ();
442
443 return default_scalar_mode_supported_p (mode);
444 }
445
446 /* Set the has_landing_pad_p flag in struct machine_function to VALUE. */
447
448 void
449 s390_set_has_landing_pad_p (bool value)
450 {
451 cfun->machine->has_landing_pad_p = value;
452 }
453
454 /* If two condition code modes are compatible, return a condition code
455 mode which is compatible with both. Otherwise, return
456 VOIDmode. */
457
458 static enum machine_mode
459 s390_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
460 {
461 if (m1 == m2)
462 return m1;
463
464 switch (m1)
465 {
466 case CCZmode:
467 if (m2 == CCUmode || m2 == CCTmode || m2 == CCZ1mode
468 || m2 == CCSmode || m2 == CCSRmode || m2 == CCURmode)
469 return m2;
470 return VOIDmode;
471
472 case CCSmode:
473 case CCUmode:
474 case CCTmode:
475 case CCSRmode:
476 case CCURmode:
477 case CCZ1mode:
478 if (m2 == CCZmode)
479 return m1;
480
481 return VOIDmode;
482
483 default:
484 return VOIDmode;
485 }
486 return VOIDmode;
487 }
488
489 /* Return true if SET either doesn't set the CC register, or else
490 the source and destination have matching CC modes and that
491 CC mode is at least as constrained as REQ_MODE. */
492
493 static bool
494 s390_match_ccmode_set (rtx set, enum machine_mode req_mode)
495 {
496 enum machine_mode set_mode;
497
498 gcc_assert (GET_CODE (set) == SET);
499
500 if (GET_CODE (SET_DEST (set)) != REG || !CC_REGNO_P (REGNO (SET_DEST (set))))
501 return 1;
502
503 set_mode = GET_MODE (SET_DEST (set));
504 switch (set_mode)
505 {
506 case CCSmode:
507 case CCSRmode:
508 case CCUmode:
509 case CCURmode:
510 case CCLmode:
511 case CCL1mode:
512 case CCL2mode:
513 case CCL3mode:
514 case CCT1mode:
515 case CCT2mode:
516 case CCT3mode:
517 if (req_mode != set_mode)
518 return 0;
519 break;
520
521 case CCZmode:
522 if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode
523 && req_mode != CCSRmode && req_mode != CCURmode)
524 return 0;
525 break;
526
527 case CCAPmode:
528 case CCANmode:
529 if (req_mode != CCAmode)
530 return 0;
531 break;
532
533 default:
534 gcc_unreachable ();
535 }
536
537 return (GET_MODE (SET_SRC (set)) == set_mode);
538 }
539
540 /* Return true if every SET in INSN that sets the CC register
541 has source and destination with matching CC modes and that
542 CC mode is at least as constrained as REQ_MODE.
543 If REQ_MODE is VOIDmode, always return false. */
544
545 bool
546 s390_match_ccmode (rtx insn, enum machine_mode req_mode)
547 {
548 int i;
549
550 /* s390_tm_ccmode returns VOIDmode to indicate failure. */
551 if (req_mode == VOIDmode)
552 return false;
553
554 if (GET_CODE (PATTERN (insn)) == SET)
555 return s390_match_ccmode_set (PATTERN (insn), req_mode);
556
557 if (GET_CODE (PATTERN (insn)) == PARALLEL)
558 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
559 {
560 rtx set = XVECEXP (PATTERN (insn), 0, i);
561 if (GET_CODE (set) == SET)
562 if (!s390_match_ccmode_set (set, req_mode))
563 return false;
564 }
565
566 return true;
567 }
568
569 /* If a test-under-mask instruction can be used to implement
570 (compare (and ... OP1) OP2), return the CC mode required
571 to do that. Otherwise, return VOIDmode.
572 MIXED is true if the instruction can distinguish between
573 CC1 and CC2 for mixed selected bits (TMxx); it is false
574 if the instruction cannot (TM). */
575
576 enum machine_mode
577 s390_tm_ccmode (rtx op1, rtx op2, bool mixed)
578 {
579 int bit0, bit1;
580
581 /* ??? Fixme: should work on CONST_DOUBLE as well. */
582 if (GET_CODE (op1) != CONST_INT || GET_CODE (op2) != CONST_INT)
583 return VOIDmode;
584
585 /* Selected bits all zero: CC0.
586 e.g.: int a; if ((a & (16 + 128)) == 0) */
587 if (INTVAL (op2) == 0)
588 return CCTmode;
589
590 /* Selected bits all one: CC3.
591 e.g.: int a; if ((a & (16 + 128)) == 16 + 128) */
592 if (INTVAL (op2) == INTVAL (op1))
593 return CCT3mode;
594
595 /* Exactly two bits selected, mixed zeroes and ones: CC1 or CC2. e.g.:
596 int a;
597 if ((a & (16 + 128)) == 16) -> CCT1
598 if ((a & (16 + 128)) == 128) -> CCT2 */
599 if (mixed)
600 {
601 bit1 = exact_log2 (INTVAL (op2));
602 bit0 = exact_log2 (INTVAL (op1) ^ INTVAL (op2));
603 if (bit0 != -1 && bit1 != -1)
604 return bit0 > bit1 ? CCT1mode : CCT2mode;
605 }
606
607 return VOIDmode;
608 }
609
610 /* Given a comparison code OP (EQ, NE, etc.) and the operands
611 OP0 and OP1 of a COMPARE, return the mode to be used for the
612 comparison. */
613
614 enum machine_mode
615 s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
616 {
617 switch (code)
618 {
619 case EQ:
620 case NE:
621 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
622 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
623 return CCAPmode;
624 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
625 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
626 return CCAPmode;
627 if ((GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
628 || GET_CODE (op1) == NEG)
629 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
630 return CCLmode;
631
632 if (GET_CODE (op0) == AND)
633 {
634 /* Check whether we can potentially do it via TM. */
635 enum machine_mode ccmode;
636 ccmode = s390_tm_ccmode (XEXP (op0, 1), op1, 1);
637 if (ccmode != VOIDmode)
638 {
639 /* Relax CCTmode to CCZmode to allow fall-back to AND
640 if that turns out to be beneficial. */
641 return ccmode == CCTmode ? CCZmode : ccmode;
642 }
643 }
644
645 if (register_operand (op0, HImode)
646 && GET_CODE (op1) == CONST_INT
647 && (INTVAL (op1) == -1 || INTVAL (op1) == 65535))
648 return CCT3mode;
649 if (register_operand (op0, QImode)
650 && GET_CODE (op1) == CONST_INT
651 && (INTVAL (op1) == -1 || INTVAL (op1) == 255))
652 return CCT3mode;
653
654 return CCZmode;
655
656 case LE:
657 case LT:
658 case GE:
659 case GT:
660 /* The only overflow condition of NEG and ABS happens when
661 INT_MIN is used as the operand, which stays negative. So
662 we have an overflow from a positive value to a negative.
663 Using CCAP mode the resulting cc can be used for comparisons. */
664 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
665 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
666 return CCAPmode;
667
668 /* If constants are involved in an add instruction it is possible to use
669 the resulting cc for comparisons with zero. Knowing the sign of the
670 constant the overflow behavior gets predictable. e.g.:
671 int a, b; if ((b = a + c) > 0)
672 with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP */
673 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
674 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
675 {
676 if (INTVAL (XEXP((op0), 1)) < 0)
677 return CCANmode;
678 else
679 return CCAPmode;
680 }
681 /* Fall through. */
682 case UNORDERED:
683 case ORDERED:
684 case UNEQ:
685 case UNLE:
686 case UNLT:
687 case UNGE:
688 case UNGT:
689 case LTGT:
690 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
691 && GET_CODE (op1) != CONST_INT)
692 return CCSRmode;
693 return CCSmode;
694
695 case LTU:
696 case GEU:
697 if (GET_CODE (op0) == PLUS
698 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
699 return CCL1mode;
700
701 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
702 && GET_CODE (op1) != CONST_INT)
703 return CCURmode;
704 return CCUmode;
705
706 case LEU:
707 case GTU:
708 if (GET_CODE (op0) == MINUS
709 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
710 return CCL2mode;
711
712 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
713 && GET_CODE (op1) != CONST_INT)
714 return CCURmode;
715 return CCUmode;
716
717 default:
718 gcc_unreachable ();
719 }
720 }
721
722 /* Replace the comparison OP0 CODE OP1 by a semantically equivalent one
723 that we can implement more efficiently. */
724
725 void
726 s390_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1)
727 {
728 /* Convert ZERO_EXTRACT back to AND to enable TM patterns. */
729 if ((*code == EQ || *code == NE)
730 && *op1 == const0_rtx
731 && GET_CODE (*op0) == ZERO_EXTRACT
732 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
733 && GET_CODE (XEXP (*op0, 2)) == CONST_INT
734 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
735 {
736 rtx inner = XEXP (*op0, 0);
737 HOST_WIDE_INT modesize = GET_MODE_BITSIZE (GET_MODE (inner));
738 HOST_WIDE_INT len = INTVAL (XEXP (*op0, 1));
739 HOST_WIDE_INT pos = INTVAL (XEXP (*op0, 2));
740
741 if (len > 0 && len < modesize
742 && pos >= 0 && pos + len <= modesize
743 && modesize <= HOST_BITS_PER_WIDE_INT)
744 {
745 unsigned HOST_WIDE_INT block;
746 block = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
747 block <<= modesize - pos - len;
748
749 *op0 = gen_rtx_AND (GET_MODE (inner), inner,
750 gen_int_mode (block, GET_MODE (inner)));
751 }
752 }
753
754 /* Narrow AND of memory against immediate to enable TM. */
755 if ((*code == EQ || *code == NE)
756 && *op1 == const0_rtx
757 && GET_CODE (*op0) == AND
758 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
759 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
760 {
761 rtx inner = XEXP (*op0, 0);
762 rtx mask = XEXP (*op0, 1);
763
764 /* Ignore paradoxical SUBREGs if all extra bits are masked out. */
765 if (GET_CODE (inner) == SUBREG
766 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (inner)))
767 && (GET_MODE_SIZE (GET_MODE (inner))
768 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
769 && ((INTVAL (mask)
770 & GET_MODE_MASK (GET_MODE (inner))
771 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
772 == 0))
773 inner = SUBREG_REG (inner);
774
775 /* Do not change volatile MEMs. */
776 if (MEM_P (inner) && !MEM_VOLATILE_P (inner))
777 {
778 int part = s390_single_part (XEXP (*op0, 1),
779 GET_MODE (inner), QImode, 0);
780 if (part >= 0)
781 {
782 mask = gen_int_mode (s390_extract_part (mask, QImode, 0), QImode);
783 inner = adjust_address_nv (inner, QImode, part);
784 *op0 = gen_rtx_AND (QImode, inner, mask);
785 }
786 }
787 }
788
789 /* Narrow comparisons against 0xffff to HImode if possible. */
790 if ((*code == EQ || *code == NE)
791 && GET_CODE (*op1) == CONST_INT
792 && INTVAL (*op1) == 0xffff
793 && SCALAR_INT_MODE_P (GET_MODE (*op0))
794 && (nonzero_bits (*op0, GET_MODE (*op0))
795 & ~(unsigned HOST_WIDE_INT) 0xffff) == 0)
796 {
797 *op0 = gen_lowpart (HImode, *op0);
798 *op1 = constm1_rtx;
799 }
800
801 /* Remove redundant UNSPEC_CCU_TO_INT conversions if possible. */
802 if (GET_CODE (*op0) == UNSPEC
803 && XINT (*op0, 1) == UNSPEC_CCU_TO_INT
804 && XVECLEN (*op0, 0) == 1
805 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCUmode
806 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
807 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
808 && *op1 == const0_rtx)
809 {
810 enum rtx_code new_code = UNKNOWN;
811 switch (*code)
812 {
813 case EQ: new_code = EQ; break;
814 case NE: new_code = NE; break;
815 case LT: new_code = GTU; break;
816 case GT: new_code = LTU; break;
817 case LE: new_code = GEU; break;
818 case GE: new_code = LEU; break;
819 default: break;
820 }
821
822 if (new_code != UNKNOWN)
823 {
824 *op0 = XVECEXP (*op0, 0, 0);
825 *code = new_code;
826 }
827 }
828
829 /* Remove redundant UNSPEC_CCZ_TO_INT conversions if possible. */
830 if (GET_CODE (*op0) == UNSPEC
831 && XINT (*op0, 1) == UNSPEC_CCZ_TO_INT
832 && XVECLEN (*op0, 0) == 1
833 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCZmode
834 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
835 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
836 && *op1 == const0_rtx)
837 {
838 enum rtx_code new_code = UNKNOWN;
839 switch (*code)
840 {
841 case EQ: new_code = EQ; break;
842 case NE: new_code = NE; break;
843 default: break;
844 }
845
846 if (new_code != UNKNOWN)
847 {
848 *op0 = XVECEXP (*op0, 0, 0);
849 *code = new_code;
850 }
851 }
852
853 /* Simplify cascaded EQ, NE with const0_rtx. */
854 if ((*code == NE || *code == EQ)
855 && (GET_CODE (*op0) == EQ || GET_CODE (*op0) == NE)
856 && GET_MODE (*op0) == SImode
857 && GET_MODE (XEXP (*op0, 0)) == CCZ1mode
858 && REG_P (XEXP (*op0, 0))
859 && XEXP (*op0, 1) == const0_rtx
860 && *op1 == const0_rtx)
861 {
862 if ((*code == EQ && GET_CODE (*op0) == NE)
863 || (*code == NE && GET_CODE (*op0) == EQ))
864 *code = EQ;
865 else
866 *code = NE;
867 *op0 = XEXP (*op0, 0);
868 }
869
870 /* Prefer register over memory as first operand. */
871 if (MEM_P (*op0) && REG_P (*op1))
872 {
873 rtx tem = *op0; *op0 = *op1; *op1 = tem;
874 *code = swap_condition (*code);
875 }
876 }
877
878 /* Emit a compare instruction suitable to implement the comparison
879 OP0 CODE OP1. Return the correct condition RTL to be placed in
880 the IF_THEN_ELSE of the conditional branch testing the result. */
881
882 rtx
883 s390_emit_compare (enum rtx_code code, rtx op0, rtx op1)
884 {
885 enum machine_mode mode = s390_select_ccmode (code, op0, op1);
886 rtx cc;
887
888 /* Do not output a redundant compare instruction if a compare_and_swap
889 pattern already computed the result and the machine modes are compatible. */
890 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
891 {
892 gcc_assert (s390_cc_modes_compatible (GET_MODE (op0), mode)
893 == GET_MODE (op0));
894 cc = op0;
895 }
896 else
897 {
898 cc = gen_rtx_REG (mode, CC_REGNUM);
899 emit_insn (gen_rtx_SET (VOIDmode, cc, gen_rtx_COMPARE (mode, op0, op1)));
900 }
901
902 return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
903 }
904
905 /* Emit a SImode compare and swap instruction: load the current value of MEM
906 into OLD and store NEW_RTX into MEM if that value matches CMP.
907 Return the correct condition RTL to be placed in the IF_THEN_ELSE of the
908 conditional branch testing the result. */
909
910 static rtx
911 s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem, rtx cmp, rtx new_rtx)
912 {
913 emit_insn (gen_sync_compare_and_swapsi (old, mem, cmp, new_rtx));
914 return s390_emit_compare (code, gen_rtx_REG (CCZ1mode, CC_REGNUM), const0_rtx);
915 }
916
917 /* Emit a jump instruction to TARGET. If COND is NULL_RTX, emit an
918 unconditional jump, else a conditional jump under condition COND. */
919
920 void
921 s390_emit_jump (rtx target, rtx cond)
922 {
923 rtx insn;
924
925 target = gen_rtx_LABEL_REF (VOIDmode, target);
926 if (cond)
927 target = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, target, pc_rtx);
928
929 insn = gen_rtx_SET (VOIDmode, pc_rtx, target);
930 emit_jump_insn (insn);
931 }
932
933 /* Return branch condition mask to implement a branch
934 specified by CODE. Return -1 for invalid comparisons. */
935
936 int
937 s390_branch_condition_mask (rtx code)
938 {
939 const int CC0 = 1 << 3;
940 const int CC1 = 1 << 2;
941 const int CC2 = 1 << 1;
942 const int CC3 = 1 << 0;
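/* These correspond to the 4-bit mask field of the branch-on-condition
   instructions: CC0 selects the most significant mask bit, CC3 the least. */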
943
944 gcc_assert (GET_CODE (XEXP (code, 0)) == REG);
945 gcc_assert (REGNO (XEXP (code, 0)) == CC_REGNUM);
946 gcc_assert (XEXP (code, 1) == const0_rtx);
947
948 switch (GET_MODE (XEXP (code, 0)))
949 {
950 case CCZmode:
951 case CCZ1mode:
952 switch (GET_CODE (code))
953 {
954 case EQ: return CC0;
955 case NE: return CC1 | CC2 | CC3;
956 default: return -1;
957 }
958 break;
959
960 case CCT1mode:
961 switch (GET_CODE (code))
962 {
963 case EQ: return CC1;
964 case NE: return CC0 | CC2 | CC3;
965 default: return -1;
966 }
967 break;
968
969 case CCT2mode:
970 switch (GET_CODE (code))
971 {
972 case EQ: return CC2;
973 case NE: return CC0 | CC1 | CC3;
974 default: return -1;
975 }
976 break;
977
978 case CCT3mode:
979 switch (GET_CODE (code))
980 {
981 case EQ: return CC3;
982 case NE: return CC0 | CC1 | CC2;
983 default: return -1;
984 }
985 break;
986
987 case CCLmode:
988 switch (GET_CODE (code))
989 {
990 case EQ: return CC0 | CC2;
991 case NE: return CC1 | CC3;
992 default: return -1;
993 }
994 break;
995
996 case CCL1mode:
997 switch (GET_CODE (code))
998 {
999 case LTU: return CC2 | CC3; /* carry */
1000 case GEU: return CC0 | CC1; /* no carry */
1001 default: return -1;
1002 }
1003 break;
1004
1005 case CCL2mode:
1006 switch (GET_CODE (code))
1007 {
1008 case GTU: return CC0 | CC1; /* borrow */
1009 case LEU: return CC2 | CC3; /* no borrow */
1010 default: return -1;
1011 }
1012 break;
1013
1014 case CCL3mode:
1015 switch (GET_CODE (code))
1016 {
1017 case EQ: return CC0 | CC2;
1018 case NE: return CC1 | CC3;
1019 case LTU: return CC1;
1020 case GTU: return CC3;
1021 case LEU: return CC1 | CC2;
1022 case GEU: return CC2 | CC3;
1023 default: return -1;
1024 }
1025
1026 case CCUmode:
1027 switch (GET_CODE (code))
1028 {
1029 case EQ: return CC0;
1030 case NE: return CC1 | CC2 | CC3;
1031 case LTU: return CC1;
1032 case GTU: return CC2;
1033 case LEU: return CC0 | CC1;
1034 case GEU: return CC0 | CC2;
1035 default: return -1;
1036 }
1037 break;
1038
1039 case CCURmode:
1040 switch (GET_CODE (code))
1041 {
1042 case EQ: return CC0;
1043 case NE: return CC2 | CC1 | CC3;
1044 case LTU: return CC2;
1045 case GTU: return CC1;
1046 case LEU: return CC0 | CC2;
1047 case GEU: return CC0 | CC1;
1048 default: return -1;
1049 }
1050 break;
1051
1052 case CCAPmode:
1053 switch (GET_CODE (code))
1054 {
1055 case EQ: return CC0;
1056 case NE: return CC1 | CC2 | CC3;
1057 case LT: return CC1 | CC3;
1058 case GT: return CC2;
1059 case LE: return CC0 | CC1 | CC3;
1060 case GE: return CC0 | CC2;
1061 default: return -1;
1062 }
1063 break;
1064
1065 case CCANmode:
1066 switch (GET_CODE (code))
1067 {
1068 case EQ: return CC0;
1069 case NE: return CC1 | CC2 | CC3;
1070 case LT: return CC1;
1071 case GT: return CC2 | CC3;
1072 case LE: return CC0 | CC1;
1073 case GE: return CC0 | CC2 | CC3;
1074 default: return -1;
1075 }
1076 break;
1077
1078 case CCSmode:
1079 switch (GET_CODE (code))
1080 {
1081 case EQ: return CC0;
1082 case NE: return CC1 | CC2 | CC3;
1083 case LT: return CC1;
1084 case GT: return CC2;
1085 case LE: return CC0 | CC1;
1086 case GE: return CC0 | CC2;
1087 case UNORDERED: return CC3;
1088 case ORDERED: return CC0 | CC1 | CC2;
1089 case UNEQ: return CC0 | CC3;
1090 case UNLT: return CC1 | CC3;
1091 case UNGT: return CC2 | CC3;
1092 case UNLE: return CC0 | CC1 | CC3;
1093 case UNGE: return CC0 | CC2 | CC3;
1094 case LTGT: return CC1 | CC2;
1095 default: return -1;
1096 }
1097 break;
1098
1099 case CCSRmode:
1100 switch (GET_CODE (code))
1101 {
1102 case EQ: return CC0;
1103 case NE: return CC2 | CC1 | CC3;
1104 case LT: return CC2;
1105 case GT: return CC1;
1106 case LE: return CC0 | CC2;
1107 case GE: return CC0 | CC1;
1108 case UNORDERED: return CC3;
1109 case ORDERED: return CC0 | CC2 | CC1;
1110 case UNEQ: return CC0 | CC3;
1111 case UNLT: return CC2 | CC3;
1112 case UNGT: return CC1 | CC3;
1113 case UNLE: return CC0 | CC2 | CC3;
1114 case UNGE: return CC0 | CC1 | CC3;
1115 case LTGT: return CC2 | CC1;
1116 default: return -1;
1117 }
1118 break;
1119
1120 default:
1121 return -1;
1122 }
1123 }
1124
1125
1126 /* Return branch condition mask to implement a compare and branch
1127 specified by CODE. Return -1 for invalid comparisons. */
1128
1129 int
1130 s390_compare_and_branch_condition_mask (rtx code)
1131 {
1132 const int CC0 = 1 << 3;
1133 const int CC1 = 1 << 2;
1134 const int CC2 = 1 << 1;
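/* Compare-and-branch type instructions only distinguish the outcomes
   equal, low and high, so no CC3 constant is needed here. */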
1135
1136 switch (GET_CODE (code))
1137 {
1138 case EQ:
1139 return CC0;
1140 case NE:
1141 return CC1 | CC2;
1142 case LT:
1143 case LTU:
1144 return CC1;
1145 case GT:
1146 case GTU:
1147 return CC2;
1148 case LE:
1149 case LEU:
1150 return CC0 | CC1;
1151 case GE:
1152 case GEU:
1153 return CC0 | CC2;
1154 default:
1155 gcc_unreachable ();
1156 }
1157 return -1;
1158 }
1159
1160 /* If INV is false, return assembler mnemonic string to implement
1161 a branch specified by CODE. If INV is true, return mnemonic
1162 for the corresponding inverted branch. */
1163
1164 static const char *
1165 s390_branch_condition_mnemonic (rtx code, int inv)
1166 {
1167 int mask;
1168
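/* The table below is indexed by the 4-bit condition-code mask computed
   above (CC0 = 8 ... CC3 = 1); masks 0 and 15 would mean "never" and
   "always" and are not used for conditional branches. */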
1169 static const char *const mnemonic[16] =
1170 {
1171 NULL, "o", "h", "nle",
1172 "l", "nhe", "lh", "ne",
1173 "e", "nlh", "he", "nl",
1174 "le", "nh", "no", NULL
1175 };
1176
1177 if (GET_CODE (XEXP (code, 0)) == REG
1178 && REGNO (XEXP (code, 0)) == CC_REGNUM
1179 && XEXP (code, 1) == const0_rtx)
1180 mask = s390_branch_condition_mask (code);
1181 else
1182 mask = s390_compare_and_branch_condition_mask (code);
1183
1184 gcc_assert (mask >= 0);
1185
1186 if (inv)
1187 mask ^= 15;
1188
1189 gcc_assert (mask >= 1 && mask <= 14);
1190
1191 return mnemonic[mask];
1192 }
1193
1194 /* Return the part of op which has a value different from def.
1195 The size of the part is determined by mode.
1196 Use this function only if you already know that op really
1197 contains such a part. */
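/* For example, with a 64-bit HOST_WIDE_INT, OP = 0x0000ffff0000ffff,
   MODE = HImode and DEF = 0 this returns 0xffff, the lowest HImode part
   that differs from DEF. */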
1198
1199 unsigned HOST_WIDE_INT
1200 s390_extract_part (rtx op, enum machine_mode mode, int def)
1201 {
1202 unsigned HOST_WIDE_INT value = 0;
1203 int max_parts = HOST_BITS_PER_WIDE_INT / GET_MODE_BITSIZE (mode);
1204 int part_bits = GET_MODE_BITSIZE (mode);
1205 unsigned HOST_WIDE_INT part_mask
1206 = ((unsigned HOST_WIDE_INT)1 << part_bits) - 1;
1207 int i;
1208
1209 for (i = 0; i < max_parts; i++)
1210 {
1211 if (i == 0)
1212 value = (unsigned HOST_WIDE_INT) INTVAL (op);
1213 else
1214 value >>= part_bits;
1215
1216 if ((value & part_mask) != (def & part_mask))
1217 return value & part_mask;
1218 }
1219
1220 gcc_unreachable ();
1221 }
1222
1223 /* If OP is an integer constant of mode MODE with exactly one
1224 part of mode PART_MODE unequal to DEF, return the number of that
1225 part. Otherwise, return -1. */
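/* Parts are counted from the most significant part downwards; e.g. for
   OP = 0x00ff0000, MODE = SImode, PART_MODE = QImode and DEF = 0 the
   single nonzero byte is part 1, so 1 is returned. */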
1226
1227 int
1228 s390_single_part (rtx op,
1229 enum machine_mode mode,
1230 enum machine_mode part_mode,
1231 int def)
1232 {
1233 unsigned HOST_WIDE_INT value = 0;
1234 int n_parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (part_mode);
1235 unsigned HOST_WIDE_INT part_mask
1236 = ((unsigned HOST_WIDE_INT)1 << GET_MODE_BITSIZE (part_mode)) - 1;
1237 int i, part = -1;
1238
1239 if (GET_CODE (op) != CONST_INT)
1240 return -1;
1241
1242 for (i = 0; i < n_parts; i++)
1243 {
1244 if (i == 0)
1245 value = (unsigned HOST_WIDE_INT) INTVAL (op);
1246 else
1247 value >>= GET_MODE_BITSIZE (part_mode);
1248
1249 if ((value & part_mask) != (def & part_mask))
1250 {
1251 if (part != -1)
1252 return -1;
1253 else
1254 part = i;
1255 }
1256 }
1257 return part == -1 ? -1 : n_parts - 1 - part;
1258 }
1259
1260 /* Return true if IN contains a contiguous bitfield in the lower SIZE
1261 bits and no other bits are set in IN. POS and LENGTH can be used
1262 to obtain the start position and the length of the bitfield.
1263
1264 POS gives the position of the first bit of the bitfield counting
1265 from the lowest order bit starting with zero. In order to use this
1266 value for S/390 instructions this has to be converted to "bits big
1267 endian" style. */
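/* For example, IN = 0x0ff0 with SIZE = 16 yields true with *POS = 4 and
   *LENGTH = 8; IN = 0x0f0f yields false because the set bits are not
   contiguous. */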
1268
1269 bool
1270 s390_contiguous_bitmask_p (unsigned HOST_WIDE_INT in, int size,
1271 int *pos, int *length)
1272 {
1273 int tmp_pos = 0;
1274 int tmp_length = 0;
1275 int i;
1276 unsigned HOST_WIDE_INT mask = 1ULL;
1277 bool contiguous = false;
1278
1279 for (i = 0; i < size; mask <<= 1, i++)
1280 {
1281 if (contiguous)
1282 {
1283 if (mask & in)
1284 tmp_length++;
1285 else
1286 break;
1287 }
1288 else
1289 {
1290 if (mask & in)
1291 {
1292 contiguous = true;
1293 tmp_length++;
1294 }
1295 else
1296 tmp_pos++;
1297 }
1298 }
1299
1300 if (!tmp_length)
1301 return false;
1302
1303 /* Calculate a mask for all bits beyond the contiguous bits. */
1304 mask = (-1LL & ~(((1ULL << (tmp_length + tmp_pos - 1)) << 1) - 1));
1305
1306 if (mask & in)
1307 return false;
1308
1309 if (tmp_length + tmp_pos - 1 > size)
1310 return false;
1311
1312 if (length)
1313 *length = tmp_length;
1314
1315 if (pos)
1316 *pos = tmp_pos;
1317
1318 return true;
1319 }
1320
1321 /* Check whether we can (and want to) split a double-word
1322 move in mode MODE from SRC to DST into two single-word
1323 moves, moving the subword FIRST_SUBWORD first. */
1324
1325 bool
1326 s390_split_ok_p (rtx dst, rtx src, enum machine_mode mode, int first_subword)
1327 {
1328 /* Floating point registers cannot be split. */
1329 if (FP_REG_P (src) || FP_REG_P (dst))
1330 return false;
1331
1332 /* We don't need to split if operands are directly accessible. */
1333 if (s_operand (src, mode) || s_operand (dst, mode))
1334 return false;
1335
1336 /* Non-offsettable memory references cannot be split. */
1337 if ((GET_CODE (src) == MEM && !offsettable_memref_p (src))
1338 || (GET_CODE (dst) == MEM && !offsettable_memref_p (dst)))
1339 return false;
1340
1341 /* Moving the first subword must not clobber a register
1342 needed to move the second subword. */
1343 if (register_operand (dst, mode))
1344 {
1345 rtx subreg = operand_subword (dst, first_subword, 0, mode);
1346 if (reg_overlap_mentioned_p (subreg, src))
1347 return false;
1348 }
1349
1350 return true;
1351 }
1352
1353 /* Return true if it can be proven that [MEM1, MEM1 + SIZE]
1354 and [MEM2, MEM2 + SIZE] do overlap and false
1355 otherwise. */
1356
1357 bool
1358 s390_overlap_p (rtx mem1, rtx mem2, HOST_WIDE_INT size)
1359 {
1360 rtx addr1, addr2, addr_delta;
1361 HOST_WIDE_INT delta;
1362
1363 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
1364 return true;
1365
1366 if (size == 0)
1367 return false;
1368
1369 addr1 = XEXP (mem1, 0);
1370 addr2 = XEXP (mem2, 0);
1371
1372 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
1373
1374 /* This overlapping check is used by peepholes merging memory block operations.
1375 Overlapping operations would otherwise be recognized by the S/390 hardware
1376 and would fall back to a slower implementation. Allowing overlapping
1377 operations would lead to slow code but not to wrong code. Therefore we are
1378 somewhat optimistic if we cannot prove that the memory blocks are
1379 overlapping.
1380 That's why we return false here although this may accept operations on
1381 overlapping memory areas. */
1382 if (!addr_delta || GET_CODE (addr_delta) != CONST_INT)
1383 return false;
1384
1385 delta = INTVAL (addr_delta);
1386
1387 if (delta == 0
1388 || (delta > 0 && delta < size)
1389 || (delta < 0 && -delta < size))
1390 return true;
1391
1392 return false;
1393 }
1394
1395 /* Check whether the address of memory reference MEM2 equals exactly
1396 the address of memory reference MEM1 plus DELTA. Return true if
1397 we can prove this to be the case, false otherwise. */
1398
1399 bool
1400 s390_offset_p (rtx mem1, rtx mem2, rtx delta)
1401 {
1402 rtx addr1, addr2, addr_delta;
1403
1404 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
1405 return false;
1406
1407 addr1 = XEXP (mem1, 0);
1408 addr2 = XEXP (mem2, 0);
1409
1410 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
1411 if (!addr_delta || !rtx_equal_p (addr_delta, delta))
1412 return false;
1413
1414 return true;
1415 }
1416
1417 /* Expand logical operator CODE in mode MODE with operands OPERANDS. */
1418
1419 void
1420 s390_expand_logical_operator (enum rtx_code code, enum machine_mode mode,
1421 rtx *operands)
1422 {
1423 enum machine_mode wmode = mode;
1424 rtx dst = operands[0];
1425 rtx src1 = operands[1];
1426 rtx src2 = operands[2];
1427 rtx op, clob, tem;
1428
1429 /* If we cannot handle the operation directly, use a temp register. */
1430 if (!s390_logical_operator_ok_p (operands))
1431 dst = gen_reg_rtx (mode);
1432
1433 /* QImode and HImode patterns make sense only if we have a destination
1434 in memory. Otherwise perform the operation in SImode. */
1435 if ((mode == QImode || mode == HImode) && GET_CODE (dst) != MEM)
1436 wmode = SImode;
1437
1438 /* Widen operands if required. */
1439 if (mode != wmode)
1440 {
1441 if (GET_CODE (dst) == SUBREG
1442 && (tem = simplify_subreg (wmode, dst, mode, 0)) != 0)
1443 dst = tem;
1444 else if (REG_P (dst))
1445 dst = gen_rtx_SUBREG (wmode, dst, 0);
1446 else
1447 dst = gen_reg_rtx (wmode);
1448
1449 if (GET_CODE (src1) == SUBREG
1450 && (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)
1451 src1 = tem;
1452 else if (GET_MODE (src1) != VOIDmode)
1453 src1 = gen_rtx_SUBREG (wmode, force_reg (mode, src1), 0);
1454
1455 if (GET_CODE (src2) == SUBREG
1456 && (tem = simplify_subreg (wmode, src2, mode, 0)) != 0)
1457 src2 = tem;
1458 else if (GET_MODE (src2) != VOIDmode)
1459 src2 = gen_rtx_SUBREG (wmode, force_reg (mode, src2), 0);
1460 }
1461
1462 /* Emit the instruction. */
1463 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, wmode, src1, src2));
1464 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
1465 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
1466
1467 /* Fix up the destination if needed. */
1468 if (dst != operands[0])
1469 emit_move_insn (operands[0], gen_lowpart (mode, dst));
1470 }
1471
1472 /* Check whether OPERANDS are OK for a logical operation (AND, IOR, XOR). */
1473
1474 bool
1475 s390_logical_operator_ok_p (rtx *operands)
1476 {
1477 /* If the destination operand is in memory, it needs to coincide
1478 with one of the source operands. After reload, it has to be
1479 the first source operand. */
1480 if (GET_CODE (operands[0]) == MEM)
1481 return rtx_equal_p (operands[0], operands[1])
1482 || (!reload_completed && rtx_equal_p (operands[0], operands[2]));
1483
1484 return true;
1485 }
1486
1487 /* Narrow logical operation CODE of memory operand MEMOP with immediate
1488 operand IMMOP to switch from SS to SI type instructions. */
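/* For example, an SImode AND of a memory operand with 0xffffff0f only
   changes the least significant byte, so it can be rewritten as a QImode
   AND (NI) of that byte with 0x0f. */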
1489
1490 void
1491 s390_narrow_logical_operator (enum rtx_code code, rtx *memop, rtx *immop)
1492 {
1493 int def = code == AND ? -1 : 0;
1494 HOST_WIDE_INT mask;
1495 int part;
1496
1497 gcc_assert (GET_CODE (*memop) == MEM);
1498 gcc_assert (!MEM_VOLATILE_P (*memop));
1499
1500 mask = s390_extract_part (*immop, QImode, def);
1501 part = s390_single_part (*immop, GET_MODE (*memop), QImode, def);
1502 gcc_assert (part >= 0);
1503
1504 *memop = adjust_address (*memop, QImode, part);
1505 *immop = gen_int_mode (mask, QImode);
1506 }
1507
1508
1509 /* How to allocate a 'struct machine_function'. */
1510
1511 static struct machine_function *
1512 s390_init_machine_status (void)
1513 {
1514 return ggc_alloc_cleared_machine_function ();
1515 }
1516
1517 /* Change optimizations to be performed, depending on the
1518 optimization level. */
1519
1520 static const struct default_options s390_option_optimization_table[] =
1521 {
1522 { OPT_LEVELS_1_PLUS, OPT_fomit_frame_pointer, NULL, 1 },
1523
1524 /* ??? There are apparently still problems with -fcaller-saves. */
1525 { OPT_LEVELS_ALL, OPT_fcaller_saves, NULL, 0 },
1526
1527 /* Use MVCLE instructions to decrease code size if requested. */
1528 { OPT_LEVELS_SIZE, OPT_mmvcle, NULL, 1 },
1529
1530 { OPT_LEVELS_NONE, 0, NULL, 0 }
1531 };
1532
1533 /* Implement TARGET_OPTION_INIT_STRUCT. */
1534
1535 static void
1536 s390_option_init_struct (struct gcc_options *opts)
1537 {
1538 /* By default, always emit DWARF-2 unwind info. This allows debugging
1539 without maintaining a stack frame back-chain. */
1540 opts->x_flag_asynchronous_unwind_tables = 1;
1541 }
1542
1543 /* Return true if ARG is the name of a processor. Set *TYPE and *FLAGS
1544 to the associated processor_type and processor_flags if so. */
1545
1546 static bool
1547 s390_handle_arch_option (const char *arg,
1548 enum processor_type *type,
1549 int *flags)
1550 {
1551 static struct pta
1552 {
1553 const char *const name; /* processor name or nickname. */
1554 const enum processor_type processor;
1555 const int flags; /* From enum processor_flags. */
1556 }
1557 const processor_alias_table[] =
1558 {
1559 {"g5", PROCESSOR_9672_G5, PF_IEEE_FLOAT},
1560 {"g6", PROCESSOR_9672_G6, PF_IEEE_FLOAT},
1561 {"z900", PROCESSOR_2064_Z900, PF_IEEE_FLOAT | PF_ZARCH},
1562 {"z990", PROCESSOR_2084_Z990, PF_IEEE_FLOAT | PF_ZARCH
1563 | PF_LONG_DISPLACEMENT},
1564 {"z9-109", PROCESSOR_2094_Z9_109, PF_IEEE_FLOAT | PF_ZARCH
1565 | PF_LONG_DISPLACEMENT | PF_EXTIMM},
1566 {"z9-ec", PROCESSOR_2094_Z9_109, PF_IEEE_FLOAT | PF_ZARCH
1567 | PF_LONG_DISPLACEMENT | PF_EXTIMM | PF_DFP },
1568 {"z10", PROCESSOR_2097_Z10, PF_IEEE_FLOAT | PF_ZARCH
1569 | PF_LONG_DISPLACEMENT | PF_EXTIMM | PF_DFP | PF_Z10},
1570 {"z196", PROCESSOR_2817_Z196, PF_IEEE_FLOAT | PF_ZARCH
1571 | PF_LONG_DISPLACEMENT | PF_EXTIMM | PF_DFP | PF_Z10 | PF_Z196 },
1572 };
1573 size_t i;
1574
1575 for (i = 0; i < ARRAY_SIZE (processor_alias_table); i++)
1576 if (strcmp (arg, processor_alias_table[i].name) == 0)
1577 {
1578 *type = processor_alias_table[i].processor;
1579 *flags = processor_alias_table[i].flags;
1580 return true;
1581 }
1582
1583 *type = PROCESSOR_max;
1584 *flags = 0;
1585 return false;
1586 }
1587
1588 /* Implement TARGET_HANDLE_OPTION. */
1589
1590 static bool
1591 s390_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
1592 {
1593 switch (code)
1594 {
1595 case OPT_march_:
1596 return s390_handle_arch_option (arg, &s390_arch, &s390_arch_flags);
1597
1598 case OPT_mstack_guard_:
1599 if (sscanf (arg, HOST_WIDE_INT_PRINT_DEC, &s390_stack_guard) != 1)
1600 return false;
1601 if (exact_log2 (s390_stack_guard) == -1)
1602 error ("stack guard value must be an exact power of 2");
1603 return true;
1604
1605 case OPT_mstack_size_:
1606 if (sscanf (arg, HOST_WIDE_INT_PRINT_DEC, &s390_stack_size) != 1)
1607 return false;
1608 if (exact_log2 (s390_stack_size) == -1)
1609 error ("stack size must be an exact power of 2");
1610 return true;
1611
1612 case OPT_mtune_:
1613 return s390_handle_arch_option (arg, &s390_tune, &s390_tune_flags);
1614
1615 case OPT_mwarn_framesize_:
1616 return sscanf (arg, HOST_WIDE_INT_PRINT_DEC, &s390_warn_framesize) == 1;
1617
1618 default:
1619 return true;
1620 }
1621 }
1622
1623 static void
1624 s390_option_override (void)
1625 {
1626 /* Set up function hooks. */
1627 init_machine_status = s390_init_machine_status;
1628
1629 /* Architecture mode defaults according to ABI. */
1630 if (!(target_flags_explicit & MASK_ZARCH))
1631 {
1632 if (TARGET_64BIT)
1633 target_flags |= MASK_ZARCH;
1634 else
1635 target_flags &= ~MASK_ZARCH;
1636 }
1637
1638 /* Determine processor architectural level. */
1639 if (!s390_arch_string)
1640 {
1641 s390_arch_string = TARGET_ZARCH? "z900" : "g5";
1642 s390_handle_arch_option (s390_arch_string, &s390_arch, &s390_arch_flags);
1643 }
1644
1645 /* This check is triggered when the user specified a wrong -march=
1646 string and prevents subsequent error messages from being
1647 issued. */
1648 if (s390_arch == PROCESSOR_max)
1649 return;
1650
1651 /* Determine processor to tune for. */
1652 if (s390_tune == PROCESSOR_max)
1653 {
1654 s390_tune = s390_arch;
1655 s390_tune_flags = s390_arch_flags;
1656 }
1657
1658 /* Sanity checks. */
1659 if (TARGET_ZARCH && !TARGET_CPU_ZARCH)
1660 error ("z/Architecture mode not supported on %s", s390_arch_string);
1661 if (TARGET_64BIT && !TARGET_ZARCH)
1662 error ("64-bit ABI not supported in ESA/390 mode");
1663
1664 if (TARGET_HARD_DFP && !TARGET_DFP)
1665 {
1666 if (target_flags_explicit & MASK_HARD_DFP)
1667 {
1668 if (!TARGET_CPU_DFP)
1669 error ("hardware decimal floating point instructions"
1670 " not available on %s", s390_arch_string);
1671 if (!TARGET_ZARCH)
1672 error ("hardware decimal floating point instructions"
1673 " not available in ESA/390 mode");
1674 }
1675 else
1676 target_flags &= ~MASK_HARD_DFP;
1677 }
1678
1679 if ((target_flags_explicit & MASK_SOFT_FLOAT) && TARGET_SOFT_FLOAT)
1680 {
1681 if ((target_flags_explicit & MASK_HARD_DFP) && TARGET_HARD_DFP)
1682 error ("-mhard-dfp can%'t be used in conjunction with -msoft-float");
1683
1684 target_flags &= ~MASK_HARD_DFP;
1685 }
1686
1687 /* Set processor cost function. */
1688 switch (s390_tune)
1689 {
1690 case PROCESSOR_2084_Z990:
1691 s390_cost = &z990_cost;
1692 break;
1693 case PROCESSOR_2094_Z9_109:
1694 s390_cost = &z9_109_cost;
1695 break;
1696 case PROCESSOR_2097_Z10:
1697 s390_cost = &z10_cost;
break;
1698 case PROCESSOR_2817_Z196:
1699 s390_cost = &z196_cost;
1700 break;
1701 default:
1702 s390_cost = &z900_cost;
1703 }
1704
1705 if (TARGET_BACKCHAIN && TARGET_PACKED_STACK && TARGET_HARD_FLOAT)
1706 error ("-mbackchain -mpacked-stack -mhard-float are not supported "
1707 "in combination");
1708
1709 if (s390_stack_size)
1710 {
1711 if (s390_stack_guard >= s390_stack_size)
1712 error ("stack size must be greater than the stack guard value");
1713 else if (s390_stack_size > 1 << 16)
1714 error ("stack size must not be greater than 64k");
1715 }
1716 else if (s390_stack_guard)
1717 error ("-mstack-guard implies use of -mstack-size");
1718
1719 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
1720 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
1721 target_flags |= MASK_LONG_DOUBLE_128;
1722 #endif
1723
1724 if (s390_tune == PROCESSOR_2097_Z10
1725 || s390_tune == PROCESSOR_2817_Z196)
1726 {
1727 maybe_set_param_value (PARAM_MAX_UNROLLED_INSNS, 100,
1728 global_options.x_param_values,
1729 global_options_set.x_param_values);
1730 maybe_set_param_value (PARAM_MAX_UNROLL_TIMES, 32,
1731 global_options.x_param_values,
1732 global_options_set.x_param_values);
1733 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 2000,
1734 global_options.x_param_values,
1735 global_options_set.x_param_values);
1736 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEEL_TIMES, 64,
1737 global_options.x_param_values,
1738 global_options_set.x_param_values);
1739 }
1740
1741 maybe_set_param_value (PARAM_MAX_PENDING_LIST_LENGTH, 256,
1742 global_options.x_param_values,
1743 global_options_set.x_param_values);
1744 /* Values for loop prefetching. */
1745 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, 256,
1746 global_options.x_param_values,
1747 global_options_set.x_param_values);
1748 maybe_set_param_value (PARAM_L1_CACHE_SIZE, 128,
1749 global_options.x_param_values,
1750 global_options_set.x_param_values);
1751 /* s390 has more than 2 levels and the size is much larger. Since
1752 we are always running virtualized, assume that we only get a small
1753 part of the caches above L1. */
1754 maybe_set_param_value (PARAM_L2_CACHE_SIZE, 1500,
1755 global_options.x_param_values,
1756 global_options_set.x_param_values);
1757 maybe_set_param_value (PARAM_PREFETCH_MIN_INSN_TO_MEM_RATIO, 2,
1758 global_options.x_param_values,
1759 global_options_set.x_param_values);
1760 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6,
1761 global_options.x_param_values,
1762 global_options_set.x_param_values);
1763
1764 /* This cannot reside in s390_option_optimization_table since HAVE_prefetch
1765 requires the arch flags to be evaluated already. Since prefetching
1766 is beneficial on s390, we enable it if available. */
1767 if (flag_prefetch_loop_arrays < 0 && HAVE_prefetch && optimize >= 3)
1768 flag_prefetch_loop_arrays = 1;
1769 }
1770
1771 /* Map for smallest class containing reg regno. */
1772
1773 const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
1774 { GENERAL_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1775 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1776 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1777 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1778 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1779 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1780 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1781 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1782 ADDR_REGS, CC_REGS, ADDR_REGS, ADDR_REGS,
1783 ACCESS_REGS, ACCESS_REGS
1784 };
1785
1786 /* Return attribute type of insn. */
1787
1788 static enum attr_type
1789 s390_safe_attr_type (rtx insn)
1790 {
1791 if (recog_memoized (insn) >= 0)
1792 return get_attr_type (insn);
1793 else
1794 return TYPE_NONE;
1795 }
1796
1797 /* Return true if DISP is a valid short displacement. */
1798
1799 static bool
1800 s390_short_displacement (rtx disp)
1801 {
1802 /* No displacement is OK. */
1803 if (!disp)
1804 return true;
1805
1806 /* Without the long displacement facility we don't need to
1807 distinguish between long and short displacements. */
1808 if (!TARGET_LONG_DISPLACEMENT)
1809 return true;
1810
1811 /* Integer displacement in range. */
1812 if (GET_CODE (disp) == CONST_INT)
1813 return INTVAL (disp) >= 0 && INTVAL (disp) < 4096;
1814
1815 /* GOT offset is not OK, the GOT can be large. */
1816 if (GET_CODE (disp) == CONST
1817 && GET_CODE (XEXP (disp, 0)) == UNSPEC
1818 && (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
1819 || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
1820 return false;
1821
1822 /* All other symbolic constants are literal pool references,
1823 which are OK as the literal pool must be small. */
1824 if (GET_CODE (disp) == CONST)
1825 return true;
1826
1827 return false;
1828 }
1829
1830 /* Decompose a RTL expression ADDR for a memory address into
1831 its components, returned in OUT.
1832
1833 Returns false if ADDR is not a valid memory address, true
1834 otherwise. If OUT is NULL, don't return the components,
1835 but check for validity only.
1836
1837 Note: Only addresses in canonical form are recognized.
1838 LEGITIMIZE_ADDRESS should convert non-canonical forms to the
1839 canonical form so that they will be recognized. */
1840
1841 static int
1842 s390_decompose_address (rtx addr, struct s390_address *out)
1843 {
1844 HOST_WIDE_INT offset = 0;
1845 rtx base = NULL_RTX;
1846 rtx indx = NULL_RTX;
1847 rtx disp = NULL_RTX;
1848 rtx orig_disp;
1849 bool pointer = false;
1850 bool base_ptr = false;
1851 bool indx_ptr = false;
1852 bool literal_pool = false;
1853
1854 /* We may need to substitute the literal pool base register into the address
1855 below. However, at this point we do not know which register is going to
1856 be used as base, so we substitute the arg pointer register. This is going
1857 to be treated as holding a pointer below -- it shouldn't be used for any
1858 other purpose. */
1859 rtx fake_pool_base = gen_rtx_REG (Pmode, ARG_POINTER_REGNUM);
1860
1861 /* Decompose address into base + index + displacement. */
1862
1863 if (GET_CODE (addr) == REG || GET_CODE (addr) == UNSPEC)
1864 base = addr;
1865
1866 else if (GET_CODE (addr) == PLUS)
1867 {
1868 rtx op0 = XEXP (addr, 0);
1869 rtx op1 = XEXP (addr, 1);
1870 enum rtx_code code0 = GET_CODE (op0);
1871 enum rtx_code code1 = GET_CODE (op1);
1872
1873 if (code0 == REG || code0 == UNSPEC)
1874 {
1875 if (code1 == REG || code1 == UNSPEC)
1876 {
1877 indx = op0; /* index + base */
1878 base = op1;
1879 }
1880
1881 else
1882 {
1883 base = op0; /* base + displacement */
1884 disp = op1;
1885 }
1886 }
1887
1888 else if (code0 == PLUS)
1889 {
1890 indx = XEXP (op0, 0); /* index + base + disp */
1891 base = XEXP (op0, 1);
1892 disp = op1;
1893 }
1894
1895 else
1896 {
1897 return false;
1898 }
1899 }
1900
1901 else
1902 disp = addr; /* displacement */
1903
1904 /* Extract integer part of displacement. */
1905 orig_disp = disp;
1906 if (disp)
1907 {
1908 if (GET_CODE (disp) == CONST_INT)
1909 {
1910 offset = INTVAL (disp);
1911 disp = NULL_RTX;
1912 }
1913 else if (GET_CODE (disp) == CONST
1914 && GET_CODE (XEXP (disp, 0)) == PLUS
1915 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
1916 {
1917 offset = INTVAL (XEXP (XEXP (disp, 0), 1));
1918 disp = XEXP (XEXP (disp, 0), 0);
1919 }
1920 }
1921
1922 /* Strip off CONST here to avoid special case tests later. */
1923 if (disp && GET_CODE (disp) == CONST)
1924 disp = XEXP (disp, 0);
1925
1926 /* We can convert literal pool addresses to
1927 displacements by basing them off the base register. */
1928 if (disp && GET_CODE (disp) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (disp))
1929 {
1930 /* Either base or index must be free to hold the base register. */
1931 if (!base)
1932 base = fake_pool_base, literal_pool = true;
1933 else if (!indx)
1934 indx = fake_pool_base, literal_pool = true;
1935 else
1936 return false;
1937
1938 /* Mark up the displacement. */
1939 disp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, disp),
1940 UNSPEC_LTREL_OFFSET);
1941 }
1942
1943 /* Validate base register. */
1944 if (base)
1945 {
1946 if (GET_CODE (base) == UNSPEC)
1947 switch (XINT (base, 1))
1948 {
1949 case UNSPEC_LTREF:
1950 if (!disp)
1951 disp = gen_rtx_UNSPEC (Pmode,
1952 gen_rtvec (1, XVECEXP (base, 0, 0)),
1953 UNSPEC_LTREL_OFFSET);
1954 else
1955 return false;
1956
1957 base = XVECEXP (base, 0, 1);
1958 break;
1959
1960 case UNSPEC_LTREL_BASE:
1961 if (XVECLEN (base, 0) == 1)
1962 base = fake_pool_base, literal_pool = true;
1963 else
1964 base = XVECEXP (base, 0, 1);
1965 break;
1966
1967 default:
1968 return false;
1969 }
1970
1971 if (!REG_P (base)
1972 || (GET_MODE (base) != SImode
1973 && GET_MODE (base) != Pmode))
1974 return false;
1975
1976 if (REGNO (base) == STACK_POINTER_REGNUM
1977 || REGNO (base) == FRAME_POINTER_REGNUM
1978 || ((reload_completed || reload_in_progress)
1979 && frame_pointer_needed
1980 && REGNO (base) == HARD_FRAME_POINTER_REGNUM)
1981 || REGNO (base) == ARG_POINTER_REGNUM
1982 || (flag_pic
1983 && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
1984 pointer = base_ptr = true;
1985
1986 if ((reload_completed || reload_in_progress)
1987 && base == cfun->machine->base_reg)
1988 pointer = base_ptr = literal_pool = true;
1989 }
1990
1991 /* Validate index register. */
1992 if (indx)
1993 {
1994 if (GET_CODE (indx) == UNSPEC)
1995 switch (XINT (indx, 1))
1996 {
1997 case UNSPEC_LTREF:
1998 if (!disp)
1999 disp = gen_rtx_UNSPEC (Pmode,
2000 gen_rtvec (1, XVECEXP (indx, 0, 0)),
2001 UNSPEC_LTREL_OFFSET);
2002 else
2003 return false;
2004
2005 indx = XVECEXP (indx, 0, 1);
2006 break;
2007
2008 case UNSPEC_LTREL_BASE:
2009 if (XVECLEN (indx, 0) == 1)
2010 indx = fake_pool_base, literal_pool = true;
2011 else
2012 indx = XVECEXP (indx, 0, 1);
2013 break;
2014
2015 default:
2016 return false;
2017 }
2018
2019 if (!REG_P (indx)
2020 || (GET_MODE (indx) != SImode
2021 && GET_MODE (indx) != Pmode))
2022 return false;
2023
2024 if (REGNO (indx) == STACK_POINTER_REGNUM
2025 || REGNO (indx) == FRAME_POINTER_REGNUM
2026 || ((reload_completed || reload_in_progress)
2027 && frame_pointer_needed
2028 && REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
2029 || REGNO (indx) == ARG_POINTER_REGNUM
2030 || (flag_pic
2031 && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
2032 pointer = indx_ptr = true;
2033
2034 if ((reload_completed || reload_in_progress)
2035 && indx == cfun->machine->base_reg)
2036 pointer = indx_ptr = literal_pool = true;
2037 }
2038
2039 /* Prefer to use pointer as base, not index. */
2040 if (base && indx && !base_ptr
2041 && (indx_ptr || (!REG_POINTER (base) && REG_POINTER (indx))))
2042 {
2043 rtx tmp = base;
2044 base = indx;
2045 indx = tmp;
2046 }
2047
2048 /* Validate displacement. */
2049 if (!disp)
2050 {
2051 /* If virtual registers are involved, the displacement will change later
2052 anyway as the virtual registers get eliminated. This could make a
2053 valid displacement invalid, but it is more likely to make an invalid
2054 displacement valid, because we sometimes access the register save area
2055 via negative offsets to one of those registers.
2056 Thus we don't check the displacement for validity here. If after
2057 elimination the displacement turns out to be invalid after all,
2058 this is fixed up by reload in any case. */
2059 if (base != arg_pointer_rtx
2060 && indx != arg_pointer_rtx
2061 && base != return_address_pointer_rtx
2062 && indx != return_address_pointer_rtx
2063 && base != frame_pointer_rtx
2064 && indx != frame_pointer_rtx
2065 && base != virtual_stack_vars_rtx
2066 && indx != virtual_stack_vars_rtx)
2067 if (!DISP_IN_RANGE (offset))
2068 return false;
2069 }
2070 else
2071 {
2072 /* All the special cases are pointers. */
2073 pointer = true;
2074
2075 /* In the small-PIC case, the linker converts @GOT
2076 and @GOTNTPOFF offsets to possible displacements. */
2077 if (GET_CODE (disp) == UNSPEC
2078 && (XINT (disp, 1) == UNSPEC_GOT
2079 || XINT (disp, 1) == UNSPEC_GOTNTPOFF)
2080 && flag_pic == 1)
2081 {
2082 ;
2083 }
2084
2085 /* Accept pool label offsets. */
2086 else if (GET_CODE (disp) == UNSPEC
2087 && XINT (disp, 1) == UNSPEC_POOL_OFFSET)
2088 ;
2089
2090 /* Accept literal pool references. */
2091 else if (GET_CODE (disp) == UNSPEC
2092 && XINT (disp, 1) == UNSPEC_LTREL_OFFSET)
2093 {
2094 /* In case CSE pulled a non-literal-pool reference out of
2095 the pool we have to reject the address. This is
2096 especially important when loading the GOT pointer on
2097 non-zarch CPUs. In this case the literal pool contains an
2098 lt-relative offset to the _GLOBAL_OFFSET_TABLE_ label which
2099 will most likely exceed the displacement range. */
2100 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
2101 || !CONSTANT_POOL_ADDRESS_P (XVECEXP (disp, 0, 0)))
2102 return false;
2103
2104 orig_disp = gen_rtx_CONST (Pmode, disp);
2105 if (offset)
2106 {
2107 /* If we have an offset, make sure it does not
2108 exceed the size of the constant pool entry. */
2109 rtx sym = XVECEXP (disp, 0, 0);
2110 if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))
2111 return false;
2112
2113 orig_disp = plus_constant (orig_disp, offset);
2114 }
2115 }
2116
2117 else
2118 return false;
2119 }
2120
2121 if (!base && !indx)
2122 pointer = true;
2123
2124 if (out)
2125 {
2126 out->base = base;
2127 out->indx = indx;
2128 out->disp = orig_disp;
2129 out->pointer = pointer;
2130 out->literal_pool = literal_pool;
2131 }
2132
2133 return true;
2134 }
2135
2136 /* Decompose a RTL expression OP for a shift count into its components,
2137 and return the base register in BASE and the offset in OFFSET.
2138
2139 Return true if OP is a valid shift count, false if not. */
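/* For example, (const_int 3), (reg B) and (plus (reg B) (const_int 3))
   are all accepted as shift counts; SUBREGs around the register are
   stripped, and any other shape of OP is rejected.  */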
2140
2141 bool
2142 s390_decompose_shift_count (rtx op, rtx *base, HOST_WIDE_INT *offset)
2143 {
2144 HOST_WIDE_INT off = 0;
2145
2146 /* We can have an integer constant, an address register,
2147 or a sum of the two. */
2148 if (GET_CODE (op) == CONST_INT)
2149 {
2150 off = INTVAL (op);
2151 op = NULL_RTX;
2152 }
2153 if (op && GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT)
2154 {
2155 off = INTVAL (XEXP (op, 1));
2156 op = XEXP (op, 0);
2157 }
2158 while (op && GET_CODE (op) == SUBREG)
2159 op = SUBREG_REG (op);
2160
2161 if (op && GET_CODE (op) != REG)
2162 return false;
2163
2164 if (offset)
2165 *offset = off;
2166 if (base)
2167 *base = op;
2168
2169 return true;
2170 }
2171
2172
2173 /* Return true if CODE is a valid address without index. */
2174
2175 bool
2176 s390_legitimate_address_without_index_p (rtx op)
2177 {
2178 struct s390_address addr;
2179
2180 if (!s390_decompose_address (XEXP (op, 0), &addr))
2181 return false;
2182 if (addr.indx)
2183 return false;
2184
2185 return true;
2186 }
2187
2188
2189 /* Return true if ADDR is of kind symbol_ref or symbol_ref + const_int
2190 and return these parts in SYMREF and ADDEND. You can pass NULL in
2191 SYMREF and/or ADDEND if you are not interested in these values.
2192 Literal pool references are *not* considered symbol references. */
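/* For example, (const (plus (symbol_ref "sym") (const_int 8))) yields
   *SYMREF = the SYMBOL_REF and *ADDEND = 8, while a SYMBOL_REF pointing
   into the literal pool makes this function return false.  */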
2193
2194 static bool
2195 s390_symref_operand_p (rtx addr, rtx *symref, HOST_WIDE_INT *addend)
2196 {
2197 HOST_WIDE_INT tmpaddend = 0;
2198
2199 if (GET_CODE (addr) == CONST)
2200 addr = XEXP (addr, 0);
2201
2202 if (GET_CODE (addr) == PLUS)
2203 {
2204 if (GET_CODE (XEXP (addr, 0)) == SYMBOL_REF
2205 && !CONSTANT_POOL_ADDRESS_P (XEXP (addr, 0))
2206 && CONST_INT_P (XEXP (addr, 1)))
2207 {
2208 tmpaddend = INTVAL (XEXP (addr, 1));
2209 addr = XEXP (addr, 0);
2210 }
2211 else
2212 return false;
2213 }
2214 else
2215 if (GET_CODE (addr) != SYMBOL_REF || CONSTANT_POOL_ADDRESS_P (addr))
2216 return false;
2217
2218 if (symref)
2219 *symref = addr;
2220 if (addend)
2221 *addend = tmpaddend;
2222
2223 return true;
2224 }
2225
2226
2227 /* Return true if the address in OP is valid for constraint letter C
2228 if wrapped in a MEM rtx. Set LIT_POOL_OK to true if literal
2229 pool MEMs should be accepted. Only the Q, R, S, T constraint
2230 letters are allowed for C. */
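/* As a rough summary of the cases below:
     Q: base + short displacement, no index register
     R: base + index + short displacement
     S: base + long displacement, no index register
     T: base + index + long displacement
   R and T additionally accept addresses reload can still fix up.  */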
2231
2232 static int
2233 s390_check_qrst_address (char c, rtx op, bool lit_pool_ok)
2234 {
2235 struct s390_address addr;
2236 bool decomposed = false;
2237
2238 /* This check makes sure that no symbolic address (except literal
2239 pool references) are accepted by the R or T constraints. */
2240 if (s390_symref_operand_p (op, NULL, NULL))
2241 return 0;
2242
2243 /* Ensure literal pool references are only accepted if LIT_POOL_OK. */
2244 if (!lit_pool_ok)
2245 {
2246 if (!s390_decompose_address (op, &addr))
2247 return 0;
2248 if (addr.literal_pool)
2249 return 0;
2250 decomposed = true;
2251 }
2252
2253 switch (c)
2254 {
2255 case 'Q': /* no index short displacement */
2256 if (!decomposed && !s390_decompose_address (op, &addr))
2257 return 0;
2258 if (addr.indx)
2259 return 0;
2260 if (!s390_short_displacement (addr.disp))
2261 return 0;
2262 break;
2263
2264 case 'R': /* with index short displacement */
2265 if (TARGET_LONG_DISPLACEMENT)
2266 {
2267 if (!decomposed && !s390_decompose_address (op, &addr))
2268 return 0;
2269 if (!s390_short_displacement (addr.disp))
2270 return 0;
2271 }
2272 /* Any invalid address here will be fixed up by reload,
2273 so accept it for the most generic constraint. */
2274 break;
2275
2276 case 'S': /* no index long displacement */
2277 if (!TARGET_LONG_DISPLACEMENT)
2278 return 0;
2279 if (!decomposed && !s390_decompose_address (op, &addr))
2280 return 0;
2281 if (addr.indx)
2282 return 0;
2283 if (s390_short_displacement (addr.disp))
2284 return 0;
2285 break;
2286
2287 case 'T': /* with index long displacement */
2288 if (!TARGET_LONG_DISPLACEMENT)
2289 return 0;
2290 /* Any invalid address here will be fixed up by reload,
2291 so accept it for the most generic constraint. */
2292 if ((decomposed || s390_decompose_address (op, &addr))
2293 && s390_short_displacement (addr.disp))
2294 return 0;
2295 break;
2296 default:
2297 return 0;
2298 }
2299 return 1;
2300 }
2301
2302
2303 /* Evaluates constraint strings described by the regular expression
2304 ([A|B|Z](Q|R|S|T))|U|W|Y and returns 1 if OP is a valid operand for
2305 the constraint given in STR, and 0 otherwise. */
2306
2307 int
2308 s390_mem_constraint (const char *str, rtx op)
2309 {
2310 char c = str[0];
2311
2312 switch (c)
2313 {
2314 case 'A':
2315 /* Check for offsettable variants of memory constraints. */
2316 if (!MEM_P (op) || MEM_VOLATILE_P (op))
2317 return 0;
2318 if ((reload_completed || reload_in_progress)
2319 ? !offsettable_memref_p (op) : !offsettable_nonstrict_memref_p (op))
2320 return 0;
2321 return s390_check_qrst_address (str[1], XEXP (op, 0), true);
2322 case 'B':
2323 /* Check for non-literal-pool variants of memory constraints. */
2324 if (!MEM_P (op))
2325 return 0;
2326 return s390_check_qrst_address (str[1], XEXP (op, 0), false);
2327 case 'Q':
2328 case 'R':
2329 case 'S':
2330 case 'T':
2331 if (GET_CODE (op) != MEM)
2332 return 0;
2333 return s390_check_qrst_address (c, XEXP (op, 0), true);
2334 case 'U':
2335 return (s390_check_qrst_address ('Q', op, true)
2336 || s390_check_qrst_address ('R', op, true));
2337 case 'W':
2338 return (s390_check_qrst_address ('S', op, true)
2339 || s390_check_qrst_address ('T', op, true));
2340 case 'Y':
2341 /* Simply check for the basic form of a shift count. Reload will
2342 take care of making sure we have a proper base register. */
2343 if (!s390_decompose_shift_count (op, NULL, NULL))
2344 return 0;
2345 break;
2346 case 'Z':
2347 return s390_check_qrst_address (str[1], op, true);
2348 default:
2349 return 0;
2350 }
2351 return 1;
2352 }
2353
2354
2355 /* Evaluates constraint strings starting with letter O. Input
2356 parameter C is the letter following the "O" in the constraint
2357 string. Returns 1 if VALUE meets the respective constraint and 0
2358 otherwise. */
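/* Roughly: 's' accepts values that fit into a signed 32-bit immediate,
   while 'p' and 'n' use s390_single_part to require that only a single
   32-bit part of the value (respectively of VALUE - 1) deviates from a
   default part of 0 (respectively -1).  */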
2359
2360 int
2361 s390_O_constraint_str (const char c, HOST_WIDE_INT value)
2362 {
2363 if (!TARGET_EXTIMM)
2364 return 0;
2365
2366 switch (c)
2367 {
2368 case 's':
2369 return trunc_int_for_mode (value, SImode) == value;
2370
2371 case 'p':
2372 return value == 0
2373 || s390_single_part (GEN_INT (value), DImode, SImode, 0) == 1;
2374
2375 case 'n':
2376 return s390_single_part (GEN_INT (value - 1), DImode, SImode, -1) == 1;
2377
2378 default:
2379 gcc_unreachable ();
2380 }
2381 }
2382
2383
2384 /* Evaluates constraint strings starting with letter N. Parameter STR
2385 contains the letters following letter "N" in the constraint string.
2386 Returns true if VALUE matches the constraint. */
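/* The string has the form <part><part_mode><mode><default>, e.g. "3HD0":
   the part number (or 'x' for any part), the mode of a single part
   (Q/H/S), the mode of the whole value (H/S/D), and the default value
   of the remaining parts ('0' or 'F' for all ones).  */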
2387
2388 int
2389 s390_N_constraint_str (const char *str, HOST_WIDE_INT value)
2390 {
2391 enum machine_mode mode, part_mode;
2392 int def;
2393 int part, part_goal;
2394
2395
2396 if (str[0] == 'x')
2397 part_goal = -1;
2398 else
2399 part_goal = str[0] - '0';
2400
2401 switch (str[1])
2402 {
2403 case 'Q':
2404 part_mode = QImode;
2405 break;
2406 case 'H':
2407 part_mode = HImode;
2408 break;
2409 case 'S':
2410 part_mode = SImode;
2411 break;
2412 default:
2413 return 0;
2414 }
2415
2416 switch (str[2])
2417 {
2418 case 'H':
2419 mode = HImode;
2420 break;
2421 case 'S':
2422 mode = SImode;
2423 break;
2424 case 'D':
2425 mode = DImode;
2426 break;
2427 default:
2428 return 0;
2429 }
2430
2431 switch (str[3])
2432 {
2433 case '0':
2434 def = 0;
2435 break;
2436 case 'F':
2437 def = -1;
2438 break;
2439 default:
2440 return 0;
2441 }
2442
2443 if (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (part_mode))
2444 return 0;
2445
2446 part = s390_single_part (GEN_INT (value), mode, part_mode, def);
2447 if (part < 0)
2448 return 0;
2449 if (part_goal != -1 && part_goal != part)
2450 return 0;
2451
2452 return 1;
2453 }
2454
2455
2456 /* Returns true if the input parameter VALUE is a float zero. */
2457
2458 int
2459 s390_float_const_zero_p (rtx value)
2460 {
2461 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
2462 && value == CONST0_RTX (GET_MODE (value)));
2463 }
2464
2465 /* Implement TARGET_REGISTER_MOVE_COST. */
2466
2467 static int
2468 s390_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2469 reg_class_t from, reg_class_t to)
2470 {
2471 /* On s390, copy between fprs and gprs is expensive. */
2472 if ((reg_classes_intersect_p (from, GENERAL_REGS)
2473 && reg_classes_intersect_p (to, FP_REGS))
2474 || (reg_classes_intersect_p (from, FP_REGS)
2475 && reg_classes_intersect_p (to, GENERAL_REGS)))
2476 return 10;
2477
2478 return 1;
2479 }
2480
2481 /* Implement TARGET_MEMORY_MOVE_COST. */
2482
2483 static int
2484 s390_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2485 reg_class_t rclass ATTRIBUTE_UNUSED,
2486 bool in ATTRIBUTE_UNUSED)
2487 {
2488 return 1;
2489 }
2490
2491 /* Compute a (partial) cost for rtx X. Return true if the complete
2492 cost has been computed, and false if subexpressions should be
2493 scanned. In either case, *TOTAL contains the cost result.
2494 CODE contains GET_CODE (x), OUTER_CODE contains the code
2495 of the superexpression of x. */
2496
2497 static bool
2498 s390_rtx_costs (rtx x, int code, int outer_code, int *total,
2499 bool speed ATTRIBUTE_UNUSED)
2500 {
2501 switch (code)
2502 {
2503 case CONST:
2504 case CONST_INT:
2505 case LABEL_REF:
2506 case SYMBOL_REF:
2507 case CONST_DOUBLE:
2508 case MEM:
2509 *total = 0;
2510 return true;
2511
2512 case ASHIFT:
2513 case ASHIFTRT:
2514 case LSHIFTRT:
2515 case ROTATE:
2516 case ROTATERT:
2517 case AND:
2518 case IOR:
2519 case XOR:
2520 case NEG:
2521 case NOT:
2522 *total = COSTS_N_INSNS (1);
2523 return false;
2524
2525 case PLUS:
2526 case MINUS:
2527 *total = COSTS_N_INSNS (1);
2528 return false;
2529
2530 case MULT:
2531 switch (GET_MODE (x))
2532 {
2533 case SImode:
2534 {
2535 rtx left = XEXP (x, 0);
2536 rtx right = XEXP (x, 1);
2537 if (GET_CODE (right) == CONST_INT
2538 && CONST_OK_FOR_K (INTVAL (right)))
2539 *total = s390_cost->mhi;
2540 else if (GET_CODE (left) == SIGN_EXTEND)
2541 *total = s390_cost->mh;
2542 else
2543 *total = s390_cost->ms; /* msr, ms, msy */
2544 break;
2545 }
2546 case DImode:
2547 {
2548 rtx left = XEXP (x, 0);
2549 rtx right = XEXP (x, 1);
2550 if (TARGET_ZARCH)
2551 {
2552 if (GET_CODE (right) == CONST_INT
2553 && CONST_OK_FOR_K (INTVAL (right)))
2554 *total = s390_cost->mghi;
2555 else if (GET_CODE (left) == SIGN_EXTEND)
2556 *total = s390_cost->msgf;
2557 else
2558 *total = s390_cost->msg; /* msgr, msg */
2559 }
2560 else /* TARGET_31BIT */
2561 {
2562 if (GET_CODE (left) == SIGN_EXTEND
2563 && GET_CODE (right) == SIGN_EXTEND)
2564 /* mulsidi case: mr, m */
2565 *total = s390_cost->m;
2566 else if (GET_CODE (left) == ZERO_EXTEND
2567 && GET_CODE (right) == ZERO_EXTEND
2568 && TARGET_CPU_ZARCH)
2569 /* umulsidi case: ml, mlr */
2570 *total = s390_cost->ml;
2571 else
2572 /* Complex calculation is required. */
2573 *total = COSTS_N_INSNS (40);
2574 }
2575 break;
2576 }
2577 case SFmode:
2578 case DFmode:
2579 *total = s390_cost->mult_df;
2580 break;
2581 case TFmode:
2582 *total = s390_cost->mxbr;
2583 break;
2584 default:
2585 return false;
2586 }
2587 return false;
2588
2589 case FMA:
2590 switch (GET_MODE (x))
2591 {
2592 case DFmode:
2593 *total = s390_cost->madbr;
2594 break;
2595 case SFmode:
2596 *total = s390_cost->maebr;
2597 break;
2598 default:
2599 return false;
2600 }
2601 /* Negate in the third argument is free: FMSUB. */
2602 if (GET_CODE (XEXP (x, 2)) == NEG)
2603 {
2604 *total += (rtx_cost (XEXP (x, 0), FMA, speed)
2605 + rtx_cost (XEXP (x, 1), FMA, speed)
2606 + rtx_cost (XEXP (XEXP (x, 2), 0), FMA, speed));
2607 return true;
2608 }
2609 return false;
2610
2611 case UDIV:
2612 case UMOD:
2613 if (GET_MODE (x) == TImode) /* 128 bit division */
2614 *total = s390_cost->dlgr;
2615 else if (GET_MODE (x) == DImode)
2616 {
2617 rtx right = XEXP (x, 1);
2618 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
2619 *total = s390_cost->dlr;
2620 else /* 64 by 64 bit division */
2621 *total = s390_cost->dlgr;
2622 }
2623 else if (GET_MODE (x) == SImode) /* 32 bit division */
2624 *total = s390_cost->dlr;
2625 return false;
2626
2627 case DIV:
2628 case MOD:
2629 if (GET_MODE (x) == DImode)
2630 {
2631 rtx right = XEXP (x, 1);
2632 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
2633 if (TARGET_ZARCH)
2634 *total = s390_cost->dsgfr;
2635 else
2636 *total = s390_cost->dr;
2637 else /* 64 by 64 bit division */
2638 *total = s390_cost->dsgr;
2639 }
2640 else if (GET_MODE (x) == SImode) /* 32 bit division */
2641 *total = s390_cost->dlr;
2642 else if (GET_MODE (x) == SFmode)
2643 {
2644 *total = s390_cost->debr;
2645 }
2646 else if (GET_MODE (x) == DFmode)
2647 {
2648 *total = s390_cost->ddbr;
2649 }
2650 else if (GET_MODE (x) == TFmode)
2651 {
2652 *total = s390_cost->dxbr;
2653 }
2654 return false;
2655
2656 case SQRT:
2657 if (GET_MODE (x) == SFmode)
2658 *total = s390_cost->sqebr;
2659 else if (GET_MODE (x) == DFmode)
2660 *total = s390_cost->sqdbr;
2661 else /* TFmode */
2662 *total = s390_cost->sqxbr;
2663 return false;
2664
2665 case SIGN_EXTEND:
2666 case ZERO_EXTEND:
2667 if (outer_code == MULT || outer_code == DIV || outer_code == MOD
2668 || outer_code == PLUS || outer_code == MINUS
2669 || outer_code == COMPARE)
2670 *total = 0;
2671 return false;
2672
2673 case COMPARE:
2674 *total = COSTS_N_INSNS (1);
2675 if (GET_CODE (XEXP (x, 0)) == AND
2676 && GET_CODE (XEXP (x, 1)) == CONST_INT
2677 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
2678 {
2679 rtx op0 = XEXP (XEXP (x, 0), 0);
2680 rtx op1 = XEXP (XEXP (x, 0), 1);
2681 rtx op2 = XEXP (x, 1);
2682
2683 if (memory_operand (op0, GET_MODE (op0))
2684 && s390_tm_ccmode (op1, op2, 0) != VOIDmode)
2685 return true;
2686 if (register_operand (op0, GET_MODE (op0))
2687 && s390_tm_ccmode (op1, op2, 1) != VOIDmode)
2688 return true;
2689 }
2690 return false;
2691
2692 default:
2693 return false;
2694 }
2695 }
2696
2697 /* Return the cost of an address rtx ADDR. */
2698
2699 static int
2700 s390_address_cost (rtx addr, bool speed ATTRIBUTE_UNUSED)
2701 {
2702 struct s390_address ad;
2703 if (!s390_decompose_address (addr, &ad))
2704 return 1000;
2705
2706 return ad.indx ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (1);
2707 }
2708
2709 /* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
2710 otherwise return 0. */
2711
2712 int
2713 tls_symbolic_operand (rtx op)
2714 {
2715 if (GET_CODE (op) != SYMBOL_REF)
2716 return 0;
2717 return SYMBOL_REF_TLS_MODEL (op);
2718 }
2719 \f
2720 /* Split DImode access register reference REG (on 64-bit) into its constituent
2721 low and high parts, and store them into LO and HI. Note that gen_lowpart/
2722 gen_highpart cannot be used as they assume all registers are word-sized,
2723 while our access registers have only half that size. */
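/* For example, for an access register pair a0/a1 holding a DImode value,
   *HI becomes a0 (the register with the lower, even number) and *LO
   becomes a1, i.e. the high word lives in the even register.  */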
2724
2725 void
2726 s390_split_access_reg (rtx reg, rtx *lo, rtx *hi)
2727 {
2728 gcc_assert (TARGET_64BIT);
2729 gcc_assert (ACCESS_REG_P (reg));
2730 gcc_assert (GET_MODE (reg) == DImode);
2731 gcc_assert (!(REGNO (reg) & 1));
2732
2733 *lo = gen_rtx_REG (SImode, REGNO (reg) + 1);
2734 *hi = gen_rtx_REG (SImode, REGNO (reg));
2735 }
2736
2737 /* Return true if OP contains a symbol reference.  */
2738
2739 bool
2740 symbolic_reference_mentioned_p (rtx op)
2741 {
2742 const char *fmt;
2743 int i;
2744
2745 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
2746 return 1;
2747
2748 fmt = GET_RTX_FORMAT (GET_CODE (op));
2749 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2750 {
2751 if (fmt[i] == 'E')
2752 {
2753 int j;
2754
2755 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2756 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2757 return 1;
2758 }
2759
2760 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
2761 return 1;
2762 }
2763
2764 return 0;
2765 }
2766
2767 /* Return true if OP contains a reference to a thread-local symbol. */
2768
2769 bool
2770 tls_symbolic_reference_mentioned_p (rtx op)
2771 {
2772 const char *fmt;
2773 int i;
2774
2775 if (GET_CODE (op) == SYMBOL_REF)
2776 return tls_symbolic_operand (op);
2777
2778 fmt = GET_RTX_FORMAT (GET_CODE (op));
2779 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2780 {
2781 if (fmt[i] == 'E')
2782 {
2783 int j;
2784
2785 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2786 if (tls_symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2787 return true;
2788 }
2789
2790 else if (fmt[i] == 'e' && tls_symbolic_reference_mentioned_p (XEXP (op, i)))
2791 return true;
2792 }
2793
2794 return false;
2795 }
2796
2797
2798 /* Return true if OP is a legitimate general operand when
2799 generating PIC code. It is given that flag_pic is on
2800 and that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2801
2802 int
2803 legitimate_pic_operand_p (rtx op)
2804 {
2805 /* Accept all non-symbolic constants. */
2806 if (!SYMBOLIC_CONST (op))
2807 return 1;
2808
2809 /* Reject everything else; must be handled
2810 via emit_symbolic_move. */
2811 return 0;
2812 }
2813
2814 /* Returns true if the constant value OP is a legitimate general operand.
2815 It is given that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2816
2817 int
2818 legitimate_constant_p (rtx op)
2819 {
2820 /* Accept all non-symbolic constants. */
2821 if (!SYMBOLIC_CONST (op))
2822 return 1;
2823
2824 /* Accept immediate LARL operands. */
2825 if (TARGET_CPU_ZARCH && larl_operand (op, VOIDmode))
2826 return 1;
2827
2828 /* Thread-local symbols are never legal constants. This is
2829 so that emit_call knows that computing such addresses
2830 might require a function call. */
2831 if (TLS_SYMBOLIC_CONST (op))
2832 return 0;
2833
2834 /* In the PIC case, symbolic constants must *not* be
2835 forced into the literal pool. We accept them here,
2836 so that they will be handled by emit_symbolic_move. */
2837 if (flag_pic)
2838 return 1;
2839
2840 /* All remaining non-PIC symbolic constants are
2841 forced into the literal pool. */
2842 return 0;
2843 }
2844
2845 /* Determine if it's legal to put X into the constant pool. This
2846 is not possible if X contains the address of a symbol that is
2847 not constant (TLS) or not known at final link time (PIC). */
2848
2849 static bool
2850 s390_cannot_force_const_mem (rtx x)
2851 {
2852 switch (GET_CODE (x))
2853 {
2854 case CONST_INT:
2855 case CONST_DOUBLE:
2856 /* Accept all non-symbolic constants. */
2857 return false;
2858
2859 case LABEL_REF:
2860 /* Labels are OK iff we are non-PIC. */
2861 return flag_pic != 0;
2862
2863 case SYMBOL_REF:
2864 /* 'Naked' TLS symbol references are never OK,
2865 non-TLS symbols are OK iff we are non-PIC. */
2866 if (tls_symbolic_operand (x))
2867 return true;
2868 else
2869 return flag_pic != 0;
2870
2871 case CONST:
2872 return s390_cannot_force_const_mem (XEXP (x, 0));
2873 case PLUS:
2874 case MINUS:
2875 return s390_cannot_force_const_mem (XEXP (x, 0))
2876 || s390_cannot_force_const_mem (XEXP (x, 1));
2877
2878 case UNSPEC:
2879 switch (XINT (x, 1))
2880 {
2881 /* Only lt-relative or GOT-relative UNSPECs are OK. */
2882 case UNSPEC_LTREL_OFFSET:
2883 case UNSPEC_GOT:
2884 case UNSPEC_GOTOFF:
2885 case UNSPEC_PLTOFF:
2886 case UNSPEC_TLSGD:
2887 case UNSPEC_TLSLDM:
2888 case UNSPEC_NTPOFF:
2889 case UNSPEC_DTPOFF:
2890 case UNSPEC_GOTNTPOFF:
2891 case UNSPEC_INDNTPOFF:
2892 return false;
2893
2894 /* If the literal pool shares the code section, execute
2895 template placeholders may be put into the pool as well. */
2896 case UNSPEC_INSN:
2897 return TARGET_CPU_ZARCH;
2898
2899 default:
2900 return true;
2901 }
2902 break;
2903
2904 default:
2905 gcc_unreachable ();
2906 }
2907 }
2908
2909 /* Returns true if the constant value OP is a legitimate general
2910 operand during and after reload. The difference from
2911 legitimate_constant_p is that this function will not accept
2912 a constant that would need to be forced to the literal pool
2913 before it can be used as operand.
2914 This function accepts all constants which can be loaded directly
2915 into a GPR. */
2916
2917 bool
2918 legitimate_reload_constant_p (rtx op)
2919 {
2920 /* Accept la(y) operands. */
2921 if (GET_CODE (op) == CONST_INT
2922 && DISP_IN_RANGE (INTVAL (op)))
2923 return true;
2924
2925 /* Accept l(g)hi/l(g)fi operands. */
2926 if (GET_CODE (op) == CONST_INT
2927 && (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_Os (INTVAL (op))))
2928 return true;
2929
2930 /* Accept lliXX operands. */
2931 if (TARGET_ZARCH
2932 && GET_CODE (op) == CONST_INT
2933 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2934 && s390_single_part (op, word_mode, HImode, 0) >= 0)
2935 return true;
2936
2937 if (TARGET_EXTIMM
2938 && GET_CODE (op) == CONST_INT
2939 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2940 && s390_single_part (op, word_mode, SImode, 0) >= 0)
2941 return true;
2942
2943 /* Accept larl operands. */
2944 if (TARGET_CPU_ZARCH
2945 && larl_operand (op, VOIDmode))
2946 return true;
2947
2948 /* Accept floating-point zero operands that fit into a single GPR. */
2949 if (GET_CODE (op) == CONST_DOUBLE
2950 && s390_float_const_zero_p (op)
2951 && GET_MODE_SIZE (GET_MODE (op)) <= UNITS_PER_WORD)
2952 return true;
2953
2954 /* Accept double-word operands that can be split. */
2955 if (GET_CODE (op) == CONST_INT
2956 && trunc_int_for_mode (INTVAL (op), word_mode) != INTVAL (op))
2957 {
2958 enum machine_mode dword_mode = word_mode == SImode ? DImode : TImode;
2959 rtx hi = operand_subword (op, 0, 0, dword_mode);
2960 rtx lo = operand_subword (op, 1, 0, dword_mode);
2961 return legitimate_reload_constant_p (hi)
2962 && legitimate_reload_constant_p (lo);
2963 }
2964
2965 /* Everything else cannot be handled without reload. */
2966 return false;
2967 }
2968
2969 /* Returns true if the constant value OP is a legitimate fp operand
2970 during and after reload.
2971 This function accepts all constants which can be loaded directly
2972 into an FPR. */
2973
2974 static bool
2975 legitimate_reload_fp_constant_p (rtx op)
2976 {
2977 /* Accept floating-point zero operands if the load zero instruction
2978 can be used. */
2979 if (TARGET_Z196
2980 && GET_CODE (op) == CONST_DOUBLE
2981 && s390_float_const_zero_p (op))
2982 return true;
2983
2984 return false;
2985 }
2986
2987 /* Given an rtx OP being reloaded into a reg required to be in class RCLASS,
2988 return the class of reg to actually use. */
2989
2990 static reg_class_t
2991 s390_preferred_reload_class (rtx op, reg_class_t rclass)
2992 {
2993 switch (GET_CODE (op))
2994 {
2995 /* Constants we cannot reload into general registers
2996 must be forced into the literal pool. */
2997 case CONST_DOUBLE:
2998 case CONST_INT:
2999 if (reg_class_subset_p (GENERAL_REGS, rclass)
3000 && legitimate_reload_constant_p (op))
3001 return GENERAL_REGS;
3002 else if (reg_class_subset_p (ADDR_REGS, rclass)
3003 && legitimate_reload_constant_p (op))
3004 return ADDR_REGS;
3005 else if (reg_class_subset_p (FP_REGS, rclass)
3006 && legitimate_reload_fp_constant_p (op))
3007 return FP_REGS;
3008 return NO_REGS;
3009
3010 /* If a symbolic constant or a PLUS is reloaded,
3011 it is most likely being used as an address, so
3012 prefer ADDR_REGS. If 'class' is not a superset
3013 of ADDR_REGS, e.g. FP_REGS, reject this reload. */
3014 case PLUS:
3015 case LABEL_REF:
3016 case SYMBOL_REF:
3017 case CONST:
3018 if (reg_class_subset_p (ADDR_REGS, rclass))
3019 return ADDR_REGS;
3020 else
3021 return NO_REGS;
3022
3023 default:
3024 break;
3025 }
3026
3027 return rclass;
3028 }
3029
3030 /* Return true if ADDR is SYMBOL_REF + addend with addend being a
3031 multiple of ALIGNMENT and the SYMBOL_REF being naturally
3032 aligned. */
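/* For example, with ALIGNMENT = 4 an address of the form symbol + 6 is
   rejected (the addend is not a multiple of 4), whereas symbol + 8 is
   accepted provided the SYMBOL_REF itself is naturally aligned.  */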
3033
3034 bool
3035 s390_check_symref_alignment (rtx addr, HOST_WIDE_INT alignment)
3036 {
3037 HOST_WIDE_INT addend;
3038 rtx symref;
3039
3040 if (!s390_symref_operand_p (addr, &symref, &addend))
3041 return false;
3042
3043 return (!SYMBOL_REF_NOT_NATURALLY_ALIGNED_P (symref)
3044 && !(addend & (alignment - 1)));
3045 }
3046
3047 /* ADDR is moved into REG using larl. If ADDR isn't a valid larl
3048 operand, SCRATCH is used to reload the even part of the address,
3049 and one is added afterwards. */
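/* For example, for ADDR = symbol + 5 the code below loads symbol + 4
   into SCRATCH via larl (even addends are valid larl operands) and then
   adds 1 with la, which leaves the condition code untouched.  */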
3050
3051 void
3052 s390_reload_larl_operand (rtx reg, rtx addr, rtx scratch)
3053 {
3054 HOST_WIDE_INT addend;
3055 rtx symref;
3056
3057 if (!s390_symref_operand_p (addr, &symref, &addend))
3058 gcc_unreachable ();
3059
3060 if (!(addend & 1))
3061 /* Easy case. The addend is even so larl will do fine. */
3062 emit_move_insn (reg, addr);
3063 else
3064 {
3065 /* We can leave the scratch register untouched if the target
3066 register is a valid base register. */
3067 if (REGNO (reg) < FIRST_PSEUDO_REGISTER
3068 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS)
3069 scratch = reg;
3070
3071 gcc_assert (REGNO (scratch) < FIRST_PSEUDO_REGISTER);
3072 gcc_assert (REGNO_REG_CLASS (REGNO (scratch)) == ADDR_REGS);
3073
3074 if (addend != 1)
3075 emit_move_insn (scratch,
3076 gen_rtx_CONST (Pmode,
3077 gen_rtx_PLUS (Pmode, symref,
3078 GEN_INT (addend - 1))));
3079 else
3080 emit_move_insn (scratch, symref);
3081
3082 /* Increment the address using la in order to avoid clobbering cc. */
3083 emit_move_insn (reg, gen_rtx_PLUS (Pmode, scratch, const1_rtx));
3084 }
3085 }
3086
3087 /* Generate what is necessary to move between REG and MEM using
3088 SCRATCH. The direction is given by TOMEM. */
3089
3090 void
3091 s390_reload_symref_address (rtx reg, rtx mem, rtx scratch, bool tomem)
3092 {
3093 /* Reload might have pulled a constant out of the literal pool.
3094 Force it back in. */
3095 if (CONST_INT_P (mem) || GET_CODE (mem) == CONST_DOUBLE
3096 || GET_CODE (mem) == CONST)
3097 mem = force_const_mem (GET_MODE (reg), mem);
3098
3099 gcc_assert (MEM_P (mem));
3100
3101 /* For a load from memory we can leave the scratch register
3102 untouched if the target register is a valid base register. */
3103 if (!tomem
3104 && REGNO (reg) < FIRST_PSEUDO_REGISTER
3105 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS
3106 && GET_MODE (reg) == GET_MODE (scratch))
3107 scratch = reg;
3108
3109 /* Load address into scratch register. Since we can't have a
3110 secondary reload for a secondary reload we have to cover the case
3111 where larl would need a secondary reload here as well. */
3112 s390_reload_larl_operand (scratch, XEXP (mem, 0), scratch);
3113
3114 /* Now we can use a standard load/store to do the move. */
3115 if (tomem)
3116 emit_move_insn (replace_equiv_address (mem, scratch), reg);
3117 else
3118 emit_move_insn (reg, replace_equiv_address (mem, scratch));
3119 }
3120
3121 /* Inform reload about cases where moving X with a mode MODE to a register in
3122 RCLASS requires an extra scratch or immediate register. Return the class
3123 needed for the immediate register. */
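/* Sketch of the cases handled below: moves touching the condition-code
   registers need a GENERAL_REGS intermediate; on z10, larl operands with
   an odd addend and insufficiently aligned symbolic memory accesses need
   a scratch register; and special reload patterns are used for a PLUS
   that is no valid LA operand, for multiword memory accesses whose
   second word would overflow the displacement, and for symbolic
   constants reloaded into non-address registers with -fPIC.  */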
3124
3125 static reg_class_t
3126 s390_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
3127 enum machine_mode mode, secondary_reload_info *sri)
3128 {
3129 enum reg_class rclass = (enum reg_class) rclass_i;
3130
3131 /* Intermediate register needed. */
3132 if (reg_classes_intersect_p (CC_REGS, rclass))
3133 return GENERAL_REGS;
3134
3135 if (TARGET_Z10)
3136 {
3137 /* On z10 several optimizer steps may generate larl operands with
3138 an odd addend. */
3139 if (in_p
3140 && s390_symref_operand_p (x, NULL, NULL)
3141 && mode == Pmode
3142 && !s390_check_symref_alignment (x, 2))
3143 sri->icode = ((mode == DImode) ? CODE_FOR_reloaddi_larl_odd_addend_z10
3144 : CODE_FOR_reloadsi_larl_odd_addend_z10);
3145
3146 /* On z10 we need a scratch register when moving QI, TI or floating
3147 point mode values from or to a memory location with a SYMBOL_REF
3148 or if the symref addend of a HI, SI or DI move is not aligned to the
3149 width of the access. */
3150 if (MEM_P (x)
3151 && s390_symref_operand_p (XEXP (x, 0), NULL, NULL)
3152 && (mode == QImode || mode == TImode || FLOAT_MODE_P (mode)
3153 || (!TARGET_ZARCH && mode == DImode)
3154 || ((mode == HImode || mode == SImode || mode == DImode)
3155 && (!s390_check_symref_alignment (XEXP (x, 0),
3156 GET_MODE_SIZE (mode))))))
3157 {
3158 #define __SECONDARY_RELOAD_CASE(M,m) \
3159 case M##mode: \
3160 if (TARGET_64BIT) \
3161 sri->icode = in_p ? CODE_FOR_reload##m##di_toreg_z10 : \
3162 CODE_FOR_reload##m##di_tomem_z10; \
3163 else \
3164 sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 : \
3165 CODE_FOR_reload##m##si_tomem_z10; \
3166 break;
3167
3168 switch (GET_MODE (x))
3169 {
3170 __SECONDARY_RELOAD_CASE (QI, qi);
3171 __SECONDARY_RELOAD_CASE (HI, hi);
3172 __SECONDARY_RELOAD_CASE (SI, si);
3173 __SECONDARY_RELOAD_CASE (DI, di);
3174 __SECONDARY_RELOAD_CASE (TI, ti);
3175 __SECONDARY_RELOAD_CASE (SF, sf);
3176 __SECONDARY_RELOAD_CASE (DF, df);
3177 __SECONDARY_RELOAD_CASE (TF, tf);
3178 __SECONDARY_RELOAD_CASE (SD, sd);
3179 __SECONDARY_RELOAD_CASE (DD, dd);
3180 __SECONDARY_RELOAD_CASE (TD, td);
3181
3182 default:
3183 gcc_unreachable ();
3184 }
3185 #undef __SECONDARY_RELOAD_CASE
3186 }
3187 }
3188
3189 /* We need a scratch register when loading a PLUS expression which
3190 is not a legitimate operand of the LOAD ADDRESS instruction. */
3191 if (in_p && s390_plus_operand (x, mode))
3192 sri->icode = (TARGET_64BIT ?
3193 CODE_FOR_reloaddi_plus : CODE_FOR_reloadsi_plus);
3194
3195 /* Performing a multiword move from or to memory we have to make sure the
3196 second chunk in memory is addressable without causing a displacement
3197 overflow. If that would be the case we calculate the address in
3198 a scratch register. */
3199 if (MEM_P (x)
3200 && GET_CODE (XEXP (x, 0)) == PLUS
3201 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3202 && !DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1))
3203 + GET_MODE_SIZE (mode) - 1))
3204 {
3205 /* For GENERAL_REGS a displacement overflow is no problem if occurring
3206 in an s_operand address since we may fall back to lm/stm. So we only
3207 have to care about overflows in the b+i+d case. */
3208 if ((reg_classes_intersect_p (GENERAL_REGS, rclass)
3209 && s390_class_max_nregs (GENERAL_REGS, mode) > 1
3210 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
3211 /* For FP_REGS no lm/stm is available so this check is triggered
3212 for displacement overflows in b+i+d and b+d like addresses. */
3213 || (reg_classes_intersect_p (FP_REGS, rclass)
3214 && s390_class_max_nregs (FP_REGS, mode) > 1))
3215 {
3216 if (in_p)
3217 sri->icode = (TARGET_64BIT ?
3218 CODE_FOR_reloaddi_nonoffmem_in :
3219 CODE_FOR_reloadsi_nonoffmem_in);
3220 else
3221 sri->icode = (TARGET_64BIT ?
3222 CODE_FOR_reloaddi_nonoffmem_out :
3223 CODE_FOR_reloadsi_nonoffmem_out);
3224 }
3225 }
3226
3227 /* A scratch address register is needed when a symbolic constant is
3228 copied to r0 when compiling with -fPIC. In other cases the target
3229 register might be used as temporary (see legitimize_pic_address). */
3230 if (in_p && SYMBOLIC_CONST (x) && flag_pic == 2 && rclass != ADDR_REGS)
3231 sri->icode = (TARGET_64BIT ?
3232 CODE_FOR_reloaddi_PIC_addr :
3233 CODE_FOR_reloadsi_PIC_addr);
3234
3235 /* Either scratch or no register needed. */
3236 return NO_REGS;
3237 }
3238
3239 /* Generate code to load SRC, which is PLUS that is not a
3240 legitimate operand for the LA instruction, into TARGET.
3241 SCRATCH may be used as scratch register. */
3242
3243 void
3244 s390_expand_plus_operand (rtx target, rtx src,
3245 rtx scratch)
3246 {
3247 rtx sum1, sum2;
3248 struct s390_address ad;
3249
3250 /* src must be a PLUS; get its two operands. */
3251 gcc_assert (GET_CODE (src) == PLUS);
3252 gcc_assert (GET_MODE (src) == Pmode);
3253
3254 /* Check if any of the two operands is already scheduled
3255 for replacement by reload. This can happen e.g. when
3256 float registers occur in an address. */
3257 sum1 = find_replacement (&XEXP (src, 0));
3258 sum2 = find_replacement (&XEXP (src, 1));
3259 src = gen_rtx_PLUS (Pmode, sum1, sum2);
3260
3261 /* If the address is already strictly valid, there's nothing to do. */
3262 if (!s390_decompose_address (src, &ad)
3263 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
3264 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
3265 {
3266 /* Otherwise, one of the operands cannot be an address register;
3267 we reload its value into the scratch register. */
3268 if (true_regnum (sum1) < 1 || true_regnum (sum1) > 15)
3269 {
3270 emit_move_insn (scratch, sum1);
3271 sum1 = scratch;
3272 }
3273 if (true_regnum (sum2) < 1 || true_regnum (sum2) > 15)
3274 {
3275 emit_move_insn (scratch, sum2);
3276 sum2 = scratch;
3277 }
3278
3279 /* According to the way these invalid addresses are generated
3280 in reload.c, it should never happen (at least on s390) that
3281 *neither* of the PLUS components, after find_replacements
3282 was applied, is an address register. */
3283 if (sum1 == scratch && sum2 == scratch)
3284 {
3285 debug_rtx (src);
3286 gcc_unreachable ();
3287 }
3288
3289 src = gen_rtx_PLUS (Pmode, sum1, sum2);
3290 }
3291
3292 /* Emit the LOAD ADDRESS pattern. Note that reload of PLUS
3293 is only ever performed on addresses, so we can mark the
3294 sum as legitimate for LA in any case. */
3295 s390_load_address (target, src);
3296 }
3297
3298
3299 /* Return true if ADDR is a valid memory address.
3300 STRICT specifies whether strict register checking applies. */
3301
3302 static bool
3303 s390_legitimate_address_p (enum machine_mode mode, rtx addr, bool strict)
3304 {
3305 struct s390_address ad;
3306
3307 if (TARGET_Z10
3308 && larl_operand (addr, VOIDmode)
3309 && (mode == VOIDmode
3310 || s390_check_symref_alignment (addr, GET_MODE_SIZE (mode))))
3311 return true;
3312
3313 if (!s390_decompose_address (addr, &ad))
3314 return false;
3315
3316 if (strict)
3317 {
3318 if (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
3319 return false;
3320
3321 if (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx)))
3322 return false;
3323 }
3324 else
3325 {
3326 if (ad.base
3327 && !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
3328 || REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
3329 return false;
3330
3331 if (ad.indx
3332 && !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
3333 || REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
3334 return false;
3335 }
3336 return true;
3337 }
3338
3339 /* Return true if OP is a valid operand for the LA instruction.
3340 In 31-bit, we need to prove that the result is used as an
3341 address, as LA performs only a 31-bit addition. */
3342
3343 bool
3344 legitimate_la_operand_p (rtx op)
3345 {
3346 struct s390_address addr;
3347 if (!s390_decompose_address (op, &addr))
3348 return false;
3349
3350 return (TARGET_64BIT || addr.pointer);
3351 }
3352
3353 /* Return true if it is valid *and* preferable to use LA to
3354 compute the sum of OP1 and OP2. */
3355
3356 bool
3357 preferred_la_operand_p (rtx op1, rtx op2)
3358 {
3359 struct s390_address addr;
3360
3361 if (op2 != const0_rtx)
3362 op1 = gen_rtx_PLUS (Pmode, op1, op2);
3363
3364 if (!s390_decompose_address (op1, &addr))
3365 return false;
3366 if (addr.base && !REGNO_OK_FOR_BASE_P (REGNO (addr.base)))
3367 return false;
3368 if (addr.indx && !REGNO_OK_FOR_INDEX_P (REGNO (addr.indx)))
3369 return false;
3370
3371 /* Avoid LA instructions with index register on z196; it is
3372 preferable to use regular add instructions when possible. */
3373 if (addr.indx && s390_tune == PROCESSOR_2817_Z196)
3374 return false;
3375
3376 if (!TARGET_64BIT && !addr.pointer)
3377 return false;
3378
3379 if (addr.pointer)
3380 return true;
3381
3382 if ((addr.base && REG_P (addr.base) && REG_POINTER (addr.base))
3383 || (addr.indx && REG_P (addr.indx) && REG_POINTER (addr.indx)))
3384 return true;
3385
3386 return false;
3387 }
3388
3389 /* Emit a forced load-address operation to load SRC into DST.
3390 This will use the LOAD ADDRESS instruction even in situations
3391 where legitimate_la_operand_p (SRC) returns false. */
3392
3393 void
3394 s390_load_address (rtx dst, rtx src)
3395 {
3396 if (TARGET_64BIT)
3397 emit_move_insn (dst, src);
3398 else
3399 emit_insn (gen_force_la_31 (dst, src));
3400 }
3401
3402 /* Return a legitimate reference for ORIG (an address) using the
3403 register REG. If REG is 0, a new pseudo is generated.
3404
3405 There are two types of references that must be handled:
3406
3407 1. Global data references must load the address from the GOT, via
3408 the PIC reg. An insn is emitted to do this load, and the reg is
3409 returned.
3410
3411 2. Static data references, constant pool addresses, and code labels
3412 compute the address as an offset from the GOT, whose base is in
3413 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
3414 differentiate them from global data objects. The returned
3415 address is the PIC reg + an unspec constant.
3416
3417 TARGET_LEGITIMIZE_ADDRESS_P rejects symbolic references unless the PIC
3418 reg also appears in the address. */
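/* Roughly, for global symbols: with flag_pic == 1 the GOT slot is
   addressed via a small @GOT displacement off the PIC register; with
   flag_pic == 2 the slot address is obtained via a PC-relative LARL
   (@GOTENT) on zarch CPUs, or loaded from the literal pool otherwise.
   Local symbols use LARL directly or a @GOTOFF constant from the pool.  */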
3419
3420 rtx
3421 legitimize_pic_address (rtx orig, rtx reg)
3422 {
3423 rtx addr = orig;
3424 rtx new_rtx = orig;
3425 rtx base;
3426
3427 gcc_assert (!TLS_SYMBOLIC_CONST (addr));
3428
3429 if (GET_CODE (addr) == LABEL_REF
3430 || (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (addr)))
3431 {
3432 /* This is a local symbol. */
3433 if (TARGET_CPU_ZARCH && larl_operand (addr, VOIDmode))
3434 {
3435 /* Access local symbols PC-relative via LARL.
3436 This is the same as in the non-PIC case, so it is
3437 handled automatically ... */
3438 }
3439 else
3440 {
3441 /* Access local symbols relative to the GOT. */
3442
3443 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3444
3445 if (reload_in_progress || reload_completed)
3446 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3447
3448 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
3449 addr = gen_rtx_CONST (Pmode, addr);
3450 addr = force_const_mem (Pmode, addr);
3451 emit_move_insn (temp, addr);
3452
3453 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3454 if (reg != 0)
3455 {
3456 s390_load_address (reg, new_rtx);
3457 new_rtx = reg;
3458 }
3459 }
3460 }
3461 else if (GET_CODE (addr) == SYMBOL_REF)
3462 {
3463 if (reg == 0)
3464 reg = gen_reg_rtx (Pmode);
3465
3466 if (flag_pic == 1)
3467 {
3468 /* Assume GOT offset < 4k. This is handled the same way
3469 in both 31- and 64-bit code (@GOT). */
3470
3471 if (reload_in_progress || reload_completed)
3472 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3473
3474 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3475 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3476 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
3477 new_rtx = gen_const_mem (Pmode, new_rtx);
3478 emit_move_insn (reg, new_rtx);
3479 new_rtx = reg;
3480 }
3481 else if (TARGET_CPU_ZARCH)
3482 {
3483 /* If the GOT offset might be >= 4k, we determine the position
3484 of the GOT entry via a PC-relative LARL (@GOTENT). */
3485
3486 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3487
3488 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
3489 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
3490
3491 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
3492 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3493 emit_move_insn (temp, new_rtx);
3494
3495 new_rtx = gen_const_mem (Pmode, temp);
3496 emit_move_insn (reg, new_rtx);
3497 new_rtx = reg;
3498 }
3499 else
3500 {
3501 /* If the GOT offset might be >= 4k, we have to load it
3502 from the literal pool (@GOT). */
3503
3504 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3505
3506 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
3507 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
3508
3509 if (reload_in_progress || reload_completed)
3510 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3511
3512 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3513 addr = gen_rtx_CONST (Pmode, addr);
3514 addr = force_const_mem (Pmode, addr);
3515 emit_move_insn (temp, addr);
3516
3517 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3518 new_rtx = gen_const_mem (Pmode, new_rtx);
3519 emit_move_insn (reg, new_rtx);
3520 new_rtx = reg;
3521 }
3522 }
3523 else
3524 {
3525 if (GET_CODE (addr) == CONST)
3526 {
3527 addr = XEXP (addr, 0);
3528 if (GET_CODE (addr) == UNSPEC)
3529 {
3530 gcc_assert (XVECLEN (addr, 0) == 1);
3531 switch (XINT (addr, 1))
3532 {
3533 /* If someone moved a GOT-relative UNSPEC
3534 out of the literal pool, force them back in. */
3535 case UNSPEC_GOTOFF:
3536 case UNSPEC_PLTOFF:
3537 new_rtx = force_const_mem (Pmode, orig);
3538 break;
3539
3540 /* @GOT is OK as is if small. */
3541 case UNSPEC_GOT:
3542 if (flag_pic == 2)
3543 new_rtx = force_const_mem (Pmode, orig);
3544 break;
3545
3546 /* @GOTENT is OK as is. */
3547 case UNSPEC_GOTENT:
3548 break;
3549
3550 /* @PLT is OK as is on 64-bit, must be converted to
3551 GOT-relative @PLTOFF on 31-bit. */
3552 case UNSPEC_PLT:
3553 if (!TARGET_CPU_ZARCH)
3554 {
3555 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3556
3557 if (reload_in_progress || reload_completed)
3558 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3559
3560 addr = XVECEXP (addr, 0, 0);
3561 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr),
3562 UNSPEC_PLTOFF);
3563 addr = gen_rtx_CONST (Pmode, addr);
3564 addr = force_const_mem (Pmode, addr);
3565 emit_move_insn (temp, addr);
3566
3567 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3568 if (reg != 0)
3569 {
3570 s390_load_address (reg, new_rtx);
3571 new_rtx = reg;
3572 }
3573 }
3574 break;
3575
3576 /* Everything else cannot happen. */
3577 default:
3578 gcc_unreachable ();
3579 }
3580 }
3581 else
3582 gcc_assert (GET_CODE (addr) == PLUS);
3583 }
3584 if (GET_CODE (addr) == PLUS)
3585 {
3586 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
3587
3588 gcc_assert (!TLS_SYMBOLIC_CONST (op0));
3589 gcc_assert (!TLS_SYMBOLIC_CONST (op1));
3590
3591 /* Check first to see if this is a constant offset
3592 from a local symbol reference. */
3593 if ((GET_CODE (op0) == LABEL_REF
3594 || (GET_CODE (op0) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (op0)))
3595 && GET_CODE (op1) == CONST_INT)
3596 {
3597 if (TARGET_CPU_ZARCH
3598 && larl_operand (op0, VOIDmode)
3599 && INTVAL (op1) < (HOST_WIDE_INT)1 << 31
3600 && INTVAL (op1) >= -((HOST_WIDE_INT)1 << 31))
3601 {
3602 if (INTVAL (op1) & 1)
3603 {
3604 /* LARL can't handle odd offsets, so emit a
3605 pair of LARL and LA. */
3606 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3607
3608 if (!DISP_IN_RANGE (INTVAL (op1)))
3609 {
3610 HOST_WIDE_INT even = INTVAL (op1) - 1;
3611 op0 = gen_rtx_PLUS (Pmode, op0, GEN_INT (even));
3612 op0 = gen_rtx_CONST (Pmode, op0);
3613 op1 = const1_rtx;
3614 }
3615
3616 emit_move_insn (temp, op0);
3617 new_rtx = gen_rtx_PLUS (Pmode, temp, op1);
3618
3619 if (reg != 0)
3620 {
3621 s390_load_address (reg, new_rtx);
3622 new_rtx = reg;
3623 }
3624 }
3625 else
3626 {
3627 /* If the offset is even, we can just use LARL.
3628 This will happen automatically. */
3629 }
3630 }
3631 else
3632 {
3633 /* Access local symbols relative to the GOT. */
3634
3635 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3636
3637 if (reload_in_progress || reload_completed)
3638 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3639
3640 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
3641 UNSPEC_GOTOFF);
3642 addr = gen_rtx_PLUS (Pmode, addr, op1);
3643 addr = gen_rtx_CONST (Pmode, addr);
3644 addr = force_const_mem (Pmode, addr);
3645 emit_move_insn (temp, addr);
3646
3647 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3648 if (reg != 0)
3649 {
3650 s390_load_address (reg, new_rtx);
3651 new_rtx = reg;
3652 }
3653 }
3654 }
3655
3656 /* Now, check whether it is a GOT relative symbol plus offset
3657 that was pulled out of the literal pool. Force it back in. */
3658
3659 else if (GET_CODE (op0) == UNSPEC
3660 && GET_CODE (op1) == CONST_INT
3661 && XINT (op0, 1) == UNSPEC_GOTOFF)
3662 {
3663 gcc_assert (XVECLEN (op0, 0) == 1);
3664
3665 new_rtx = force_const_mem (Pmode, orig);
3666 }
3667
3668 /* Otherwise, compute the sum. */
3669 else
3670 {
3671 base = legitimize_pic_address (XEXP (addr, 0), reg);
3672 new_rtx = legitimize_pic_address (XEXP (addr, 1),
3673 base == reg ? NULL_RTX : reg);
3674 if (GET_CODE (new_rtx) == CONST_INT)
3675 new_rtx = plus_constant (base, INTVAL (new_rtx));
3676 else
3677 {
3678 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
3679 {
3680 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
3681 new_rtx = XEXP (new_rtx, 1);
3682 }
3683 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
3684 }
3685
3686 if (GET_CODE (new_rtx) == CONST)
3687 new_rtx = XEXP (new_rtx, 0);
3688 new_rtx = force_operand (new_rtx, 0);
3689 }
3690 }
3691 }
3692 return new_rtx;
3693 }
3694
3695 /* Load the thread pointer into a register. */
3696
3697 rtx
3698 s390_get_thread_pointer (void)
3699 {
3700 rtx tp = gen_reg_rtx (Pmode);
3701
3702 emit_move_insn (tp, gen_rtx_REG (Pmode, TP_REGNUM));
3703 mark_reg_pointer (tp, BITS_PER_WORD);
3704
3705 return tp;
3706 }
3707
3708 /* Emit a tls call insn. The call target is the SYMBOL_REF stored
3709 in s390_tls_symbol which always refers to __tls_get_offset.
3710 The returned offset is written to RESULT_REG and a USE rtx is
3711 generated for TLS_CALL. */
3712
3713 static GTY(()) rtx s390_tls_symbol;
3714
3715 static void
3716 s390_emit_tls_call_insn (rtx result_reg, rtx tls_call)
3717 {
3718 rtx insn;
3719
3720 gcc_assert (flag_pic);
3721
3722 if (!s390_tls_symbol)
3723 s390_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_offset");
3724
3725 insn = s390_emit_call (s390_tls_symbol, tls_call, result_reg,
3726 gen_rtx_REG (Pmode, RETURN_REGNUM));
3727
3728 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), result_reg);
3729 RTL_CONST_CALL_P (insn) = 1;
3730 }
3731
3732 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3733 this (thread-local) address. REG may be used as temporary. */
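/* All four TLS models are handled below: global-dynamic and
   local-dynamic call __tls_get_offset; initial-exec loads the offset
   from the GOT (via a @GOTNTPOFF displacement, a PC-relative LARL, or
   the literal pool); local-exec takes an @NTPOFF constant from the
   literal pool.  The result is always added to the thread pointer.  */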
3734
3735 static rtx
3736 legitimize_tls_address (rtx addr, rtx reg)
3737 {
3738 rtx new_rtx, tls_call, temp, base, r2, insn;
3739
3740 if (GET_CODE (addr) == SYMBOL_REF)
3741 switch (tls_symbolic_operand (addr))
3742 {
3743 case TLS_MODEL_GLOBAL_DYNAMIC:
3744 start_sequence ();
3745 r2 = gen_rtx_REG (Pmode, 2);
3746 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_TLSGD);
3747 new_rtx = gen_rtx_CONST (Pmode, tls_call);
3748 new_rtx = force_const_mem (Pmode, new_rtx);
3749 emit_move_insn (r2, new_rtx);
3750 s390_emit_tls_call_insn (r2, tls_call);
3751 insn = get_insns ();
3752 end_sequence ();
3753
3754 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3755 temp = gen_reg_rtx (Pmode);
3756 emit_libcall_block (insn, temp, r2, new_rtx);
3757
3758 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3759 if (reg != 0)
3760 {
3761 s390_load_address (reg, new_rtx);
3762 new_rtx = reg;
3763 }
3764 break;
3765
3766 case TLS_MODEL_LOCAL_DYNAMIC:
3767 start_sequence ();
3768 r2 = gen_rtx_REG (Pmode, 2);
3769 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM);
3770 new_rtx = gen_rtx_CONST (Pmode, tls_call);
3771 new_rtx = force_const_mem (Pmode, new_rtx);
3772 emit_move_insn (r2, new_rtx);
3773 s390_emit_tls_call_insn (r2, tls_call);
3774 insn = get_insns ();
3775 end_sequence ();
3776
3777 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM_NTPOFF);
3778 temp = gen_reg_rtx (Pmode);
3779 emit_libcall_block (insn, temp, r2, new_rtx);
3780
3781 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3782 base = gen_reg_rtx (Pmode);
3783 s390_load_address (base, new_rtx);
3784
3785 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_DTPOFF);
3786 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3787 new_rtx = force_const_mem (Pmode, new_rtx);
3788 temp = gen_reg_rtx (Pmode);
3789 emit_move_insn (temp, new_rtx);
3790
3791 new_rtx = gen_rtx_PLUS (Pmode, base, temp);
3792 if (reg != 0)
3793 {
3794 s390_load_address (reg, new_rtx);
3795 new_rtx = reg;
3796 }
3797 break;
3798
3799 case TLS_MODEL_INITIAL_EXEC:
3800 if (flag_pic == 1)
3801 {
3802 /* Assume GOT offset < 4k. This is handled the same way
3803 in both 31- and 64-bit code. */
3804
3805 if (reload_in_progress || reload_completed)
3806 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3807
3808 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3809 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3810 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
3811 new_rtx = gen_const_mem (Pmode, new_rtx);
3812 temp = gen_reg_rtx (Pmode);
3813 emit_move_insn (temp, new_rtx);
3814 }
3815 else if (TARGET_CPU_ZARCH)
3816 {
3817 /* If the GOT offset might be >= 4k, we determine the position
3818 of the GOT entry via a PC-relative LARL. */
3819
3820 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3821 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3822 temp = gen_reg_rtx (Pmode);
3823 emit_move_insn (temp, new_rtx);
3824
3825 new_rtx = gen_const_mem (Pmode, temp);
3826 temp = gen_reg_rtx (Pmode);
3827 emit_move_insn (temp, new_rtx);
3828 }
3829 else if (flag_pic)
3830 {
3831 /* If the GOT offset might be >= 4k, we have to load it
3832 from the literal pool. */
3833
3834 if (reload_in_progress || reload_completed)
3835 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3836
3837 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3838 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3839 new_rtx = force_const_mem (Pmode, new_rtx);
3840 temp = gen_reg_rtx (Pmode);
3841 emit_move_insn (temp, new_rtx);
3842
3843 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3844 new_rtx = gen_const_mem (Pmode, new_rtx);
3845
3846 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
3847 temp = gen_reg_rtx (Pmode);
3848 emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
3849 }
3850 else
3851 {
3852 /* In position-dependent code, load the absolute address of
3853 the GOT entry from the literal pool. */
3854
3855 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3856 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3857 new_rtx = force_const_mem (Pmode, new_rtx);
3858 temp = gen_reg_rtx (Pmode);
3859 emit_move_insn (temp, new_rtx);
3860
3861 new_rtx = temp;
3862 new_rtx = gen_const_mem (Pmode, new_rtx);
3863 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
3864 temp = gen_reg_rtx (Pmode);
3865 emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
3866 }
3867
3868 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3869 if (reg != 0)
3870 {
3871 s390_load_address (reg, new_rtx);
3872 new_rtx = reg;
3873 }
3874 break;
3875
3876 case TLS_MODEL_LOCAL_EXEC:
3877 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3878 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3879 new_rtx = force_const_mem (Pmode, new_rtx);
3880 temp = gen_reg_rtx (Pmode);
3881 emit_move_insn (temp, new_rtx);
3882
3883 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3884 if (reg != 0)
3885 {
3886 s390_load_address (reg, new_rtx);
3887 new_rtx = reg;
3888 }
3889 break;
3890
3891 default:
3892 gcc_unreachable ();
3893 }
3894
3895 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == UNSPEC)
3896 {
3897 switch (XINT (XEXP (addr, 0), 1))
3898 {
3899 case UNSPEC_INDNTPOFF:
3900 gcc_assert (TARGET_CPU_ZARCH);
3901 new_rtx = addr;
3902 break;
3903
3904 default:
3905 gcc_unreachable ();
3906 }
3907 }
3908
3909 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
3910 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
3911 {
3912 new_rtx = XEXP (XEXP (addr, 0), 0);
3913 if (GET_CODE (new_rtx) != SYMBOL_REF)
3914 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3915
3916 new_rtx = legitimize_tls_address (new_rtx, reg);
3917 new_rtx = plus_constant (new_rtx, INTVAL (XEXP (XEXP (addr, 0), 1)));
3918 new_rtx = force_operand (new_rtx, 0);
3919 }
3920
3921 else
3922 gcc_unreachable (); /* for now ... */
3923
3924 return new_rtx;
3925 }
3926
3927 /* Emit insns making the address in operands[1] valid for a standard
3928 move to operands[0]. operands[1] is replaced by an address which
3929 should be used instead of the former RTX to emit the move
3930 pattern. */
3931
3932 void
3933 emit_symbolic_move (rtx *operands)
3934 {
3935 rtx temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
3936
3937 if (GET_CODE (operands[0]) == MEM)
3938 operands[1] = force_reg (Pmode, operands[1]);
3939 else if (TLS_SYMBOLIC_CONST (operands[1]))
3940 operands[1] = legitimize_tls_address (operands[1], temp);
3941 else if (flag_pic)
3942 operands[1] = legitimize_pic_address (operands[1], temp);
3943 }
3944
3945 /* Try machine-dependent ways of modifying an illegitimate address X
3946 to be legitimate. If we find one, return the new, valid address.
3947
3948 OLDX is the address as it was before break_out_memory_refs was called.
3949 In some cases it is useful to look at this to decide what needs to be done.
3950
3951 MODE is the mode of the operand pointed to by X.
3952
3953 When -fpic is used, special handling is needed for symbolic references.
3954 See comments by legitimize_pic_address for details. */
3955
3956 static rtx
3957 s390_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
3958 enum machine_mode mode ATTRIBUTE_UNUSED)
3959 {
3960 rtx constant_term = const0_rtx;
3961
3962 if (TLS_SYMBOLIC_CONST (x))
3963 {
3964 x = legitimize_tls_address (x, 0);
3965
3966 if (s390_legitimate_address_p (mode, x, FALSE))
3967 return x;
3968 }
3969 else if (GET_CODE (x) == PLUS
3970 && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
3971 || TLS_SYMBOLIC_CONST (XEXP (x, 1))))
3972 {
3973 return x;
3974 }
3975 else if (flag_pic)
3976 {
3977 if (SYMBOLIC_CONST (x)
3978 || (GET_CODE (x) == PLUS
3979 && (SYMBOLIC_CONST (XEXP (x, 0))
3980 || SYMBOLIC_CONST (XEXP (x, 1)))))
3981 x = legitimize_pic_address (x, 0);
3982
3983 if (s390_legitimate_address_p (mode, x, FALSE))
3984 return x;
3985 }
3986
3987 x = eliminate_constant_term (x, &constant_term);
3988
3989 /* Optimize loading of large displacements by splitting them
3990 into the multiple of 4K and the rest; this allows the
3991 former to be CSE'd if possible.
3992
3993 Don't do this if the displacement is added to a register
3994 pointing into the stack frame, as the offsets will
3995 change later anyway. */
3996
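/* Worked example (illustrative constant): for a constant term of
   0x12345, LOWER below becomes 0x345, which fits the 12-bit
   displacement field, while UPPER becomes 0x12000, which is loaded
   into a register and can be CSE'd across references.  */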
3997 if (GET_CODE (constant_term) == CONST_INT
3998 && !TARGET_LONG_DISPLACEMENT
3999 && !DISP_IN_RANGE (INTVAL (constant_term))
4000 && !(REG_P (x) && REGNO_PTR_FRAME_P (REGNO (x))))
4001 {
4002 HOST_WIDE_INT lower = INTVAL (constant_term) & 0xfff;
4003 HOST_WIDE_INT upper = INTVAL (constant_term) ^ lower;
4004
4005 rtx temp = gen_reg_rtx (Pmode);
4006 rtx val = force_operand (GEN_INT (upper), temp);
4007 if (val != temp)
4008 emit_move_insn (temp, val);
4009
4010 x = gen_rtx_PLUS (Pmode, x, temp);
4011 constant_term = GEN_INT (lower);
4012 }
4013
4014 if (GET_CODE (x) == PLUS)
4015 {
4016 if (GET_CODE (XEXP (x, 0)) == REG)
4017 {
4018 rtx temp = gen_reg_rtx (Pmode);
4019 rtx val = force_operand (XEXP (x, 1), temp);
4020 if (val != temp)
4021 emit_move_insn (temp, val);
4022
4023 x = gen_rtx_PLUS (Pmode, XEXP (x, 0), temp);
4024 }
4025
4026 else if (GET_CODE (XEXP (x, 1)) == REG)
4027 {
4028 rtx temp = gen_reg_rtx (Pmode);
4029 rtx val = force_operand (XEXP (x, 0), temp);
4030 if (val != temp)
4031 emit_move_insn (temp, val);
4032
4033 x = gen_rtx_PLUS (Pmode, temp, XEXP (x, 1));
4034 }
4035 }
4036
4037 if (constant_term != const0_rtx)
4038 x = gen_rtx_PLUS (Pmode, x, constant_term);
4039
4040 return x;
4041 }
4042
4043 /* Try a machine-dependent way of reloading an illegitimate address AD
4044 operand. If we find one, push the reload and return the new address.
4045
4046 MODE is the mode of the enclosing MEM. OPNUM is the operand number
4047 and TYPE is the reload type of the current reload. */
4048
4049 rtx
4050 legitimize_reload_address (rtx ad, enum machine_mode mode ATTRIBUTE_UNUSED,
4051 int opnum, int type)
4052 {
4053 if (!optimize || TARGET_LONG_DISPLACEMENT)
4054 return NULL_RTX;
4055
4056 if (GET_CODE (ad) == PLUS)
4057 {
4058 rtx tem = simplify_binary_operation (PLUS, Pmode,
4059 XEXP (ad, 0), XEXP (ad, 1));
4060 if (tem)
4061 ad = tem;
4062 }
4063
4064 if (GET_CODE (ad) == PLUS
4065 && GET_CODE (XEXP (ad, 0)) == REG
4066 && GET_CODE (XEXP (ad, 1)) == CONST_INT
4067 && !DISP_IN_RANGE (INTVAL (XEXP (ad, 1))))
4068 {
4069 HOST_WIDE_INT lower = INTVAL (XEXP (ad, 1)) & 0xfff;
4070 HOST_WIDE_INT upper = INTVAL (XEXP (ad, 1)) ^ lower;
4071 rtx cst, tem, new_rtx;
4072
4073 cst = GEN_INT (upper);
4074 if (!legitimate_reload_constant_p (cst))
4075 cst = force_const_mem (Pmode, cst);
4076
4077 tem = gen_rtx_PLUS (Pmode, XEXP (ad, 0), cst);
4078 new_rtx = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));
4079
4080 push_reload (XEXP (tem, 1), 0, &XEXP (tem, 1), 0,
4081 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
4082 opnum, (enum reload_type) type);
4083 return new_rtx;
4084 }
4085
4086 return NULL_RTX;
4087 }
4088
4089 /* Emit code to move LEN bytes from SRC to DST. */
4090
4091 void
4092 s390_expand_movmem (rtx dst, rtx src, rtx len)
4093 {
4094 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
4095 {
4096 if (INTVAL (len) > 0)
4097 emit_insn (gen_movmem_short (dst, src, GEN_INT (INTVAL (len) - 1)));
4098 }
4099
4100 else if (TARGET_MVCLE)
4101 {
4102 emit_insn (gen_movmem_long (dst, src, convert_to_mode (Pmode, len, 1)));
4103 }
4104
4105 else
4106 {
4107 rtx dst_addr, src_addr, count, blocks, temp;
4108 rtx loop_start_label = gen_label_rtx ();
4109 rtx loop_end_label = gen_label_rtx ();
4110 rtx end_label = gen_label_rtx ();
4111 enum machine_mode mode;
4112
4113 mode = GET_MODE (len);
4114 if (mode == VOIDmode)
4115 mode = Pmode;
4116
4117 dst_addr = gen_reg_rtx (Pmode);
4118 src_addr = gen_reg_rtx (Pmode);
4119 count = gen_reg_rtx (mode);
4120 blocks = gen_reg_rtx (mode);
4121
4122 convert_move (count, len, 1);
4123 emit_cmp_and_jump_insns (count, const0_rtx,
4124 EQ, NULL_RTX, mode, 1, end_label);
4125
4126 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
4127 emit_move_insn (src_addr, force_operand (XEXP (src, 0), NULL_RTX));
4128 dst = change_address (dst, VOIDmode, dst_addr);
4129 src = change_address (src, VOIDmode, src_addr);
4130
4131 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4132 OPTAB_DIRECT);
4133 if (temp != count)
4134 emit_move_insn (count, temp);
4135
4136 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4137 OPTAB_DIRECT);
4138 if (temp != blocks)
4139 emit_move_insn (blocks, temp);
4140
4141 emit_cmp_and_jump_insns (blocks, const0_rtx,
4142 EQ, NULL_RTX, mode, 1, loop_end_label);
4143
4144 emit_label (loop_start_label);
4145
4146 if (TARGET_Z10
4147 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 768))
4148 {
4149 rtx prefetch;
4150
4151 /* Issue a read prefetch for the +3 cache line. */
4152 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, src_addr, GEN_INT (768)),
4153 const0_rtx, const0_rtx);
4154 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4155 emit_insn (prefetch);
4156
4157 /* Issue a write prefetch for the +3 cache line. */
4158 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (768)),
4159 const1_rtx, const0_rtx);
4160 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4161 emit_insn (prefetch);
4162 }
4163
4164 emit_insn (gen_movmem_short (dst, src, GEN_INT (255)));
4165 s390_load_address (dst_addr,
4166 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
4167 s390_load_address (src_addr,
4168 gen_rtx_PLUS (Pmode, src_addr, GEN_INT (256)));
4169
4170 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4171 OPTAB_DIRECT);
4172 if (temp != blocks)
4173 emit_move_insn (blocks, temp);
4174
4175 emit_cmp_and_jump_insns (blocks, const0_rtx,
4176 EQ, NULL_RTX, mode, 1, loop_end_label);
4177
4178 emit_jump (loop_start_label);
4179 emit_label (loop_end_label);
4180
4181 emit_insn (gen_movmem_short (dst, src,
4182 convert_to_mode (Pmode, count, 1)));
4183 emit_label (end_label);
4184 }
4185 }
4186
4187 /* Emit code to set LEN bytes at DST to VAL.
4188 Make use of clrmem if VAL is zero. */
4189
4190 void
4191 s390_expand_setmem (rtx dst, rtx len, rtx val)
4192 {
4193 if (GET_CODE (len) == CONST_INT && INTVAL (len) == 0)
4194 return;
4195
4196 gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);
4197
4198 if (GET_CODE (len) == CONST_INT && INTVAL (len) > 0 && INTVAL (len) <= 257)
4199 {
4200 if (val == const0_rtx && INTVAL (len) <= 256)
4201 emit_insn (gen_clrmem_short (dst, GEN_INT (INTVAL (len) - 1)));
4202 else
4203 {
4204 /* Initialize memory by storing the first byte. */
4205 emit_move_insn (adjust_address (dst, QImode, 0), val);
4206
4207 if (INTVAL (len) > 1)
4208 {
4209 /* Initiate 1 byte overlap move.
4210 The first byte of DST is propagated through DSTP1.
4211 Prepare a movmem for: DST+1 = DST (length = LEN - 1).
4212 DST is set to size 1 so the rest of the memory location
4213 does not count as source operand. */
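/* Illustration (not from the original sources): to set five bytes to
   0x42, the store above writes 0x42 to DST[0]; the overlapping MVC
   emitted below then copies DST[0] to DST[1], DST[1] to DST[2], and
   so on, replicating the byte because MVC moves its operand one byte
   at a time from left to right.  */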
4214 rtx dstp1 = adjust_address (dst, VOIDmode, 1);
4215 set_mem_size (dst, const1_rtx);
4216
4217 emit_insn (gen_movmem_short (dstp1, dst,
4218 GEN_INT (INTVAL (len) - 2)));
4219 }
4220 }
4221 }
4222
4223 else if (TARGET_MVCLE)
4224 {
4225 val = force_not_mem (convert_modes (Pmode, QImode, val, 1));
4226 emit_insn (gen_setmem_long (dst, convert_to_mode (Pmode, len, 1), val));
4227 }
4228
4229 else
4230 {
4231 rtx dst_addr, count, blocks, temp, dstp1 = NULL_RTX;
4232 rtx loop_start_label = gen_label_rtx ();
4233 rtx loop_end_label = gen_label_rtx ();
4234 rtx end_label = gen_label_rtx ();
4235 enum machine_mode mode;
4236
4237 mode = GET_MODE (len);
4238 if (mode == VOIDmode)
4239 mode = Pmode;
4240
4241 dst_addr = gen_reg_rtx (Pmode);
4242 count = gen_reg_rtx (mode);
4243 blocks = gen_reg_rtx (mode);
4244
4245 convert_move (count, len, 1);
4246 emit_cmp_and_jump_insns (count, const0_rtx,
4247 EQ, NULL_RTX, mode, 1, end_label);
4248
4249 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
4250 dst = change_address (dst, VOIDmode, dst_addr);
4251
4252 if (val == const0_rtx)
4253 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4254 OPTAB_DIRECT);
4255 else
4256 {
4257 dstp1 = adjust_address (dst, VOIDmode, 1);
4258 set_mem_size (dst, const1_rtx);
4259
4260 /* Initialize memory by storing the first byte. */
4261 emit_move_insn (adjust_address (dst, QImode, 0), val);
4262
4263 /* If count is 1 we are done. */
4264 emit_cmp_and_jump_insns (count, const1_rtx,
4265 EQ, NULL_RTX, mode, 1, end_label);
4266
4267 temp = expand_binop (mode, add_optab, count, GEN_INT (-2), count, 1,
4268 OPTAB_DIRECT);
4269 }
4270 if (temp != count)
4271 emit_move_insn (count, temp);
4272
4273 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4274 OPTAB_DIRECT);
4275 if (temp != blocks)
4276 emit_move_insn (blocks, temp);
4277
4278 emit_cmp_and_jump_insns (blocks, const0_rtx,
4279 EQ, NULL_RTX, mode, 1, loop_end_label);
4280
4281 emit_label (loop_start_label);
4282
4283 if (TARGET_Z10
4284 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 1024))
4285 {
4286 /* Issue a write prefetch for the +4 cache line. */
4287 rtx prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr,
4288 GEN_INT (1024)),
4289 const1_rtx, const0_rtx);
4290 emit_insn (prefetch);
4291 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4292 }
4293
4294 if (val == const0_rtx)
4295 emit_insn (gen_clrmem_short (dst, GEN_INT (255)));
4296 else
4297 emit_insn (gen_movmem_short (dstp1, dst, GEN_INT (255)));
4298 s390_load_address (dst_addr,
4299 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
4300
4301 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4302 OPTAB_DIRECT);
4303 if (temp != blocks)
4304 emit_move_insn (blocks, temp);
4305
4306 emit_cmp_and_jump_insns (blocks, const0_rtx,
4307 EQ, NULL_RTX, mode, 1, loop_end_label);
4308
4309 emit_jump (loop_start_label);
4310 emit_label (loop_end_label);
4311
4312 if (val == const0_rtx)
4313 emit_insn (gen_clrmem_short (dst, convert_to_mode (Pmode, count, 1)));
4314 else
4315 emit_insn (gen_movmem_short (dstp1, dst, convert_to_mode (Pmode, count, 1)));
4316 emit_label (end_label);
4317 }
4318 }
4319
4320 /* Emit code to compare LEN bytes at OP0 with those at OP1,
4321 and return the result in TARGET. */
4322
4323 void
4324 s390_expand_cmpmem (rtx target, rtx op0, rtx op1, rtx len)
4325 {
4326 rtx ccreg = gen_rtx_REG (CCUmode, CC_REGNUM);
4327 rtx tmp;
4328
4329 /* As the result of CMPINT is inverted compared to what we need,
4330 we have to swap the operands. */
4331 tmp = op0; op0 = op1; op1 = tmp;
4332
4333 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
4334 {
4335 if (INTVAL (len) > 0)
4336 {
4337 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (INTVAL (len) - 1)));
4338 emit_insn (gen_cmpint (target, ccreg));
4339 }
4340 else
4341 emit_move_insn (target, const0_rtx);
4342 }
4343 else if (TARGET_MVCLE)
4344 {
4345 emit_insn (gen_cmpmem_long (op0, op1, convert_to_mode (Pmode, len, 1)));
4346 emit_insn (gen_cmpint (target, ccreg));
4347 }
4348 else
4349 {
4350 rtx addr0, addr1, count, blocks, temp;
4351 rtx loop_start_label = gen_label_rtx ();
4352 rtx loop_end_label = gen_label_rtx ();
4353 rtx end_label = gen_label_rtx ();
4354 enum machine_mode mode;
4355
4356 mode = GET_MODE (len);
4357 if (mode == VOIDmode)
4358 mode = Pmode;
4359
4360 addr0 = gen_reg_rtx (Pmode);
4361 addr1 = gen_reg_rtx (Pmode);
4362 count = gen_reg_rtx (mode);
4363 blocks = gen_reg_rtx (mode);
4364
4365 convert_move (count, len, 1);
4366 emit_cmp_and_jump_insns (count, const0_rtx,
4367 EQ, NULL_RTX, mode, 1, end_label);
4368
4369 emit_move_insn (addr0, force_operand (XEXP (op0, 0), NULL_RTX));
4370 emit_move_insn (addr1, force_operand (XEXP (op1, 0), NULL_RTX));
4371 op0 = change_address (op0, VOIDmode, addr0);
4372 op1 = change_address (op1, VOIDmode, addr1);
4373
4374 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4375 OPTAB_DIRECT);
4376 if (temp != count)
4377 emit_move_insn (count, temp);
4378
4379 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4380 OPTAB_DIRECT);
4381 if (temp != blocks)
4382 emit_move_insn (blocks, temp);
4383
4384 emit_cmp_and_jump_insns (blocks, const0_rtx,
4385 EQ, NULL_RTX, mode, 1, loop_end_label);
4386
4387 emit_label (loop_start_label);
4388
4389 if (TARGET_Z10
4390 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 512))
4391 {
4392 rtx prefetch;
4393
4394 /* Issue a read prefetch for the +2 cache line of operand 1. */
4395 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr0, GEN_INT (512)),
4396 const0_rtx, const0_rtx);
4397 emit_insn (prefetch);
4398 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4399
4400 /* Issue a read prefetch for the +2 cache line of operand 2. */
4401 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr1, GEN_INT (512)),
4402 const0_rtx, const0_rtx);
4403 emit_insn (prefetch);
4404 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4405 }
4406
4407 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (255)));
4408 temp = gen_rtx_NE (VOIDmode, ccreg, const0_rtx);
4409 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
4410 gen_rtx_LABEL_REF (VOIDmode, end_label), pc_rtx);
4411 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
4412 emit_jump_insn (temp);
4413
4414 s390_load_address (addr0,
4415 gen_rtx_PLUS (Pmode, addr0, GEN_INT (256)));
4416 s390_load_address (addr1,
4417 gen_rtx_PLUS (Pmode, addr1, GEN_INT (256)));
4418
4419 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4420 OPTAB_DIRECT);
4421 if (temp != blocks)
4422 emit_move_insn (blocks, temp);
4423
4424 emit_cmp_and_jump_insns (blocks, const0_rtx,
4425 EQ, NULL_RTX, mode, 1, loop_end_label);
4426
4427 emit_jump (loop_start_label);
4428 emit_label (loop_end_label);
4429
4430 emit_insn (gen_cmpmem_short (op0, op1,
4431 convert_to_mode (Pmode, count, 1)));
4432 emit_label (end_label);
4433
4434 emit_insn (gen_cmpint (target, ccreg));
4435 }
4436 }
4437
4438
4439 /* Expand conditional increment or decrement using alc/slb instructions.
4440 Should generate code setting DST to either SRC or SRC + INCREMENT,
4441 depending on the result of the comparison CMP_OP0 CMP_CODE CMP_OP1.
4442 Returns true if successful, false otherwise.
4443
4444 That makes it possible to implement some if-constructs without jumps e.g.:
4445 (borrow = CC0 | CC1 and carry = CC2 | CC3)
4446 unsigned int a, b, c;
4447 if (a < b) c++; -> CCU b > a -> CC2; c += carry;
4448 if (a < b) c--; -> CCL3 a - b -> borrow; c -= borrow;
4449 if (a <= b) c++; -> CCL3 b - a -> borrow; c += carry;
4450 if (a <= b) c--; -> CCU a <= b -> borrow; c -= borrow;
4451
4452 Checks for EQ and NE with a nonzero value need an additional xor e.g.:
4453 if (a == b) c++; -> CCL3 a ^= b; 0 - a -> borrow; c += carry;
4454 if (a == b) c--; -> CCU a ^= b; a <= 0 -> CC0 | CC1; c -= borrow;
4455 if (a != b) c++; -> CCU a ^= b; a > 0 -> CC2; c += carry;
4456 if (a != b) c--; -> CCL3 a ^= b; 0 - a -> borrow; c -= borrow; */
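/* A rough sketch (illustrative register assignment, assuming a
   register holding zero is available) of what this enables for
   "if (a < b) c++;" with a in %r1, b in %r2, c in %r3, zero in %r0:

       clr    %r2,%r1      # compare b with a; a < b yields CC2 (carry)
       alcr   %r3,%r0      # c = c + 0 + carry

   so the conditional increment becomes straight-line code without a
   branch.  */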
4457
4458 bool
4459 s390_expand_addcc (enum rtx_code cmp_code, rtx cmp_op0, rtx cmp_op1,
4460 rtx dst, rtx src, rtx increment)
4461 {
4462 enum machine_mode cmp_mode;
4463 enum machine_mode cc_mode;
4464 rtx op_res;
4465 rtx insn;
4466 rtvec p;
4467 int ret;
4468
4469 if ((GET_MODE (cmp_op0) == SImode || GET_MODE (cmp_op0) == VOIDmode)
4470 && (GET_MODE (cmp_op1) == SImode || GET_MODE (cmp_op1) == VOIDmode))
4471 cmp_mode = SImode;
4472 else if ((GET_MODE (cmp_op0) == DImode || GET_MODE (cmp_op0) == VOIDmode)
4473 && (GET_MODE (cmp_op1) == DImode || GET_MODE (cmp_op1) == VOIDmode))
4474 cmp_mode = DImode;
4475 else
4476 return false;
4477
4478 /* Try ADD LOGICAL WITH CARRY. */
4479 if (increment == const1_rtx)
4480 {
4481 /* Determine CC mode to use. */
4482 if (cmp_code == EQ || cmp_code == NE)
4483 {
4484 if (cmp_op1 != const0_rtx)
4485 {
4486 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
4487 NULL_RTX, 0, OPTAB_WIDEN);
4488 cmp_op1 = const0_rtx;
4489 }
4490
4491 cmp_code = cmp_code == EQ ? LEU : GTU;
4492 }
4493
4494 if (cmp_code == LTU || cmp_code == LEU)
4495 {
4496 rtx tem = cmp_op0;
4497 cmp_op0 = cmp_op1;
4498 cmp_op1 = tem;
4499 cmp_code = swap_condition (cmp_code);
4500 }
4501
4502 switch (cmp_code)
4503 {
4504 case GTU:
4505 cc_mode = CCUmode;
4506 break;
4507
4508 case GEU:
4509 cc_mode = CCL3mode;
4510 break;
4511
4512 default:
4513 return false;
4514 }
4515
4516 /* Emit comparison instruction pattern. */
4517 if (!register_operand (cmp_op0, cmp_mode))
4518 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4519
4520 insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
4521 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4522 /* We use insn_invalid_p here to add clobbers if required. */
4523 ret = insn_invalid_p (emit_insn (insn));
4524 gcc_assert (!ret);
4525
4526 /* Emit ALC instruction pattern. */
4527 op_res = gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4528 gen_rtx_REG (cc_mode, CC_REGNUM),
4529 const0_rtx);
4530
4531 if (src != const0_rtx)
4532 {
4533 if (!register_operand (src, GET_MODE (dst)))
4534 src = force_reg (GET_MODE (dst), src);
4535
4536 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, src);
4537 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, const0_rtx);
4538 }
4539
4540 p = rtvec_alloc (2);
4541 RTVEC_ELT (p, 0) =
4542 gen_rtx_SET (VOIDmode, dst, op_res);
4543 RTVEC_ELT (p, 1) =
4544 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4545 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
4546
4547 return true;
4548 }
4549
4550 /* Try SUBTRACT LOGICAL WITH BORROW. */
4551 if (increment == constm1_rtx)
4552 {
4553 /* Determine CC mode to use. */
4554 if (cmp_code == EQ || cmp_code == NE)
4555 {
4556 if (cmp_op1 != const0_rtx)
4557 {
4558 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
4559 NULL_RTX, 0, OPTAB_WIDEN);
4560 cmp_op1 = const0_rtx;
4561 }
4562
4563 cmp_code = cmp_code == EQ ? LEU : GTU;
4564 }
4565
4566 if (cmp_code == GTU || cmp_code == GEU)
4567 {
4568 rtx tem = cmp_op0;
4569 cmp_op0 = cmp_op1;
4570 cmp_op1 = tem;
4571 cmp_code = swap_condition (cmp_code);
4572 }
4573
4574 switch (cmp_code)
4575 {
4576 case LEU:
4577 cc_mode = CCUmode;
4578 break;
4579
4580 case LTU:
4581 cc_mode = CCL3mode;
4582 break;
4583
4584 default:
4585 return false;
4586 }
4587
4588 /* Emit comparison instruction pattern. */
4589 if (!register_operand (cmp_op0, cmp_mode))
4590 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4591
4592 insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
4593 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4594 /* We use insn_invalid_p here to add clobbers if required. */
4595 ret = insn_invalid_p (emit_insn (insn));
4596 gcc_assert (!ret);
4597
4598 /* Emit SLB instruction pattern. */
4599 if (!register_operand (src, GET_MODE (dst)))
4600 src = force_reg (GET_MODE (dst), src);
4601
4602 op_res = gen_rtx_MINUS (GET_MODE (dst),
4603 gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
4604 gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4605 gen_rtx_REG (cc_mode, CC_REGNUM),
4606 const0_rtx));
4607 p = rtvec_alloc (2);
4608 RTVEC_ELT (p, 0) =
4609 gen_rtx_SET (VOIDmode, dst, op_res);
4610 RTVEC_ELT (p, 1) =
4611 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4612 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
4613
4614 return true;
4615 }
4616
4617 return false;
4618 }
4619
4620 /* Expand code for the insv template. Return true if successful. */
4621
4622 bool
4623 s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
4624 {
4625 int bitsize = INTVAL (op1);
4626 int bitpos = INTVAL (op2);
4627
4628 /* On z10 we can use the risbg instruction to implement insv. */
4629 if (TARGET_Z10
4630 && ((GET_MODE (dest) == DImode && GET_MODE (src) == DImode)
4631 || (GET_MODE (dest) == SImode && GET_MODE (src) == SImode)))
4632 {
4633 rtx op;
4634 rtx clobber;
4635
4636 op = gen_rtx_SET (GET_MODE(src),
4637 gen_rtx_ZERO_EXTRACT (GET_MODE (dest), dest, op1, op2),
4638 src);
4639 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4640 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
4641
4642 return true;
4643 }
4644
4645 /* We need byte alignment. */
4646 if (bitsize % BITS_PER_UNIT)
4647 return false;
4648
4649 if (bitpos == 0
4650 && memory_operand (dest, VOIDmode)
4651 && (register_operand (src, word_mode)
4652 || const_int_operand (src, VOIDmode)))
4653 {
4654 /* Emit standard pattern if possible. */
4655 enum machine_mode mode = smallest_mode_for_size (bitsize, MODE_INT);
4656 if (GET_MODE_BITSIZE (mode) == bitsize)
4657 emit_move_insn (adjust_address (dest, mode, 0), gen_lowpart (mode, src));
4658
4659 /* (set (ze (mem)) (const_int)). */
4660 else if (const_int_operand (src, VOIDmode))
4661 {
4662 int size = bitsize / BITS_PER_UNIT;
4663 rtx src_mem = adjust_address (force_const_mem (word_mode, src), BLKmode,
4664 GET_MODE_SIZE (word_mode) - size);
4665
4666 dest = adjust_address (dest, BLKmode, 0);
4667 set_mem_size (dest, GEN_INT (size));
4668 s390_expand_movmem (dest, src_mem, GEN_INT (size));
4669 }
4670
4671 /* (set (ze (mem)) (reg)). */
4672 else if (register_operand (src, word_mode))
4673 {
4674 if (bitsize <= GET_MODE_BITSIZE (SImode))
4675 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, op1,
4676 const0_rtx), src);
4677 else
4678 {
4679 /* Emit st,stcmh sequence. */
4680 int stcmh_width = bitsize - GET_MODE_BITSIZE (SImode);
4681 int size = stcmh_width / BITS_PER_UNIT;
4682
4683 emit_move_insn (adjust_address (dest, SImode, size),
4684 gen_lowpart (SImode, src));
4685 set_mem_size (dest, GEN_INT (size));
4686 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, GEN_INT
4687 (stcmh_width), const0_rtx),
4688 gen_rtx_LSHIFTRT (word_mode, src, GEN_INT
4689 (GET_MODE_BITSIZE (SImode))));
4690 }
4691 }
4692 else
4693 return false;
4694
4695 return true;
4696 }
4697
4698 /* (set (ze (reg)) (const_int)). */
4699 if (TARGET_ZARCH
4700 && register_operand (dest, word_mode)
4701 && (bitpos % 16) == 0
4702 && (bitsize % 16) == 0
4703 && const_int_operand (src, VOIDmode))
4704 {
4705 HOST_WIDE_INT val = INTVAL (src);
4706 int regpos = bitpos + bitsize;
4707
4708 while (regpos > bitpos)
4709 {
4710 enum machine_mode putmode;
4711 int putsize;
4712
4713 if (TARGET_EXTIMM && (regpos % 32 == 0) && (regpos >= bitpos + 32))
4714 putmode = SImode;
4715 else
4716 putmode = HImode;
4717
4718 putsize = GET_MODE_BITSIZE (putmode);
4719 regpos -= putsize;
4720 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
4721 GEN_INT (putsize),
4722 GEN_INT (regpos)),
4723 gen_int_mode (val, putmode));
4724 val >>= putsize;
4725 }
4726 gcc_assert (regpos == bitpos);
4727 return true;
4728 }
4729
4730 return false;
4731 }
4732
4733 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic which returns a
4734 register that holds VAL masked to MODE and shifted left by COUNT bits. */
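/* For example (illustrative values): with MODE == QImode, VAL == 0x1234
   and COUNT == 16, the AND below first masks VAL to 0x34 and the shift
   then yields 0x00340000.  */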
4735
4736 static inline rtx
4737 s390_expand_mask_and_shift (rtx val, enum machine_mode mode, rtx count)
4738 {
4739 val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
4740 NULL_RTX, 1, OPTAB_DIRECT);
4741 return expand_simple_binop (SImode, ASHIFT, val, count,
4742 NULL_RTX, 1, OPTAB_DIRECT);
4743 }
4744
4745 /* Structure to hold the initial parameters for a compare_and_swap operation
4746 in HImode and QImode. */
4747
4748 struct alignment_context
4749 {
4750 rtx memsi; /* SI aligned memory location. */
4751 rtx shift; /* Bit offset with regard to lsb. */
4752 rtx modemask; /* Mask of the HQImode shifted by SHIFT bits. */
4753 rtx modemaski; /* ~modemask */
4754 bool aligned; /* True if memory is aligned, false otherwise. */
4755 };
4756
4757 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize
4758 structure AC for transparent simplification if the memory alignment is known
4759 to be at least 32 bits. MEM is the memory location for the actual operation
4760 and MODE its mode. */
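/* Illustrative example: for an HImode access whose address is one byte
   past an SImode boundary, the field occupies bytes 1..2 of the aligned
   word, so SHIFT ends up as ((4 - 2) - 1) * 8 = 8 bits from the lsb,
   MODEMASK as 0x00ffff00 and MODEMASKI as 0xff0000ff.  */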
4761
4762 static void
4763 init_alignment_context (struct alignment_context *ac, rtx mem,
4764 enum machine_mode mode)
4765 {
4766 ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
4767 ac->aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));
4768
4769 if (ac->aligned)
4770 ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned. */
4771 else
4772 {
4773 /* Alignment is unknown. */
4774 rtx byteoffset, addr, align;
4775
4776 /* Force the address into a register. */
4777 addr = force_reg (Pmode, XEXP (mem, 0));
4778
4779 /* Align it to SImode. */
4780 align = expand_simple_binop (Pmode, AND, addr,
4781 GEN_INT (-GET_MODE_SIZE (SImode)),
4782 NULL_RTX, 1, OPTAB_DIRECT);
4783 /* Generate MEM. */
4784 ac->memsi = gen_rtx_MEM (SImode, align);
4785 MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
4786 set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
4787 set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));
4788
4789 /* Calculate shiftcount. */
4790 byteoffset = expand_simple_binop (Pmode, AND, addr,
4791 GEN_INT (GET_MODE_SIZE (SImode) - 1),
4792 NULL_RTX, 1, OPTAB_DIRECT);
4793 /* As we already have some offset, evaluate the remaining distance. */
4794 ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
4795 NULL_RTX, 1, OPTAB_DIRECT);
4796
4797 }
4798 /* Shift is the byte count, but we need the bitcount. */
4799 ac->shift = expand_simple_binop (SImode, MULT, ac->shift, GEN_INT (BITS_PER_UNIT),
4800 NULL_RTX, 1, OPTAB_DIRECT);
4801 /* Calculate masks. */
4802 ac->modemask = expand_simple_binop (SImode, ASHIFT,
4803 GEN_INT (GET_MODE_MASK (mode)), ac->shift,
4804 NULL_RTX, 1, OPTAB_DIRECT);
4805 ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask, NULL_RTX, 1);
4806 }
4807
4808 /* Expand an atomic compare and swap operation for HImode and QImode. MEM is
4809 the memory location, CMP the old value to compare MEM with, and NEW_RTX the value
4810 to set if CMP == MEM.
4811 CMP is never in memory for compare_and_swap_cc because
4812 expand_bool_compare_and_swap puts it into a register for later compare. */
4813
4814 void
4815 s390_expand_cs_hqi (enum machine_mode mode, rtx target, rtx mem, rtx cmp, rtx new_rtx)
4816 {
4817 struct alignment_context ac;
4818 rtx cmpv, newv, val, resv, cc;
4819 rtx res = gen_reg_rtx (SImode);
4820 rtx csloop = gen_label_rtx ();
4821 rtx csend = gen_label_rtx ();
4822
4823 gcc_assert (register_operand (target, VOIDmode));
4824 gcc_assert (MEM_P (mem));
4825
4826 init_alignment_context (&ac, mem, mode);
4827
4828 /* Shift the values to the correct bit positions. */
4829 if (!(ac.aligned && MEM_P (cmp)))
4830 cmp = s390_expand_mask_and_shift (cmp, mode, ac.shift);
4831 if (!(ac.aligned && MEM_P (new_rtx)))
4832 new_rtx = s390_expand_mask_and_shift (new_rtx, mode, ac.shift);
4833
4834 /* Load full word. Subsequent loads are performed by CS. */
4835 val = expand_simple_binop (SImode, AND, ac.memsi, ac.modemaski,
4836 NULL_RTX, 1, OPTAB_DIRECT);
4837
4838 /* Start CS loop. */
4839 emit_label (csloop);
4840 /* val = "<mem>00..0<mem>"
4841 * cmp = "00..0<cmp>00..0"
4842 * new = "00..0<new>00..0"
4843 */
4844
4845 /* Patch cmp and new with val at correct position. */
4846 if (ac.aligned && MEM_P (cmp))
4847 {
4848 cmpv = force_reg (SImode, val);
4849 store_bit_field (cmpv, GET_MODE_BITSIZE (mode), 0, SImode, cmp);
4850 }
4851 else
4852 cmpv = force_reg (SImode, expand_simple_binop (SImode, IOR, cmp, val,
4853 NULL_RTX, 1, OPTAB_DIRECT));
4854 if (ac.aligned && MEM_P (new_rtx))
4855 {
4856 newv = force_reg (SImode, val);
4857 store_bit_field (newv, GET_MODE_BITSIZE (mode), 0, SImode, new_rtx);
4858 }
4859 else
4860 newv = force_reg (SImode, expand_simple_binop (SImode, IOR, new_rtx, val,
4861 NULL_RTX, 1, OPTAB_DIRECT));
4862
4863 /* Jump to end if we're done (likely?). */
4864 s390_emit_jump (csend, s390_emit_compare_and_swap (EQ, res, ac.memsi,
4865 cmpv, newv));
4866
4867 /* Check for changes outside mode. */
4868 resv = expand_simple_binop (SImode, AND, res, ac.modemaski,
4869 NULL_RTX, 1, OPTAB_DIRECT);
4870 cc = s390_emit_compare (NE, resv, val);
4871 emit_move_insn (val, resv);
4872 /* Loop internal if so. */
4873 s390_emit_jump (csloop, cc);
4874
4875 emit_label (csend);
4876
4877 /* Return the correct part of the bitfield. */
4878 convert_move (target, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
4879 NULL_RTX, 1, OPTAB_DIRECT), 1);
4880 }
4881
4882 /* Expand an atomic operation CODE of mode MODE. MEM is the memory location
4883 and VAL the value to play with. If AFTER is true then store the value
4884 MEM holds after the operation; if AFTER is false then store the value MEM
4885 holds before the operation. If TARGET is zero then discard that value, else
4886 store it to TARGET. */
4887
4888 void
4889 s390_expand_atomic (enum machine_mode mode, enum rtx_code code,
4890 rtx target, rtx mem, rtx val, bool after)
4891 {
4892 struct alignment_context ac;
4893 rtx cmp;
4894 rtx new_rtx = gen_reg_rtx (SImode);
4895 rtx orig = gen_reg_rtx (SImode);
4896 rtx csloop = gen_label_rtx ();
4897
4898 gcc_assert (!target || register_operand (target, VOIDmode));
4899 gcc_assert (MEM_P (mem));
4900
4901 init_alignment_context (&ac, mem, mode);
4902
4903 /* Shift val to the correct bit positions.
4904 Preserve "icm", but prevent "ex icm". */
4905 if (!(ac.aligned && code == SET && MEM_P (val)))
4906 val = s390_expand_mask_and_shift (val, mode, ac.shift);
4907
4908 /* Further preparation insns. */
4909 if (code == PLUS || code == MINUS)
4910 emit_move_insn (orig, val);
4911 else if (code == MULT || code == AND) /* val = "11..1<val>11..1" */
4912 val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
4913 NULL_RTX, 1, OPTAB_DIRECT);
4914
4915 /* Load full word. Subsequent loads are performed by CS. */
4916 cmp = force_reg (SImode, ac.memsi);
4917
4918 /* Start CS loop. */
4919 emit_label (csloop);
4920 emit_move_insn (new_rtx, cmp);
4921
4922 /* Patch new with val at correct position. */
4923 switch (code)
4924 {
4925 case PLUS:
4926 case MINUS:
4927 val = expand_simple_binop (SImode, code, new_rtx, orig,
4928 NULL_RTX, 1, OPTAB_DIRECT);
4929 val = expand_simple_binop (SImode, AND, val, ac.modemask,
4930 NULL_RTX, 1, OPTAB_DIRECT);
4931 /* FALLTHRU */
4932 case SET:
4933 if (ac.aligned && MEM_P (val))
4934 store_bit_field (new_rtx, GET_MODE_BITSIZE (mode), 0, SImode, val);
4935 else
4936 {
4937 new_rtx = expand_simple_binop (SImode, AND, new_rtx, ac.modemaski,
4938 NULL_RTX, 1, OPTAB_DIRECT);
4939 new_rtx = expand_simple_binop (SImode, IOR, new_rtx, val,
4940 NULL_RTX, 1, OPTAB_DIRECT);
4941 }
4942 break;
4943 case AND:
4944 case IOR:
4945 case XOR:
4946 new_rtx = expand_simple_binop (SImode, code, new_rtx, val,
4947 NULL_RTX, 1, OPTAB_DIRECT);
4948 break;
4949 case MULT: /* NAND */
4950 new_rtx = expand_simple_binop (SImode, AND, new_rtx, val,
4951 NULL_RTX, 1, OPTAB_DIRECT);
4952 new_rtx = expand_simple_binop (SImode, XOR, new_rtx, ac.modemask,
4953 NULL_RTX, 1, OPTAB_DIRECT);
4954 break;
4955 default:
4956 gcc_unreachable ();
4957 }
4958
4959 s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, cmp,
4960 ac.memsi, cmp, new_rtx));
4961
4962 /* Return the correct part of the bitfield. */
4963 if (target)
4964 convert_move (target, expand_simple_binop (SImode, LSHIFTRT,
4965 after ? new_rtx : cmp, ac.shift,
4966 NULL_RTX, 1, OPTAB_DIRECT), 1);
4967 }
4968
4969 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
4970 We need to emit DTP-relative relocations. */
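/* For example, a 4-byte entry referring to a symbol "foo" is emitted
   by the code below as:  .long  foo@DTPOFF  */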
4971
4972 static void s390_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
4973
4974 static void
4975 s390_output_dwarf_dtprel (FILE *file, int size, rtx x)
4976 {
4977 switch (size)
4978 {
4979 case 4:
4980 fputs ("\t.long\t", file);
4981 break;
4982 case 8:
4983 fputs ("\t.quad\t", file);
4984 break;
4985 default:
4986 gcc_unreachable ();
4987 }
4988 output_addr_const (file, x);
4989 fputs ("@DTPOFF", file);
4990 }
4991
4992 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
4993 /* Implement TARGET_MANGLE_TYPE. */
4994
4995 static const char *
4996 s390_mangle_type (const_tree type)
4997 {
4998 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
4999 && TARGET_LONG_DOUBLE_128)
5000 return "g";
5001
5002 /* For all other types, use normal C++ mangling. */
5003 return NULL;
5004 }
5005 #endif
5006
5007 /* In the name of slightly smaller debug output, and to cater to
5008 general assembler lossage, recognize various UNSPEC sequences
5009 and turn them back into a direct symbol reference. */
5010
5011 static rtx
5012 s390_delegitimize_address (rtx orig_x)
5013 {
5014 rtx x, y;
5015
5016 orig_x = delegitimize_mem_from_attrs (orig_x);
5017 x = orig_x;
5018
5019 /* Extract the symbol ref from:
5020 (plus:SI (reg:SI 12 %r12)
5021 (const:SI (unspec:SI [(symbol_ref/f:SI ("*.LC0"))]
5022 UNSPEC_GOTOFF/PLTOFF)))
5023 and
5024 (plus:SI (reg:SI 12 %r12)
5025 (const:SI (plus:SI (unspec:SI [(symbol_ref:SI ("L"))]
5026 UNSPEC_GOTOFF/PLTOFF)
5027 (const_int 4 [0x4])))) */
5028 if (GET_CODE (x) == PLUS
5029 && REG_P (XEXP (x, 0))
5030 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
5031 && GET_CODE (XEXP (x, 1)) == CONST)
5032 {
5033 /* The const operand. */
5034 y = XEXP (XEXP (x, 1), 0);
5035
5036 if (GET_CODE (y) == PLUS
5037 && GET_CODE (XEXP (y, 1)) == CONST_INT)
5038 y = XEXP (y, 0);
5039
5040 if (GET_CODE (y) == UNSPEC
5041 && (XINT (y, 1) == UNSPEC_GOTOFF
5042 || XINT (y, 1) == UNSPEC_PLTOFF))
5043 return XVECEXP (y, 0, 0);
5044 }
5045
5046 if (GET_CODE (x) != MEM)
5047 return orig_x;
5048
5049 x = XEXP (x, 0);
5050 if (GET_CODE (x) == PLUS
5051 && GET_CODE (XEXP (x, 1)) == CONST
5052 && GET_CODE (XEXP (x, 0)) == REG
5053 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
5054 {
5055 y = XEXP (XEXP (x, 1), 0);
5056 if (GET_CODE (y) == UNSPEC
5057 && XINT (y, 1) == UNSPEC_GOT)
5058 y = XVECEXP (y, 0, 0);
5059 else
5060 return orig_x;
5061 }
5062 else if (GET_CODE (x) == CONST)
5063 {
5064 /* Extract the symbol ref from:
5065 (mem:QI (const:DI (unspec:DI [(symbol_ref:DI ("foo"))]
5066 UNSPEC_PLT/GOTENT))) */
5067
5068 y = XEXP (x, 0);
5069 if (GET_CODE (y) == UNSPEC
5070 && (XINT (y, 1) == UNSPEC_GOTENT
5071 || XINT (y, 1) == UNSPEC_PLT))
5072 y = XVECEXP (y, 0, 0);
5073 else
5074 return orig_x;
5075 }
5076 else
5077 return orig_x;
5078
5079 if (GET_MODE (orig_x) != Pmode)
5080 {
5081 if (GET_MODE (orig_x) == BLKmode)
5082 return orig_x;
5083 y = lowpart_subreg (GET_MODE (orig_x), y, Pmode);
5084 if (y == NULL_RTX)
5085 return orig_x;
5086 }
5087 return y;
5088 }
5089
5090 /* Output operand OP to stdio stream FILE.
5091 OP is an address (register + offset) which is not used to address data;
5092 instead the rightmost bits are interpreted as the value. */
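/* Illustrative example: for OP == (plus (reg %r1) (const_int 4142))
   this prints "46(%r1)", since only the low 12 bits of the offset
   (4142 & 0xfff == 46) are significant for a shift count.  */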
5093
5094 static void
5095 print_shift_count_operand (FILE *file, rtx op)
5096 {
5097 HOST_WIDE_INT offset;
5098 rtx base;
5099
5100 /* Extract base register and offset. */
5101 if (!s390_decompose_shift_count (op, &base, &offset))
5102 gcc_unreachable ();
5103
5104 /* Sanity check. */
5105 if (base)
5106 {
5107 gcc_assert (GET_CODE (base) == REG);
5108 gcc_assert (REGNO (base) < FIRST_PSEUDO_REGISTER);
5109 gcc_assert (REGNO_REG_CLASS (REGNO (base)) == ADDR_REGS);
5110 }
5111
5112 /* Offsets are restricted to twelve bits. */
5113 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset & ((1 << 12) - 1));
5114 if (base)
5115 fprintf (file, "(%s)", reg_names[REGNO (base)]);
5116 }
5117
5118 /* See 'get_some_local_dynamic_name'. */
5119
5120 static int
5121 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
5122 {
5123 rtx x = *px;
5124
5125 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
5126 {
5127 x = get_pool_constant (x);
5128 return for_each_rtx (&x, get_some_local_dynamic_name_1, 0);
5129 }
5130
5131 if (GET_CODE (x) == SYMBOL_REF
5132 && tls_symbolic_operand (x) == TLS_MODEL_LOCAL_DYNAMIC)
5133 {
5134 cfun->machine->some_ld_name = XSTR (x, 0);
5135 return 1;
5136 }
5137
5138 return 0;
5139 }
5140
5141 /* Locate some local-dynamic symbol still in use by this function
5142 so that we can print its name in local-dynamic base patterns. */
5143
5144 static const char *
5145 get_some_local_dynamic_name (void)
5146 {
5147 rtx insn;
5148
5149 if (cfun->machine->some_ld_name)
5150 return cfun->machine->some_ld_name;
5151
5152 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
5153 if (INSN_P (insn)
5154 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
5155 return cfun->machine->some_ld_name;
5156
5157 gcc_unreachable ();
5158 }
5159
5160 /* Output machine-dependent UNSPECs occurring in address constant X
5161 in assembler syntax to stdio stream FILE. Returns true if the
5162 constant X could be recognized, false otherwise. */
5163
5164 static bool
5165 s390_output_addr_const_extra (FILE *file, rtx x)
5166 {
5167 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
5168 switch (XINT (x, 1))
5169 {
5170 case UNSPEC_GOTENT:
5171 output_addr_const (file, XVECEXP (x, 0, 0));
5172 fprintf (file, "@GOTENT");
5173 return true;
5174 case UNSPEC_GOT:
5175 output_addr_const (file, XVECEXP (x, 0, 0));
5176 fprintf (file, "@GOT");
5177 return true;
5178 case UNSPEC_GOTOFF:
5179 output_addr_const (file, XVECEXP (x, 0, 0));
5180 fprintf (file, "@GOTOFF");
5181 return true;
5182 case UNSPEC_PLT:
5183 output_addr_const (file, XVECEXP (x, 0, 0));
5184 fprintf (file, "@PLT");
5185 return true;
5186 case UNSPEC_PLTOFF:
5187 output_addr_const (file, XVECEXP (x, 0, 0));
5188 fprintf (file, "@PLTOFF");
5189 return true;
5190 case UNSPEC_TLSGD:
5191 output_addr_const (file, XVECEXP (x, 0, 0));
5192 fprintf (file, "@TLSGD");
5193 return true;
5194 case UNSPEC_TLSLDM:
5195 assemble_name (file, get_some_local_dynamic_name ());
5196 fprintf (file, "@TLSLDM");
5197 return true;
5198 case UNSPEC_DTPOFF:
5199 output_addr_const (file, XVECEXP (x, 0, 0));
5200 fprintf (file, "@DTPOFF");
5201 return true;
5202 case UNSPEC_NTPOFF:
5203 output_addr_const (file, XVECEXP (x, 0, 0));
5204 fprintf (file, "@NTPOFF");
5205 return true;
5206 case UNSPEC_GOTNTPOFF:
5207 output_addr_const (file, XVECEXP (x, 0, 0));
5208 fprintf (file, "@GOTNTPOFF");
5209 return true;
5210 case UNSPEC_INDNTPOFF:
5211 output_addr_const (file, XVECEXP (x, 0, 0));
5212 fprintf (file, "@INDNTPOFF");
5213 return true;
5214 }
5215
5216 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 2)
5217 switch (XINT (x, 1))
5218 {
5219 case UNSPEC_POOL_OFFSET:
5220 x = gen_rtx_MINUS (GET_MODE (x), XVECEXP (x, 0, 0), XVECEXP (x, 0, 1));
5221 output_addr_const (file, x);
5222 return true;
5223 }
5224 return false;
5225 }
5226
5227 /* Output address operand ADDR in assembler syntax to
5228 stdio stream FILE. */
5229
5230 void
5231 print_operand_address (FILE *file, rtx addr)
5232 {
5233 struct s390_address ad;
5234
5235 if (s390_symref_operand_p (addr, NULL, NULL))
5236 {
5237 if (!TARGET_Z10)
5238 {
5239 output_operand_lossage ("symbolic memory references are "
5240 "only supported on z10 or later");
5241 return;
5242 }
5243 output_addr_const (file, addr);
5244 return;
5245 }
5246
5247 if (!s390_decompose_address (addr, &ad)
5248 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5249 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
5250 output_operand_lossage ("cannot decompose address");
5251
5252 if (ad.disp)
5253 output_addr_const (file, ad.disp);
5254 else
5255 fprintf (file, "0");
5256
5257 if (ad.base && ad.indx)
5258 fprintf (file, "(%s,%s)", reg_names[REGNO (ad.indx)],
5259 reg_names[REGNO (ad.base)]);
5260 else if (ad.base)
5261 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
5262 }
5263
5264 /* Output operand X in assembler syntax to stdio stream FILE.
5265 CODE specifies the format flag. The following format flags
5266 are recognized:
5267
5268 'C': print opcode suffix for branch condition.
5269 'D': print opcode suffix for inverse branch condition.
5270 'E': print opcode suffix for branch on index instruction.
5271 'J': print tls_load/tls_gdcall/tls_ldcall suffix
5272 'G': print the size of the operand in bytes.
5273 'O': print only the displacement of a memory reference.
5274 'R': print only the base register of a memory reference.
5275 'S': print S-type memory reference (base+displacement).
5276 'N': print the second word of a DImode operand.
5277 'M': print the second word of a TImode operand.
5278 'Y': print shift count operand.
5279
5280 'b': print integer X as if it's an unsigned byte.
5281 'c': print integer X as if it's a signed byte.
5282 'x': print integer X as if it's an unsigned halfword.
5283 'h': print integer X as if it's a signed halfword.
5284 'i': print the first nonzero HImode part of X.
5285 'j': print the first HImode part unequal to -1 of X.
5286 'k': print the first nonzero SImode part of X.
5287 'm': print the first SImode part unequal to -1 of X.
5288 'o': print integer X as if it's an unsigned 32-bit word. */
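/* For instance (illustrative value), with X == (const_int 65436) the
   'x' modifier prints 65436 while 'h' prints -100, i.e. the same
   halfword interpreted as unsigned respectively signed.  */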
5289
5290 void
5291 print_operand (FILE *file, rtx x, int code)
5292 {
5293 switch (code)
5294 {
5295 case 'C':
5296 fprintf (file, s390_branch_condition_mnemonic (x, FALSE));
5297 return;
5298
5299 case 'D':
5300 fprintf (file, s390_branch_condition_mnemonic (x, TRUE));
5301 return;
5302
5303 case 'E':
5304 if (GET_CODE (x) == LE)
5305 fprintf (file, "l");
5306 else if (GET_CODE (x) == GT)
5307 fprintf (file, "h");
5308 else
5309 output_operand_lossage ("invalid comparison operator "
5310 "for 'E' output modifier");
5311 return;
5312
5313 case 'J':
5314 if (GET_CODE (x) == SYMBOL_REF)
5315 {
5316 fprintf (file, "%s", ":tls_load:");
5317 output_addr_const (file, x);
5318 }
5319 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
5320 {
5321 fprintf (file, "%s", ":tls_gdcall:");
5322 output_addr_const (file, XVECEXP (x, 0, 0));
5323 }
5324 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM)
5325 {
5326 fprintf (file, "%s", ":tls_ldcall:");
5327 assemble_name (file, get_some_local_dynamic_name ());
5328 }
5329 else
5330 output_operand_lossage ("invalid reference for 'J' output modifier");
5331 return;
5332
5333 case 'G':
5334 fprintf (file, "%u", GET_MODE_SIZE (GET_MODE (x)));
5335 return;
5336
5337 case 'O':
5338 {
5339 struct s390_address ad;
5340 int ret;
5341
5342 if (!MEM_P (x))
5343 {
5344 output_operand_lossage ("memory reference expected for "
5345 "'O' output modifier");
5346 return;
5347 }
5348
5349 ret = s390_decompose_address (XEXP (x, 0), &ad);
5350
5351 if (!ret
5352 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5353 || ad.indx)
5354 {
5355 output_operand_lossage ("invalid address for 'O' output modifier");
5356 return;
5357 }
5358
5359 if (ad.disp)
5360 output_addr_const (file, ad.disp);
5361 else
5362 fprintf (file, "0");
5363 }
5364 return;
5365
5366 case 'R':
5367 {
5368 struct s390_address ad;
5369 int ret;
5370
5371 if (!MEM_P (x))
5372 {
5373 output_operand_lossage ("memory reference expected for "
5374 "'R' output modifier");
5375 return;
5376 }
5377
5378 ret = s390_decompose_address (XEXP (x, 0), &ad);
5379
5380 if (!ret
5381 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5382 || ad.indx)
5383 {
5384 output_operand_lossage ("invalid address for 'R' output modifier");
5385 return;
5386 }
5387
5388 if (ad.base)
5389 fprintf (file, "%s", reg_names[REGNO (ad.base)]);
5390 else
5391 fprintf (file, "0");
5392 }
5393 return;
5394
5395 case 'S':
5396 {
5397 struct s390_address ad;
5398 int ret;
5399
5400 if (!MEM_P (x))
5401 {
5402 output_operand_lossage ("memory reference expected for "
5403 "'S' output modifier");
5404 return;
5405 }
5406 ret = s390_decompose_address (XEXP (x, 0), &ad);
5407
5408 if (!ret
5409 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5410 || ad.indx)
5411 {
5412 output_operand_lossage ("invalid address for 'S' output modifier");
5413 return;
5414 }
5415
5416 if (ad.disp)
5417 output_addr_const (file, ad.disp);
5418 else
5419 fprintf (file, "0");
5420
5421 if (ad.base)
5422 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
5423 }
5424 return;
5425
5426 case 'N':
5427 if (GET_CODE (x) == REG)
5428 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
5429 else if (GET_CODE (x) == MEM)
5430 x = change_address (x, VOIDmode, plus_constant (XEXP (x, 0), 4));
5431 else
5432 output_operand_lossage ("register or memory expression expected "
5433 "for 'N' output modifier");
5434 break;
5435
5436 case 'M':
5437 if (GET_CODE (x) == REG)
5438 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
5439 else if (GET_CODE (x) == MEM)
5440 x = change_address (x, VOIDmode, plus_constant (XEXP (x, 0), 8));
5441 else
5442 output_operand_lossage ("register or memory expression expected "
5443 "for 'M' output modifier");
5444 break;
5445
5446 case 'Y':
5447 print_shift_count_operand (file, x);
5448 return;
5449 }
5450
5451 switch (GET_CODE (x))
5452 {
5453 case REG:
5454 fprintf (file, "%s", reg_names[REGNO (x)]);
5455 break;
5456
5457 case MEM:
5458 output_address (XEXP (x, 0));
5459 break;
5460
5461 case CONST:
5462 case CODE_LABEL:
5463 case LABEL_REF:
5464 case SYMBOL_REF:
5465 output_addr_const (file, x);
5466 break;
5467
5468 case CONST_INT:
5469 if (code == 'b')
5470 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xff);
5471 else if (code == 'c')
5472 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ((INTVAL (x) & 0xff) ^ 0x80) - 0x80);
5473 else if (code == 'x')
5474 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xffff);
5475 else if (code == 'h')
5476 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
5477 else if (code == 'i')
5478 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5479 s390_extract_part (x, HImode, 0));
5480 else if (code == 'j')
5481 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5482 s390_extract_part (x, HImode, -1));
5483 else if (code == 'k')
5484 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5485 s390_extract_part (x, SImode, 0));
5486 else if (code == 'm')
5487 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5488 s390_extract_part (x, SImode, -1));
5489 else if (code == 'o')
5490 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xffffffff);
5491 else
5492 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
5493 break;
5494
5495 case CONST_DOUBLE:
5496 gcc_assert (GET_MODE (x) == VOIDmode);
5497 if (code == 'b')
5498 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xff);
5499 else if (code == 'x')
5500 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xffff);
5501 else if (code == 'h')
5502 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5503 ((CONST_DOUBLE_LOW (x) & 0xffff) ^ 0x8000) - 0x8000);
5504 else
5505 {
5506 if (code == 0)
5507 output_operand_lossage ("invalid constant - try using "
5508 "an output modifier");
5509 else
5510 output_operand_lossage ("invalid constant for output modifier '%c'",
5511 code);
5512 }
5513 break;
5514
5515 default:
5516 if (code == 0)
5517 output_operand_lossage ("invalid expression - try using "
5518 "an output modifier");
5519 else
5520 output_operand_lossage ("invalid expression for output "
5521 "modifier '%c'", code);
5522 break;
5523 }
5524 }
5525
5526 /* Target hook for assembling integer objects. We need to define it
5527 here to work around a bug in some versions of GAS, which couldn't
5528 handle values smaller than INT_MIN when printed in decimal. */
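/* For instance (illustrative value, assuming a 64-bit HOST_WIDE_INT),
   an aligned 8-byte integer of -2147483649 is emitted below as
   .quad 0xffffffff7fffffff instead of in decimal.  */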
5529
5530 static bool
5531 s390_assemble_integer (rtx x, unsigned int size, int aligned_p)
5532 {
5533 if (size == 8 && aligned_p
5534 && GET_CODE (x) == CONST_INT && INTVAL (x) < INT_MIN)
5535 {
5536 fprintf (asm_out_file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n",
5537 INTVAL (x));
5538 return true;
5539 }
5540 return default_assemble_integer (x, size, aligned_p);
5541 }
5542
5543 /* Returns true if register REGNO is used for forming
5544 a memory address in expression X. */
5545
5546 static bool
5547 reg_used_in_mem_p (int regno, rtx x)
5548 {
5549 enum rtx_code code = GET_CODE (x);
5550 int i, j;
5551 const char *fmt;
5552
5553 if (code == MEM)
5554 {
5555 if (refers_to_regno_p (regno, regno+1,
5556 XEXP (x, 0), 0))
5557 return true;
5558 }
5559 else if (code == SET
5560 && GET_CODE (SET_DEST (x)) == PC)
5561 {
5562 if (refers_to_regno_p (regno, regno+1,
5563 SET_SRC (x), 0))
5564 return true;
5565 }
5566
5567 fmt = GET_RTX_FORMAT (code);
5568 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5569 {
5570 if (fmt[i] == 'e'
5571 && reg_used_in_mem_p (regno, XEXP (x, i)))
5572 return true;
5573
5574 else if (fmt[i] == 'E')
5575 for (j = 0; j < XVECLEN (x, i); j++)
5576 if (reg_used_in_mem_p (regno, XVECEXP (x, i, j)))
5577 return true;
5578 }
5579 return false;
5580 }
5581
5582 /* Returns true if expression DEP_RTX sets an address register
5583 used by instruction INSN to address memory. */
5584
5585 static bool
5586 addr_generation_dependency_p (rtx dep_rtx, rtx insn)
5587 {
5588 rtx target, pat;
5589
5590 if (GET_CODE (dep_rtx) == INSN)
5591 dep_rtx = PATTERN (dep_rtx);
5592
5593 if (GET_CODE (dep_rtx) == SET)
5594 {
5595 target = SET_DEST (dep_rtx);
5596 if (GET_CODE (target) == STRICT_LOW_PART)
5597 target = XEXP (target, 0);
5598 while (GET_CODE (target) == SUBREG)
5599 target = SUBREG_REG (target);
5600
5601 if (GET_CODE (target) == REG)
5602 {
5603 int regno = REGNO (target);
5604
5605 if (s390_safe_attr_type (insn) == TYPE_LA)
5606 {
5607 pat = PATTERN (insn);
5608 if (GET_CODE (pat) == PARALLEL)
5609 {
5610 gcc_assert (XVECLEN (pat, 0) == 2);
5611 pat = XVECEXP (pat, 0, 0);
5612 }
5613 gcc_assert (GET_CODE (pat) == SET);
5614 return refers_to_regno_p (regno, regno+1, SET_SRC (pat), 0);
5615 }
5616 else if (get_attr_atype (insn) == ATYPE_AGEN)
5617 return reg_used_in_mem_p (regno, PATTERN (insn));
5618 }
5619 }
5620 return false;
5621 }
5622
5623 /* Return 1 if dep_insn sets a register used by insn in the agen unit. */
5624
5625 int
5626 s390_agen_dep_p (rtx dep_insn, rtx insn)
5627 {
5628 rtx dep_rtx = PATTERN (dep_insn);
5629 int i;
5630
5631 if (GET_CODE (dep_rtx) == SET
5632 && addr_generation_dependency_p (dep_rtx, insn))
5633 return 1;
5634 else if (GET_CODE (dep_rtx) == PARALLEL)
5635 {
5636 for (i = 0; i < XVECLEN (dep_rtx, 0); i++)
5637 {
5638 if (addr_generation_dependency_p (XVECEXP (dep_rtx, 0, i), insn))
5639 return 1;
5640 }
5641 }
5642 return 0;
5643 }
5644
5645
5646 /* A C statement (sans semicolon) to update the integer scheduling priority
5647 INSN_PRIORITY (INSN). Increase the priority to execute the INSN earlier;
5648 reduce the priority to execute INSN later. Do not define this macro if
5649 you do not need to adjust the scheduling priorities of insns.
5650
5651 A STD instruction should be scheduled earlier,
5652 in order to use the bypass. */
5653 static int
5654 s390_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
5655 {
5656 if (! INSN_P (insn))
5657 return priority;
5658
5659 if (s390_tune != PROCESSOR_2084_Z990
5660 && s390_tune != PROCESSOR_2094_Z9_109
5661 && s390_tune != PROCESSOR_2097_Z10
5662 && s390_tune != PROCESSOR_2817_Z196)
5663 return priority;
5664
5665 switch (s390_safe_attr_type (insn))
5666 {
5667 case TYPE_FSTOREDF:
5668 case TYPE_FSTORESF:
5669 priority = priority << 3;
5670 break;
5671 case TYPE_STORE:
5672 case TYPE_STM:
5673 priority = priority << 1;
5674 break;
5675 default:
5676 break;
5677 }
5678 return priority;
5679 }
5680
5681
5682 /* The number of instructions that can be issued per cycle. */
5683
5684 static int
5685 s390_issue_rate (void)
5686 {
5687 switch (s390_tune)
5688 {
5689 case PROCESSOR_2084_Z990:
5690 case PROCESSOR_2094_Z9_109:
5691 case PROCESSOR_2817_Z196:
5692 return 3;
5693 case PROCESSOR_2097_Z10:
5694 return 2;
5695 default:
5696 return 1;
5697 }
5698 }
5699
5700 static int
5701 s390_first_cycle_multipass_dfa_lookahead (void)
5702 {
5703 return 4;
5704 }
5705
5706 /* Annotate every literal pool reference in X by an UNSPEC_LTREF expression.
5707 Fix up MEMs as required. */
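/* Sketch of the rewrite performed here (illustrative RTL): a reference
   such as (mem (symbol_ref LC0)) with CONSTANT_POOL_ADDRESS_P set is
   turned into (mem (unspec [(symbol_ref LC0) (base_reg)] UNSPEC_LTREF)),
   which makes the dependence on the literal pool base register
   explicit.  */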
5708
5709 static void
5710 annotate_constant_pool_refs (rtx *x)
5711 {
5712 int i, j;
5713 const char *fmt;
5714
5715 gcc_assert (GET_CODE (*x) != SYMBOL_REF
5716 || !CONSTANT_POOL_ADDRESS_P (*x));
5717
5718 /* Literal pool references can only occur inside a MEM ... */
5719 if (GET_CODE (*x) == MEM)
5720 {
5721 rtx memref = XEXP (*x, 0);
5722
5723 if (GET_CODE (memref) == SYMBOL_REF
5724 && CONSTANT_POOL_ADDRESS_P (memref))
5725 {
5726 rtx base = cfun->machine->base_reg;
5727 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, memref, base),
5728 UNSPEC_LTREF);
5729
5730 *x = replace_equiv_address (*x, addr);
5731 return;
5732 }
5733
5734 if (GET_CODE (memref) == CONST
5735 && GET_CODE (XEXP (memref, 0)) == PLUS
5736 && GET_CODE (XEXP (XEXP (memref, 0), 1)) == CONST_INT
5737 && GET_CODE (XEXP (XEXP (memref, 0), 0)) == SYMBOL_REF
5738 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (memref, 0), 0)))
5739 {
5740 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (memref, 0), 1));
5741 rtx sym = XEXP (XEXP (memref, 0), 0);
5742 rtx base = cfun->machine->base_reg;
5743 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5744 UNSPEC_LTREF);
5745
5746 *x = replace_equiv_address (*x, plus_constant (addr, off));
5747 return;
5748 }
5749 }
5750
5751 /* ... or a load-address type pattern. */
5752 if (GET_CODE (*x) == SET)
5753 {
5754 rtx addrref = SET_SRC (*x);
5755
5756 if (GET_CODE (addrref) == SYMBOL_REF
5757 && CONSTANT_POOL_ADDRESS_P (addrref))
5758 {
5759 rtx base = cfun->machine->base_reg;
5760 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addrref, base),
5761 UNSPEC_LTREF);
5762
5763 SET_SRC (*x) = addr;
5764 return;
5765 }
5766
5767 if (GET_CODE (addrref) == CONST
5768 && GET_CODE (XEXP (addrref, 0)) == PLUS
5769 && GET_CODE (XEXP (XEXP (addrref, 0), 1)) == CONST_INT
5770 && GET_CODE (XEXP (XEXP (addrref, 0), 0)) == SYMBOL_REF
5771 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (addrref, 0), 0)))
5772 {
5773 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (addrref, 0), 1));
5774 rtx sym = XEXP (XEXP (addrref, 0), 0);
5775 rtx base = cfun->machine->base_reg;
5776 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5777 UNSPEC_LTREF);
5778
5779 SET_SRC (*x) = plus_constant (addr, off);
5780 return;
5781 }
5782 }
5783
5784 /* Annotate LTREL_BASE as well. */
5785 if (GET_CODE (*x) == UNSPEC
5786 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
5787 {
5788 rtx base = cfun->machine->base_reg;
5789 *x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XVECEXP (*x, 0, 0), base),
5790 UNSPEC_LTREL_BASE);
5791 return;
5792 }
5793
5794 fmt = GET_RTX_FORMAT (GET_CODE (*x));
5795 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
5796 {
5797 if (fmt[i] == 'e')
5798 {
5799 annotate_constant_pool_refs (&XEXP (*x, i));
5800 }
5801 else if (fmt[i] == 'E')
5802 {
5803 for (j = 0; j < XVECLEN (*x, i); j++)
5804 annotate_constant_pool_refs (&XVECEXP (*x, i, j));
5805 }
5806 }
5807 }
5808
5809 /* Split all branches that exceed the maximum distance.
5810 Returns true if this created a new literal pool entry. */
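/* (Background: without the long relative branch instructions of the
   newer CPUs, a relative branch only reaches +-64KB -- a 16-bit
   halfword displacement -- so an out-of-range branch is rewritten to
   load its target address from the literal pool into the return
   register and branch through that register instead.)  */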
5811
5812 static int
5813 s390_split_branches (void)
5814 {
5815 rtx temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
5816 int new_literal = 0, ret;
5817 rtx insn, pat, tmp, target;
5818 rtx *label;
5819
5820 /* We need correct insn addresses. */
5821
5822 shorten_branches (get_insns ());
5823
5824 /* Find all branches that exceed 64KB, and split them. */
5825
5826 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5827 {
5828 if (GET_CODE (insn) != JUMP_INSN)
5829 continue;
5830
5831 pat = PATTERN (insn);
5832 if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 2)
5833 pat = XVECEXP (pat, 0, 0);
5834 if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
5835 continue;
5836
5837 if (GET_CODE (SET_SRC (pat)) == LABEL_REF)
5838 {
5839 label = &SET_SRC (pat);
5840 }
5841 else if (GET_CODE (SET_SRC (pat)) == IF_THEN_ELSE)
5842 {
5843 if (GET_CODE (XEXP (SET_SRC (pat), 1)) == LABEL_REF)
5844 label = &XEXP (SET_SRC (pat), 1);
5845 else if (GET_CODE (XEXP (SET_SRC (pat), 2)) == LABEL_REF)
5846 label = &XEXP (SET_SRC (pat), 2);
5847 else
5848 continue;
5849 }
5850 else
5851 continue;
5852
5853 if (get_attr_length (insn) <= 4)
5854 continue;
5855
5856 /* We are going to use the return register as scratch register,
5857 so make sure it will be saved/restored by the prologue/epilogue. */
5858 cfun_frame_layout.save_return_addr_p = 1;
5859
5860 if (!flag_pic)
5861 {
5862 new_literal = 1;
5863 tmp = force_const_mem (Pmode, *label);
5864 tmp = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, tmp), insn);
5865 INSN_ADDRESSES_NEW (tmp, -1);
5866 annotate_constant_pool_refs (&PATTERN (tmp));
5867
5868 target = temp_reg;
5869 }
5870 else
5871 {
5872 new_literal = 1;
5873 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, *label),
5874 UNSPEC_LTREL_OFFSET);
5875 target = gen_rtx_CONST (Pmode, target);
5876 target = force_const_mem (Pmode, target);
5877 tmp = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, target), insn);
5878 INSN_ADDRESSES_NEW (tmp, -1);
5879 annotate_constant_pool_refs (&PATTERN (tmp));
5880
5881 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XEXP (target, 0),
5882 cfun->machine->base_reg),
5883 UNSPEC_LTREL_BASE);
5884 target = gen_rtx_PLUS (Pmode, temp_reg, target);
5885 }
5886
5887 ret = validate_change (insn, label, target, 0);
5888 gcc_assert (ret);
5889 }
5890
5891 return new_literal;
5892 }
5893
5894
5895 /* Find an annotated literal pool symbol referenced in RTX X,
5896 and store it at REF. Will abort if X contains references to
5897 more than one such pool symbol; multiple references to the same
5898 symbol are allowed, however.
5899
5900 The rtx pointed to by REF must be initialized to NULL_RTX
5901 by the caller before calling this routine. */
5902
5903 static void
5904 find_constant_pool_ref (rtx x, rtx *ref)
5905 {
5906 int i, j;
5907 const char *fmt;
5908
5909 /* Ignore LTREL_BASE references. */
5910 if (GET_CODE (x) == UNSPEC
5911 && XINT (x, 1) == UNSPEC_LTREL_BASE)
5912 return;
5913 /* Likewise POOL_ENTRY insns. */
5914 if (GET_CODE (x) == UNSPEC_VOLATILE
5915 && XINT (x, 1) == UNSPECV_POOL_ENTRY)
5916 return;
5917
5918 gcc_assert (GET_CODE (x) != SYMBOL_REF
5919 || !CONSTANT_POOL_ADDRESS_P (x));
5920
5921 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_LTREF)
5922 {
5923 rtx sym = XVECEXP (x, 0, 0);
5924 gcc_assert (GET_CODE (sym) == SYMBOL_REF
5925 && CONSTANT_POOL_ADDRESS_P (sym));
5926
5927 if (*ref == NULL_RTX)
5928 *ref = sym;
5929 else
5930 gcc_assert (*ref == sym);
5931
5932 return;
5933 }
5934
5935 fmt = GET_RTX_FORMAT (GET_CODE (x));
5936 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5937 {
5938 if (fmt[i] == 'e')
5939 {
5940 find_constant_pool_ref (XEXP (x, i), ref);
5941 }
5942 else if (fmt[i] == 'E')
5943 {
5944 for (j = 0; j < XVECLEN (x, i); j++)
5945 find_constant_pool_ref (XVECEXP (x, i, j), ref);
5946 }
5947 }
5948 }
5949
5950 /* Replace every reference to the annotated literal pool
5951 symbol REF in X by its base plus OFFSET. */
5952
5953 static void
5954 replace_constant_pool_ref (rtx *x, rtx ref, rtx offset)
5955 {
5956 int i, j;
5957 const char *fmt;
5958
5959 gcc_assert (*x != ref);
5960
5961 if (GET_CODE (*x) == UNSPEC
5962 && XINT (*x, 1) == UNSPEC_LTREF
5963 && XVECEXP (*x, 0, 0) == ref)
5964 {
5965 *x = gen_rtx_PLUS (Pmode, XVECEXP (*x, 0, 1), offset);
5966 return;
5967 }
5968
5969 if (GET_CODE (*x) == PLUS
5970 && GET_CODE (XEXP (*x, 1)) == CONST_INT
5971 && GET_CODE (XEXP (*x, 0)) == UNSPEC
5972 && XINT (XEXP (*x, 0), 1) == UNSPEC_LTREF
5973 && XVECEXP (XEXP (*x, 0), 0, 0) == ref)
5974 {
5975 rtx addr = gen_rtx_PLUS (Pmode, XVECEXP (XEXP (*x, 0), 0, 1), offset);
5976 *x = plus_constant (addr, INTVAL (XEXP (*x, 1)));
5977 return;
5978 }
5979
5980 fmt = GET_RTX_FORMAT (GET_CODE (*x));
5981 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
5982 {
5983 if (fmt[i] == 'e')
5984 {
5985 replace_constant_pool_ref (&XEXP (*x, i), ref, offset);
5986 }
5987 else if (fmt[i] == 'E')
5988 {
5989 for (j = 0; j < XVECLEN (*x, i); j++)
5990 replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, offset);
5991 }
5992 }
5993 }
5994
5995 /* Check whether X contains an UNSPEC_LTREL_BASE.
5996 Return its constant pool symbol if found, NULL_RTX otherwise. */
5997
5998 static rtx
5999 find_ltrel_base (rtx x)
6000 {
6001 int i, j;
6002 const char *fmt;
6003
6004 if (GET_CODE (x) == UNSPEC
6005 && XINT (x, 1) == UNSPEC_LTREL_BASE)
6006 return XVECEXP (x, 0, 0);
6007
6008 fmt = GET_RTX_FORMAT (GET_CODE (x));
6009 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6010 {
6011 if (fmt[i] == 'e')
6012 {
6013 rtx fnd = find_ltrel_base (XEXP (x, i));
6014 if (fnd)
6015 return fnd;
6016 }
6017 else if (fmt[i] == 'E')
6018 {
6019 for (j = 0; j < XVECLEN (x, i); j++)
6020 {
6021 rtx fnd = find_ltrel_base (XVECEXP (x, i, j));
6022 if (fnd)
6023 return fnd;
6024 }
6025 }
6026 }
6027
6028 return NULL_RTX;
6029 }
6030
6031 /* Replace any occurrence of UNSPEC_LTREL_BASE in X with its base. */
6032
6033 static void
6034 replace_ltrel_base (rtx *x)
6035 {
6036 int i, j;
6037 const char *fmt;
6038
6039 if (GET_CODE (*x) == UNSPEC
6040 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
6041 {
6042 *x = XVECEXP (*x, 0, 1);
6043 return;
6044 }
6045
6046 fmt = GET_RTX_FORMAT (GET_CODE (*x));
6047 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
6048 {
6049 if (fmt[i] == 'e')
6050 {
6051 replace_ltrel_base (&XEXP (*x, i));
6052 }
6053 else if (fmt[i] == 'E')
6054 {
6055 for (j = 0; j < XVECLEN (*x, i); j++)
6056 replace_ltrel_base (&XVECEXP (*x, i, j));
6057 }
6058 }
6059 }
6060
6061
6062 /* We keep a list of constants which we have to add to internal
6063 constant tables in the middle of large functions. */
6064
6065 #define NR_C_MODES 11
6066 enum machine_mode constant_modes[NR_C_MODES] =
6067 {
6068 TFmode, TImode, TDmode,
6069 DFmode, DImode, DDmode,
6070 SFmode, SImode, SDmode,
6071 HImode,
6072 QImode
6073 };
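/* The modes above are deliberately listed from largest to smallest;
   s390_dump_pool relies on this ordering to emit the constants in
   decreasing order of alignment requirement and thus keep every entry
   naturally aligned.  */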
6074
6075 struct constant
6076 {
6077 struct constant *next;
6078 rtx value;
6079 rtx label;
6080 };
6081
6082 struct constant_pool
6083 {
6084 struct constant_pool *next;
6085 rtx first_insn;
6086 rtx pool_insn;
6087 bitmap insns;
6088 rtx emit_pool_after;
6089
6090 struct constant *constants[NR_C_MODES];
6091 struct constant *execute;
6092 rtx label;
6093 int size;
6094 };
6095
6096 /* Allocate new constant_pool structure. */
6097
6098 static struct constant_pool *
6099 s390_alloc_pool (void)
6100 {
6101 struct constant_pool *pool;
6102 int i;
6103
6104 pool = (struct constant_pool *) xmalloc (sizeof *pool);
6105 pool->next = NULL;
6106 for (i = 0; i < NR_C_MODES; i++)
6107 pool->constants[i] = NULL;
6108
6109 pool->execute = NULL;
6110 pool->label = gen_label_rtx ();
6111 pool->first_insn = NULL_RTX;
6112 pool->pool_insn = NULL_RTX;
6113 pool->insns = BITMAP_ALLOC (NULL);
6114 pool->size = 0;
6115 pool->emit_pool_after = NULL_RTX;
6116
6117 return pool;
6118 }
6119
6120 /* Create new constant pool covering instructions starting at INSN
6121 and chain it to the end of POOL_LIST. */
6122
6123 static struct constant_pool *
6124 s390_start_pool (struct constant_pool **pool_list, rtx insn)
6125 {
6126 struct constant_pool *pool, **prev;
6127
6128 pool = s390_alloc_pool ();
6129 pool->first_insn = insn;
6130
6131 for (prev = pool_list; *prev; prev = &(*prev)->next)
6132 ;
6133 *prev = pool;
6134
6135 return pool;
6136 }
6137
6138 /* End range of instructions covered by POOL at INSN and emit
6139 placeholder insn representing the pool. */
6140
6141 static void
6142 s390_end_pool (struct constant_pool *pool, rtx insn)
6143 {
6144 rtx pool_size = GEN_INT (pool->size + 8 /* alignment slop */);
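  /* The extra 8 bytes conservatively account for the padding that
     gen_pool_align may add once the pool is dumped (see
     s390_dump_pool).  */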
6145
6146 if (!insn)
6147 insn = get_last_insn ();
6148
6149 pool->pool_insn = emit_insn_after (gen_pool (pool_size), insn);
6150 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6151 }
6152
6153 /* Add INSN to the list of insns covered by POOL. */
6154
6155 static void
6156 s390_add_pool_insn (struct constant_pool *pool, rtx insn)
6157 {
6158 bitmap_set_bit (pool->insns, INSN_UID (insn));
6159 }
6160
6161 /* Return pool out of POOL_LIST that covers INSN. */
6162
6163 static struct constant_pool *
6164 s390_find_pool (struct constant_pool *pool_list, rtx insn)
6165 {
6166 struct constant_pool *pool;
6167
6168 for (pool = pool_list; pool; pool = pool->next)
6169 if (bitmap_bit_p (pool->insns, INSN_UID (insn)))
6170 break;
6171
6172 return pool;
6173 }
6174
6175 /* Add constant VAL of mode MODE to the constant pool POOL. */
6176
6177 static void
6178 s390_add_constant (struct constant_pool *pool, rtx val, enum machine_mode mode)
6179 {
6180 struct constant *c;
6181 int i;
6182
6183 for (i = 0; i < NR_C_MODES; i++)
6184 if (constant_modes[i] == mode)
6185 break;
6186 gcc_assert (i != NR_C_MODES);
6187
6188 for (c = pool->constants[i]; c != NULL; c = c->next)
6189 if (rtx_equal_p (val, c->value))
6190 break;
6191
6192 if (c == NULL)
6193 {
6194 c = (struct constant *) xmalloc (sizeof *c);
6195 c->value = val;
6196 c->label = gen_label_rtx ();
6197 c->next = pool->constants[i];
6198 pool->constants[i] = c;
6199 pool->size += GET_MODE_SIZE (mode);
6200 }
6201 }
6202
6203 /* Return an rtx that represents the offset of X from the start of
6204 pool POOL. */
6205
6206 static rtx
6207 s390_pool_offset (struct constant_pool *pool, rtx x)
6208 {
6209 rtx label;
6210
6211 label = gen_rtx_LABEL_REF (GET_MODE (x), pool->label);
6212 x = gen_rtx_UNSPEC (GET_MODE (x), gen_rtvec (2, x, label),
6213 UNSPEC_POOL_OFFSET);
6214 return gen_rtx_CONST (GET_MODE (x), x);
6215 }
6216
6217 /* Find constant VAL of mode MODE in the constant pool POOL.
6218 Return an RTX describing the distance from the start of
6219 the pool to the location of the new constant. */
6220
6221 static rtx
6222 s390_find_constant (struct constant_pool *pool, rtx val,
6223 enum machine_mode mode)
6224 {
6225 struct constant *c;
6226 int i;
6227
6228 for (i = 0; i < NR_C_MODES; i++)
6229 if (constant_modes[i] == mode)
6230 break;
6231 gcc_assert (i != NR_C_MODES);
6232
6233 for (c = pool->constants[i]; c != NULL; c = c->next)
6234 if (rtx_equal_p (val, c->value))
6235 break;
6236
6237 gcc_assert (c);
6238
6239 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
6240 }
6241
6242 /* Check whether INSN is an execute. Return the label_ref to its
6243 execute target template if so, NULL_RTX otherwise. */
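/* (An "execute" insn here is one built around the EXECUTE instruction,
   which runs a single target instruction stored out of line; on
   non-zarch CPUs the target template itself is placed in the literal
   pool, see s390_add_execute.)  */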
6244
6245 static rtx
6246 s390_execute_label (rtx insn)
6247 {
6248 if (GET_CODE (insn) == INSN
6249 && GET_CODE (PATTERN (insn)) == PARALLEL
6250 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == UNSPEC
6251 && XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_EXECUTE)
6252 return XVECEXP (XVECEXP (PATTERN (insn), 0, 0), 0, 2);
6253
6254 return NULL_RTX;
6255 }
6256
6257 /* Add execute target for INSN to the constant pool POOL. */
6258
6259 static void
6260 s390_add_execute (struct constant_pool *pool, rtx insn)
6261 {
6262 struct constant *c;
6263
6264 for (c = pool->execute; c != NULL; c = c->next)
6265 if (INSN_UID (insn) == INSN_UID (c->value))
6266 break;
6267
6268 if (c == NULL)
6269 {
6270 c = (struct constant *) xmalloc (sizeof *c);
6271 c->value = insn;
6272 c->label = gen_label_rtx ();
6273 c->next = pool->execute;
6274 pool->execute = c;
6275 pool->size += 6;
6276 }
6277 }
6278
6279 /* Find execute target for INSN in the constant pool POOL.
6280 Return an RTX describing the distance from the start of
6281 the pool to the location of the execute target. */
6282
6283 static rtx
6284 s390_find_execute (struct constant_pool *pool, rtx insn)
6285 {
6286 struct constant *c;
6287
6288 for (c = pool->execute; c != NULL; c = c->next)
6289 if (INSN_UID (insn) == INSN_UID (c->value))
6290 break;
6291
6292 gcc_assert (c);
6293
6294 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
6295 }
6296
6297 /* For an execute INSN, extract the execute target template. */
6298
6299 static rtx
6300 s390_execute_target (rtx insn)
6301 {
6302 rtx pattern = PATTERN (insn);
6303 gcc_assert (s390_execute_label (insn));
6304
6305 if (XVECLEN (pattern, 0) == 2)
6306 {
6307 pattern = copy_rtx (XVECEXP (pattern, 0, 1));
6308 }
6309 else
6310 {
6311 rtvec vec = rtvec_alloc (XVECLEN (pattern, 0) - 1);
6312 int i;
6313
6314 for (i = 0; i < XVECLEN (pattern, 0) - 1; i++)
6315 RTVEC_ELT (vec, i) = copy_rtx (XVECEXP (pattern, 0, i + 1));
6316
6317 pattern = gen_rtx_PARALLEL (VOIDmode, vec);
6318 }
6319
6320 return pattern;
6321 }
6322
6323 /* Indicate that INSN cannot be duplicated. This is the case for
6324 execute insns that carry a unique label. */
6325
6326 static bool
6327 s390_cannot_copy_insn_p (rtx insn)
6328 {
6329 rtx label = s390_execute_label (insn);
6330 return label && label != const0_rtx;
6331 }
6332
6333 /* Dump out the constants in POOL. If REMOTE_LABEL is true,
6334 do not emit the pool base label. */
6335
6336 static void
6337 s390_dump_pool (struct constant_pool *pool, bool remote_label)
6338 {
6339 struct constant *c;
6340 rtx insn = pool->pool_insn;
6341 int i;
6342
6343 /* Switch to rodata section. */
6344 if (TARGET_CPU_ZARCH)
6345 {
6346 insn = emit_insn_after (gen_pool_section_start (), insn);
6347 INSN_ADDRESSES_NEW (insn, -1);
6348 }
6349
6350 /* Ensure minimum pool alignment. */
6351 if (TARGET_CPU_ZARCH)
6352 insn = emit_insn_after (gen_pool_align (GEN_INT (8)), insn);
6353 else
6354 insn = emit_insn_after (gen_pool_align (GEN_INT (4)), insn);
6355 INSN_ADDRESSES_NEW (insn, -1);
6356
6357 /* Emit pool base label. */
6358 if (!remote_label)
6359 {
6360 insn = emit_label_after (pool->label, insn);
6361 INSN_ADDRESSES_NEW (insn, -1);
6362 }
6363
6364 /* Dump constants in descending alignment requirement order,
6365 ensuring proper alignment for every constant. */
6366 for (i = 0; i < NR_C_MODES; i++)
6367 for (c = pool->constants[i]; c; c = c->next)
6368 {
6369 /* Convert UNSPEC_LTREL_OFFSET unspecs to pool-relative references. */
6370 rtx value = copy_rtx (c->value);
6371 if (GET_CODE (value) == CONST
6372 && GET_CODE (XEXP (value, 0)) == UNSPEC
6373 && XINT (XEXP (value, 0), 1) == UNSPEC_LTREL_OFFSET
6374 && XVECLEN (XEXP (value, 0), 0) == 1)
6375 value = s390_pool_offset (pool, XVECEXP (XEXP (value, 0), 0, 0));
6376
6377 insn = emit_label_after (c->label, insn);
6378 INSN_ADDRESSES_NEW (insn, -1);
6379
6380 value = gen_rtx_UNSPEC_VOLATILE (constant_modes[i],
6381 gen_rtvec (1, value),
6382 UNSPECV_POOL_ENTRY);
6383 insn = emit_insn_after (value, insn);
6384 INSN_ADDRESSES_NEW (insn, -1);
6385 }
6386
6387 /* Ensure minimum alignment for instructions. */
6388 insn = emit_insn_after (gen_pool_align (GEN_INT (2)), insn);
6389 INSN_ADDRESSES_NEW (insn, -1);
6390
6391 /* Output in-pool execute template insns. */
6392 for (c = pool->execute; c; c = c->next)
6393 {
6394 insn = emit_label_after (c->label, insn);
6395 INSN_ADDRESSES_NEW (insn, -1);
6396
6397 insn = emit_insn_after (s390_execute_target (c->value), insn);
6398 INSN_ADDRESSES_NEW (insn, -1);
6399 }
6400
6401 /* Switch back to previous section. */
6402 if (TARGET_CPU_ZARCH)
6403 {
6404 insn = emit_insn_after (gen_pool_section_end (), insn);
6405 INSN_ADDRESSES_NEW (insn, -1);
6406 }
6407
6408 insn = emit_barrier_after (insn);
6409 INSN_ADDRESSES_NEW (insn, -1);
6410
6411 /* Remove placeholder insn. */
6412 remove_insn (pool->pool_insn);
6413 }
6414
6415 /* Free all memory used by POOL. */
6416
6417 static void
6418 s390_free_pool (struct constant_pool *pool)
6419 {
6420 struct constant *c, *next;
6421 int i;
6422
6423 for (i = 0; i < NR_C_MODES; i++)
6424 for (c = pool->constants[i]; c; c = next)
6425 {
6426 next = c->next;
6427 free (c);
6428 }
6429
6430 for (c = pool->execute; c; c = next)
6431 {
6432 next = c->next;
6433 free (c);
6434 }
6435
6436 BITMAP_FREE (pool->insns);
6437 free (pool);
6438 }
6439
6440
6441 /* Collect main literal pool. Return NULL on overflow. */
6442
6443 static struct constant_pool *
6444 s390_mainpool_start (void)
6445 {
6446 struct constant_pool *pool;
6447 rtx insn;
6448
6449 pool = s390_alloc_pool ();
6450
6451 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6452 {
6453 if (GET_CODE (insn) == INSN
6454 && GET_CODE (PATTERN (insn)) == SET
6455 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC_VOLATILE
6456 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPECV_MAIN_POOL)
6457 {
6458 gcc_assert (!pool->pool_insn);
6459 pool->pool_insn = insn;
6460 }
6461
6462 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
6463 {
6464 s390_add_execute (pool, insn);
6465 }
6466 else if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6467 {
6468 rtx pool_ref = NULL_RTX;
6469 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6470 if (pool_ref)
6471 {
6472 rtx constant = get_pool_constant (pool_ref);
6473 enum machine_mode mode = get_pool_mode (pool_ref);
6474 s390_add_constant (pool, constant, mode);
6475 }
6476 }
6477
6478 /* If hot/cold partitioning is enabled we have to make sure that
6479 the literal pool is emitted in the same section where the
6480 initialization of the literal pool base pointer takes place.
6481 emit_pool_after is only used in the non-overflow case on non-Z
6482 CPUs where we can emit the literal pool at the end of the
6483 function body within the text section. */
6484 if (NOTE_P (insn)
6485 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS
6486 && !pool->emit_pool_after)
6487 pool->emit_pool_after = PREV_INSN (insn);
6488 }
6489
6490 gcc_assert (pool->pool_insn || pool->size == 0);
6491
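/* A single main pool only works while every constant stays reachable
   from the pool base register via a 12-bit unsigned displacement,
   i.e. within 4096 bytes; larger pools are chunkified instead.  */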
6492 if (pool->size >= 4096)
6493 {
6494 /* We're going to chunkify the pool, so remove the main
6495 pool placeholder insn. */
6496 remove_insn (pool->pool_insn);
6497
6498 s390_free_pool (pool);
6499 pool = NULL;
6500 }
6501
6502 /* If the function ends with the section where the literal pool
6503 should be emitted, set the marker to its end. */
6504 if (pool && !pool->emit_pool_after)
6505 pool->emit_pool_after = get_last_insn ();
6506
6507 return pool;
6508 }
6509
6510 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6511 Modify the current function to output the pool constants as well as
6512 the pool register setup instruction. */
6513
6514 static void
6515 s390_mainpool_finish (struct constant_pool *pool)
6516 {
6517 rtx base_reg = cfun->machine->base_reg;
6518 rtx insn;
6519
6520 /* If the pool is empty, we're done. */
6521 if (pool->size == 0)
6522 {
6523 /* We don't actually need a base register after all. */
6524 cfun->machine->base_reg = NULL_RTX;
6525
6526 if (pool->pool_insn)
6527 remove_insn (pool->pool_insn);
6528 s390_free_pool (pool);
6529 return;
6530 }
6531
6532 /* We need correct insn addresses. */
6533 shorten_branches (get_insns ());
6534
6535 /* On zSeries, we use a LARL to load the pool register. The pool is
6536 located in the .rodata section, so we emit it after the function. */
6537 if (TARGET_CPU_ZARCH)
6538 {
6539 insn = gen_main_base_64 (base_reg, pool->label);
6540 insn = emit_insn_after (insn, pool->pool_insn);
6541 INSN_ADDRESSES_NEW (insn, -1);
6542 remove_insn (pool->pool_insn);
6543
6544 insn = get_last_insn ();
6545 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6546 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6547
6548 s390_dump_pool (pool, 0);
6549 }
6550
6551 /* On S/390, if the total size of the function's code plus literal pool
6552 does not exceed 4096 bytes, we use BASR to set up a function base
6553 pointer, and emit the literal pool at the end of the function. */
6554 else if (INSN_ADDRESSES (INSN_UID (pool->emit_pool_after))
6555 + pool->size + 8 /* alignment slop */ < 4096)
6556 {
6557 insn = gen_main_base_31_small (base_reg, pool->label);
6558 insn = emit_insn_after (insn, pool->pool_insn);
6559 INSN_ADDRESSES_NEW (insn, -1);
6560 remove_insn (pool->pool_insn);
6561
6562 insn = emit_label_after (pool->label, insn);
6563 INSN_ADDRESSES_NEW (insn, -1);
6564
6565 /* emit_pool_after will be set by s390_mainpool_start to the
6566 last insn of the section where the literal pool should be
6567 emitted. */
6568 insn = pool->emit_pool_after;
6569
6570 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6571 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6572
6573 s390_dump_pool (pool, 1);
6574 }
6575
6576 /* Otherwise, we emit an inline literal pool and use BASR to branch
6577 over it, setting up the pool register at the same time. */
6578 else
6579 {
6580 rtx pool_end = gen_label_rtx ();
6581
6582 insn = gen_main_base_31_large (base_reg, pool->label, pool_end);
6583 insn = emit_insn_after (insn, pool->pool_insn);
6584 INSN_ADDRESSES_NEW (insn, -1);
6585 remove_insn (pool->pool_insn);
6586
6587 insn = emit_label_after (pool->label, insn);
6588 INSN_ADDRESSES_NEW (insn, -1);
6589
6590 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6591 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6592
6593 insn = emit_label_after (pool_end, pool->pool_insn);
6594 INSN_ADDRESSES_NEW (insn, -1);
6595
6596 s390_dump_pool (pool, 1);
6597 }
6598
6599
6600 /* Replace all literal pool references. */
6601
6602 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6603 {
6604 if (INSN_P (insn))
6605 replace_ltrel_base (&PATTERN (insn));
6606
6607 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6608 {
6609 rtx addr, pool_ref = NULL_RTX;
6610 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6611 if (pool_ref)
6612 {
6613 if (s390_execute_label (insn))
6614 addr = s390_find_execute (pool, insn);
6615 else
6616 addr = s390_find_constant (pool, get_pool_constant (pool_ref),
6617 get_pool_mode (pool_ref));
6618
6619 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
6620 INSN_CODE (insn) = -1;
6621 }
6622 }
6623 }
6624
6625
6626 /* Free the pool. */
6627 s390_free_pool (pool);
6628 }
6629
6630 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6631 We have decided we cannot use this pool, so revert all changes
6632 to the current function that were done by s390_mainpool_start. */
6633 static void
6634 s390_mainpool_cancel (struct constant_pool *pool)
6635 {
6636 /* We didn't actually change the instruction stream, so simply
6637 free the pool memory. */
6638 s390_free_pool (pool);
6639 }
6640
6641
6642 /* Chunkify the literal pool. */
6643
6644 #define S390_POOL_CHUNK_MIN 0xc00
6645 #define S390_POOL_CHUNK_MAX 0xe00
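/* Both limits stay well below the 4096 byte displacement range on
   purpose: the slack is a conservative allowance for alignment padding
   and for the base register reload insns inserted between chunks, not
   a hard architectural bound.  */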
6646
6647 static struct constant_pool *
6648 s390_chunkify_start (void)
6649 {
6650 struct constant_pool *curr_pool = NULL, *pool_list = NULL;
6651 int extra_size = 0;
6652 bitmap far_labels;
6653 rtx pending_ltrel = NULL_RTX;
6654 rtx insn;
6655
6656 rtx (*gen_reload_base) (rtx, rtx) =
6657 TARGET_CPU_ZARCH? gen_reload_base_64 : gen_reload_base_31;
6658
6659
6660 /* We need correct insn addresses. */
6661
6662 shorten_branches (get_insns ());
6663
6664 /* Scan all insns and move literals to pool chunks. */
6665
6666 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6667 {
6668 bool section_switch_p = false;
6669
6670 /* Check for pending LTREL_BASE. */
6671 if (INSN_P (insn))
6672 {
6673 rtx ltrel_base = find_ltrel_base (PATTERN (insn));
6674 if (ltrel_base)
6675 {
6676 gcc_assert (ltrel_base == pending_ltrel);
6677 pending_ltrel = NULL_RTX;
6678 }
6679 }
6680
6681 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
6682 {
6683 if (!curr_pool)
6684 curr_pool = s390_start_pool (&pool_list, insn);
6685
6686 s390_add_execute (curr_pool, insn);
6687 s390_add_pool_insn (curr_pool, insn);
6688 }
6689 else if (GET_CODE (insn) == INSN || CALL_P (insn))
6690 {
6691 rtx pool_ref = NULL_RTX;
6692 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6693 if (pool_ref)
6694 {
6695 rtx constant = get_pool_constant (pool_ref);
6696 enum machine_mode mode = get_pool_mode (pool_ref);
6697
6698 if (!curr_pool)
6699 curr_pool = s390_start_pool (&pool_list, insn);
6700
6701 s390_add_constant (curr_pool, constant, mode);
6702 s390_add_pool_insn (curr_pool, insn);
6703
6704 /* Don't split the pool chunk between a LTREL_OFFSET load
6705 and the corresponding LTREL_BASE. */
6706 if (GET_CODE (constant) == CONST
6707 && GET_CODE (XEXP (constant, 0)) == UNSPEC
6708 && XINT (XEXP (constant, 0), 1) == UNSPEC_LTREL_OFFSET)
6709 {
6710 gcc_assert (!pending_ltrel);
6711 pending_ltrel = pool_ref;
6712 }
6713 }
6714 /* Make sure we do not split between a call and its
6715 corresponding CALL_ARG_LOCATION note. */
6716 if (CALL_P (insn))
6717 {
6718 rtx next = NEXT_INSN (insn);
6719 if (next && NOTE_P (next)
6720 && NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION)
6721 continue;
6722 }
6723 }
6724
6725 if (GET_CODE (insn) == JUMP_INSN || GET_CODE (insn) == CODE_LABEL)
6726 {
6727 if (curr_pool)
6728 s390_add_pool_insn (curr_pool, insn);
6729 /* An LTREL_BASE must follow within the same basic block. */
6730 gcc_assert (!pending_ltrel);
6731 }
6732
6733 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
6734 section_switch_p = true;
6735
6736 if (!curr_pool
6737 || INSN_ADDRESSES_SIZE () <= (size_t) INSN_UID (insn)
6738 || INSN_ADDRESSES (INSN_UID (insn)) == -1)
6739 continue;
6740
6741 if (TARGET_CPU_ZARCH)
6742 {
6743 if (curr_pool->size < S390_POOL_CHUNK_MAX)
6744 continue;
6745
6746 s390_end_pool (curr_pool, NULL_RTX);
6747 curr_pool = NULL;
6748 }
6749 else
6750 {
6751 int chunk_size = INSN_ADDRESSES (INSN_UID (insn))
6752 - INSN_ADDRESSES (INSN_UID (curr_pool->first_insn))
6753 + extra_size;
6754
6755 /* We will later have to insert base register reload insns.
6756 Those will have an effect on code size, which we need to
6757 consider here. This calculation makes rather pessimistic
6758 worst-case assumptions. */
6759 if (GET_CODE (insn) == CODE_LABEL)
6760 extra_size += 6;
6761
6762 if (chunk_size < S390_POOL_CHUNK_MIN
6763 && curr_pool->size < S390_POOL_CHUNK_MIN
6764 && !section_switch_p)
6765 continue;
6766
6767 /* Pool chunks can only be inserted after BARRIERs ... */
6768 if (GET_CODE (insn) == BARRIER)
6769 {
6770 s390_end_pool (curr_pool, insn);
6771 curr_pool = NULL;
6772 extra_size = 0;
6773 }
6774
6775 /* ... so if we don't find one in time, create one. */
6776 else if (chunk_size > S390_POOL_CHUNK_MAX
6777 || curr_pool->size > S390_POOL_CHUNK_MAX
6778 || section_switch_p)
6779 {
6780 rtx label, jump, barrier;
6781
6782 if (!section_switch_p)
6783 {
6784 /* We can insert the barrier only after a 'real' insn. */
6785 if (GET_CODE (insn) != INSN && GET_CODE (insn) != CALL_INSN)
6786 continue;
6787 if (get_attr_length (insn) == 0)
6788 continue;
6789 /* Don't separate LTREL_BASE from the corresponding
6790 LTREL_OFFSET load. */
6791 if (pending_ltrel)
6792 continue;
6793 }
6794 else
6795 {
6796 gcc_assert (!pending_ltrel);
6797
6798 /* The old pool has to end before the section switch
6799 note in order to make it part of the current
6800 section. */
6801 insn = PREV_INSN (insn);
6802 }
6803
6804 label = gen_label_rtx ();
6805 jump = emit_jump_insn_after (gen_jump (label), insn);
6806 barrier = emit_barrier_after (jump);
6807 insn = emit_label_after (label, barrier);
6808 JUMP_LABEL (jump) = label;
6809 LABEL_NUSES (label) = 1;
6810
6811 INSN_ADDRESSES_NEW (jump, -1);
6812 INSN_ADDRESSES_NEW (barrier, -1);
6813 INSN_ADDRESSES_NEW (insn, -1);
6814
6815 s390_end_pool (curr_pool, barrier);
6816 curr_pool = NULL;
6817 extra_size = 0;
6818 }
6819 }
6820 }
6821
6822 if (curr_pool)
6823 s390_end_pool (curr_pool, NULL_RTX);
6824 gcc_assert (!pending_ltrel);
6825
6826 /* Find all labels that are branched into
6827 from an insn belonging to a different chunk. */
6828
6829 far_labels = BITMAP_ALLOC (NULL);
6830
6831 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6832 {
6833 /* Labels marked with LABEL_PRESERVE_P can be target
6834 of non-local jumps, so we have to mark them.
6835 The same holds for named labels.
6836
6837 Don't do that, however, if it is the label before
6838 a jump table. */
6839
6840 if (GET_CODE (insn) == CODE_LABEL
6841 && (LABEL_PRESERVE_P (insn) || LABEL_NAME (insn)))
6842 {
6843 rtx vec_insn = next_real_insn (insn);
6844 rtx vec_pat = vec_insn && GET_CODE (vec_insn) == JUMP_INSN ?
6845 PATTERN (vec_insn) : NULL_RTX;
6846 if (!vec_pat
6847 || !(GET_CODE (vec_pat) == ADDR_VEC
6848 || GET_CODE (vec_pat) == ADDR_DIFF_VEC))
6849 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (insn));
6850 }
6851
6852 /* If we have a direct jump (conditional or unconditional)
6853 or a casesi jump, check all potential targets. */
6854 else if (GET_CODE (insn) == JUMP_INSN)
6855 {
6856 rtx pat = PATTERN (insn);
6857 if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 2)
6858 pat = XVECEXP (pat, 0, 0);
6859
6860 if (GET_CODE (pat) == SET)
6861 {
6862 rtx label = JUMP_LABEL (insn);
6863 if (label)
6864 {
6865 if (s390_find_pool (pool_list, label)
6866 != s390_find_pool (pool_list, insn))
6867 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
6868 }
6869 }
6870 else if (GET_CODE (pat) == PARALLEL
6871 && XVECLEN (pat, 0) == 2
6872 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
6873 && GET_CODE (XVECEXP (pat, 0, 1)) == USE
6874 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == LABEL_REF)
6875 {
6876 /* Find the jump table used by this casesi jump. */
6877 rtx vec_label = XEXP (XEXP (XVECEXP (pat, 0, 1), 0), 0);
6878 rtx vec_insn = next_real_insn (vec_label);
6879 rtx vec_pat = vec_insn && GET_CODE (vec_insn) == JUMP_INSN ?
6880 PATTERN (vec_insn) : NULL_RTX;
6881 if (vec_pat
6882 && (GET_CODE (vec_pat) == ADDR_VEC
6883 || GET_CODE (vec_pat) == ADDR_DIFF_VEC))
6884 {
6885 int i, diff_p = GET_CODE (vec_pat) == ADDR_DIFF_VEC;
6886
6887 for (i = 0; i < XVECLEN (vec_pat, diff_p); i++)
6888 {
6889 rtx label = XEXP (XVECEXP (vec_pat, diff_p, i), 0);
6890
6891 if (s390_find_pool (pool_list, label)
6892 != s390_find_pool (pool_list, insn))
6893 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
6894 }
6895 }
6896 }
6897 }
6898 }
6899
6900 /* Insert base register reload insns before every pool. */
6901
6902 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
6903 {
6904 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
6905 curr_pool->label);
6906 rtx insn = curr_pool->first_insn;
6907 INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
6908 }
6909
6910 /* Insert base register reload insns at every far label. */
6911
6912 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6913 if (GET_CODE (insn) == CODE_LABEL
6914 && bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
6915 {
6916 struct constant_pool *pool = s390_find_pool (pool_list, insn);
6917 if (pool)
6918 {
6919 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
6920 pool->label);
6921 INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
6922 }
6923 }
6924
6925
6926 BITMAP_FREE (far_labels);
6927
6928
6929 /* Recompute insn addresses. */
6930
6931 init_insn_lengths ();
6932 shorten_branches (get_insns ());
6933
6934 return pool_list;
6935 }
6936
6937 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
6938 After we have decided to use this list, finish implementing
6939 all changes to the current function as required. */
6940
6941 static void
6942 s390_chunkify_finish (struct constant_pool *pool_list)
6943 {
6944 struct constant_pool *curr_pool = NULL;
6945 rtx insn;
6946
6947
6948 /* Replace all literal pool references. */
6949
6950 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6951 {
6952 if (INSN_P (insn))
6953 replace_ltrel_base (&PATTERN (insn));
6954
6955 curr_pool = s390_find_pool (pool_list, insn);
6956 if (!curr_pool)
6957 continue;
6958
6959 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6960 {
6961 rtx addr, pool_ref = NULL_RTX;
6962 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6963 if (pool_ref)
6964 {
6965 if (s390_execute_label (insn))
6966 addr = s390_find_execute (curr_pool, insn);
6967 else
6968 addr = s390_find_constant (curr_pool,
6969 get_pool_constant (pool_ref),
6970 get_pool_mode (pool_ref));
6971
6972 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
6973 INSN_CODE (insn) = -1;
6974 }
6975 }
6976 }
6977
6978 /* Dump out all literal pools. */
6979
6980 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
6981 s390_dump_pool (curr_pool, 0);
6982
6983 /* Free pool list. */
6984
6985 while (pool_list)
6986 {
6987 struct constant_pool *next = pool_list->next;
6988 s390_free_pool (pool_list);
6989 pool_list = next;
6990 }
6991 }
6992
6993 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
6994 We have decided we cannot use this list, so revert all changes
6995 to the current function that were done by s390_chunkify_start. */
6996
6997 static void
6998 s390_chunkify_cancel (struct constant_pool *pool_list)
6999 {
7000 struct constant_pool *curr_pool = NULL;
7001 rtx insn;
7002
7003 /* Remove all pool placeholder insns. */
7004
7005 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
7006 {
7007 /* Did we insert an extra barrier? Remove it. */
7008 rtx barrier = PREV_INSN (curr_pool->pool_insn);
7009 rtx jump = barrier? PREV_INSN (barrier) : NULL_RTX;
7010 rtx label = NEXT_INSN (curr_pool->pool_insn);
7011
7012 if (jump && GET_CODE (jump) == JUMP_INSN
7013 && barrier && GET_CODE (barrier) == BARRIER
7014 && label && GET_CODE (label) == CODE_LABEL
7015 && GET_CODE (PATTERN (jump)) == SET
7016 && SET_DEST (PATTERN (jump)) == pc_rtx
7017 && GET_CODE (SET_SRC (PATTERN (jump))) == LABEL_REF
7018 && XEXP (SET_SRC (PATTERN (jump)), 0) == label)
7019 {
7020 remove_insn (jump);
7021 remove_insn (barrier);
7022 remove_insn (label);
7023 }
7024
7025 remove_insn (curr_pool->pool_insn);
7026 }
7027
7028 /* Remove all base register reload insns. */
7029
7030 for (insn = get_insns (); insn; )
7031 {
7032 rtx next_insn = NEXT_INSN (insn);
7033
7034 if (GET_CODE (insn) == INSN
7035 && GET_CODE (PATTERN (insn)) == SET
7036 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
7037 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_RELOAD_BASE)
7038 remove_insn (insn);
7039
7040 insn = next_insn;
7041 }
7042
7043 /* Free pool list. */
7044
7045 while (pool_list)
7046 {
7047 struct constant_pool *next = pool_list->next;
7048 s390_free_pool (pool_list);
7049 pool_list = next;
7050 }
7051 }
7052
7053 /* Output the constant pool entry EXP in mode MODE with alignment ALIGN. */
7054
7055 void
7056 s390_output_pool_entry (rtx exp, enum machine_mode mode, unsigned int align)
7057 {
7058 REAL_VALUE_TYPE r;
7059
7060 switch (GET_MODE_CLASS (mode))
7061 {
7062 case MODE_FLOAT:
7063 case MODE_DECIMAL_FLOAT:
7064 gcc_assert (GET_CODE (exp) == CONST_DOUBLE);
7065
7066 REAL_VALUE_FROM_CONST_DOUBLE (r, exp);
7067 assemble_real (r, mode, align);
7068 break;
7069
7070 case MODE_INT:
7071 assemble_integer (exp, GET_MODE_SIZE (mode), align, 1);
7072 mark_symbol_refs_as_used (exp);
7073 break;
7074
7075 default:
7076 gcc_unreachable ();
7077 }
7078 }
7079
7080
7081 /* Return an RTL expression representing the value of the return address
7082 for the frame COUNT steps up from the current frame. FRAME is the
7083 frame pointer of that frame. */
7084
7085 rtx
7086 s390_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
7087 {
7088 int offset;
7089 rtx addr;
7090
7091 /* Without backchain, we fail for all but the current frame. */
7092
7093 if (!TARGET_BACKCHAIN && count > 0)
7094 return NULL_RTX;
7095
7096 /* For the current frame, we need to make sure the initial
7097 value of RETURN_REGNUM is actually saved. */
7098
7099 if (count == 0)
7100 {
7101 /* On non-z architectures branch splitting could overwrite r14. */
7102 if (TARGET_CPU_ZARCH)
7103 return get_hard_reg_initial_val (Pmode, RETURN_REGNUM);
7104 else
7105 {
7106 cfun_frame_layout.save_return_addr_p = true;
7107 return gen_rtx_MEM (Pmode, return_address_pointer_rtx);
7108 }
7109 }
7110
7111 if (TARGET_PACKED_STACK)
7112 offset = -2 * UNITS_PER_LONG;
7113 else
7114 offset = RETURN_REGNUM * UNITS_PER_LONG;
7115
7116 addr = plus_constant (frame, offset);
7117 addr = memory_address (Pmode, addr);
7118 return gen_rtx_MEM (Pmode, addr);
7119 }
7120
7121 /* Return an RTL expression representing the back chain stored in
7122 the current stack frame. */
7123
7124 rtx
7125 s390_back_chain_rtx (void)
7126 {
7127 rtx chain;
7128
7129 gcc_assert (TARGET_BACKCHAIN);
7130
7131 if (TARGET_PACKED_STACK)
7132 chain = plus_constant (stack_pointer_rtx,
7133 STACK_POINTER_OFFSET - UNITS_PER_LONG);
7134 else
7135 chain = stack_pointer_rtx;
7136
7137 chain = gen_rtx_MEM (Pmode, chain);
7138 return chain;
7139 }
7140
7141 /* Find first call clobbered register unused in a function.
7142 This could be used as base register in a leaf function
7143 or for holding the return address before epilogue. */
7144
7145 static int
7146 find_unused_clobbered_reg (void)
7147 {
7148 int i;
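  /* GPRs 0 through 5 are the call-clobbered registers of the s390 ABI,
     hence the loop bound of 6.  */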
7149 for (i = 0; i < 6; i++)
7150 if (!df_regs_ever_live_p (i))
7151 return i;
7152 return 0;
7153 }
7154
7155
7156 /* Helper function for s390_regs_ever_clobbered. Sets the fields in DATA for all
7157 clobbered hard regs in SETREG. */
7158
7159 static void
7160 s390_reg_clobbered_rtx (rtx setreg, const_rtx set_insn ATTRIBUTE_UNUSED, void *data)
7161 {
7162 int *regs_ever_clobbered = (int *)data;
7163 unsigned int i, regno;
7164 enum machine_mode mode = GET_MODE (setreg);
7165
7166 if (GET_CODE (setreg) == SUBREG)
7167 {
7168 rtx inner = SUBREG_REG (setreg);
7169 if (!GENERAL_REG_P (inner))
7170 return;
7171 regno = subreg_regno (setreg);
7172 }
7173 else if (GENERAL_REG_P (setreg))
7174 regno = REGNO (setreg);
7175 else
7176 return;
7177
7178 for (i = regno;
7179 i < regno + HARD_REGNO_NREGS (regno, mode);
7180 i++)
7181 regs_ever_clobbered[i] = 1;
7182 }
7183
7184 /* Walks through all basic blocks of the current function looking
7185 for clobbered hard regs using s390_reg_clobbered_rtx. The fields
7186 of the passed integer array REGS_EVER_CLOBBERED are set to one for
7187 each of those regs. */
7188
7189 static void
7190 s390_regs_ever_clobbered (int *regs_ever_clobbered)
7191 {
7192 basic_block cur_bb;
7193 rtx cur_insn;
7194 unsigned int i;
7195
7196 memset (regs_ever_clobbered, 0, 16 * sizeof (int));
7197
7198 /* For non-leaf functions we have to consider all call clobbered regs to be
7199 clobbered. */
7200 if (!current_function_is_leaf)
7201 {
7202 for (i = 0; i < 16; i++)
7203 regs_ever_clobbered[i] = call_really_used_regs[i];
7204 }
7205
7206 /* Make the "magic" eh_return registers live if necessary. For regs_ever_live
7207 this work is done by liveness analysis (mark_regs_live_at_end).
7208 Special care is needed for functions containing landing pads. Landing pads
7209 may use the eh registers, but the code which sets these registers is not
7210 contained in that function. Hence s390_regs_ever_clobbered is not able to
7211 deal with this automatically. */
7212 if (crtl->calls_eh_return || cfun->machine->has_landing_pad_p)
7213 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM ; i++)
7214 if (crtl->calls_eh_return
7215 || (cfun->machine->has_landing_pad_p
7216 && df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
7217 regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;
7218
7219 /* For nonlocal gotos all call-saved registers have to be saved.
7220 This flag is also set for the unwinding code in libgcc.
7221 See expand_builtin_unwind_init. For regs_ever_live this is done by
7222 reload. */
7223 if (cfun->has_nonlocal_label)
7224 for (i = 0; i < 16; i++)
7225 if (!call_really_used_regs[i])
7226 regs_ever_clobbered[i] = 1;
7227
7228 FOR_EACH_BB (cur_bb)
7229 {
7230 FOR_BB_INSNS (cur_bb, cur_insn)
7231 {
7232 if (INSN_P (cur_insn))
7233 note_stores (PATTERN (cur_insn),
7234 s390_reg_clobbered_rtx,
7235 regs_ever_clobbered);
7236 }
7237 }
7238 }
7239
7240 /* Determine the frame area which actually has to be accessed
7241 in the function epilogue. The values are stored at the
7242 given pointers AREA_BOTTOM (address of the lowest used stack
7243 address) and AREA_TOP (address of the first item which does
7244 not belong to the stack frame). */
7245
7246 static void
7247 s390_frame_area (int *area_bottom, int *area_top)
7248 {
7249 int b, t;
7250 int i;
7251
7252 b = INT_MAX;
7253 t = INT_MIN;
7254
7255 if (cfun_frame_layout.first_restore_gpr != -1)
7256 {
7257 b = (cfun_frame_layout.gprs_offset
7258 + cfun_frame_layout.first_restore_gpr * UNITS_PER_LONG);
7259 t = b + (cfun_frame_layout.last_restore_gpr
7260 - cfun_frame_layout.first_restore_gpr + 1) * UNITS_PER_LONG;
7261 }
7262
7263 if (TARGET_64BIT && cfun_save_high_fprs_p)
7264 {
7265 b = MIN (b, cfun_frame_layout.f8_offset);
7266 t = MAX (t, (cfun_frame_layout.f8_offset
7267 + cfun_frame_layout.high_fprs * 8));
7268 }
7269
7270 if (!TARGET_64BIT)
7271 for (i = 2; i < 4; i++)
7272 if (cfun_fpr_bit_p (i))
7273 {
7274 b = MIN (b, cfun_frame_layout.f4_offset + (i - 2) * 8);
7275 t = MAX (t, cfun_frame_layout.f4_offset + (i - 1) * 8);
7276 }
7277
7278 *area_bottom = b;
7279 *area_top = t;
7280 }
7281
7282 /* Fill cfun->machine with info about register usage of current function.
7283 Return in CLOBBERED_REGS which GPRs are currently considered set. */
7284
7285 static void
7286 s390_register_info (int clobbered_regs[])
7287 {
7288 int i, j;
7289
7290 /* fprs 8 - 15 are call-saved in the 64-bit ABI. */
7291 cfun_frame_layout.fpr_bitmap = 0;
7292 cfun_frame_layout.high_fprs = 0;
7293 if (TARGET_64BIT)
7294 for (i = 24; i < 32; i++)
7295 if (df_regs_ever_live_p (i) && !global_regs[i])
7296 {
7297 cfun_set_fpr_bit (i - 16);
7298 cfun_frame_layout.high_fprs++;
7299 }
7300
7301 /* Find first and last gpr to be saved. We trust regs_ever_live
7302 data, except that we don't save and restore global registers.
7303
7304 Also, all registers with special meaning to the compiler need
7305 extra handling. */
7306
7307 s390_regs_ever_clobbered (clobbered_regs);
7308
7309 for (i = 0; i < 16; i++)
7310 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i] && !fixed_regs[i];
7311
7312 if (frame_pointer_needed)
7313 clobbered_regs[HARD_FRAME_POINTER_REGNUM] = 1;
7314
7315 if (flag_pic)
7316 clobbered_regs[PIC_OFFSET_TABLE_REGNUM]
7317 |= df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM);
7318
7319 clobbered_regs[BASE_REGNUM]
7320 |= (cfun->machine->base_reg
7321 && REGNO (cfun->machine->base_reg) == BASE_REGNUM);
7322
7323 clobbered_regs[RETURN_REGNUM]
7324 |= (!current_function_is_leaf
7325 || TARGET_TPF_PROFILING
7326 || cfun->machine->split_branches_pending_p
7327 || cfun_frame_layout.save_return_addr_p
7328 || crtl->calls_eh_return
7329 || cfun->stdarg);
7330
7331 clobbered_regs[STACK_POINTER_REGNUM]
7332 |= (!current_function_is_leaf
7333 || TARGET_TPF_PROFILING
7334 || cfun_save_high_fprs_p
7335 || get_frame_size () > 0
7336 || cfun->calls_alloca
7337 || cfun->stdarg);
7338
7339 for (i = 6; i < 16; i++)
7340 if (df_regs_ever_live_p (i) || clobbered_regs[i])
7341 break;
7342 for (j = 15; j > i; j--)
7343 if (df_regs_ever_live_p (j) || clobbered_regs[j])
7344 break;
7345
7346 if (i == 16)
7347 {
7348 /* Nothing to save/restore. */
7349 cfun_frame_layout.first_save_gpr_slot = -1;
7350 cfun_frame_layout.last_save_gpr_slot = -1;
7351 cfun_frame_layout.first_save_gpr = -1;
7352 cfun_frame_layout.first_restore_gpr = -1;
7353 cfun_frame_layout.last_save_gpr = -1;
7354 cfun_frame_layout.last_restore_gpr = -1;
7355 }
7356 else
7357 {
7358 /* Save slots for gprs from i to j. */
7359 cfun_frame_layout.first_save_gpr_slot = i;
7360 cfun_frame_layout.last_save_gpr_slot = j;
7361
7362 for (i = cfun_frame_layout.first_save_gpr_slot;
7363 i < cfun_frame_layout.last_save_gpr_slot + 1;
7364 i++)
7365 if (clobbered_regs[i])
7366 break;
7367
7368 for (j = cfun_frame_layout.last_save_gpr_slot; j > i; j--)
7369 if (clobbered_regs[j])
7370 break;
7371
7372 if (i == cfun_frame_layout.last_save_gpr_slot + 1)
7373 {
7374 /* Nothing to save/restore. */
7375 cfun_frame_layout.first_save_gpr = -1;
7376 cfun_frame_layout.first_restore_gpr = -1;
7377 cfun_frame_layout.last_save_gpr = -1;
7378 cfun_frame_layout.last_restore_gpr = -1;
7379 }
7380 else
7381 {
7382 /* Save / Restore from gpr i to j. */
7383 cfun_frame_layout.first_save_gpr = i;
7384 cfun_frame_layout.first_restore_gpr = i;
7385 cfun_frame_layout.last_save_gpr = j;
7386 cfun_frame_layout.last_restore_gpr = j;
7387 }
7388 }
7389
7390 if (cfun->stdarg)
7391 {
7392 /* Varargs functions need to save gprs 2 to 6. */
7393 if (cfun->va_list_gpr_size
7394 && crtl->args.info.gprs < GP_ARG_NUM_REG)
7395 {
7396 int min_gpr = crtl->args.info.gprs;
7397 int max_gpr = min_gpr + cfun->va_list_gpr_size;
7398 if (max_gpr > GP_ARG_NUM_REG)
7399 max_gpr = GP_ARG_NUM_REG;
7400
7401 if (cfun_frame_layout.first_save_gpr == -1
7402 || cfun_frame_layout.first_save_gpr > 2 + min_gpr)
7403 {
7404 cfun_frame_layout.first_save_gpr = 2 + min_gpr;
7405 cfun_frame_layout.first_save_gpr_slot = 2 + min_gpr;
7406 }
7407
7408 if (cfun_frame_layout.last_save_gpr == -1
7409 || cfun_frame_layout.last_save_gpr < 2 + max_gpr - 1)
7410 {
7411 cfun_frame_layout.last_save_gpr = 2 + max_gpr - 1;
7412 cfun_frame_layout.last_save_gpr_slot = 2 + max_gpr - 1;
7413 }
7414 }
7415
7416 /* Mark the FP argument registers to be saved: f0 and f2 for 31 bit; f0, f2, f4 and f6 for 64 bit. */
7417 if (TARGET_HARD_FLOAT && cfun->va_list_fpr_size
7418 && crtl->args.info.fprs < FP_ARG_NUM_REG)
7419 {
7420 int min_fpr = crtl->args.info.fprs;
7421 int max_fpr = min_fpr + cfun->va_list_fpr_size;
7422 if (max_fpr > FP_ARG_NUM_REG)
7423 max_fpr = FP_ARG_NUM_REG;
7424
7425 /* ??? This is currently required to ensure proper location
7426 of the fpr save slots within the va_list save area. */
7427 if (TARGET_PACKED_STACK)
7428 min_fpr = 0;
7429
7430 for (i = min_fpr; i < max_fpr; i++)
7431 cfun_set_fpr_bit (i);
7432 }
7433 }
7434
7435 if (!TARGET_64BIT)
7436 for (i = 2; i < 4; i++)
7437 if (df_regs_ever_live_p (i + 16) && !global_regs[i + 16])
7438 cfun_set_fpr_bit (i);
7439 }
7440
7441 /* Fill cfun->machine with info about frame of current function. */
7442
7443 static void
7444 s390_frame_info (void)
7445 {
7446 int i;
7447
7448 cfun_frame_layout.frame_size = get_frame_size ();
7449 if (!TARGET_64BIT && cfun_frame_layout.frame_size > 0x7fff0000)
7450 fatal_error ("total size of local variables exceeds architecture limit");
7451
7452 if (!TARGET_PACKED_STACK)
7453 {
7454 cfun_frame_layout.backchain_offset = 0;
7455 cfun_frame_layout.f0_offset = 16 * UNITS_PER_LONG;
7456 cfun_frame_layout.f4_offset = cfun_frame_layout.f0_offset + 2 * 8;
7457 cfun_frame_layout.f8_offset = -cfun_frame_layout.high_fprs * 8;
7458 cfun_frame_layout.gprs_offset = (cfun_frame_layout.first_save_gpr_slot
7459 * UNITS_PER_LONG);
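      /* A concrete instance (a sketch assuming 64 bit, where
	 UNITS_PER_LONG is 8): the f0/f2 save slots sit at offset
	 16 * 8 = 128, f4/f6 at 128 + 16 = 144, and the GPR slots start
	 at first_save_gpr_slot * 8.  */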
7460 }
7461 else if (TARGET_BACKCHAIN) /* kernel stack layout */
7462 {
7463 cfun_frame_layout.backchain_offset = (STACK_POINTER_OFFSET
7464 - UNITS_PER_LONG);
7465 cfun_frame_layout.gprs_offset
7466 = (cfun_frame_layout.backchain_offset
7467 - (STACK_POINTER_REGNUM - cfun_frame_layout.first_save_gpr_slot + 1)
7468 * UNITS_PER_LONG);
7469
7470 if (TARGET_64BIT)
7471 {
7472 cfun_frame_layout.f4_offset
7473 = (cfun_frame_layout.gprs_offset
7474 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7475
7476 cfun_frame_layout.f0_offset
7477 = (cfun_frame_layout.f4_offset
7478 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7479 }
7480 else
7481 {
7482 /* On 31 bit we have to take care of the alignment of the
7483 floating point register save slots to provide fastest access. */
7484 cfun_frame_layout.f0_offset
7485 = ((cfun_frame_layout.gprs_offset
7486 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1))
7487 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7488
7489 cfun_frame_layout.f4_offset
7490 = (cfun_frame_layout.f0_offset
7491 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7492 }
7493 }
7494 else /* no backchain */
7495 {
7496 cfun_frame_layout.f4_offset
7497 = (STACK_POINTER_OFFSET
7498 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7499
7500 cfun_frame_layout.f0_offset
7501 = (cfun_frame_layout.f4_offset
7502 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7503
7504 cfun_frame_layout.gprs_offset
7505 = cfun_frame_layout.f0_offset - cfun_gprs_save_area_size;
7506 }
7507
7508 if (current_function_is_leaf
7509 && !TARGET_TPF_PROFILING
7510 && cfun_frame_layout.frame_size == 0
7511 && !cfun_save_high_fprs_p
7512 && !cfun->calls_alloca
7513 && !cfun->stdarg)
7514 return;
7515
7516 if (!TARGET_PACKED_STACK)
7517 cfun_frame_layout.frame_size += (STACK_POINTER_OFFSET
7518 + crtl->outgoing_args_size
7519 + cfun_frame_layout.high_fprs * 8);
7520 else
7521 {
7522 if (TARGET_BACKCHAIN)
7523 cfun_frame_layout.frame_size += UNITS_PER_LONG;
7524
7525 /* No alignment trouble here because f8-f15 are only saved under
7526 64 bit. */
7527 cfun_frame_layout.f8_offset = (MIN (MIN (cfun_frame_layout.f0_offset,
7528 cfun_frame_layout.f4_offset),
7529 cfun_frame_layout.gprs_offset)
7530 - cfun_frame_layout.high_fprs * 8);
7531
7532 cfun_frame_layout.frame_size += cfun_frame_layout.high_fprs * 8;
7533
7534 for (i = 0; i < 8; i++)
7535 if (cfun_fpr_bit_p (i))
7536 cfun_frame_layout.frame_size += 8;
7537
7538 cfun_frame_layout.frame_size += cfun_gprs_save_area_size;
7539
7540 /* If an odd number of gprs has to be saved under 31 bit, we have to adjust
7541 the frame size to sustain 8-byte alignment of stack frames. */
7542 cfun_frame_layout.frame_size = ((cfun_frame_layout.frame_size +
7543 STACK_BOUNDARY / BITS_PER_UNIT - 1)
7544 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1));
7545
7546 cfun_frame_layout.frame_size += crtl->outgoing_args_size;
7547 }
7548 }
7549
7550 /* Generate frame layout. Fills in register and frame data for the current
7551 function in cfun->machine. This routine can be called multiple times;
7552 it will re-do the complete frame layout every time. */
7553
7554 static void
7555 s390_init_frame_layout (void)
7556 {
7557 HOST_WIDE_INT frame_size;
7558 int base_used;
7559 int clobbered_regs[16];
7560
7561 /* On S/390 machines, we may need to perform branch splitting, which
7562 will require both base and return address register. We have no
7563 choice but to assume we're going to need them until right at the
7564 end of the machine dependent reorg phase. */
7565 if (!TARGET_CPU_ZARCH)
7566 cfun->machine->split_branches_pending_p = true;
7567
7568 do
7569 {
7570 frame_size = cfun_frame_layout.frame_size;
7571
7572 /* Try to predict whether we'll need the base register. */
7573 base_used = cfun->machine->split_branches_pending_p
7574 || crtl->uses_const_pool
7575 || (!DISP_IN_RANGE (frame_size)
7576 && !CONST_OK_FOR_K (frame_size));
7577
7578 /* Decide which register to use as literal pool base. In small
7579 leaf functions, try to use an unused call-clobbered register
7580 as base register to avoid save/restore overhead. */
7581 if (!base_used)
7582 cfun->machine->base_reg = NULL_RTX;
7583 else if (current_function_is_leaf && !df_regs_ever_live_p (5))
7584 cfun->machine->base_reg = gen_rtx_REG (Pmode, 5);
7585 else
7586 cfun->machine->base_reg = gen_rtx_REG (Pmode, BASE_REGNUM);
7587
7588 s390_register_info (clobbered_regs);
7589 s390_frame_info ();
7590 }
7591 while (frame_size != cfun_frame_layout.frame_size);
7592 }
7593
7594 /* Update frame layout. Recompute actual register save data based on
7595 current info and update regs_ever_live for the special registers.
7596 May be called multiple times, but may never cause *more* registers
7597 to be saved than s390_init_frame_layout allocated room for. */
7598
7599 static void
7600 s390_update_frame_layout (void)
7601 {
7602 int clobbered_regs[16];
7603
7604 s390_register_info (clobbered_regs);
7605
7606 df_set_regs_ever_live (BASE_REGNUM,
7607 clobbered_regs[BASE_REGNUM] ? true : false);
7608 df_set_regs_ever_live (RETURN_REGNUM,
7609 clobbered_regs[RETURN_REGNUM] ? true : false);
7610 df_set_regs_ever_live (STACK_POINTER_REGNUM,
7611 clobbered_regs[STACK_POINTER_REGNUM] ? true : false);
7612
7613 if (cfun->machine->base_reg)
7614 df_set_regs_ever_live (REGNO (cfun->machine->base_reg), true);
7615 }
7616
7617 /* Return true if it is legal to put a value with MODE into REGNO. */
7618
7619 bool
7620 s390_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
7621 {
7622 switch (REGNO_REG_CLASS (regno))
7623 {
7624 case FP_REGS:
7625 if (REGNO_PAIR_OK (regno, mode))
7626 {
7627 if (mode == SImode || mode == DImode)
7628 return true;
7629
7630 if (FLOAT_MODE_P (mode) && GET_MODE_CLASS (mode) != MODE_VECTOR_FLOAT)
7631 return true;
7632 }
7633 break;
7634 case ADDR_REGS:
7635 if (FRAME_REGNO_P (regno) && mode == Pmode)
7636 return true;
7637
7638 /* fallthrough */
7639 case GENERAL_REGS:
7640 if (REGNO_PAIR_OK (regno, mode))
7641 {
7642 if (TARGET_ZARCH
7643 || (mode != TFmode && mode != TCmode && mode != TDmode))
7644 return true;
7645 }
7646 break;
7647 case CC_REGS:
7648 if (GET_MODE_CLASS (mode) == MODE_CC)
7649 return true;
7650 break;
7651 case ACCESS_REGS:
7652 if (REGNO_PAIR_OK (regno, mode))
7653 {
7654 if (mode == SImode || mode == Pmode)
7655 return true;
7656 }
7657 break;
7658 default:
7659 return false;
7660 }
7661
7662 return false;
7663 }
7664
7665 /* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */
7666
7667 bool
7668 s390_hard_regno_rename_ok (unsigned int old_reg, unsigned int new_reg)
7669 {
7670 /* Once we've decided upon a register to use as base register, it must
7671 no longer be used for any other purpose. */
7672 if (cfun->machine->base_reg)
7673 if (REGNO (cfun->machine->base_reg) == old_reg
7674 || REGNO (cfun->machine->base_reg) == new_reg)
7675 return false;
7676
7677 return true;
7678 }
7679
7680 /* Maximum number of registers to represent a value of mode MODE
7681 in a register of class RCLASS. */
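/* For example, a TFmode value (16 bytes) placed in FP_REGS needs
   (16 + 8 - 1) / 8 = 2 floating-point registers, i.e. a register
   pair.  */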
7682
7683 int
7684 s390_class_max_nregs (enum reg_class rclass, enum machine_mode mode)
7685 {
7686 switch (rclass)
7687 {
7688 case FP_REGS:
7689 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
7690 return 2 * ((GET_MODE_SIZE (mode) / 2 + 8 - 1) / 8);
7691 else
7692 return (GET_MODE_SIZE (mode) + 8 - 1) / 8;
7693 case ACCESS_REGS:
7694 return (GET_MODE_SIZE (mode) + 4 - 1) / 4;
7695 default:
7696 break;
7697 }
7698 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
7699 }
7700
7701 /* Return true if register FROM can be eliminated via register TO. */
7702
7703 static bool
7704 s390_can_eliminate (const int from, const int to)
7705 {
7706 /* On zSeries machines, we have not marked the base register as fixed.
7707 Instead, we have an elimination rule BASE_REGNUM -> BASE_REGNUM.
7708 If a function requires the base register, we say here that this
7709 elimination cannot be performed. This will cause reload to free
7710 up the base register (as if it were fixed). On the other hand,
7711 if the current function does *not* require the base register, we
7712 say here the elimination succeeds, which in turn allows reload
7713 to allocate the base register for any other purpose. */
7714 if (from == BASE_REGNUM && to == BASE_REGNUM)
7715 {
7716 if (TARGET_CPU_ZARCH)
7717 {
7718 s390_init_frame_layout ();
7719 return cfun->machine->base_reg == NULL_RTX;
7720 }
7721
7722 return false;
7723 }
7724
7725 /* Everything else must point into the stack frame. */
7726 gcc_assert (to == STACK_POINTER_REGNUM
7727 || to == HARD_FRAME_POINTER_REGNUM);
7728
7729 gcc_assert (from == FRAME_POINTER_REGNUM
7730 || from == ARG_POINTER_REGNUM
7731 || from == RETURN_ADDRESS_POINTER_REGNUM);
7732
7733 /* Make sure we actually saved the return address. */
7734 if (from == RETURN_ADDRESS_POINTER_REGNUM)
7735 if (!crtl->calls_eh_return
7736 && !cfun->stdarg
7737 && !cfun_frame_layout.save_return_addr_p)
7738 return false;
7739
7740 return true;
7741 }
7742
7743 /* Return the offset between registers FROM and TO immediately after the prologue. */
7744
7745 HOST_WIDE_INT
7746 s390_initial_elimination_offset (int from, int to)
7747 {
7748 HOST_WIDE_INT offset;
7749 int index;
7750
7751 /* ??? Why are we called for non-eliminable pairs? */
7752 if (!s390_can_eliminate (from, to))
7753 return 0;
7754
7755 switch (from)
7756 {
7757 case FRAME_POINTER_REGNUM:
7758 offset = (get_frame_size()
7759 + STACK_POINTER_OFFSET
7760 + crtl->outgoing_args_size);
7761 break;
7762
7763 case ARG_POINTER_REGNUM:
7764 s390_init_frame_layout ();
7765 offset = cfun_frame_layout.frame_size + STACK_POINTER_OFFSET;
7766 break;
7767
7768 case RETURN_ADDRESS_POINTER_REGNUM:
7769 s390_init_frame_layout ();
7770 index = RETURN_REGNUM - cfun_frame_layout.first_save_gpr_slot;
7771 gcc_assert (index >= 0);
7772 offset = cfun_frame_layout.frame_size + cfun_frame_layout.gprs_offset;
7773 offset += index * UNITS_PER_LONG;
7774 break;
7775
7776 case BASE_REGNUM:
7777 offset = 0;
7778 break;
7779
7780 default:
7781 gcc_unreachable ();
7782 }
7783
7784 return offset;
7785 }
7786
7787 /* Emit insn to save fpr REGNUM at offset OFFSET relative
7788 to register BASE. Return generated insn. */
7789
7790 static rtx
7791 save_fpr (rtx base, int offset, int regnum)
7792 {
7793 rtx addr;
7794 addr = gen_rtx_MEM (DFmode, plus_constant (base, offset));
7795
7796 if (regnum >= 16 && regnum <= (16 + FP_ARG_NUM_REG))
7797 set_mem_alias_set (addr, get_varargs_alias_set ());
7798 else
7799 set_mem_alias_set (addr, get_frame_alias_set ());
7800
7801 return emit_move_insn (addr, gen_rtx_REG (DFmode, regnum));
7802 }
7803
7804 /* Emit insn to restore fpr REGNUM from offset OFFSET relative
7805 to register BASE. Return generated insn. */
7806
7807 static rtx
7808 restore_fpr (rtx base, int offset, int regnum)
7809 {
7810 rtx addr;
7811 addr = gen_rtx_MEM (DFmode, plus_constant (base, offset));
7812 set_mem_alias_set (addr, get_frame_alias_set ());
7813
7814 return emit_move_insn (gen_rtx_REG (DFmode, regnum), addr);
7815 }
7816
7817 /* Return true if REGNO is a global register, but not one
7818 of the special ones that need to be saved/restored in any case. */
7819
7820 static inline bool
7821 global_not_special_regno_p (int regno)
7822 {
7823 return (global_regs[regno]
7824 /* These registers are special and need to be
7825 restored in any case. */
7826 && !(regno == STACK_POINTER_REGNUM
7827 || regno == RETURN_REGNUM
7828 || regno == BASE_REGNUM
7829 || (flag_pic && regno == (int)PIC_OFFSET_TABLE_REGNUM)));
7830 }
7831
7832 /* Generate insn to save registers FIRST to LAST into
7833 the register save area located at offset OFFSET
7834 relative to register BASE. */
7835
7836 static rtx
7837 save_gprs (rtx base, int offset, int first, int last)
7838 {
7839 rtx addr, insn, note;
7840 int i;
7841
7842 addr = plus_constant (base, offset);
7843 addr = gen_rtx_MEM (Pmode, addr);
7844
7845 set_mem_alias_set (addr, get_frame_alias_set ());
7846
7847 /* Special-case single register. */
7848 if (first == last)
7849 {
7850 if (TARGET_64BIT)
7851 insn = gen_movdi (addr, gen_rtx_REG (Pmode, first));
7852 else
7853 insn = gen_movsi (addr, gen_rtx_REG (Pmode, first));
7854
7855 if (!global_not_special_regno_p (first))
7856 RTX_FRAME_RELATED_P (insn) = 1;
7857 return insn;
7858 }
7859
7860
7861 insn = gen_store_multiple (addr,
7862 gen_rtx_REG (Pmode, first),
7863 GEN_INT (last - first + 1));
7864
7865 if (first <= 6 && cfun->stdarg)
7866 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
7867 {
7868 rtx mem = XEXP (XVECEXP (PATTERN (insn), 0, i), 0);
7869
7870 if (first + i <= 6)
7871 set_mem_alias_set (mem, get_varargs_alias_set ());
7872 }
7873
7874 /* We need to set the FRAME_RELATED flag on all SETs
7875 inside the store-multiple pattern.
7876
7877 However, we must not emit DWARF records for registers 2..5
7878 if they are stored for use by variable arguments ...
7879
7880 ??? Unfortunately, it is not enough to simply not set the
7881 FRAME_RELATED flags for those SETs, because the first SET
7882 of the PARALLEL is always treated as if it had the flag
7883 set, even if it does not. Therefore we emit a new pattern
7884 without those registers as REG_FRAME_RELATED_EXPR note. */
7885
7886 if (first >= 6 && !global_not_special_regno_p (first))
7887 {
7888 rtx pat = PATTERN (insn);
7889
7890 for (i = 0; i < XVECLEN (pat, 0); i++)
7891 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
7892 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (pat,
7893 0, i)))))
7894 RTX_FRAME_RELATED_P (XVECEXP (pat, 0, i)) = 1;
7895
7896 RTX_FRAME_RELATED_P (insn) = 1;
7897 }
7898 else if (last >= 6)
7899 {
7900 int start;
7901
7902 for (start = first >= 6 ? first : 6; start <= last; start++)
7903 if (!global_not_special_regno_p (start))
7904 break;
7905
7906 if (start > last)
7907 return insn;
7908
7909 addr = plus_constant (base, offset + (start - first) * UNITS_PER_LONG);
7910 note = gen_store_multiple (gen_rtx_MEM (Pmode, addr),
7911 gen_rtx_REG (Pmode, start),
7912 GEN_INT (last - start + 1));
7913 note = PATTERN (note);
7914
7915 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
7916
7917 for (i = 0; i < XVECLEN (note, 0); i++)
7918 if (GET_CODE (XVECEXP (note, 0, i)) == SET
7919 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (note,
7920 0, i)))))
7921 RTX_FRAME_RELATED_P (XVECEXP (note, 0, i)) = 1;
7922
7923 RTX_FRAME_RELATED_P (insn) = 1;
7924 }
7925
7926 return insn;
7927 }
7928
7929 /* Generate insn to restore registers FIRST to LAST from
7930 the register save area located at offset OFFSET
7931 relative to register BASE. */
7932
7933 static rtx
7934 restore_gprs (rtx base, int offset, int first, int last)
7935 {
7936 rtx addr, insn;
7937
7938 addr = plus_constant (base, offset);
7939 addr = gen_rtx_MEM (Pmode, addr);
7940 set_mem_alias_set (addr, get_frame_alias_set ());
7941
7942 /* Special-case single register. */
7943 if (first == last)
7944 {
7945 if (TARGET_64BIT)
7946 insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
7947 else
7948 insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);
7949
7950 return insn;
7951 }
7952
7953 insn = gen_load_multiple (gen_rtx_REG (Pmode, first),
7954 addr,
7955 GEN_INT (last - first + 1));
7956 return insn;
7957 }
7958
7959 /* Return insn sequence to load the GOT register. */
7960
7961 static GTY(()) rtx got_symbol;
7962 rtx
7963 s390_load_got (void)
7964 {
7965 rtx insns;
7966
7967 if (!got_symbol)
7968 {
7969 got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
7970 SYMBOL_REF_FLAGS (got_symbol) = SYMBOL_FLAG_LOCAL;
7971 }
7972
7973 start_sequence ();
7974
7975 if (TARGET_CPU_ZARCH)
7976 {
7977 emit_move_insn (pic_offset_table_rtx, got_symbol);
7978 }
7979 else
7980 {
7981 rtx offset;
7982
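/* Without LARL (!TARGET_CPU_ZARCH), first load the offset of the GOT
   relative to the literal pool base from the constant pool, then add
   the literal pool base (UNSPEC_LTREL_BASE) to form the GOT address.  */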
7983 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got_symbol),
7984 UNSPEC_LTREL_OFFSET);
7985 offset = gen_rtx_CONST (Pmode, offset);
7986 offset = force_const_mem (Pmode, offset);
7987
7988 emit_move_insn (pic_offset_table_rtx, offset);
7989
7990 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (offset, 0)),
7991 UNSPEC_LTREL_BASE);
7992 offset = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, offset);
7993
7994 emit_move_insn (pic_offset_table_rtx, offset);
7995 }
7996
7997 insns = get_insns ();
7998 end_sequence ();
7999 return insns;
8000 }
8001
8002 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
8003 and the change to the stack pointer. */
8004
8005 static void
8006 s390_emit_stack_tie (void)
8007 {
8008 rtx mem = gen_frame_mem (BLKmode,
8009 gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
8010
8011 emit_insn (gen_stack_tie (mem));
8012 }
8013
8014 /* Expand the prologue into a bunch of separate insns. */
8015
8016 void
8017 s390_emit_prologue (void)
8018 {
8019 rtx insn, addr;
8020 rtx temp_reg;
8021 int i;
8022 int offset;
8023 int next_fpr = 0;
8024
8025 /* Complete frame layout. */
8026
8027 s390_update_frame_layout ();
8028
8029 /* Annotate all constant pool references to let the scheduler know
8030 they implicitly use the base register. */
8031
8032 push_topmost_sequence ();
8033
8034 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8035 if (INSN_P (insn))
8036 {
8037 annotate_constant_pool_refs (&PATTERN (insn));
8038 df_insn_rescan (insn);
8039 }
8040
8041 pop_topmost_sequence ();
8042
8043 /* Choose the best register for temporary use within the prologue.
8044 See below for why TPF must use register 1. */
8045
8046 if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
8047 && !current_function_is_leaf
8048 && !TARGET_TPF_PROFILING)
8049 temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
8050 else
8051 temp_reg = gen_rtx_REG (Pmode, 1);
8052
8053 /* Save call saved gprs. */
8054 if (cfun_frame_layout.first_save_gpr != -1)
8055 {
8056 insn = save_gprs (stack_pointer_rtx,
8057 cfun_frame_layout.gprs_offset +
8058 UNITS_PER_LONG * (cfun_frame_layout.first_save_gpr
8059 - cfun_frame_layout.first_save_gpr_slot),
8060 cfun_frame_layout.first_save_gpr,
8061 cfun_frame_layout.last_save_gpr);
8062 emit_insn (insn);
8063 }
8064
8065 /* Dummy insn to mark literal pool slot. */
8066
8067 if (cfun->machine->base_reg)
8068 emit_insn (gen_main_pool (cfun->machine->base_reg));
8069
8070 offset = cfun_frame_layout.f0_offset;
8071
8072 /* Save f0 and f2. */
8073 for (i = 0; i < 2; i++)
8074 {
8075 if (cfun_fpr_bit_p (i))
8076 {
8077 save_fpr (stack_pointer_rtx, offset, i + 16);
8078 offset += 8;
8079 }
8080 else if (!TARGET_PACKED_STACK)
8081 offset += 8;
8082 }
8083
8084 /* Save f4 and f6. */
8085 offset = cfun_frame_layout.f4_offset;
8086 for (i = 2; i < 4; i++)
8087 {
8088 if (cfun_fpr_bit_p (i))
8089 {
8090 insn = save_fpr (stack_pointer_rtx, offset, i + 16);
8091 offset += 8;
8092
8093 /* If f4 and f6 are call-clobbered they are saved due to stdarg and
8094 are therefore not frame related. */
8095 if (!call_really_used_regs[i + 16])
8096 RTX_FRAME_RELATED_P (insn) = 1;
8097 }
8098 else if (!TARGET_PACKED_STACK)
8099 offset += 8;
8100 }
8101
8102 if (TARGET_PACKED_STACK
8103 && cfun_save_high_fprs_p
8104 && cfun_frame_layout.f8_offset + cfun_frame_layout.high_fprs * 8 > 0)
8105 {
8106 offset = (cfun_frame_layout.f8_offset
8107 + (cfun_frame_layout.high_fprs - 1) * 8);
8108
8109 for (i = 15; i > 7 && offset >= 0; i--)
8110 if (cfun_fpr_bit_p (i))
8111 {
8112 insn = save_fpr (stack_pointer_rtx, offset, i + 16);
8113
8114 RTX_FRAME_RELATED_P (insn) = 1;
8115 offset -= 8;
8116 }
8117 if (offset >= cfun_frame_layout.f8_offset)
8118 next_fpr = i + 16;
8119 }
8120
8121 if (!TARGET_PACKED_STACK)
8122 next_fpr = cfun_save_high_fprs_p ? 31 : 0;
8123
8124 if (flag_stack_usage)
8125 current_function_static_stack_size = cfun_frame_layout.frame_size;
8126
8127 /* Decrement stack pointer. */
8128
8129 if (cfun_frame_layout.frame_size > 0)
8130 {
8131 rtx frame_off = GEN_INT (-cfun_frame_layout.frame_size);
8132 rtx real_frame_off;
8133
8134 if (s390_stack_size)
8135 {
8136 HOST_WIDE_INT stack_guard;
8137
8138 if (s390_stack_guard)
8139 stack_guard = s390_stack_guard;
8140 else
8141 {
8142 /* If no value for the stack guard is provided, the smallest power of 2
8143 larger than the current frame size is chosen. */
8144 stack_guard = 1;
8145 while (stack_guard < cfun_frame_layout.frame_size)
8146 stack_guard <<= 1;
8147 }
8148
8149 if (cfun_frame_layout.frame_size >= s390_stack_size)
8150 {
8151 warning (0, "frame size of function %qs is "
8152 HOST_WIDE_INT_PRINT_DEC
8153 " bytes exceeding user provided stack limit of "
8154 HOST_WIDE_INT_PRINT_DEC " bytes. "
8155 "An unconditional trap is added.",
8156 current_function_name(), cfun_frame_layout.frame_size,
8157 s390_stack_size);
8158 emit_insn (gen_trap ());
8159 }
8160 else
8161 {
8162 /* stack_guard has to be smaller than s390_stack_size.
8163 Otherwise we would emit an AND with zero which would
8164 not match the test under mask pattern. */
8165 if (stack_guard >= s390_stack_size)
8166 {
8167 warning (0, "frame size of function %qs is "
8168 HOST_WIDE_INT_PRINT_DEC
8169 " bytes which is more than half the stack size. "
8170 "The dynamic check would not be reliable. "
8171 "No check emitted for this function.",
8172 current_function_name(),
8173 cfun_frame_layout.frame_size);
8174 }
8175 else
8176 {
8177 HOST_WIDE_INT stack_check_mask = ((s390_stack_size - 1)
8178 & ~(stack_guard - 1));
8179
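/* Trap when all stack pointer bits selected by STACK_CHECK_MASK are zero,
   i.e. the stack pointer modulo the stack size has dropped below the
   stack guard.  */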
8180 rtx t = gen_rtx_AND (Pmode, stack_pointer_rtx,
8181 GEN_INT (stack_check_mask));
8182 if (TARGET_64BIT)
8183 emit_insn (gen_ctrapdi4 (gen_rtx_EQ (VOIDmode,
8184 t, const0_rtx),
8185 t, const0_rtx, const0_rtx));
8186 else
8187 emit_insn (gen_ctrapsi4 (gen_rtx_EQ (VOIDmode,
8188 t, const0_rtx),
8189 t, const0_rtx, const0_rtx));
8190 }
8191 }
8192 }
8193
8194 if (s390_warn_framesize > 0
8195 && cfun_frame_layout.frame_size >= s390_warn_framesize)
8196 warning (0, "frame size of %qs is " HOST_WIDE_INT_PRINT_DEC " bytes",
8197 current_function_name (), cfun_frame_layout.frame_size);
8198
8199 if (s390_warn_dynamicstack_p && cfun->calls_alloca)
8200 warning (0, "%qs uses dynamic stack allocation", current_function_name ());
8201
8202 /* Save incoming stack pointer into temp reg. */
8203 if (TARGET_BACKCHAIN || next_fpr)
8204 insn = emit_insn (gen_move_insn (temp_reg, stack_pointer_rtx));
8205
8206 /* Subtract frame size from stack pointer. */
8207
8208 if (DISP_IN_RANGE (INTVAL (frame_off)))
8209 {
8210 insn = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8211 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8212 frame_off));
8213 insn = emit_insn (insn);
8214 }
8215 else
8216 {
8217 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
8218 frame_off = force_const_mem (Pmode, frame_off);
8219
8220 insn = emit_insn (gen_add2_insn (stack_pointer_rtx, frame_off));
8221 annotate_constant_pool_refs (&PATTERN (insn));
8222 }
8223
8224 RTX_FRAME_RELATED_P (insn) = 1;
8225 real_frame_off = GEN_INT (-cfun_frame_layout.frame_size);
8226 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
8227 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8228 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8229 real_frame_off)));
8230
8231 /* Set backchain. */
8232
8233 if (TARGET_BACKCHAIN)
8234 {
8235 if (cfun_frame_layout.backchain_offset)
8236 addr = gen_rtx_MEM (Pmode,
8237 plus_constant (stack_pointer_rtx,
8238 cfun_frame_layout.backchain_offset));
8239 else
8240 addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
8241 set_mem_alias_set (addr, get_frame_alias_set ());
8242 insn = emit_insn (gen_move_insn (addr, temp_reg));
8243 }
8244
8245 /* If we support non-call exceptions (e.g. for Java),
8246 we need to make sure the backchain pointer is set up
8247 before any possibly trapping memory access. */
8248 if (TARGET_BACKCHAIN && cfun->can_throw_non_call_exceptions)
8249 {
8250 addr = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode));
8251 emit_clobber (addr);
8252 }
8253 }
8254
8255 /* Save fprs 8 - 15 (64 bit ABI). */
8256
8257 if (cfun_save_high_fprs_p && next_fpr)
8258 {
8259 /* If the stack might be accessed through a different register
8260 we have to make sure that the stack pointer decrement is not
8261 moved below the use of the stack slots. */
8262 s390_emit_stack_tie ();
8263
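/* TEMP_REG still holds the incoming stack pointer; bias it by F8_OFFSET
   so the high FPRs below can be saved with small displacements.  */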
8264 insn = emit_insn (gen_add2_insn (temp_reg,
8265 GEN_INT (cfun_frame_layout.f8_offset)));
8266
8267 offset = 0;
8268
8269 for (i = 24; i <= next_fpr; i++)
8270 if (cfun_fpr_bit_p (i - 16))
8271 {
8272 rtx addr = plus_constant (stack_pointer_rtx,
8273 cfun_frame_layout.frame_size
8274 + cfun_frame_layout.f8_offset
8275 + offset);
8276
8277 insn = save_fpr (temp_reg, offset, i);
8278 offset += 8;
8279 RTX_FRAME_RELATED_P (insn) = 1;
8280 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
8281 gen_rtx_SET (VOIDmode,
8282 gen_rtx_MEM (DFmode, addr),
8283 gen_rtx_REG (DFmode, i)));
8284 }
8285 }
8286
8287 /* Set frame pointer, if needed. */
8288
8289 if (frame_pointer_needed)
8290 {
8291 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
8292 RTX_FRAME_RELATED_P (insn) = 1;
8293 }
8294
8295 /* Set up got pointer, if needed. */
8296
8297 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
8298 {
8299 rtx insns = s390_load_got ();
8300
8301 for (insn = insns; insn; insn = NEXT_INSN (insn))
8302 annotate_constant_pool_refs (&PATTERN (insn));
8303
8304 emit_insn (insns);
8305 }
8306
8307 if (TARGET_TPF_PROFILING)
8308 {
8309 /* Generate a BAS instruction to serve as a function
8310 entry intercept to facilitate the use of tracing
8311 algorithms located at the branch target. */
8312 emit_insn (gen_prologue_tpf ());
8313
8314 /* Emit a blockage here so that all code
8315 lies between the profiling mechanisms. */
8316 emit_insn (gen_blockage ());
8317 }
8318 }
8319
8320 /* Expand the epilogue into a bunch of separate insns. */
8321
8322 void
8323 s390_emit_epilogue (bool sibcall)
8324 {
8325 rtx frame_pointer, return_reg, cfa_restores = NULL_RTX;
8326 int area_bottom, area_top, offset = 0;
8327 int next_offset;
8328 rtvec p;
8329 int i;
8330
8331 if (TARGET_TPF_PROFILING)
8332 {
8333
8334 /* Generate a BAS instruction to serve as a function
8335 entry intercept to facilitate the use of tracing
8336 algorithms located at the branch target. */
8337
8338 /* Emit a blockage here so that all code
8339 lies between the profiling mechanisms. */
8340 emit_insn (gen_blockage ());
8341
8342 emit_insn (gen_epilogue_tpf ());
8343 }
8344
8345 /* Check whether to use frame or stack pointer for restore. */
8346
8347 frame_pointer = (frame_pointer_needed
8348 ? hard_frame_pointer_rtx : stack_pointer_rtx);
8349
8350 s390_frame_area (&area_bottom, &area_top);
8351
8352 /* Check whether we can access the register save area.
8353 If not, increment the frame pointer as required. */
8354
8355 if (area_top <= area_bottom)
8356 {
8357 /* Nothing to restore. */
8358 }
8359 else if (DISP_IN_RANGE (cfun_frame_layout.frame_size + area_bottom)
8360 && DISP_IN_RANGE (cfun_frame_layout.frame_size + area_top - 1))
8361 {
8362 /* Area is in range. */
8363 offset = cfun_frame_layout.frame_size;
8364 }
8365 else
8366 {
8367 rtx insn, frame_off, cfa;
8368
8369 offset = area_bottom < 0 ? -area_bottom : 0;
8370 frame_off = GEN_INT (cfun_frame_layout.frame_size - offset);
8371
8372 cfa = gen_rtx_SET (VOIDmode, frame_pointer,
8373 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
8374 if (DISP_IN_RANGE (INTVAL (frame_off)))
8375 {
8376 insn = gen_rtx_SET (VOIDmode, frame_pointer,
8377 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
8378 insn = emit_insn (insn);
8379 }
8380 else
8381 {
8382 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
8383 frame_off = force_const_mem (Pmode, frame_off);
8384
8385 insn = emit_insn (gen_add2_insn (frame_pointer, frame_off));
8386 annotate_constant_pool_refs (&PATTERN (insn));
8387 }
8388 add_reg_note (insn, REG_CFA_ADJUST_CFA, cfa);
8389 RTX_FRAME_RELATED_P (insn) = 1;
8390 }
8391
8392 /* Restore call saved fprs. */
8393
8394 if (TARGET_64BIT)
8395 {
8396 if (cfun_save_high_fprs_p)
8397 {
8398 next_offset = cfun_frame_layout.f8_offset;
8399 for (i = 24; i < 32; i++)
8400 {
8401 if (cfun_fpr_bit_p (i - 16))
8402 {
8403 restore_fpr (frame_pointer,
8404 offset + next_offset, i);
8405 cfa_restores
8406 = alloc_reg_note (REG_CFA_RESTORE,
8407 gen_rtx_REG (DFmode, i), cfa_restores);
8408 next_offset += 8;
8409 }
8410 }
8411 }
8412
8413 }
8414 else
8415 {
8416 next_offset = cfun_frame_layout.f4_offset;
8417 for (i = 18; i < 20; i++)
8418 {
8419 if (cfun_fpr_bit_p (i - 16))
8420 {
8421 restore_fpr (frame_pointer,
8422 offset + next_offset, i);
8423 cfa_restores
8424 = alloc_reg_note (REG_CFA_RESTORE,
8425 gen_rtx_REG (DFmode, i), cfa_restores);
8426 next_offset += 8;
8427 }
8428 else if (!TARGET_PACKED_STACK)
8429 next_offset += 8;
8430 }
8431
8432 }
8433
8434 /* Return register. */
8435
8436 return_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
8437
8438 /* Restore call saved gprs. */
8439
8440 if (cfun_frame_layout.first_restore_gpr != -1)
8441 {
8442 rtx insn, addr;
8443 int i;
8444
8445 /* Check for global registers and store their current values to the
8446 stack slots from which they will be reloaded below. */
8447
8448 for (i = cfun_frame_layout.first_restore_gpr;
8449 i <= cfun_frame_layout.last_restore_gpr;
8450 i++)
8451 {
8452 if (global_not_special_regno_p (i))
8453 {
8454 addr = plus_constant (frame_pointer,
8455 offset + cfun_frame_layout.gprs_offset
8456 + (i - cfun_frame_layout.first_save_gpr_slot)
8457 * UNITS_PER_LONG);
8458 addr = gen_rtx_MEM (Pmode, addr);
8459 set_mem_alias_set (addr, get_frame_alias_set ());
8460 emit_move_insn (addr, gen_rtx_REG (Pmode, i));
8461 }
8462 else
8463 cfa_restores
8464 = alloc_reg_note (REG_CFA_RESTORE,
8465 gen_rtx_REG (Pmode, i), cfa_restores);
8466 }
8467
8468 if (! sibcall)
8469 {
8470 /* Fetch the return address from the stack before the load multiple;
8471 this helps scheduling. */
8472
8473 if (cfun_frame_layout.save_return_addr_p
8474 || (cfun_frame_layout.first_restore_gpr < BASE_REGNUM
8475 && cfun_frame_layout.last_restore_gpr > RETURN_REGNUM))
8476 {
8477 int return_regnum = find_unused_clobbered_reg();
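/* No unused call-clobbered register was found; fall back to %r4.  */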
8478 if (!return_regnum)
8479 return_regnum = 4;
8480 return_reg = gen_rtx_REG (Pmode, return_regnum);
8481
8482 addr = plus_constant (frame_pointer,
8483 offset + cfun_frame_layout.gprs_offset
8484 + (RETURN_REGNUM
8485 - cfun_frame_layout.first_save_gpr_slot)
8486 * UNITS_PER_LONG);
8487 addr = gen_rtx_MEM (Pmode, addr);
8488 set_mem_alias_set (addr, get_frame_alias_set ());
8489 emit_move_insn (return_reg, addr);
8490 }
8491 }
8492
8493 insn = restore_gprs (frame_pointer,
8494 offset + cfun_frame_layout.gprs_offset
8495 + (cfun_frame_layout.first_restore_gpr
8496 - cfun_frame_layout.first_save_gpr_slot)
8497 * UNITS_PER_LONG,
8498 cfun_frame_layout.first_restore_gpr,
8499 cfun_frame_layout.last_restore_gpr);
8500 insn = emit_insn (insn);
8501 REG_NOTES (insn) = cfa_restores;
8502 add_reg_note (insn, REG_CFA_DEF_CFA,
8503 plus_constant (stack_pointer_rtx, STACK_POINTER_OFFSET));
8504 RTX_FRAME_RELATED_P (insn) = 1;
8505 }
8506
8507 if (! sibcall)
8508 {
8509
8510 /* Return to caller. */
8511
8512 p = rtvec_alloc (2);
8513
8514 RTVEC_ELT (p, 0) = gen_rtx_RETURN (VOIDmode);
8515 RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode, return_reg);
8516 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
8517 }
8518 }
8519
8520
8521 /* Return the size in bytes of a function argument of
8522 type TYPE and/or mode MODE. At least one of TYPE or
8523 MODE must be specified. */
8524
8525 static int
8526 s390_function_arg_size (enum machine_mode mode, const_tree type)
8527 {
8528 if (type)
8529 return int_size_in_bytes (type);
8530
8531 /* No type info available for some library calls ... */
8532 if (mode != BLKmode)
8533 return GET_MODE_SIZE (mode);
8534
8535 /* If we have neither type nor mode, abort */
8536 gcc_unreachable ();
8537 }
8538
8539 /* Return true if a function argument of type TYPE and mode MODE
8540 is to be passed in a floating-point register, if available. */
8541
8542 static bool
8543 s390_function_arg_float (enum machine_mode mode, const_tree type)
8544 {
8545 int size = s390_function_arg_size (mode, type);
8546 if (size > 8)
8547 return false;
8548
8549 /* Soft-float changes the ABI: no floating-point registers are used. */
8550 if (TARGET_SOFT_FLOAT)
8551 return false;
8552
8553 /* No type info available for some library calls ... */
8554 if (!type)
8555 return mode == SFmode || mode == DFmode || mode == SDmode || mode == DDmode;
8556
8557 /* The ABI says that record types with a single member are treated
8558 just like that member would be. */
8559 while (TREE_CODE (type) == RECORD_TYPE)
8560 {
8561 tree field, single = NULL_TREE;
8562
8563 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
8564 {
8565 if (TREE_CODE (field) != FIELD_DECL)
8566 continue;
8567
8568 if (single == NULL_TREE)
8569 single = TREE_TYPE (field);
8570 else
8571 return false;
8572 }
8573
8574 if (single == NULL_TREE)
8575 return false;
8576 else
8577 type = single;
8578 }
8579
8580 return TREE_CODE (type) == REAL_TYPE;
8581 }
8582
8583 /* Return true if a function argument of type TYPE and mode MODE
8584 is to be passed in an integer register, or a pair of integer
8585 registers, if available. */
8586
8587 static bool
8588 s390_function_arg_integer (enum machine_mode mode, const_tree type)
8589 {
8590 int size = s390_function_arg_size (mode, type);
8591 if (size > 8)
8592 return false;
8593
8594 /* No type info available for some library calls ... */
8595 if (!type)
8596 return GET_MODE_CLASS (mode) == MODE_INT
8597 || (TARGET_SOFT_FLOAT && SCALAR_FLOAT_MODE_P (mode));
8598
8599 /* We accept small integral (and similar) types. */
8600 if (INTEGRAL_TYPE_P (type)
8601 || POINTER_TYPE_P (type)
8602 || TREE_CODE (type) == NULLPTR_TYPE
8603 || TREE_CODE (type) == OFFSET_TYPE
8604 || (TARGET_SOFT_FLOAT && TREE_CODE (type) == REAL_TYPE))
8605 return true;
8606
8607 /* We also accept structs of size 1, 2, 4, 8 that are not
8608 passed in floating-point registers. */
8609 if (AGGREGATE_TYPE_P (type)
8610 && exact_log2 (size) >= 0
8611 && !s390_function_arg_float (mode, type))
8612 return true;
8613
8614 return false;
8615 }
8616
8617 /* Return 1 if a function argument of type TYPE and mode MODE
8618 is to be passed by reference. The ABI specifies that only
8619 structures of size 1, 2, 4, or 8 bytes are passed by value,
8620 all other structures (and complex numbers) are passed by
8621 reference. */
8622
8623 static bool
8624 s390_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
8625 enum machine_mode mode, const_tree type,
8626 bool named ATTRIBUTE_UNUSED)
8627 {
8628 int size = s390_function_arg_size (mode, type);
8629 if (size > 8)
8630 return true;
8631
8632 if (type)
8633 {
8634 if (AGGREGATE_TYPE_P (type) && exact_log2 (size) < 0)
8635 return 1;
8636
8637 if (TREE_CODE (type) == COMPLEX_TYPE
8638 || TREE_CODE (type) == VECTOR_TYPE)
8639 return 1;
8640 }
8641
8642 return 0;
8643 }
8644
8645 /* Update the data in CUM to advance over an argument of mode MODE and
8646 data type TYPE. (TYPE is null for libcalls where that information
8647 may not be available.). The boolean NAMED specifies whether the
8648 argument is a named argument (as opposed to an unnamed argument
8649 matching an ellipsis). */
8650
8651 static void
8652 s390_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
8653 const_tree type, bool named ATTRIBUTE_UNUSED)
8654 {
8655 if (s390_function_arg_float (mode, type))
8656 {
8657 cum->fprs += 1;
8658 }
8659 else if (s390_function_arg_integer (mode, type))
8660 {
8661 int size = s390_function_arg_size (mode, type);
8662 cum->gprs += ((size + UNITS_PER_LONG - 1) / UNITS_PER_LONG);
8663 }
8664 else
8665 gcc_unreachable ();
8666 }
8667
8668 /* Define where to put the arguments to a function.
8669 Value is zero to push the argument on the stack,
8670 or a hard register in which to store the argument.
8671
8672 MODE is the argument's machine mode.
8673 TYPE is the data type of the argument (as a tree).
8674 This is null for libcalls where that information may
8675 not be available.
8676 CUM is a variable of type CUMULATIVE_ARGS which gives info about
8677 the preceding args and about the function being called.
8678 NAMED is nonzero if this argument is a named parameter
8679 (otherwise it is an extra parameter matching an ellipsis).
8680
8681 On S/390, we use general purpose registers 2 through 6 to
8682 pass integer, pointer, and certain structure arguments, and
8683 floating point registers 0 and 2 (0, 2, 4, and 6 on 64-bit)
8684 to pass floating point arguments. All remaining arguments
8685 are pushed to the stack. */
8686
8687 static rtx
8688 s390_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
8689 const_tree type, bool named ATTRIBUTE_UNUSED)
8690 {
8691 if (s390_function_arg_float (mode, type))
8692 {
8693 if (cum->fprs + 1 > FP_ARG_NUM_REG)
8694 return 0;
8695 else
8696 return gen_rtx_REG (mode, cum->fprs + 16);
8697 }
8698 else if (s390_function_arg_integer (mode, type))
8699 {
8700 int size = s390_function_arg_size (mode, type);
8701 int n_gprs = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
8702
8703 if (cum->gprs + n_gprs > GP_ARG_NUM_REG)
8704 return 0;
8705 else if (n_gprs == 1 || UNITS_PER_WORD == UNITS_PER_LONG)
8706 return gen_rtx_REG (mode, cum->gprs + 2);
8707 else if (n_gprs == 2)
8708 {
8709 rtvec p = rtvec_alloc (2);
8710
8711 RTVEC_ELT (p, 0)
8712 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 2),
8713 const0_rtx);
8714 RTVEC_ELT (p, 1)
8715 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 3),
8716 GEN_INT (4));
8717
8718 return gen_rtx_PARALLEL (mode, p);
8719 }
8720 }
8721
8722 /* After the real arguments, expand_call calls us once again
8723 with a void_type_node type. Whatever we return here is
8724 passed as operand 2 to the call expanders.
8725
8726 We don't need this feature ... */
8727 else if (type == void_type_node)
8728 return const0_rtx;
8729
8730 gcc_unreachable ();
8731 }
8732
8733 /* Return true if return values of type TYPE should be returned
8734 in a memory buffer whose address is passed by the caller as
8735 hidden first argument. */
8736
8737 static bool
8738 s390_return_in_memory (const_tree type, const_tree fundecl ATTRIBUTE_UNUSED)
8739 {
8740 /* We accept small integral (and similar) types. */
8741 if (INTEGRAL_TYPE_P (type)
8742 || POINTER_TYPE_P (type)
8743 || TREE_CODE (type) == OFFSET_TYPE
8744 || TREE_CODE (type) == REAL_TYPE)
8745 return int_size_in_bytes (type) > 8;
8746
8747 /* Aggregates and similar constructs are always returned
8748 in memory. */
8749 if (AGGREGATE_TYPE_P (type)
8750 || TREE_CODE (type) == COMPLEX_TYPE
8751 || TREE_CODE (type) == VECTOR_TYPE)
8752 return true;
8753
8754 /* ??? We get called on all sorts of random stuff from
8755 aggregate_value_p. We can't abort, but it's not clear
8756 what's safe to return. Pretend it's a struct I guess. */
8757 return true;
8758 }
8759
8760 /* Function arguments and return values are promoted to word size. */
8761
8762 static enum machine_mode
8763 s390_promote_function_mode (const_tree type, enum machine_mode mode,
8764 int *punsignedp,
8765 const_tree fntype ATTRIBUTE_UNUSED,
8766 int for_return ATTRIBUTE_UNUSED)
8767 {
8768 if (INTEGRAL_MODE_P (mode)
8769 && GET_MODE_SIZE (mode) < UNITS_PER_LONG)
8770 {
8771 if (POINTER_TYPE_P (type))
8772 *punsignedp = POINTERS_EXTEND_UNSIGNED;
8773 return Pmode;
8774 }
8775
8776 return mode;
8777 }
8778
8779 /* Define where to return a (scalar) value of type RET_TYPE.
8780 If RET_TYPE is null, define where to return a (scalar)
8781 value of mode MODE from a libcall. */
8782
8783 static rtx
8784 s390_function_and_libcall_value (enum machine_mode mode,
8785 const_tree ret_type,
8786 const_tree fntype_or_decl,
8787 bool outgoing ATTRIBUTE_UNUSED)
8788 {
8789 /* For normal functions perform the promotion as
8790 promote_function_mode would do. */
8791 if (ret_type)
8792 {
8793 int unsignedp = TYPE_UNSIGNED (ret_type);
8794 mode = promote_function_mode (ret_type, mode, &unsignedp,
8795 fntype_or_decl, 1);
8796 }
8797
8798 gcc_assert (GET_MODE_CLASS (mode) == MODE_INT || SCALAR_FLOAT_MODE_P (mode));
8799 gcc_assert (GET_MODE_SIZE (mode) <= 8);
8800
8801 if (TARGET_HARD_FLOAT && SCALAR_FLOAT_MODE_P (mode))
8802 return gen_rtx_REG (mode, 16);
8803 else if (GET_MODE_SIZE (mode) <= UNITS_PER_LONG
8804 || UNITS_PER_LONG == UNITS_PER_WORD)
8805 return gen_rtx_REG (mode, 2);
8806 else if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_LONG)
8807 {
8808 /* This case is triggered when returning a 64 bit value with
8809 -m31 -mzarch. Although the value would fit into a single
8810 register it has to be forced into a 32 bit register pair in
8811 order to match the ABI. */
8812 rtvec p = rtvec_alloc (2);
8813
8814 RTVEC_ELT (p, 0)
8815 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 2), const0_rtx);
8816 RTVEC_ELT (p, 1)
8817 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 3), GEN_INT (4));
8818
8819 return gen_rtx_PARALLEL (mode, p);
8820 }
8821
8822 gcc_unreachable ();
8823 }
8824
8825 /* Define where to return a scalar return value of type RET_TYPE. */
8826
8827 static rtx
8828 s390_function_value (const_tree ret_type, const_tree fn_decl_or_type,
8829 bool outgoing)
8830 {
8831 return s390_function_and_libcall_value (TYPE_MODE (ret_type), ret_type,
8832 fn_decl_or_type, outgoing);
8833 }
8834
8835 /* Define where to return a scalar libcall return value of mode
8836 MODE. */
8837
8838 static rtx
8839 s390_libcall_value (enum machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
8840 {
8841 return s390_function_and_libcall_value (mode, NULL_TREE,
8842 NULL_TREE, true);
8843 }
8844
8845
8846 /* Create and return the va_list datatype.
8847
8848 On S/390, va_list is an array type equivalent to
8849
8850 typedef struct __va_list_tag
8851 {
8852 long __gpr;
8853 long __fpr;
8854 void *__overflow_arg_area;
8855 void *__reg_save_area;
8856 } va_list[1];
8857
8858 where __gpr and __fpr hold the number of general purpose
8859 or floating point arguments used up to now, respectively,
8860 __overflow_arg_area points to the stack location of the
8861 next argument passed on the stack, and __reg_save_area
8862 always points to the start of the register area in the
8863 call frame of the current function. The function prologue
8864 saves all registers used for argument passing into this
8865 area if the function uses variable arguments. */
8866
8867 static tree
8868 s390_build_builtin_va_list (void)
8869 {
8870 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
8871
8872 record = lang_hooks.types.make_type (RECORD_TYPE);
8873
8874 type_decl =
8875 build_decl (BUILTINS_LOCATION,
8876 TYPE_DECL, get_identifier ("__va_list_tag"), record);
8877
8878 f_gpr = build_decl (BUILTINS_LOCATION,
8879 FIELD_DECL, get_identifier ("__gpr"),
8880 long_integer_type_node);
8881 f_fpr = build_decl (BUILTINS_LOCATION,
8882 FIELD_DECL, get_identifier ("__fpr"),
8883 long_integer_type_node);
8884 f_ovf = build_decl (BUILTINS_LOCATION,
8885 FIELD_DECL, get_identifier ("__overflow_arg_area"),
8886 ptr_type_node);
8887 f_sav = build_decl (BUILTINS_LOCATION,
8888 FIELD_DECL, get_identifier ("__reg_save_area"),
8889 ptr_type_node);
8890
8891 va_list_gpr_counter_field = f_gpr;
8892 va_list_fpr_counter_field = f_fpr;
8893
8894 DECL_FIELD_CONTEXT (f_gpr) = record;
8895 DECL_FIELD_CONTEXT (f_fpr) = record;
8896 DECL_FIELD_CONTEXT (f_ovf) = record;
8897 DECL_FIELD_CONTEXT (f_sav) = record;
8898
8899 TYPE_STUB_DECL (record) = type_decl;
8900 TYPE_NAME (record) = type_decl;
8901 TYPE_FIELDS (record) = f_gpr;
8902 DECL_CHAIN (f_gpr) = f_fpr;
8903 DECL_CHAIN (f_fpr) = f_ovf;
8904 DECL_CHAIN (f_ovf) = f_sav;
8905
8906 layout_type (record);
8907
8908 /* The correct type is an array type of one element. */
8909 return build_array_type (record, build_index_type (size_zero_node));
8910 }
8911
8912 /* Implement va_start by filling the va_list structure VALIST.
8913 STDARG_P is always true, and ignored.
8914 NEXTARG points to the first anonymous stack argument.
8915
8916 The following global variables are used to initialize
8917 the va_list structure:
8918
8919 crtl->args.info:
8920 holds number of gprs and fprs used for named arguments.
8921 crtl->args.arg_offset_rtx:
8922 holds the offset of the first anonymous stack argument
8923 (relative to the virtual arg pointer). */
8924
8925 static void
8926 s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
8927 {
8928 HOST_WIDE_INT n_gpr, n_fpr;
8929 int off;
8930 tree f_gpr, f_fpr, f_ovf, f_sav;
8931 tree gpr, fpr, ovf, sav, t;
8932
8933 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
8934 f_fpr = DECL_CHAIN (f_gpr);
8935 f_ovf = DECL_CHAIN (f_fpr);
8936 f_sav = DECL_CHAIN (f_ovf);
8937
8938 valist = build_simple_mem_ref (valist);
8939 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
8940 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
8941 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
8942 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
8943
8944 /* Count number of gp and fp argument registers used. */
8945
8946 n_gpr = crtl->args.info.gprs;
8947 n_fpr = crtl->args.info.fprs;
8948
8949 if (cfun->va_list_gpr_size)
8950 {
8951 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
8952 build_int_cst (NULL_TREE, n_gpr));
8953 TREE_SIDE_EFFECTS (t) = 1;
8954 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8955 }
8956
8957 if (cfun->va_list_fpr_size)
8958 {
8959 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
8960 build_int_cst (NULL_TREE, n_fpr));
8961 TREE_SIDE_EFFECTS (t) = 1;
8962 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8963 }
8964
8965 /* Find the overflow area. */
8966 if (n_gpr + cfun->va_list_gpr_size > GP_ARG_NUM_REG
8967 || n_fpr + cfun->va_list_fpr_size > FP_ARG_NUM_REG)
8968 {
8969 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
8970
8971 off = INTVAL (crtl->args.arg_offset_rtx);
8972 off = off < 0 ? 0 : off;
8973 if (TARGET_DEBUG_ARG)
8974 fprintf (stderr, "va_start: n_gpr = %d, n_fpr = %d off %d\n",
8975 (int)n_gpr, (int)n_fpr, off);
8976
8977 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovf), t, size_int (off));
8978
8979 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
8980 TREE_SIDE_EFFECTS (t) = 1;
8981 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8982 }
8983
8984 /* Find the register save area. */
8985 if ((cfun->va_list_gpr_size && n_gpr < GP_ARG_NUM_REG)
8986 || (cfun->va_list_fpr_size && n_fpr < FP_ARG_NUM_REG))
8987 {
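/* __reg_save_area is set to where the save slot for gpr 0 would be,
   i.e. RETURN_REGNUM slots below the slot the return address pointer
   designates.  */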
8988 t = make_tree (TREE_TYPE (sav), return_address_pointer_rtx);
8989 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (sav), t,
8990 size_int (-RETURN_REGNUM * UNITS_PER_LONG));
8991
8992 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
8993 TREE_SIDE_EFFECTS (t) = 1;
8994 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8995 }
8996 }
8997
8998 /* Implement va_arg by updating the va_list structure
8999 VALIST as required to retrieve an argument of type
9000 TYPE, and returning that argument.
9001
9002 Generates code equivalent to:
9003
9004 if (integral value) {
9005 if (size <= 4 && args.gpr < 5 ||
9006 size > 4 && args.gpr < 4 )
9007 ret = args.reg_save_area[args.gpr+8]
9008 else
9009 ret = *args.overflow_arg_area++;
9010 } else if (float value) {
9011 if (args.fgpr < 2)
9012 ret = args.reg_save_area[args.fpr+64]
9013 else
9014 ret = *args.overflow_arg_area++;
9015 } else if (aggregate value) {
9016 if (args.gpr < 5)
9017 ret = *args.reg_save_area[args.gpr]
9018 else
9019 ret = **args.overflow_arg_area++;
9020 } */
9021
9022 static tree
9023 s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
9024 gimple_seq *post_p ATTRIBUTE_UNUSED)
9025 {
9026 tree f_gpr, f_fpr, f_ovf, f_sav;
9027 tree gpr, fpr, ovf, sav, reg, t, u;
9028 int indirect_p, size, n_reg, sav_ofs, sav_scale, max_reg;
9029 tree lab_false, lab_over, addr;
9030
9031 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
9032 f_fpr = DECL_CHAIN (f_gpr);
9033 f_ovf = DECL_CHAIN (f_fpr);
9034 f_sav = DECL_CHAIN (f_ovf);
9035
9036 valist = build_va_arg_indirect_ref (valist);
9037 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
9038 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
9039 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
9040
9041 /* The tree for args* cannot be shared between gpr/fpr and ovf since
9042 both appear on a lhs. */
9043 valist = unshare_expr (valist);
9044 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
9045
9046 size = int_size_in_bytes (type);
9047
9048 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
9049 {
9050 if (TARGET_DEBUG_ARG)
9051 {
9052 fprintf (stderr, "va_arg: aggregate type");
9053 debug_tree (type);
9054 }
9055
9056 /* Aggregates are passed by reference. */
9057 indirect_p = 1;
9058 reg = gpr;
9059 n_reg = 1;
9060
9061 /* Kernel stack layout on 31 bit: it is assumed here that no padding
9062 will be added by s390_frame_info because for va_args an even number
9063 of GPRs always has to be saved (r15-r2 = 14 regs). */
9064 sav_ofs = 2 * UNITS_PER_LONG;
9065 sav_scale = UNITS_PER_LONG;
9066 size = UNITS_PER_LONG;
9067 max_reg = GP_ARG_NUM_REG - n_reg;
9068 }
9069 else if (s390_function_arg_float (TYPE_MODE (type), type))
9070 {
9071 if (TARGET_DEBUG_ARG)
9072 {
9073 fprintf (stderr, "va_arg: float type");
9074 debug_tree (type);
9075 }
9076
9077 /* FP args go in FP registers, if present. */
9078 indirect_p = 0;
9079 reg = fpr;
9080 n_reg = 1;
9081 sav_ofs = 16 * UNITS_PER_LONG;
9082 sav_scale = 8;
9083 max_reg = FP_ARG_NUM_REG - n_reg;
9084 }
9085 else
9086 {
9087 if (TARGET_DEBUG_ARG)
9088 {
9089 fprintf (stderr, "va_arg: other type");
9090 debug_tree (type);
9091 }
9092
9093 /* Otherwise into GP registers. */
9094 indirect_p = 0;
9095 reg = gpr;
9096 n_reg = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
9097
9098 /* Kernel stack layout on 31 bit: it is assumed here that no padding
9099 will be added by s390_frame_info because for va_args an even number
9100 of GPRs always has to be saved (r15-r2 = 14 regs). */
9101 sav_ofs = 2 * UNITS_PER_LONG;
9102
9103 if (size < UNITS_PER_LONG)
9104 sav_ofs += UNITS_PER_LONG - size;
9105
9106 sav_scale = UNITS_PER_LONG;
9107 max_reg = GP_ARG_NUM_REG - n_reg;
9108 }
9109
9110 /* Pull the value out of the saved registers ... */
9111
9112 lab_false = create_artificial_label (UNKNOWN_LOCATION);
9113 lab_over = create_artificial_label (UNKNOWN_LOCATION);
9114 addr = create_tmp_var (ptr_type_node, "addr");
9115
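/* Branch to the overflow path if the register count already exceeds
   MAX_REG, i.e. no argument register is left for this argument.  */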
9116 t = fold_convert (TREE_TYPE (reg), size_int (max_reg));
9117 t = build2 (GT_EXPR, boolean_type_node, reg, t);
9118 u = build1 (GOTO_EXPR, void_type_node, lab_false);
9119 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
9120 gimplify_and_add (t, pre_p);
9121
9122 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav,
9123 size_int (sav_ofs));
9124 u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
9125 fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
9126 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t, fold_convert (sizetype, u));
9127
9128 gimplify_assign (addr, t, pre_p);
9129
9130 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
9131
9132 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
9133
9134
9135 /* ... Otherwise out of the overflow area. */
9136
9137 t = ovf;
9138 if (size < UNITS_PER_LONG)
9139 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
9140 size_int (UNITS_PER_LONG - size));
9141
9142 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
9143
9144 gimplify_assign (addr, t, pre_p);
9145
9146 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
9147 size_int (size));
9148 gimplify_assign (ovf, t, pre_p);
9149
9150 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
9151
9152
9153 /* Increment register save count. */
9154
9155 u = build2 (PREINCREMENT_EXPR, TREE_TYPE (reg), reg,
9156 fold_convert (TREE_TYPE (reg), size_int (n_reg)));
9157 gimplify_and_add (u, pre_p);
9158
9159 if (indirect_p)
9160 {
9161 t = build_pointer_type_for_mode (build_pointer_type (type),
9162 ptr_mode, true);
9163 addr = fold_convert (t, addr);
9164 addr = build_va_arg_indirect_ref (addr);
9165 }
9166 else
9167 {
9168 t = build_pointer_type_for_mode (type, ptr_mode, true);
9169 addr = fold_convert (t, addr);
9170 }
9171
9172 return build_va_arg_indirect_ref (addr);
9173 }
9174
9175
9176 /* Builtins. */
9177
9178 enum s390_builtin
9179 {
9180 S390_BUILTIN_THREAD_POINTER,
9181 S390_BUILTIN_SET_THREAD_POINTER,
9182
9183 S390_BUILTIN_max
9184 };
9185
9186 static enum insn_code const code_for_builtin_64[S390_BUILTIN_max] = {
9187 CODE_FOR_get_tp_64,
9188 CODE_FOR_set_tp_64
9189 };
9190
9191 static enum insn_code const code_for_builtin_31[S390_BUILTIN_max] = {
9192 CODE_FOR_get_tp_31,
9193 CODE_FOR_set_tp_31
9194 };
9195
9196 static void
9197 s390_init_builtins (void)
9198 {
9199 tree ftype;
9200
9201 ftype = build_function_type (ptr_type_node, void_list_node);
9202 add_builtin_function ("__builtin_thread_pointer", ftype,
9203 S390_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
9204 NULL, NULL_TREE);
9205
9206 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
9207 add_builtin_function ("__builtin_set_thread_pointer", ftype,
9208 S390_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
9209 NULL, NULL_TREE);
9210 }
9211
9212 /* Expand an expression EXP that calls a built-in function,
9213 with result going to TARGET if that's convenient
9214 (and in mode MODE if that's convenient).
9215 SUBTARGET may be used as the target for computing one of EXP's operands.
9216 IGNORE is nonzero if the value is to be ignored. */
9217
9218 static rtx
9219 s390_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
9220 enum machine_mode mode ATTRIBUTE_UNUSED,
9221 int ignore ATTRIBUTE_UNUSED)
9222 {
9223 #define MAX_ARGS 2
9224
9225 enum insn_code const *code_for_builtin =
9226 TARGET_64BIT ? code_for_builtin_64 : code_for_builtin_31;
9227
9228 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
9229 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
9230 enum insn_code icode;
9231 rtx op[MAX_ARGS], pat;
9232 int arity;
9233 bool nonvoid;
9234 tree arg;
9235 call_expr_arg_iterator iter;
9236
9237 if (fcode >= S390_BUILTIN_max)
9238 internal_error ("bad builtin fcode");
9239 icode = code_for_builtin[fcode];
9240 if (icode == 0)
9241 internal_error ("bad builtin fcode");
9242
9243 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
9244
9245 arity = 0;
9246 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
9247 {
9248 const struct insn_operand_data *insn_op;
9249
9250 if (arg == error_mark_node)
9251 return NULL_RTX;
9252 if (arity > MAX_ARGS)
9253 return NULL_RTX;
9254
9255 insn_op = &insn_data[icode].operand[arity + nonvoid];
9256
9257 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
9258
9259 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
9260 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
9261 arity++;
9262 }
9263
9264 if (nonvoid)
9265 {
9266 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9267 if (!target
9268 || GET_MODE (target) != tmode
9269 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
9270 target = gen_reg_rtx (tmode);
9271 }
9272
9273 switch (arity)
9274 {
9275 case 0:
9276 pat = GEN_FCN (icode) (target);
9277 break;
9278 case 1:
9279 if (nonvoid)
9280 pat = GEN_FCN (icode) (target, op[0]);
9281 else
9282 pat = GEN_FCN (icode) (op[0]);
9283 break;
9284 case 2:
9285 pat = GEN_FCN (icode) (target, op[0], op[1]);
9286 break;
9287 default:
9288 gcc_unreachable ();
9289 }
9290 if (!pat)
9291 return NULL_RTX;
9292 emit_insn (pat);
9293
9294 if (nonvoid)
9295 return target;
9296 else
9297 return const0_rtx;
9298 }
9299
9300
9301 /* Output assembly code for the trampoline template to
9302 stdio stream FILE.
9303
9304 On S/390, we use gpr 1 internally in the trampoline code;
9305 gpr 0 is used to hold the static chain. */
9306
9307 static void
9308 s390_asm_trampoline_template (FILE *file)
9309 {
9310 rtx op[2];
9311 op[0] = gen_rtx_REG (Pmode, 0);
9312 op[1] = gen_rtx_REG (Pmode, 1);
9313
9314 if (TARGET_64BIT)
9315 {
9316 output_asm_insn ("basr\t%1,0", op);
9317 output_asm_insn ("lmg\t%0,%1,14(%1)", op);
9318 output_asm_insn ("br\t%1", op);
9319 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 10));
9320 }
9321 else
9322 {
9323 output_asm_insn ("basr\t%1,0", op);
9324 output_asm_insn ("lm\t%0,%1,6(%1)", op);
9325 output_asm_insn ("br\t%1", op);
9326 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 8));
9327 }
9328 }
9329
9330 /* Emit RTL insns to initialize the variable parts of a trampoline.
9331 FNADDR is an RTX for the address of the function's pure code.
9332 CXT is an RTX for the static chain value for the function. */
9333
9334 static void
9335 s390_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
9336 {
9337 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
9338 rtx mem;
9339
9340 emit_block_move (m_tramp, assemble_trampoline_template (),
9341 GEN_INT (2*UNITS_PER_WORD), BLOCK_OP_NORMAL);
9342
9343 mem = adjust_address (m_tramp, Pmode, 2*UNITS_PER_WORD);
9344 emit_move_insn (mem, cxt);
9345 mem = adjust_address (m_tramp, Pmode, 3*UNITS_PER_WORD);
9346 emit_move_insn (mem, fnaddr);
9347 }
9348
9349 /* Output assembler code to FILE to increment profiler label # LABELNO
9350 for profiling a function entry. */
9351
9352 void
9353 s390_function_profiler (FILE *file, int labelno)
9354 {
9355 rtx op[7];
9356
9357 char label[128];
9358 ASM_GENERATE_INTERNAL_LABEL (label, "LP", labelno);
9359
9360 fprintf (file, "# function profiler \n");
9361
9362 op[0] = gen_rtx_REG (Pmode, RETURN_REGNUM);
9363 op[1] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
9364 op[1] = gen_rtx_MEM (Pmode, plus_constant (op[1], UNITS_PER_LONG));
9365
9366 op[2] = gen_rtx_REG (Pmode, 1);
9367 op[3] = gen_rtx_SYMBOL_REF (Pmode, label);
9368 SYMBOL_REF_FLAGS (op[3]) = SYMBOL_FLAG_LOCAL;
9369
9370 op[4] = gen_rtx_SYMBOL_REF (Pmode, "_mcount");
9371 if (flag_pic)
9372 {
9373 op[4] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[4]), UNSPEC_PLT);
9374 op[4] = gen_rtx_CONST (Pmode, op[4]);
9375 }
9376
9377 if (TARGET_64BIT)
9378 {
9379 output_asm_insn ("stg\t%0,%1", op);
9380 output_asm_insn ("larl\t%2,%3", op);
9381 output_asm_insn ("brasl\t%0,%4", op);
9382 output_asm_insn ("lg\t%0,%1", op);
9383 }
9384 else if (!flag_pic)
9385 {
9386 op[6] = gen_label_rtx ();
9387
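/* 31 bit, non-PIC: bras over an inline literal pool holding the addresses
   of _mcount and the count label, then load both and call _mcount via
   basr.  */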
9388 output_asm_insn ("st\t%0,%1", op);
9389 output_asm_insn ("bras\t%2,%l6", op);
9390 output_asm_insn (".long\t%4", op);
9391 output_asm_insn (".long\t%3", op);
9392 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
9393 output_asm_insn ("l\t%0,0(%2)", op);
9394 output_asm_insn ("l\t%2,4(%2)", op);
9395 output_asm_insn ("basr\t%0,%0", op);
9396 output_asm_insn ("l\t%0,%1", op);
9397 }
9398 else
9399 {
9400 op[5] = gen_label_rtx ();
9401 op[6] = gen_label_rtx ();
9402
9403 output_asm_insn ("st\t%0,%1", op);
9404 output_asm_insn ("bras\t%2,%l6", op);
9405 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[5]));
9406 output_asm_insn (".long\t%4-%l5", op);
9407 output_asm_insn (".long\t%3-%l5", op);
9408 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
9409 output_asm_insn ("lr\t%0,%2", op);
9410 output_asm_insn ("a\t%0,0(%2)", op);
9411 output_asm_insn ("a\t%2,4(%2)", op);
9412 output_asm_insn ("basr\t%0,%0", op);
9413 output_asm_insn ("l\t%0,%1", op);
9414 }
9415 }
9416
9417 /* Encode symbol attributes (local vs. global, tls model) of a SYMBOL_REF
9418 into its SYMBOL_REF_FLAGS. */
9419
9420 static void
9421 s390_encode_section_info (tree decl, rtx rtl, int first)
9422 {
9423 default_encode_section_info (decl, rtl, first);
9424
9425 if (TREE_CODE (decl) == VAR_DECL)
9426 {
9427 /* If a variable has a forced alignment to < 2 bytes, mark it
9428 with SYMBOL_FLAG_ALIGN1 to prevent it from being used as a LARL
9429 operand. */
9430 if (DECL_USER_ALIGN (decl) && DECL_ALIGN (decl) < 16)
9431 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_ALIGN1;
9432 if (!DECL_SIZE (decl)
9433 || !DECL_ALIGN (decl)
9434 || !host_integerp (DECL_SIZE (decl), 0)
9435 || (DECL_ALIGN (decl) <= 64
9436 && DECL_ALIGN (decl) != tree_low_cst (DECL_SIZE (decl), 0)))
9437 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
9438 }
9439
9440 /* Literal pool references don't have a decl so they are handled
9441 differently here. We rely on the information in the MEM_ALIGN
9442 entry to decide upon natural alignment. */
9443 if (MEM_P (rtl)
9444 && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
9445 && TREE_CONSTANT_POOL_ADDRESS_P (XEXP (rtl, 0))
9446 && (MEM_ALIGN (rtl) == 0
9447 || GET_MODE_BITSIZE (GET_MODE (rtl)) == 0
9448 || MEM_ALIGN (rtl) < GET_MODE_BITSIZE (GET_MODE (rtl))))
9449 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
9450 }
9451
9452 /* Output thunk to FILE that implements a C++ virtual function call (with
9453 multiple inheritance) to FUNCTION. The thunk adjusts the this pointer
9454 by DELTA, and unless VCALL_OFFSET is zero, applies an additional adjustment
9455 stored at VCALL_OFFSET in the vtable whose address is located at offset 0
9456 relative to the resulting this pointer. */
9457
9458 static void
9459 s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
9460 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
9461 tree function)
9462 {
9463 rtx op[10];
9464 int nonlocal = 0;
9465
9466 /* Make sure unwind info is emitted for the thunk if needed. */
9467 final_start_function (emit_barrier (), file, 1);
9468
9469 /* Operand 0 is the target function. */
9470 op[0] = XEXP (DECL_RTL (function), 0);
9471 if (flag_pic && !SYMBOL_REF_LOCAL_P (op[0]))
9472 {
9473 nonlocal = 1;
9474 op[0] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[0]),
9475 TARGET_64BIT ? UNSPEC_PLT : UNSPEC_GOT);
9476 op[0] = gen_rtx_CONST (Pmode, op[0]);
9477 }
9478
9479 /* Operand 1 is the 'this' pointer. */
9480 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
9481 op[1] = gen_rtx_REG (Pmode, 3);
9482 else
9483 op[1] = gen_rtx_REG (Pmode, 2);
9484
9485 /* Operand 2 is the delta. */
9486 op[2] = GEN_INT (delta);
9487
9488 /* Operand 3 is the vcall_offset. */
9489 op[3] = GEN_INT (vcall_offset);
9490
9491 /* Operand 4 is the temporary register. */
9492 op[4] = gen_rtx_REG (Pmode, 1);
9493
9494 /* Operands 5 to 8 can be used as labels. */
9495 op[5] = NULL_RTX;
9496 op[6] = NULL_RTX;
9497 op[7] = NULL_RTX;
9498 op[8] = NULL_RTX;
9499
9500 /* Operand 9 can be used as a temporary register. */
9501 op[9] = NULL_RTX;
9502
9503 /* Generate code. */
9504 if (TARGET_64BIT)
9505 {
9506 /* Setup literal pool pointer if required. */
9507 if ((!DISP_IN_RANGE (delta)
9508 && !CONST_OK_FOR_K (delta)
9509 && !CONST_OK_FOR_Os (delta))
9510 || (!DISP_IN_RANGE (vcall_offset)
9511 && !CONST_OK_FOR_K (vcall_offset)
9512 && !CONST_OK_FOR_Os (vcall_offset)))
9513 {
9514 op[5] = gen_label_rtx ();
9515 output_asm_insn ("larl\t%4,%5", op);
9516 }
9517
9518 /* Add DELTA to this pointer. */
9519 if (delta)
9520 {
9521 if (CONST_OK_FOR_J (delta))
9522 output_asm_insn ("la\t%1,%2(%1)", op);
9523 else if (DISP_IN_RANGE (delta))
9524 output_asm_insn ("lay\t%1,%2(%1)", op);
9525 else if (CONST_OK_FOR_K (delta))
9526 output_asm_insn ("aghi\t%1,%2", op);
9527 else if (CONST_OK_FOR_Os (delta))
9528 output_asm_insn ("agfi\t%1,%2", op);
9529 else
9530 {
9531 op[6] = gen_label_rtx ();
9532 output_asm_insn ("agf\t%1,%6-%5(%4)", op);
9533 }
9534 }
9535
9536 /* Perform vcall adjustment. */
9537 if (vcall_offset)
9538 {
9539 if (DISP_IN_RANGE (vcall_offset))
9540 {
9541 output_asm_insn ("lg\t%4,0(%1)", op);
9542 output_asm_insn ("ag\t%1,%3(%4)", op);
9543 }
9544 else if (CONST_OK_FOR_K (vcall_offset))
9545 {
9546 output_asm_insn ("lghi\t%4,%3", op);
9547 output_asm_insn ("ag\t%4,0(%1)", op);
9548 output_asm_insn ("ag\t%1,0(%4)", op);
9549 }
9550 else if (CONST_OK_FOR_Os (vcall_offset))
9551 {
9552 output_asm_insn ("lgfi\t%4,%3", op);
9553 output_asm_insn ("ag\t%4,0(%1)", op);
9554 output_asm_insn ("ag\t%1,0(%4)", op);
9555 }
9556 else
9557 {
9558 op[7] = gen_label_rtx ();
9559 output_asm_insn ("llgf\t%4,%7-%5(%4)", op);
9560 output_asm_insn ("ag\t%4,0(%1)", op);
9561 output_asm_insn ("ag\t%1,0(%4)", op);
9562 }
9563 }
9564
9565 /* Jump to target. */
9566 output_asm_insn ("jg\t%0", op);
9567
9568 /* Output literal pool if required. */
9569 if (op[5])
9570 {
9571 output_asm_insn (".align\t4", op);
9572 targetm.asm_out.internal_label (file, "L",
9573 CODE_LABEL_NUMBER (op[5]));
9574 }
9575 if (op[6])
9576 {
9577 targetm.asm_out.internal_label (file, "L",
9578 CODE_LABEL_NUMBER (op[6]));
9579 output_asm_insn (".long\t%2", op);
9580 }
9581 if (op[7])
9582 {
9583 targetm.asm_out.internal_label (file, "L",
9584 CODE_LABEL_NUMBER (op[7]));
9585 output_asm_insn (".long\t%3", op);
9586 }
9587 }
9588 else
9589 {
9590 /* Setup base pointer if required. */
9591 if (!vcall_offset
9592 || (!DISP_IN_RANGE (delta)
9593 && !CONST_OK_FOR_K (delta)
9594 && !CONST_OK_FOR_Os (delta))
9595 || (!DISP_IN_RANGE (delta)
9596 && !CONST_OK_FOR_K (vcall_offset)
9597 && !CONST_OK_FOR_Os (vcall_offset)))
9598 {
9599 op[5] = gen_label_rtx ();
9600 output_asm_insn ("basr\t%4,0", op);
9601 targetm.asm_out.internal_label (file, "L",
9602 CODE_LABEL_NUMBER (op[5]));
9603 }
9604
9605 /* Add DELTA to this pointer. */
9606 if (delta)
9607 {
9608 if (CONST_OK_FOR_J (delta))
9609 output_asm_insn ("la\t%1,%2(%1)", op);
9610 else if (DISP_IN_RANGE (delta))
9611 output_asm_insn ("lay\t%1,%2(%1)", op);
9612 else if (CONST_OK_FOR_K (delta))
9613 output_asm_insn ("ahi\t%1,%2", op);
9614 else if (CONST_OK_FOR_Os (delta))
9615 output_asm_insn ("afi\t%1,%2", op);
9616 else
9617 {
9618 op[6] = gen_label_rtx ();
9619 output_asm_insn ("a\t%1,%6-%5(%4)", op);
9620 }
9621 }
9622
9623 /* Perform vcall adjustment. */
9624 if (vcall_offset)
9625 {
9626 if (CONST_OK_FOR_J (vcall_offset))
9627 {
9628 output_asm_insn ("l\t%4,0(%1)", op);
9629 output_asm_insn ("a\t%1,%3(%4)", op);
9630 }
9631 else if (DISP_IN_RANGE (vcall_offset))
9632 {
9633 output_asm_insn ("l\t%4,0(%1)", op);
9634 output_asm_insn ("ay\t%1,%3(%4)", op);
9635 }
9636 else if (CONST_OK_FOR_K (vcall_offset))
9637 {
9638 output_asm_insn ("lhi\t%4,%3", op);
9639 output_asm_insn ("a\t%4,0(%1)", op);
9640 output_asm_insn ("a\t%1,0(%4)", op);
9641 }
9642 else if (CONST_OK_FOR_Os (vcall_offset))
9643 {
9644 output_asm_insn ("iilf\t%4,%3", op);
9645 output_asm_insn ("a\t%4,0(%1)", op);
9646 output_asm_insn ("a\t%1,0(%4)", op);
9647 }
9648 else
9649 {
9650 op[7] = gen_label_rtx ();
9651 output_asm_insn ("l\t%4,%7-%5(%4)", op);
9652 output_asm_insn ("a\t%4,0(%1)", op);
9653 output_asm_insn ("a\t%1,0(%4)", op);
9654 }
9655
9656 /* We had to clobber the base pointer register.
9657 Re-setup the base pointer (with a different base). */
9658 op[5] = gen_label_rtx ();
9659 output_asm_insn ("basr\t%4,0", op);
9660 targetm.asm_out.internal_label (file, "L",
9661 CODE_LABEL_NUMBER (op[5]));
9662 }
9663
9664 /* Jump to target. */
9665 op[8] = gen_label_rtx ();
9666
9667 if (!flag_pic)
9668 output_asm_insn ("l\t%4,%8-%5(%4)", op);
9669 else if (!nonlocal)
9670 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9671 /* We cannot call through .plt, since .plt requires %r12 loaded. */
9672 else if (flag_pic == 1)
9673 {
9674 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9675 output_asm_insn ("l\t%4,%0(%4)", op);
9676 }
9677 else if (flag_pic == 2)
9678 {
9679 op[9] = gen_rtx_REG (Pmode, 0);
9680 output_asm_insn ("l\t%9,%8-4-%5(%4)", op);
9681 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9682 output_asm_insn ("ar\t%4,%9", op);
9683 output_asm_insn ("l\t%4,0(%4)", op);
9684 }
9685
9686 output_asm_insn ("br\t%4", op);
9687
9688 /* Output literal pool. */
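/* The pool emitted here supplies whatever could not be encoded as an
   immediate above: the target address (in a form suitable for the
   current PIC level), and DELTA / VCALL_OFFSET when they were out of
   range, each preceded by the label used to address it.  */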
9689 output_asm_insn (".align\t4", op);
9690
9691 if (nonlocal && flag_pic == 2)
9692 output_asm_insn (".long\t%0", op);
9693 if (nonlocal)
9694 {
9695 op[0] = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
9696 SYMBOL_REF_FLAGS (op[0]) = SYMBOL_FLAG_LOCAL;
9697 }
9698
9699 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[8]));
9700 if (!flag_pic)
9701 output_asm_insn (".long\t%0", op);
9702 else
9703 output_asm_insn (".long\t%0-%5", op);
9704
9705 if (op[6])
9706 {
9707 targetm.asm_out.internal_label (file, "L",
9708 CODE_LABEL_NUMBER (op[6]));
9709 output_asm_insn (".long\t%2", op);
9710 }
9711 if (op[7])
9712 {
9713 targetm.asm_out.internal_label (file, "L",
9714 CODE_LABEL_NUMBER (op[7]));
9715 output_asm_insn (".long\t%3", op);
9716 }
9717 }
9718 final_end_function ();
9719 }
9720
9721 static bool
9722 s390_valid_pointer_mode (enum machine_mode mode)
9723 {
9724 return (mode == SImode || (TARGET_64BIT && mode == DImode));
9725 }
9726
9727 /* Checks whether the given CALL_EXPR would use a call-saved
9728 register. This is used to decide whether sibling call
9729 optimization could be performed on the respective function
9730 call. */
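/* For example (s390 ABI): %r6 is both the fifth general-purpose
   argument register and call-saved, so a call whose integer arguments
   reach %r6 is rejected here and hence never emitted as a sibcall.  */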
9731
9732 static bool
9733 s390_call_saved_register_used (tree call_expr)
9734 {
9735 CUMULATIVE_ARGS cum;
9736 tree parameter;
9737 enum machine_mode mode;
9738 tree type;
9739 rtx parm_rtx;
9740 int reg, i;
9741
9742 INIT_CUMULATIVE_ARGS (cum, NULL, NULL, 0, 0);
9743
9744 for (i = 0; i < call_expr_nargs (call_expr); i++)
9745 {
9746 parameter = CALL_EXPR_ARG (call_expr, i);
9747 gcc_assert (parameter);
9748
9749 /* For an undeclared variable passed as a parameter we will get
9750 an ERROR_MARK node here. */
9751 if (TREE_CODE (parameter) == ERROR_MARK)
9752 return true;
9753
9754 type = TREE_TYPE (parameter);
9755 gcc_assert (type);
9756
9757 mode = TYPE_MODE (type);
9758 gcc_assert (mode);
9759
9760 if (pass_by_reference (&cum, mode, type, true))
9761 {
9762 mode = Pmode;
9763 type = build_pointer_type (type);
9764 }
9765
9766 parm_rtx = s390_function_arg (&cum, mode, type, 0);
9767
9768 s390_function_arg_advance (&cum, mode, type, 0);
9769
9770 if (!parm_rtx)
9771 continue;
9772
9773 if (REG_P (parm_rtx))
9774 {
9775 for (reg = 0;
9776 reg < HARD_REGNO_NREGS (REGNO (parm_rtx), GET_MODE (parm_rtx));
9777 reg++)
9778 if (!call_used_regs[reg + REGNO (parm_rtx)])
9779 return true;
9780 }
9781
9782 if (GET_CODE (parm_rtx) == PARALLEL)
9783 {
9784 int i;
9785
9786 for (i = 0; i < XVECLEN (parm_rtx, 0); i++)
9787 {
9788 rtx r = XEXP (XVECEXP (parm_rtx, 0, i), 0);
9789
9790 gcc_assert (REG_P (r));
9791
9792 for (reg = 0;
9793 reg < HARD_REGNO_NREGS (REGNO (r), GET_MODE (r));
9794 reg++)
9795 if (!call_used_regs[reg + REGNO (r)])
9796 return true;
9797 }
9798 }
9799
9800 }
9801 return false;
9802 }
9803
9804 /* Return true if the given call expression can be
9805 turned into a sibling call.
9806 DECL holds the declaration of the function to be called whereas
9807 EXP is the call expression itself. */
9808
9809 static bool
9810 s390_function_ok_for_sibcall (tree decl, tree exp)
9811 {
9812 /* The TPF epilogue uses register 1. */
9813 if (TARGET_TPF_PROFILING)
9814 return false;
9815
9816 /* The 31-bit PLT code uses register 12 (GOT pointer - caller saved)
9817 which would have to be restored before the sibcall. */
9818 if (!TARGET_64BIT && flag_pic && decl && !targetm.binds_local_p (decl))
9819 return false;
9820
9821 /* Register 6 on s390 is available as an argument register but is
9822 call-saved, i.e. preserved across calls. Functions needing this
9823 register for arguments are therefore not suitable for sibcalls. */
9824 return !s390_call_saved_register_used (exp);
9825 }
9826
9827 /* Return the fixed registers used for condition codes. */
9828
9829 static bool
9830 s390_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
9831 {
9832 *p1 = CC_REGNUM;
9833 *p2 = INVALID_REGNUM;
9834
9835 return true;
9836 }
9837
9838 /* This function is used by the call expanders of the machine description.
9839 It emits the call insn itself together with the necessary operations
9840 to adjust the target address and returns the emitted insn.
9841 ADDR_LOCATION is the target address rtx
9842 TLS_CALL the location of the thread-local symbol
9843 RESULT_REG the register where the result of the call should be stored
9844 RETADDR_REG the register where the return address should be stored
9845 If this parameter is NULL_RTX the call is considered
9846 to be a sibling call. */
9847
9848 rtx
9849 s390_emit_call (rtx addr_location, rtx tls_call, rtx result_reg,
9850 rtx retaddr_reg)
9851 {
9852 bool plt_call = false;
9853 rtx insn;
9854 rtx call;
9855 rtx clobber;
9856 rtvec vec;
9857
9858 /* Direct function calls need special treatment. */
9859 if (GET_CODE (addr_location) == SYMBOL_REF)
9860 {
9861 /* When calling a global routine in PIC mode, we must
9862 replace the symbol itself with the PLT stub. */
9863 if (flag_pic && !SYMBOL_REF_LOCAL_P (addr_location))
9864 {
9865 if (retaddr_reg != NULL_RTX)
9866 {
9867 addr_location = gen_rtx_UNSPEC (Pmode,
9868 gen_rtvec (1, addr_location),
9869 UNSPEC_PLT);
9870 addr_location = gen_rtx_CONST (Pmode, addr_location);
9871 plt_call = true;
9872 }
9873 else
9874 /* For -fpic code the PLT entries might use r12 which is
9875 call-saved. Therefore we cannot do a sibcall when
9876 calling directly using a symbol ref. When reaching
9877 this point we decided (in s390_function_ok_for_sibcall)
9878 to do a sibcall for a function pointer but one of the
9879 optimizers was able to get rid of the function pointer
9880 by propagating the symbol ref into the call. This
9881 optimization is illegal for S/390 so we turn the direct
9882 call into an indirect call again. */
9883 addr_location = force_reg (Pmode, addr_location);
9884 }
9885
9886 /* Unless we can use the bras(l) insn, force the
9887 routine address into a register. */
9888 if (!TARGET_SMALL_EXEC && !TARGET_CPU_ZARCH)
9889 {
9890 if (flag_pic)
9891 addr_location = legitimize_pic_address (addr_location, 0);
9892 else
9893 addr_location = force_reg (Pmode, addr_location);
9894 }
9895 }
9896
9897 /* If it is already an indirect call or the code above moved the
9898 SYMBOL_REF to somewhere else, make sure the address can be found in
9899 register 1. */
9900 if (retaddr_reg == NULL_RTX
9901 && GET_CODE (addr_location) != SYMBOL_REF
9902 && !plt_call)
9903 {
9904 emit_move_insn (gen_rtx_REG (Pmode, SIBCALL_REGNUM), addr_location);
9905 addr_location = gen_rtx_REG (Pmode, SIBCALL_REGNUM);
9906 }
9907
9908 addr_location = gen_rtx_MEM (QImode, addr_location);
9909 call = gen_rtx_CALL (VOIDmode, addr_location, const0_rtx);
9910
9911 if (result_reg != NULL_RTX)
9912 call = gen_rtx_SET (VOIDmode, result_reg, call);
9913
9914 if (retaddr_reg != NULL_RTX)
9915 {
9916 clobber = gen_rtx_CLOBBER (VOIDmode, retaddr_reg);
9917
9918 if (tls_call != NULL_RTX)
9919 vec = gen_rtvec (3, call, clobber,
9920 gen_rtx_USE (VOIDmode, tls_call));
9921 else
9922 vec = gen_rtvec (2, call, clobber);
9923
9924 call = gen_rtx_PARALLEL (VOIDmode, vec);
9925 }
9926
9927 insn = emit_call_insn (call);
9928
9929 /* 31-bit PLT stubs and tls calls use the GOT register implicitly. */
9930 if ((!TARGET_64BIT && plt_call) || tls_call != NULL_RTX)
9931 {
9932 /* s390_function_ok_for_sibcall should
9933 have denied sibcalls in this case. */
9934 gcc_assert (retaddr_reg != NULL_RTX);
9935
9936 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
9937 }
9938 return insn;
9939 }
9940
9941 /* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
9942
9943 static void
9944 s390_conditional_register_usage (void)
9945 {
9946 int i;
9947
9948 if (flag_pic)
9949 {
9950 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
9951 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
9952 }
9953 if (TARGET_CPU_ZARCH)
9954 {
9955 fixed_regs[BASE_REGNUM] = 0;
9956 call_used_regs[BASE_REGNUM] = 0;
9957 fixed_regs[RETURN_REGNUM] = 0;
9958 call_used_regs[RETURN_REGNUM] = 0;
9959 }
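/* Mark the floating-point registers that the respective ABI defines as
   preserved across calls: %f8-%f15 (hard regs 24-31) in 64-bit mode,
   but only %f4 and %f6 (hard regs 18-19) in 31-bit mode.  */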
9960 if (TARGET_64BIT)
9961 {
9962 for (i = 24; i < 32; i++)
9963 call_used_regs[i] = call_really_used_regs[i] = 0;
9964 }
9965 else
9966 {
9967 for (i = 18; i < 20; i++)
9968 call_used_regs[i] = call_really_used_regs[i] = 0;
9969 }
9970
9971 if (TARGET_SOFT_FLOAT)
9972 {
9973 for (i = 16; i < 32; i++)
9974 call_used_regs[i] = fixed_regs[i] = 1;
9975 }
9976 }
9977
9978 /* Corresponding function to eh_return expander. */
9979
9980 static GTY(()) rtx s390_tpf_eh_return_symbol;
9981 void
9982 s390_emit_tpf_eh_return (rtx target)
9983 {
9984 rtx insn, reg;
9985
9986 if (!s390_tpf_eh_return_symbol)
9987 s390_tpf_eh_return_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tpf_eh_return");
9988
9989 reg = gen_rtx_REG (Pmode, 2);
9990
9991 emit_move_insn (reg, target);
9992 insn = s390_emit_call (s390_tpf_eh_return_symbol, NULL_RTX, reg,
9993 gen_rtx_REG (Pmode, RETURN_REGNUM));
9994 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
9995
9996 emit_move_insn (EH_RETURN_HANDLER_RTX, reg);
9997 }
9998
9999 /* Rework the prologue/epilogue to avoid saving/restoring
10000 registers unnecessarily. */
10001
10002 static void
10003 s390_optimize_prologue (void)
10004 {
10005 rtx insn, new_insn, next_insn;
10006
10007 /* Do a final recompute of the frame-related data. */
10008
10009 s390_update_frame_layout ();
10010
10011 /* If all special registers are in fact used, there's nothing we
10012 can do, so no point in walking the insn list. */
10013
10014 if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
10015 && cfun_frame_layout.last_save_gpr >= BASE_REGNUM
10016 && (TARGET_CPU_ZARCH
10017 || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
10018 && cfun_frame_layout.last_save_gpr >= RETURN_REGNUM)))
10019 return;
10020
10021 /* Search for prologue/epilogue insns and replace them. */
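/* A typical candidate (illustration only): the generic prologue may emit
   a single stm(g) saving %r6..%r15 even though the final frame layout
   shows that only, say, %r13..%r15 actually need saving.  The code below
   re-emits the save/restore for the smaller range and drops it entirely
   when no register needs saving at all.  */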
10022
10023 for (insn = get_insns (); insn; insn = next_insn)
10024 {
10025 int first, last, off;
10026 rtx set, base, offset;
10027
10028 next_insn = NEXT_INSN (insn);
10029
10030 if (GET_CODE (insn) != INSN)
10031 continue;
10032
10033 if (GET_CODE (PATTERN (insn)) == PARALLEL
10034 && store_multiple_operation (PATTERN (insn), VOIDmode))
10035 {
10036 set = XVECEXP (PATTERN (insn), 0, 0);
10037 first = REGNO (SET_SRC (set));
10038 last = first + XVECLEN (PATTERN (insn), 0) - 1;
10039 offset = const0_rtx;
10040 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
10041 off = INTVAL (offset);
10042
10043 if (GET_CODE (base) != REG || off < 0)
10044 continue;
10045 if (cfun_frame_layout.first_save_gpr != -1
10046 && (cfun_frame_layout.first_save_gpr < first
10047 || cfun_frame_layout.last_save_gpr > last))
10048 continue;
10049 if (REGNO (base) != STACK_POINTER_REGNUM
10050 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10051 continue;
10052 if (first > BASE_REGNUM || last < BASE_REGNUM)
10053 continue;
10054
10055 if (cfun_frame_layout.first_save_gpr != -1)
10056 {
10057 new_insn = save_gprs (base,
10058 off + (cfun_frame_layout.first_save_gpr
10059 - first) * UNITS_PER_LONG,
10060 cfun_frame_layout.first_save_gpr,
10061 cfun_frame_layout.last_save_gpr);
10062 new_insn = emit_insn_before (new_insn, insn);
10063 INSN_ADDRESSES_NEW (new_insn, -1);
10064 }
10065
10066 remove_insn (insn);
10067 continue;
10068 }
10069
10070 if (cfun_frame_layout.first_save_gpr == -1
10071 && GET_CODE (PATTERN (insn)) == SET
10072 && GET_CODE (SET_SRC (PATTERN (insn))) == REG
10073 && (REGNO (SET_SRC (PATTERN (insn))) == BASE_REGNUM
10074 || (!TARGET_CPU_ZARCH
10075 && REGNO (SET_SRC (PATTERN (insn))) == RETURN_REGNUM))
10076 && GET_CODE (SET_DEST (PATTERN (insn))) == MEM)
10077 {
10078 set = PATTERN (insn);
10079 first = REGNO (SET_SRC (set));
10080 offset = const0_rtx;
10081 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
10082 off = INTVAL (offset);
10083
10084 if (GET_CODE (base) != REG || off < 0)
10085 continue;
10086 if (REGNO (base) != STACK_POINTER_REGNUM
10087 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10088 continue;
10089
10090 remove_insn (insn);
10091 continue;
10092 }
10093
10094 if (GET_CODE (PATTERN (insn)) == PARALLEL
10095 && load_multiple_operation (PATTERN (insn), VOIDmode))
10096 {
10097 set = XVECEXP (PATTERN (insn), 0, 0);
10098 first = REGNO (SET_DEST (set));
10099 last = first + XVECLEN (PATTERN (insn), 0) - 1;
10100 offset = const0_rtx;
10101 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
10102 off = INTVAL (offset);
10103
10104 if (GET_CODE (base) != REG || off < 0)
10105 continue;
10106 if (cfun_frame_layout.first_restore_gpr != -1
10107 && (cfun_frame_layout.first_restore_gpr < first
10108 || cfun_frame_layout.last_restore_gpr > last))
10109 continue;
10110 if (REGNO (base) != STACK_POINTER_REGNUM
10111 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10112 continue;
10113 if (first > BASE_REGNUM || last < BASE_REGNUM)
10114 continue;
10115
10116 if (cfun_frame_layout.first_restore_gpr != -1)
10117 {
10118 new_insn = restore_gprs (base,
10119 off + (cfun_frame_layout.first_restore_gpr
10120 - first) * UNITS_PER_LONG,
10121 cfun_frame_layout.first_restore_gpr,
10122 cfun_frame_layout.last_restore_gpr);
10123 new_insn = emit_insn_before (new_insn, insn);
10124 INSN_ADDRESSES_NEW (new_insn, -1);
10125 }
10126
10127 remove_insn (insn);
10128 continue;
10129 }
10130
10131 if (cfun_frame_layout.first_restore_gpr == -1
10132 && GET_CODE (PATTERN (insn)) == SET
10133 && GET_CODE (SET_DEST (PATTERN (insn))) == REG
10134 && (REGNO (SET_DEST (PATTERN (insn))) == BASE_REGNUM
10135 || (!TARGET_CPU_ZARCH
10136 && REGNO (SET_DEST (PATTERN (insn))) == RETURN_REGNUM))
10137 && GET_CODE (SET_SRC (PATTERN (insn))) == MEM)
10138 {
10139 set = PATTERN (insn);
10140 first = REGNO (SET_DEST (set));
10141 offset = const0_rtx;
10142 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
10143 off = INTVAL (offset);
10144
10145 if (GET_CODE (base) != REG || off < 0)
10146 continue;
10147 if (REGNO (base) != STACK_POINTER_REGNUM
10148 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10149 continue;
10150
10151 remove_insn (insn);
10152 continue;
10153 }
10154 }
10155 }
10156
10157 /* On z10 and later the dynamic branch prediction must see the
10158 backward jump within a certain window. If not, it falls back to
10159 the static prediction. This function rearranges the loop backward
10160 branch in a way which makes the static prediction always correct.
10161 The function returns true if it added an instruction. */
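/* Roughly, assuming a conditional backward branch "jCC loop_head" that
   is too far away for the prediction window (a sketch, not literal
   output):

       jCC   loop_head             jNCC  skip
                             =>    j     loop_head
                                 skip:

   The backward branch becomes unconditional (and thus trivially
   predicted correctly); the new conditional branch is a short forward
   branch to the fall-through label.  */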
10162 static bool
10163 s390_fix_long_loop_prediction (rtx insn)
10164 {
10165 rtx set = single_set (insn);
10166 rtx code_label, label_ref, new_label;
10167 rtx uncond_jump;
10168 rtx cur_insn;
10169 rtx tmp;
10170 int distance;
10171
10172 /* This will exclude branch on count and branch on index patterns
10173 since these are correctly statically predicted. */
10174 if (!set
10175 || SET_DEST (set) != pc_rtx
10176 || GET_CODE (SET_SRC(set)) != IF_THEN_ELSE)
10177 return false;
10178
10179 label_ref = (GET_CODE (XEXP (SET_SRC (set), 1)) == LABEL_REF ?
10180 XEXP (SET_SRC (set), 1) : XEXP (SET_SRC (set), 2));
10181
10182 gcc_assert (GET_CODE (label_ref) == LABEL_REF);
10183
10184 code_label = XEXP (label_ref, 0);
10185
10186 if (INSN_ADDRESSES (INSN_UID (code_label)) == -1
10187 || INSN_ADDRESSES (INSN_UID (insn)) == -1
10188 || (INSN_ADDRESSES (INSN_UID (insn))
10189 - INSN_ADDRESSES (INSN_UID (code_label)) < PREDICT_DISTANCE))
10190 return false;
10191
10192 for (distance = 0, cur_insn = PREV_INSN (insn);
10193 distance < PREDICT_DISTANCE - 6;
10194 distance += get_attr_length (cur_insn), cur_insn = PREV_INSN (cur_insn))
10195 if (!cur_insn || JUMP_P (cur_insn) || LABEL_P (cur_insn))
10196 return false;
10197
10198 new_label = gen_label_rtx ();
10199 uncond_jump = emit_jump_insn_after (
10200 gen_rtx_SET (VOIDmode, pc_rtx,
10201 gen_rtx_LABEL_REF (VOIDmode, code_label)),
10202 insn);
10203 emit_label_after (new_label, uncond_jump);
10204
10205 tmp = XEXP (SET_SRC (set), 1);
10206 XEXP (SET_SRC (set), 1) = XEXP (SET_SRC (set), 2);
10207 XEXP (SET_SRC (set), 2) = tmp;
10208 INSN_CODE (insn) = -1;
10209
10210 XEXP (label_ref, 0) = new_label;
10211 JUMP_LABEL (insn) = new_label;
10212 JUMP_LABEL (uncond_jump) = code_label;
10213
10214 return true;
10215 }
10216
10217 /* Returns 1 if INSN reads the value of REG for purposes not related
10218 to addressing of memory, and 0 otherwise. */
10219 static int
10220 s390_non_addr_reg_read_p (rtx reg, rtx insn)
10221 {
10222 return reg_referenced_p (reg, PATTERN (insn))
10223 && !reg_used_in_mem_p (REGNO (reg), PATTERN (insn));
10224 }
10225
10226 /* Starting from INSN find_cond_jump looks downwards in the insn
10227 stream for a single jump insn which is the last user of the
10228 condition code set in INSN. */
10229 static rtx
10230 find_cond_jump (rtx insn)
10231 {
10232 for (; insn; insn = NEXT_INSN (insn))
10233 {
10234 rtx ite, cc;
10235
10236 if (LABEL_P (insn))
10237 break;
10238
10239 if (!JUMP_P (insn))
10240 {
10241 if (reg_mentioned_p (gen_rtx_REG (CCmode, CC_REGNUM), insn))
10242 break;
10243 continue;
10244 }
10245
10246 /* This will be triggered by a return. */
10247 if (GET_CODE (PATTERN (insn)) != SET)
10248 break;
10249
10250 gcc_assert (SET_DEST (PATTERN (insn)) == pc_rtx);
10251 ite = SET_SRC (PATTERN (insn));
10252
10253 if (GET_CODE (ite) != IF_THEN_ELSE)
10254 break;
10255
10256 cc = XEXP (XEXP (ite, 0), 0);
10257 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc)))
10258 break;
10259
10260 if (find_reg_note (insn, REG_DEAD, cc))
10261 return insn;
10262 break;
10263 }
10264
10265 return NULL_RTX;
10266 }
10267
10268 /* Swap the condition in COND and the operands in OP0 and OP1 so that
10269 the semantics does not change. If NULL_RTX is passed as COND the
10270 function tries to find the conditional jump starting with INSN. */
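/* E.g. (illustration): "cc = compare (a, b); if (cc > 0) ..." becomes
   "cc = compare (b, a); if (cc < 0) ..."; the operands are exchanged
   and the comparison code is replaced via swap_condition.  */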
10271 static void
10272 s390_swap_cmp (rtx cond, rtx *op0, rtx *op1, rtx insn)
10273 {
10274 rtx tmp = *op0;
10275
10276 if (cond == NULL_RTX)
10277 {
10278 rtx jump = find_cond_jump (NEXT_INSN (insn));
10279 jump = jump ? single_set (jump) : NULL_RTX;
10280
10281 if (jump == NULL_RTX)
10282 return;
10283
10284 cond = XEXP (XEXP (jump, 1), 0);
10285 }
10286
10287 *op0 = *op1;
10288 *op1 = tmp;
10289 PUT_CODE (cond, swap_condition (GET_CODE (cond)));
10290 }
10291
10292 /* On z10, instructions of the compare-and-branch family have the
10293 property of accessing the register occurring as second operand with
10294 its bits complemented. If such a compare is grouped with a second
10295 instruction that accesses the same register non-complemented, and
10296 if that register's value is delivered via a bypass, then the
10297 pipeline recycles, thereby causing significant performance decline.
10298 This function locates such situations and exchanges the two
10299 operands of the compare. The function returns true whenever it
10300 added an insn. */
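/* Illustration (assumed insn sequence): for a compare "cr %r1,%r2"
   grouped with a following insn that reads %r2, the operands (and the
   condition) are swapped to "cr %r2,%r1" unless that would conflict
   with the previous insn, in which case a nop is emitted after the
   compare instead.  */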
10301 static bool
10302 s390_z10_optimize_cmp (rtx insn)
10303 {
10304 rtx prev_insn, next_insn;
10305 bool insn_added_p = false;
10306 rtx cond, *op0, *op1;
10307
10308 if (GET_CODE (PATTERN (insn)) == PARALLEL)
10309 {
10310 /* Handle compare and branch and branch on count
10311 instructions. */
10312 rtx pattern = single_set (insn);
10313
10314 if (!pattern
10315 || SET_DEST (pattern) != pc_rtx
10316 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE)
10317 return false;
10318
10319 cond = XEXP (SET_SRC (pattern), 0);
10320 op0 = &XEXP (cond, 0);
10321 op1 = &XEXP (cond, 1);
10322 }
10323 else if (GET_CODE (PATTERN (insn)) == SET)
10324 {
10325 rtx src, dest;
10326
10327 /* Handle normal compare instructions. */
10328 src = SET_SRC (PATTERN (insn));
10329 dest = SET_DEST (PATTERN (insn));
10330
10331 if (!REG_P (dest)
10332 || !CC_REGNO_P (REGNO (dest))
10333 || GET_CODE (src) != COMPARE)
10334 return false;
10335
10336 /* s390_swap_cmp will try to find the conditional
10337 jump when passing NULL_RTX as condition. */
10338 cond = NULL_RTX;
10339 op0 = &XEXP (src, 0);
10340 op1 = &XEXP (src, 1);
10341 }
10342 else
10343 return false;
10344
10345 if (!REG_P (*op0) || !REG_P (*op1))
10346 return false;
10347
10348 if (GET_MODE_CLASS (GET_MODE (*op0)) != MODE_INT)
10349 return false;
10350
10351 /* Swap the COMPARE arguments and its mask if there is a
10352 conflicting access in the previous insn. */
10353 prev_insn = prev_active_insn (insn);
10354 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
10355 && reg_referenced_p (*op1, PATTERN (prev_insn)))
10356 s390_swap_cmp (cond, op0, op1, insn);
10357
10358 /* Check if there is a conflict with the next insn. If there
10359 was no conflict with the previous insn, then swap the
10360 COMPARE arguments and its mask. If we already swapped
10361 the operands, or if swapping them would cause a conflict
10362 with the previous insn, issue a NOP after the COMPARE in
10363 order to separate the two instructions. */
10364 next_insn = next_active_insn (insn);
10365 if (next_insn != NULL_RTX && INSN_P (next_insn)
10366 && s390_non_addr_reg_read_p (*op1, next_insn))
10367 {
10368 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
10369 && s390_non_addr_reg_read_p (*op0, prev_insn))
10370 {
10371 if (REGNO (*op1) == 0)
10372 emit_insn_after (gen_nop1 (), insn);
10373 else
10374 emit_insn_after (gen_nop (), insn);
10375 insn_added_p = true;
10376 }
10377 else
10378 s390_swap_cmp (cond, op0, op1, insn);
10379 }
10380 return insn_added_p;
10381 }
10382
10383 /* Perform machine-dependent processing. */
10384
10385 static void
10386 s390_reorg (void)
10387 {
10388 bool pool_overflow = false;
10389
10390 /* Make sure all splits have been performed; splits after
10391 machine_dependent_reorg might confuse insn length counts. */
10392 split_all_insns_noflow ();
10393
10394 /* Install the main literal pool and the associated base
10395 register load insns.
10396
10397 In addition, there are two problematic situations we need
10398 to correct:
10399
10400 - the literal pool might be > 4096 bytes in size, so that
10401 some of its elements cannot be directly accessed
10402
10403 - a branch target might be > 64K away from the branch, so that
10404 it is not possible to use a PC-relative instruction.
10405
10406 To fix those, we split the single literal pool into multiple
10407 pool chunks, reloading the pool base register at various
10408 points throughout the function to ensure it always points to
10409 the pool chunk the following code expects, and / or replace
10410 PC-relative branches by absolute branches.
10411
10412 However, the two problems are interdependent: splitting the
10413 literal pool can move a branch further away from its target,
10414 causing the 64K limit to overflow, and on the other hand,
10415 replacing a PC-relative branch by an absolute branch means
10416 we need to put the branch target address into the literal
10417 pool, possibly causing it to overflow.
10418
10419 So, we loop trying to fix up both problems until we manage
10420 to satisfy both conditions at the same time. Note that the
10421 loop is guaranteed to terminate as every pass of the loop
10422 strictly decreases the total number of PC-relative branches
10423 in the function. (This is not completely true as there
10424 might be branch-over-pool insns introduced by chunkify_start.
10425 Those never need to be split however.) */
10426
10427 for (;;)
10428 {
10429 struct constant_pool *pool = NULL;
10430
10431 /* Collect the literal pool. */
10432 if (!pool_overflow)
10433 {
10434 pool = s390_mainpool_start ();
10435 if (!pool)
10436 pool_overflow = true;
10437 }
10438
10439 /* If literal pool overflowed, start to chunkify it. */
10440 if (pool_overflow)
10441 pool = s390_chunkify_start ();
10442
10443 /* Split out-of-range branches. If this has created new
10444 literal pool entries, cancel current chunk list and
10445 recompute it. zSeries machines have large branch
10446 instructions, so we never need to split a branch. */
10447 if (!TARGET_CPU_ZARCH && s390_split_branches ())
10448 {
10449 if (pool_overflow)
10450 s390_chunkify_cancel (pool);
10451 else
10452 s390_mainpool_cancel (pool);
10453
10454 continue;
10455 }
10456
10457 /* If we made it up to here, both conditions are satisfied.
10458 Finish up literal pool related changes. */
10459 if (pool_overflow)
10460 s390_chunkify_finish (pool);
10461 else
10462 s390_mainpool_finish (pool);
10463
10464 /* We're done splitting branches. */
10465 cfun->machine->split_branches_pending_p = false;
10466 break;
10467 }
10468
10469 /* Generate out-of-pool execute target insns. */
10470 if (TARGET_CPU_ZARCH)
10471 {
10472 rtx insn, label, target;
10473
10474 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10475 {
10476 label = s390_execute_label (insn);
10477 if (!label)
10478 continue;
10479
10480 gcc_assert (label != const0_rtx);
10481
10482 target = emit_label (XEXP (label, 0));
10483 INSN_ADDRESSES_NEW (target, -1);
10484
10485 target = emit_insn (s390_execute_target (insn));
10486 INSN_ADDRESSES_NEW (target, -1);
10487 }
10488 }
10489
10490 /* Try to optimize prologue and epilogue further. */
10491 s390_optimize_prologue ();
10492
10493 /* Walk over the insns and do some >=z10 specific changes. */
10494 if (s390_tune == PROCESSOR_2097_Z10
10495 || s390_tune == PROCESSOR_2817_Z196)
10496 {
10497 rtx insn;
10498 bool insn_added_p = false;
10499
10500 /* The insn lengths and addresses have to be up to date for the
10501 following manipulations. */
10502 shorten_branches (get_insns ());
10503
10504 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10505 {
10506 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
10507 continue;
10508
10509 if (JUMP_P (insn))
10510 insn_added_p |= s390_fix_long_loop_prediction (insn);
10511
10512 if ((GET_CODE (PATTERN (insn)) == PARALLEL
10513 || GET_CODE (PATTERN (insn)) == SET)
10514 && s390_tune == PROCESSOR_2097_Z10)
10515 insn_added_p |= s390_z10_optimize_cmp (insn);
10516 }
10517
10518 /* Adjust branches if we added new instructions. */
10519 if (insn_added_p)
10520 shorten_branches (get_insns ());
10521 }
10522 }
10523
10524 /* Return true if INSN is a fp load insn writing register REGNO. */
10525 static inline bool
10526 s390_fpload_toreg (rtx insn, unsigned int regno)
10527 {
10528 rtx set;
10529 enum attr_type flag = s390_safe_attr_type (insn);
10530
10531 if (flag != TYPE_FLOADSF && flag != TYPE_FLOADDF)
10532 return false;
10533
10534 set = single_set (insn);
10535
10536 if (set == NULL_RTX)
10537 return false;
10538
10539 if (!REG_P (SET_DEST (set)) || !MEM_P (SET_SRC (set)))
10540 return false;
10541
10542 if (REGNO (SET_DEST (set)) != regno)
10543 return false;
10544
10545 return true;
10546 }
10547
10548 /* This value describes the distance to be avoided between an
10549 arithmetic fp instruction and an fp load writing the same register.
10550 Z10_EARLYLOAD_DISTANCE - 1 as well as Z10_EARLYLOAD_DISTANCE + 1 are
10551 fine, but the exact value has to be avoided. Otherwise the FP
10552 pipeline will throw an exception causing a major penalty. */
10553 #define Z10_EARLYLOAD_DISTANCE 7
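/* For illustration (an assumed scenario): if an arithmetic FP
   instruction writing %f0 has been issued and a load into %f0 would
   otherwise be issued at exactly this distance,
   s390_z10_prevent_earlyload_conflicts below moves that load to the
   end of the ready list so that the critical distance is never hit.  */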
10554
10555 /* Rearrange the ready list in order to avoid the situation described
10556 for Z10_EARLYLOAD_DISTANCE. A problematic load instruction is
10557 moved to the very end of the ready list. */
10558 static void
10559 s390_z10_prevent_earlyload_conflicts (rtx *ready, int *nready_p)
10560 {
10561 unsigned int regno;
10562 int nready = *nready_p;
10563 rtx tmp;
10564 int i;
10565 rtx insn;
10566 rtx set;
10567 enum attr_type flag;
10568 int distance;
10569
10570 /* Skip DISTANCE - 1 active insns. */
10571 for (insn = last_scheduled_insn, distance = Z10_EARLYLOAD_DISTANCE - 1;
10572 distance > 0 && insn != NULL_RTX;
10573 distance--, insn = prev_active_insn (insn))
10574 if (CALL_P (insn) || JUMP_P (insn))
10575 return;
10576
10577 if (insn == NULL_RTX)
10578 return;
10579
10580 set = single_set (insn);
10581
10582 if (set == NULL_RTX || !REG_P (SET_DEST (set))
10583 || GET_MODE_CLASS (GET_MODE (SET_DEST (set))) != MODE_FLOAT)
10584 return;
10585
10586 flag = s390_safe_attr_type (insn);
10587
10588 if (flag == TYPE_FLOADSF || flag == TYPE_FLOADDF)
10589 return;
10590
10591 regno = REGNO (SET_DEST (set));
10592 i = nready - 1;
10593
10594 while (!s390_fpload_toreg (ready[i], regno) && i > 0)
10595 i--;
10596
10597 if (!i)
10598 return;
10599
10600 tmp = ready[i];
10601 memmove (&ready[1], &ready[0], sizeof (rtx) * i);
10602 ready[0] = tmp;
10603 }
10604
10605 /* This function is called via hook TARGET_SCHED_REORDER before
10606 issuing one insn from list READY which contains *NREADYP entries.
10607 For target z10 it reorders load instructions to avoid early load
10608 conflicts in the floating point pipeline. */
10609 static int
10610 s390_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
10611 rtx *ready, int *nreadyp, int clock ATTRIBUTE_UNUSED)
10612 {
10613 if (s390_tune == PROCESSOR_2097_Z10)
10614 if (reload_completed && *nreadyp > 1)
10615 s390_z10_prevent_earlyload_conflicts (ready, nreadyp);
10616
10617 return s390_issue_rate ();
10618 }
10619
10620 /* This function is called via hook TARGET_SCHED_VARIABLE_ISSUE after
10621 the scheduler has issued INSN. It stores the last issued insn into
10622 last_scheduled_insn in order to make it available for
10623 s390_sched_reorder. */
10624 static int
10625 s390_sched_variable_issue (FILE *file ATTRIBUTE_UNUSED,
10626 int verbose ATTRIBUTE_UNUSED,
10627 rtx insn, int more)
10628 {
10629 last_scheduled_insn = insn;
10630
10631 if (GET_CODE (PATTERN (insn)) != USE
10632 && GET_CODE (PATTERN (insn)) != CLOBBER)
10633 return more - 1;
10634 else
10635 return more;
10636 }
10637
10638 static void
10639 s390_sched_init (FILE *file ATTRIBUTE_UNUSED,
10640 int verbose ATTRIBUTE_UNUSED,
10641 int max_ready ATTRIBUTE_UNUSED)
10642 {
10643 last_scheduled_insn = NULL_RTX;
10644 }
10645
10646 /* This function checks the whole of insn X for memory references. The
10647 function always returns zero because the framework it is called
10648 from would stop recursively analyzing the insn upon a return value
10649 other than zero. The real result of this function is updating
10650 counter variable MEM_COUNT. */
10651 static int
10652 check_dpu (rtx *x, unsigned *mem_count)
10653 {
10654 if (*x != NULL_RTX && MEM_P (*x))
10655 (*mem_count)++;
10656 return 0;
10657 }
10658
10659 /* This target hook implementation for TARGET_LOOP_UNROLL_ADJUST calculates
10660 the number of times struct loop *loop should be unrolled when tuned for cpus with
10661 a built-in stride prefetcher.
10662 The loop is analyzed for memory accesses by calling check_dpu for
10663 each rtx of the loop. Depending on the loop_depth and the amount of
10664 memory accesses a new number <=nunroll is returned to improve the
10665 behaviour of the hardware prefetch unit. */
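/* Worked example (numbers from the switch below): a depth-1 loop
   containing 7 memory references with nunroll == 8 is limited to
   MIN (8, 28 / 7) == 4 unrolled copies.  */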
10666 static unsigned
10667 s390_loop_unroll_adjust (unsigned nunroll, struct loop *loop)
10668 {
10669 basic_block *bbs;
10670 rtx insn;
10671 unsigned i;
10672 unsigned mem_count = 0;
10673
10674 if (s390_tune != PROCESSOR_2097_Z10 && s390_tune != PROCESSOR_2817_Z196)
10675 return nunroll;
10676
10677 /* Count the number of memory references within the loop body. */
10678 bbs = get_loop_body (loop);
10679 for (i = 0; i < loop->num_nodes; i++)
10680 {
10681 for (insn = BB_HEAD (bbs[i]); insn != BB_END (bbs[i]); insn = NEXT_INSN (insn))
10682 if (INSN_P (insn) && INSN_CODE (insn) != -1)
10683 for_each_rtx (&insn, (rtx_function) check_dpu, &mem_count);
10684 }
10685 free (bbs);
10686
10687 /* Prevent division by zero; we do not need to adjust nunroll in this case. */
10688 if (mem_count == 0)
10689 return nunroll;
10690
10691 switch (loop_depth(loop))
10692 {
10693 case 1:
10694 return MIN (nunroll, 28 / mem_count);
10695 case 2:
10696 return MIN (nunroll, 22 / mem_count);
10697 default:
10698 return MIN (nunroll, 16 / mem_count);
10699 }
10700 }
10701
10702 /* Initialize GCC target structure. */
10703
10704 #undef TARGET_ASM_ALIGNED_HI_OP
10705 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
10706 #undef TARGET_ASM_ALIGNED_DI_OP
10707 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
10708 #undef TARGET_ASM_INTEGER
10709 #define TARGET_ASM_INTEGER s390_assemble_integer
10710
10711 #undef TARGET_ASM_OPEN_PAREN
10712 #define TARGET_ASM_OPEN_PAREN ""
10713
10714 #undef TARGET_ASM_CLOSE_PAREN
10715 #define TARGET_ASM_CLOSE_PAREN ""
10716
10717 #undef TARGET_DEFAULT_TARGET_FLAGS
10718 #define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT)
10719
10720 #undef TARGET_HANDLE_OPTION
10721 #define TARGET_HANDLE_OPTION s390_handle_option
10722
10723 #undef TARGET_OPTION_OVERRIDE
10724 #define TARGET_OPTION_OVERRIDE s390_option_override
10725
10726 #undef TARGET_OPTION_OPTIMIZATION_TABLE
10727 #define TARGET_OPTION_OPTIMIZATION_TABLE s390_option_optimization_table
10728
10729 #undef TARGET_OPTION_INIT_STRUCT
10730 #define TARGET_OPTION_INIT_STRUCT s390_option_init_struct
10731
10732 #undef TARGET_ENCODE_SECTION_INFO
10733 #define TARGET_ENCODE_SECTION_INFO s390_encode_section_info
10734
10735 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10736 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
10737
10738 #ifdef HAVE_AS_TLS
10739 #undef TARGET_HAVE_TLS
10740 #define TARGET_HAVE_TLS true
10741 #endif
10742 #undef TARGET_CANNOT_FORCE_CONST_MEM
10743 #define TARGET_CANNOT_FORCE_CONST_MEM s390_cannot_force_const_mem
10744
10745 #undef TARGET_DELEGITIMIZE_ADDRESS
10746 #define TARGET_DELEGITIMIZE_ADDRESS s390_delegitimize_address
10747
10748 #undef TARGET_LEGITIMIZE_ADDRESS
10749 #define TARGET_LEGITIMIZE_ADDRESS s390_legitimize_address
10750
10751 #undef TARGET_RETURN_IN_MEMORY
10752 #define TARGET_RETURN_IN_MEMORY s390_return_in_memory
10753
10754 #undef TARGET_INIT_BUILTINS
10755 #define TARGET_INIT_BUILTINS s390_init_builtins
10756 #undef TARGET_EXPAND_BUILTIN
10757 #define TARGET_EXPAND_BUILTIN s390_expand_builtin
10758
10759 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
10760 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA s390_output_addr_const_extra
10761
10762 #undef TARGET_ASM_OUTPUT_MI_THUNK
10763 #define TARGET_ASM_OUTPUT_MI_THUNK s390_output_mi_thunk
10764 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
10765 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
10766
10767 #undef TARGET_SCHED_ADJUST_PRIORITY
10768 #define TARGET_SCHED_ADJUST_PRIORITY s390_adjust_priority
10769 #undef TARGET_SCHED_ISSUE_RATE
10770 #define TARGET_SCHED_ISSUE_RATE s390_issue_rate
10771 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
10772 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD s390_first_cycle_multipass_dfa_lookahead
10773
10774 #undef TARGET_SCHED_VARIABLE_ISSUE
10775 #define TARGET_SCHED_VARIABLE_ISSUE s390_sched_variable_issue
10776 #undef TARGET_SCHED_REORDER
10777 #define TARGET_SCHED_REORDER s390_sched_reorder
10778 #undef TARGET_SCHED_INIT
10779 #define TARGET_SCHED_INIT s390_sched_init
10780
10781 #undef TARGET_CANNOT_COPY_INSN_P
10782 #define TARGET_CANNOT_COPY_INSN_P s390_cannot_copy_insn_p
10783 #undef TARGET_RTX_COSTS
10784 #define TARGET_RTX_COSTS s390_rtx_costs
10785 #undef TARGET_ADDRESS_COST
10786 #define TARGET_ADDRESS_COST s390_address_cost
10787 #undef TARGET_REGISTER_MOVE_COST
10788 #define TARGET_REGISTER_MOVE_COST s390_register_move_cost
10789 #undef TARGET_MEMORY_MOVE_COST
10790 #define TARGET_MEMORY_MOVE_COST s390_memory_move_cost
10791
10792 #undef TARGET_MACHINE_DEPENDENT_REORG
10793 #define TARGET_MACHINE_DEPENDENT_REORG s390_reorg
10794
10795 #undef TARGET_VALID_POINTER_MODE
10796 #define TARGET_VALID_POINTER_MODE s390_valid_pointer_mode
10797
10798 #undef TARGET_BUILD_BUILTIN_VA_LIST
10799 #define TARGET_BUILD_BUILTIN_VA_LIST s390_build_builtin_va_list
10800 #undef TARGET_EXPAND_BUILTIN_VA_START
10801 #define TARGET_EXPAND_BUILTIN_VA_START s390_va_start
10802 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
10803 #define TARGET_GIMPLIFY_VA_ARG_EXPR s390_gimplify_va_arg
10804
10805 #undef TARGET_PROMOTE_FUNCTION_MODE
10806 #define TARGET_PROMOTE_FUNCTION_MODE s390_promote_function_mode
10807 #undef TARGET_PASS_BY_REFERENCE
10808 #define TARGET_PASS_BY_REFERENCE s390_pass_by_reference
10809
10810 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
10811 #define TARGET_FUNCTION_OK_FOR_SIBCALL s390_function_ok_for_sibcall
10812 #undef TARGET_FUNCTION_ARG
10813 #define TARGET_FUNCTION_ARG s390_function_arg
10814 #undef TARGET_FUNCTION_ARG_ADVANCE
10815 #define TARGET_FUNCTION_ARG_ADVANCE s390_function_arg_advance
10816 #undef TARGET_FUNCTION_VALUE
10817 #define TARGET_FUNCTION_VALUE s390_function_value
10818 #undef TARGET_LIBCALL_VALUE
10819 #define TARGET_LIBCALL_VALUE s390_libcall_value
10820
10821 #undef TARGET_FIXED_CONDITION_CODE_REGS
10822 #define TARGET_FIXED_CONDITION_CODE_REGS s390_fixed_condition_code_regs
10823
10824 #undef TARGET_CC_MODES_COMPATIBLE
10825 #define TARGET_CC_MODES_COMPATIBLE s390_cc_modes_compatible
10826
10827 #undef TARGET_INVALID_WITHIN_DOLOOP
10828 #define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_null
10829
10830 #ifdef HAVE_AS_TLS
10831 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
10832 #define TARGET_ASM_OUTPUT_DWARF_DTPREL s390_output_dwarf_dtprel
10833 #endif
10834
10835 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
10836 #undef TARGET_MANGLE_TYPE
10837 #define TARGET_MANGLE_TYPE s390_mangle_type
10838 #endif
10839
10840 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10841 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
10842
10843 #undef TARGET_PREFERRED_RELOAD_CLASS
10844 #define TARGET_PREFERRED_RELOAD_CLASS s390_preferred_reload_class
10845
10846 #undef TARGET_SECONDARY_RELOAD
10847 #define TARGET_SECONDARY_RELOAD s390_secondary_reload
10848
10849 #undef TARGET_LIBGCC_CMP_RETURN_MODE
10850 #define TARGET_LIBGCC_CMP_RETURN_MODE s390_libgcc_cmp_return_mode
10851
10852 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
10853 #define TARGET_LIBGCC_SHIFT_COUNT_MODE s390_libgcc_shift_count_mode
10854
10855 #undef TARGET_LEGITIMATE_ADDRESS_P
10856 #define TARGET_LEGITIMATE_ADDRESS_P s390_legitimate_address_p
10857
10858 #undef TARGET_CAN_ELIMINATE
10859 #define TARGET_CAN_ELIMINATE s390_can_eliminate
10860
10861 #undef TARGET_CONDITIONAL_REGISTER_USAGE
10862 #define TARGET_CONDITIONAL_REGISTER_USAGE s390_conditional_register_usage
10863
10864 #undef TARGET_LOOP_UNROLL_ADJUST
10865 #define TARGET_LOOP_UNROLL_ADJUST s390_loop_unroll_adjust
10866
10867 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
10868 #define TARGET_ASM_TRAMPOLINE_TEMPLATE s390_asm_trampoline_template
10869 #undef TARGET_TRAMPOLINE_INIT
10870 #define TARGET_TRAMPOLINE_INIT s390_trampoline_init
10871
10872 #undef TARGET_UNWIND_WORD_MODE
10873 #define TARGET_UNWIND_WORD_MODE s390_unwind_word_mode
10874
10875 struct gcc_target targetm = TARGET_INITIALIZER;
10876
10877 #include "gt-s390.h"