s390: Use VOIDmode with gen_rtx_SET
gcc.git: gcc/config/s390/s390.c
1 /* Subroutines used for code generation on IBM S/390 and zSeries
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006,
3 2007, 2008, 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
4 Contributed by Hartmut Penner (hpenner@de.ibm.com) and
5 Ulrich Weigand (uweigand@de.ibm.com) and
6 Andreas Krebbel (Andreas.Krebbel@de.ibm.com).
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "tm_p.h"
31 #include "regs.h"
32 #include "hard-reg-set.h"
33 #include "insn-config.h"
34 #include "conditions.h"
35 #include "output.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "except.h"
39 #include "function.h"
40 #include "recog.h"
41 #include "expr.h"
42 #include "reload.h"
43 #include "diagnostic-core.h"
44 #include "basic-block.h"
45 #include "ggc.h"
46 #include "target.h"
47 #include "target-def.h"
48 #include "debug.h"
49 #include "langhooks.h"
50 #include "optabs.h"
51 #include "gimple.h"
52 #include "df.h"
53 #include "params.h"
54 #include "cfgloop.h"
55 #include "opts.h"
56
57 /* Define the specific costs for a given cpu. */
58
59 struct processor_costs
60 {
61 /* multiplication */
62 const int m; /* cost of an M instruction. */
63 const int mghi; /* cost of an MGHI instruction. */
64 const int mh; /* cost of an MH instruction. */
65 const int mhi; /* cost of an MHI instruction. */
66 const int ml; /* cost of an ML instruction. */
67 const int mr; /* cost of an MR instruction. */
68 const int ms; /* cost of an MS instruction. */
69 const int msg; /* cost of an MSG instruction. */
70 const int msgf; /* cost of an MSGF instruction. */
71 const int msgfr; /* cost of an MSGFR instruction. */
72 const int msgr; /* cost of an MSGR instruction. */
73 const int msr; /* cost of an MSR instruction. */
74 const int mult_df; /* cost of multiplication in DFmode. */
75 const int mxbr;
76 /* square root */
77 const int sqxbr; /* cost of square root in TFmode. */
78 const int sqdbr; /* cost of square root in DFmode. */
79 const int sqebr; /* cost of square root in SFmode. */
80 /* multiply and add */
81 const int madbr; /* cost of multiply and add in DFmode. */
82 const int maebr; /* cost of multiply and add in SFmode. */
83 /* division */
84 const int dxbr;
85 const int ddbr;
86 const int debr;
87 const int dlgr;
88 const int dlr;
89 const int dr;
90 const int dsgfr;
91 const int dsgr;
92 };
93
94 const struct processor_costs *s390_cost;
95
96 static const
97 struct processor_costs z900_cost =
98 {
99 COSTS_N_INSNS (5), /* M */
100 COSTS_N_INSNS (10), /* MGHI */
101 COSTS_N_INSNS (5), /* MH */
102 COSTS_N_INSNS (4), /* MHI */
103 COSTS_N_INSNS (5), /* ML */
104 COSTS_N_INSNS (5), /* MR */
105 COSTS_N_INSNS (4), /* MS */
106 COSTS_N_INSNS (15), /* MSG */
107 COSTS_N_INSNS (7), /* MSGF */
108 COSTS_N_INSNS (7), /* MSGFR */
109 COSTS_N_INSNS (10), /* MSGR */
110 COSTS_N_INSNS (4), /* MSR */
111 COSTS_N_INSNS (7), /* multiplication in DFmode */
112 COSTS_N_INSNS (13), /* MXBR */
113 COSTS_N_INSNS (136), /* SQXBR */
114 COSTS_N_INSNS (44), /* SQDBR */
115 COSTS_N_INSNS (35), /* SQEBR */
116 COSTS_N_INSNS (18), /* MADBR */
117 COSTS_N_INSNS (13), /* MAEBR */
118 COSTS_N_INSNS (134), /* DXBR */
119 COSTS_N_INSNS (30), /* DDBR */
120 COSTS_N_INSNS (27), /* DEBR */
121 COSTS_N_INSNS (220), /* DLGR */
122 COSTS_N_INSNS (34), /* DLR */
123 COSTS_N_INSNS (34), /* DR */
124 COSTS_N_INSNS (32), /* DSGFR */
125 COSTS_N_INSNS (32), /* DSGR */
126 };
127
128 static const
129 struct processor_costs z990_cost =
130 {
131 COSTS_N_INSNS (4), /* M */
132 COSTS_N_INSNS (2), /* MGHI */
133 COSTS_N_INSNS (2), /* MH */
134 COSTS_N_INSNS (2), /* MHI */
135 COSTS_N_INSNS (4), /* ML */
136 COSTS_N_INSNS (4), /* MR */
137 COSTS_N_INSNS (5), /* MS */
138 COSTS_N_INSNS (6), /* MSG */
139 COSTS_N_INSNS (4), /* MSGF */
140 COSTS_N_INSNS (4), /* MSGFR */
141 COSTS_N_INSNS (4), /* MSGR */
142 COSTS_N_INSNS (4), /* MSR */
143 COSTS_N_INSNS (1), /* multiplication in DFmode */
144 COSTS_N_INSNS (28), /* MXBR */
145 COSTS_N_INSNS (130), /* SQXBR */
146 COSTS_N_INSNS (66), /* SQDBR */
147 COSTS_N_INSNS (38), /* SQEBR */
148 COSTS_N_INSNS (1), /* MADBR */
149 COSTS_N_INSNS (1), /* MAEBR */
150 COSTS_N_INSNS (60), /* DXBR */
151 COSTS_N_INSNS (40), /* DDBR */
152 COSTS_N_INSNS (26), /* DEBR */
153 COSTS_N_INSNS (176), /* DLGR */
154 COSTS_N_INSNS (31), /* DLR */
155 COSTS_N_INSNS (31), /* DR */
156 COSTS_N_INSNS (31), /* DSGFR */
157 COSTS_N_INSNS (31), /* DSGR */
158 };
159
160 static const
161 struct processor_costs z9_109_cost =
162 {
163 COSTS_N_INSNS (4), /* M */
164 COSTS_N_INSNS (2), /* MGHI */
165 COSTS_N_INSNS (2), /* MH */
166 COSTS_N_INSNS (2), /* MHI */
167 COSTS_N_INSNS (4), /* ML */
168 COSTS_N_INSNS (4), /* MR */
169 COSTS_N_INSNS (5), /* MS */
170 COSTS_N_INSNS (6), /* MSG */
171 COSTS_N_INSNS (4), /* MSGF */
172 COSTS_N_INSNS (4), /* MSGFR */
173 COSTS_N_INSNS (4), /* MSGR */
174 COSTS_N_INSNS (4), /* MSR */
175 COSTS_N_INSNS (1), /* multiplication in DFmode */
176 COSTS_N_INSNS (28), /* MXBR */
177 COSTS_N_INSNS (130), /* SQXBR */
178 COSTS_N_INSNS (66), /* SQDBR */
179 COSTS_N_INSNS (38), /* SQEBR */
180 COSTS_N_INSNS (1), /* MADBR */
181 COSTS_N_INSNS (1), /* MAEBR */
182 COSTS_N_INSNS (60), /* DXBR */
183 COSTS_N_INSNS (40), /* DDBR */
184 COSTS_N_INSNS (26), /* DEBR */
185 COSTS_N_INSNS (30), /* DLGR */
186 COSTS_N_INSNS (23), /* DLR */
187 COSTS_N_INSNS (23), /* DR */
188 COSTS_N_INSNS (24), /* DSGFR */
189 COSTS_N_INSNS (24), /* DSGR */
190 };
191
192 static const
193 struct processor_costs z10_cost =
194 {
195 COSTS_N_INSNS (10), /* M */
196 COSTS_N_INSNS (10), /* MGHI */
197 COSTS_N_INSNS (10), /* MH */
198 COSTS_N_INSNS (10), /* MHI */
199 COSTS_N_INSNS (10), /* ML */
200 COSTS_N_INSNS (10), /* MR */
201 COSTS_N_INSNS (10), /* MS */
202 COSTS_N_INSNS (10), /* MSG */
203 COSTS_N_INSNS (10), /* MSGF */
204 COSTS_N_INSNS (10), /* MSGFR */
205 COSTS_N_INSNS (10), /* MSGR */
206 COSTS_N_INSNS (10), /* MSR */
207 COSTS_N_INSNS (1) , /* multiplication in DFmode */
208 COSTS_N_INSNS (50), /* MXBR */
209 COSTS_N_INSNS (120), /* SQXBR */
210 COSTS_N_INSNS (52), /* SQDBR */
211 COSTS_N_INSNS (38), /* SQEBR */
212 COSTS_N_INSNS (1), /* MADBR */
213 COSTS_N_INSNS (1), /* MAEBR */
214 COSTS_N_INSNS (111), /* DXBR */
215 COSTS_N_INSNS (39), /* DDBR */
216 COSTS_N_INSNS (32), /* DEBR */
217 COSTS_N_INSNS (160), /* DLGR */
218 COSTS_N_INSNS (71), /* DLR */
219 COSTS_N_INSNS (71), /* DR */
220 COSTS_N_INSNS (71), /* DSGFR */
221 COSTS_N_INSNS (71), /* DSGR */
222 };
223
224 static const
225 struct processor_costs z196_cost =
226 {
227 COSTS_N_INSNS (7), /* M */
228 COSTS_N_INSNS (5), /* MGHI */
229 COSTS_N_INSNS (5), /* MH */
230 COSTS_N_INSNS (5), /* MHI */
231 COSTS_N_INSNS (7), /* ML */
232 COSTS_N_INSNS (7), /* MR */
233 COSTS_N_INSNS (6), /* MS */
234 COSTS_N_INSNS (8), /* MSG */
235 COSTS_N_INSNS (6), /* MSGF */
236 COSTS_N_INSNS (6), /* MSGFR */
237 COSTS_N_INSNS (8), /* MSGR */
238 COSTS_N_INSNS (6), /* MSR */
239 COSTS_N_INSNS (1) , /* multiplication in DFmode */
240 COSTS_N_INSNS (40), /* MXBR B+40 */
241 COSTS_N_INSNS (100), /* SQXBR B+100 */
242 COSTS_N_INSNS (42), /* SQDBR B+42 */
243 COSTS_N_INSNS (28), /* SQEBR B+28 */
244 COSTS_N_INSNS (1), /* MADBR B */
245 COSTS_N_INSNS (1), /* MAEBR B */
246 COSTS_N_INSNS (101), /* DXBR B+101 */
247 COSTS_N_INSNS (29), /* DDBR */
248 COSTS_N_INSNS (22), /* DEBR */
249 COSTS_N_INSNS (160), /* DLGR cracked */
250 COSTS_N_INSNS (160), /* DLR cracked */
251 COSTS_N_INSNS (160), /* DR expanded */
252 COSTS_N_INSNS (160), /* DSGFR cracked */
253 COSTS_N_INSNS (160), /* DSGR cracked */
254 };
255
256 extern int reload_completed;
257
258 /* Kept up to date using the SCHED_VARIABLE_ISSUE hook. */
259 static rtx last_scheduled_insn;
260
261 /* Structure used to hold the components of a S/390 memory
262 address. A legitimate address on S/390 is of the general
263 form
264 base + index + displacement
265 where any of the components is optional.
266
267 base and index are registers of the class ADDR_REGS,
268 displacement is an unsigned 12-bit immediate constant. */
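/* E.g. the memory operand of "l %r1,8(%r2,%r3)" decomposes into
base %r3, index %r2 and displacement 8. */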
269
270 struct s390_address
271 {
272 rtx base;
273 rtx indx;
274 rtx disp;
275 bool pointer;
276 bool literal_pool;
277 };
278
279 /* The following structure is embedded in the machine
280 specific part of struct function. */
281
282 struct GTY (()) s390_frame_layout
283 {
284 /* Offset within stack frame. */
285 HOST_WIDE_INT gprs_offset;
286 HOST_WIDE_INT f0_offset;
287 HOST_WIDE_INT f4_offset;
288 HOST_WIDE_INT f8_offset;
289 HOST_WIDE_INT backchain_offset;
290
291 /* Numbers of the first and last gpr for which slots in the
292 register save area are reserved. */
293 int first_save_gpr_slot;
294 int last_save_gpr_slot;
295
296 /* Number of first and last gpr to be saved, restored. */
297 int first_save_gpr;
298 int first_restore_gpr;
299 int last_save_gpr;
300 int last_restore_gpr;
301
302 /* Bits standing for floating point registers. Set, if the
303 respective register has to be saved. Starting with reg 16 (f0)
304 at the rightmost bit.
305 Bit 15 - 8 7 6 5 4 3 2 1 0
306 fpr 15 - 8 7 5 3 1 6 4 2 0
307 reg 31 - 24 23 22 21 20 19 18 17 16 */
308 unsigned int fpr_bitmap;
309
310 /* Number of floating point registers f8-f15 which must be saved. */
311 int high_fprs;
312
313 /* Set if return address needs to be saved.
314 This flag is set by s390_return_addr_rtx if it could not use
315 the initial value of r14 and therefore depends on r14 being
316 saved to the stack. */
317 bool save_return_addr_p;
318
319 /* Size of stack frame. */
320 HOST_WIDE_INT frame_size;
321 };
322
323 /* Define the structure for the machine field in struct function. */
324
325 struct GTY(()) machine_function
326 {
327 struct s390_frame_layout frame_layout;
328
329 /* Literal pool base register. */
330 rtx base_reg;
331
332 /* True if we may need to perform branch splitting. */
333 bool split_branches_pending_p;
334
335 /* Some local-dynamic TLS symbol name. */
336 const char *some_ld_name;
337
338 bool has_landing_pad_p;
339 };
340
341 /* A few accessor macros for struct cfun->machine->s390_frame_layout. */
342
343 #define cfun_frame_layout (cfun->machine->frame_layout)
344 #define cfun_save_high_fprs_p (!!cfun_frame_layout.high_fprs)
345 #define cfun_gprs_save_area_size ((cfun_frame_layout.last_save_gpr_slot - \
346 cfun_frame_layout.first_save_gpr_slot + 1) * UNITS_PER_LONG)
347 #define cfun_set_fpr_bit(BITNUM) (cfun->machine->frame_layout.fpr_bitmap |= \
348 (1 << (BITNUM)))
349 #define cfun_fpr_bit_p(BITNUM) (!!(cfun->machine->frame_layout.fpr_bitmap & \
350 (1 << (BITNUM))))
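/* E.g. cfun_set_fpr_bit (0) records that f0 (hard reg 16) needs to be
saved; cfun_fpr_bit_p (0) then yields true. */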
351
352 /* Number of GPRs and FPRs used for argument passing. */
353 #define GP_ARG_NUM_REG 5
354 #define FP_ARG_NUM_REG (TARGET_64BIT? 4 : 2)
355
356 /* A couple of shortcuts. */
357 #define CONST_OK_FOR_J(x) \
358 CONST_OK_FOR_CONSTRAINT_P((x), 'J', "J")
359 #define CONST_OK_FOR_K(x) \
360 CONST_OK_FOR_CONSTRAINT_P((x), 'K', "K")
361 #define CONST_OK_FOR_Os(x) \
362 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
363 #define CONST_OK_FOR_Op(x) \
364 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
365 #define CONST_OK_FOR_On(x) \
366 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")
367
368 #define REGNO_PAIR_OK(REGNO, MODE) \
369 (HARD_REGNO_NREGS ((REGNO), (MODE)) == 1 || !((REGNO) & 1))
370
371 /* That's the read ahead of the dynamic branch prediction unit in
372 bytes on a z10 (or higher) CPU. */
373 #define PREDICT_DISTANCE (TARGET_Z10 ? 384 : 2048)
374
375 /* Return the alignment for LABEL. We default to the -falign-labels
376 value except for the literal pool base label. */
377 int
378 s390_label_align (rtx label)
379 {
380 rtx prev_insn = prev_active_insn (label);
381
382 if (prev_insn == NULL_RTX)
383 goto old;
384
385 prev_insn = single_set (prev_insn);
386
387 if (prev_insn == NULL_RTX)
388 goto old;
389
390 prev_insn = SET_SRC (prev_insn);
391
392 /* Don't align literal pool base labels. */
393 if (GET_CODE (prev_insn) == UNSPEC
394 && XINT (prev_insn, 1) == UNSPEC_MAIN_BASE)
395 return 0;
396
397 old:
398 return align_labels_log;
399 }
400
401 static enum machine_mode
402 s390_libgcc_cmp_return_mode (void)
403 {
404 return TARGET_64BIT ? DImode : SImode;
405 }
406
407 static enum machine_mode
408 s390_libgcc_shift_count_mode (void)
409 {
410 return TARGET_64BIT ? DImode : SImode;
411 }
412
413 static enum machine_mode
414 s390_unwind_word_mode (void)
415 {
416 return TARGET_64BIT ? DImode : SImode;
417 }
418
419 /* Return true if the back end supports mode MODE. */
420 static bool
421 s390_scalar_mode_supported_p (enum machine_mode mode)
422 {
423 /* In contrast to the default implementation, reject TImode constants on
424 31-bit TARGET_ZARCH for ABI compliance. */
425 if (!TARGET_64BIT && TARGET_ZARCH && mode == TImode)
426 return false;
427
428 if (DECIMAL_FLOAT_MODE_P (mode))
429 return default_decimal_float_supported_p ();
430
431 return default_scalar_mode_supported_p (mode);
432 }
433
434 /* Set the has_landing_pad_p flag in struct machine_function to VALUE. */
435
436 void
437 s390_set_has_landing_pad_p (bool value)
438 {
439 cfun->machine->has_landing_pad_p = value;
440 }
441
442 /* If two condition code modes are compatible, return a condition code
443 mode which is compatible with both. Otherwise, return
444 VOIDmode. */
445
446 static enum machine_mode
447 s390_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
448 {
449 if (m1 == m2)
450 return m1;
451
452 switch (m1)
453 {
454 case CCZmode:
455 if (m2 == CCUmode || m2 == CCTmode || m2 == CCZ1mode
456 || m2 == CCSmode || m2 == CCSRmode || m2 == CCURmode)
457 return m2;
458 return VOIDmode;
459
460 case CCSmode:
461 case CCUmode:
462 case CCTmode:
463 case CCSRmode:
464 case CCURmode:
465 case CCZ1mode:
466 if (m2 == CCZmode)
467 return m1;
468
469 return VOIDmode;
470
471 default:
472 return VOIDmode;
473 }
474 return VOIDmode;
475 }
476
477 /* Return true if SET either doesn't set the CC register, or else
478 the source and destination have matching CC modes and that
479 CC mode is at least as constrained as REQ_MODE. */
480
481 static bool
482 s390_match_ccmode_set (rtx set, enum machine_mode req_mode)
483 {
484 enum machine_mode set_mode;
485
486 gcc_assert (GET_CODE (set) == SET);
487
488 if (GET_CODE (SET_DEST (set)) != REG || !CC_REGNO_P (REGNO (SET_DEST (set))))
489 return 1;
490
491 set_mode = GET_MODE (SET_DEST (set));
492 switch (set_mode)
493 {
494 case CCSmode:
495 case CCSRmode:
496 case CCUmode:
497 case CCURmode:
498 case CCLmode:
499 case CCL1mode:
500 case CCL2mode:
501 case CCL3mode:
502 case CCT1mode:
503 case CCT2mode:
504 case CCT3mode:
505 if (req_mode != set_mode)
506 return 0;
507 break;
508
509 case CCZmode:
510 if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode
511 && req_mode != CCSRmode && req_mode != CCURmode)
512 return 0;
513 break;
514
515 case CCAPmode:
516 case CCANmode:
517 if (req_mode != CCAmode)
518 return 0;
519 break;
520
521 default:
522 gcc_unreachable ();
523 }
524
525 return (GET_MODE (SET_SRC (set)) == set_mode);
526 }
527
528 /* Return true if every SET in INSN that sets the CC register
529 has source and destination with matching CC modes and that
530 CC mode is at least as constrained as REQ_MODE.
531 If REQ_MODE is VOIDmode, always return false. */
532
533 bool
534 s390_match_ccmode (rtx insn, enum machine_mode req_mode)
535 {
536 int i;
537
538 /* s390_tm_ccmode returns VOIDmode to indicate failure. */
539 if (req_mode == VOIDmode)
540 return false;
541
542 if (GET_CODE (PATTERN (insn)) == SET)
543 return s390_match_ccmode_set (PATTERN (insn), req_mode);
544
545 if (GET_CODE (PATTERN (insn)) == PARALLEL)
546 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
547 {
548 rtx set = XVECEXP (PATTERN (insn), 0, i);
549 if (GET_CODE (set) == SET)
550 if (!s390_match_ccmode_set (set, req_mode))
551 return false;
552 }
553
554 return true;
555 }
556
557 /* If a test-under-mask instruction can be used to implement
558 (compare (and ... OP1) OP2), return the CC mode required
559 to do that. Otherwise, return VOIDmode.
560 MIXED is true if the instruction can distinguish between
561 CC1 and CC2 for mixed selected bits (TMxx); it is false
562 if the instruction cannot (TM). */
563
564 enum machine_mode
565 s390_tm_ccmode (rtx op1, rtx op2, bool mixed)
566 {
567 int bit0, bit1;
568
569 /* ??? Fixme: should work on CONST_DOUBLE as well. */
570 if (GET_CODE (op1) != CONST_INT || GET_CODE (op2) != CONST_INT)
571 return VOIDmode;
572
573 /* Selected bits all zero: CC0.
574 e.g.: int a; if ((a & (16 + 128)) == 0) */
575 if (INTVAL (op2) == 0)
576 return CCTmode;
577
578 /* Selected bits all one: CC3.
579 e.g.: int a; if ((a & (16 + 128)) == 16 + 128) */
580 if (INTVAL (op2) == INTVAL (op1))
581 return CCT3mode;
582
583 /* Exactly two bits selected, mixed zeroes and ones: CC1 or CC2. e.g.:
584 int a;
585 if ((a & (16 + 128)) == 16) -> CCT1
586 if ((a & (16 + 128)) == 128) -> CCT2 */
587 if (mixed)
588 {
589 bit1 = exact_log2 (INTVAL (op2));
590 bit0 = exact_log2 (INTVAL (op1) ^ INTVAL (op2));
591 if (bit0 != -1 && bit1 != -1)
592 return bit0 > bit1 ? CCT1mode : CCT2mode;
593 }
594
595 return VOIDmode;
596 }
597
598 /* Given a comparison code OP (EQ, NE, etc.) and the operands
599 OP0 and OP1 of a COMPARE, return the mode to be used for the
600 comparison. */
601
602 enum machine_mode
603 s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
604 {
605 switch (code)
606 {
607 case EQ:
608 case NE:
609 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
610 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
611 return CCAPmode;
612 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
613 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
614 return CCAPmode;
615 if ((GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
616 || GET_CODE (op1) == NEG)
617 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
618 return CCLmode;
619
620 if (GET_CODE (op0) == AND)
621 {
622 /* Check whether we can potentially do it via TM. */
623 enum machine_mode ccmode;
624 ccmode = s390_tm_ccmode (XEXP (op0, 1), op1, 1);
625 if (ccmode != VOIDmode)
626 {
627 /* Relax CCTmode to CCZmode to allow fall-back to AND
628 if that turns out to be beneficial. */
629 return ccmode == CCTmode ? CCZmode : ccmode;
630 }
631 }
632
633 if (register_operand (op0, HImode)
634 && GET_CODE (op1) == CONST_INT
635 && (INTVAL (op1) == -1 || INTVAL (op1) == 65535))
636 return CCT3mode;
637 if (register_operand (op0, QImode)
638 && GET_CODE (op1) == CONST_INT
639 && (INTVAL (op1) == -1 || INTVAL (op1) == 255))
640 return CCT3mode;
641
642 return CCZmode;
643
644 case LE:
645 case LT:
646 case GE:
647 case GT:
648 /* The only overflow condition of NEG and ABS happens when
649 INT_MIN is used as parameter, whose negation stays negative. So
650 we have an overflow from a positive value to a negative.
651 Using CCAP mode the resulting cc can be used for comparisons. */
652 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
653 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
654 return CCAPmode;
655
656 /* If constants are involved in an add instruction it is possible to use
657 the resulting cc for comparisons with zero. Knowing the sign of the
658 constant the overflow behavior gets predictable. e.g.:
659 int a, b; if ((b = a + c) > 0)
660 with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP */
661 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
662 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
663 {
664 if (INTVAL (XEXP((op0), 1)) < 0)
665 return CCANmode;
666 else
667 return CCAPmode;
668 }
669 /* Fall through. */
670 case UNORDERED:
671 case ORDERED:
672 case UNEQ:
673 case UNLE:
674 case UNLT:
675 case UNGE:
676 case UNGT:
677 case LTGT:
678 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
679 && GET_CODE (op1) != CONST_INT)
680 return CCSRmode;
681 return CCSmode;
682
683 case LTU:
684 case GEU:
685 if (GET_CODE (op0) == PLUS
686 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
687 return CCL1mode;
688
689 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
690 && GET_CODE (op1) != CONST_INT)
691 return CCURmode;
692 return CCUmode;
693
694 case LEU:
695 case GTU:
696 if (GET_CODE (op0) == MINUS
697 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
698 return CCL2mode;
699
700 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
701 && GET_CODE (op1) != CONST_INT)
702 return CCURmode;
703 return CCUmode;
704
705 default:
706 gcc_unreachable ();
707 }
708 }
709
710 /* Replace the comparison OP0 CODE OP1 by a semantically equivalent one
711 that we can implement more efficiently. */
712
713 void
714 s390_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1)
715 {
716 /* Convert ZERO_EXTRACT back to AND to enable TM patterns. */
717 if ((*code == EQ || *code == NE)
718 && *op1 == const0_rtx
719 && GET_CODE (*op0) == ZERO_EXTRACT
720 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
721 && GET_CODE (XEXP (*op0, 2)) == CONST_INT
722 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
723 {
724 rtx inner = XEXP (*op0, 0);
725 HOST_WIDE_INT modesize = GET_MODE_BITSIZE (GET_MODE (inner));
726 HOST_WIDE_INT len = INTVAL (XEXP (*op0, 1));
727 HOST_WIDE_INT pos = INTVAL (XEXP (*op0, 2));
728
729 if (len > 0 && len < modesize
730 && pos >= 0 && pos + len <= modesize
731 && modesize <= HOST_BITS_PER_WIDE_INT)
732 {
733 unsigned HOST_WIDE_INT block;
734 block = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
735 block <<= modesize - pos - len;
736
737 *op0 = gen_rtx_AND (GET_MODE (inner), inner,
738 gen_int_mode (block, GET_MODE (inner)));
739 }
740 }
741
742 /* Narrow AND of memory against immediate to enable TM. */
743 if ((*code == EQ || *code == NE)
744 && *op1 == const0_rtx
745 && GET_CODE (*op0) == AND
746 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
747 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
748 {
749 rtx inner = XEXP (*op0, 0);
750 rtx mask = XEXP (*op0, 1);
751
752 /* Ignore paradoxical SUBREGs if all extra bits are masked out. */
753 if (GET_CODE (inner) == SUBREG
754 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (inner)))
755 && (GET_MODE_SIZE (GET_MODE (inner))
756 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
757 && ((INTVAL (mask)
758 & GET_MODE_MASK (GET_MODE (inner))
759 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
760 == 0))
761 inner = SUBREG_REG (inner);
762
763 /* Do not change volatile MEMs. */
764 if (MEM_P (inner) && !MEM_VOLATILE_P (inner))
765 {
766 int part = s390_single_part (XEXP (*op0, 1),
767 GET_MODE (inner), QImode, 0);
768 if (part >= 0)
769 {
770 mask = gen_int_mode (s390_extract_part (mask, QImode, 0), QImode);
771 inner = adjust_address_nv (inner, QImode, part);
772 *op0 = gen_rtx_AND (QImode, inner, mask);
773 }
774 }
775 }
776
777 /* Narrow comparisons against 0xffff to HImode if possible. */
778 if ((*code == EQ || *code == NE)
779 && GET_CODE (*op1) == CONST_INT
780 && INTVAL (*op1) == 0xffff
781 && SCALAR_INT_MODE_P (GET_MODE (*op0))
782 && (nonzero_bits (*op0, GET_MODE (*op0))
783 & ~(unsigned HOST_WIDE_INT) 0xffff) == 0)
784 {
785 *op0 = gen_lowpart (HImode, *op0);
786 *op1 = constm1_rtx;
787 }
788
789 /* Remove redundant UNSPEC_CCU_TO_INT conversions if possible. */
790 if (GET_CODE (*op0) == UNSPEC
791 && XINT (*op0, 1) == UNSPEC_CCU_TO_INT
792 && XVECLEN (*op0, 0) == 1
793 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCUmode
794 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
795 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
796 && *op1 == const0_rtx)
797 {
798 enum rtx_code new_code = UNKNOWN;
799 switch (*code)
800 {
801 case EQ: new_code = EQ; break;
802 case NE: new_code = NE; break;
803 case LT: new_code = GTU; break;
804 case GT: new_code = LTU; break;
805 case LE: new_code = GEU; break;
806 case GE: new_code = LEU; break;
807 default: break;
808 }
809
810 if (new_code != UNKNOWN)
811 {
812 *op0 = XVECEXP (*op0, 0, 0);
813 *code = new_code;
814 }
815 }
816
817 /* Remove redundant UNSPEC_CCZ_TO_INT conversions if possible. */
818 if (GET_CODE (*op0) == UNSPEC
819 && XINT (*op0, 1) == UNSPEC_CCZ_TO_INT
820 && XVECLEN (*op0, 0) == 1
821 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCZmode
822 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
823 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
824 && *op1 == const0_rtx)
825 {
826 enum rtx_code new_code = UNKNOWN;
827 switch (*code)
828 {
829 case EQ: new_code = EQ; break;
830 case NE: new_code = NE; break;
831 default: break;
832 }
833
834 if (new_code != UNKNOWN)
835 {
836 *op0 = XVECEXP (*op0, 0, 0);
837 *code = new_code;
838 }
839 }
840
841 /* Simplify cascaded EQ, NE with const0_rtx. */
842 if ((*code == NE || *code == EQ)
843 && (GET_CODE (*op0) == EQ || GET_CODE (*op0) == NE)
844 && GET_MODE (*op0) == SImode
845 && GET_MODE (XEXP (*op0, 0)) == CCZ1mode
846 && REG_P (XEXP (*op0, 0))
847 && XEXP (*op0, 1) == const0_rtx
848 && *op1 == const0_rtx)
849 {
850 if ((*code == EQ && GET_CODE (*op0) == NE)
851 || (*code == NE && GET_CODE (*op0) == EQ))
852 *code = EQ;
853 else
854 *code = NE;
855 *op0 = XEXP (*op0, 0);
856 }
857
858 /* Prefer register over memory as first operand. */
859 if (MEM_P (*op0) && REG_P (*op1))
860 {
861 rtx tem = *op0; *op0 = *op1; *op1 = tem;
862 *code = swap_condition (*code);
863 }
864 }
865
866 /* Emit a compare instruction suitable to implement the comparison
867 OP0 CODE OP1. Return the correct condition RTL to be placed in
868 the IF_THEN_ELSE of the conditional branch testing the result. */
869
870 rtx
871 s390_emit_compare (enum rtx_code code, rtx op0, rtx op1)
872 {
873 enum machine_mode mode = s390_select_ccmode (code, op0, op1);
874 rtx cc;
875
876 /* Do not output a redundant compare instruction if a compare_and_swap
877 pattern already computed the result and the machine modes are compatible. */
878 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
879 {
880 gcc_assert (s390_cc_modes_compatible (GET_MODE (op0), mode)
881 == GET_MODE (op0));
882 cc = op0;
883 }
884 else
885 {
886 cc = gen_rtx_REG (mode, CC_REGNUM);
887 emit_insn (gen_rtx_SET (VOIDmode, cc, gen_rtx_COMPARE (mode, op0, op1)));
888 }
889
890 return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
891 }
892
893 /* Emit a SImode compare and swap instruction setting MEM to NEW_RTX if OLD
894 matches CMP.
895 Return the correct condition RTL to be placed in the IF_THEN_ELSE of the
896 conditional branch testing the result. */
897
898 static rtx
899 s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem,
900 rtx cmp, rtx new_rtx)
901 {
902 emit_insn (gen_atomic_compare_and_swapsi_internal (old, mem, cmp, new_rtx));
903 return s390_emit_compare (code, gen_rtx_REG (CCZ1mode, CC_REGNUM),
904 const0_rtx);
905 }
906
907 /* Emit a jump instruction to TARGET. If COND is NULL_RTX, emit an
908 unconditional jump, else a conditional jump under condition COND. */
909
910 void
911 s390_emit_jump (rtx target, rtx cond)
912 {
913 rtx insn;
914
915 target = gen_rtx_LABEL_REF (VOIDmode, target);
916 if (cond)
917 target = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, target, pc_rtx);
918
919 insn = gen_rtx_SET (VOIDmode, pc_rtx, target);
920 emit_jump_insn (insn);
921 }
922
923 /* Return branch condition mask to implement a branch
924 specified by CODE. Return -1 for invalid comparisons. */
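/* The returned mask uses the same encoding as the mask field of the BRC
instruction: 8 selects CC0, 4 selects CC1, 2 selects CC2 and 1 selects CC3.
E.g. EQ in CCZmode yields 8, NE yields 7. */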
925
926 int
927 s390_branch_condition_mask (rtx code)
928 {
929 const int CC0 = 1 << 3;
930 const int CC1 = 1 << 2;
931 const int CC2 = 1 << 1;
932 const int CC3 = 1 << 0;
933
934 gcc_assert (GET_CODE (XEXP (code, 0)) == REG);
935 gcc_assert (REGNO (XEXP (code, 0)) == CC_REGNUM);
936 gcc_assert (XEXP (code, 1) == const0_rtx);
937
938 switch (GET_MODE (XEXP (code, 0)))
939 {
940 case CCZmode:
941 case CCZ1mode:
942 switch (GET_CODE (code))
943 {
944 case EQ: return CC0;
945 case NE: return CC1 | CC2 | CC3;
946 default: return -1;
947 }
948 break;
949
950 case CCT1mode:
951 switch (GET_CODE (code))
952 {
953 case EQ: return CC1;
954 case NE: return CC0 | CC2 | CC3;
955 default: return -1;
956 }
957 break;
958
959 case CCT2mode:
960 switch (GET_CODE (code))
961 {
962 case EQ: return CC2;
963 case NE: return CC0 | CC1 | CC3;
964 default: return -1;
965 }
966 break;
967
968 case CCT3mode:
969 switch (GET_CODE (code))
970 {
971 case EQ: return CC3;
972 case NE: return CC0 | CC1 | CC2;
973 default: return -1;
974 }
975 break;
976
977 case CCLmode:
978 switch (GET_CODE (code))
979 {
980 case EQ: return CC0 | CC2;
981 case NE: return CC1 | CC3;
982 default: return -1;
983 }
984 break;
985
986 case CCL1mode:
987 switch (GET_CODE (code))
988 {
989 case LTU: return CC2 | CC3; /* carry */
990 case GEU: return CC0 | CC1; /* no carry */
991 default: return -1;
992 }
993 break;
994
995 case CCL2mode:
996 switch (GET_CODE (code))
997 {
998 case GTU: return CC0 | CC1; /* borrow */
999 case LEU: return CC2 | CC3; /* no borrow */
1000 default: return -1;
1001 }
1002 break;
1003
1004 case CCL3mode:
1005 switch (GET_CODE (code))
1006 {
1007 case EQ: return CC0 | CC2;
1008 case NE: return CC1 | CC3;
1009 case LTU: return CC1;
1010 case GTU: return CC3;
1011 case LEU: return CC1 | CC2;
1012 case GEU: return CC2 | CC3;
1013 default: return -1;
1014 }
1015
1016 case CCUmode:
1017 switch (GET_CODE (code))
1018 {
1019 case EQ: return CC0;
1020 case NE: return CC1 | CC2 | CC3;
1021 case LTU: return CC1;
1022 case GTU: return CC2;
1023 case LEU: return CC0 | CC1;
1024 case GEU: return CC0 | CC2;
1025 default: return -1;
1026 }
1027 break;
1028
1029 case CCURmode:
1030 switch (GET_CODE (code))
1031 {
1032 case EQ: return CC0;
1033 case NE: return CC2 | CC1 | CC3;
1034 case LTU: return CC2;
1035 case GTU: return CC1;
1036 case LEU: return CC0 | CC2;
1037 case GEU: return CC0 | CC1;
1038 default: return -1;
1039 }
1040 break;
1041
1042 case CCAPmode:
1043 switch (GET_CODE (code))
1044 {
1045 case EQ: return CC0;
1046 case NE: return CC1 | CC2 | CC3;
1047 case LT: return CC1 | CC3;
1048 case GT: return CC2;
1049 case LE: return CC0 | CC1 | CC3;
1050 case GE: return CC0 | CC2;
1051 default: return -1;
1052 }
1053 break;
1054
1055 case CCANmode:
1056 switch (GET_CODE (code))
1057 {
1058 case EQ: return CC0;
1059 case NE: return CC1 | CC2 | CC3;
1060 case LT: return CC1;
1061 case GT: return CC2 | CC3;
1062 case LE: return CC0 | CC1;
1063 case GE: return CC0 | CC2 | CC3;
1064 default: return -1;
1065 }
1066 break;
1067
1068 case CCSmode:
1069 switch (GET_CODE (code))
1070 {
1071 case EQ: return CC0;
1072 case NE: return CC1 | CC2 | CC3;
1073 case LT: return CC1;
1074 case GT: return CC2;
1075 case LE: return CC0 | CC1;
1076 case GE: return CC0 | CC2;
1077 case UNORDERED: return CC3;
1078 case ORDERED: return CC0 | CC1 | CC2;
1079 case UNEQ: return CC0 | CC3;
1080 case UNLT: return CC1 | CC3;
1081 case UNGT: return CC2 | CC3;
1082 case UNLE: return CC0 | CC1 | CC3;
1083 case UNGE: return CC0 | CC2 | CC3;
1084 case LTGT: return CC1 | CC2;
1085 default: return -1;
1086 }
1087 break;
1088
1089 case CCSRmode:
1090 switch (GET_CODE (code))
1091 {
1092 case EQ: return CC0;
1093 case NE: return CC2 | CC1 | CC3;
1094 case LT: return CC2;
1095 case GT: return CC1;
1096 case LE: return CC0 | CC2;
1097 case GE: return CC0 | CC1;
1098 case UNORDERED: return CC3;
1099 case ORDERED: return CC0 | CC2 | CC1;
1100 case UNEQ: return CC0 | CC3;
1101 case UNLT: return CC2 | CC3;
1102 case UNGT: return CC1 | CC3;
1103 case UNLE: return CC0 | CC2 | CC3;
1104 case UNGE: return CC0 | CC1 | CC3;
1105 case LTGT: return CC2 | CC1;
1106 default: return -1;
1107 }
1108 break;
1109
1110 default:
1111 return -1;
1112 }
1113 }
1114
1115
1116 /* Return branch condition mask to implement a compare and branch
1117 specified by CODE. Return -1 for invalid comparisons. */
1118
1119 int
1120 s390_compare_and_branch_condition_mask (rtx code)
1121 {
1122 const int CC0 = 1 << 3;
1123 const int CC1 = 1 << 2;
1124 const int CC2 = 1 << 1;
1125
1126 switch (GET_CODE (code))
1127 {
1128 case EQ:
1129 return CC0;
1130 case NE:
1131 return CC1 | CC2;
1132 case LT:
1133 case LTU:
1134 return CC1;
1135 case GT:
1136 case GTU:
1137 return CC2;
1138 case LE:
1139 case LEU:
1140 return CC0 | CC1;
1141 case GE:
1142 case GEU:
1143 return CC0 | CC2;
1144 default:
1145 gcc_unreachable ();
1146 }
1147 return -1;
1148 }
1149
1150 /* If INV is false, return assembler mnemonic string to implement
1151 a branch specified by CODE. If INV is true, return mnemonic
1152 for the corresponding inverted branch. */
1153
1154 static const char *
1155 s390_branch_condition_mnemonic (rtx code, int inv)
1156 {
1157 int mask;
1158
1159 static const char *const mnemonic[16] =
1160 {
1161 NULL, "o", "h", "nle",
1162 "l", "nhe", "lh", "ne",
1163 "e", "nlh", "he", "nl",
1164 "le", "nh", "no", NULL
1165 };
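/* E.g. a mask of 8 (CC0 only) selects "e", its inversion (mask 7)
selects "ne"; masks 0 and 15 never reach the table lookup because
of the assertion below. */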
1166
1167 if (GET_CODE (XEXP (code, 0)) == REG
1168 && REGNO (XEXP (code, 0)) == CC_REGNUM
1169 && XEXP (code, 1) == const0_rtx)
1170 mask = s390_branch_condition_mask (code);
1171 else
1172 mask = s390_compare_and_branch_condition_mask (code);
1173
1174 gcc_assert (mask >= 0);
1175
1176 if (inv)
1177 mask ^= 15;
1178
1179 gcc_assert (mask >= 1 && mask <= 14);
1180
1181 return mnemonic[mask];
1182 }
1183
1184 /* Return the part of op which has a value different from def.
1185 The size of the part is determined by mode.
1186 Use this function only if you already know that op really
1187 contains such a part. */
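/* E.g. extracting QImode parts of 0x1200 with DEF == 0 skips the zero
low byte and returns 0x12. */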
1188
1189 unsigned HOST_WIDE_INT
1190 s390_extract_part (rtx op, enum machine_mode mode, int def)
1191 {
1192 unsigned HOST_WIDE_INT value = 0;
1193 int max_parts = HOST_BITS_PER_WIDE_INT / GET_MODE_BITSIZE (mode);
1194 int part_bits = GET_MODE_BITSIZE (mode);
1195 unsigned HOST_WIDE_INT part_mask
1196 = ((unsigned HOST_WIDE_INT)1 << part_bits) - 1;
1197 int i;
1198
1199 for (i = 0; i < max_parts; i++)
1200 {
1201 if (i == 0)
1202 value = (unsigned HOST_WIDE_INT) INTVAL (op);
1203 else
1204 value >>= part_bits;
1205
1206 if ((value & part_mask) != (def & part_mask))
1207 return value & part_mask;
1208 }
1209
1210 gcc_unreachable ();
1211 }
1212
1213 /* If OP is an integer constant of mode MODE with exactly one
1214 part of mode PART_MODE unequal to DEF, return the number of that
1215 part. Otherwise, return -1. */
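/* E.g. OP == 0x0000ff00 with MODE == SImode, PART_MODE == HImode and
DEF == 0 returns 1: only the low halfword differs from zero, and parts
are numbered starting from the most significant part. */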
1216
1217 int
1218 s390_single_part (rtx op,
1219 enum machine_mode mode,
1220 enum machine_mode part_mode,
1221 int def)
1222 {
1223 unsigned HOST_WIDE_INT value = 0;
1224 int n_parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (part_mode);
1225 unsigned HOST_WIDE_INT part_mask
1226 = ((unsigned HOST_WIDE_INT)1 << GET_MODE_BITSIZE (part_mode)) - 1;
1227 int i, part = -1;
1228
1229 if (GET_CODE (op) != CONST_INT)
1230 return -1;
1231
1232 for (i = 0; i < n_parts; i++)
1233 {
1234 if (i == 0)
1235 value = (unsigned HOST_WIDE_INT) INTVAL (op);
1236 else
1237 value >>= GET_MODE_BITSIZE (part_mode);
1238
1239 if ((value & part_mask) != (def & part_mask))
1240 {
1241 if (part != -1)
1242 return -1;
1243 else
1244 part = i;
1245 }
1246 }
1247 return part == -1 ? -1 : n_parts - 1 - part;
1248 }
1249
1250 /* Return true if IN contains a contiguous bitfield in the lower SIZE
1251 bits and no other bits are set in IN. POS and LENGTH can be used
1252 to obtain the start position and the length of the bitfield.
1253
1254 POS gives the position of the first bit of the bitfield counting
1255 from the lowest order bit starting with zero. In order to use this
1256 value for S/390 instructions this has to be converted to "bits big
1257 endian" style. */
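/* E.g. IN == 0x0ff0 with SIZE == 16 yields *POS == 4 and *LENGTH == 8,
while IN == 0x0f0f fails because the set bits are not contiguous. */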
1258
1259 bool
1260 s390_contiguous_bitmask_p (unsigned HOST_WIDE_INT in, int size,
1261 int *pos, int *length)
1262 {
1263 int tmp_pos = 0;
1264 int tmp_length = 0;
1265 int i;
1266 unsigned HOST_WIDE_INT mask = 1ULL;
1267 bool contiguous = false;
1268
1269 for (i = 0; i < size; mask <<= 1, i++)
1270 {
1271 if (contiguous)
1272 {
1273 if (mask & in)
1274 tmp_length++;
1275 else
1276 break;
1277 }
1278 else
1279 {
1280 if (mask & in)
1281 {
1282 contiguous = true;
1283 tmp_length++;
1284 }
1285 else
1286 tmp_pos++;
1287 }
1288 }
1289
1290 if (!tmp_length)
1291 return false;
1292
1293 /* Calculate a mask for all bits beyond the contiguous bits. */
1294 mask = (-1LL & ~(((1ULL << (tmp_length + tmp_pos - 1)) << 1) - 1));
1295
1296 if (mask & in)
1297 return false;
1298
1299 if (tmp_length + tmp_pos - 1 > size)
1300 return false;
1301
1302 if (length)
1303 *length = tmp_length;
1304
1305 if (pos)
1306 *pos = tmp_pos;
1307
1308 return true;
1309 }
1310
1311 /* Check whether we can (and want to) split a double-word
1312 move in mode MODE from SRC to DST into two single-word
1313 moves, moving the subword FIRST_SUBWORD first. */
1314
1315 bool
1316 s390_split_ok_p (rtx dst, rtx src, enum machine_mode mode, int first_subword)
1317 {
1318 /* Floating point registers cannot be split. */
1319 if (FP_REG_P (src) || FP_REG_P (dst))
1320 return false;
1321
1322 /* We don't need to split if operands are directly accessible. */
1323 if (s_operand (src, mode) || s_operand (dst, mode))
1324 return false;
1325
1326 /* Non-offsettable memory references cannot be split. */
1327 if ((GET_CODE (src) == MEM && !offsettable_memref_p (src))
1328 || (GET_CODE (dst) == MEM && !offsettable_memref_p (dst)))
1329 return false;
1330
1331 /* Moving the first subword must not clobber a register
1332 needed to move the second subword. */
1333 if (register_operand (dst, mode))
1334 {
1335 rtx subreg = operand_subword (dst, first_subword, 0, mode);
1336 if (reg_overlap_mentioned_p (subreg, src))
1337 return false;
1338 }
1339
1340 return true;
1341 }
1342
1343 /* Return true if it can be proven that [MEM1, MEM1 + SIZE]
1344 and [MEM2, MEM2 + SIZE] do overlap and false
1345 otherwise. */
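/* E.g. two blocks of SIZE 4 whose start addresses provably differ by 2
overlap; if the distance between the addresses cannot be determined at
compile time, false is returned (see the comment below). */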
1346
1347 bool
1348 s390_overlap_p (rtx mem1, rtx mem2, HOST_WIDE_INT size)
1349 {
1350 rtx addr1, addr2, addr_delta;
1351 HOST_WIDE_INT delta;
1352
1353 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
1354 return true;
1355
1356 if (size == 0)
1357 return false;
1358
1359 addr1 = XEXP (mem1, 0);
1360 addr2 = XEXP (mem2, 0);
1361
1362 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
1363
1364 /* This overlapping check is used by peepholes merging memory block operations.
1365 Overlapping operations would otherwise be recognized by the S/390 hardware
1366 and would fall back to a slower implementation. Allowing overlapping
1367 operations would lead to slow code but not to wrong code. Therefore we are
1368 somewhat optimistic if we cannot prove that the memory blocks are
1369 overlapping.
1370 That's why we return false here although this may accept operations on
1371 overlapping memory areas. */
1372 if (!addr_delta || GET_CODE (addr_delta) != CONST_INT)
1373 return false;
1374
1375 delta = INTVAL (addr_delta);
1376
1377 if (delta == 0
1378 || (delta > 0 && delta < size)
1379 || (delta < 0 && -delta < size))
1380 return true;
1381
1382 return false;
1383 }
1384
1385 /* Check whether the address of memory reference MEM2 equals exactly
1386 the address of memory reference MEM1 plus DELTA. Return true if
1387 we can prove this to be the case, false otherwise. */
1388
1389 bool
1390 s390_offset_p (rtx mem1, rtx mem2, rtx delta)
1391 {
1392 rtx addr1, addr2, addr_delta;
1393
1394 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
1395 return false;
1396
1397 addr1 = XEXP (mem1, 0);
1398 addr2 = XEXP (mem2, 0);
1399
1400 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
1401 if (!addr_delta || !rtx_equal_p (addr_delta, delta))
1402 return false;
1403
1404 return true;
1405 }
1406
1407 /* Expand logical operator CODE in mode MODE with operands OPERANDS. */
1408
1409 void
1410 s390_expand_logical_operator (enum rtx_code code, enum machine_mode mode,
1411 rtx *operands)
1412 {
1413 enum machine_mode wmode = mode;
1414 rtx dst = operands[0];
1415 rtx src1 = operands[1];
1416 rtx src2 = operands[2];
1417 rtx op, clob, tem;
1418
1419 /* If we cannot handle the operation directly, use a temp register. */
1420 if (!s390_logical_operator_ok_p (operands))
1421 dst = gen_reg_rtx (mode);
1422
1423 /* QImode and HImode patterns make sense only if we have a destination
1424 in memory. Otherwise perform the operation in SImode. */
1425 if ((mode == QImode || mode == HImode) && GET_CODE (dst) != MEM)
1426 wmode = SImode;
1427
1428 /* Widen operands if required. */
1429 if (mode != wmode)
1430 {
1431 if (GET_CODE (dst) == SUBREG
1432 && (tem = simplify_subreg (wmode, dst, mode, 0)) != 0)
1433 dst = tem;
1434 else if (REG_P (dst))
1435 dst = gen_rtx_SUBREG (wmode, dst, 0);
1436 else
1437 dst = gen_reg_rtx (wmode);
1438
1439 if (GET_CODE (src1) == SUBREG
1440 && (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)
1441 src1 = tem;
1442 else if (GET_MODE (src1) != VOIDmode)
1443 src1 = gen_rtx_SUBREG (wmode, force_reg (mode, src1), 0);
1444
1445 if (GET_CODE (src2) == SUBREG
1446 && (tem = simplify_subreg (wmode, src2, mode, 0)) != 0)
1447 src2 = tem;
1448 else if (GET_MODE (src2) != VOIDmode)
1449 src2 = gen_rtx_SUBREG (wmode, force_reg (mode, src2), 0);
1450 }
1451
1452 /* Emit the instruction. */
1453 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, wmode, src1, src2));
1454 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
1455 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
1456
1457 /* Fix up the destination if needed. */
1458 if (dst != operands[0])
1459 emit_move_insn (operands[0], gen_lowpart (mode, dst));
1460 }
1461
1462 /* Check whether OPERANDS are OK for a logical operation (AND, IOR, XOR). */
1463
1464 bool
1465 s390_logical_operator_ok_p (rtx *operands)
1466 {
1467 /* If the destination operand is in memory, it needs to coincide
1468 with one of the source operands. After reload, it has to be
1469 the first source operand. */
1470 if (GET_CODE (operands[0]) == MEM)
1471 return rtx_equal_p (operands[0], operands[1])
1472 || (!reload_completed && rtx_equal_p (operands[0], operands[2]));
1473
1474 return true;
1475 }
1476
1477 /* Narrow logical operation CODE of memory operand MEMOP with immediate
1478 operand IMMOP to switch from SS to SI type instructions. */
1479
1480 void
1481 s390_narrow_logical_operator (enum rtx_code code, rtx *memop, rtx *immop)
1482 {
1483 int def = code == AND ? -1 : 0;
1484 HOST_WIDE_INT mask;
1485 int part;
1486
1487 gcc_assert (GET_CODE (*memop) == MEM);
1488 gcc_assert (!MEM_VOLATILE_P (*memop));
1489
1490 mask = s390_extract_part (*immop, QImode, def);
1491 part = s390_single_part (*immop, GET_MODE (*memop), QImode, def);
1492 gcc_assert (part >= 0);
1493
1494 *memop = adjust_address (*memop, QImode, part);
1495 *immop = gen_int_mode (mask, QImode);
1496 }
1497
1498
1499 /* How to allocate a 'struct machine_function'. */
1500
1501 static struct machine_function *
1502 s390_init_machine_status (void)
1503 {
1504 return ggc_alloc_cleared_machine_function ();
1505 }
1506
1507 static void
1508 s390_option_override (void)
1509 {
1510 /* Set up function hooks. */
1511 init_machine_status = s390_init_machine_status;
1512
1513 /* Architecture mode defaults according to ABI. */
1514 if (!(target_flags_explicit & MASK_ZARCH))
1515 {
1516 if (TARGET_64BIT)
1517 target_flags |= MASK_ZARCH;
1518 else
1519 target_flags &= ~MASK_ZARCH;
1520 }
1521
1522 /* Set the march default in case it hasn't been specified on
1523 the command line. */
1524 if (s390_arch == PROCESSOR_max)
1525 {
1526 s390_arch_string = TARGET_ZARCH? "z900" : "g5";
1527 s390_arch = TARGET_ZARCH ? PROCESSOR_2064_Z900 : PROCESSOR_9672_G5;
1528 s390_arch_flags = processor_flags_table[(int)s390_arch];
1529 }
1530
1531 /* Determine processor to tune for. */
1532 if (s390_tune == PROCESSOR_max)
1533 {
1534 s390_tune = s390_arch;
1535 s390_tune_flags = s390_arch_flags;
1536 }
1537
1538 /* Sanity checks. */
1539 if (TARGET_ZARCH && !TARGET_CPU_ZARCH)
1540 error ("z/Architecture mode not supported on %s", s390_arch_string);
1541 if (TARGET_64BIT && !TARGET_ZARCH)
1542 error ("64-bit ABI not supported in ESA/390 mode");
1543
1544 /* Use hardware DFP if available and not explicitly disabled by
1545 user. E.g. with -m31 -march=z10 -mzarch */
1546 if (!(target_flags_explicit & MASK_HARD_DFP) && TARGET_DFP)
1547 target_flags |= MASK_HARD_DFP;
1548
1549 if (TARGET_HARD_DFP && !TARGET_DFP)
1550 {
1551 if (target_flags_explicit & MASK_HARD_DFP)
1552 {
1553 if (!TARGET_CPU_DFP)
1554 error ("hardware decimal floating point instructions"
1555 " not available on %s", s390_arch_string);
1556 if (!TARGET_ZARCH)
1557 error ("hardware decimal floating point instructions"
1558 " not available in ESA/390 mode");
1559 }
1560 else
1561 target_flags &= ~MASK_HARD_DFP;
1562 }
1563
1564 if ((target_flags_explicit & MASK_SOFT_FLOAT) && TARGET_SOFT_FLOAT)
1565 {
1566 if ((target_flags_explicit & MASK_HARD_DFP) && TARGET_HARD_DFP)
1567 error ("-mhard-dfp can%'t be used in conjunction with -msoft-float");
1568
1569 target_flags &= ~MASK_HARD_DFP;
1570 }
1571
1572 /* Set processor cost function. */
1573 switch (s390_tune)
1574 {
1575 case PROCESSOR_2084_Z990:
1576 s390_cost = &z990_cost;
1577 break;
1578 case PROCESSOR_2094_Z9_109:
1579 s390_cost = &z9_109_cost;
1580 break;
1581 case PROCESSOR_2097_Z10:
1582 s390_cost = &z10_cost;
break;
1583 case PROCESSOR_2817_Z196:
1584 s390_cost = &z196_cost;
1585 break;
1586 default:
1587 s390_cost = &z900_cost;
1588 }
1589
1590 if (TARGET_BACKCHAIN && TARGET_PACKED_STACK && TARGET_HARD_FLOAT)
1591 error ("-mbackchain -mpacked-stack -mhard-float are not supported "
1592 "in combination");
1593
1594 if (s390_stack_size)
1595 {
1596 if (s390_stack_guard >= s390_stack_size)
1597 error ("stack size must be greater than the stack guard value");
1598 else if (s390_stack_size > 1 << 16)
1599 error ("stack size must not be greater than 64k");
1600 }
1601 else if (s390_stack_guard)
1602 error ("-mstack-guard implies use of -mstack-size");
1603
1604 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
1605 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
1606 target_flags |= MASK_LONG_DOUBLE_128;
1607 #endif
1608
1609 if (s390_tune == PROCESSOR_2097_Z10
1610 || s390_tune == PROCESSOR_2817_Z196)
1611 {
1612 maybe_set_param_value (PARAM_MAX_UNROLLED_INSNS, 100,
1613 global_options.x_param_values,
1614 global_options_set.x_param_values);
1615 maybe_set_param_value (PARAM_MAX_UNROLL_TIMES, 32,
1616 global_options.x_param_values,
1617 global_options_set.x_param_values);
1618 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 2000,
1619 global_options.x_param_values,
1620 global_options_set.x_param_values);
1621 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEEL_TIMES, 64,
1622 global_options.x_param_values,
1623 global_options_set.x_param_values);
1624 }
1625
1626 maybe_set_param_value (PARAM_MAX_PENDING_LIST_LENGTH, 256,
1627 global_options.x_param_values,
1628 global_options_set.x_param_values);
1629 /* Values for loop prefetching. */
1630 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, 256,
1631 global_options.x_param_values,
1632 global_options_set.x_param_values);
1633 maybe_set_param_value (PARAM_L1_CACHE_SIZE, 128,
1634 global_options.x_param_values,
1635 global_options_set.x_param_values);
1636 /* s390 has more than 2 levels and the size is much larger. Since
1637 we are always running virtualized, assume that we only get a small
1638 part of the caches above l1. */
1639 maybe_set_param_value (PARAM_L2_CACHE_SIZE, 1500,
1640 global_options.x_param_values,
1641 global_options_set.x_param_values);
1642 maybe_set_param_value (PARAM_PREFETCH_MIN_INSN_TO_MEM_RATIO, 2,
1643 global_options.x_param_values,
1644 global_options_set.x_param_values);
1645 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6,
1646 global_options.x_param_values,
1647 global_options_set.x_param_values);
1648
1649 /* This cannot reside in s390_option_optimization_table since HAVE_prefetch
1650 requires the arch flags to be evaluated already. Since prefetching
1651 is beneficial on s390, we enable it if available. */
1652 if (flag_prefetch_loop_arrays < 0 && HAVE_prefetch && optimize >= 3)
1653 flag_prefetch_loop_arrays = 1;
1654
1655 /* Use the alternative scheduling-pressure algorithm by default. */
1656 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM, 2,
1657 global_options.x_param_values,
1658 global_options_set.x_param_values);
1659
1660 if (TARGET_TPF)
1661 {
1662 /* Don't emit DWARF3/4 unless specifically selected. The TPF
1663 debuggers do not yet support DWARF 3/4. */
1664 if (!global_options_set.x_dwarf_strict)
1665 dwarf_strict = 1;
1666 if (!global_options_set.x_dwarf_version)
1667 dwarf_version = 2;
1668 }
1669 }
1670
1671 /* Map for smallest class containing reg regno. */
1672
1673 const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
1674 { GENERAL_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1675 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1676 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1677 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1678 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1679 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1680 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1681 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1682 ADDR_REGS, CC_REGS, ADDR_REGS, ADDR_REGS,
1683 ACCESS_REGS, ACCESS_REGS
1684 };
1685
1686 /* Return attribute type of insn. */
1687
1688 static enum attr_type
1689 s390_safe_attr_type (rtx insn)
1690 {
1691 if (recog_memoized (insn) >= 0)
1692 return get_attr_type (insn);
1693 else
1694 return TYPE_NONE;
1695 }
1696
1697 /* Return true if DISP is a valid short displacement. */
1698
1699 static bool
1700 s390_short_displacement (rtx disp)
1701 {
1702 /* No displacement is OK. */
1703 if (!disp)
1704 return true;
1705
1706 /* Without the long displacement facility we don't need to
1707 distinguish between long and short displacements. */
1708 if (!TARGET_LONG_DISPLACEMENT)
1709 return true;
1710
1711 /* Integer displacement in range. */
1712 if (GET_CODE (disp) == CONST_INT)
1713 return INTVAL (disp) >= 0 && INTVAL (disp) < 4096;
1714
1715 /* GOT offset is not OK, the GOT can be large. */
1716 if (GET_CODE (disp) == CONST
1717 && GET_CODE (XEXP (disp, 0)) == UNSPEC
1718 && (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
1719 || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
1720 return false;
1721
1722 /* All other symbolic constants are literal pool references,
1723 which are OK as the literal pool must be small. */
1724 if (GET_CODE (disp) == CONST)
1725 return true;
1726
1727 return false;
1728 }
1729
1730 /* Decompose a RTL expression ADDR for a memory address into
1731 its components, returned in OUT.
1732
1733 Returns false if ADDR is not a valid memory address, true
1734 otherwise. If OUT is NULL, don't return the components,
1735 but check for validity only.
1736
1737 Note: Only addresses in canonical form are recognized.
1738 LEGITIMIZE_ADDRESS should convert non-canonical forms to the
1739 canonical form so that they will be recognized. */
1740
1741 static int
1742 s390_decompose_address (rtx addr, struct s390_address *out)
1743 {
1744 HOST_WIDE_INT offset = 0;
1745 rtx base = NULL_RTX;
1746 rtx indx = NULL_RTX;
1747 rtx disp = NULL_RTX;
1748 rtx orig_disp;
1749 bool pointer = false;
1750 bool base_ptr = false;
1751 bool indx_ptr = false;
1752 bool literal_pool = false;
1753
1754 /* We may need to substitute the literal pool base register into the address
1755 below. However, at this point we do not know which register is going to
1756 be used as base, so we substitute the arg pointer register. This is going
1757 to be treated as holding a pointer below -- it shouldn't be used for any
1758 other purpose. */
1759 rtx fake_pool_base = gen_rtx_REG (Pmode, ARG_POINTER_REGNUM);
1760
1761 /* Decompose address into base + index + displacement. */
1762
1763 if (GET_CODE (addr) == REG || GET_CODE (addr) == UNSPEC)
1764 base = addr;
1765
1766 else if (GET_CODE (addr) == PLUS)
1767 {
1768 rtx op0 = XEXP (addr, 0);
1769 rtx op1 = XEXP (addr, 1);
1770 enum rtx_code code0 = GET_CODE (op0);
1771 enum rtx_code code1 = GET_CODE (op1);
1772
1773 if (code0 == REG || code0 == UNSPEC)
1774 {
1775 if (code1 == REG || code1 == UNSPEC)
1776 {
1777 indx = op0; /* index + base */
1778 base = op1;
1779 }
1780
1781 else
1782 {
1783 base = op0; /* base + displacement */
1784 disp = op1;
1785 }
1786 }
1787
1788 else if (code0 == PLUS)
1789 {
1790 indx = XEXP (op0, 0); /* index + base + disp */
1791 base = XEXP (op0, 1);
1792 disp = op1;
1793 }
1794
1795 else
1796 {
1797 return false;
1798 }
1799 }
1800
1801 else
1802 disp = addr; /* displacement */
1803
1804 /* Extract integer part of displacement. */
1805 orig_disp = disp;
1806 if (disp)
1807 {
1808 if (GET_CODE (disp) == CONST_INT)
1809 {
1810 offset = INTVAL (disp);
1811 disp = NULL_RTX;
1812 }
1813 else if (GET_CODE (disp) == CONST
1814 && GET_CODE (XEXP (disp, 0)) == PLUS
1815 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
1816 {
1817 offset = INTVAL (XEXP (XEXP (disp, 0), 1));
1818 disp = XEXP (XEXP (disp, 0), 0);
1819 }
1820 }
1821
1822 /* Strip off CONST here to avoid special case tests later. */
1823 if (disp && GET_CODE (disp) == CONST)
1824 disp = XEXP (disp, 0);
1825
1826 /* We can convert literal pool addresses to
1827 displacements by basing them off the base register. */
1828 if (disp && GET_CODE (disp) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (disp))
1829 {
1830 /* Either base or index must be free to hold the base register. */
1831 if (!base)
1832 base = fake_pool_base, literal_pool = true;
1833 else if (!indx)
1834 indx = fake_pool_base, literal_pool = true;
1835 else
1836 return false;
1837
1838 /* Mark up the displacement. */
1839 disp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, disp),
1840 UNSPEC_LTREL_OFFSET);
1841 }
1842
1843 /* Validate base register. */
1844 if (base)
1845 {
1846 if (GET_CODE (base) == UNSPEC)
1847 switch (XINT (base, 1))
1848 {
1849 case UNSPEC_LTREF:
1850 if (!disp)
1851 disp = gen_rtx_UNSPEC (Pmode,
1852 gen_rtvec (1, XVECEXP (base, 0, 0)),
1853 UNSPEC_LTREL_OFFSET);
1854 else
1855 return false;
1856
1857 base = XVECEXP (base, 0, 1);
1858 break;
1859
1860 case UNSPEC_LTREL_BASE:
1861 if (XVECLEN (base, 0) == 1)
1862 base = fake_pool_base, literal_pool = true;
1863 else
1864 base = XVECEXP (base, 0, 1);
1865 break;
1866
1867 default:
1868 return false;
1869 }
1870
1871 if (!REG_P (base)
1872 || (GET_MODE (base) != SImode
1873 && GET_MODE (base) != Pmode))
1874 return false;
1875
1876 if (REGNO (base) == STACK_POINTER_REGNUM
1877 || REGNO (base) == FRAME_POINTER_REGNUM
1878 || ((reload_completed || reload_in_progress)
1879 && frame_pointer_needed
1880 && REGNO (base) == HARD_FRAME_POINTER_REGNUM)
1881 || REGNO (base) == ARG_POINTER_REGNUM
1882 || (flag_pic
1883 && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
1884 pointer = base_ptr = true;
1885
1886 if ((reload_completed || reload_in_progress)
1887 && base == cfun->machine->base_reg)
1888 pointer = base_ptr = literal_pool = true;
1889 }
1890
1891 /* Validate index register. */
1892 if (indx)
1893 {
1894 if (GET_CODE (indx) == UNSPEC)
1895 switch (XINT (indx, 1))
1896 {
1897 case UNSPEC_LTREF:
1898 if (!disp)
1899 disp = gen_rtx_UNSPEC (Pmode,
1900 gen_rtvec (1, XVECEXP (indx, 0, 0)),
1901 UNSPEC_LTREL_OFFSET);
1902 else
1903 return false;
1904
1905 indx = XVECEXP (indx, 0, 1);
1906 break;
1907
1908 case UNSPEC_LTREL_BASE:
1909 if (XVECLEN (indx, 0) == 1)
1910 indx = fake_pool_base, literal_pool = true;
1911 else
1912 indx = XVECEXP (indx, 0, 1);
1913 break;
1914
1915 default:
1916 return false;
1917 }
1918
1919 if (!REG_P (indx)
1920 || (GET_MODE (indx) != SImode
1921 && GET_MODE (indx) != Pmode))
1922 return false;
1923
1924 if (REGNO (indx) == STACK_POINTER_REGNUM
1925 || REGNO (indx) == FRAME_POINTER_REGNUM
1926 || ((reload_completed || reload_in_progress)
1927 && frame_pointer_needed
1928 && REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
1929 || REGNO (indx) == ARG_POINTER_REGNUM
1930 || (flag_pic
1931 && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
1932 pointer = indx_ptr = true;
1933
1934 if ((reload_completed || reload_in_progress)
1935 && indx == cfun->machine->base_reg)
1936 pointer = indx_ptr = literal_pool = true;
1937 }
1938
1939 /* Prefer to use pointer as base, not index. */
1940 if (base && indx && !base_ptr
1941 && (indx_ptr || (!REG_POINTER (base) && REG_POINTER (indx))))
1942 {
1943 rtx tmp = base;
1944 base = indx;
1945 indx = tmp;
1946 }
1947
1948 /* Validate displacement. */
1949 if (!disp)
1950 {
1951 /* If virtual registers are involved, the displacement will change later
1952 anyway as the virtual registers get eliminated. This could make a
1953 valid displacement invalid, but it is more likely to make an invalid
1954 displacement valid, because we sometimes access the register save area
1955 via negative offsets to one of those registers.
1956 Thus we don't check the displacement for validity here. If after
1957 elimination the displacement turns out to be invalid after all,
1958 this is fixed up by reload in any case. */
1959 if (base != arg_pointer_rtx
1960 && indx != arg_pointer_rtx
1961 && base != return_address_pointer_rtx
1962 && indx != return_address_pointer_rtx
1963 && base != frame_pointer_rtx
1964 && indx != frame_pointer_rtx
1965 && base != virtual_stack_vars_rtx
1966 && indx != virtual_stack_vars_rtx)
1967 if (!DISP_IN_RANGE (offset))
1968 return false;
1969 }
1970 else
1971 {
1972 /* All the special cases are pointers. */
1973 pointer = true;
1974
1975 /* In the small-PIC case, the linker converts @GOT
1976 and @GOTNTPOFF offsets to possible displacements. */
1977 if (GET_CODE (disp) == UNSPEC
1978 && (XINT (disp, 1) == UNSPEC_GOT
1979 || XINT (disp, 1) == UNSPEC_GOTNTPOFF)
1980 && flag_pic == 1)
1981 {
1982 ;
1983 }
1984
1985 /* Accept pool label offsets. */
1986 else if (GET_CODE (disp) == UNSPEC
1987 && XINT (disp, 1) == UNSPEC_POOL_OFFSET)
1988 ;
1989
1990 /* Accept literal pool references. */
1991 else if (GET_CODE (disp) == UNSPEC
1992 && XINT (disp, 1) == UNSPEC_LTREL_OFFSET)
1993 {
1994 /* In case CSE pulled a non-literal-pool reference out of
1995 the pool we have to reject the address. This is
1996 especially important when loading the GOT pointer on
1997 non-zarch CPUs. In this case the literal pool contains an
1998 lt-relative offset to the _GLOBAL_OFFSET_TABLE_ label which
1999 will most likely exceed the displacement range. */
2000 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
2001 || !CONSTANT_POOL_ADDRESS_P (XVECEXP (disp, 0, 0)))
2002 return false;
2003
2004 orig_disp = gen_rtx_CONST (Pmode, disp);
2005 if (offset)
2006 {
2007 /* If we have an offset, make sure it does not
2008 exceed the size of the constant pool entry. */
2009 rtx sym = XVECEXP (disp, 0, 0);
2010 if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))
2011 return false;
2012
2013 orig_disp = plus_constant (Pmode, orig_disp, offset);
2014 }
2015 }
2016
2017 else
2018 return false;
2019 }
2020
2021 if (!base && !indx)
2022 pointer = true;
2023
2024 if (out)
2025 {
2026 out->base = base;
2027 out->indx = indx;
2028 out->disp = orig_disp;
2029 out->pointer = pointer;
2030 out->literal_pool = literal_pool;
2031 }
2032
2033 return true;
2034 }
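/* For illustration: an address such as
   (plus (plus (reg %r2) (reg %r3)) (const_int 100)) decomposes into one
   register as out->base, the other as out->indx and (const_int 100) as
   out->disp, while a bare literal-pool SYMBOL_REF instead yields the fake
   pool base register plus an UNSPEC_LTREL_OFFSET displacement with
   out->literal_pool set.  The register numbers are example values only.  */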
2035
2036 /* Decompose an RTL expression OP for a shift count into its components,
2037 and return the base register in BASE and the offset in OFFSET.
2038
2039 Return true if OP is a valid shift count, false if not. */
2040
2041 bool
2042 s390_decompose_shift_count (rtx op, rtx *base, HOST_WIDE_INT *offset)
2043 {
2044 HOST_WIDE_INT off = 0;
2045
2046 /* We can have an integer constant, an address register,
2047 or a sum of the two. */
2048 if (GET_CODE (op) == CONST_INT)
2049 {
2050 off = INTVAL (op);
2051 op = NULL_RTX;
2052 }
2053 if (op && GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT)
2054 {
2055 off = INTVAL (XEXP (op, 1));
2056 op = XEXP (op, 0);
2057 }
2058 while (op && GET_CODE (op) == SUBREG)
2059 op = SUBREG_REG (op);
2060
2061 if (op && GET_CODE (op) != REG)
2062 return false;
2063
2064 if (offset)
2065 *offset = off;
2066 if (base)
2067 *base = op;
2068
2069 return true;
2070 }
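/* As an example, a shift count of the form (plus (reg %r1) (const_int 7))
   yields *base = %r1 and *offset = 7, while a plain (const_int 3) yields
   *base = NULL_RTX and *offset = 3.  The register number is just an
   example value.  */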
2071
2072
2073 /* Return true if OP is a valid address without index. */
2074
2075 bool
2076 s390_legitimate_address_without_index_p (rtx op)
2077 {
2078 struct s390_address addr;
2079
2080 if (!s390_decompose_address (XEXP (op, 0), &addr))
2081 return false;
2082 if (addr.indx)
2083 return false;
2084
2085 return true;
2086 }
2087
2088
2089 /* Return true if ADDR is of kind symbol_ref or symbol_ref + const_int
2090 and return these parts in SYMREF and ADDEND. You can pass NULL in
2091 SYMREF and/or ADDEND if you are not interested in these values.
2092 Literal pool references are *not* considered symbol references. */
2093
2094 static bool
2095 s390_symref_operand_p (rtx addr, rtx *symref, HOST_WIDE_INT *addend)
2096 {
2097 HOST_WIDE_INT tmpaddend = 0;
2098
2099 if (GET_CODE (addr) == CONST)
2100 addr = XEXP (addr, 0);
2101
2102 if (GET_CODE (addr) == PLUS)
2103 {
2104 if (GET_CODE (XEXP (addr, 0)) == SYMBOL_REF
2105 && !CONSTANT_POOL_ADDRESS_P (XEXP (addr, 0))
2106 && CONST_INT_P (XEXP (addr, 1)))
2107 {
2108 tmpaddend = INTVAL (XEXP (addr, 1));
2109 addr = XEXP (addr, 0);
2110 }
2111 else
2112 return false;
2113 }
2114 else
2115 if (GET_CODE (addr) != SYMBOL_REF || CONSTANT_POOL_ADDRESS_P (addr))
2116 return false;
2117
2118 if (symref)
2119 *symref = addr;
2120 if (addend)
2121 *addend = tmpaddend;
2122
2123 return true;
2124 }
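/* For instance, (const (plus (symbol_ref "x") (const_int 12))) yields
   *symref = (symbol_ref "x") and *addend = 12, whereas a literal-pool
   SYMBOL_REF or any other constant makes the function return false.  The
   symbol name is purely illustrative.  */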
2125
2126
2127 /* Return true if the address in OP is valid for constraint letter C
2128 if wrapped in a MEM rtx. Set LIT_POOL_OK to true if literal
2129 pool MEMs should be accepted. Only the Q, R, S, T constraint
2130 letters are allowed for C. */
2131
2132 static int
2133 s390_check_qrst_address (char c, rtx op, bool lit_pool_ok)
2134 {
2135 struct s390_address addr;
2136 bool decomposed = false;
2137
2138 /* This check makes sure that no symbolic addresses (except literal
2139 pool references) are accepted by the R or T constraints. */
2140 if (s390_symref_operand_p (op, NULL, NULL))
2141 return 0;
2142
2143 /* Ensure literal pool references are only accepted if LIT_POOL_OK. */
2144 if (!lit_pool_ok)
2145 {
2146 if (!s390_decompose_address (op, &addr))
2147 return 0;
2148 if (addr.literal_pool)
2149 return 0;
2150 decomposed = true;
2151 }
2152
2153 switch (c)
2154 {
2155 case 'Q': /* no index short displacement */
2156 if (!decomposed && !s390_decompose_address (op, &addr))
2157 return 0;
2158 if (addr.indx)
2159 return 0;
2160 if (!s390_short_displacement (addr.disp))
2161 return 0;
2162 break;
2163
2164 case 'R': /* with index short displacement */
2165 if (TARGET_LONG_DISPLACEMENT)
2166 {
2167 if (!decomposed && !s390_decompose_address (op, &addr))
2168 return 0;
2169 if (!s390_short_displacement (addr.disp))
2170 return 0;
2171 }
2172 /* Any invalid address here will be fixed up by reload,
2173 so accept it for the most generic constraint. */
2174 break;
2175
2176 case 'S': /* no index long displacement */
2177 if (!TARGET_LONG_DISPLACEMENT)
2178 return 0;
2179 if (!decomposed && !s390_decompose_address (op, &addr))
2180 return 0;
2181 if (addr.indx)
2182 return 0;
2183 if (s390_short_displacement (addr.disp))
2184 return 0;
2185 break;
2186
2187 case 'T': /* with index long displacement */
2188 if (!TARGET_LONG_DISPLACEMENT)
2189 return 0;
2190 /* Any invalid address here will be fixed up by reload,
2191 so accept it for the most generic constraint. */
2192 if ((decomposed || s390_decompose_address (op, &addr))
2193 && s390_short_displacement (addr.disp))
2194 return 0;
2195 break;
2196 default:
2197 return 0;
2198 }
2199 return 1;
2200 }
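/* As a rough illustration (assuming TARGET_LONG_DISPLACEMENT): an address
   like (plus (reg %r2) (const_int 40)) satisfies 'Q' and 'R' but neither
   'S' nor 'T', while (plus (plus (reg %r2) (reg %r3)) (const_int 8000))
   satisfies only 'T', as it uses both an index register and a long
   displacement.  The registers and offsets are example values.  */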
2201
2202
2203 /* Evaluates constraint strings described by the regular expression
2204 ([A|B|Z](Q|R|S|T))|U|W|Y and returns 1 if OP is a valid operand for
2205 the constraint given in STR, and 0 otherwise. */
2206
2207 int
2208 s390_mem_constraint (const char *str, rtx op)
2209 {
2210 char c = str[0];
2211
2212 switch (c)
2213 {
2214 case 'A':
2215 /* Check for offsettable variants of memory constraints. */
2216 if (!MEM_P (op) || MEM_VOLATILE_P (op))
2217 return 0;
2218 if ((reload_completed || reload_in_progress)
2219 ? !offsettable_memref_p (op) : !offsettable_nonstrict_memref_p (op))
2220 return 0;
2221 return s390_check_qrst_address (str[1], XEXP (op, 0), true);
2222 case 'B':
2223 /* Check for non-literal-pool variants of memory constraints. */
2224 if (!MEM_P (op))
2225 return 0;
2226 return s390_check_qrst_address (str[1], XEXP (op, 0), false);
2227 case 'Q':
2228 case 'R':
2229 case 'S':
2230 case 'T':
2231 if (GET_CODE (op) != MEM)
2232 return 0;
2233 return s390_check_qrst_address (c, XEXP (op, 0), true);
2234 case 'U':
2235 return (s390_check_qrst_address ('Q', op, true)
2236 || s390_check_qrst_address ('R', op, true));
2237 case 'W':
2238 return (s390_check_qrst_address ('S', op, true)
2239 || s390_check_qrst_address ('T', op, true));
2240 case 'Y':
2241 /* Simply check for the basic form of a shift count. Reload will
2242 take care of making sure we have a proper base register. */
2243 if (!s390_decompose_shift_count (op, NULL, NULL))
2244 return 0;
2245 break;
2246 case 'Z':
2247 return s390_check_qrst_address (str[1], op, true);
2248 default:
2249 return 0;
2250 }
2251 return 1;
2252 }
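/* For example, the constraint string "AQ" accepts an offsettable,
   non-volatile MEM whose address passes the 'Q' check above, while "BR"
   accepts a MEM passing the 'R' check that is not a literal-pool
   reference; the single letters 'Q'..'T' check the MEM address
   directly.  */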
2253
2254
2255 /* Evaluates constraint strings starting with letter O. Input
2256 parameter C is the second letter following the "O" in the constraint
2257 string. Returns 1 if VALUE meets the respective constraint and 0
2258 otherwise. */
2259
2260 int
2261 s390_O_constraint_str (const char c, HOST_WIDE_INT value)
2262 {
2263 if (!TARGET_EXTIMM)
2264 return 0;
2265
2266 switch (c)
2267 {
2268 case 's':
2269 return trunc_int_for_mode (value, SImode) == value;
2270
2271 case 'p':
2272 return value == 0
2273 || s390_single_part (GEN_INT (value), DImode, SImode, 0) == 1;
2274
2275 case 'n':
2276 return s390_single_part (GEN_INT (value - 1), DImode, SImode, -1) == 1;
2277
2278 default:
2279 gcc_unreachable ();
2280 }
2281 }
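/* As a concrete example (TARGET_EXTIMM required): "Os" accepts any value
   fitting a signed 32-bit immediate, e.g. -1 or 0x7fffffff, "Op" accepts
   non-negative values whose upper 32 bits are zero, e.g. 0xdeadbeef, and
   "On" accepts the corresponding negative range.  The numbers are
   illustrative only.  */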
2282
2283
2284 /* Evaluates constraint strings starting with letter N. Parameter STR
2285 contains the letters following letter "N" in the constraint string.
2286 Returns true if VALUE matches the constraint. */
2287
2288 int
2289 s390_N_constraint_str (const char *str, HOST_WIDE_INT value)
2290 {
2291 enum machine_mode mode, part_mode;
2292 int def;
2293 int part, part_goal;
2294
2295
2296 if (str[0] == 'x')
2297 part_goal = -1;
2298 else
2299 part_goal = str[0] - '0';
2300
2301 switch (str[1])
2302 {
2303 case 'Q':
2304 part_mode = QImode;
2305 break;
2306 case 'H':
2307 part_mode = HImode;
2308 break;
2309 case 'S':
2310 part_mode = SImode;
2311 break;
2312 default:
2313 return 0;
2314 }
2315
2316 switch (str[2])
2317 {
2318 case 'H':
2319 mode = HImode;
2320 break;
2321 case 'S':
2322 mode = SImode;
2323 break;
2324 case 'D':
2325 mode = DImode;
2326 break;
2327 default:
2328 return 0;
2329 }
2330
2331 switch (str[3])
2332 {
2333 case '0':
2334 def = 0;
2335 break;
2336 case 'F':
2337 def = -1;
2338 break;
2339 default:
2340 return 0;
2341 }
2342
2343 if (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (part_mode))
2344 return 0;
2345
2346 part = s390_single_part (GEN_INT (value), mode, part_mode, def);
2347 if (part < 0)
2348 return 0;
2349 if (part_goal != -1 && part_goal != part)
2350 return 0;
2351
2352 return 1;
2353 }
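/* To illustrate: for the string "3HD0" (the letters after the leading
   'N'), the function checks that VALUE, viewed as a DImode constant, has
   every HImode part equal to 0 except part 3, e.g. 0x000000000000ffff,
   assuming parts are numbered starting from the most significant
   halfword.  An 'x' in place of the digit accepts any single such
   part.  */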
2354
2355
2356 /* Returns true if the input parameter VALUE is a float zero. */
2357
2358 int
2359 s390_float_const_zero_p (rtx value)
2360 {
2361 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
2362 && value == CONST0_RTX (GET_MODE (value)));
2363 }
2364
2365 /* Implement TARGET_REGISTER_MOVE_COST. */
2366
2367 static int
2368 s390_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2369 reg_class_t from, reg_class_t to)
2370 {
2371 /* On s390, copy between fprs and gprs is expensive. */
2372 if ((reg_classes_intersect_p (from, GENERAL_REGS)
2373 && reg_classes_intersect_p (to, FP_REGS))
2374 || (reg_classes_intersect_p (from, FP_REGS)
2375 && reg_classes_intersect_p (to, GENERAL_REGS)))
2376 return 10;
2377
2378 return 1;
2379 }
2380
2381 /* Implement TARGET_MEMORY_MOVE_COST. */
2382
2383 static int
2384 s390_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2385 reg_class_t rclass ATTRIBUTE_UNUSED,
2386 bool in ATTRIBUTE_UNUSED)
2387 {
2388 return 1;
2389 }
2390
2391 /* Compute a (partial) cost for rtx X. Return true if the complete
2392 cost has been computed, and false if subexpressions should be
2393 scanned. In either case, *TOTAL contains the cost result.
2394 CODE contains GET_CODE (x), OUTER_CODE contains the code
2395 of the superexpression of x. */
2396
2397 static bool
2398 s390_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
2399 int *total, bool speed ATTRIBUTE_UNUSED)
2400 {
2401 switch (code)
2402 {
2403 case CONST:
2404 case CONST_INT:
2405 case LABEL_REF:
2406 case SYMBOL_REF:
2407 case CONST_DOUBLE:
2408 case MEM:
2409 *total = 0;
2410 return true;
2411
2412 case ASHIFT:
2413 case ASHIFTRT:
2414 case LSHIFTRT:
2415 case ROTATE:
2416 case ROTATERT:
2417 case AND:
2418 case IOR:
2419 case XOR:
2420 case NEG:
2421 case NOT:
2422 *total = COSTS_N_INSNS (1);
2423 return false;
2424
2425 case PLUS:
2426 case MINUS:
2427 *total = COSTS_N_INSNS (1);
2428 return false;
2429
2430 case MULT:
2431 switch (GET_MODE (x))
2432 {
2433 case SImode:
2434 {
2435 rtx left = XEXP (x, 0);
2436 rtx right = XEXP (x, 1);
2437 if (GET_CODE (right) == CONST_INT
2438 && CONST_OK_FOR_K (INTVAL (right)))
2439 *total = s390_cost->mhi;
2440 else if (GET_CODE (left) == SIGN_EXTEND)
2441 *total = s390_cost->mh;
2442 else
2443 *total = s390_cost->ms; /* msr, ms, msy */
2444 break;
2445 }
2446 case DImode:
2447 {
2448 rtx left = XEXP (x, 0);
2449 rtx right = XEXP (x, 1);
2450 if (TARGET_ZARCH)
2451 {
2452 if (GET_CODE (right) == CONST_INT
2453 && CONST_OK_FOR_K (INTVAL (right)))
2454 *total = s390_cost->mghi;
2455 else if (GET_CODE (left) == SIGN_EXTEND)
2456 *total = s390_cost->msgf;
2457 else
2458 *total = s390_cost->msg; /* msgr, msg */
2459 }
2460 else /* TARGET_31BIT */
2461 {
2462 if (GET_CODE (left) == SIGN_EXTEND
2463 && GET_CODE (right) == SIGN_EXTEND)
2464 /* mulsidi case: mr, m */
2465 *total = s390_cost->m;
2466 else if (GET_CODE (left) == ZERO_EXTEND
2467 && GET_CODE (right) == ZERO_EXTEND
2468 && TARGET_CPU_ZARCH)
2469 /* umulsidi case: ml, mlr */
2470 *total = s390_cost->ml;
2471 else
2472 /* Complex calculation is required. */
2473 *total = COSTS_N_INSNS (40);
2474 }
2475 break;
2476 }
2477 case SFmode:
2478 case DFmode:
2479 *total = s390_cost->mult_df;
2480 break;
2481 case TFmode:
2482 *total = s390_cost->mxbr;
2483 break;
2484 default:
2485 return false;
2486 }
2487 return false;
2488
2489 case FMA:
2490 switch (GET_MODE (x))
2491 {
2492 case DFmode:
2493 *total = s390_cost->madbr;
2494 break;
2495 case SFmode:
2496 *total = s390_cost->maebr;
2497 break;
2498 default:
2499 return false;
2500 }
2501 /* Negation of the third argument is free: FMSUB. */
2502 if (GET_CODE (XEXP (x, 2)) == NEG)
2503 {
2504 *total += (rtx_cost (XEXP (x, 0), FMA, 0, speed)
2505 + rtx_cost (XEXP (x, 1), FMA, 1, speed)
2506 + rtx_cost (XEXP (XEXP (x, 2), 0), FMA, 2, speed));
2507 return true;
2508 }
2509 return false;
2510
2511 case UDIV:
2512 case UMOD:
2513 if (GET_MODE (x) == TImode) /* 128 bit division */
2514 *total = s390_cost->dlgr;
2515 else if (GET_MODE (x) == DImode)
2516 {
2517 rtx right = XEXP (x, 1);
2518 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
2519 *total = s390_cost->dlr;
2520 else /* 64 by 64 bit division */
2521 *total = s390_cost->dlgr;
2522 }
2523 else if (GET_MODE (x) == SImode) /* 32 bit division */
2524 *total = s390_cost->dlr;
2525 return false;
2526
2527 case DIV:
2528 case MOD:
2529 if (GET_MODE (x) == DImode)
2530 {
2531 rtx right = XEXP (x, 1);
2532 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
2533 if (TARGET_ZARCH)
2534 *total = s390_cost->dsgfr;
2535 else
2536 *total = s390_cost->dr;
2537 else /* 64 by 64 bit division */
2538 *total = s390_cost->dsgr;
2539 }
2540 else if (GET_MODE (x) == SImode) /* 32 bit division */
2541 *total = s390_cost->dlr;
2542 else if (GET_MODE (x) == SFmode)
2543 {
2544 *total = s390_cost->debr;
2545 }
2546 else if (GET_MODE (x) == DFmode)
2547 {
2548 *total = s390_cost->ddbr;
2549 }
2550 else if (GET_MODE (x) == TFmode)
2551 {
2552 *total = s390_cost->dxbr;
2553 }
2554 return false;
2555
2556 case SQRT:
2557 if (GET_MODE (x) == SFmode)
2558 *total = s390_cost->sqebr;
2559 else if (GET_MODE (x) == DFmode)
2560 *total = s390_cost->sqdbr;
2561 else /* TFmode */
2562 *total = s390_cost->sqxbr;
2563 return false;
2564
2565 case SIGN_EXTEND:
2566 case ZERO_EXTEND:
2567 if (outer_code == MULT || outer_code == DIV || outer_code == MOD
2568 || outer_code == PLUS || outer_code == MINUS
2569 || outer_code == COMPARE)
2570 *total = 0;
2571 return false;
2572
2573 case COMPARE:
2574 *total = COSTS_N_INSNS (1);
2575 if (GET_CODE (XEXP (x, 0)) == AND
2576 && GET_CODE (XEXP (x, 1)) == CONST_INT
2577 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
2578 {
2579 rtx op0 = XEXP (XEXP (x, 0), 0);
2580 rtx op1 = XEXP (XEXP (x, 0), 1);
2581 rtx op2 = XEXP (x, 1);
2582
2583 if (memory_operand (op0, GET_MODE (op0))
2584 && s390_tm_ccmode (op1, op2, 0) != VOIDmode)
2585 return true;
2586 if (register_operand (op0, GET_MODE (op0))
2587 && s390_tm_ccmode (op1, op2, 1) != VOIDmode)
2588 return true;
2589 }
2590 return false;
2591
2592 default:
2593 return false;
2594 }
2595 }
2596
2597 /* Return the cost of an address rtx ADDR. */
2598
2599 static int
2600 s390_address_cost (rtx addr, bool speed ATTRIBUTE_UNUSED)
2601 {
2602 struct s390_address ad;
2603 if (!s390_decompose_address (addr, &ad))
2604 return 1000;
2605
2606 return ad.indx? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (1);
2607 }
2608
2609 /* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
2610 otherwise return 0. */
2611
2612 int
2613 tls_symbolic_operand (rtx op)
2614 {
2615 if (GET_CODE (op) != SYMBOL_REF)
2616 return 0;
2617 return SYMBOL_REF_TLS_MODEL (op);
2618 }
2619 \f
2620 /* Split DImode access register reference REG (on 64-bit) into its constituent
2621 low and high parts, and store them into LO and HI. Note that gen_lowpart/
2622 gen_highpart cannot be used as they assume all registers are word-sized,
2623 while our access registers have only half that size. */
2624
2625 void
2626 s390_split_access_reg (rtx reg, rtx *lo, rtx *hi)
2627 {
2628 gcc_assert (TARGET_64BIT);
2629 gcc_assert (ACCESS_REG_P (reg));
2630 gcc_assert (GET_MODE (reg) == DImode);
2631 gcc_assert (!(REGNO (reg) & 1));
2632
2633 *lo = gen_rtx_REG (SImode, REGNO (reg) + 1);
2634 *hi = gen_rtx_REG (SImode, REGNO (reg));
2635 }
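/* E.g. for the DImode value held in the access-register pair %a0/%a1,
   *lo becomes the SImode reg %a1 and *hi the SImode reg %a0, matching
   the big-endian layout of the 32-bit access registers.  */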
2636
2637 /* Return true if OP contains a symbol reference. */
2638
2639 bool
2640 symbolic_reference_mentioned_p (rtx op)
2641 {
2642 const char *fmt;
2643 int i;
2644
2645 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
2646 return 1;
2647
2648 fmt = GET_RTX_FORMAT (GET_CODE (op));
2649 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2650 {
2651 if (fmt[i] == 'E')
2652 {
2653 int j;
2654
2655 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2656 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2657 return 1;
2658 }
2659
2660 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
2661 return 1;
2662 }
2663
2664 return 0;
2665 }
2666
2667 /* Return true if OP contains a reference to a thread-local symbol. */
2668
2669 bool
2670 tls_symbolic_reference_mentioned_p (rtx op)
2671 {
2672 const char *fmt;
2673 int i;
2674
2675 if (GET_CODE (op) == SYMBOL_REF)
2676 return tls_symbolic_operand (op);
2677
2678 fmt = GET_RTX_FORMAT (GET_CODE (op));
2679 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2680 {
2681 if (fmt[i] == 'E')
2682 {
2683 int j;
2684
2685 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2686 if (tls_symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2687 return true;
2688 }
2689
2690 else if (fmt[i] == 'e' && tls_symbolic_reference_mentioned_p (XEXP (op, i)))
2691 return true;
2692 }
2693
2694 return false;
2695 }
2696
2697
2698 /* Return true if OP is a legitimate general operand when
2699 generating PIC code. It is given that flag_pic is on
2700 and that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2701
2702 int
2703 legitimate_pic_operand_p (rtx op)
2704 {
2705 /* Accept all non-symbolic constants. */
2706 if (!SYMBOLIC_CONST (op))
2707 return 1;
2708
2709 /* Reject everything else; must be handled
2710 via emit_symbolic_move. */
2711 return 0;
2712 }
2713
2714 /* Returns true if the constant value OP is a legitimate general operand.
2715 It is given that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2716
2717 static bool
2718 s390_legitimate_constant_p (enum machine_mode mode, rtx op)
2719 {
2720 /* Accept all non-symbolic constants. */
2721 if (!SYMBOLIC_CONST (op))
2722 return 1;
2723
2724 /* Accept immediate LARL operands. */
2725 if (TARGET_CPU_ZARCH && larl_operand (op, mode))
2726 return 1;
2727
2728 /* Thread-local symbols are never legal constants. This is
2729 so that emit_call knows that computing such addresses
2730 might require a function call. */
2731 if (TLS_SYMBOLIC_CONST (op))
2732 return 0;
2733
2734 /* In the PIC case, symbolic constants must *not* be
2735 forced into the literal pool. We accept them here,
2736 so that they will be handled by emit_symbolic_move. */
2737 if (flag_pic)
2738 return 1;
2739
2740 /* All remaining non-PIC symbolic constants are
2741 forced into the literal pool. */
2742 return 0;
2743 }
2744
2745 /* Determine if it's legal to put X into the constant pool. This
2746 is not possible if X contains the address of a symbol that is
2747 not constant (TLS) or not known at final link time (PIC). */
2748
2749 static bool
2750 s390_cannot_force_const_mem (enum machine_mode mode, rtx x)
2751 {
2752 switch (GET_CODE (x))
2753 {
2754 case CONST_INT:
2755 case CONST_DOUBLE:
2756 /* Accept all non-symbolic constants. */
2757 return false;
2758
2759 case LABEL_REF:
2760 /* Labels are OK iff we are non-PIC. */
2761 return flag_pic != 0;
2762
2763 case SYMBOL_REF:
2764 /* 'Naked' TLS symbol references are never OK,
2765 non-TLS symbols are OK iff we are non-PIC. */
2766 if (tls_symbolic_operand (x))
2767 return true;
2768 else
2769 return flag_pic != 0;
2770
2771 case CONST:
2772 return s390_cannot_force_const_mem (mode, XEXP (x, 0));
2773 case PLUS:
2774 case MINUS:
2775 return s390_cannot_force_const_mem (mode, XEXP (x, 0))
2776 || s390_cannot_force_const_mem (mode, XEXP (x, 1));
2777
2778 case UNSPEC:
2779 switch (XINT (x, 1))
2780 {
2781 /* Only lt-relative or GOT-relative UNSPECs are OK. */
2782 case UNSPEC_LTREL_OFFSET:
2783 case UNSPEC_GOT:
2784 case UNSPEC_GOTOFF:
2785 case UNSPEC_PLTOFF:
2786 case UNSPEC_TLSGD:
2787 case UNSPEC_TLSLDM:
2788 case UNSPEC_NTPOFF:
2789 case UNSPEC_DTPOFF:
2790 case UNSPEC_GOTNTPOFF:
2791 case UNSPEC_INDNTPOFF:
2792 return false;
2793
2794 /* If the literal pool shares the code section, execute
2795 template placeholders can be put into the pool as well. */
2796 case UNSPEC_INSN:
2797 return TARGET_CPU_ZARCH;
2798
2799 default:
2800 return true;
2801 }
2802 break;
2803
2804 default:
2805 gcc_unreachable ();
2806 }
2807 }
2808
2809 /* Returns true if the constant value OP is a legitimate general
2810 operand during and after reload. The difference to
2811 legitimate_constant_p is that this function will not accept
2812 a constant that would need to be forced to the literal pool
2813 before it can be used as operand.
2814 This function accepts all constants which can be loaded directly
2815 into a GPR. */
2816
2817 bool
2818 legitimate_reload_constant_p (rtx op)
2819 {
2820 /* Accept la(y) operands. */
2821 if (GET_CODE (op) == CONST_INT
2822 && DISP_IN_RANGE (INTVAL (op)))
2823 return true;
2824
2825 /* Accept l(g)hi/l(g)fi operands. */
2826 if (GET_CODE (op) == CONST_INT
2827 && (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_Os (INTVAL (op))))
2828 return true;
2829
2830 /* Accept lliXX operands. */
2831 if (TARGET_ZARCH
2832 && GET_CODE (op) == CONST_INT
2833 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2834 && s390_single_part (op, word_mode, HImode, 0) >= 0)
2835 return true;
2836
2837 if (TARGET_EXTIMM
2838 && GET_CODE (op) == CONST_INT
2839 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2840 && s390_single_part (op, word_mode, SImode, 0) >= 0)
2841 return true;
2842
2843 /* Accept larl operands. */
2844 if (TARGET_CPU_ZARCH
2845 && larl_operand (op, VOIDmode))
2846 return true;
2847
2848 /* Accept floating-point zero operands that fit into a single GPR. */
2849 if (GET_CODE (op) == CONST_DOUBLE
2850 && s390_float_const_zero_p (op)
2851 && GET_MODE_SIZE (GET_MODE (op)) <= UNITS_PER_WORD)
2852 return true;
2853
2854 /* Accept double-word operands that can be split. */
2855 if (GET_CODE (op) == CONST_INT
2856 && trunc_int_for_mode (INTVAL (op), word_mode) != INTVAL (op))
2857 {
2858 enum machine_mode dword_mode = word_mode == SImode ? DImode : TImode;
2859 rtx hi = operand_subword (op, 0, 0, dword_mode);
2860 rtx lo = operand_subword (op, 1, 0, dword_mode);
2861 return legitimate_reload_constant_p (hi)
2862 && legitimate_reload_constant_p (lo);
2863 }
2864
2865 /* Everything else cannot be handled without reload. */
2866 return false;
2867 }
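/* For instance, with word_mode == SImode a constant such as 0x100000003
   does not fit a single word, but it is accepted here because its two
   SImode subwords, 1 and 3, are each loadable on their own.  */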
2868
2869 /* Returns true if the constant value OP is a legitimate fp operand
2870 during and after reload.
2871 This function accepts all constants which can be loaded directly
2872 into an FPR. */
2873
2874 static bool
2875 legitimate_reload_fp_constant_p (rtx op)
2876 {
2877 /* Accept floating-point zero operands if the load zero instruction
2878 can be used. */
2879 if (TARGET_Z196
2880 && GET_CODE (op) == CONST_DOUBLE
2881 && s390_float_const_zero_p (op))
2882 return true;
2883
2884 return false;
2885 }
2886
2887 /* Given an rtx OP being reloaded into a reg required to be in class RCLASS,
2888 return the class of reg to actually use. */
2889
2890 static reg_class_t
2891 s390_preferred_reload_class (rtx op, reg_class_t rclass)
2892 {
2893 switch (GET_CODE (op))
2894 {
2895 /* Constants we cannot reload into general registers
2896 must be forced into the literal pool. */
2897 case CONST_DOUBLE:
2898 case CONST_INT:
2899 if (reg_class_subset_p (GENERAL_REGS, rclass)
2900 && legitimate_reload_constant_p (op))
2901 return GENERAL_REGS;
2902 else if (reg_class_subset_p (ADDR_REGS, rclass)
2903 && legitimate_reload_constant_p (op))
2904 return ADDR_REGS;
2905 else if (reg_class_subset_p (FP_REGS, rclass)
2906 && legitimate_reload_fp_constant_p (op))
2907 return FP_REGS;
2908 return NO_REGS;
2909
2910 /* If a symbolic constant or a PLUS is reloaded,
2911 it is most likely being used as an address, so
2912 prefer ADDR_REGS. If RCLASS is not a superset
2913 of ADDR_REGS, e.g. FP_REGS, reject this reload. */
2914 case LABEL_REF:
2915 case SYMBOL_REF:
2916 case CONST:
2917 if (!legitimate_reload_constant_p (op))
2918 return NO_REGS;
2919 /* fallthrough */
2920 case PLUS:
2921 /* load address will be used. */
2922 if (reg_class_subset_p (ADDR_REGS, rclass))
2923 return ADDR_REGS;
2924 else
2925 return NO_REGS;
2926
2927 default:
2928 break;
2929 }
2930
2931 return rclass;
2932 }
2933
2934 /* Return true if ADDR is SYMBOL_REF + addend with addend being a
2935 multiple of ALIGNMENT and the SYMBOL_REF being naturally
2936 aligned. */
2937
2938 bool
2939 s390_check_symref_alignment (rtx addr, HOST_WIDE_INT alignment)
2940 {
2941 HOST_WIDE_INT addend;
2942 rtx symref;
2943
2944 if (!s390_symref_operand_p (addr, &symref, &addend))
2945 return false;
2946
2947 return (!SYMBOL_REF_NOT_NATURALLY_ALIGNED_P (symref)
2948 && !(addend & (alignment - 1)));
2949 }
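/* E.g. with ALIGNMENT == 4, an address of the form sym + 8 passes while
   sym + 2 does not, provided the symbol itself is known to be naturally
   aligned.  */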
2950
2951 /* ADDR is moved into REG using larl. If ADDR isn't a valid larl
2952 operand, SCRATCH is used to reload the even part of the address;
2953 one is then added to form the final value. */
2954
2955 void
2956 s390_reload_larl_operand (rtx reg, rtx addr, rtx scratch)
2957 {
2958 HOST_WIDE_INT addend;
2959 rtx symref;
2960
2961 if (!s390_symref_operand_p (addr, &symref, &addend))
2962 gcc_unreachable ();
2963
2964 if (!(addend & 1))
2965 /* Easy case. The addend is even so larl will do fine. */
2966 emit_move_insn (reg, addr);
2967 else
2968 {
2969 /* We can leave the scratch register untouched if the target
2970 register is a valid base register. */
2971 if (REGNO (reg) < FIRST_PSEUDO_REGISTER
2972 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS)
2973 scratch = reg;
2974
2975 gcc_assert (REGNO (scratch) < FIRST_PSEUDO_REGISTER);
2976 gcc_assert (REGNO_REG_CLASS (REGNO (scratch)) == ADDR_REGS);
2977
2978 if (addend != 1)
2979 emit_move_insn (scratch,
2980 gen_rtx_CONST (Pmode,
2981 gen_rtx_PLUS (Pmode, symref,
2982 GEN_INT (addend - 1))));
2983 else
2984 emit_move_insn (scratch, symref);
2985
2986 /* Increment the address using la in order to avoid clobbering cc. */
2987 emit_move_insn (reg, gen_rtx_PLUS (Pmode, scratch, const1_rtx));
2988 }
2989 }
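/* A short worked example: for ADDR = sym + 5, SCRATCH is loaded with
   sym + 4 (an even offset, which larl can handle) and REG is then
   computed as SCRATCH + 1 using la, leaving the condition code
   untouched.  The offset 5 is just an example value.  */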
2990
2991 /* Generate what is necessary to move between REG and MEM using
2992 SCRATCH. The direction is given by TOMEM. */
2993
2994 void
2995 s390_reload_symref_address (rtx reg, rtx mem, rtx scratch, bool tomem)
2996 {
2997 /* Reload might have pulled a constant out of the literal pool.
2998 Force it back in. */
2999 if (CONST_INT_P (mem) || GET_CODE (mem) == CONST_DOUBLE
3000 || GET_CODE (mem) == CONST)
3001 mem = force_const_mem (GET_MODE (reg), mem);
3002
3003 gcc_assert (MEM_P (mem));
3004
3005 /* For a load from memory we can leave the scratch register
3006 untouched if the target register is a valid base register. */
3007 if (!tomem
3008 && REGNO (reg) < FIRST_PSEUDO_REGISTER
3009 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS
3010 && GET_MODE (reg) == GET_MODE (scratch))
3011 scratch = reg;
3012
3013 /* Load address into scratch register. Since we can't have a
3014 secondary reload for a secondary reload we have to cover the case
3015 where larl would need a secondary reload here as well. */
3016 s390_reload_larl_operand (scratch, XEXP (mem, 0), scratch);
3017
3018 /* Now we can use a standard load/store to do the move. */
3019 if (tomem)
3020 emit_move_insn (replace_equiv_address (mem, scratch), reg);
3021 else
3022 emit_move_insn (reg, replace_equiv_address (mem, scratch));
3023 }
3024
3025 /* Inform reload about cases where moving X with a mode MODE to a register in
3026 RCLASS requires an extra scratch or immediate register. Return the class
3027 needed for the immediate register. */
3028
3029 static reg_class_t
3030 s390_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
3031 enum machine_mode mode, secondary_reload_info *sri)
3032 {
3033 enum reg_class rclass = (enum reg_class) rclass_i;
3034
3035 /* Intermediate register needed. */
3036 if (reg_classes_intersect_p (CC_REGS, rclass))
3037 return GENERAL_REGS;
3038
3039 if (TARGET_Z10)
3040 {
3041 HOST_WIDE_INT offset;
3042 rtx symref;
3043
3044 /* On z10 several optimizer steps may generate larl operands with
3045 an odd addend. */
3046 if (in_p
3047 && s390_symref_operand_p (x, &symref, &offset)
3048 && mode == Pmode
3049 && !SYMBOL_REF_ALIGN1_P (symref)
3050 && (offset & 1) == 1)
3051 sri->icode = ((mode == DImode) ? CODE_FOR_reloaddi_larl_odd_addend_z10
3052 : CODE_FOR_reloadsi_larl_odd_addend_z10);
3053
3054 /* On z10 we need a scratch register when moving QI, TI or floating
3055 point mode values from or to a memory location with a SYMBOL_REF
3056 or if the symref addend of an HI, SI or DI move is not aligned to the
3057 width of the access. */
3058 if (MEM_P (x)
3059 && s390_symref_operand_p (XEXP (x, 0), NULL, NULL)
3060 && (mode == QImode || mode == TImode || FLOAT_MODE_P (mode)
3061 || (!TARGET_ZARCH && mode == DImode)
3062 || ((mode == HImode || mode == SImode || mode == DImode)
3063 && (!s390_check_symref_alignment (XEXP (x, 0),
3064 GET_MODE_SIZE (mode))))))
3065 {
3066 #define __SECONDARY_RELOAD_CASE(M,m) \
3067 case M##mode: \
3068 if (TARGET_64BIT) \
3069 sri->icode = in_p ? CODE_FOR_reload##m##di_toreg_z10 : \
3070 CODE_FOR_reload##m##di_tomem_z10; \
3071 else \
3072 sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 : \
3073 CODE_FOR_reload##m##si_tomem_z10; \
3074 break;
3075
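/* For reference, the QImode case below expands (64-bit target, reload
   into a register) to roughly:

     case QImode:
       sri->icode = CODE_FOR_reloadqidi_toreg_z10;
       break;

   i.e. one reload pattern per mode and direction.  */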
3076 switch (GET_MODE (x))
3077 {
3078 __SECONDARY_RELOAD_CASE (QI, qi);
3079 __SECONDARY_RELOAD_CASE (HI, hi);
3080 __SECONDARY_RELOAD_CASE (SI, si);
3081 __SECONDARY_RELOAD_CASE (DI, di);
3082 __SECONDARY_RELOAD_CASE (TI, ti);
3083 __SECONDARY_RELOAD_CASE (SF, sf);
3084 __SECONDARY_RELOAD_CASE (DF, df);
3085 __SECONDARY_RELOAD_CASE (TF, tf);
3086 __SECONDARY_RELOAD_CASE (SD, sd);
3087 __SECONDARY_RELOAD_CASE (DD, dd);
3088 __SECONDARY_RELOAD_CASE (TD, td);
3089
3090 default:
3091 gcc_unreachable ();
3092 }
3093 #undef __SECONDARY_RELOAD_CASE
3094 }
3095 }
3096
3097 /* We need a scratch register when loading a PLUS expression which
3098 is not a legitimate operand of the LOAD ADDRESS instruction. */
3099 if (in_p && s390_plus_operand (x, mode))
3100 sri->icode = (TARGET_64BIT ?
3101 CODE_FOR_reloaddi_plus : CODE_FOR_reloadsi_plus);
3102
3103 /* When performing a multiword move from or to memory we have to make sure the
3104 second chunk in memory is addressable without causing a displacement
3105 overflow. If that would be the case we calculate the address in
3106 a scratch register. */
3107 if (MEM_P (x)
3108 && GET_CODE (XEXP (x, 0)) == PLUS
3109 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3110 && !DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1))
3111 + GET_MODE_SIZE (mode) - 1))
3112 {
3113 /* For GENERAL_REGS a displacement overflow is no problem if occurring
3114 in an s_operand address since we may fall back to lm/stm. So we only
3115 have to care about overflows in the b+i+d case. */
3116 if ((reg_classes_intersect_p (GENERAL_REGS, rclass)
3117 && s390_class_max_nregs (GENERAL_REGS, mode) > 1
3118 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
3119 /* For FP_REGS no lm/stm is available so this check is triggered
3120 for displacement overflows in b+i+d and b+d like addresses. */
3121 || (reg_classes_intersect_p (FP_REGS, rclass)
3122 && s390_class_max_nregs (FP_REGS, mode) > 1))
3123 {
3124 if (in_p)
3125 sri->icode = (TARGET_64BIT ?
3126 CODE_FOR_reloaddi_nonoffmem_in :
3127 CODE_FOR_reloadsi_nonoffmem_in);
3128 else
3129 sri->icode = (TARGET_64BIT ?
3130 CODE_FOR_reloaddi_nonoffmem_out :
3131 CODE_FOR_reloadsi_nonoffmem_out);
3132 }
3133 }
3134
3135 /* A scratch address register is needed when a symbolic constant is
3136 copied to r0 when compiling with -fPIC. In other cases the target
3137 register might be used as a temporary (see legitimize_pic_address). */
3138 if (in_p && SYMBOLIC_CONST (x) && flag_pic == 2 && rclass != ADDR_REGS)
3139 sri->icode = (TARGET_64BIT ?
3140 CODE_FOR_reloaddi_PIC_addr :
3141 CODE_FOR_reloadsi_PIC_addr);
3142
3143 /* Either scratch or no register needed. */
3144 return NO_REGS;
3145 }
3146
3147 /* Generate code to load SRC, which is a PLUS that is not a
3148 legitimate operand for the LA instruction, into TARGET.
3149 SCRATCH may be used as a scratch register. */
3150
3151 void
3152 s390_expand_plus_operand (rtx target, rtx src,
3153 rtx scratch)
3154 {
3155 rtx sum1, sum2;
3156 struct s390_address ad;
3157
3158 /* src must be a PLUS; get its two operands. */
3159 gcc_assert (GET_CODE (src) == PLUS);
3160 gcc_assert (GET_MODE (src) == Pmode);
3161
3162 /* Check if any of the two operands is already scheduled
3163 for replacement by reload. This can happen e.g. when
3164 float registers occur in an address. */
3165 sum1 = find_replacement (&XEXP (src, 0));
3166 sum2 = find_replacement (&XEXP (src, 1));
3167 src = gen_rtx_PLUS (Pmode, sum1, sum2);
3168
3169 /* If the address is already strictly valid, there's nothing to do. */
3170 if (!s390_decompose_address (src, &ad)
3171 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
3172 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
3173 {
3174 /* Otherwise, one of the operands cannot be an address register;
3175 we reload its value into the scratch register. */
3176 if (true_regnum (sum1) < 1 || true_regnum (sum1) > 15)
3177 {
3178 emit_move_insn (scratch, sum1);
3179 sum1 = scratch;
3180 }
3181 if (true_regnum (sum2) < 1 || true_regnum (sum2) > 15)
3182 {
3183 emit_move_insn (scratch, sum2);
3184 sum2 = scratch;
3185 }
3186
3187 /* According to the way these invalid addresses are generated
3188 in reload.c, it should never happen (at least on s390) that
3189 *neither* of the PLUS components, after find_replacement
3190 was applied, is an address register. */
3191 if (sum1 == scratch && sum2 == scratch)
3192 {
3193 debug_rtx (src);
3194 gcc_unreachable ();
3195 }
3196
3197 src = gen_rtx_PLUS (Pmode, sum1, sum2);
3198 }
3199
3200 /* Emit the LOAD ADDRESS pattern. Note that reload of PLUS
3201 is only ever performed on addresses, so we can mark the
3202 sum as legitimate for LA in any case. */
3203 s390_load_address (target, src);
3204 }
3205
3206
3207 /* Return true if ADDR is a valid memory address.
3208 STRICT specifies whether strict register checking applies. */
3209
3210 static bool
3211 s390_legitimate_address_p (enum machine_mode mode, rtx addr, bool strict)
3212 {
3213 struct s390_address ad;
3214
3215 if (TARGET_Z10
3216 && larl_operand (addr, VOIDmode)
3217 && (mode == VOIDmode
3218 || s390_check_symref_alignment (addr, GET_MODE_SIZE (mode))))
3219 return true;
3220
3221 if (!s390_decompose_address (addr, &ad))
3222 return false;
3223
3224 if (strict)
3225 {
3226 if (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
3227 return false;
3228
3229 if (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx)))
3230 return false;
3231 }
3232 else
3233 {
3234 if (ad.base
3235 && !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
3236 || REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
3237 return false;
3238
3239 if (ad.indx
3240 && !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
3241 || REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
3242 return false;
3243 }
3244 return true;
3245 }
3246
3247 /* Return true if OP is a valid operand for the LA instruction.
3248 In 31-bit, we need to prove that the result is used as an
3249 address, as LA performs only a 31-bit addition. */
3250
3251 bool
3252 legitimate_la_operand_p (rtx op)
3253 {
3254 struct s390_address addr;
3255 if (!s390_decompose_address (op, &addr))
3256 return false;
3257
3258 return (TARGET_64BIT || addr.pointer);
3259 }
3260
3261 /* Return true if it is valid *and* preferable to use LA to
3262 compute the sum of OP1 and OP2. */
3263
3264 bool
3265 preferred_la_operand_p (rtx op1, rtx op2)
3266 {
3267 struct s390_address addr;
3268
3269 if (op2 != const0_rtx)
3270 op1 = gen_rtx_PLUS (Pmode, op1, op2);
3271
3272 if (!s390_decompose_address (op1, &addr))
3273 return false;
3274 if (addr.base && !REGNO_OK_FOR_BASE_P (REGNO (addr.base)))
3275 return false;
3276 if (addr.indx && !REGNO_OK_FOR_INDEX_P (REGNO (addr.indx)))
3277 return false;
3278
3279 /* Avoid LA instructions with index register on z196; it is
3280 preferable to use regular add instructions when possible. */
3281 if (addr.indx && s390_tune == PROCESSOR_2817_Z196)
3282 return false;
3283
3284 if (!TARGET_64BIT && !addr.pointer)
3285 return false;
3286
3287 if (addr.pointer)
3288 return true;
3289
3290 if ((addr.base && REG_P (addr.base) && REG_POINTER (addr.base))
3291 || (addr.indx && REG_P (addr.indx) && REG_POINTER (addr.indx)))
3292 return true;
3293
3294 return false;
3295 }
3296
3297 /* Emit a forced load-address operation to load SRC into DST.
3298 This will use the LOAD ADDRESS instruction even in situations
3299 where legitimate_la_operand_p (SRC) returns false. */
3300
3301 void
3302 s390_load_address (rtx dst, rtx src)
3303 {
3304 if (TARGET_64BIT)
3305 emit_move_insn (dst, src);
3306 else
3307 emit_insn (gen_force_la_31 (dst, src));
3308 }
3309
3310 /* Return a legitimate reference for ORIG (an address) using the
3311 register REG. If REG is 0, a new pseudo is generated.
3312
3313 There are two types of references that must be handled:
3314
3315 1. Global data references must load the address from the GOT, via
3316 the PIC reg. An insn is emitted to do this load, and the reg is
3317 returned.
3318
3319 2. Static data references, constant pool addresses, and code labels
3320 compute the address as an offset from the GOT, whose base is in
3321 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
3322 differentiate them from global data objects. The returned
3323 address is the PIC reg + an unspec constant.
3324
3325 TARGET_LEGITIMIZE_ADDRESS_P rejects symbolic references unless the PIC
3326 reg also appears in the address. */
3327
3328 rtx
3329 legitimize_pic_address (rtx orig, rtx reg)
3330 {
3331 rtx addr = orig;
3332 rtx new_rtx = orig;
3333 rtx base;
3334
3335 gcc_assert (!TLS_SYMBOLIC_CONST (addr));
3336
3337 if (GET_CODE (addr) == LABEL_REF
3338 || (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (addr)))
3339 {
3340 /* This is a local symbol. */
3341 if (TARGET_CPU_ZARCH && larl_operand (addr, VOIDmode))
3342 {
3343 /* Access local symbols PC-relative via LARL.
3344 This is the same as in the non-PIC case, so it is
3345 handled automatically ... */
3346 }
3347 else
3348 {
3349 /* Access local symbols relative to the GOT. */
3350
3351 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3352
3353 if (reload_in_progress || reload_completed)
3354 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3355
3356 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
3357 addr = gen_rtx_CONST (Pmode, addr);
3358 addr = force_const_mem (Pmode, addr);
3359 emit_move_insn (temp, addr);
3360
3361 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3362 if (reg != 0)
3363 {
3364 s390_load_address (reg, new_rtx);
3365 new_rtx = reg;
3366 }
3367 }
3368 }
3369 else if (GET_CODE (addr) == SYMBOL_REF)
3370 {
3371 if (reg == 0)
3372 reg = gen_reg_rtx (Pmode);
3373
3374 if (flag_pic == 1)
3375 {
3376 /* Assume GOT offset < 4k. This is handled the same way
3377 in both 31- and 64-bit code (@GOT). */
3378
3379 if (reload_in_progress || reload_completed)
3380 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3381
3382 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3383 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3384 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
3385 new_rtx = gen_const_mem (Pmode, new_rtx);
3386 emit_move_insn (reg, new_rtx);
3387 new_rtx = reg;
3388 }
3389 else if (TARGET_CPU_ZARCH)
3390 {
3391 /* If the GOT offset might be >= 4k, we determine the position
3392 of the GOT entry via a PC-relative LARL (@GOTENT). */
3393
3394 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3395
3396 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
3397 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
3398
3399 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
3400 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3401 emit_move_insn (temp, new_rtx);
3402
3403 new_rtx = gen_const_mem (Pmode, temp);
3404 emit_move_insn (reg, new_rtx);
3405 new_rtx = reg;
3406 }
3407 else
3408 {
3409 /* If the GOT offset might be >= 4k, we have to load it
3410 from the literal pool (@GOT). */
3411
3412 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3413
3414 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
3415 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
3416
3417 if (reload_in_progress || reload_completed)
3418 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3419
3420 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3421 addr = gen_rtx_CONST (Pmode, addr);
3422 addr = force_const_mem (Pmode, addr);
3423 emit_move_insn (temp, addr);
3424
3425 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3426 new_rtx = gen_const_mem (Pmode, new_rtx);
3427 emit_move_insn (reg, new_rtx);
3428 new_rtx = reg;
3429 }
3430 }
3431 else
3432 {
3433 if (GET_CODE (addr) == CONST)
3434 {
3435 addr = XEXP (addr, 0);
3436 if (GET_CODE (addr) == UNSPEC)
3437 {
3438 gcc_assert (XVECLEN (addr, 0) == 1);
3439 switch (XINT (addr, 1))
3440 {
3441 /* If someone moved a GOT-relative UNSPEC
3442 out of the literal pool, force them back in. */
3443 case UNSPEC_GOTOFF:
3444 case UNSPEC_PLTOFF:
3445 new_rtx = force_const_mem (Pmode, orig);
3446 break;
3447
3448 /* @GOT is OK as is if small. */
3449 case UNSPEC_GOT:
3450 if (flag_pic == 2)
3451 new_rtx = force_const_mem (Pmode, orig);
3452 break;
3453
3454 /* @GOTENT is OK as is. */
3455 case UNSPEC_GOTENT:
3456 break;
3457
3458 /* @PLT is OK as is on 64-bit, must be converted to
3459 GOT-relative @PLTOFF on 31-bit. */
3460 case UNSPEC_PLT:
3461 if (!TARGET_CPU_ZARCH)
3462 {
3463 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3464
3465 if (reload_in_progress || reload_completed)
3466 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3467
3468 addr = XVECEXP (addr, 0, 0);
3469 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr),
3470 UNSPEC_PLTOFF);
3471 addr = gen_rtx_CONST (Pmode, addr);
3472 addr = force_const_mem (Pmode, addr);
3473 emit_move_insn (temp, addr);
3474
3475 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3476 if (reg != 0)
3477 {
3478 s390_load_address (reg, new_rtx);
3479 new_rtx = reg;
3480 }
3481 }
3482 break;
3483
3484 /* Everything else cannot happen. */
3485 default:
3486 gcc_unreachable ();
3487 }
3488 }
3489 else
3490 gcc_assert (GET_CODE (addr) == PLUS);
3491 }
3492 if (GET_CODE (addr) == PLUS)
3493 {
3494 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
3495
3496 gcc_assert (!TLS_SYMBOLIC_CONST (op0));
3497 gcc_assert (!TLS_SYMBOLIC_CONST (op1));
3498
3499 /* Check first to see if this is a constant offset
3500 from a local symbol reference. */
3501 if ((GET_CODE (op0) == LABEL_REF
3502 || (GET_CODE (op0) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (op0)))
3503 && GET_CODE (op1) == CONST_INT)
3504 {
3505 if (TARGET_CPU_ZARCH
3506 && larl_operand (op0, VOIDmode)
3507 && INTVAL (op1) < (HOST_WIDE_INT)1 << 31
3508 && INTVAL (op1) >= -((HOST_WIDE_INT)1 << 31))
3509 {
3510 if (INTVAL (op1) & 1)
3511 {
3512 /* LARL can't handle odd offsets, so emit a
3513 pair of LARL and LA. */
3514 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3515
3516 if (!DISP_IN_RANGE (INTVAL (op1)))
3517 {
3518 HOST_WIDE_INT even = INTVAL (op1) - 1;
3519 op0 = gen_rtx_PLUS (Pmode, op0, GEN_INT (even));
3520 op0 = gen_rtx_CONST (Pmode, op0);
3521 op1 = const1_rtx;
3522 }
3523
3524 emit_move_insn (temp, op0);
3525 new_rtx = gen_rtx_PLUS (Pmode, temp, op1);
3526
3527 if (reg != 0)
3528 {
3529 s390_load_address (reg, new_rtx);
3530 new_rtx = reg;
3531 }
3532 }
3533 else
3534 {
3535 /* If the offset is even, we can just use LARL.
3536 This will happen automatically. */
3537 }
3538 }
3539 else
3540 {
3541 /* Access local symbols relative to the GOT. */
3542
3543 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3544
3545 if (reload_in_progress || reload_completed)
3546 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3547
3548 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
3549 UNSPEC_GOTOFF);
3550 addr = gen_rtx_PLUS (Pmode, addr, op1);
3551 addr = gen_rtx_CONST (Pmode, addr);
3552 addr = force_const_mem (Pmode, addr);
3553 emit_move_insn (temp, addr);
3554
3555 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3556 if (reg != 0)
3557 {
3558 s390_load_address (reg, new_rtx);
3559 new_rtx = reg;
3560 }
3561 }
3562 }
3563
3564 /* Now, check whether it is a GOT relative symbol plus offset
3565 that was pulled out of the literal pool. Force it back in. */
3566
3567 else if (GET_CODE (op0) == UNSPEC
3568 && GET_CODE (op1) == CONST_INT
3569 && XINT (op0, 1) == UNSPEC_GOTOFF)
3570 {
3571 gcc_assert (XVECLEN (op0, 0) == 1);
3572
3573 new_rtx = force_const_mem (Pmode, orig);
3574 }
3575
3576 /* Otherwise, compute the sum. */
3577 else
3578 {
3579 base = legitimize_pic_address (XEXP (addr, 0), reg);
3580 new_rtx = legitimize_pic_address (XEXP (addr, 1),
3581 base == reg ? NULL_RTX : reg);
3582 if (GET_CODE (new_rtx) == CONST_INT)
3583 new_rtx = plus_constant (Pmode, base, INTVAL (new_rtx));
3584 else
3585 {
3586 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
3587 {
3588 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
3589 new_rtx = XEXP (new_rtx, 1);
3590 }
3591 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
3592 }
3593
3594 if (GET_CODE (new_rtx) == CONST)
3595 new_rtx = XEXP (new_rtx, 0);
3596 new_rtx = force_operand (new_rtx, 0);
3597 }
3598 }
3599 }
3600 return new_rtx;
3601 }
3602
3603 /* Load the thread pointer into a register. */
3604
3605 rtx
3606 s390_get_thread_pointer (void)
3607 {
3608 rtx tp = gen_reg_rtx (Pmode);
3609
3610 emit_move_insn (tp, gen_rtx_REG (Pmode, TP_REGNUM));
3611 mark_reg_pointer (tp, BITS_PER_WORD);
3612
3613 return tp;
3614 }
3615
3616 /* Emit a TLS call insn. The call target is the SYMBOL_REF stored
3617 in s390_tls_symbol which always refers to __tls_get_offset.
3618 The returned offset is written to RESULT_REG and a USE rtx is
3619 generated for TLS_CALL. */
3620
3621 static GTY(()) rtx s390_tls_symbol;
3622
3623 static void
3624 s390_emit_tls_call_insn (rtx result_reg, rtx tls_call)
3625 {
3626 rtx insn;
3627
3628 if (!flag_pic)
3629 emit_insn (s390_load_got ());
3630
3631 if (!s390_tls_symbol)
3632 s390_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_offset");
3633
3634 insn = s390_emit_call (s390_tls_symbol, tls_call, result_reg,
3635 gen_rtx_REG (Pmode, RETURN_REGNUM));
3636
3637 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), result_reg);
3638 RTL_CONST_CALL_P (insn) = 1;
3639 }
3640
3641 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3642 this (thread-local) address. REG may be used as a temporary. */
3643
3644 static rtx
3645 legitimize_tls_address (rtx addr, rtx reg)
3646 {
3647 rtx new_rtx, tls_call, temp, base, r2, insn;
3648
3649 if (GET_CODE (addr) == SYMBOL_REF)
3650 switch (tls_symbolic_operand (addr))
3651 {
3652 case TLS_MODEL_GLOBAL_DYNAMIC:
3653 start_sequence ();
3654 r2 = gen_rtx_REG (Pmode, 2);
3655 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_TLSGD);
3656 new_rtx = gen_rtx_CONST (Pmode, tls_call);
3657 new_rtx = force_const_mem (Pmode, new_rtx);
3658 emit_move_insn (r2, new_rtx);
3659 s390_emit_tls_call_insn (r2, tls_call);
3660 insn = get_insns ();
3661 end_sequence ();
3662
3663 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3664 temp = gen_reg_rtx (Pmode);
3665 emit_libcall_block (insn, temp, r2, new_rtx);
3666
3667 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3668 if (reg != 0)
3669 {
3670 s390_load_address (reg, new_rtx);
3671 new_rtx = reg;
3672 }
3673 break;
3674
3675 case TLS_MODEL_LOCAL_DYNAMIC:
3676 start_sequence ();
3677 r2 = gen_rtx_REG (Pmode, 2);
3678 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM);
3679 new_rtx = gen_rtx_CONST (Pmode, tls_call);
3680 new_rtx = force_const_mem (Pmode, new_rtx);
3681 emit_move_insn (r2, new_rtx);
3682 s390_emit_tls_call_insn (r2, tls_call);
3683 insn = get_insns ();
3684 end_sequence ();
3685
3686 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM_NTPOFF);
3687 temp = gen_reg_rtx (Pmode);
3688 emit_libcall_block (insn, temp, r2, new_rtx);
3689
3690 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3691 base = gen_reg_rtx (Pmode);
3692 s390_load_address (base, new_rtx);
3693
3694 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_DTPOFF);
3695 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3696 new_rtx = force_const_mem (Pmode, new_rtx);
3697 temp = gen_reg_rtx (Pmode);
3698 emit_move_insn (temp, new_rtx);
3699
3700 new_rtx = gen_rtx_PLUS (Pmode, base, temp);
3701 if (reg != 0)
3702 {
3703 s390_load_address (reg, new_rtx);
3704 new_rtx = reg;
3705 }
3706 break;
3707
3708 case TLS_MODEL_INITIAL_EXEC:
3709 if (flag_pic == 1)
3710 {
3711 /* Assume GOT offset < 4k. This is handled the same way
3712 in both 31- and 64-bit code. */
3713
3714 if (reload_in_progress || reload_completed)
3715 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3716
3717 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3718 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3719 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
3720 new_rtx = gen_const_mem (Pmode, new_rtx);
3721 temp = gen_reg_rtx (Pmode);
3722 emit_move_insn (temp, new_rtx);
3723 }
3724 else if (TARGET_CPU_ZARCH)
3725 {
3726 /* If the GOT offset might be >= 4k, we determine the position
3727 of the GOT entry via a PC-relative LARL. */
3728
3729 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3730 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3731 temp = gen_reg_rtx (Pmode);
3732 emit_move_insn (temp, new_rtx);
3733
3734 new_rtx = gen_const_mem (Pmode, temp);
3735 temp = gen_reg_rtx (Pmode);
3736 emit_move_insn (temp, new_rtx);
3737 }
3738 else if (flag_pic)
3739 {
3740 /* If the GOT offset might be >= 4k, we have to load it
3741 from the literal pool. */
3742
3743 if (reload_in_progress || reload_completed)
3744 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3745
3746 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3747 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3748 new_rtx = force_const_mem (Pmode, new_rtx);
3749 temp = gen_reg_rtx (Pmode);
3750 emit_move_insn (temp, new_rtx);
3751
3752 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3753 new_rtx = gen_const_mem (Pmode, new_rtx);
3754
3755 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
3756 temp = gen_reg_rtx (Pmode);
3757 emit_insn (gen_rtx_SET (VOIDmode, temp, new_rtx));
3758 }
3759 else
3760 {
3761 /* In position-dependent code, load the absolute address of
3762 the GOT entry from the literal pool. */
3763
3764 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3765 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3766 new_rtx = force_const_mem (Pmode, new_rtx);
3767 temp = gen_reg_rtx (Pmode);
3768 emit_move_insn (temp, new_rtx);
3769
3770 new_rtx = temp;
3771 new_rtx = gen_const_mem (Pmode, new_rtx);
3772 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
3773 temp = gen_reg_rtx (Pmode);
3774 emit_insn (gen_rtx_SET (VOIDmode, temp, new_rtx));
3775 }
3776
3777 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3778 if (reg != 0)
3779 {
3780 s390_load_address (reg, new_rtx);
3781 new_rtx = reg;
3782 }
3783 break;
3784
3785 case TLS_MODEL_LOCAL_EXEC:
3786 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3787 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3788 new_rtx = force_const_mem (Pmode, new_rtx);
3789 temp = gen_reg_rtx (Pmode);
3790 emit_move_insn (temp, new_rtx);
3791
3792 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3793 if (reg != 0)
3794 {
3795 s390_load_address (reg, new_rtx);
3796 new_rtx = reg;
3797 }
3798 break;
3799
3800 default:
3801 gcc_unreachable ();
3802 }
3803
3804 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == UNSPEC)
3805 {
3806 switch (XINT (XEXP (addr, 0), 1))
3807 {
3808 case UNSPEC_INDNTPOFF:
3809 gcc_assert (TARGET_CPU_ZARCH);
3810 new_rtx = addr;
3811 break;
3812
3813 default:
3814 gcc_unreachable ();
3815 }
3816 }
3817
3818 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
3819 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
3820 {
3821 new_rtx = XEXP (XEXP (addr, 0), 0);
3822 if (GET_CODE (new_rtx) != SYMBOL_REF)
3823 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3824
3825 new_rtx = legitimize_tls_address (new_rtx, reg);
3826 new_rtx = plus_constant (Pmode, new_rtx,
3827 INTVAL (XEXP (XEXP (addr, 0), 1)));
3828 new_rtx = force_operand (new_rtx, 0);
3829 }
3830
3831 else
3832 gcc_unreachable (); /* for now ... */
3833
3834 return new_rtx;
3835 }
3836
3837 /* Emit insns making the address in operands[1] valid for a standard
3838 move to operands[0]. operands[1] is replaced by an address which
3839 should be used instead of the former RTX to emit the move
3840 pattern. */
3841
3842 void
3843 emit_symbolic_move (rtx *operands)
3844 {
3845 rtx temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
3846
3847 if (GET_CODE (operands[0]) == MEM)
3848 operands[1] = force_reg (Pmode, operands[1]);
3849 else if (TLS_SYMBOLIC_CONST (operands[1]))
3850 operands[1] = legitimize_tls_address (operands[1], temp);
3851 else if (flag_pic)
3852 operands[1] = legitimize_pic_address (operands[1], temp);
3853 }
3854
3855 /* Try machine-dependent ways of modifying an illegitimate address X
3856 to be legitimate. If we find one, return the new, valid address.
3857
3858 OLDX is the address as it was before break_out_memory_refs was called.
3859 In some cases it is useful to look at this to decide what needs to be done.
3860
3861 MODE is the mode of the operand pointed to by X.
3862
3863 When -fpic is used, special handling is needed for symbolic references.
3864 See comments by legitimize_pic_address for details. */
3865
3866 static rtx
3867 s390_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
3868 enum machine_mode mode ATTRIBUTE_UNUSED)
3869 {
3870 rtx constant_term = const0_rtx;
3871
3872 if (TLS_SYMBOLIC_CONST (x))
3873 {
3874 x = legitimize_tls_address (x, 0);
3875
3876 if (s390_legitimate_address_p (mode, x, FALSE))
3877 return x;
3878 }
3879 else if (GET_CODE (x) == PLUS
3880 && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
3881 || TLS_SYMBOLIC_CONST (XEXP (x, 1))))
3882 {
3883 return x;
3884 }
3885 else if (flag_pic)
3886 {
3887 if (SYMBOLIC_CONST (x)
3888 || (GET_CODE (x) == PLUS
3889 && (SYMBOLIC_CONST (XEXP (x, 0))
3890 || SYMBOLIC_CONST (XEXP (x, 1)))))
3891 x = legitimize_pic_address (x, 0);
3892
3893 if (s390_legitimate_address_p (mode, x, FALSE))
3894 return x;
3895 }
3896
3897 x = eliminate_constant_term (x, &constant_term);
3898
3899 /* Optimize loading of large displacements by splitting them
3900 into the multiple of 4K and the rest; this allows the
3901 former to be CSE'd if possible.
3902
3903 Don't do this if the displacement is added to a register
3904 pointing into the stack frame, as the offsets will
3905 change later anyway. */
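/* For example, a displacement of 0x12345 is split into lower = 0x345
   and upper = 0x12000; the 0x12000 part is loaded into a register
   (and can be CSE'd across references), leaving the in-range 12-bit
   displacement 0x345.  */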
3906
3907 if (GET_CODE (constant_term) == CONST_INT
3908 && !TARGET_LONG_DISPLACEMENT
3909 && !DISP_IN_RANGE (INTVAL (constant_term))
3910 && !(REG_P (x) && REGNO_PTR_FRAME_P (REGNO (x))))
3911 {
3912 HOST_WIDE_INT lower = INTVAL (constant_term) & 0xfff;
3913 HOST_WIDE_INT upper = INTVAL (constant_term) ^ lower;
3914
3915 rtx temp = gen_reg_rtx (Pmode);
3916 rtx val = force_operand (GEN_INT (upper), temp);
3917 if (val != temp)
3918 emit_move_insn (temp, val);
3919
3920 x = gen_rtx_PLUS (Pmode, x, temp);
3921 constant_term = GEN_INT (lower);
3922 }
3923
3924 if (GET_CODE (x) == PLUS)
3925 {
3926 if (GET_CODE (XEXP (x, 0)) == REG)
3927 {
3928 rtx temp = gen_reg_rtx (Pmode);
3929 rtx val = force_operand (XEXP (x, 1), temp);
3930 if (val != temp)
3931 emit_move_insn (temp, val);
3932
3933 x = gen_rtx_PLUS (Pmode, XEXP (x, 0), temp);
3934 }
3935
3936 else if (GET_CODE (XEXP (x, 1)) == REG)
3937 {
3938 rtx temp = gen_reg_rtx (Pmode);
3939 rtx val = force_operand (XEXP (x, 0), temp);
3940 if (val != temp)
3941 emit_move_insn (temp, val);
3942
3943 x = gen_rtx_PLUS (Pmode, temp, XEXP (x, 1));
3944 }
3945 }
3946
3947 if (constant_term != const0_rtx)
3948 x = gen_rtx_PLUS (Pmode, x, constant_term);
3949
3950 return x;
3951 }
3952
3953 /* Try a machine-dependent way of reloading an illegitimate address AD
3954 operand. If we find one, push the reload and return the new address.
3955
3956 MODE is the mode of the enclosing MEM. OPNUM is the operand number
3957 and TYPE is the reload type of the current reload. */
3958
3959 rtx
3960 legitimize_reload_address (rtx ad, enum machine_mode mode ATTRIBUTE_UNUSED,
3961 int opnum, int type)
3962 {
3963 if (!optimize || TARGET_LONG_DISPLACEMENT)
3964 return NULL_RTX;
3965
3966 if (GET_CODE (ad) == PLUS)
3967 {
3968 rtx tem = simplify_binary_operation (PLUS, Pmode,
3969 XEXP (ad, 0), XEXP (ad, 1));
3970 if (tem)
3971 ad = tem;
3972 }
3973
3974 if (GET_CODE (ad) == PLUS
3975 && GET_CODE (XEXP (ad, 0)) == REG
3976 && GET_CODE (XEXP (ad, 1)) == CONST_INT
3977 && !DISP_IN_RANGE (INTVAL (XEXP (ad, 1))))
3978 {
3979 HOST_WIDE_INT lower = INTVAL (XEXP (ad, 1)) & 0xfff;
3980 HOST_WIDE_INT upper = INTVAL (XEXP (ad, 1)) ^ lower;
3981 rtx cst, tem, new_rtx;
3982
3983 cst = GEN_INT (upper);
3984 if (!legitimate_reload_constant_p (cst))
3985 cst = force_const_mem (Pmode, cst);
3986
3987 tem = gen_rtx_PLUS (Pmode, XEXP (ad, 0), cst);
3988 new_rtx = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));
3989
3990 push_reload (XEXP (tem, 1), 0, &XEXP (tem, 1), 0,
3991 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
3992 opnum, (enum reload_type) type);
3993 return new_rtx;
3994 }
3995
3996 return NULL_RTX;
3997 }
3998
3999 /* Emit code to move LEN bytes from SRC to DST. */
4000
4001 bool
4002 s390_expand_movmem (rtx dst, rtx src, rtx len)
4003 {
4004 /* When tuning for z10 or higher we rely on the Glibc functions to
4005 do the right thing. Only for constant lengths below 64k do we
4006 generate inline code. */
4007 if (s390_tune >= PROCESSOR_2097_Z10
4008 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
4009 return false;
4010
4011 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
4012 {
4013 if (INTVAL (len) > 0)
4014 emit_insn (gen_movmem_short (dst, src, GEN_INT (INTVAL (len) - 1)));
4015 }
4016
4017 else if (TARGET_MVCLE)
4018 {
4019 emit_insn (gen_movmem_long (dst, src, convert_to_mode (Pmode, len, 1)));
4020 }
4021
4022 else
4023 {
4024 rtx dst_addr, src_addr, count, blocks, temp;
4025 rtx loop_start_label = gen_label_rtx ();
4026 rtx loop_end_label = gen_label_rtx ();
4027 rtx end_label = gen_label_rtx ();
4028 enum machine_mode mode;
4029
4030 mode = GET_MODE (len);
4031 if (mode == VOIDmode)
4032 mode = Pmode;
4033
4034 dst_addr = gen_reg_rtx (Pmode);
4035 src_addr = gen_reg_rtx (Pmode);
4036 count = gen_reg_rtx (mode);
4037 blocks = gen_reg_rtx (mode);
4038
4039 convert_move (count, len, 1);
4040 emit_cmp_and_jump_insns (count, const0_rtx,
4041 EQ, NULL_RTX, mode, 1, end_label);
4042
4043 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
4044 emit_move_insn (src_addr, force_operand (XEXP (src, 0), NULL_RTX));
4045 dst = change_address (dst, VOIDmode, dst_addr);
4046 src = change_address (src, VOIDmode, src_addr);
4047
4048 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4049 OPTAB_DIRECT);
4050 if (temp != count)
4051 emit_move_insn (count, temp);
4052
4053 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4054 OPTAB_DIRECT);
4055 if (temp != blocks)
4056 emit_move_insn (blocks, temp);
4057
4058 emit_cmp_and_jump_insns (blocks, const0_rtx,
4059 EQ, NULL_RTX, mode, 1, loop_end_label);
4060
4061 emit_label (loop_start_label);
4062
4063 if (TARGET_Z10
4064 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 768))
4065 {
4066 rtx prefetch;
4067
4068 /* Issue a read prefetch for the +3 cache line. */
4069 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, src_addr, GEN_INT (768)),
4070 const0_rtx, const0_rtx);
4071 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4072 emit_insn (prefetch);
4073
4074 /* Issue a write prefetch for the +3 cache line. */
4075 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (768)),
4076 const1_rtx, const0_rtx);
4077 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4078 emit_insn (prefetch);
4079 }
4080
4081 emit_insn (gen_movmem_short (dst, src, GEN_INT (255)));
4082 s390_load_address (dst_addr,
4083 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
4084 s390_load_address (src_addr,
4085 gen_rtx_PLUS (Pmode, src_addr, GEN_INT (256)));
4086
4087 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4088 OPTAB_DIRECT);
4089 if (temp != blocks)
4090 emit_move_insn (blocks, temp);
4091
4092 emit_cmp_and_jump_insns (blocks, const0_rtx,
4093 EQ, NULL_RTX, mode, 1, loop_end_label);
4094
4095 emit_jump (loop_start_label);
4096 emit_label (loop_end_label);
4097
4098 emit_insn (gen_movmem_short (dst, src,
4099 convert_to_mode (Pmode, count, 1)));
4100 emit_label (end_label);
4101 }
4102 return true;
4103 }
4104
4105 /* Emit code to set LEN bytes at DST to VAL.
4106 Make use of clrmem if VAL is zero. */
4107
4108 void
4109 s390_expand_setmem (rtx dst, rtx len, rtx val)
4110 {
4111 if (GET_CODE (len) == CONST_INT && INTVAL (len) == 0)
4112 return;
4113
4114 gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);
4115
4116 if (GET_CODE (len) == CONST_INT && INTVAL (len) > 0 && INTVAL (len) <= 257)
4117 {
4118 if (val == const0_rtx && INTVAL (len) <= 256)
4119 emit_insn (gen_clrmem_short (dst, GEN_INT (INTVAL (len) - 1)));
4120 else
4121 {
4122 /* Initialize memory by storing the first byte. */
4123 emit_move_insn (adjust_address (dst, QImode, 0), val);
4124
4125 if (INTVAL (len) > 1)
4126 {
4127 /* Initiate 1 byte overlap move.
4128 The first byte of DST is propagated through DSTP1.
4129 Prepare a movmem for: DST+1 = DST (length = LEN - 1).
4130 DST is set to size 1 so the rest of the memory location
4131 does not count as source operand. */
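/* For example, with LEN = 4 and VAL = 0xab: the first-byte store
   above puts 0xab at DST, and the movmem below copies the three
   bytes DST..DST+2 onto DST+1..DST+3; since the copy proceeds
   byte by byte, 0xab is propagated through the whole block.  */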
4132 rtx dstp1 = adjust_address (dst, VOIDmode, 1);
4133 set_mem_size (dst, 1);
4134
4135 emit_insn (gen_movmem_short (dstp1, dst,
4136 GEN_INT (INTVAL (len) - 2)));
4137 }
4138 }
4139 }
4140
4141 else if (TARGET_MVCLE)
4142 {
4143 val = force_not_mem (convert_modes (Pmode, QImode, val, 1));
4144 emit_insn (gen_setmem_long (dst, convert_to_mode (Pmode, len, 1), val));
4145 }
4146
4147 else
4148 {
4149 rtx dst_addr, count, blocks, temp, dstp1 = NULL_RTX;
4150 rtx loop_start_label = gen_label_rtx ();
4151 rtx loop_end_label = gen_label_rtx ();
4152 rtx end_label = gen_label_rtx ();
4153 enum machine_mode mode;
4154
4155 mode = GET_MODE (len);
4156 if (mode == VOIDmode)
4157 mode = Pmode;
4158
4159 dst_addr = gen_reg_rtx (Pmode);
4160 count = gen_reg_rtx (mode);
4161 blocks = gen_reg_rtx (mode);
4162
4163 convert_move (count, len, 1);
4164 emit_cmp_and_jump_insns (count, const0_rtx,
4165 EQ, NULL_RTX, mode, 1, end_label);
4166
4167 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
4168 dst = change_address (dst, VOIDmode, dst_addr);
4169
4170 if (val == const0_rtx)
4171 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4172 OPTAB_DIRECT);
4173 else
4174 {
4175 dstp1 = adjust_address (dst, VOIDmode, 1);
4176 set_mem_size (dst, 1);
4177
4178 /* Initialize memory by storing the first byte. */
4179 emit_move_insn (adjust_address (dst, QImode, 0), val);
4180
4181 /* If count is 1 we are done. */
4182 emit_cmp_and_jump_insns (count, const1_rtx,
4183 EQ, NULL_RTX, mode, 1, end_label);
4184
4185 temp = expand_binop (mode, add_optab, count, GEN_INT (-2), count, 1,
4186 OPTAB_DIRECT);
4187 }
4188 if (temp != count)
4189 emit_move_insn (count, temp);
4190
4191 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4192 OPTAB_DIRECT);
4193 if (temp != blocks)
4194 emit_move_insn (blocks, temp);
4195
4196 emit_cmp_and_jump_insns (blocks, const0_rtx,
4197 EQ, NULL_RTX, mode, 1, loop_end_label);
4198
4199 emit_label (loop_start_label);
4200
4201 if (TARGET_Z10
4202 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 1024))
4203 {
4204 /* Issue a write prefetch for the +4 cache line. */
4205 rtx prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr,
4206 GEN_INT (1024)),
4207 const1_rtx, const0_rtx);
4208 emit_insn (prefetch);
4209 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4210 }
4211
4212 if (val == const0_rtx)
4213 emit_insn (gen_clrmem_short (dst, GEN_INT (255)));
4214 else
4215 emit_insn (gen_movmem_short (dstp1, dst, GEN_INT (255)));
4216 s390_load_address (dst_addr,
4217 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
4218
4219 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4220 OPTAB_DIRECT);
4221 if (temp != blocks)
4222 emit_move_insn (blocks, temp);
4223
4224 emit_cmp_and_jump_insns (blocks, const0_rtx,
4225 EQ, NULL_RTX, mode, 1, loop_end_label);
4226
4227 emit_jump (loop_start_label);
4228 emit_label (loop_end_label);
4229
4230 if (val == const0_rtx)
4231 emit_insn (gen_clrmem_short (dst, convert_to_mode (Pmode, count, 1)));
4232 else
4233 emit_insn (gen_movmem_short (dstp1, dst, convert_to_mode (Pmode, count, 1)));
4234 emit_label (end_label);
4235 }
4236 }
4237
4238 /* Emit code to compare LEN bytes at OP0 with those at OP1,
4239 and return the result in TARGET. */
4240
4241 bool
4242 s390_expand_cmpmem (rtx target, rtx op0, rtx op1, rtx len)
4243 {
4244 rtx ccreg = gen_rtx_REG (CCUmode, CC_REGNUM);
4245 rtx tmp;
4246
4247 /* When tuning for z10 or higher we rely on the Glibc functions to
4248 do the right thing. Only for constant lengths below 64k do we
4249 generate inline code. */
4250 if (s390_tune >= PROCESSOR_2097_Z10
4251 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
4252 return false;
4253
4254 /* As the result of CMPINT is inverted compared to what we need,
4255 we have to swap the operands. */
4256 tmp = op0; op0 = op1; op1 = tmp;
4257
4258 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
4259 {
4260 if (INTVAL (len) > 0)
4261 {
4262 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (INTVAL (len) - 1)));
4263 emit_insn (gen_cmpint (target, ccreg));
4264 }
4265 else
4266 emit_move_insn (target, const0_rtx);
4267 }
4268 else if (TARGET_MVCLE)
4269 {
4270 emit_insn (gen_cmpmem_long (op0, op1, convert_to_mode (Pmode, len, 1)));
4271 emit_insn (gen_cmpint (target, ccreg));
4272 }
4273 else
4274 {
4275 rtx addr0, addr1, count, blocks, temp;
4276 rtx loop_start_label = gen_label_rtx ();
4277 rtx loop_end_label = gen_label_rtx ();
4278 rtx end_label = gen_label_rtx ();
4279 enum machine_mode mode;
4280
4281 mode = GET_MODE (len);
4282 if (mode == VOIDmode)
4283 mode = Pmode;
4284
4285 addr0 = gen_reg_rtx (Pmode);
4286 addr1 = gen_reg_rtx (Pmode);
4287 count = gen_reg_rtx (mode);
4288 blocks = gen_reg_rtx (mode);
4289
4290 convert_move (count, len, 1);
4291 emit_cmp_and_jump_insns (count, const0_rtx,
4292 EQ, NULL_RTX, mode, 1, end_label);
4293
4294 emit_move_insn (addr0, force_operand (XEXP (op0, 0), NULL_RTX));
4295 emit_move_insn (addr1, force_operand (XEXP (op1, 0), NULL_RTX));
4296 op0 = change_address (op0, VOIDmode, addr0);
4297 op1 = change_address (op1, VOIDmode, addr1);
4298
4299 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4300 OPTAB_DIRECT);
4301 if (temp != count)
4302 emit_move_insn (count, temp);
4303
4304 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4305 OPTAB_DIRECT);
4306 if (temp != blocks)
4307 emit_move_insn (blocks, temp);
4308
4309 emit_cmp_and_jump_insns (blocks, const0_rtx,
4310 EQ, NULL_RTX, mode, 1, loop_end_label);
4311
4312 emit_label (loop_start_label);
4313
4314 if (TARGET_Z10
4315 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 512))
4316 {
4317 rtx prefetch;
4318
4319 /* Issue a read prefetch for the +2 cache line of operand 1. */
4320 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr0, GEN_INT (512)),
4321 const0_rtx, const0_rtx);
4322 emit_insn (prefetch);
4323 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4324
4325 /* Issue a read prefetch for the +2 cache line of operand 2. */
4326 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr1, GEN_INT (512)),
4327 const0_rtx, const0_rtx);
4328 emit_insn (prefetch);
4329 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4330 }
4331
4332 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (255)));
4333 temp = gen_rtx_NE (VOIDmode, ccreg, const0_rtx);
4334 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
4335 gen_rtx_LABEL_REF (VOIDmode, end_label), pc_rtx);
4336 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
4337 emit_jump_insn (temp);
4338
4339 s390_load_address (addr0,
4340 gen_rtx_PLUS (Pmode, addr0, GEN_INT (256)));
4341 s390_load_address (addr1,
4342 gen_rtx_PLUS (Pmode, addr1, GEN_INT (256)));
4343
4344 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4345 OPTAB_DIRECT);
4346 if (temp != blocks)
4347 emit_move_insn (blocks, temp);
4348
4349 emit_cmp_and_jump_insns (blocks, const0_rtx,
4350 EQ, NULL_RTX, mode, 1, loop_end_label);
4351
4352 emit_jump (loop_start_label);
4353 emit_label (loop_end_label);
4354
4355 emit_insn (gen_cmpmem_short (op0, op1,
4356 convert_to_mode (Pmode, count, 1)));
4357 emit_label (end_label);
4358
4359 emit_insn (gen_cmpint (target, ccreg));
4360 }
4361 return true;
4362 }
4363
4364
4365 /* Expand conditional increment or decrement using alc/slb instructions.
4366 Should generate code setting DST to either SRC or SRC + INCREMENT,
4367 depending on the result of the comparison CMP_OP0 CMP_CODE CMP_OP1.
4368 Returns true if successful, false otherwise.
4369
4370 That makes it possible to implement some if-constructs without jumps e.g.:
4371 (borrow = CC0 | CC1 and carry = CC2 | CC3)
4372 unsigned int a, b, c;
4373 if (a < b) c++; -> CCU b > a -> CC2; c += carry;
4374 if (a < b) c--; -> CCL3 a - b -> borrow; c -= borrow;
4375 if (a <= b) c++; -> CCL3 b - a -> borrow; c += carry;
4376 if (a <= b) c--; -> CCU a <= b -> borrow; c -= borrow;
4377
4378 Checks for EQ and NE with a nonzero value need an additional xor e.g.:
4379 if (a == b) c++; -> CCL3 a ^= b; 0 - a -> borrow; c += carry;
4380 if (a == b) c--; -> CCU a ^= b; a <= 0 -> CC0 | CC1; c -= borrow;
4381 if (a != b) c++; -> CCU a ^= b; a > 0 -> CC2; c += carry;
4382 if (a != b) c--; -> CCL3 a ^= b; 0 - a -> borrow; c -= borrow; */
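/* A rough sketch of the ALC path: for "if (a < b) c++;" the expansion
   below emits a CCUmode compare of b against a and then a single
   parallel of the form
     (parallel [(set c (plus (plus (gtu (reg CC) (const_int 0)) c)
                             (const_int 0)))
                (clobber (reg CC))])
   which is matched by the add-with-carry patterns.  */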
4383
4384 bool
4385 s390_expand_addcc (enum rtx_code cmp_code, rtx cmp_op0, rtx cmp_op1,
4386 rtx dst, rtx src, rtx increment)
4387 {
4388 enum machine_mode cmp_mode;
4389 enum machine_mode cc_mode;
4390 rtx op_res;
4391 rtx insn;
4392 rtvec p;
4393 int ret;
4394
4395 if ((GET_MODE (cmp_op0) == SImode || GET_MODE (cmp_op0) == VOIDmode)
4396 && (GET_MODE (cmp_op1) == SImode || GET_MODE (cmp_op1) == VOIDmode))
4397 cmp_mode = SImode;
4398 else if ((GET_MODE (cmp_op0) == DImode || GET_MODE (cmp_op0) == VOIDmode)
4399 && (GET_MODE (cmp_op1) == DImode || GET_MODE (cmp_op1) == VOIDmode))
4400 cmp_mode = DImode;
4401 else
4402 return false;
4403
4404 /* Try ADD LOGICAL WITH CARRY. */
4405 if (increment == const1_rtx)
4406 {
4407 /* Determine CC mode to use. */
4408 if (cmp_code == EQ || cmp_code == NE)
4409 {
4410 if (cmp_op1 != const0_rtx)
4411 {
4412 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
4413 NULL_RTX, 0, OPTAB_WIDEN);
4414 cmp_op1 = const0_rtx;
4415 }
4416
4417 cmp_code = cmp_code == EQ ? LEU : GTU;
4418 }
4419
4420 if (cmp_code == LTU || cmp_code == LEU)
4421 {
4422 rtx tem = cmp_op0;
4423 cmp_op0 = cmp_op1;
4424 cmp_op1 = tem;
4425 cmp_code = swap_condition (cmp_code);
4426 }
4427
4428 switch (cmp_code)
4429 {
4430 case GTU:
4431 cc_mode = CCUmode;
4432 break;
4433
4434 case GEU:
4435 cc_mode = CCL3mode;
4436 break;
4437
4438 default:
4439 return false;
4440 }
4441
4442 /* Emit comparison instruction pattern. */
4443 if (!register_operand (cmp_op0, cmp_mode))
4444 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4445
4446 insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
4447 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4448 /* We use insn_invalid_p here to add clobbers if required. */
4449 ret = insn_invalid_p (emit_insn (insn), false);
4450 gcc_assert (!ret);
4451
4452 /* Emit ALC instruction pattern. */
4453 op_res = gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4454 gen_rtx_REG (cc_mode, CC_REGNUM),
4455 const0_rtx);
4456
4457 if (src != const0_rtx)
4458 {
4459 if (!register_operand (src, GET_MODE (dst)))
4460 src = force_reg (GET_MODE (dst), src);
4461
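/* The extra PLUS with const0_rtx below gives the RTL the
   three-operand (plus (plus cc_op src) 0) shape, presumably so
   that it matches the add-with-carry insn patterns in s390.md.  */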
4462 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, src);
4463 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, const0_rtx);
4464 }
4465
4466 p = rtvec_alloc (2);
4467 RTVEC_ELT (p, 0) =
4468 gen_rtx_SET (VOIDmode, dst, op_res);
4469 RTVEC_ELT (p, 1) =
4470 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4471 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
4472
4473 return true;
4474 }
4475
4476 /* Try SUBTRACT LOGICAL WITH BORROW. */
4477 if (increment == constm1_rtx)
4478 {
4479 /* Determine CC mode to use. */
4480 if (cmp_code == EQ || cmp_code == NE)
4481 {
4482 if (cmp_op1 != const0_rtx)
4483 {
4484 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
4485 NULL_RTX, 0, OPTAB_WIDEN);
4486 cmp_op1 = const0_rtx;
4487 }
4488
4489 cmp_code = cmp_code == EQ ? LEU : GTU;
4490 }
4491
4492 if (cmp_code == GTU || cmp_code == GEU)
4493 {
4494 rtx tem = cmp_op0;
4495 cmp_op0 = cmp_op1;
4496 cmp_op1 = tem;
4497 cmp_code = swap_condition (cmp_code);
4498 }
4499
4500 switch (cmp_code)
4501 {
4502 case LEU:
4503 cc_mode = CCUmode;
4504 break;
4505
4506 case LTU:
4507 cc_mode = CCL3mode;
4508 break;
4509
4510 default:
4511 return false;
4512 }
4513
4514 /* Emit comparison instruction pattern. */
4515 if (!register_operand (cmp_op0, cmp_mode))
4516 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4517
4518 insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
4519 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4520 /* We use insn_invalid_p here to add clobbers if required. */
4521 ret = insn_invalid_p (emit_insn (insn), false);
4522 gcc_assert (!ret);
4523
4524 /* Emit SLB instruction pattern. */
4525 if (!register_operand (src, GET_MODE (dst)))
4526 src = force_reg (GET_MODE (dst), src);
4527
4528 op_res = gen_rtx_MINUS (GET_MODE (dst),
4529 gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
4530 gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4531 gen_rtx_REG (cc_mode, CC_REGNUM),
4532 const0_rtx));
4533 p = rtvec_alloc (2);
4534 RTVEC_ELT (p, 0) =
4535 gen_rtx_SET (VOIDmode, dst, op_res);
4536 RTVEC_ELT (p, 1) =
4537 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4538 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
4539
4540 return true;
4541 }
4542
4543 return false;
4544 }
4545
4546 /* Expand code for the insv template. Return true if successful. */
4547
4548 bool
4549 s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
4550 {
4551 int bitsize = INTVAL (op1);
4552 int bitpos = INTVAL (op2);
4553 enum machine_mode mode = GET_MODE (dest);
4554 enum machine_mode smode;
4555 int smode_bsize, mode_bsize;
4556 rtx op, clobber;
4557
4558 /* Generate INSERT IMMEDIATE (IILL et al). */
4559 /* (set (ze (reg)) (const_int)). */
4560 if (TARGET_ZARCH
4561 && register_operand (dest, word_mode)
4562 && (bitpos % 16) == 0
4563 && (bitsize % 16) == 0
4564 && const_int_operand (src, VOIDmode))
4565 {
4566 HOST_WIDE_INT val = INTVAL (src);
4567 int regpos = bitpos + bitsize;
4568
4569 while (regpos > bitpos)
4570 {
4571 enum machine_mode putmode;
4572 int putsize;
4573
4574 if (TARGET_EXTIMM && (regpos % 32 == 0) && (regpos >= bitpos + 32))
4575 putmode = SImode;
4576 else
4577 putmode = HImode;
4578
4579 putsize = GET_MODE_BITSIZE (putmode);
4580 regpos -= putsize;
4581 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
4582 GEN_INT (putsize),
4583 GEN_INT (regpos)),
4584 gen_int_mode (val, putmode));
4585 val >>= putsize;
4586 }
4587 gcc_assert (regpos == bitpos);
4588 return true;
4589 }
4590
4591 smode = smallest_mode_for_size (bitsize, MODE_INT);
4592 smode_bsize = GET_MODE_BITSIZE (smode);
4593 mode_bsize = GET_MODE_BITSIZE (mode);
4594
4595 /* Generate STORE CHARACTERS UNDER MASK (STCM et al). */
4596 if (bitpos == 0
4597 && (bitsize % BITS_PER_UNIT) == 0
4598 && MEM_P (dest)
4599 && (register_operand (src, word_mode)
4600 || const_int_operand (src, VOIDmode)))
4601 {
4602 /* Emit standard pattern if possible. */
4603 if (smode_bsize == bitsize)
4604 {
4605 emit_move_insn (adjust_address (dest, smode, 0),
4606 gen_lowpart (smode, src));
4607 return true;
4608 }
4609
4610 /* (set (ze (mem)) (const_int)). */
4611 else if (const_int_operand (src, VOIDmode))
4612 {
4613 int size = bitsize / BITS_PER_UNIT;
4614 rtx src_mem = adjust_address (force_const_mem (word_mode, src),
4615 BLKmode,
4616 UNITS_PER_WORD - size);
4617
4618 dest = adjust_address (dest, BLKmode, 0);
4619 set_mem_size (dest, size);
4620 s390_expand_movmem (dest, src_mem, GEN_INT (size));
4621 return true;
4622 }
4623
4624 /* (set (ze (mem)) (reg)). */
4625 else if (register_operand (src, word_mode))
4626 {
4627 if (bitsize <= 32)
4628 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, op1,
4629 const0_rtx), src);
4630 else
4631 {
4632 /* Emit st,stcmh sequence. */
4633 int stcmh_width = bitsize - 32;
4634 int size = stcmh_width / BITS_PER_UNIT;
4635
4636 emit_move_insn (adjust_address (dest, SImode, size),
4637 gen_lowpart (SImode, src));
4638 set_mem_size (dest, size);
4639 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
4640 GEN_INT (stcmh_width),
4641 const0_rtx),
4642 gen_rtx_LSHIFTRT (word_mode, src, GEN_INT (32)));
4643 }
4644 return true;
4645 }
4646 }
4647
4648 /* Generate INSERT CHARACTERS UNDER MASK (IC, ICM et al). */
4649 if ((bitpos % BITS_PER_UNIT) == 0
4650 && (bitsize % BITS_PER_UNIT) == 0
4651 && (bitpos & 32) == ((bitpos + bitsize - 1) & 32)
4652 && MEM_P (src)
4653 && (mode == DImode || mode == SImode)
4654 && register_operand (dest, mode))
4655 {
4656 /* Emit a strict_low_part pattern if possible. */
4657 if (smode_bsize == bitsize && bitpos == mode_bsize - smode_bsize)
4658 {
4659 op = gen_rtx_STRICT_LOW_PART (VOIDmode, gen_lowpart (smode, dest));
4660 op = gen_rtx_SET (VOIDmode, op, gen_lowpart (smode, src));
4661 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4662 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
4663 return true;
4664 }
4665
4666 /* ??? There are more powerful versions of ICM that are not
4667 completely represented in the md file. */
4668 }
4669
4670 /* For z10, generate ROTATE THEN INSERT SELECTED BITS (RISBG et al). */
4671 if (TARGET_Z10 && (mode == DImode || mode == SImode))
4672 {
4673 enum machine_mode mode_s = GET_MODE (src);
4674
4675 if (mode_s == VOIDmode)
4676 {
4677 /* Assume const_int etc already in the proper mode. */
4678 src = force_reg (mode, src);
4679 }
4680 else if (mode_s != mode)
4681 {
4682 gcc_assert (GET_MODE_BITSIZE (mode_s) >= bitsize);
4683 src = force_reg (mode_s, src);
4684 src = gen_lowpart (mode, src);
4685 }
4686
4687 op = gen_rtx_ZERO_EXTRACT (mode, dest, op1, op2);
4688 op = gen_rtx_SET (VOIDmode, op, src);
4689 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4690 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
4691
4692 return true;
4693 }
4694
4695 return false;
4696 }
4697
4698 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic which returns a
4699 register that holds VAL of mode MODE shifted by COUNT bits. */
4700
4701 static inline rtx
4702 s390_expand_mask_and_shift (rtx val, enum machine_mode mode, rtx count)
4703 {
4704 val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
4705 NULL_RTX, 1, OPTAB_DIRECT);
4706 return expand_simple_binop (SImode, ASHIFT, val, count,
4707 NULL_RTX, 1, OPTAB_DIRECT);
4708 }
4709
4710 /* Structure to hold the initial parameters for a compare_and_swap operation
4711 in HImode and QImode. */
4712
4713 struct alignment_context
4714 {
4715 rtx memsi; /* SI aligned memory location. */
4716 rtx shift; /* Bit offset with regard to lsb. */
4717 rtx modemask; /* Mask of the HQImode shifted by SHIFT bits. */
4718 rtx modemaski; /* ~modemask */
4719 bool aligned; /* True if memory is aligned, false else. */
4720 };
4721
4722 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize
4723 structure AC so that the access can be simplified transparently when the
4724 memory alignment is known to be at least 32 bits. MEM is the memory location
4725 for the actual operation and MODE its mode. */
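/* For example, for a QImode operation whose address has byte offset 1
   within its SImode word: memsi addresses the word at (addr & -4),
   byteoffset is 1, and shift becomes ((4 - 1) - 1) * 8 = 16 bits, the
   position of that byte within the big-endian word.  */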
4726
4727 static void
4728 init_alignment_context (struct alignment_context *ac, rtx mem,
4729 enum machine_mode mode)
4730 {
4731 ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
4732 ac->aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));
4733
4734 if (ac->aligned)
4735 ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned. */
4736 else
4737 {
4738 /* Alignment is unknown. */
4739 rtx byteoffset, addr, align;
4740
4741 /* Force the address into a register. */
4742 addr = force_reg (Pmode, XEXP (mem, 0));
4743
4744 /* Align it to SImode. */
4745 align = expand_simple_binop (Pmode, AND, addr,
4746 GEN_INT (-GET_MODE_SIZE (SImode)),
4747 NULL_RTX, 1, OPTAB_DIRECT);
4748 /* Generate MEM. */
4749 ac->memsi = gen_rtx_MEM (SImode, align);
4750 MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
4751 set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
4752 set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));
4753
4754 /* Calculate shiftcount. */
4755 byteoffset = expand_simple_binop (Pmode, AND, addr,
4756 GEN_INT (GET_MODE_SIZE (SImode) - 1),
4757 NULL_RTX, 1, OPTAB_DIRECT);
4758 /* As we already have some offset, evaluate the remaining distance. */
4759 ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
4760 NULL_RTX, 1, OPTAB_DIRECT);
4761 }
4762
4763 /* Shift is the byte count, but we need the bitcount. */
4764 ac->shift = expand_simple_binop (SImode, ASHIFT, ac->shift, GEN_INT (3),
4765 NULL_RTX, 1, OPTAB_DIRECT);
4766
4767 /* Calculate masks. */
4768 ac->modemask = expand_simple_binop (SImode, ASHIFT,
4769 GEN_INT (GET_MODE_MASK (mode)),
4770 ac->shift, NULL_RTX, 1, OPTAB_DIRECT);
4771 ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask,
4772 NULL_RTX, 1);
4773 }
4774
4775 /* A subroutine of s390_expand_cs_hqi. Insert INS into VAL. If possible,
4776 use a single insv insn into SEQ2. Otherwise, put prep insns in SEQ1 and
4777 perform the merge in SEQ2. */
4778
4779 static rtx
4780 s390_two_part_insv (struct alignment_context *ac, rtx *seq1, rtx *seq2,
4781 enum machine_mode mode, rtx val, rtx ins)
4782 {
4783 rtx tmp;
4784
4785 if (ac->aligned)
4786 {
4787 start_sequence ();
4788 tmp = copy_to_mode_reg (SImode, val);
4789 if (s390_expand_insv (tmp, GEN_INT (GET_MODE_BITSIZE (mode)),
4790 const0_rtx, ins))
4791 {
4792 *seq1 = NULL;
4793 *seq2 = get_insns ();
4794 end_sequence ();
4795 return tmp;
4796 }
4797 end_sequence ();
4798 }
4799
4800 /* Failed to use insv. Generate a two part shift and mask. */
4801 start_sequence ();
4802 tmp = s390_expand_mask_and_shift (ins, mode, ac->shift);
4803 *seq1 = get_insns ();
4804 end_sequence ();
4805
4806 start_sequence ();
4807 tmp = expand_simple_binop (SImode, IOR, tmp, val, NULL_RTX, 1, OPTAB_DIRECT);
4808 *seq2 = get_insns ();
4809 end_sequence ();
4810
4811 return tmp;
4812 }
4813
4814 /* Expand an atomic compare and swap operation for HImode and QImode. MEM is
4815 the memory location, CMP the old value to compare MEM with, and NEW_RTX the
4816 value to set if CMP == MEM. */
4817
4818 void
4819 s390_expand_cs_hqi (enum machine_mode mode, rtx btarget, rtx vtarget, rtx mem,
4820 rtx cmp, rtx new_rtx, bool is_weak)
4821 {
4822 struct alignment_context ac;
4823 rtx cmpv, newv, val, cc, seq0, seq1, seq2, seq3;
4824 rtx res = gen_reg_rtx (SImode);
4825 rtx csloop = NULL, csend = NULL;
4826
4827 gcc_assert (MEM_P (mem));
4828
4829 init_alignment_context (&ac, mem, mode);
4830
4831 /* Load full word. Subsequent loads are performed by CS. */
4832 val = expand_simple_binop (SImode, AND, ac.memsi, ac.modemaski,
4833 NULL_RTX, 1, OPTAB_DIRECT);
4834
4835 /* Prepare insertions of cmp and new_rtx into the loaded value. When
4836 possible, we try to use insv to make this happen efficiently. If
4837 that fails we'll generate code both inside and outside the loop. */
4838 cmpv = s390_two_part_insv (&ac, &seq0, &seq2, mode, val, cmp);
4839 newv = s390_two_part_insv (&ac, &seq1, &seq3, mode, val, new_rtx);
4840
4841 if (seq0)
4842 emit_insn (seq0);
4843 if (seq1)
4844 emit_insn (seq1);
4845
4846 /* Start CS loop. */
4847 if (!is_weak)
4848 {
4849 /* Begin assuming success. */
4850 emit_move_insn (btarget, const1_rtx);
4851
4852 csloop = gen_label_rtx ();
4853 csend = gen_label_rtx ();
4854 emit_label (csloop);
4855 }
4856
4857 /* val = "<mem>00..0<mem>"
4858 * cmp = "00..0<cmp>00..0"
4859 * new = "00..0<new>00..0"
4860 */
4861
4862 emit_insn (seq2);
4863 emit_insn (seq3);
4864
4865 cc = s390_emit_compare_and_swap (EQ, res, ac.memsi, cmpv, newv);
4866 if (is_weak)
4867 emit_insn (gen_cstorecc4 (btarget, cc, XEXP (cc, 0), XEXP (cc, 1)));
4868 else
4869 {
4870 rtx tmp;
4871
4872 /* Jump to end if we're done (likely?). */
4873 s390_emit_jump (csend, cc);
4874
4875 /* Check for changes outside mode, and loop internal if so.
4876 Arrange the moves so that the compare is adjacent to the
4877 branch so that we can generate CRJ. */
4878 tmp = copy_to_reg (val);
4879 force_expand_binop (SImode, and_optab, res, ac.modemaski, val,
4880 1, OPTAB_DIRECT);
4881 cc = s390_emit_compare (NE, val, tmp);
4882 s390_emit_jump (csloop, cc);
4883
4884 /* Failed. */
4885 emit_move_insn (btarget, const0_rtx);
4886 emit_label (csend);
4887 }
4888
4889 /* Return the correct part of the bitfield. */
4890 convert_move (vtarget, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
4891 NULL_RTX, 1, OPTAB_DIRECT), 1);
4892 }
4893
4894 /* Expand an atomic operation CODE of mode MODE. MEM is the memory location
4895 and VAL the value to play with. If AFTER is true then store the value
4896 MEM holds after the operation, if AFTER is false then store the value MEM
4897 holds before the operation. If TARGET is zero then discard that value, else
4898 store it to TARGET. */
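/* For example, a QImode atomic IOR is performed as a compare-and-swap
   loop on the containing aligned SImode word: the word is loaded once,
   the masked and shifted VAL is ORed into a copy, and COMPARE AND SWAP
   retries until the word in memory has not been changed concurrently.  */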
4899
4900 void
4901 s390_expand_atomic (enum machine_mode mode, enum rtx_code code,
4902 rtx target, rtx mem, rtx val, bool after)
4903 {
4904 struct alignment_context ac;
4905 rtx cmp;
4906 rtx new_rtx = gen_reg_rtx (SImode);
4907 rtx orig = gen_reg_rtx (SImode);
4908 rtx csloop = gen_label_rtx ();
4909
4910 gcc_assert (!target || register_operand (target, VOIDmode));
4911 gcc_assert (MEM_P (mem));
4912
4913 init_alignment_context (&ac, mem, mode);
4914
4915 /* Shift val to the correct bit positions.
4916 Preserve "icm", but prevent "ex icm". */
4917 if (!(ac.aligned && code == SET && MEM_P (val)))
4918 val = s390_expand_mask_and_shift (val, mode, ac.shift);
4919
4920 /* Further preparation insns. */
4921 if (code == PLUS || code == MINUS)
4922 emit_move_insn (orig, val);
4923 else if (code == MULT || code == AND) /* val = "11..1<val>11..1" */
4924 val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
4925 NULL_RTX, 1, OPTAB_DIRECT);
4926
4927 /* Load full word. Subsequent loads are performed by CS. */
4928 cmp = force_reg (SImode, ac.memsi);
4929
4930 /* Start CS loop. */
4931 emit_label (csloop);
4932 emit_move_insn (new_rtx, cmp);
4933
4934 /* Patch new with val at correct position. */
4935 switch (code)
4936 {
4937 case PLUS:
4938 case MINUS:
4939 val = expand_simple_binop (SImode, code, new_rtx, orig,
4940 NULL_RTX, 1, OPTAB_DIRECT);
4941 val = expand_simple_binop (SImode, AND, val, ac.modemask,
4942 NULL_RTX, 1, OPTAB_DIRECT);
4943 /* FALLTHRU */
4944 case SET:
4945 if (ac.aligned && MEM_P (val))
4946 store_bit_field (new_rtx, GET_MODE_BITSIZE (mode), 0,
4947 0, 0, SImode, val);
4948 else
4949 {
4950 new_rtx = expand_simple_binop (SImode, AND, new_rtx, ac.modemaski,
4951 NULL_RTX, 1, OPTAB_DIRECT);
4952 new_rtx = expand_simple_binop (SImode, IOR, new_rtx, val,
4953 NULL_RTX, 1, OPTAB_DIRECT);
4954 }
4955 break;
4956 case AND:
4957 case IOR:
4958 case XOR:
4959 new_rtx = expand_simple_binop (SImode, code, new_rtx, val,
4960 NULL_RTX, 1, OPTAB_DIRECT);
4961 break;
4962 case MULT: /* NAND */
4963 new_rtx = expand_simple_binop (SImode, AND, new_rtx, val,
4964 NULL_RTX, 1, OPTAB_DIRECT);
4965 new_rtx = expand_simple_binop (SImode, XOR, new_rtx, ac.modemask,
4966 NULL_RTX, 1, OPTAB_DIRECT);
4967 break;
4968 default:
4969 gcc_unreachable ();
4970 }
4971
4972 s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, cmp,
4973 ac.memsi, cmp, new_rtx));
4974
4975 /* Return the correct part of the bitfield. */
4976 if (target)
4977 convert_move (target, expand_simple_binop (SImode, LSHIFTRT,
4978 after ? new_rtx : cmp, ac.shift,
4979 NULL_RTX, 1, OPTAB_DIRECT), 1);
4980 }
4981
4982 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
4983 We need to emit DTP-relative relocations. */
4984
4985 static void s390_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
4986
4987 static void
4988 s390_output_dwarf_dtprel (FILE *file, int size, rtx x)
4989 {
4990 switch (size)
4991 {
4992 case 4:
4993 fputs ("\t.long\t", file);
4994 break;
4995 case 8:
4996 fputs ("\t.quad\t", file);
4997 break;
4998 default:
4999 gcc_unreachable ();
5000 }
5001 output_addr_const (file, x);
5002 fputs ("@DTPOFF", file);
5003 }
5004
5005 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
5006 /* Implement TARGET_MANGLE_TYPE. */
5007
5008 static const char *
5009 s390_mangle_type (const_tree type)
5010 {
5011 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
5012 && TARGET_LONG_DOUBLE_128)
5013 return "g";
5014
5015 /* For all other types, use normal C++ mangling. */
5016 return NULL;
5017 }
5018 #endif
5019
5020 /* In the name of slightly smaller debug output, and to cater to
5021 general assembler lossage, recognize various UNSPEC sequences
5022 and turn them back into a direct symbol reference. */
5023
5024 static rtx
5025 s390_delegitimize_address (rtx orig_x)
5026 {
5027 rtx x, y;
5028
5029 orig_x = delegitimize_mem_from_attrs (orig_x);
5030 x = orig_x;
5031
5032 /* Extract the symbol ref from:
5033 (plus:SI (reg:SI 12 %r12)
5034 (const:SI (unspec:SI [(symbol_ref/f:SI ("*.LC0"))]
5035 UNSPEC_GOTOFF/PLTOFF)))
5036 and
5037 (plus:SI (reg:SI 12 %r12)
5038 (const:SI (plus:SI (unspec:SI [(symbol_ref:SI ("L"))]
5039 UNSPEC_GOTOFF/PLTOFF)
5040 (const_int 4 [0x4])))) */
5041 if (GET_CODE (x) == PLUS
5042 && REG_P (XEXP (x, 0))
5043 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
5044 && GET_CODE (XEXP (x, 1)) == CONST)
5045 {
5046 HOST_WIDE_INT offset = 0;
5047
5048 /* The const operand. */
5049 y = XEXP (XEXP (x, 1), 0);
5050
5051 if (GET_CODE (y) == PLUS
5052 && GET_CODE (XEXP (y, 1)) == CONST_INT)
5053 {
5054 offset = INTVAL (XEXP (y, 1));
5055 y = XEXP (y, 0);
5056 }
5057
5058 if (GET_CODE (y) == UNSPEC
5059 && (XINT (y, 1) == UNSPEC_GOTOFF
5060 || XINT (y, 1) == UNSPEC_PLTOFF))
5061 return plus_constant (Pmode, XVECEXP (y, 0, 0), offset);
5062 }
5063
5064 if (GET_CODE (x) != MEM)
5065 return orig_x;
5066
5067 x = XEXP (x, 0);
5068 if (GET_CODE (x) == PLUS
5069 && GET_CODE (XEXP (x, 1)) == CONST
5070 && GET_CODE (XEXP (x, 0)) == REG
5071 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
5072 {
5073 y = XEXP (XEXP (x, 1), 0);
5074 if (GET_CODE (y) == UNSPEC
5075 && XINT (y, 1) == UNSPEC_GOT)
5076 y = XVECEXP (y, 0, 0);
5077 else
5078 return orig_x;
5079 }
5080 else if (GET_CODE (x) == CONST)
5081 {
5082 /* Extract the symbol ref from:
5083 (mem:QI (const:DI (unspec:DI [(symbol_ref:DI ("foo"))]
5084 UNSPEC_PLT/GOTENT))) */
5085
5086 y = XEXP (x, 0);
5087 if (GET_CODE (y) == UNSPEC
5088 && (XINT (y, 1) == UNSPEC_GOTENT
5089 || XINT (y, 1) == UNSPEC_PLT))
5090 y = XVECEXP (y, 0, 0);
5091 else
5092 return orig_x;
5093 }
5094 else
5095 return orig_x;
5096
5097 if (GET_MODE (orig_x) != Pmode)
5098 {
5099 if (GET_MODE (orig_x) == BLKmode)
5100 return orig_x;
5101 y = lowpart_subreg (GET_MODE (orig_x), y, Pmode);
5102 if (y == NULL_RTX)
5103 return orig_x;
5104 }
5105 return y;
5106 }
5107
5108 /* Output operand OP to stdio stream FILE.
5109 OP is an address (register + offset) which is not used to address data;
5110 instead the rightmost bits are interpreted as the value. */
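/* For example, (plus (reg %r1) (const_int 46)) is printed as "46(%r1)",
   and a plain (const_int 3) as "3"; the offset is truncated to its low
   twelve bits.  */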
5111
5112 static void
5113 print_shift_count_operand (FILE *file, rtx op)
5114 {
5115 HOST_WIDE_INT offset;
5116 rtx base;
5117
5118 /* Extract base register and offset. */
5119 if (!s390_decompose_shift_count (op, &base, &offset))
5120 gcc_unreachable ();
5121
5122 /* Sanity check. */
5123 if (base)
5124 {
5125 gcc_assert (GET_CODE (base) == REG);
5126 gcc_assert (REGNO (base) < FIRST_PSEUDO_REGISTER);
5127 gcc_assert (REGNO_REG_CLASS (REGNO (base)) == ADDR_REGS);
5128 }
5129
5130 /* Offsets are restricted to twelve bits. */
5131 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset & ((1 << 12) - 1));
5132 if (base)
5133 fprintf (file, "(%s)", reg_names[REGNO (base)]);
5134 }
5135
5136 /* See 'get_some_local_dynamic_name'. */
5137
5138 static int
5139 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
5140 {
5141 rtx x = *px;
5142
5143 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
5144 {
5145 x = get_pool_constant (x);
5146 return for_each_rtx (&x, get_some_local_dynamic_name_1, 0);
5147 }
5148
5149 if (GET_CODE (x) == SYMBOL_REF
5150 && tls_symbolic_operand (x) == TLS_MODEL_LOCAL_DYNAMIC)
5151 {
5152 cfun->machine->some_ld_name = XSTR (x, 0);
5153 return 1;
5154 }
5155
5156 return 0;
5157 }
5158
5159 /* Locate some local-dynamic symbol still in use by this function
5160 so that we can print its name in local-dynamic base patterns. */
5161
5162 static const char *
5163 get_some_local_dynamic_name (void)
5164 {
5165 rtx insn;
5166
5167 if (cfun->machine->some_ld_name)
5168 return cfun->machine->some_ld_name;
5169
5170 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
5171 if (INSN_P (insn)
5172 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
5173 return cfun->machine->some_ld_name;
5174
5175 gcc_unreachable ();
5176 }
5177
5178 /* Output machine-dependent UNSPECs occurring in address constant X
5179 in assembler syntax to stdio stream FILE. Returns true if the
5180 constant X could be recognized, false otherwise. */
5181
5182 static bool
5183 s390_output_addr_const_extra (FILE *file, rtx x)
5184 {
5185 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
5186 switch (XINT (x, 1))
5187 {
5188 case UNSPEC_GOTENT:
5189 output_addr_const (file, XVECEXP (x, 0, 0));
5190 fprintf (file, "@GOTENT");
5191 return true;
5192 case UNSPEC_GOT:
5193 output_addr_const (file, XVECEXP (x, 0, 0));
5194 fprintf (file, "@GOT");
5195 return true;
5196 case UNSPEC_GOTOFF:
5197 output_addr_const (file, XVECEXP (x, 0, 0));
5198 fprintf (file, "@GOTOFF");
5199 return true;
5200 case UNSPEC_PLT:
5201 output_addr_const (file, XVECEXP (x, 0, 0));
5202 fprintf (file, "@PLT");
5203 return true;
5204 case UNSPEC_PLTOFF:
5205 output_addr_const (file, XVECEXP (x, 0, 0));
5206 fprintf (file, "@PLTOFF");
5207 return true;
5208 case UNSPEC_TLSGD:
5209 output_addr_const (file, XVECEXP (x, 0, 0));
5210 fprintf (file, "@TLSGD");
5211 return true;
5212 case UNSPEC_TLSLDM:
5213 assemble_name (file, get_some_local_dynamic_name ());
5214 fprintf (file, "@TLSLDM");
5215 return true;
5216 case UNSPEC_DTPOFF:
5217 output_addr_const (file, XVECEXP (x, 0, 0));
5218 fprintf (file, "@DTPOFF");
5219 return true;
5220 case UNSPEC_NTPOFF:
5221 output_addr_const (file, XVECEXP (x, 0, 0));
5222 fprintf (file, "@NTPOFF");
5223 return true;
5224 case UNSPEC_GOTNTPOFF:
5225 output_addr_const (file, XVECEXP (x, 0, 0));
5226 fprintf (file, "@GOTNTPOFF");
5227 return true;
5228 case UNSPEC_INDNTPOFF:
5229 output_addr_const (file, XVECEXP (x, 0, 0));
5230 fprintf (file, "@INDNTPOFF");
5231 return true;
5232 }
5233
5234 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 2)
5235 switch (XINT (x, 1))
5236 {
5237 case UNSPEC_POOL_OFFSET:
5238 x = gen_rtx_MINUS (GET_MODE (x), XVECEXP (x, 0, 0), XVECEXP (x, 0, 1));
5239 output_addr_const (file, x);
5240 return true;
5241 }
5242 return false;
5243 }
5244
5245 /* Output address operand ADDR in assembler syntax to
5246 stdio stream FILE. */
5247
5248 void
5249 print_operand_address (FILE *file, rtx addr)
5250 {
5251 struct s390_address ad;
5252
5253 if (s390_symref_operand_p (addr, NULL, NULL))
5254 {
5255 if (!TARGET_Z10)
5256 {
5257 output_operand_lossage ("symbolic memory references are "
5258 "only supported on z10 or later");
5259 return;
5260 }
5261 output_addr_const (file, addr);
5262 return;
5263 }
5264
5265 if (!s390_decompose_address (addr, &ad)
5266 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5267 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
5268 output_operand_lossage ("cannot decompose address");
5269
5270 if (ad.disp)
5271 output_addr_const (file, ad.disp);
5272 else
5273 fprintf (file, "0");
5274
5275 if (ad.base && ad.indx)
5276 fprintf (file, "(%s,%s)", reg_names[REGNO (ad.indx)],
5277 reg_names[REGNO (ad.base)]);
5278 else if (ad.base)
5279 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
5280 }
5281
5282 /* Output operand X in assembler syntax to stdio stream FILE.
5283 CODE specified the format flag. The following format flags
5284 are recognized:
5285
5286 'C': print opcode suffix for branch condition.
5287 'D': print opcode suffix for inverse branch condition.
5288 'E': print opcode suffix for branch on index instruction.
5289 'J': print tls_load/tls_gdcall/tls_ldcall suffix
5290 'G': print the size of the operand in bytes.
5291 'O': print only the displacement of a memory reference.
5292 'R': print only the base register of a memory reference.
5293 'S': print S-type memory reference (base+displacement).
5294 'N': print the second word of a DImode operand.
5295 'M': print the second word of a TImode operand.
5296 'Y': print shift count operand.
5297
5298 'b': print integer X as if it's an unsigned byte.
5299 'c': print integer X as if it's a signed byte.
5300 'x': print integer X as if it's an unsigned halfword.
5301 'h': print integer X as if it's a signed halfword.
5302 'i': print the first nonzero HImode part of X.
5303 'j': print the first HImode part unequal to -1 of X.
5304 'k': print the first nonzero SImode part of X.
5305 'm': print the first SImode part unequal to -1 of X.
5306 'o': print integer X as if it's an unsigned 32bit word. */
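/* For example, with X = (const_int 0x1234abcd): 'b' prints 205,
   'c' prints -51, 'x' prints 43981, 'h' prints -21555, and
   'o' prints 305441741 (the value masked to 32 bits).  */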
5307
5308 void
5309 print_operand (FILE *file, rtx x, int code)
5310 {
5311 switch (code)
5312 {
5313 case 'C':
5314 fprintf (file, s390_branch_condition_mnemonic (x, FALSE));
5315 return;
5316
5317 case 'D':
5318 fprintf (file, s390_branch_condition_mnemonic (x, TRUE));
5319 return;
5320
5321 case 'E':
5322 if (GET_CODE (x) == LE)
5323 fprintf (file, "l");
5324 else if (GET_CODE (x) == GT)
5325 fprintf (file, "h");
5326 else
5327 output_operand_lossage ("invalid comparison operator "
5328 "for 'E' output modifier");
5329 return;
5330
5331 case 'J':
5332 if (GET_CODE (x) == SYMBOL_REF)
5333 {
5334 fprintf (file, "%s", ":tls_load:");
5335 output_addr_const (file, x);
5336 }
5337 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
5338 {
5339 fprintf (file, "%s", ":tls_gdcall:");
5340 output_addr_const (file, XVECEXP (x, 0, 0));
5341 }
5342 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM)
5343 {
5344 fprintf (file, "%s", ":tls_ldcall:");
5345 assemble_name (file, get_some_local_dynamic_name ());
5346 }
5347 else
5348 output_operand_lossage ("invalid reference for 'J' output modifier");
5349 return;
5350
5351 case 'G':
5352 fprintf (file, "%u", GET_MODE_SIZE (GET_MODE (x)));
5353 return;
5354
5355 case 'O':
5356 {
5357 struct s390_address ad;
5358 int ret;
5359
5360 if (!MEM_P (x))
5361 {
5362 output_operand_lossage ("memory reference expected for "
5363 "'O' output modifier");
5364 return;
5365 }
5366
5367 ret = s390_decompose_address (XEXP (x, 0), &ad);
5368
5369 if (!ret
5370 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5371 || ad.indx)
5372 {
5373 output_operand_lossage ("invalid address for 'O' output modifier");
5374 return;
5375 }
5376
5377 if (ad.disp)
5378 output_addr_const (file, ad.disp);
5379 else
5380 fprintf (file, "0");
5381 }
5382 return;
5383
5384 case 'R':
5385 {
5386 struct s390_address ad;
5387 int ret;
5388
5389 if (!MEM_P (x))
5390 {
5391 output_operand_lossage ("memory reference expected for "
5392 "'R' output modifier");
5393 return;
5394 }
5395
5396 ret = s390_decompose_address (XEXP (x, 0), &ad);
5397
5398 if (!ret
5399 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5400 || ad.indx)
5401 {
5402 output_operand_lossage ("invalid address for 'R' output modifier");
5403 return;
5404 }
5405
5406 if (ad.base)
5407 fprintf (file, "%s", reg_names[REGNO (ad.base)]);
5408 else
5409 fprintf (file, "0");
5410 }
5411 return;
5412
5413 case 'S':
5414 {
5415 struct s390_address ad;
5416 int ret;
5417
5418 if (!MEM_P (x))
5419 {
5420 output_operand_lossage ("memory reference expected for "
5421 "'S' output modifier");
5422 return;
5423 }
5424 ret = s390_decompose_address (XEXP (x, 0), &ad);
5425
5426 if (!ret
5427 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5428 || ad.indx)
5429 {
5430 output_operand_lossage ("invalid address for 'S' output modifier");
5431 return;
5432 }
5433
5434 if (ad.disp)
5435 output_addr_const (file, ad.disp);
5436 else
5437 fprintf (file, "0");
5438
5439 if (ad.base)
5440 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
5441 }
5442 return;
5443
5444 case 'N':
5445 if (GET_CODE (x) == REG)
5446 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
5447 else if (GET_CODE (x) == MEM)
5448 x = change_address (x, VOIDmode,
5449 plus_constant (Pmode, XEXP (x, 0), 4));
5450 else
5451 output_operand_lossage ("register or memory expression expected "
5452 "for 'N' output modifier");
5453 break;
5454
5455 case 'M':
5456 if (GET_CODE (x) == REG)
5457 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
5458 else if (GET_CODE (x) == MEM)
5459 x = change_address (x, VOIDmode,
5460 plus_constant (Pmode, XEXP (x, 0), 8));
5461 else
5462 output_operand_lossage ("register or memory expression expected "
5463 "for 'M' output modifier");
5464 break;
5465
5466 case 'Y':
5467 print_shift_count_operand (file, x);
5468 return;
5469 }
5470
5471 switch (GET_CODE (x))
5472 {
5473 case REG:
5474 fprintf (file, "%s", reg_names[REGNO (x)]);
5475 break;
5476
5477 case MEM:
5478 output_address (XEXP (x, 0));
5479 break;
5480
5481 case CONST:
5482 case CODE_LABEL:
5483 case LABEL_REF:
5484 case SYMBOL_REF:
5485 output_addr_const (file, x);
5486 break;
5487
5488 case CONST_INT:
5489 if (code == 'b')
5490 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xff);
5491 else if (code == 'c')
5492 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ((INTVAL (x) & 0xff) ^ 0x80) - 0x80);
5493 else if (code == 'x')
5494 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xffff);
5495 else if (code == 'h')
5496 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
5497 else if (code == 'i')
5498 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5499 s390_extract_part (x, HImode, 0));
5500 else if (code == 'j')
5501 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5502 s390_extract_part (x, HImode, -1));
5503 else if (code == 'k')
5504 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5505 s390_extract_part (x, SImode, 0));
5506 else if (code == 'm')
5507 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5508 s390_extract_part (x, SImode, -1));
5509 else if (code == 'o')
5510 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xffffffff);
5511 else
5512 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
5513 break;
5514
5515 case CONST_DOUBLE:
5516 gcc_assert (GET_MODE (x) == VOIDmode);
5517 if (code == 'b')
5518 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xff);
5519 else if (code == 'x')
5520 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xffff);
5521 else if (code == 'h')
5522 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5523 ((CONST_DOUBLE_LOW (x) & 0xffff) ^ 0x8000) - 0x8000);
5524 else
5525 {
5526 if (code == 0)
5527 output_operand_lossage ("invalid constant - try using "
5528 "an output modifier");
5529 else
5530 output_operand_lossage ("invalid constant for output modifier '%c'",
5531 code);
5532 }
5533 break;
5534
5535 default:
5536 if (code == 0)
5537 output_operand_lossage ("invalid expression - try using "
5538 "an output modifier");
5539 else
5540 output_operand_lossage ("invalid expression for output "
5541 "modifier '%c'", code);
5542 break;
5543 }
5544 }
5545
5546 /* Target hook for assembling integer objects. We need to define it
5547 here to work around a bug in some versions of GAS, which couldn't
5548 handle values smaller than INT_MIN when printed in decimal. */
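/* For example, (const_int -2147483649) is emitted in hexadecimal,
   roughly ".quad 0xffffffff7fffffff", instead of in decimal.  */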
5549
5550 static bool
5551 s390_assemble_integer (rtx x, unsigned int size, int aligned_p)
5552 {
5553 if (size == 8 && aligned_p
5554 && GET_CODE (x) == CONST_INT && INTVAL (x) < INT_MIN)
5555 {
5556 fprintf (asm_out_file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n",
5557 INTVAL (x));
5558 return true;
5559 }
5560 return default_assemble_integer (x, size, aligned_p);
5561 }
5562
5563 /* Returns true if register REGNO is used for forming
5564 a memory address in expression X. */
5565
5566 static bool
5567 reg_used_in_mem_p (int regno, rtx x)
5568 {
5569 enum rtx_code code = GET_CODE (x);
5570 int i, j;
5571 const char *fmt;
5572
5573 if (code == MEM)
5574 {
5575 if (refers_to_regno_p (regno, regno+1,
5576 XEXP (x, 0), 0))
5577 return true;
5578 }
5579 else if (code == SET
5580 && GET_CODE (SET_DEST (x)) == PC)
5581 {
5582 if (refers_to_regno_p (regno, regno+1,
5583 SET_SRC (x), 0))
5584 return true;
5585 }
5586
5587 fmt = GET_RTX_FORMAT (code);
5588 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5589 {
5590 if (fmt[i] == 'e'
5591 && reg_used_in_mem_p (regno, XEXP (x, i)))
5592 return true;
5593
5594 else if (fmt[i] == 'E')
5595 for (j = 0; j < XVECLEN (x, i); j++)
5596 if (reg_used_in_mem_p (regno, XVECEXP (x, i, j)))
5597 return true;
5598 }
5599 return false;
5600 }
5601
5602 /* Returns true if expression DEP_RTX sets an address register
5603 used by instruction INSN to address memory. */
5604
5605 static bool
5606 addr_generation_dependency_p (rtx dep_rtx, rtx insn)
5607 {
5608 rtx target, pat;
5609
5610 if (GET_CODE (dep_rtx) == INSN)
5611 dep_rtx = PATTERN (dep_rtx);
5612
5613 if (GET_CODE (dep_rtx) == SET)
5614 {
5615 target = SET_DEST (dep_rtx);
5616 if (GET_CODE (target) == STRICT_LOW_PART)
5617 target = XEXP (target, 0);
5618 while (GET_CODE (target) == SUBREG)
5619 target = SUBREG_REG (target);
5620
5621 if (GET_CODE (target) == REG)
5622 {
5623 int regno = REGNO (target);
5624
5625 if (s390_safe_attr_type (insn) == TYPE_LA)
5626 {
5627 pat = PATTERN (insn);
5628 if (GET_CODE (pat) == PARALLEL)
5629 {
5630 gcc_assert (XVECLEN (pat, 0) == 2);
5631 pat = XVECEXP (pat, 0, 0);
5632 }
5633 gcc_assert (GET_CODE (pat) == SET);
5634 return refers_to_regno_p (regno, regno+1, SET_SRC (pat), 0);
5635 }
5636 else if (get_attr_atype (insn) == ATYPE_AGEN)
5637 return reg_used_in_mem_p (regno, PATTERN (insn));
5638 }
5639 }
5640 return false;
5641 }
5642
5643 /* Return 1, if dep_insn sets register used in insn in the agen unit. */
5644
5645 int
5646 s390_agen_dep_p (rtx dep_insn, rtx insn)
5647 {
5648 rtx dep_rtx = PATTERN (dep_insn);
5649 int i;
5650
5651 if (GET_CODE (dep_rtx) == SET
5652 && addr_generation_dependency_p (dep_rtx, insn))
5653 return 1;
5654 else if (GET_CODE (dep_rtx) == PARALLEL)
5655 {
5656 for (i = 0; i < XVECLEN (dep_rtx, 0); i++)
5657 {
5658 if (addr_generation_dependency_p (XVECEXP (dep_rtx, 0, i), insn))
5659 return 1;
5660 }
5661 }
5662 return 0;
5663 }
5664
5665
5666 /* A C statement (sans semicolon) to update the integer scheduling priority
5667 INSN_PRIORITY (INSN). Increase the priority to execute the INSN earlier,
5668 reduce the priority to execute INSN later. Do not define this macro if
5669 you do not need to adjust the scheduling priorities of insns.
5670
5671 A STD instruction should be scheduled earlier,
5672 in order to use the bypass. */
5673 static int
5674 s390_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
5675 {
5676 if (! INSN_P (insn))
5677 return priority;
5678
5679 if (s390_tune != PROCESSOR_2084_Z990
5680 && s390_tune != PROCESSOR_2094_Z9_109
5681 && s390_tune != PROCESSOR_2097_Z10
5682 && s390_tune != PROCESSOR_2817_Z196)
5683 return priority;
5684
5685 switch (s390_safe_attr_type (insn))
5686 {
5687 case TYPE_FSTOREDF:
5688 case TYPE_FSTORESF:
5689 priority = priority << 3;
5690 break;
5691 case TYPE_STORE:
5692 case TYPE_STM:
5693 priority = priority << 1;
5694 break;
5695 default:
5696 break;
5697 }
5698 return priority;
5699 }
5700
5701
5702 /* The number of instructions that can be issued per cycle. */
5703
5704 static int
5705 s390_issue_rate (void)
5706 {
5707 switch (s390_tune)
5708 {
5709 case PROCESSOR_2084_Z990:
5710 case PROCESSOR_2094_Z9_109:
5711 case PROCESSOR_2817_Z196:
5712 return 3;
5713 case PROCESSOR_2097_Z10:
5714 return 2;
5715 default:
5716 return 1;
5717 }
5718 }
5719
5720 static int
5721 s390_first_cycle_multipass_dfa_lookahead (void)
5722 {
5723 return 4;
5724 }
5725
5726 /* Annotate every literal pool reference in X by an UNSPEC_LTREF expression.
5727 Fix up MEMs as required. */
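/* For example, (mem (symbol_ref .LC0)) referring to the literal pool
   is rewritten as (mem (unspec [(symbol_ref .LC0) (base_reg)]
   UNSPEC_LTREF)), where base_reg is the function's literal pool base
   register.  */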
5728
5729 static void
5730 annotate_constant_pool_refs (rtx *x)
5731 {
5732 int i, j;
5733 const char *fmt;
5734
5735 gcc_assert (GET_CODE (*x) != SYMBOL_REF
5736 || !CONSTANT_POOL_ADDRESS_P (*x));
5737
5738 /* Literal pool references can only occur inside a MEM ... */
5739 if (GET_CODE (*x) == MEM)
5740 {
5741 rtx memref = XEXP (*x, 0);
5742
5743 if (GET_CODE (memref) == SYMBOL_REF
5744 && CONSTANT_POOL_ADDRESS_P (memref))
5745 {
5746 rtx base = cfun->machine->base_reg;
5747 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, memref, base),
5748 UNSPEC_LTREF);
5749
5750 *x = replace_equiv_address (*x, addr);
5751 return;
5752 }
5753
5754 if (GET_CODE (memref) == CONST
5755 && GET_CODE (XEXP (memref, 0)) == PLUS
5756 && GET_CODE (XEXP (XEXP (memref, 0), 1)) == CONST_INT
5757 && GET_CODE (XEXP (XEXP (memref, 0), 0)) == SYMBOL_REF
5758 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (memref, 0), 0)))
5759 {
5760 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (memref, 0), 1));
5761 rtx sym = XEXP (XEXP (memref, 0), 0);
5762 rtx base = cfun->machine->base_reg;
5763 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5764 UNSPEC_LTREF);
5765
5766 *x = replace_equiv_address (*x, plus_constant (Pmode, addr, off));
5767 return;
5768 }
5769 }
5770
5771 /* ... or a load-address type pattern. */
5772 if (GET_CODE (*x) == SET)
5773 {
5774 rtx addrref = SET_SRC (*x);
5775
5776 if (GET_CODE (addrref) == SYMBOL_REF
5777 && CONSTANT_POOL_ADDRESS_P (addrref))
5778 {
5779 rtx base = cfun->machine->base_reg;
5780 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addrref, base),
5781 UNSPEC_LTREF);
5782
5783 SET_SRC (*x) = addr;
5784 return;
5785 }
5786
5787 if (GET_CODE (addrref) == CONST
5788 && GET_CODE (XEXP (addrref, 0)) == PLUS
5789 && GET_CODE (XEXP (XEXP (addrref, 0), 1)) == CONST_INT
5790 && GET_CODE (XEXP (XEXP (addrref, 0), 0)) == SYMBOL_REF
5791 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (addrref, 0), 0)))
5792 {
5793 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (addrref, 0), 1));
5794 rtx sym = XEXP (XEXP (addrref, 0), 0);
5795 rtx base = cfun->machine->base_reg;
5796 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5797 UNSPEC_LTREF);
5798
5799 SET_SRC (*x) = plus_constant (Pmode, addr, off);
5800 return;
5801 }
5802 }
5803
5804 /* Annotate LTREL_BASE as well. */
5805 if (GET_CODE (*x) == UNSPEC
5806 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
5807 {
5808 rtx base = cfun->machine->base_reg;
5809 *x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XVECEXP (*x, 0, 0), base),
5810 UNSPEC_LTREL_BASE);
5811 return;
5812 }
5813
5814 fmt = GET_RTX_FORMAT (GET_CODE (*x));
5815 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
5816 {
5817 if (fmt[i] == 'e')
5818 {
5819 annotate_constant_pool_refs (&XEXP (*x, i));
5820 }
5821 else if (fmt[i] == 'E')
5822 {
5823 for (j = 0; j < XVECLEN (*x, i); j++)
5824 annotate_constant_pool_refs (&XVECEXP (*x, i, j));
5825 }
5826 }
5827 }
5828
5829 /* Split all branches that exceed the maximum distance.
5830 Returns true if this created a new literal pool entry. */
5831
5832 static int
5833 s390_split_branches (void)
5834 {
5835 rtx temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
5836 int new_literal = 0, ret;
5837 rtx insn, pat, tmp, target;
5838 rtx *label;
5839
5840 /* We need correct insn addresses. */
5841
5842 shorten_branches (get_insns ());
5843
5844 /* Find all branches that exceed 64KB, and split them. */
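  /* The limit stems from the signed 16-bit halfword displacement of the
     relative branch instructions, which reaches roughly +-64KB.  Branches
     reaching farther have their target address materialized via the
     literal pool and the return register instead (see the code below).  */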
5845
5846 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5847 {
5848 if (GET_CODE (insn) != JUMP_INSN)
5849 continue;
5850
5851 pat = PATTERN (insn);
5852 if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 2)
5853 pat = XVECEXP (pat, 0, 0);
5854 if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
5855 continue;
5856
5857 if (GET_CODE (SET_SRC (pat)) == LABEL_REF)
5858 {
5859 label = &SET_SRC (pat);
5860 }
5861 else if (GET_CODE (SET_SRC (pat)) == IF_THEN_ELSE)
5862 {
5863 if (GET_CODE (XEXP (SET_SRC (pat), 1)) == LABEL_REF)
5864 label = &XEXP (SET_SRC (pat), 1);
5865 else if (GET_CODE (XEXP (SET_SRC (pat), 2)) == LABEL_REF)
5866 label = &XEXP (SET_SRC (pat), 2);
5867 else
5868 continue;
5869 }
5870 else
5871 continue;
5872
5873 if (get_attr_length (insn) <= 4)
5874 continue;
5875
5876 	  /* We are going to use the return register as a scratch register;
5877 	     make sure it will be saved/restored by the prologue/epilogue. */
5878 cfun_frame_layout.save_return_addr_p = 1;
5879
5880 if (!flag_pic)
5881 {
5882 new_literal = 1;
5883 tmp = force_const_mem (Pmode, *label);
5884 	  tmp = emit_insn_before (gen_rtx_SET (VOIDmode, temp_reg, tmp), insn);
5885 INSN_ADDRESSES_NEW (tmp, -1);
5886 annotate_constant_pool_refs (&PATTERN (tmp));
5887
5888 target = temp_reg;
5889 }
5890 else
5891 {
5892 new_literal = 1;
5893 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, *label),
5894 UNSPEC_LTREL_OFFSET);
5895 target = gen_rtx_CONST (Pmode, target);
5896 target = force_const_mem (Pmode, target);
5897 	  tmp = emit_insn_before (gen_rtx_SET (VOIDmode, temp_reg, target), insn);
5898 INSN_ADDRESSES_NEW (tmp, -1);
5899 annotate_constant_pool_refs (&PATTERN (tmp));
5900
5901 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XEXP (target, 0),
5902 cfun->machine->base_reg),
5903 UNSPEC_LTREL_BASE);
5904 target = gen_rtx_PLUS (Pmode, temp_reg, target);
5905 }
5906
5907 ret = validate_change (insn, label, target, 0);
5908 gcc_assert (ret);
5909 }
5910
5911 return new_literal;
5912 }
5913
5914
5915 /* Find an annotated literal pool symbol referenced in RTX X,
5916 and store it at REF. Will abort if X contains references to
5917 more than one such pool symbol; multiple references to the same
5918 symbol are allowed, however.
5919
5920 The rtx pointed to by REF must be initialized to NULL_RTX
5921 by the caller before calling this routine. */
5922
5923 static void
5924 find_constant_pool_ref (rtx x, rtx *ref)
5925 {
5926 int i, j;
5927 const char *fmt;
5928
5929 /* Ignore LTREL_BASE references. */
5930 if (GET_CODE (x) == UNSPEC
5931 && XINT (x, 1) == UNSPEC_LTREL_BASE)
5932 return;
5933 /* Likewise POOL_ENTRY insns. */
5934 if (GET_CODE (x) == UNSPEC_VOLATILE
5935 && XINT (x, 1) == UNSPECV_POOL_ENTRY)
5936 return;
5937
5938 gcc_assert (GET_CODE (x) != SYMBOL_REF
5939 || !CONSTANT_POOL_ADDRESS_P (x));
5940
5941 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_LTREF)
5942 {
5943 rtx sym = XVECEXP (x, 0, 0);
5944 gcc_assert (GET_CODE (sym) == SYMBOL_REF
5945 && CONSTANT_POOL_ADDRESS_P (sym));
5946
5947 if (*ref == NULL_RTX)
5948 *ref = sym;
5949 else
5950 gcc_assert (*ref == sym);
5951
5952 return;
5953 }
5954
5955 fmt = GET_RTX_FORMAT (GET_CODE (x));
5956 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5957 {
5958 if (fmt[i] == 'e')
5959 {
5960 find_constant_pool_ref (XEXP (x, i), ref);
5961 }
5962 else if (fmt[i] == 'E')
5963 {
5964 for (j = 0; j < XVECLEN (x, i); j++)
5965 find_constant_pool_ref (XVECEXP (x, i, j), ref);
5966 }
5967 }
5968 }
5969
5970 /* Replace every reference to the annotated literal pool
5971 symbol REF in X by its base plus OFFSET. */
5972
5973 static void
5974 replace_constant_pool_ref (rtx *x, rtx ref, rtx offset)
5975 {
5976 int i, j;
5977 const char *fmt;
5978
5979 gcc_assert (*x != ref);
5980
5981 if (GET_CODE (*x) == UNSPEC
5982 && XINT (*x, 1) == UNSPEC_LTREF
5983 && XVECEXP (*x, 0, 0) == ref)
5984 {
5985 *x = gen_rtx_PLUS (Pmode, XVECEXP (*x, 0, 1), offset);
5986 return;
5987 }
5988
5989 if (GET_CODE (*x) == PLUS
5990 && GET_CODE (XEXP (*x, 1)) == CONST_INT
5991 && GET_CODE (XEXP (*x, 0)) == UNSPEC
5992 && XINT (XEXP (*x, 0), 1) == UNSPEC_LTREF
5993 && XVECEXP (XEXP (*x, 0), 0, 0) == ref)
5994 {
5995 rtx addr = gen_rtx_PLUS (Pmode, XVECEXP (XEXP (*x, 0), 0, 1), offset);
5996 *x = plus_constant (Pmode, addr, INTVAL (XEXP (*x, 1)));
5997 return;
5998 }
5999
6000 fmt = GET_RTX_FORMAT (GET_CODE (*x));
6001 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
6002 {
6003 if (fmt[i] == 'e')
6004 {
6005 replace_constant_pool_ref (&XEXP (*x, i), ref, offset);
6006 }
6007 else if (fmt[i] == 'E')
6008 {
6009 for (j = 0; j < XVECLEN (*x, i); j++)
6010 replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, offset);
6011 }
6012 }
6013 }
6014
6015 /* Check whether X contains an UNSPEC_LTREL_BASE.
6016 Return its constant pool symbol if found, NULL_RTX otherwise. */
6017
6018 static rtx
6019 find_ltrel_base (rtx x)
6020 {
6021 int i, j;
6022 const char *fmt;
6023
6024 if (GET_CODE (x) == UNSPEC
6025 && XINT (x, 1) == UNSPEC_LTREL_BASE)
6026 return XVECEXP (x, 0, 0);
6027
6028 fmt = GET_RTX_FORMAT (GET_CODE (x));
6029 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6030 {
6031 if (fmt[i] == 'e')
6032 {
6033 rtx fnd = find_ltrel_base (XEXP (x, i));
6034 if (fnd)
6035 return fnd;
6036 }
6037 else if (fmt[i] == 'E')
6038 {
6039 for (j = 0; j < XVECLEN (x, i); j++)
6040 {
6041 rtx fnd = find_ltrel_base (XVECEXP (x, i, j));
6042 if (fnd)
6043 return fnd;
6044 }
6045 }
6046 }
6047
6048 return NULL_RTX;
6049 }
6050
6051 /* Replace any occurrence of UNSPEC_LTREL_BASE in X with its base. */
6052
6053 static void
6054 replace_ltrel_base (rtx *x)
6055 {
6056 int i, j;
6057 const char *fmt;
6058
6059 if (GET_CODE (*x) == UNSPEC
6060 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
6061 {
6062 *x = XVECEXP (*x, 0, 1);
6063 return;
6064 }
6065
6066 fmt = GET_RTX_FORMAT (GET_CODE (*x));
6067 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
6068 {
6069 if (fmt[i] == 'e')
6070 {
6071 replace_ltrel_base (&XEXP (*x, i));
6072 }
6073 else if (fmt[i] == 'E')
6074 {
6075 for (j = 0; j < XVECLEN (*x, i); j++)
6076 replace_ltrel_base (&XVECEXP (*x, i, j));
6077 }
6078 }
6079 }
6080
6081
6082 /* We keep a list of constants which we have to add to internal
6083 constant tables in the middle of large functions. */
6084
6085 #define NR_C_MODES 11
6086 enum machine_mode constant_modes[NR_C_MODES] =
6087 {
6088 TFmode, TImode, TDmode,
6089 DFmode, DImode, DDmode,
6090 SFmode, SImode, SDmode,
6091 HImode,
6092 QImode
6093 };
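/* Note: the modes above are listed in decreasing size so that
   s390_dump_pool, which walks this array in order, emits the constants
   with the largest alignment requirement first.  */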
6094
6095 struct constant
6096 {
6097 struct constant *next;
6098 rtx value;
6099 rtx label;
6100 };
6101
6102 struct constant_pool
6103 {
6104 struct constant_pool *next;
6105 rtx first_insn;
6106 rtx pool_insn;
6107 bitmap insns;
6108 rtx emit_pool_after;
6109
6110 struct constant *constants[NR_C_MODES];
6111 struct constant *execute;
6112 rtx label;
6113 int size;
6114 };
6115
6116 /* Allocate new constant_pool structure. */
6117
6118 static struct constant_pool *
6119 s390_alloc_pool (void)
6120 {
6121 struct constant_pool *pool;
6122 int i;
6123
6124 pool = (struct constant_pool *) xmalloc (sizeof *pool);
6125 pool->next = NULL;
6126 for (i = 0; i < NR_C_MODES; i++)
6127 pool->constants[i] = NULL;
6128
6129 pool->execute = NULL;
6130 pool->label = gen_label_rtx ();
6131 pool->first_insn = NULL_RTX;
6132 pool->pool_insn = NULL_RTX;
6133 pool->insns = BITMAP_ALLOC (NULL);
6134 pool->size = 0;
6135 pool->emit_pool_after = NULL_RTX;
6136
6137 return pool;
6138 }
6139
6140 /* Create new constant pool covering instructions starting at INSN
6141 and chain it to the end of POOL_LIST. */
6142
6143 static struct constant_pool *
6144 s390_start_pool (struct constant_pool **pool_list, rtx insn)
6145 {
6146 struct constant_pool *pool, **prev;
6147
6148 pool = s390_alloc_pool ();
6149 pool->first_insn = insn;
6150
6151 for (prev = pool_list; *prev; prev = &(*prev)->next)
6152 ;
6153 *prev = pool;
6154
6155 return pool;
6156 }
6157
6158 /* End range of instructions covered by POOL at INSN and emit
6159 placeholder insn representing the pool. */
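/* The 8 bytes of "alignment slop" added below are presumably meant to
   cover the padding that s390_dump_pool later inserts in front of the
   pool to reach its 8 (resp. 4) byte alignment.  */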
6160
6161 static void
6162 s390_end_pool (struct constant_pool *pool, rtx insn)
6163 {
6164 rtx pool_size = GEN_INT (pool->size + 8 /* alignment slop */);
6165
6166 if (!insn)
6167 insn = get_last_insn ();
6168
6169 pool->pool_insn = emit_insn_after (gen_pool (pool_size), insn);
6170 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6171 }
6172
6173 /* Add INSN to the list of insns covered by POOL. */
6174
6175 static void
6176 s390_add_pool_insn (struct constant_pool *pool, rtx insn)
6177 {
6178 bitmap_set_bit (pool->insns, INSN_UID (insn));
6179 }
6180
6181 /* Return pool out of POOL_LIST that covers INSN. */
6182
6183 static struct constant_pool *
6184 s390_find_pool (struct constant_pool *pool_list, rtx insn)
6185 {
6186 struct constant_pool *pool;
6187
6188 for (pool = pool_list; pool; pool = pool->next)
6189 if (bitmap_bit_p (pool->insns, INSN_UID (insn)))
6190 break;
6191
6192 return pool;
6193 }
6194
6195 /* Add constant VAL of mode MODE to the constant pool POOL. */
6196
6197 static void
6198 s390_add_constant (struct constant_pool *pool, rtx val, enum machine_mode mode)
6199 {
6200 struct constant *c;
6201 int i;
6202
6203 for (i = 0; i < NR_C_MODES; i++)
6204 if (constant_modes[i] == mode)
6205 break;
6206 gcc_assert (i != NR_C_MODES);
6207
6208 for (c = pool->constants[i]; c != NULL; c = c->next)
6209 if (rtx_equal_p (val, c->value))
6210 break;
6211
6212 if (c == NULL)
6213 {
6214 c = (struct constant *) xmalloc (sizeof *c);
6215 c->value = val;
6216 c->label = gen_label_rtx ();
6217 c->next = pool->constants[i];
6218 pool->constants[i] = c;
6219 pool->size += GET_MODE_SIZE (mode);
6220 }
6221 }
6222
6223 /* Return an rtx that represents the offset of X from the start of
6224 pool POOL. */
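/* The result has the form
     (const (unspec [X (label_ref POOL_LABEL)] UNSPEC_POOL_OFFSET))
   i.e. a symbolic expression for the distance of X from the pool base
   label.  */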
6225
6226 static rtx
6227 s390_pool_offset (struct constant_pool *pool, rtx x)
6228 {
6229 rtx label;
6230
6231 label = gen_rtx_LABEL_REF (GET_MODE (x), pool->label);
6232 x = gen_rtx_UNSPEC (GET_MODE (x), gen_rtvec (2, x, label),
6233 UNSPEC_POOL_OFFSET);
6234 return gen_rtx_CONST (GET_MODE (x), x);
6235 }
6236
6237 /* Find constant VAL of mode MODE in the constant pool POOL.
6238 Return an RTX describing the distance from the start of
6239    the pool to the location of the constant. */
6240
6241 static rtx
6242 s390_find_constant (struct constant_pool *pool, rtx val,
6243 enum machine_mode mode)
6244 {
6245 struct constant *c;
6246 int i;
6247
6248 for (i = 0; i < NR_C_MODES; i++)
6249 if (constant_modes[i] == mode)
6250 break;
6251 gcc_assert (i != NR_C_MODES);
6252
6253 for (c = pool->constants[i]; c != NULL; c = c->next)
6254 if (rtx_equal_p (val, c->value))
6255 break;
6256
6257 gcc_assert (c);
6258
6259 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
6260 }
6261
6262 /* Check whether INSN is an execute. Return the label_ref to its
6263 execute target template if so, NULL_RTX otherwise. */
6264
6265 static rtx
6266 s390_execute_label (rtx insn)
6267 {
6268 if (GET_CODE (insn) == INSN
6269 && GET_CODE (PATTERN (insn)) == PARALLEL
6270 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == UNSPEC
6271 && XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_EXECUTE)
6272 return XVECEXP (XVECEXP (PATTERN (insn), 0, 0), 0, 2);
6273
6274 return NULL_RTX;
6275 }
6276
6277 /* Add execute target for INSN to the constant pool POOL. */
6278
6279 static void
6280 s390_add_execute (struct constant_pool *pool, rtx insn)
6281 {
6282 struct constant *c;
6283
6284 for (c = pool->execute; c != NULL; c = c->next)
6285 if (INSN_UID (insn) == INSN_UID (c->value))
6286 break;
6287
6288 if (c == NULL)
6289 {
6290 c = (struct constant *) xmalloc (sizeof *c);
6291 c->value = insn;
6292 c->label = gen_label_rtx ();
6293 c->next = pool->execute;
6294 pool->execute = c;
6295 pool->size += 6;
6296 }
6297 }
6298
6299 /* Find execute target for INSN in the constant pool POOL.
6300 Return an RTX describing the distance from the start of
6301 the pool to the location of the execute target. */
6302
6303 static rtx
6304 s390_find_execute (struct constant_pool *pool, rtx insn)
6305 {
6306 struct constant *c;
6307
6308 for (c = pool->execute; c != NULL; c = c->next)
6309 if (INSN_UID (insn) == INSN_UID (c->value))
6310 break;
6311
6312 gcc_assert (c);
6313
6314 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
6315 }
6316
6317 /* For an execute INSN, extract the execute target template. */
6318
6319 static rtx
6320 s390_execute_target (rtx insn)
6321 {
6322 rtx pattern = PATTERN (insn);
6323 gcc_assert (s390_execute_label (insn));
6324
6325 if (XVECLEN (pattern, 0) == 2)
6326 {
6327 pattern = copy_rtx (XVECEXP (pattern, 0, 1));
6328 }
6329 else
6330 {
6331 rtvec vec = rtvec_alloc (XVECLEN (pattern, 0) - 1);
6332 int i;
6333
6334 for (i = 0; i < XVECLEN (pattern, 0) - 1; i++)
6335 RTVEC_ELT (vec, i) = copy_rtx (XVECEXP (pattern, 0, i + 1));
6336
6337 pattern = gen_rtx_PARALLEL (VOIDmode, vec);
6338 }
6339
6340 return pattern;
6341 }
6342
6343 /* Indicate that INSN cannot be duplicated. This is the case for
6344 execute insns that carry a unique label. */
6345
6346 static bool
6347 s390_cannot_copy_insn_p (rtx insn)
6348 {
6349 rtx label = s390_execute_label (insn);
6350 return label && label != const0_rtx;
6351 }
6352
6353 /* Dump out the constants in POOL. If REMOTE_LABEL is true,
6354 do not emit the pool base label. */
6355
6356 static void
6357 s390_dump_pool (struct constant_pool *pool, bool remote_label)
6358 {
6359 struct constant *c;
6360 rtx insn = pool->pool_insn;
6361 int i;
6362
6363 /* Switch to rodata section. */
6364 if (TARGET_CPU_ZARCH)
6365 {
6366 insn = emit_insn_after (gen_pool_section_start (), insn);
6367 INSN_ADDRESSES_NEW (insn, -1);
6368 }
6369
6370 /* Ensure minimum pool alignment. */
6371 if (TARGET_CPU_ZARCH)
6372 insn = emit_insn_after (gen_pool_align (GEN_INT (8)), insn);
6373 else
6374 insn = emit_insn_after (gen_pool_align (GEN_INT (4)), insn);
6375 INSN_ADDRESSES_NEW (insn, -1);
6376
6377 /* Emit pool base label. */
6378 if (!remote_label)
6379 {
6380 insn = emit_label_after (pool->label, insn);
6381 INSN_ADDRESSES_NEW (insn, -1);
6382 }
6383
6384 /* Dump constants in descending alignment requirement order,
6385 ensuring proper alignment for every constant. */
6386 for (i = 0; i < NR_C_MODES; i++)
6387 for (c = pool->constants[i]; c; c = c->next)
6388 {
6389 /* Convert UNSPEC_LTREL_OFFSET unspecs to pool-relative references. */
6390 rtx value = copy_rtx (c->value);
6391 if (GET_CODE (value) == CONST
6392 && GET_CODE (XEXP (value, 0)) == UNSPEC
6393 && XINT (XEXP (value, 0), 1) == UNSPEC_LTREL_OFFSET
6394 && XVECLEN (XEXP (value, 0), 0) == 1)
6395 value = s390_pool_offset (pool, XVECEXP (XEXP (value, 0), 0, 0));
6396
6397 insn = emit_label_after (c->label, insn);
6398 INSN_ADDRESSES_NEW (insn, -1);
6399
6400 value = gen_rtx_UNSPEC_VOLATILE (constant_modes[i],
6401 gen_rtvec (1, value),
6402 UNSPECV_POOL_ENTRY);
6403 insn = emit_insn_after (value, insn);
6404 INSN_ADDRESSES_NEW (insn, -1);
6405 }
6406
6407 /* Ensure minimum alignment for instructions. */
6408 insn = emit_insn_after (gen_pool_align (GEN_INT (2)), insn);
6409 INSN_ADDRESSES_NEW (insn, -1);
6410
6411 /* Output in-pool execute template insns. */
6412 for (c = pool->execute; c; c = c->next)
6413 {
6414 insn = emit_label_after (c->label, insn);
6415 INSN_ADDRESSES_NEW (insn, -1);
6416
6417 insn = emit_insn_after (s390_execute_target (c->value), insn);
6418 INSN_ADDRESSES_NEW (insn, -1);
6419 }
6420
6421 /* Switch back to previous section. */
6422 if (TARGET_CPU_ZARCH)
6423 {
6424 insn = emit_insn_after (gen_pool_section_end (), insn);
6425 INSN_ADDRESSES_NEW (insn, -1);
6426 }
6427
6428 insn = emit_barrier_after (insn);
6429 INSN_ADDRESSES_NEW (insn, -1);
6430
6431 /* Remove placeholder insn. */
6432 remove_insn (pool->pool_insn);
6433 }
6434
6435 /* Free all memory used by POOL. */
6436
6437 static void
6438 s390_free_pool (struct constant_pool *pool)
6439 {
6440 struct constant *c, *next;
6441 int i;
6442
6443 for (i = 0; i < NR_C_MODES; i++)
6444 for (c = pool->constants[i]; c; c = next)
6445 {
6446 next = c->next;
6447 free (c);
6448 }
6449
6450 for (c = pool->execute; c; c = next)
6451 {
6452 next = c->next;
6453 free (c);
6454 }
6455
6456 BITMAP_FREE (pool->insns);
6457 free (pool);
6458 }
6459
6460
6461 /* Collect main literal pool. Return NULL on overflow. */
6462
6463 static struct constant_pool *
6464 s390_mainpool_start (void)
6465 {
6466 struct constant_pool *pool;
6467 rtx insn;
6468
6469 pool = s390_alloc_pool ();
6470
6471 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6472 {
6473 if (GET_CODE (insn) == INSN
6474 && GET_CODE (PATTERN (insn)) == SET
6475 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC_VOLATILE
6476 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPECV_MAIN_POOL)
6477 {
6478 gcc_assert (!pool->pool_insn);
6479 pool->pool_insn = insn;
6480 }
6481
6482 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
6483 {
6484 s390_add_execute (pool, insn);
6485 }
6486 else if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6487 {
6488 rtx pool_ref = NULL_RTX;
6489 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6490 if (pool_ref)
6491 {
6492 rtx constant = get_pool_constant (pool_ref);
6493 enum machine_mode mode = get_pool_mode (pool_ref);
6494 s390_add_constant (pool, constant, mode);
6495 }
6496 }
6497
6498 /* If hot/cold partitioning is enabled we have to make sure that
6499 the literal pool is emitted in the same section where the
6500 initialization of the literal pool base pointer takes place.
6501 	 emit_pool_after is only used in the non-overflow case on
6502 	 non-Z CPUs where we can emit the literal pool at the end of the
6503 function body within the text section. */
6504 if (NOTE_P (insn)
6505 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS
6506 && !pool->emit_pool_after)
6507 pool->emit_pool_after = PREV_INSN (insn);
6508 }
6509
6510 gcc_assert (pool->pool_insn || pool->size == 0);
6511
6512 if (pool->size >= 4096)
6513 {
6514 /* We're going to chunkify the pool, so remove the main
6515 pool placeholder insn. */
6516 remove_insn (pool->pool_insn);
6517
6518 s390_free_pool (pool);
6519 pool = NULL;
6520 }
6521
6522   /* If the function ends with the section where the literal pool
6523      should be emitted, set the marker to its end. */
6524 if (pool && !pool->emit_pool_after)
6525 pool->emit_pool_after = get_last_insn ();
6526
6527 return pool;
6528 }
6529
6530 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6531 Modify the current function to output the pool constants as well as
6532 the pool register setup instruction. */
6533
6534 static void
6535 s390_mainpool_finish (struct constant_pool *pool)
6536 {
6537 rtx base_reg = cfun->machine->base_reg;
6538 rtx insn;
6539
6540 /* If the pool is empty, we're done. */
6541 if (pool->size == 0)
6542 {
6543 /* We don't actually need a base register after all. */
6544 cfun->machine->base_reg = NULL_RTX;
6545
6546 if (pool->pool_insn)
6547 remove_insn (pool->pool_insn);
6548 s390_free_pool (pool);
6549 return;
6550 }
6551
6552 /* We need correct insn addresses. */
6553 shorten_branches (get_insns ());
6554
6555 /* On zSeries, we use a LARL to load the pool register. The pool is
6556 located in the .rodata section, so we emit it after the function. */
6557 if (TARGET_CPU_ZARCH)
6558 {
6559 insn = gen_main_base_64 (base_reg, pool->label);
6560 insn = emit_insn_after (insn, pool->pool_insn);
6561 INSN_ADDRESSES_NEW (insn, -1);
6562 remove_insn (pool->pool_insn);
6563
6564 insn = get_last_insn ();
6565 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6566 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6567
6568 s390_dump_pool (pool, 0);
6569 }
6570
6571 /* On S/390, if the total size of the function's code plus literal pool
6572 does not exceed 4096 bytes, we use BASR to set up a function base
6573 pointer, and emit the literal pool at the end of the function. */
6574 else if (INSN_ADDRESSES (INSN_UID (pool->emit_pool_after))
6575 + pool->size + 8 /* alignment slop */ < 4096)
6576 {
6577 insn = gen_main_base_31_small (base_reg, pool->label);
6578 insn = emit_insn_after (insn, pool->pool_insn);
6579 INSN_ADDRESSES_NEW (insn, -1);
6580 remove_insn (pool->pool_insn);
6581
6582 insn = emit_label_after (pool->label, insn);
6583 INSN_ADDRESSES_NEW (insn, -1);
6584
6585 /* emit_pool_after will be set by s390_mainpool_start to the
6586 last insn of the section where the literal pool should be
6587 emitted. */
6588 insn = pool->emit_pool_after;
6589
6590 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6591 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6592
6593 s390_dump_pool (pool, 1);
6594 }
6595
6596 /* Otherwise, we emit an inline literal pool and use BASR to branch
6597 over it, setting up the pool register at the same time. */
6598 else
6599 {
6600 rtx pool_end = gen_label_rtx ();
6601
6602 insn = gen_main_base_31_large (base_reg, pool->label, pool_end);
6603 insn = emit_jump_insn_after (insn, pool->pool_insn);
6604 JUMP_LABEL (insn) = pool_end;
6605 INSN_ADDRESSES_NEW (insn, -1);
6606 remove_insn (pool->pool_insn);
6607
6608 insn = emit_label_after (pool->label, insn);
6609 INSN_ADDRESSES_NEW (insn, -1);
6610
6611 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6612 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6613
6614 insn = emit_label_after (pool_end, pool->pool_insn);
6615 INSN_ADDRESSES_NEW (insn, -1);
6616
6617 s390_dump_pool (pool, 1);
6618 }
6619
6620
6621 /* Replace all literal pool references. */
6622
6623 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6624 {
6625 if (INSN_P (insn))
6626 replace_ltrel_base (&PATTERN (insn));
6627
6628 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6629 {
6630 rtx addr, pool_ref = NULL_RTX;
6631 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6632 if (pool_ref)
6633 {
6634 if (s390_execute_label (insn))
6635 addr = s390_find_execute (pool, insn);
6636 else
6637 addr = s390_find_constant (pool, get_pool_constant (pool_ref),
6638 get_pool_mode (pool_ref));
6639
6640 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
6641 INSN_CODE (insn) = -1;
6642 }
6643 }
6644 }
6645
6646
6647 /* Free the pool. */
6648 s390_free_pool (pool);
6649 }
6650
6651 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6652 We have decided we cannot use this pool, so revert all changes
6653 to the current function that were done by s390_mainpool_start. */
6654 static void
6655 s390_mainpool_cancel (struct constant_pool *pool)
6656 {
6657 /* We didn't actually change the instruction stream, so simply
6658 free the pool memory. */
6659 s390_free_pool (pool);
6660 }
6661
6662
6663 /* Chunkify the literal pool. */
6664
6665 #define S390_POOL_CHUNK_MIN 0xc00
6666 #define S390_POOL_CHUNK_MAX 0xe00
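/* A rough sizing rationale: pool accesses via the base register use a
   12-bit unsigned displacement (0 .. 4095 bytes), which is also why
   s390_mainpool_start gives up on a single pool of 4096 bytes or more.
   The chunk limits above stay below that, leaving slack for alignment
   padding and for the base reload insns inserted later.  */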
6667
6668 static struct constant_pool *
6669 s390_chunkify_start (void)
6670 {
6671 struct constant_pool *curr_pool = NULL, *pool_list = NULL;
6672 int extra_size = 0;
6673 bitmap far_labels;
6674 rtx pending_ltrel = NULL_RTX;
6675 rtx insn;
6676
6677 rtx (*gen_reload_base) (rtx, rtx) =
6678 TARGET_CPU_ZARCH? gen_reload_base_64 : gen_reload_base_31;
6679
6680
6681 /* We need correct insn addresses. */
6682
6683 shorten_branches (get_insns ());
6684
6685 /* Scan all insns and move literals to pool chunks. */
6686
6687 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6688 {
6689 bool section_switch_p = false;
6690
6691 /* Check for pending LTREL_BASE. */
6692 if (INSN_P (insn))
6693 {
6694 rtx ltrel_base = find_ltrel_base (PATTERN (insn));
6695 if (ltrel_base)
6696 {
6697 gcc_assert (ltrel_base == pending_ltrel);
6698 pending_ltrel = NULL_RTX;
6699 }
6700 }
6701
6702 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
6703 {
6704 if (!curr_pool)
6705 curr_pool = s390_start_pool (&pool_list, insn);
6706
6707 s390_add_execute (curr_pool, insn);
6708 s390_add_pool_insn (curr_pool, insn);
6709 }
6710 else if (GET_CODE (insn) == INSN || CALL_P (insn))
6711 {
6712 rtx pool_ref = NULL_RTX;
6713 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6714 if (pool_ref)
6715 {
6716 rtx constant = get_pool_constant (pool_ref);
6717 enum machine_mode mode = get_pool_mode (pool_ref);
6718
6719 if (!curr_pool)
6720 curr_pool = s390_start_pool (&pool_list, insn);
6721
6722 s390_add_constant (curr_pool, constant, mode);
6723 s390_add_pool_insn (curr_pool, insn);
6724
6725 /* Don't split the pool chunk between a LTREL_OFFSET load
6726 and the corresponding LTREL_BASE. */
6727 if (GET_CODE (constant) == CONST
6728 && GET_CODE (XEXP (constant, 0)) == UNSPEC
6729 && XINT (XEXP (constant, 0), 1) == UNSPEC_LTREL_OFFSET)
6730 {
6731 gcc_assert (!pending_ltrel);
6732 pending_ltrel = pool_ref;
6733 }
6734 }
6735 }
6736
6737 if (GET_CODE (insn) == JUMP_INSN || GET_CODE (insn) == CODE_LABEL)
6738 {
6739 if (curr_pool)
6740 s390_add_pool_insn (curr_pool, insn);
6741 /* An LTREL_BASE must follow within the same basic block. */
6742 gcc_assert (!pending_ltrel);
6743 }
6744
6745 if (NOTE_P (insn))
6746 switch (NOTE_KIND (insn))
6747 {
6748 case NOTE_INSN_SWITCH_TEXT_SECTIONS:
6749 section_switch_p = true;
6750 break;
6751 case NOTE_INSN_VAR_LOCATION:
6752 case NOTE_INSN_CALL_ARG_LOCATION:
6753 continue;
6754 default:
6755 break;
6756 }
6757
6758 if (!curr_pool
6759 || INSN_ADDRESSES_SIZE () <= (size_t) INSN_UID (insn)
6760 || INSN_ADDRESSES (INSN_UID (insn)) == -1)
6761 continue;
6762
6763 if (TARGET_CPU_ZARCH)
6764 {
6765 if (curr_pool->size < S390_POOL_CHUNK_MAX)
6766 continue;
6767
6768 s390_end_pool (curr_pool, NULL_RTX);
6769 curr_pool = NULL;
6770 }
6771 else
6772 {
6773 int chunk_size = INSN_ADDRESSES (INSN_UID (insn))
6774 - INSN_ADDRESSES (INSN_UID (curr_pool->first_insn))
6775 + extra_size;
6776
6777 /* We will later have to insert base register reload insns.
6778 Those will have an effect on code size, which we need to
6779 consider here. This calculation makes rather pessimistic
6780 worst-case assumptions. */
6781 if (GET_CODE (insn) == CODE_LABEL)
6782 extra_size += 6;
6783
6784 if (chunk_size < S390_POOL_CHUNK_MIN
6785 && curr_pool->size < S390_POOL_CHUNK_MIN
6786 && !section_switch_p)
6787 continue;
6788
6789 /* Pool chunks can only be inserted after BARRIERs ... */
6790 if (GET_CODE (insn) == BARRIER)
6791 {
6792 s390_end_pool (curr_pool, insn);
6793 curr_pool = NULL;
6794 extra_size = 0;
6795 }
6796
6797 /* ... so if we don't find one in time, create one. */
6798 else if (chunk_size > S390_POOL_CHUNK_MAX
6799 || curr_pool->size > S390_POOL_CHUNK_MAX
6800 || section_switch_p)
6801 {
6802 rtx label, jump, barrier, next, prev;
6803
6804 if (!section_switch_p)
6805 {
6806 /* We can insert the barrier only after a 'real' insn. */
6807 if (GET_CODE (insn) != INSN && GET_CODE (insn) != CALL_INSN)
6808 continue;
6809 if (get_attr_length (insn) == 0)
6810 continue;
6811 /* Don't separate LTREL_BASE from the corresponding
6812 LTREL_OFFSET load. */
6813 if (pending_ltrel)
6814 continue;
6815 next = insn;
6816 do
6817 {
6818 insn = next;
6819 next = NEXT_INSN (insn);
6820 }
6821 while (next
6822 && NOTE_P (next)
6823 && (NOTE_KIND (next) == NOTE_INSN_VAR_LOCATION
6824 || NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION));
6825 }
6826 else
6827 {
6828 gcc_assert (!pending_ltrel);
6829
6830 /* The old pool has to end before the section switch
6831 note in order to make it part of the current
6832 section. */
6833 insn = PREV_INSN (insn);
6834 }
6835
6836 label = gen_label_rtx ();
6837 prev = insn;
6838 if (prev && NOTE_P (prev))
6839 prev = prev_nonnote_insn (prev);
6840 if (prev)
6841 jump = emit_jump_insn_after_setloc (gen_jump (label), insn,
6842 INSN_LOCATOR (prev));
6843 else
6844 jump = emit_jump_insn_after_noloc (gen_jump (label), insn);
6845 barrier = emit_barrier_after (jump);
6846 insn = emit_label_after (label, barrier);
6847 JUMP_LABEL (jump) = label;
6848 LABEL_NUSES (label) = 1;
6849
6850 INSN_ADDRESSES_NEW (jump, -1);
6851 INSN_ADDRESSES_NEW (barrier, -1);
6852 INSN_ADDRESSES_NEW (insn, -1);
6853
6854 s390_end_pool (curr_pool, barrier);
6855 curr_pool = NULL;
6856 extra_size = 0;
6857 }
6858 }
6859 }
6860
6861 if (curr_pool)
6862 s390_end_pool (curr_pool, NULL_RTX);
6863 gcc_assert (!pending_ltrel);
6864
6865 /* Find all labels that are branched into
6866 from an insn belonging to a different chunk. */
6867
6868 far_labels = BITMAP_ALLOC (NULL);
6869
6870 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6871 {
6872 /* Labels marked with LABEL_PRESERVE_P can be target
6873 of non-local jumps, so we have to mark them.
6874 The same holds for named labels.
6875
6876 Don't do that, however, if it is the label before
6877 a jump table. */
6878
6879 if (GET_CODE (insn) == CODE_LABEL
6880 && (LABEL_PRESERVE_P (insn) || LABEL_NAME (insn)))
6881 {
6882 rtx vec_insn = next_real_insn (insn);
6883 rtx vec_pat = vec_insn && GET_CODE (vec_insn) == JUMP_INSN ?
6884 PATTERN (vec_insn) : NULL_RTX;
6885 if (!vec_pat
6886 || !(GET_CODE (vec_pat) == ADDR_VEC
6887 || GET_CODE (vec_pat) == ADDR_DIFF_VEC))
6888 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (insn));
6889 }
6890
6891 /* If we have a direct jump (conditional or unconditional)
6892 or a casesi jump, check all potential targets. */
6893 else if (GET_CODE (insn) == JUMP_INSN)
6894 {
6895 rtx pat = PATTERN (insn);
6896 if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 2)
6897 pat = XVECEXP (pat, 0, 0);
6898
6899 if (GET_CODE (pat) == SET)
6900 {
6901 rtx label = JUMP_LABEL (insn);
6902 if (label)
6903 {
6904 if (s390_find_pool (pool_list, label)
6905 != s390_find_pool (pool_list, insn))
6906 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
6907 }
6908 }
6909 else if (GET_CODE (pat) == PARALLEL
6910 && XVECLEN (pat, 0) == 2
6911 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
6912 && GET_CODE (XVECEXP (pat, 0, 1)) == USE
6913 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == LABEL_REF)
6914 {
6915 /* Find the jump table used by this casesi jump. */
6916 rtx vec_label = XEXP (XEXP (XVECEXP (pat, 0, 1), 0), 0);
6917 rtx vec_insn = next_real_insn (vec_label);
6918 rtx vec_pat = vec_insn && GET_CODE (vec_insn) == JUMP_INSN ?
6919 PATTERN (vec_insn) : NULL_RTX;
6920 if (vec_pat
6921 && (GET_CODE (vec_pat) == ADDR_VEC
6922 || GET_CODE (vec_pat) == ADDR_DIFF_VEC))
6923 {
6924 int i, diff_p = GET_CODE (vec_pat) == ADDR_DIFF_VEC;
6925
6926 for (i = 0; i < XVECLEN (vec_pat, diff_p); i++)
6927 {
6928 rtx label = XEXP (XVECEXP (vec_pat, diff_p, i), 0);
6929
6930 if (s390_find_pool (pool_list, label)
6931 != s390_find_pool (pool_list, insn))
6932 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
6933 }
6934 }
6935 }
6936 }
6937 }
6938
6939 /* Insert base register reload insns before every pool. */
6940
6941 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
6942 {
6943 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
6944 curr_pool->label);
6945 rtx insn = curr_pool->first_insn;
6946 INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
6947 }
6948
6949 /* Insert base register reload insns at every far label. */
6950
6951 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6952 if (GET_CODE (insn) == CODE_LABEL
6953 && bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
6954 {
6955 struct constant_pool *pool = s390_find_pool (pool_list, insn);
6956 if (pool)
6957 {
6958 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
6959 pool->label);
6960 INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
6961 }
6962 }
6963
6964
6965 BITMAP_FREE (far_labels);
6966
6967
6968 /* Recompute insn addresses. */
6969
6970 init_insn_lengths ();
6971 shorten_branches (get_insns ());
6972
6973 return pool_list;
6974 }
6975
6976 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
6977 After we have decided to use this list, finish implementing
6978 all changes to the current function as required. */
6979
6980 static void
6981 s390_chunkify_finish (struct constant_pool *pool_list)
6982 {
6983 struct constant_pool *curr_pool = NULL;
6984 rtx insn;
6985
6986
6987 /* Replace all literal pool references. */
6988
6989 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6990 {
6991 if (INSN_P (insn))
6992 replace_ltrel_base (&PATTERN (insn));
6993
6994 curr_pool = s390_find_pool (pool_list, insn);
6995 if (!curr_pool)
6996 continue;
6997
6998 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6999 {
7000 rtx addr, pool_ref = NULL_RTX;
7001 find_constant_pool_ref (PATTERN (insn), &pool_ref);
7002 if (pool_ref)
7003 {
7004 if (s390_execute_label (insn))
7005 addr = s390_find_execute (curr_pool, insn);
7006 else
7007 addr = s390_find_constant (curr_pool,
7008 get_pool_constant (pool_ref),
7009 get_pool_mode (pool_ref));
7010
7011 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
7012 INSN_CODE (insn) = -1;
7013 }
7014 }
7015 }
7016
7017 /* Dump out all literal pools. */
7018
7019 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
7020 s390_dump_pool (curr_pool, 0);
7021
7022 /* Free pool list. */
7023
7024 while (pool_list)
7025 {
7026 struct constant_pool *next = pool_list->next;
7027 s390_free_pool (pool_list);
7028 pool_list = next;
7029 }
7030 }
7031
7032 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
7033 We have decided we cannot use this list, so revert all changes
7034 to the current function that were done by s390_chunkify_start. */
7035
7036 static void
7037 s390_chunkify_cancel (struct constant_pool *pool_list)
7038 {
7039 struct constant_pool *curr_pool = NULL;
7040 rtx insn;
7041
7042 /* Remove all pool placeholder insns. */
7043
7044 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
7045 {
7046 /* Did we insert an extra barrier? Remove it. */
7047 rtx barrier = PREV_INSN (curr_pool->pool_insn);
7048 rtx jump = barrier? PREV_INSN (barrier) : NULL_RTX;
7049 rtx label = NEXT_INSN (curr_pool->pool_insn);
7050
7051 if (jump && GET_CODE (jump) == JUMP_INSN
7052 && barrier && GET_CODE (barrier) == BARRIER
7053 && label && GET_CODE (label) == CODE_LABEL
7054 && GET_CODE (PATTERN (jump)) == SET
7055 && SET_DEST (PATTERN (jump)) == pc_rtx
7056 && GET_CODE (SET_SRC (PATTERN (jump))) == LABEL_REF
7057 && XEXP (SET_SRC (PATTERN (jump)), 0) == label)
7058 {
7059 remove_insn (jump);
7060 remove_insn (barrier);
7061 remove_insn (label);
7062 }
7063
7064 remove_insn (curr_pool->pool_insn);
7065 }
7066
7067 /* Remove all base register reload insns. */
7068
7069 for (insn = get_insns (); insn; )
7070 {
7071 rtx next_insn = NEXT_INSN (insn);
7072
7073 if (GET_CODE (insn) == INSN
7074 && GET_CODE (PATTERN (insn)) == SET
7075 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
7076 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_RELOAD_BASE)
7077 remove_insn (insn);
7078
7079 insn = next_insn;
7080 }
7081
7082 /* Free pool list. */
7083
7084 while (pool_list)
7085 {
7086 struct constant_pool *next = pool_list->next;
7087 s390_free_pool (pool_list);
7088 pool_list = next;
7089 }
7090 }
7091
7092 /* Output the constant pool entry EXP in mode MODE with alignment ALIGN. */
7093
7094 void
7095 s390_output_pool_entry (rtx exp, enum machine_mode mode, unsigned int align)
7096 {
7097 REAL_VALUE_TYPE r;
7098
7099 switch (GET_MODE_CLASS (mode))
7100 {
7101 case MODE_FLOAT:
7102 case MODE_DECIMAL_FLOAT:
7103 gcc_assert (GET_CODE (exp) == CONST_DOUBLE);
7104
7105 REAL_VALUE_FROM_CONST_DOUBLE (r, exp);
7106 assemble_real (r, mode, align);
7107 break;
7108
7109 case MODE_INT:
7110 assemble_integer (exp, GET_MODE_SIZE (mode), align, 1);
7111 mark_symbol_refs_as_used (exp);
7112 break;
7113
7114 default:
7115 gcc_unreachable ();
7116 }
7117 }
7118
7119
7120 /* Return an RTL expression representing the value of the return address
7121 for the frame COUNT steps up from the current frame. FRAME is the
7122 frame pointer of that frame. */
7123
7124 rtx
7125 s390_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
7126 {
7127 int offset;
7128 rtx addr;
7129
7130 /* Without backchain, we fail for all but the current frame. */
7131
7132 if (!TARGET_BACKCHAIN && count > 0)
7133 return NULL_RTX;
7134
7135 /* For the current frame, we need to make sure the initial
7136 value of RETURN_REGNUM is actually saved. */
7137
7138 if (count == 0)
7139 {
7140 /* On non-z architectures branch splitting could overwrite r14. */
7141 if (TARGET_CPU_ZARCH)
7142 return get_hard_reg_initial_val (Pmode, RETURN_REGNUM);
7143 else
7144 {
7145 cfun_frame_layout.save_return_addr_p = true;
7146 return gen_rtx_MEM (Pmode, return_address_pointer_rtx);
7147 }
7148 }
7149
7150 if (TARGET_PACKED_STACK)
7151 offset = -2 * UNITS_PER_LONG;
7152 else
7153 offset = RETURN_REGNUM * UNITS_PER_LONG;
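  /* E.g. with the standard layout on 64-bit this yields FRAME + 14 * 8
     bytes, on 31-bit FRAME + 14 * 4 bytes (RETURN_REGNUM is r14).  */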
7154
7155 addr = plus_constant (Pmode, frame, offset);
7156 addr = memory_address (Pmode, addr);
7157 return gen_rtx_MEM (Pmode, addr);
7158 }
7159
7160 /* Return an RTL expression representing the back chain stored in
7161 the current stack frame. */
7162
7163 rtx
7164 s390_back_chain_rtx (void)
7165 {
7166 rtx chain;
7167
7168 gcc_assert (TARGET_BACKCHAIN);
7169
7170 if (TARGET_PACKED_STACK)
7171 chain = plus_constant (Pmode, stack_pointer_rtx,
7172 STACK_POINTER_OFFSET - UNITS_PER_LONG);
7173 else
7174 chain = stack_pointer_rtx;
7175
7176 chain = gen_rtx_MEM (Pmode, chain);
7177 return chain;
7178 }
7179
7180 /* Find first call clobbered register unused in a function.
7181 This could be used as base register in a leaf function
7182 or for holding the return address before epilogue. */
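/* Only GPRs 0 through 5 are scanned, these being the call-clobbered
   general registers on s390; if all of them are live, GPR 0 is returned
   as a fallback.  */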
7183
7184 static int
7185 find_unused_clobbered_reg (void)
7186 {
7187 int i;
7188 for (i = 0; i < 6; i++)
7189 if (!df_regs_ever_live_p (i))
7190 return i;
7191 return 0;
7192 }
7193
7194
7195 /* Helper function for s390_regs_ever_clobbered. Sets the fields in DATA for all
7196 clobbered hard regs in SETREG. */
7197
7198 static void
7199 s390_reg_clobbered_rtx (rtx setreg, const_rtx set_insn ATTRIBUTE_UNUSED, void *data)
7200 {
7201 int *regs_ever_clobbered = (int *)data;
7202 unsigned int i, regno;
7203 enum machine_mode mode = GET_MODE (setreg);
7204
7205 if (GET_CODE (setreg) == SUBREG)
7206 {
7207 rtx inner = SUBREG_REG (setreg);
7208 if (!GENERAL_REG_P (inner))
7209 return;
7210 regno = subreg_regno (setreg);
7211 }
7212 else if (GENERAL_REG_P (setreg))
7213 regno = REGNO (setreg);
7214 else
7215 return;
7216
7217 for (i = regno;
7218 i < regno + HARD_REGNO_NREGS (regno, mode);
7219 i++)
7220 regs_ever_clobbered[i] = 1;
7221 }
7222
7223 /* Walks through all basic blocks of the current function looking
7224 for clobbered hard regs using s390_reg_clobbered_rtx. The fields
7225 of the passed integer array REGS_EVER_CLOBBERED are set to one for
7226 each of those regs. */
7227
7228 static void
7229 s390_regs_ever_clobbered (int *regs_ever_clobbered)
7230 {
7231 basic_block cur_bb;
7232 rtx cur_insn;
7233 unsigned int i;
7234
7235 memset (regs_ever_clobbered, 0, 16 * sizeof (int));
7236
7237 /* For non-leaf functions we have to consider all call clobbered regs to be
7238 clobbered. */
7239 if (!crtl->is_leaf)
7240 {
7241 for (i = 0; i < 16; i++)
7242 regs_ever_clobbered[i] = call_really_used_regs[i];
7243 }
7244
7245 /* Make the "magic" eh_return registers live if necessary. For regs_ever_live
7246 this work is done by liveness analysis (mark_regs_live_at_end).
7247 Special care is needed for functions containing landing pads. Landing pads
7248 may use the eh registers, but the code which sets these registers is not
7249 contained in that function. Hence s390_regs_ever_clobbered is not able to
7250 deal with this automatically. */
7251 if (crtl->calls_eh_return || cfun->machine->has_landing_pad_p)
7252 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM ; i++)
7253 if (crtl->calls_eh_return
7254 || (cfun->machine->has_landing_pad_p
7255 && df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
7256 regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;
7257
7258 /* For nonlocal gotos all call-saved registers have to be saved.
7259 This flag is also set for the unwinding code in libgcc.
7260 See expand_builtin_unwind_init. For regs_ever_live this is done by
7261 reload. */
7262 if (cfun->has_nonlocal_label)
7263 for (i = 0; i < 16; i++)
7264 if (!call_really_used_regs[i])
7265 regs_ever_clobbered[i] = 1;
7266
7267 FOR_EACH_BB (cur_bb)
7268 {
7269 FOR_BB_INSNS (cur_bb, cur_insn)
7270 {
7271 if (INSN_P (cur_insn))
7272 note_stores (PATTERN (cur_insn),
7273 s390_reg_clobbered_rtx,
7274 regs_ever_clobbered);
7275 }
7276 }
7277 }
7278
7279 /* Determine the frame area which actually has to be accessed
7280 in the function epilogue. The values are stored at the
7281 given pointers AREA_BOTTOM (address of the lowest used stack
7282 address) and AREA_TOP (address of the first item which does
7283 not belong to the stack frame). */
7284
7285 static void
7286 s390_frame_area (int *area_bottom, int *area_top)
7287 {
7288 int b, t;
7289 int i;
7290
7291 b = INT_MAX;
7292 t = INT_MIN;
7293
7294 if (cfun_frame_layout.first_restore_gpr != -1)
7295 {
7296 b = (cfun_frame_layout.gprs_offset
7297 + cfun_frame_layout.first_restore_gpr * UNITS_PER_LONG);
7298 t = b + (cfun_frame_layout.last_restore_gpr
7299 - cfun_frame_layout.first_restore_gpr + 1) * UNITS_PER_LONG;
7300 }
7301
7302 if (TARGET_64BIT && cfun_save_high_fprs_p)
7303 {
7304 b = MIN (b, cfun_frame_layout.f8_offset);
7305 t = MAX (t, (cfun_frame_layout.f8_offset
7306 + cfun_frame_layout.high_fprs * 8));
7307 }
7308
7309 if (!TARGET_64BIT)
7310 for (i = 2; i < 4; i++)
7311 if (cfun_fpr_bit_p (i))
7312 {
7313 b = MIN (b, cfun_frame_layout.f4_offset + (i - 2) * 8);
7314 t = MAX (t, cfun_frame_layout.f4_offset + (i - 1) * 8);
7315 }
7316
7317 *area_bottom = b;
7318 *area_top = t;
7319 }
7320
7321 /* Fill cfun->machine with info about register usage of current function.
7322 Return in CLOBBERED_REGS which GPRs are currently considered set. */
7323
7324 static void
7325 s390_register_info (int clobbered_regs[])
7326 {
7327 int i, j;
7328
7329 /* fprs 8 - 15 are call saved for 64 Bit ABI. */
7330 cfun_frame_layout.fpr_bitmap = 0;
7331 cfun_frame_layout.high_fprs = 0;
7332 if (TARGET_64BIT)
7333 for (i = 24; i < 32; i++)
7334 if (df_regs_ever_live_p (i) && !global_regs[i])
7335 {
7336 cfun_set_fpr_bit (i - 16);
7337 cfun_frame_layout.high_fprs++;
7338 }
7339
7340 /* Find first and last gpr to be saved. We trust regs_ever_live
7341 data, except that we don't save and restore global registers.
7342
7343 Also, all registers with special meaning to the compiler need
7344      to be handled specially. */
7345
7346 s390_regs_ever_clobbered (clobbered_regs);
7347
7348 for (i = 0; i < 16; i++)
7349 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i] && !fixed_regs[i];
7350
7351 if (frame_pointer_needed)
7352 clobbered_regs[HARD_FRAME_POINTER_REGNUM] = 1;
7353
7354 if (flag_pic)
7355 clobbered_regs[PIC_OFFSET_TABLE_REGNUM]
7356 |= df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM);
7357
7358 clobbered_regs[BASE_REGNUM]
7359 |= (cfun->machine->base_reg
7360 && REGNO (cfun->machine->base_reg) == BASE_REGNUM);
7361
7362 clobbered_regs[RETURN_REGNUM]
7363 |= (!crtl->is_leaf
7364 || TARGET_TPF_PROFILING
7365 || cfun->machine->split_branches_pending_p
7366 || cfun_frame_layout.save_return_addr_p
7367 || crtl->calls_eh_return
7368 || cfun->stdarg);
7369
7370 clobbered_regs[STACK_POINTER_REGNUM]
7371 |= (!crtl->is_leaf
7372 || TARGET_TPF_PROFILING
7373 || cfun_save_high_fprs_p
7374 || get_frame_size () > 0
7375 || cfun->calls_alloca
7376 || cfun->stdarg);
7377
7378 for (i = 6; i < 16; i++)
7379 if (df_regs_ever_live_p (i) || clobbered_regs[i])
7380 break;
7381 for (j = 15; j > i; j--)
7382 if (df_regs_ever_live_p (j) || clobbered_regs[j])
7383 break;
7384
7385 if (i == 16)
7386 {
7387 /* Nothing to save/restore. */
7388 cfun_frame_layout.first_save_gpr_slot = -1;
7389 cfun_frame_layout.last_save_gpr_slot = -1;
7390 cfun_frame_layout.first_save_gpr = -1;
7391 cfun_frame_layout.first_restore_gpr = -1;
7392 cfun_frame_layout.last_save_gpr = -1;
7393 cfun_frame_layout.last_restore_gpr = -1;
7394 }
7395 else
7396 {
7397 /* Save slots for gprs from i to j. */
7398 cfun_frame_layout.first_save_gpr_slot = i;
7399 cfun_frame_layout.last_save_gpr_slot = j;
7400
7401 for (i = cfun_frame_layout.first_save_gpr_slot;
7402 i < cfun_frame_layout.last_save_gpr_slot + 1;
7403 i++)
7404 if (clobbered_regs[i])
7405 break;
7406
7407 for (j = cfun_frame_layout.last_save_gpr_slot; j > i; j--)
7408 if (clobbered_regs[j])
7409 break;
7410
7411 if (i == cfun_frame_layout.last_save_gpr_slot + 1)
7412 {
7413 /* Nothing to save/restore. */
7414 cfun_frame_layout.first_save_gpr = -1;
7415 cfun_frame_layout.first_restore_gpr = -1;
7416 cfun_frame_layout.last_save_gpr = -1;
7417 cfun_frame_layout.last_restore_gpr = -1;
7418 }
7419 else
7420 {
7421 /* Save / Restore from gpr i to j. */
7422 cfun_frame_layout.first_save_gpr = i;
7423 cfun_frame_layout.first_restore_gpr = i;
7424 cfun_frame_layout.last_save_gpr = j;
7425 cfun_frame_layout.last_restore_gpr = j;
7426 }
7427 }
7428
7429 if (cfun->stdarg)
7430 {
7431 /* Varargs functions need to save gprs 2 to 6. */
7432 if (cfun->va_list_gpr_size
7433 && crtl->args.info.gprs < GP_ARG_NUM_REG)
7434 {
7435 int min_gpr = crtl->args.info.gprs;
7436 int max_gpr = min_gpr + cfun->va_list_gpr_size;
7437 if (max_gpr > GP_ARG_NUM_REG)
7438 max_gpr = GP_ARG_NUM_REG;
7439
7440 if (cfun_frame_layout.first_save_gpr == -1
7441 || cfun_frame_layout.first_save_gpr > 2 + min_gpr)
7442 {
7443 cfun_frame_layout.first_save_gpr = 2 + min_gpr;
7444 cfun_frame_layout.first_save_gpr_slot = 2 + min_gpr;
7445 }
7446
7447 if (cfun_frame_layout.last_save_gpr == -1
7448 || cfun_frame_layout.last_save_gpr < 2 + max_gpr - 1)
7449 {
7450 cfun_frame_layout.last_save_gpr = 2 + max_gpr - 1;
7451 cfun_frame_layout.last_save_gpr_slot = 2 + max_gpr - 1;
7452 }
7453 }
7454
7455 /* Mark f0, f2 for 31 bit and f0-f4 for 64 bit to be saved. */
7456 if (TARGET_HARD_FLOAT && cfun->va_list_fpr_size
7457 && crtl->args.info.fprs < FP_ARG_NUM_REG)
7458 {
7459 int min_fpr = crtl->args.info.fprs;
7460 int max_fpr = min_fpr + cfun->va_list_fpr_size;
7461 if (max_fpr > FP_ARG_NUM_REG)
7462 max_fpr = FP_ARG_NUM_REG;
7463
7464 /* ??? This is currently required to ensure proper location
7465 of the fpr save slots within the va_list save area. */
7466 if (TARGET_PACKED_STACK)
7467 min_fpr = 0;
7468
7469 for (i = min_fpr; i < max_fpr; i++)
7470 cfun_set_fpr_bit (i);
7471 }
7472 }
7473
7474 if (!TARGET_64BIT)
7475 for (i = 2; i < 4; i++)
7476 if (df_regs_ever_live_p (i + 16) && !global_regs[i + 16])
7477 cfun_set_fpr_bit (i);
7478 }
7479
7480 /* Fill cfun->machine with info about frame of current function. */
7481
7482 static void
7483 s390_frame_info (void)
7484 {
7485 int i;
7486
7487 cfun_frame_layout.frame_size = get_frame_size ();
7488 if (!TARGET_64BIT && cfun_frame_layout.frame_size > 0x7fff0000)
7489 fatal_error ("total size of local variables exceeds architecture limit");
7490
7491 if (!TARGET_PACKED_STACK)
7492 {
7493 cfun_frame_layout.backchain_offset = 0;
7494 cfun_frame_layout.f0_offset = 16 * UNITS_PER_LONG;
7495 cfun_frame_layout.f4_offset = cfun_frame_layout.f0_offset + 2 * 8;
7496 cfun_frame_layout.f8_offset = -cfun_frame_layout.high_fprs * 8;
7497 cfun_frame_layout.gprs_offset = (cfun_frame_layout.first_save_gpr_slot
7498 * UNITS_PER_LONG);
7499 }
7500 else if (TARGET_BACKCHAIN) /* kernel stack layout */
7501 {
7502 cfun_frame_layout.backchain_offset = (STACK_POINTER_OFFSET
7503 - UNITS_PER_LONG);
7504 cfun_frame_layout.gprs_offset
7505 = (cfun_frame_layout.backchain_offset
7506 - (STACK_POINTER_REGNUM - cfun_frame_layout.first_save_gpr_slot + 1)
7507 * UNITS_PER_LONG);
7508
7509 if (TARGET_64BIT)
7510 {
7511 cfun_frame_layout.f4_offset
7512 = (cfun_frame_layout.gprs_offset
7513 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7514
7515 cfun_frame_layout.f0_offset
7516 = (cfun_frame_layout.f4_offset
7517 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7518 }
7519 else
7520 {
7521 	  /* On 31 bit we have to take care of the alignment of the
7522 	     floating point regs to provide fastest access. */
7523 cfun_frame_layout.f0_offset
7524 = ((cfun_frame_layout.gprs_offset
7525 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1))
7526 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7527
7528 cfun_frame_layout.f4_offset
7529 = (cfun_frame_layout.f0_offset
7530 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7531 }
7532 }
7533 else /* no backchain */
7534 {
7535 cfun_frame_layout.f4_offset
7536 = (STACK_POINTER_OFFSET
7537 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7538
7539 cfun_frame_layout.f0_offset
7540 = (cfun_frame_layout.f4_offset
7541 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7542
7543 cfun_frame_layout.gprs_offset
7544 = cfun_frame_layout.f0_offset - cfun_gprs_save_area_size;
7545 }
7546
7547 if (crtl->is_leaf
7548 && !TARGET_TPF_PROFILING
7549 && cfun_frame_layout.frame_size == 0
7550 && !cfun_save_high_fprs_p
7551 && !cfun->calls_alloca
7552 && !cfun->stdarg)
7553 return;
7554
7555 if (!TARGET_PACKED_STACK)
7556 cfun_frame_layout.frame_size += (STACK_POINTER_OFFSET
7557 + crtl->outgoing_args_size
7558 + cfun_frame_layout.high_fprs * 8);
7559 else
7560 {
7561 if (TARGET_BACKCHAIN)
7562 cfun_frame_layout.frame_size += UNITS_PER_LONG;
7563
7564 /* No alignment trouble here because f8-f15 are only saved under
7565 64 bit. */
7566 cfun_frame_layout.f8_offset = (MIN (MIN (cfun_frame_layout.f0_offset,
7567 cfun_frame_layout.f4_offset),
7568 cfun_frame_layout.gprs_offset)
7569 - cfun_frame_layout.high_fprs * 8);
7570
7571 cfun_frame_layout.frame_size += cfun_frame_layout.high_fprs * 8;
7572
7573 for (i = 0; i < 8; i++)
7574 if (cfun_fpr_bit_p (i))
7575 cfun_frame_layout.frame_size += 8;
7576
7577 cfun_frame_layout.frame_size += cfun_gprs_save_area_size;
7578
7579       /* If an odd number of GPRs has to be saved under 31 bit, we have to
7580 	 adjust the frame size to maintain 8-byte alignment of stack frames. */
7581 cfun_frame_layout.frame_size = ((cfun_frame_layout.frame_size +
7582 STACK_BOUNDARY / BITS_PER_UNIT - 1)
7583 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1));
7584
7585 cfun_frame_layout.frame_size += crtl->outgoing_args_size;
7586 }
7587 }
7588
7589 /* Generate frame layout. Fills in register and frame data for the current
7590 function in cfun->machine. This routine can be called multiple times;
7591 it will re-do the complete frame layout every time. */
7592
7593 static void
7594 s390_init_frame_layout (void)
7595 {
7596 HOST_WIDE_INT frame_size;
7597 int base_used;
7598 int clobbered_regs[16];
7599
7600 /* On S/390 machines, we may need to perform branch splitting, which
7601 will require both base and return address register. We have no
7602 choice but to assume we're going to need them until right at the
7603 end of the machine dependent reorg phase. */
7604 if (!TARGET_CPU_ZARCH)
7605 cfun->machine->split_branches_pending_p = true;
7606
7607 do
7608 {
7609 frame_size = cfun_frame_layout.frame_size;
7610
7611 /* Try to predict whether we'll need the base register. */
7612 base_used = cfun->machine->split_branches_pending_p
7613 || crtl->uses_const_pool
7614 || (!DISP_IN_RANGE (frame_size)
7615 && !CONST_OK_FOR_K (frame_size));
7616
7617 /* Decide which register to use as literal pool base. In small
7618 leaf functions, try to use an unused call-clobbered register
7619 as base register to avoid save/restore overhead. */
7620 if (!base_used)
7621 cfun->machine->base_reg = NULL_RTX;
7622 else if (crtl->is_leaf && !df_regs_ever_live_p (5))
7623 cfun->machine->base_reg = gen_rtx_REG (Pmode, 5);
7624 else
7625 cfun->machine->base_reg = gen_rtx_REG (Pmode, BASE_REGNUM);
7626
7627 s390_register_info (clobbered_regs);
7628 s390_frame_info ();
7629 }
7630 while (frame_size != cfun_frame_layout.frame_size);
7631 }
7632
7633 /* Update frame layout. Recompute actual register save data based on
7634 current info and update regs_ever_live for the special registers.
7635 May be called multiple times, but may never cause *more* registers
7636 to be saved than s390_init_frame_layout allocated room for. */
7637
7638 static void
7639 s390_update_frame_layout (void)
7640 {
7641 int clobbered_regs[16];
7642
7643 s390_register_info (clobbered_regs);
7644
7645 df_set_regs_ever_live (BASE_REGNUM,
7646 clobbered_regs[BASE_REGNUM] ? true : false);
7647 df_set_regs_ever_live (RETURN_REGNUM,
7648 clobbered_regs[RETURN_REGNUM] ? true : false);
7649 df_set_regs_ever_live (STACK_POINTER_REGNUM,
7650 clobbered_regs[STACK_POINTER_REGNUM] ? true : false);
7651
7652 if (cfun->machine->base_reg)
7653 df_set_regs_ever_live (REGNO (cfun->machine->base_reg), true);
7654 }
7655
7656 /* Return true if it is legal to put a value with MODE into REGNO. */
7657
7658 bool
7659 s390_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
7660 {
7661 switch (REGNO_REG_CLASS (regno))
7662 {
7663 case FP_REGS:
7664 if (REGNO_PAIR_OK (regno, mode))
7665 {
7666 if (mode == SImode || mode == DImode)
7667 return true;
7668
7669 if (FLOAT_MODE_P (mode) && GET_MODE_CLASS (mode) != MODE_VECTOR_FLOAT)
7670 return true;
7671 }
7672 break;
7673 case ADDR_REGS:
7674 if (FRAME_REGNO_P (regno) && mode == Pmode)
7675 return true;
7676
7677 /* fallthrough */
7678 case GENERAL_REGS:
7679 if (REGNO_PAIR_OK (regno, mode))
7680 {
7681 if (TARGET_ZARCH
7682 || (mode != TFmode && mode != TCmode && mode != TDmode))
7683 return true;
7684 }
7685 break;
7686 case CC_REGS:
7687 if (GET_MODE_CLASS (mode) == MODE_CC)
7688 return true;
7689 break;
7690 case ACCESS_REGS:
7691 if (REGNO_PAIR_OK (regno, mode))
7692 {
7693 if (mode == SImode || mode == Pmode)
7694 return true;
7695 }
7696 break;
7697 default:
7698 return false;
7699 }
7700
7701 return false;
7702 }
7703
7704 /* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */
7705
7706 bool
7707 s390_hard_regno_rename_ok (unsigned int old_reg, unsigned int new_reg)
7708 {
7709 /* Once we've decided upon a register to use as base register, it must
7710 no longer be used for any other purpose. */
7711 if (cfun->machine->base_reg)
7712 if (REGNO (cfun->machine->base_reg) == old_reg
7713 || REGNO (cfun->machine->base_reg) == new_reg)
7714 return false;
7715
7716 return true;
7717 }
7718
7719 /* Maximum number of registers to represent a value of mode MODE
7720 in a register of class RCLASS. */
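/* A couple of worked examples for the FP_REGS case below: DFmode
   (8 bytes) needs one FPR, TFmode (16 bytes) needs a register pair,
   and a complex mode such as TCmode (two 16-byte parts) needs
   2 * 2 = 4 FPRs.  */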
7721
7722 int
7723 s390_class_max_nregs (enum reg_class rclass, enum machine_mode mode)
7724 {
7725 switch (rclass)
7726 {
7727 case FP_REGS:
7728 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
7729 return 2 * ((GET_MODE_SIZE (mode) / 2 + 8 - 1) / 8);
7730 else
7731 return (GET_MODE_SIZE (mode) + 8 - 1) / 8;
7732 case ACCESS_REGS:
7733 return (GET_MODE_SIZE (mode) + 4 - 1) / 4;
7734 default:
7735 break;
7736 }
7737 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
7738 }
7739
7740 /* Return true if register FROM can be eliminated via register TO. */
7741
7742 static bool
7743 s390_can_eliminate (const int from, const int to)
7744 {
7745 /* On zSeries machines, we have not marked the base register as fixed.
7746 Instead, we have an elimination rule BASE_REGNUM -> BASE_REGNUM.
7747 If a function requires the base register, we say here that this
7748 elimination cannot be performed. This will cause reload to free
7749 up the base register (as if it were fixed). On the other hand,
7750 if the current function does *not* require the base register, we
7751 say here the elimination succeeds, which in turn allows reload
7752 to allocate the base register for any other purpose. */
7753 if (from == BASE_REGNUM && to == BASE_REGNUM)
7754 {
7755 if (TARGET_CPU_ZARCH)
7756 {
7757 s390_init_frame_layout ();
7758 return cfun->machine->base_reg == NULL_RTX;
7759 }
7760
7761 return false;
7762 }
7763
7764 /* Everything else must point into the stack frame. */
7765 gcc_assert (to == STACK_POINTER_REGNUM
7766 || to == HARD_FRAME_POINTER_REGNUM);
7767
7768 gcc_assert (from == FRAME_POINTER_REGNUM
7769 || from == ARG_POINTER_REGNUM
7770 || from == RETURN_ADDRESS_POINTER_REGNUM);
7771
7772 /* Make sure we actually saved the return address. */
7773 if (from == RETURN_ADDRESS_POINTER_REGNUM)
7774 if (!crtl->calls_eh_return
7775 && !cfun->stdarg
7776 && !cfun_frame_layout.save_return_addr_p)
7777 return false;
7778
7779 return true;
7780 }
7781
7782 /* Return the offset between registers FROM and TO immediately after the prologue. */
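/* Illustrative example (64-bit ABI, where STACK_POINTER_OFFSET is 160):
   eliminating ARG_POINTER_REGNUM into STACK_POINTER_REGNUM for a function
   whose frame_size is 344 yields 344 + 160 = 504, while eliminating
   BASE_REGNUM into itself always yields 0.  */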
7783
7784 HOST_WIDE_INT
7785 s390_initial_elimination_offset (int from, int to)
7786 {
7787 HOST_WIDE_INT offset;
7788 int index;
7789
7790 /* ??? Why are we called for non-eliminable pairs? */
7791 if (!s390_can_eliminate (from, to))
7792 return 0;
7793
7794 switch (from)
7795 {
7796 case FRAME_POINTER_REGNUM:
7797 offset = (get_frame_size()
7798 + STACK_POINTER_OFFSET
7799 + crtl->outgoing_args_size);
7800 break;
7801
7802 case ARG_POINTER_REGNUM:
7803 s390_init_frame_layout ();
7804 offset = cfun_frame_layout.frame_size + STACK_POINTER_OFFSET;
7805 break;
7806
7807 case RETURN_ADDRESS_POINTER_REGNUM:
7808 s390_init_frame_layout ();
7809 index = RETURN_REGNUM - cfun_frame_layout.first_save_gpr_slot;
7810 gcc_assert (index >= 0);
7811 offset = cfun_frame_layout.frame_size + cfun_frame_layout.gprs_offset;
7812 offset += index * UNITS_PER_LONG;
7813 break;
7814
7815 case BASE_REGNUM:
7816 offset = 0;
7817 break;
7818
7819 default:
7820 gcc_unreachable ();
7821 }
7822
7823 return offset;
7824 }
7825
7826 /* Emit insn to save fpr REGNUM at offset OFFSET relative
7827 to register BASE. Return generated insn. */
7828
7829 static rtx
7830 save_fpr (rtx base, int offset, int regnum)
7831 {
7832 rtx addr;
7833 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
7834
7835 if (regnum >= 16 && regnum <= (16 + FP_ARG_NUM_REG))
7836 set_mem_alias_set (addr, get_varargs_alias_set ());
7837 else
7838 set_mem_alias_set (addr, get_frame_alias_set ());
7839
7840 return emit_move_insn (addr, gen_rtx_REG (DFmode, regnum));
7841 }
7842
7843 /* Emit insn to restore fpr REGNUM from offset OFFSET relative
7844 to register BASE. Return generated insn. */
7845
7846 static rtx
7847 restore_fpr (rtx base, int offset, int regnum)
7848 {
7849 rtx addr;
7850 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
7851 set_mem_alias_set (addr, get_frame_alias_set ());
7852
7853 return emit_move_insn (gen_rtx_REG (DFmode, regnum), addr);
7854 }
7855
7856 /* Return true if REGNO is a global register, but not one
7857 of the special ones that need to be saved/restored anyway. */
7858
7859 static inline bool
7860 global_not_special_regno_p (int regno)
7861 {
7862 return (global_regs[regno]
7863 /* These registers are special and need to be
7864 restored in any case. */
7865 && !(regno == STACK_POINTER_REGNUM
7866 || regno == RETURN_REGNUM
7867 || regno == BASE_REGNUM
7868 || (flag_pic && regno == (int)PIC_OFFSET_TABLE_REGNUM)));
7869 }
7870
7871 /* Generate insn to save registers FIRST to LAST into
7872 the register save area located at offset OFFSET
7873 relative to register BASE. */
7874
7875 static rtx
7876 save_gprs (rtx base, int offset, int first, int last)
7877 {
7878 rtx addr, insn, note;
7879 int i;
7880
7881 addr = plus_constant (Pmode, base, offset);
7882 addr = gen_rtx_MEM (Pmode, addr);
7883
7884 set_mem_alias_set (addr, get_frame_alias_set ());
7885
7886 /* Special-case single register. */
7887 if (first == last)
7888 {
7889 if (TARGET_64BIT)
7890 insn = gen_movdi (addr, gen_rtx_REG (Pmode, first));
7891 else
7892 insn = gen_movsi (addr, gen_rtx_REG (Pmode, first));
7893
7894 if (!global_not_special_regno_p (first))
7895 RTX_FRAME_RELATED_P (insn) = 1;
7896 return insn;
7897 }
7898
7899
7900 insn = gen_store_multiple (addr,
7901 gen_rtx_REG (Pmode, first),
7902 GEN_INT (last - first + 1));
7903
7904 if (first <= 6 && cfun->stdarg)
7905 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
7906 {
7907 rtx mem = XEXP (XVECEXP (PATTERN (insn), 0, i), 0);
7908
7909 if (first + i <= 6)
7910 set_mem_alias_set (mem, get_varargs_alias_set ());
7911 }
7912
7913 /* We need to set the FRAME_RELATED flag on all SETs
7914 inside the store-multiple pattern.
7915
7916 However, we must not emit DWARF records for registers 2..5
7917 if they are stored for use by variable arguments ...
7918
7919 ??? Unfortunately, it is not enough to simply not set the
7920 FRAME_RELATED flags for those SETs, because the first SET
7921 of the PARALLEL is always treated as if it had the flag
7922 set, even if it does not. Therefore we emit a new pattern
7923 without those registers as REG_FRAME_RELATED_EXPR note. */
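/* Illustrative example: a 64-bit stdarg function that stores r2..r15 with
   a single STMG gets a REG_FRAME_RELATED_EXPR note covering only r6..r15,
   i.e. a fresh (parallel [(set (mem ...) (reg 6)) ... (set (mem ...)
   (reg 15))]) built by the extra gen_store_multiple call below.  */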
7924
7925 if (first >= 6 && !global_not_special_regno_p (first))
7926 {
7927 rtx pat = PATTERN (insn);
7928
7929 for (i = 0; i < XVECLEN (pat, 0); i++)
7930 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
7931 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (pat,
7932 0, i)))))
7933 RTX_FRAME_RELATED_P (XVECEXP (pat, 0, i)) = 1;
7934
7935 RTX_FRAME_RELATED_P (insn) = 1;
7936 }
7937 else if (last >= 6)
7938 {
7939 int start;
7940
7941 for (start = first >= 6 ? first : 6; start <= last; start++)
7942 if (!global_not_special_regno_p (start))
7943 break;
7944
7945 if (start > last)
7946 return insn;
7947
7948 addr = plus_constant (Pmode, base,
7949 offset + (start - first) * UNITS_PER_LONG);
7950 note = gen_store_multiple (gen_rtx_MEM (Pmode, addr),
7951 gen_rtx_REG (Pmode, start),
7952 GEN_INT (last - start + 1));
7953 note = PATTERN (note);
7954
7955 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
7956
7957 for (i = 0; i < XVECLEN (note, 0); i++)
7958 if (GET_CODE (XVECEXP (note, 0, i)) == SET
7959 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (note,
7960 0, i)))))
7961 RTX_FRAME_RELATED_P (XVECEXP (note, 0, i)) = 1;
7962
7963 RTX_FRAME_RELATED_P (insn) = 1;
7964 }
7965
7966 return insn;
7967 }
7968
7969 /* Generate insn to restore registers FIRST to LAST from
7970 the register save area located at offset OFFSET
7971 relative to register BASE. */
7972
7973 static rtx
7974 restore_gprs (rtx base, int offset, int first, int last)
7975 {
7976 rtx addr, insn;
7977
7978 addr = plus_constant (Pmode, base, offset);
7979 addr = gen_rtx_MEM (Pmode, addr);
7980 set_mem_alias_set (addr, get_frame_alias_set ());
7981
7982 /* Special-case single register. */
7983 if (first == last)
7984 {
7985 if (TARGET_64BIT)
7986 insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
7987 else
7988 insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);
7989
7990 return insn;
7991 }
7992
7993 insn = gen_load_multiple (gen_rtx_REG (Pmode, first),
7994 addr,
7995 GEN_INT (last - first + 1));
7996 return insn;
7997 }
7998
7999 /* Return insn sequence to load the GOT register. */
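/* Illustrative output: on TARGET_CPU_ZARCH the sequence typically becomes a
   single "larl %r12,_GLOBAL_OFFSET_TABLE_"; on older ESA/390 targets the GOT
   address is instead loaded from a literal-pool entry (UNSPEC_LTREL_OFFSET)
   and adjusted by the literal-pool base (UNSPEC_LTREL_BASE).  The exact
   insns depend on the movdi/movsi patterns.  */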
8000
8001 static GTY(()) rtx got_symbol;
8002 rtx
8003 s390_load_got (void)
8004 {
8005 rtx insns;
8006
8007 /* We cannot use pic_offset_table_rtx here since we use this
8008 function also for non-pic if __tls_get_offset is called and in
8009 that case PIC_OFFSET_TABLE_REGNUM as well as pic_offset_table_rtx
8010 aren't usable. */
8011 rtx got_rtx = gen_rtx_REG (Pmode, 12);
8012
8013 if (!got_symbol)
8014 {
8015 got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
8016 SYMBOL_REF_FLAGS (got_symbol) = SYMBOL_FLAG_LOCAL;
8017 }
8018
8019 start_sequence ();
8020
8021 if (TARGET_CPU_ZARCH)
8022 {
8023 emit_move_insn (got_rtx, got_symbol);
8024 }
8025 else
8026 {
8027 rtx offset;
8028
8029 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got_symbol),
8030 UNSPEC_LTREL_OFFSET);
8031 offset = gen_rtx_CONST (Pmode, offset);
8032 offset = force_const_mem (Pmode, offset);
8033
8034 emit_move_insn (got_rtx, offset);
8035
8036 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (offset, 0)),
8037 UNSPEC_LTREL_BASE);
8038 offset = gen_rtx_PLUS (Pmode, got_rtx, offset);
8039
8040 emit_move_insn (got_rtx, offset);
8041 }
8042
8043 insns = get_insns ();
8044 end_sequence ();
8045 return insns;
8046 }
8047
8048 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
8049 and the change to the stack pointer. */
8050
8051 static void
8052 s390_emit_stack_tie (void)
8053 {
8054 rtx mem = gen_frame_mem (BLKmode,
8055 gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
8056
8057 emit_insn (gen_stack_tie (mem));
8058 }
8059
8060 /* Expand the prologue into a bunch of separate insns. */
8061
8062 void
8063 s390_emit_prologue (void)
8064 {
8065 rtx insn, addr;
8066 rtx temp_reg;
8067 int i;
8068 int offset;
8069 int next_fpr = 0;
8070
8071 /* Complete frame layout. */
8072
8073 s390_update_frame_layout ();
8074
8075 /* Annotate all constant pool references to let the scheduler know
8076 they implicitly use the base register. */
8077
8078 push_topmost_sequence ();
8079
8080 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8081 if (INSN_P (insn))
8082 {
8083 annotate_constant_pool_refs (&PATTERN (insn));
8084 df_insn_rescan (insn);
8085 }
8086
8087 pop_topmost_sequence ();
8088
8089 /* Choose the best register to use as a temporary within the prologue.
8090 See below for why TPF must use register 1. */
8091
8092 if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
8093 && !crtl->is_leaf
8094 && !TARGET_TPF_PROFILING)
8095 temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
8096 else
8097 temp_reg = gen_rtx_REG (Pmode, 1);
8098
8099 /* Save call saved gprs. */
8100 if (cfun_frame_layout.first_save_gpr != -1)
8101 {
8102 insn = save_gprs (stack_pointer_rtx,
8103 cfun_frame_layout.gprs_offset +
8104 UNITS_PER_LONG * (cfun_frame_layout.first_save_gpr
8105 - cfun_frame_layout.first_save_gpr_slot),
8106 cfun_frame_layout.first_save_gpr,
8107 cfun_frame_layout.last_save_gpr);
8108 emit_insn (insn);
8109 }
8110
8111 /* Dummy insn to mark literal pool slot. */
8112
8113 if (cfun->machine->base_reg)
8114 emit_insn (gen_main_pool (cfun->machine->base_reg));
8115
8116 offset = cfun_frame_layout.f0_offset;
8117
8118 /* Save f0 and f2. */
8119 for (i = 0; i < 2; i++)
8120 {
8121 if (cfun_fpr_bit_p (i))
8122 {
8123 save_fpr (stack_pointer_rtx, offset, i + 16);
8124 offset += 8;
8125 }
8126 else if (!TARGET_PACKED_STACK)
8127 offset += 8;
8128 }
8129
8130 /* Save f4 and f6. */
8131 offset = cfun_frame_layout.f4_offset;
8132 for (i = 2; i < 4; i++)
8133 {
8134 if (cfun_fpr_bit_p (i))
8135 {
8136 insn = save_fpr (stack_pointer_rtx, offset, i + 16);
8137 offset += 8;
8138
8139 /* If f4 and f6 are call clobbered they are saved due to stdargs and
8140 therefore are not frame related. */
8141 if (!call_really_used_regs[i + 16])
8142 RTX_FRAME_RELATED_P (insn) = 1;
8143 }
8144 else if (!TARGET_PACKED_STACK)
8145 offset += 8;
8146 }
8147
8148 if (TARGET_PACKED_STACK
8149 && cfun_save_high_fprs_p
8150 && cfun_frame_layout.f8_offset + cfun_frame_layout.high_fprs * 8 > 0)
8151 {
8152 offset = (cfun_frame_layout.f8_offset
8153 + (cfun_frame_layout.high_fprs - 1) * 8);
8154
8155 for (i = 15; i > 7 && offset >= 0; i--)
8156 if (cfun_fpr_bit_p (i))
8157 {
8158 insn = save_fpr (stack_pointer_rtx, offset, i + 16);
8159
8160 RTX_FRAME_RELATED_P (insn) = 1;
8161 offset -= 8;
8162 }
8163 if (offset >= cfun_frame_layout.f8_offset)
8164 next_fpr = i + 16;
8165 }
8166
8167 if (!TARGET_PACKED_STACK)
8168 next_fpr = cfun_save_high_fprs_p ? 31 : 0;
8169
8170 if (flag_stack_usage_info)
8171 current_function_static_stack_size = cfun_frame_layout.frame_size;
8172
8173 /* Decrement stack pointer. */
8174
8175 if (cfun_frame_layout.frame_size > 0)
8176 {
8177 rtx frame_off = GEN_INT (-cfun_frame_layout.frame_size);
8178 rtx real_frame_off;
8179
8180 if (s390_stack_size)
8181 {
8182 HOST_WIDE_INT stack_guard;
8183
8184 if (s390_stack_guard)
8185 stack_guard = s390_stack_guard;
8186 else
8187 {
8188 /* If no value for the stack guard is provided, the smallest power of 2
8189 larger than the current frame size is chosen. */
8190 stack_guard = 1;
8191 while (stack_guard < cfun_frame_layout.frame_size)
8192 stack_guard <<= 1;
8193 }
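/* Illustrative numbers: with -mstack-size=65536, no -mstack-guard and a
   frame of 5000 bytes, stack_guard becomes 8192 and the mask computed
   below is (65536 - 1) & ~(8192 - 1) = 0xe000; the compare-and-trap
   then fires once the stack pointer bits between the guard size and the
   stack size are all zero, i.e. roughly once fewer than stack_guard
   bytes of the configured stack remain.  */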
8194
8195 if (cfun_frame_layout.frame_size >= s390_stack_size)
8196 {
8197 warning (0, "frame size of function %qs is %wd"
8198 " bytes exceeding user provided stack limit of "
8199 "%d bytes. "
8200 "An unconditional trap is added.",
8201 current_function_name(), cfun_frame_layout.frame_size,
8202 s390_stack_size);
8203 emit_insn (gen_trap ());
8204 }
8205 else
8206 {
8207 /* stack_guard has to be smaller than s390_stack_size.
8208 Otherwise we would emit an AND with zero which would
8209 not match the test under mask pattern. */
8210 if (stack_guard >= s390_stack_size)
8211 {
8212 warning (0, "frame size of function %qs is %wd"
8213 " bytes which is more than half the stack size. "
8214 "The dynamic check would not be reliable. "
8215 "No check emitted for this function.",
8216 current_function_name(),
8217 cfun_frame_layout.frame_size);
8218 }
8219 else
8220 {
8221 HOST_WIDE_INT stack_check_mask = ((s390_stack_size - 1)
8222 & ~(stack_guard - 1));
8223
8224 rtx t = gen_rtx_AND (Pmode, stack_pointer_rtx,
8225 GEN_INT (stack_check_mask));
8226 if (TARGET_64BIT)
8227 emit_insn (gen_ctrapdi4 (gen_rtx_EQ (VOIDmode,
8228 t, const0_rtx),
8229 t, const0_rtx, const0_rtx));
8230 else
8231 emit_insn (gen_ctrapsi4 (gen_rtx_EQ (VOIDmode,
8232 t, const0_rtx),
8233 t, const0_rtx, const0_rtx));
8234 }
8235 }
8236 }
8237
8238 if (s390_warn_framesize > 0
8239 && cfun_frame_layout.frame_size >= s390_warn_framesize)
8240 warning (0, "frame size of %qs is %wd bytes",
8241 current_function_name (), cfun_frame_layout.frame_size);
8242
8243 if (s390_warn_dynamicstack_p && cfun->calls_alloca)
8244 warning (0, "%qs uses dynamic stack allocation", current_function_name ());
8245
8246 /* Save incoming stack pointer into temp reg. */
8247 if (TARGET_BACKCHAIN || next_fpr)
8248 insn = emit_insn (gen_move_insn (temp_reg, stack_pointer_rtx));
8249
8250 /* Subtract frame size from stack pointer. */
8251
8252 if (DISP_IN_RANGE (INTVAL (frame_off)))
8253 {
8254 insn = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8255 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8256 frame_off));
8257 insn = emit_insn (insn);
8258 }
8259 else
8260 {
8261 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
8262 frame_off = force_const_mem (Pmode, frame_off);
8263
8264 insn = emit_insn (gen_add2_insn (stack_pointer_rtx, frame_off));
8265 annotate_constant_pool_refs (&PATTERN (insn));
8266 }
8267
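/* The DWARF note below must carry the original constant offset:
   frame_off itself may have been replaced by a literal-pool MEM
   above, which the unwinder could not interpret.  */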
8268 RTX_FRAME_RELATED_P (insn) = 1;
8269 real_frame_off = GEN_INT (-cfun_frame_layout.frame_size);
8270 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
8271 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8272 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8273 real_frame_off)));
8274
8275 /* Set backchain. */
8276
8277 if (TARGET_BACKCHAIN)
8278 {
8279 if (cfun_frame_layout.backchain_offset)
8280 addr = gen_rtx_MEM (Pmode,
8281 plus_constant (Pmode, stack_pointer_rtx,
8282 cfun_frame_layout.backchain_offset));
8283 else
8284 addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
8285 set_mem_alias_set (addr, get_frame_alias_set ());
8286 insn = emit_insn (gen_move_insn (addr, temp_reg));
8287 }
8288
8289 /* If we support non-call exceptions (e.g. for Java),
8290 we need to make sure the backchain pointer is set up
8291 before any possibly trapping memory access. */
8292 if (TARGET_BACKCHAIN && cfun->can_throw_non_call_exceptions)
8293 {
8294 addr = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode));
8295 emit_clobber (addr);
8296 }
8297 }
8298
8299 /* Save fprs 8 - 15 (64 bit ABI). */
8300
8301 if (cfun_save_high_fprs_p && next_fpr)
8302 {
8303 /* If the stack might be accessed through a different register
8304 we have to make sure that the stack pointer decrement is not
8305 moved below the use of the stack slots. */
8306 s390_emit_stack_tie ();
8307
8308 insn = emit_insn (gen_add2_insn (temp_reg,
8309 GEN_INT (cfun_frame_layout.f8_offset)));
8310
8311 offset = 0;
8312
8313 for (i = 24; i <= next_fpr; i++)
8314 if (cfun_fpr_bit_p (i - 16))
8315 {
8316 rtx addr = plus_constant (Pmode, stack_pointer_rtx,
8317 cfun_frame_layout.frame_size
8318 + cfun_frame_layout.f8_offset
8319 + offset);
8320
8321 insn = save_fpr (temp_reg, offset, i);
8322 offset += 8;
8323 RTX_FRAME_RELATED_P (insn) = 1;
8324 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
8325 gen_rtx_SET (VOIDmode,
8326 gen_rtx_MEM (DFmode, addr),
8327 gen_rtx_REG (DFmode, i)));
8328 }
8329 }
8330
8331 /* Set frame pointer, if needed. */
8332
8333 if (frame_pointer_needed)
8334 {
8335 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
8336 RTX_FRAME_RELATED_P (insn) = 1;
8337 }
8338
8339 /* Set up got pointer, if needed. */
8340
8341 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
8342 {
8343 rtx insns = s390_load_got ();
8344
8345 for (insn = insns; insn; insn = NEXT_INSN (insn))
8346 annotate_constant_pool_refs (&PATTERN (insn));
8347
8348 emit_insn (insns);
8349 }
8350
8351 if (TARGET_TPF_PROFILING)
8352 {
8353 /* Generate a BAS instruction to serve as a function
8354 entry intercept to facilitate the use of tracing
8355 algorithms located at the branch target. */
8356 emit_insn (gen_prologue_tpf ());
8357
8358 /* Emit a blockage here so that all code
8359 lies between the profiling mechanisms. */
8360 emit_insn (gen_blockage ());
8361 }
8362 }
8363
8364 /* Expand the epilogue into a bunch of separate insns. */
8365
8366 void
8367 s390_emit_epilogue (bool sibcall)
8368 {
8369 rtx frame_pointer, return_reg, cfa_restores = NULL_RTX;
8370 int area_bottom, area_top, offset = 0;
8371 int next_offset;
8372 rtvec p;
8373 int i;
8374
8375 if (TARGET_TPF_PROFILING)
8376 {
8377
8378 /* Generate a BAS instruction to serve as a function
8379 entry intercept to facilitate the use of tracing
8380 algorithms located at the branch target. */
8381
8382 /* Emit a blockage here so that all code
8383 lies between the profiling mechanisms. */
8384 emit_insn (gen_blockage ());
8385
8386 emit_insn (gen_epilogue_tpf ());
8387 }
8388
8389 /* Check whether to use frame or stack pointer for restore. */
8390
8391 frame_pointer = (frame_pointer_needed
8392 ? hard_frame_pointer_rtx : stack_pointer_rtx);
8393
8394 s390_frame_area (&area_bottom, &area_top);
8395
8396 /* Check whether we can access the register save area.
8397 If not, increment the frame pointer as required. */
8398
8399 if (area_top <= area_bottom)
8400 {
8401 /* Nothing to restore. */
8402 }
8403 else if (DISP_IN_RANGE (cfun_frame_layout.frame_size + area_bottom)
8404 && DISP_IN_RANGE (cfun_frame_layout.frame_size + area_top - 1))
8405 {
8406 /* Area is in range. */
8407 offset = cfun_frame_layout.frame_size;
8408 }
8409 else
8410 {
8411 rtx insn, frame_off, cfa;
8412
8413 offset = area_bottom < 0 ? -area_bottom : 0;
8414 frame_off = GEN_INT (cfun_frame_layout.frame_size - offset);
8415
8416 cfa = gen_rtx_SET (VOIDmode, frame_pointer,
8417 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
8418 if (DISP_IN_RANGE (INTVAL (frame_off)))
8419 {
8420 insn = gen_rtx_SET (VOIDmode, frame_pointer,
8421 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
8422 insn = emit_insn (insn);
8423 }
8424 else
8425 {
8426 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
8427 frame_off = force_const_mem (Pmode, frame_off);
8428
8429 insn = emit_insn (gen_add2_insn (frame_pointer, frame_off));
8430 annotate_constant_pool_refs (&PATTERN (insn));
8431 }
8432 add_reg_note (insn, REG_CFA_ADJUST_CFA, cfa);
8433 RTX_FRAME_RELATED_P (insn) = 1;
8434 }
8435
8436 /* Restore call saved fprs. */
8437
8438 if (TARGET_64BIT)
8439 {
8440 if (cfun_save_high_fprs_p)
8441 {
8442 next_offset = cfun_frame_layout.f8_offset;
8443 for (i = 24; i < 32; i++)
8444 {
8445 if (cfun_fpr_bit_p (i - 16))
8446 {
8447 restore_fpr (frame_pointer,
8448 offset + next_offset, i);
8449 cfa_restores
8450 = alloc_reg_note (REG_CFA_RESTORE,
8451 gen_rtx_REG (DFmode, i), cfa_restores);
8452 next_offset += 8;
8453 }
8454 }
8455 }
8456
8457 }
8458 else
8459 {
8460 next_offset = cfun_frame_layout.f4_offset;
8461 for (i = 18; i < 20; i++)
8462 {
8463 if (cfun_fpr_bit_p (i - 16))
8464 {
8465 restore_fpr (frame_pointer,
8466 offset + next_offset, i);
8467 cfa_restores
8468 = alloc_reg_note (REG_CFA_RESTORE,
8469 gen_rtx_REG (DFmode, i), cfa_restores);
8470 next_offset += 8;
8471 }
8472 else if (!TARGET_PACKED_STACK)
8473 next_offset += 8;
8474 }
8475
8476 }
8477
8478 /* Return register. */
8479
8480 return_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
8481
8482 /* Restore call saved gprs. */
8483
8484 if (cfun_frame_layout.first_restore_gpr != -1)
8485 {
8486 rtx insn, addr;
8487 int i;
8488
8489 /* Check for global registers and store them into the stack slots
8490 from which they would be restored, so the restore leaves them unchanged. */
8491
8492 for (i = cfun_frame_layout.first_restore_gpr;
8493 i <= cfun_frame_layout.last_restore_gpr;
8494 i++)
8495 {
8496 if (global_not_special_regno_p (i))
8497 {
8498 addr = plus_constant (Pmode, frame_pointer,
8499 offset + cfun_frame_layout.gprs_offset
8500 + (i - cfun_frame_layout.first_save_gpr_slot)
8501 * UNITS_PER_LONG);
8502 addr = gen_rtx_MEM (Pmode, addr);
8503 set_mem_alias_set (addr, get_frame_alias_set ());
8504 emit_move_insn (addr, gen_rtx_REG (Pmode, i));
8505 }
8506 else
8507 cfa_restores
8508 = alloc_reg_note (REG_CFA_RESTORE,
8509 gen_rtx_REG (Pmode, i), cfa_restores);
8510 }
8511
8512 if (! sibcall)
8513 {
8514 /* Fetch the return address from the stack before the load multiple;
8515 this helps scheduling. */
8516
8517 if (cfun_frame_layout.save_return_addr_p
8518 || (cfun_frame_layout.first_restore_gpr < BASE_REGNUM
8519 && cfun_frame_layout.last_restore_gpr > RETURN_REGNUM))
8520 {
8521 int return_regnum = find_unused_clobbered_reg();
8522 if (!return_regnum)
8523 return_regnum = 4;
8524 return_reg = gen_rtx_REG (Pmode, return_regnum);
8525
8526 addr = plus_constant (Pmode, frame_pointer,
8527 offset + cfun_frame_layout.gprs_offset
8528 + (RETURN_REGNUM
8529 - cfun_frame_layout.first_save_gpr_slot)
8530 * UNITS_PER_LONG);
8531 addr = gen_rtx_MEM (Pmode, addr);
8532 set_mem_alias_set (addr, get_frame_alias_set ());
8533 emit_move_insn (return_reg, addr);
8534 }
8535 }
8536
8537 insn = restore_gprs (frame_pointer,
8538 offset + cfun_frame_layout.gprs_offset
8539 + (cfun_frame_layout.first_restore_gpr
8540 - cfun_frame_layout.first_save_gpr_slot)
8541 * UNITS_PER_LONG,
8542 cfun_frame_layout.first_restore_gpr,
8543 cfun_frame_layout.last_restore_gpr);
8544 insn = emit_insn (insn);
8545 REG_NOTES (insn) = cfa_restores;
8546 add_reg_note (insn, REG_CFA_DEF_CFA,
8547 plus_constant (Pmode, stack_pointer_rtx,
8548 STACK_POINTER_OFFSET));
8549 RTX_FRAME_RELATED_P (insn) = 1;
8550 }
8551
8552 if (! sibcall)
8553 {
8554
8555 /* Return to caller. */
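/* The pattern emitted below is a PARALLEL of (return) and a (use) of the
   return register; on typical subtargets this assembles to a plain
   "br %r14" (or a branch via the substitute register chosen above when
   r14 itself had to be reloaded from the save area).  */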
8556
8557 p = rtvec_alloc (2);
8558
8559 RTVEC_ELT (p, 0) = ret_rtx;
8560 RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode, return_reg);
8561 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
8562 }
8563 }
8564
8565
8566 /* Return the size in bytes of a function argument of
8567 type TYPE and/or mode MODE. At least one of TYPE or
8568 MODE must be specified. */
8569
8570 static int
8571 s390_function_arg_size (enum machine_mode mode, const_tree type)
8572 {
8573 if (type)
8574 return int_size_in_bytes (type);
8575
8576 /* No type info available for some library calls ... */
8577 if (mode != BLKmode)
8578 return GET_MODE_SIZE (mode);
8579
8580 /* If we have neither type nor mode, abort. */
8581 gcc_unreachable ();
8582 }
8583
8584 /* Return true if a function argument of type TYPE and mode MODE
8585 is to be passed in a floating-point register, if available. */
8586
8587 static bool
8588 s390_function_arg_float (enum machine_mode mode, const_tree type)
8589 {
8590 int size = s390_function_arg_size (mode, type);
8591 if (size > 8)
8592 return false;
8593
8594 /* Soft-float changes the ABI: no floating-point registers are used. */
8595 if (TARGET_SOFT_FLOAT)
8596 return false;
8597
8598 /* No type info available for some library calls ... */
8599 if (!type)
8600 return mode == SFmode || mode == DFmode || mode == SDmode || mode == DDmode;
8601
8602 /* The ABI says that record types with a single member are treated
8603 just like that member would be. */
8604 while (TREE_CODE (type) == RECORD_TYPE)
8605 {
8606 tree field, single = NULL_TREE;
8607
8608 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
8609 {
8610 if (TREE_CODE (field) != FIELD_DECL)
8611 continue;
8612
8613 if (single == NULL_TREE)
8614 single = TREE_TYPE (field);
8615 else
8616 return false;
8617 }
8618
8619 if (single == NULL_TREE)
8620 return false;
8621 else
8622 type = single;
8623 }
8624
8625 return TREE_CODE (type) == REAL_TYPE;
8626 }
8627
8628 /* Return true if a function argument of type TYPE and mode MODE
8629 is to be passed in an integer register, or a pair of integer
8630 registers, if available. */
8631
8632 static bool
8633 s390_function_arg_integer (enum machine_mode mode, const_tree type)
8634 {
8635 int size = s390_function_arg_size (mode, type);
8636 if (size > 8)
8637 return false;
8638
8639 /* No type info available for some library calls ... */
8640 if (!type)
8641 return GET_MODE_CLASS (mode) == MODE_INT
8642 || (TARGET_SOFT_FLOAT && SCALAR_FLOAT_MODE_P (mode));
8643
8644 /* We accept small integral (and similar) types. */
8645 if (INTEGRAL_TYPE_P (type)
8646 || POINTER_TYPE_P (type)
8647 || TREE_CODE (type) == NULLPTR_TYPE
8648 || TREE_CODE (type) == OFFSET_TYPE
8649 || (TARGET_SOFT_FLOAT && TREE_CODE (type) == REAL_TYPE))
8650 return true;
8651
8652 /* We also accept structs of size 1, 2, 4, 8 that are not
8653 passed in floating-point registers. */
8654 if (AGGREGATE_TYPE_P (type)
8655 && exact_log2 (size) >= 0
8656 && !s390_function_arg_float (mode, type))
8657 return true;
8658
8659 return false;
8660 }
8661
8662 /* Return 1 if a function argument of type TYPE and mode MODE
8663 is to be passed by reference. The ABI specifies that only
8664 structures of size 1, 2, 4, or 8 bytes are passed by value,
8665 all other structures (and complex numbers) are passed by
8666 reference. */
8667
8668 static bool
8669 s390_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
8670 enum machine_mode mode, const_tree type,
8671 bool named ATTRIBUTE_UNUSED)
8672 {
8673 int size = s390_function_arg_size (mode, type);
8674 if (size > 8)
8675 return true;
8676
8677 if (type)
8678 {
8679 if (AGGREGATE_TYPE_P (type) && exact_log2 (size) < 0)
8680 return 1;
8681
8682 if (TREE_CODE (type) == COMPLEX_TYPE
8683 || TREE_CODE (type) == VECTOR_TYPE)
8684 return 1;
8685 }
8686
8687 return 0;
8688 }
8689
8690 /* Update the data in CUM to advance over an argument of mode MODE and
8691 data type TYPE. (TYPE is null for libcalls where that information
8692 may not be available.)  The boolean NAMED specifies whether the
8693 argument is a named argument (as opposed to an unnamed argument
8694 matching an ellipsis). */
8695
8696 static void
8697 s390_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
8698 const_tree type, bool named ATTRIBUTE_UNUSED)
8699 {
8700 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
8701
8702 if (s390_function_arg_float (mode, type))
8703 {
8704 cum->fprs += 1;
8705 }
8706 else if (s390_function_arg_integer (mode, type))
8707 {
8708 int size = s390_function_arg_size (mode, type);
8709 cum->gprs += ((size + UNITS_PER_LONG - 1) / UNITS_PER_LONG);
8710 }
8711 else
8712 gcc_unreachable ();
8713 }
8714
8715 /* Define where to put the arguments to a function.
8716 Value is zero to push the argument on the stack,
8717 or a hard register in which to store the argument.
8718
8719 MODE is the argument's machine mode.
8720 TYPE is the data type of the argument (as a tree).
8721 This is null for libcalls where that information may
8722 not be available.
8723 CUM is a variable of type CUMULATIVE_ARGS which gives info about
8724 the preceding args and about the function being called.
8725 NAMED is nonzero if this argument is a named parameter
8726 (otherwise it is an extra parameter matching an ellipsis).
8727
8728 On S/390, we use general purpose registers 2 through 6 to
8729 pass integer, pointer, and certain structure arguments, and
8730 floating point registers 0 and 2 (0, 2, 4, and 6 on 64-bit)
8731 to pass floating point arguments. All remaining arguments
8732 are pushed to the stack. */
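/* Illustrative results of the code below: a DImode argument compiled for
   64-bit lands in a single GPR, e.g. (reg:DI 2); the same argument with
   -m31 -mzarch needs two GPRs and is described as a PARALLEL of two SImode
   pieces at byte offsets 0 and 4 (e.g. gprs 2 and 3); a DFmode argument
   goes into an FPR, e.g. (reg:DF 16) for f0.  */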
8733
8734 static rtx
8735 s390_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
8736 const_tree type, bool named ATTRIBUTE_UNUSED)
8737 {
8738 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
8739
8740 if (s390_function_arg_float (mode, type))
8741 {
8742 if (cum->fprs + 1 > FP_ARG_NUM_REG)
8743 return 0;
8744 else
8745 return gen_rtx_REG (mode, cum->fprs + 16);
8746 }
8747 else if (s390_function_arg_integer (mode, type))
8748 {
8749 int size = s390_function_arg_size (mode, type);
8750 int n_gprs = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
8751
8752 if (cum->gprs + n_gprs > GP_ARG_NUM_REG)
8753 return 0;
8754 else if (n_gprs == 1 || UNITS_PER_WORD == UNITS_PER_LONG)
8755 return gen_rtx_REG (mode, cum->gprs + 2);
8756 else if (n_gprs == 2)
8757 {
8758 rtvec p = rtvec_alloc (2);
8759
8760 RTVEC_ELT (p, 0)
8761 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 2),
8762 const0_rtx);
8763 RTVEC_ELT (p, 1)
8764 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 3),
8765 GEN_INT (4));
8766
8767 return gen_rtx_PARALLEL (mode, p);
8768 }
8769 }
8770
8771 /* After the real arguments, expand_call calls us once again
8772 with a void_type_node type. Whatever we return here is
8773 passed as operand 2 to the call expanders.
8774
8775 We don't need this feature ... */
8776 else if (type == void_type_node)
8777 return const0_rtx;
8778
8779 gcc_unreachable ();
8780 }
8781
8782 /* Return true if return values of type TYPE should be returned
8783 in a memory buffer whose address is passed by the caller as
8784 hidden first argument. */
8785
8786 static bool
8787 s390_return_in_memory (const_tree type, const_tree fundecl ATTRIBUTE_UNUSED)
8788 {
8789 /* We accept small integral (and similar) types. */
8790 if (INTEGRAL_TYPE_P (type)
8791 || POINTER_TYPE_P (type)
8792 || TREE_CODE (type) == OFFSET_TYPE
8793 || TREE_CODE (type) == REAL_TYPE)
8794 return int_size_in_bytes (type) > 8;
8795
8796 /* Aggregates and similar constructs are always returned
8797 in memory. */
8798 if (AGGREGATE_TYPE_P (type)
8799 || TREE_CODE (type) == COMPLEX_TYPE
8800 || TREE_CODE (type) == VECTOR_TYPE)
8801 return true;
8802
8803 /* ??? We get called on all sorts of random stuff from
8804 aggregate_value_p. We can't abort, but it's not clear
8805 what's safe to return. Pretend it's a struct I guess. */
8806 return true;
8807 }
8808
8809 /* Function arguments and return values are promoted to word size. */
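/* E.g. when compiling for 64-bit (UNITS_PER_LONG == 8) a QImode, HImode or
   SImode scalar is promoted to DImode (Pmode); pointer-typed arguments are
   additionally marked for unsigned extension via POINTERS_EXTEND_UNSIGNED.  */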
8810
8811 static enum machine_mode
8812 s390_promote_function_mode (const_tree type, enum machine_mode mode,
8813 int *punsignedp,
8814 const_tree fntype ATTRIBUTE_UNUSED,
8815 int for_return ATTRIBUTE_UNUSED)
8816 {
8817 if (INTEGRAL_MODE_P (mode)
8818 && GET_MODE_SIZE (mode) < UNITS_PER_LONG)
8819 {
8820 if (type != NULL_TREE && POINTER_TYPE_P (type))
8821 *punsignedp = POINTERS_EXTEND_UNSIGNED;
8822 return Pmode;
8823 }
8824
8825 return mode;
8826 }
8827
8828 /* Define where to return a (scalar) value of type RET_TYPE.
8829 If RET_TYPE is null, define where to return a (scalar)
8830 value of mode MODE from a libcall. */
8831
8832 static rtx
8833 s390_function_and_libcall_value (enum machine_mode mode,
8834 const_tree ret_type,
8835 const_tree fntype_or_decl,
8836 bool outgoing ATTRIBUTE_UNUSED)
8837 {
8838 /* For normal functions perform the promotion as
8839 promote_function_mode would do. */
8840 if (ret_type)
8841 {
8842 int unsignedp = TYPE_UNSIGNED (ret_type);
8843 mode = promote_function_mode (ret_type, mode, &unsignedp,
8844 fntype_or_decl, 1);
8845 }
8846
8847 gcc_assert (GET_MODE_CLASS (mode) == MODE_INT || SCALAR_FLOAT_MODE_P (mode));
8848 gcc_assert (GET_MODE_SIZE (mode) <= 8);
8849
8850 if (TARGET_HARD_FLOAT && SCALAR_FLOAT_MODE_P (mode))
8851 return gen_rtx_REG (mode, 16);
8852 else if (GET_MODE_SIZE (mode) <= UNITS_PER_LONG
8853 || UNITS_PER_LONG == UNITS_PER_WORD)
8854 return gen_rtx_REG (mode, 2);
8855 else if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_LONG)
8856 {
8857 /* This case is triggered when returning a 64 bit value with
8858 -m31 -mzarch. Although the value would fit into a single
8859 register it has to be forced into a 32 bit register pair in
8860 order to match the ABI. */
8861 rtvec p = rtvec_alloc (2);
8862
8863 RTVEC_ELT (p, 0)
8864 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 2), const0_rtx);
8865 RTVEC_ELT (p, 1)
8866 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 3), GEN_INT (4));
8867
8868 return gen_rtx_PARALLEL (mode, p);
8869 }
8870
8871 gcc_unreachable ();
8872 }
8873
8874 /* Define where to return a scalar return value of type RET_TYPE. */
8875
8876 static rtx
8877 s390_function_value (const_tree ret_type, const_tree fn_decl_or_type,
8878 bool outgoing)
8879 {
8880 return s390_function_and_libcall_value (TYPE_MODE (ret_type), ret_type,
8881 fn_decl_or_type, outgoing);
8882 }
8883
8884 /* Define where to return a scalar libcall return value of mode
8885 MODE. */
8886
8887 static rtx
8888 s390_libcall_value (enum machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
8889 {
8890 return s390_function_and_libcall_value (mode, NULL_TREE,
8891 NULL_TREE, true);
8892 }
8893
8894
8895 /* Create and return the va_list datatype.
8896
8897 On S/390, va_list is an array type equivalent to
8898
8899 typedef struct __va_list_tag
8900 {
8901 long __gpr;
8902 long __fpr;
8903 void *__overflow_arg_area;
8904 void *__reg_save_area;
8905 } va_list[1];
8906
8907 where __gpr and __fpr hold the number of general purpose
8908 or floating point arguments used up to now, respectively,
8909 __overflow_arg_area points to the stack location of the
8910 next argument passed on the stack, and __reg_save_area
8911 always points to the start of the register area in the
8912 call frame of the current function. The function prologue
8913 saves all registers used for argument passing into this
8914 area if the function uses variable arguments. */
8915
8916 static tree
8917 s390_build_builtin_va_list (void)
8918 {
8919 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
8920
8921 record = lang_hooks.types.make_type (RECORD_TYPE);
8922
8923 type_decl =
8924 build_decl (BUILTINS_LOCATION,
8925 TYPE_DECL, get_identifier ("__va_list_tag"), record);
8926
8927 f_gpr = build_decl (BUILTINS_LOCATION,
8928 FIELD_DECL, get_identifier ("__gpr"),
8929 long_integer_type_node);
8930 f_fpr = build_decl (BUILTINS_LOCATION,
8931 FIELD_DECL, get_identifier ("__fpr"),
8932 long_integer_type_node);
8933 f_ovf = build_decl (BUILTINS_LOCATION,
8934 FIELD_DECL, get_identifier ("__overflow_arg_area"),
8935 ptr_type_node);
8936 f_sav = build_decl (BUILTINS_LOCATION,
8937 FIELD_DECL, get_identifier ("__reg_save_area"),
8938 ptr_type_node);
8939
8940 va_list_gpr_counter_field = f_gpr;
8941 va_list_fpr_counter_field = f_fpr;
8942
8943 DECL_FIELD_CONTEXT (f_gpr) = record;
8944 DECL_FIELD_CONTEXT (f_fpr) = record;
8945 DECL_FIELD_CONTEXT (f_ovf) = record;
8946 DECL_FIELD_CONTEXT (f_sav) = record;
8947
8948 TYPE_STUB_DECL (record) = type_decl;
8949 TYPE_NAME (record) = type_decl;
8950 TYPE_FIELDS (record) = f_gpr;
8951 DECL_CHAIN (f_gpr) = f_fpr;
8952 DECL_CHAIN (f_fpr) = f_ovf;
8953 DECL_CHAIN (f_ovf) = f_sav;
8954
8955 layout_type (record);
8956
8957 /* The correct type is an array type of one element. */
8958 return build_array_type (record, build_index_type (size_zero_node));
8959 }
8960
8961 /* Implement va_start by filling the va_list structure VALIST.
8962 STDARG_P is always true, and ignored.
8963 NEXTARG points to the first anonymous stack argument.
8964
8965 The following global variables are used to initialize
8966 the va_list structure:
8967
8968 crtl->args.info:
8969 holds number of gprs and fprs used for named arguments.
8970 crtl->args.arg_offset_rtx:
8971 holds the offset of the first anonymous stack argument
8972 (relative to the virtual arg pointer). */
8973
8974 static void
8975 s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
8976 {
8977 HOST_WIDE_INT n_gpr, n_fpr;
8978 int off;
8979 tree f_gpr, f_fpr, f_ovf, f_sav;
8980 tree gpr, fpr, ovf, sav, t;
8981
8982 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
8983 f_fpr = DECL_CHAIN (f_gpr);
8984 f_ovf = DECL_CHAIN (f_fpr);
8985 f_sav = DECL_CHAIN (f_ovf);
8986
8987 valist = build_simple_mem_ref (valist);
8988 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
8989 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
8990 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
8991 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
8992
8993 /* Count number of gp and fp argument registers used. */
8994
8995 n_gpr = crtl->args.info.gprs;
8996 n_fpr = crtl->args.info.fprs;
8997
8998 if (cfun->va_list_gpr_size)
8999 {
9000 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
9001 build_int_cst (NULL_TREE, n_gpr));
9002 TREE_SIDE_EFFECTS (t) = 1;
9003 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9004 }
9005
9006 if (cfun->va_list_fpr_size)
9007 {
9008 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
9009 build_int_cst (NULL_TREE, n_fpr));
9010 TREE_SIDE_EFFECTS (t) = 1;
9011 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9012 }
9013
9014 /* Find the overflow area. */
9015 if (n_gpr + cfun->va_list_gpr_size > GP_ARG_NUM_REG
9016 || n_fpr + cfun->va_list_fpr_size > FP_ARG_NUM_REG)
9017 {
9018 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
9019
9020 off = INTVAL (crtl->args.arg_offset_rtx);
9021 off = off < 0 ? 0 : off;
9022 if (TARGET_DEBUG_ARG)
9023 fprintf (stderr, "va_start: n_gpr = %d, n_fpr = %d off %d\n",
9024 (int)n_gpr, (int)n_fpr, off);
9025
9026 t = fold_build_pointer_plus_hwi (t, off);
9027
9028 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
9029 TREE_SIDE_EFFECTS (t) = 1;
9030 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9031 }
9032
9033 /* Find the register save area. */
9034 if ((cfun->va_list_gpr_size && n_gpr < GP_ARG_NUM_REG)
9035 || (cfun->va_list_fpr_size && n_fpr < FP_ARG_NUM_REG))
9036 {
9037 t = make_tree (TREE_TYPE (sav), return_address_pointer_rtx);
9038 t = fold_build_pointer_plus_hwi (t, -RETURN_REGNUM * UNITS_PER_LONG);
9039
9040 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
9041 TREE_SIDE_EFFECTS (t) = 1;
9042 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9043 }
9044 }
9045
9046 /* Implement va_arg by updating the va_list structure
9047 VALIST as required to retrieve an argument of type
9048 TYPE, and returning that argument.
9049
9050 Generates code equivalent to:
9051
9052 if (integral value) {
9053 if (size <= 4 && args.gpr < 5 ||
9054 size > 4 && args.gpr < 4 )
9055 ret = args.reg_save_area[args.gpr+8]
9056 else
9057 ret = *args.overflow_arg_area++;
9058 } else if (float value) {
9059 if (args.fgpr < 2)
9060 ret = args.reg_save_area[args.fpr+64]
9061 else
9062 ret = *args.overflow_arg_area++;
9063 } else if (aggregate value) {
9064 if (args.gpr < 5)
9065 ret = *args.reg_save_area[args.gpr]
9066 else
9067 ret = **args.overflow_arg_area++;
9068 } */
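/* Illustrative mapping used below (64-bit, UNITS_PER_LONG == 8): a GPR
   argument slot lives at __reg_save_area + 2*8 + __gpr*8, so the slot for
   r2 (the first argument register, __gpr == 0) is at offset 16; an FPR slot
   lives at __reg_save_area + 16*8 + __fpr*8, so f0 is at offset 128.  */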
9069
9070 static tree
9071 s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
9072 gimple_seq *post_p ATTRIBUTE_UNUSED)
9073 {
9074 tree f_gpr, f_fpr, f_ovf, f_sav;
9075 tree gpr, fpr, ovf, sav, reg, t, u;
9076 int indirect_p, size, n_reg, sav_ofs, sav_scale, max_reg;
9077 tree lab_false, lab_over, addr;
9078
9079 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
9080 f_fpr = DECL_CHAIN (f_gpr);
9081 f_ovf = DECL_CHAIN (f_fpr);
9082 f_sav = DECL_CHAIN (f_ovf);
9083
9084 valist = build_va_arg_indirect_ref (valist);
9085 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
9086 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
9087 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
9088
9089 /* The tree for args* cannot be shared between gpr/fpr and ovf since
9090 both appear on a lhs. */
9091 valist = unshare_expr (valist);
9092 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
9093
9094 size = int_size_in_bytes (type);
9095
9096 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
9097 {
9098 if (TARGET_DEBUG_ARG)
9099 {
9100 fprintf (stderr, "va_arg: aggregate type");
9101 debug_tree (type);
9102 }
9103
9104 /* Aggregates are passed by reference. */
9105 indirect_p = 1;
9106 reg = gpr;
9107 n_reg = 1;
9108
9109 /* Kernel stack layout on 31 bit: it is assumed here that no padding
9110 will be added by s390_frame_info because for va_args an even number
9111 of gprs always has to be saved (r15-r2 = 14 regs). */
9112 sav_ofs = 2 * UNITS_PER_LONG;
9113 sav_scale = UNITS_PER_LONG;
9114 size = UNITS_PER_LONG;
9115 max_reg = GP_ARG_NUM_REG - n_reg;
9116 }
9117 else if (s390_function_arg_float (TYPE_MODE (type), type))
9118 {
9119 if (TARGET_DEBUG_ARG)
9120 {
9121 fprintf (stderr, "va_arg: float type");
9122 debug_tree (type);
9123 }
9124
9125 /* FP args go in FP registers, if present. */
9126 indirect_p = 0;
9127 reg = fpr;
9128 n_reg = 1;
9129 sav_ofs = 16 * UNITS_PER_LONG;
9130 sav_scale = 8;
9131 max_reg = FP_ARG_NUM_REG - n_reg;
9132 }
9133 else
9134 {
9135 if (TARGET_DEBUG_ARG)
9136 {
9137 fprintf (stderr, "va_arg: other type");
9138 debug_tree (type);
9139 }
9140
9141 /* Otherwise into GP registers. */
9142 indirect_p = 0;
9143 reg = gpr;
9144 n_reg = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
9145
9146 /* Kernel stack layout on 31 bit: it is assumed here that no padding
9147 will be added by s390_frame_info because for va_args an even number
9148 of gprs always has to be saved (r15-r2 = 14 regs). */
9149 sav_ofs = 2 * UNITS_PER_LONG;
9150
9151 if (size < UNITS_PER_LONG)
9152 sav_ofs += UNITS_PER_LONG - size;
9153
9154 sav_scale = UNITS_PER_LONG;
9155 max_reg = GP_ARG_NUM_REG - n_reg;
9156 }
9157
9158 /* Pull the value out of the saved registers ... */
9159
9160 lab_false = create_artificial_label (UNKNOWN_LOCATION);
9161 lab_over = create_artificial_label (UNKNOWN_LOCATION);
9162 addr = create_tmp_var (ptr_type_node, "addr");
9163
9164 t = fold_convert (TREE_TYPE (reg), size_int (max_reg));
9165 t = build2 (GT_EXPR, boolean_type_node, reg, t);
9166 u = build1 (GOTO_EXPR, void_type_node, lab_false);
9167 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
9168 gimplify_and_add (t, pre_p);
9169
9170 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
9171 u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
9172 fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
9173 t = fold_build_pointer_plus (t, u);
9174
9175 gimplify_assign (addr, t, pre_p);
9176
9177 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
9178
9179 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
9180
9181
9182 /* ... Otherwise out of the overflow area. */
9183
9184 t = ovf;
9185 if (size < UNITS_PER_LONG)
9186 t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG - size);
9187
9188 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
9189
9190 gimplify_assign (addr, t, pre_p);
9191
9192 t = fold_build_pointer_plus_hwi (t, size);
9193 gimplify_assign (ovf, t, pre_p);
9194
9195 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
9196
9197
9198 /* Increment register save count. */
9199
9200 u = build2 (PREINCREMENT_EXPR, TREE_TYPE (reg), reg,
9201 fold_convert (TREE_TYPE (reg), size_int (n_reg)));
9202 gimplify_and_add (u, pre_p);
9203
9204 if (indirect_p)
9205 {
9206 t = build_pointer_type_for_mode (build_pointer_type (type),
9207 ptr_mode, true);
9208 addr = fold_convert (t, addr);
9209 addr = build_va_arg_indirect_ref (addr);
9210 }
9211 else
9212 {
9213 t = build_pointer_type_for_mode (type, ptr_mode, true);
9214 addr = fold_convert (t, addr);
9215 }
9216
9217 return build_va_arg_indirect_ref (addr);
9218 }
9219
9220
9221 /* Builtins. */
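/* The two machine-specific builtins defined below can be used from C
   roughly like this (sketch):

     void *tp = __builtin_thread_pointer ();
     __builtin_set_thread_pointer (tp);

   They expand to the get_tp/set_tp patterns, which on this target access
   the thread pointer kept in the access registers.  */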
9222
9223 enum s390_builtin
9224 {
9225 S390_BUILTIN_THREAD_POINTER,
9226 S390_BUILTIN_SET_THREAD_POINTER,
9227
9228 S390_BUILTIN_max
9229 };
9230
9231 static enum insn_code const code_for_builtin_64[S390_BUILTIN_max] = {
9232 CODE_FOR_get_tp_64,
9233 CODE_FOR_set_tp_64
9234 };
9235
9236 static enum insn_code const code_for_builtin_31[S390_BUILTIN_max] = {
9237 CODE_FOR_get_tp_31,
9238 CODE_FOR_set_tp_31
9239 };
9240
9241 static void
9242 s390_init_builtins (void)
9243 {
9244 tree ftype;
9245
9246 ftype = build_function_type_list (ptr_type_node, NULL_TREE);
9247 add_builtin_function ("__builtin_thread_pointer", ftype,
9248 S390_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
9249 NULL, NULL_TREE);
9250
9251 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
9252 add_builtin_function ("__builtin_set_thread_pointer", ftype,
9253 S390_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
9254 NULL, NULL_TREE);
9255 }
9256
9257 /* Expand an expression EXP that calls a built-in function,
9258 with result going to TARGET if that's convenient
9259 (and in mode MODE if that's convenient).
9260 SUBTARGET may be used as the target for computing one of EXP's operands.
9261 IGNORE is nonzero if the value is to be ignored. */
9262
9263 static rtx
9264 s390_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
9265 enum machine_mode mode ATTRIBUTE_UNUSED,
9266 int ignore ATTRIBUTE_UNUSED)
9267 {
9268 #define MAX_ARGS 2
9269
9270 enum insn_code const *code_for_builtin =
9271 TARGET_64BIT ? code_for_builtin_64 : code_for_builtin_31;
9272
9273 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
9274 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
9275 enum insn_code icode;
9276 rtx op[MAX_ARGS], pat;
9277 int arity;
9278 bool nonvoid;
9279 tree arg;
9280 call_expr_arg_iterator iter;
9281
9282 if (fcode >= S390_BUILTIN_max)
9283 internal_error ("bad builtin fcode");
9284 icode = code_for_builtin[fcode];
9285 if (icode == 0)
9286 internal_error ("bad builtin fcode");
9287
9288 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
9289
9290 arity = 0;
9291 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
9292 {
9293 const struct insn_operand_data *insn_op;
9294
9295 if (arg == error_mark_node)
9296 return NULL_RTX;
9297 if (arity > MAX_ARGS)
9298 return NULL_RTX;
9299
9300 insn_op = &insn_data[icode].operand[arity + nonvoid];
9301
9302 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
9303
9304 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
9305 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
9306 arity++;
9307 }
9308
9309 if (nonvoid)
9310 {
9311 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9312 if (!target
9313 || GET_MODE (target) != tmode
9314 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
9315 target = gen_reg_rtx (tmode);
9316 }
9317
9318 switch (arity)
9319 {
9320 case 0:
9321 pat = GEN_FCN (icode) (target);
9322 break;
9323 case 1:
9324 if (nonvoid)
9325 pat = GEN_FCN (icode) (target, op[0]);
9326 else
9327 pat = GEN_FCN (icode) (op[0]);
9328 break;
9329 case 2:
9330 pat = GEN_FCN (icode) (target, op[0], op[1]);
9331 break;
9332 default:
9333 gcc_unreachable ();
9334 }
9335 if (!pat)
9336 return NULL_RTX;
9337 emit_insn (pat);
9338
9339 if (nonvoid)
9340 return target;
9341 else
9342 return const0_rtx;
9343 }
9344
9345
9346 /* Output assembly code for the trampoline template to
9347 stdio stream FILE.
9348
9349 On S/390, we use gpr 1 internally in the trampoline code;
9350 gpr 0 is used to hold the static chain. */
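/* Illustrative layout, produced together with s390_trampoline_init below:
   the first 2 * UNITS_PER_LONG bytes hold the code template emitted here,
   the word at offset 2 * UNITS_PER_LONG receives the static chain value and
   the word at offset 3 * UNITS_PER_LONG the target function address; the
   lmg/lm instruction loads exactly these two words into gprs 0 and 1 before
   branching to the target.  */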
9351
9352 static void
9353 s390_asm_trampoline_template (FILE *file)
9354 {
9355 rtx op[2];
9356 op[0] = gen_rtx_REG (Pmode, 0);
9357 op[1] = gen_rtx_REG (Pmode, 1);
9358
9359 if (TARGET_64BIT)
9360 {
9361 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
9362 output_asm_insn ("lmg\t%0,%1,14(%1)", op); /* 6 byte */
9363 output_asm_insn ("br\t%1", op); /* 2 byte */
9364 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 10));
9365 }
9366 else
9367 {
9368 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
9369 output_asm_insn ("lm\t%0,%1,6(%1)", op); /* 4 byte */
9370 output_asm_insn ("br\t%1", op); /* 2 byte */
9371 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 8));
9372 }
9373 }
9374
9375 /* Emit RTL insns to initialize the variable parts of a trampoline.
9376 FNADDR is an RTX for the address of the function's pure code.
9377 CXT is an RTX for the static chain value for the function. */
9378
9379 static void
9380 s390_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
9381 {
9382 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
9383 rtx mem;
9384
9385 emit_block_move (m_tramp, assemble_trampoline_template (),
9386 GEN_INT (2 * UNITS_PER_LONG), BLOCK_OP_NORMAL);
9387
9388 mem = adjust_address (m_tramp, Pmode, 2 * UNITS_PER_LONG);
9389 emit_move_insn (mem, cxt);
9390 mem = adjust_address (m_tramp, Pmode, 3 * UNITS_PER_LONG);
9391 emit_move_insn (mem, fnaddr);
9392 }
9393
9394 /* Output assembler code to FILE to increment profiler label # LABELNO
9395 for profiling a function entry. */
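/* For 64-bit the sequence emitted below is, roughly:

     stg   %r14,8(%r15)
     larl  %r1,.LPn
     brasl %r14,_mcount
     lg    %r14,8(%r15)

   i.e. the return address is spilled, %r1 is loaded with the address of
   the counter label and _mcount is called (through the PLT when PIC).
   The 31-bit variants below do the same with explicit literal words.  */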
9396
9397 void
9398 s390_function_profiler (FILE *file, int labelno)
9399 {
9400 rtx op[7];
9401
9402 char label[128];
9403 ASM_GENERATE_INTERNAL_LABEL (label, "LP", labelno);
9404
9405 fprintf (file, "# function profiler \n");
9406
9407 op[0] = gen_rtx_REG (Pmode, RETURN_REGNUM);
9408 op[1] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
9409 op[1] = gen_rtx_MEM (Pmode, plus_constant (Pmode, op[1], UNITS_PER_LONG));
9410
9411 op[2] = gen_rtx_REG (Pmode, 1);
9412 op[3] = gen_rtx_SYMBOL_REF (Pmode, label);
9413 SYMBOL_REF_FLAGS (op[3]) = SYMBOL_FLAG_LOCAL;
9414
9415 op[4] = gen_rtx_SYMBOL_REF (Pmode, "_mcount");
9416 if (flag_pic)
9417 {
9418 op[4] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[4]), UNSPEC_PLT);
9419 op[4] = gen_rtx_CONST (Pmode, op[4]);
9420 }
9421
9422 if (TARGET_64BIT)
9423 {
9424 output_asm_insn ("stg\t%0,%1", op);
9425 output_asm_insn ("larl\t%2,%3", op);
9426 output_asm_insn ("brasl\t%0,%4", op);
9427 output_asm_insn ("lg\t%0,%1", op);
9428 }
9429 else if (!flag_pic)
9430 {
9431 op[6] = gen_label_rtx ();
9432
9433 output_asm_insn ("st\t%0,%1", op);
9434 output_asm_insn ("bras\t%2,%l6", op);
9435 output_asm_insn (".long\t%4", op);
9436 output_asm_insn (".long\t%3", op);
9437 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
9438 output_asm_insn ("l\t%0,0(%2)", op);
9439 output_asm_insn ("l\t%2,4(%2)", op);
9440 output_asm_insn ("basr\t%0,%0", op);
9441 output_asm_insn ("l\t%0,%1", op);
9442 }
9443 else
9444 {
9445 op[5] = gen_label_rtx ();
9446 op[6] = gen_label_rtx ();
9447
9448 output_asm_insn ("st\t%0,%1", op);
9449 output_asm_insn ("bras\t%2,%l6", op);
9450 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[5]));
9451 output_asm_insn (".long\t%4-%l5", op);
9452 output_asm_insn (".long\t%3-%l5", op);
9453 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
9454 output_asm_insn ("lr\t%0,%2", op);
9455 output_asm_insn ("a\t%0,0(%2)", op);
9456 output_asm_insn ("a\t%2,4(%2)", op);
9457 output_asm_insn ("basr\t%0,%0", op);
9458 output_asm_insn ("l\t%0,%1", op);
9459 }
9460 }
9461
9462 /* Encode symbol attributes (local vs. global, tls model) of a SYMBOL_REF
9463 into its SYMBOL_REF_FLAGS. */
9464
9465 static void
9466 s390_encode_section_info (tree decl, rtx rtl, int first)
9467 {
9468 default_encode_section_info (decl, rtl, first);
9469
9470 if (TREE_CODE (decl) == VAR_DECL)
9471 {
9472 /* If a variable has a forced alignment to < 2 bytes, mark it
9473 with SYMBOL_FLAG_ALIGN1 to prevent it from being used as a LARL
9474 operand. */
9475 if (DECL_USER_ALIGN (decl) && DECL_ALIGN (decl) < 16)
9476 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_ALIGN1;
9477 if (!DECL_SIZE (decl)
9478 || !DECL_ALIGN (decl)
9479 || !host_integerp (DECL_SIZE (decl), 0)
9480 || (DECL_ALIGN (decl) <= 64
9481 && DECL_ALIGN (decl) != tree_low_cst (DECL_SIZE (decl), 0)))
9482 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
9483 }
9484
9485 /* Literal pool references don't have a decl so they are handled
9486 differently here. We rely on the information in the MEM_ALIGN
9487 entry to decide upon natural alignment. */
9488 if (MEM_P (rtl)
9489 && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
9490 && TREE_CONSTANT_POOL_ADDRESS_P (XEXP (rtl, 0))
9491 && (MEM_ALIGN (rtl) == 0
9492 || GET_MODE_BITSIZE (GET_MODE (rtl)) == 0
9493 || MEM_ALIGN (rtl) < GET_MODE_BITSIZE (GET_MODE (rtl))))
9494 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
9495 }
9496
9497 /* Output thunk to FILE that implements a C++ virtual function call (with
9498 multiple inheritance) to FUNCTION. The thunk adjusts the this pointer
9499 by DELTA, and unless VCALL_OFFSET is zero, applies an additional adjustment
9500 stored at VCALL_OFFSET in the vtable whose address is located at offset 0
9501 relative to the resulting this pointer. */
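/* In C-like pseudo code the thunk performs (sketch):

     this += DELTA;
     if (VCALL_OFFSET)
       this += *(long *) (*(void **) this + VCALL_OFFSET);
     goto FUNCTION;

   The assembly variants below merely implement this for the various
   constant ranges, address sizes and PIC modes.  */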
9502
9503 static void
9504 s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
9505 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
9506 tree function)
9507 {
9508 rtx op[10];
9509 int nonlocal = 0;
9510
9511 /* Make sure unwind info is emitted for the thunk if needed. */
9512 final_start_function (emit_barrier (), file, 1);
9513
9514 /* Operand 0 is the target function. */
9515 op[0] = XEXP (DECL_RTL (function), 0);
9516 if (flag_pic && !SYMBOL_REF_LOCAL_P (op[0]))
9517 {
9518 nonlocal = 1;
9519 op[0] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[0]),
9520 TARGET_64BIT ? UNSPEC_PLT : UNSPEC_GOT);
9521 op[0] = gen_rtx_CONST (Pmode, op[0]);
9522 }
9523
9524 /* Operand 1 is the 'this' pointer. */
9525 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
9526 op[1] = gen_rtx_REG (Pmode, 3);
9527 else
9528 op[1] = gen_rtx_REG (Pmode, 2);
9529
9530 /* Operand 2 is the delta. */
9531 op[2] = GEN_INT (delta);
9532
9533 /* Operand 3 is the vcall_offset. */
9534 op[3] = GEN_INT (vcall_offset);
9535
9536 /* Operand 4 is the temporary register. */
9537 op[4] = gen_rtx_REG (Pmode, 1);
9538
9539 /* Operands 5 to 8 can be used as labels. */
9540 op[5] = NULL_RTX;
9541 op[6] = NULL_RTX;
9542 op[7] = NULL_RTX;
9543 op[8] = NULL_RTX;
9544
9545 /* Operand 9 can be used for temporary register. */
9546 op[9] = NULL_RTX;
9547
9548 /* Generate code. */
9549 if (TARGET_64BIT)
9550 {
9551 /* Setup literal pool pointer if required. */
9552 if ((!DISP_IN_RANGE (delta)
9553 && !CONST_OK_FOR_K (delta)
9554 && !CONST_OK_FOR_Os (delta))
9555 || (!DISP_IN_RANGE (vcall_offset)
9556 && !CONST_OK_FOR_K (vcall_offset)
9557 && !CONST_OK_FOR_Os (vcall_offset)))
9558 {
9559 op[5] = gen_label_rtx ();
9560 output_asm_insn ("larl\t%4,%5", op);
9561 }
9562
9563 /* Add DELTA to this pointer. */
9564 if (delta)
9565 {
9566 if (CONST_OK_FOR_J (delta))
9567 output_asm_insn ("la\t%1,%2(%1)", op);
9568 else if (DISP_IN_RANGE (delta))
9569 output_asm_insn ("lay\t%1,%2(%1)", op);
9570 else if (CONST_OK_FOR_K (delta))
9571 output_asm_insn ("aghi\t%1,%2", op);
9572 else if (CONST_OK_FOR_Os (delta))
9573 output_asm_insn ("agfi\t%1,%2", op);
9574 else
9575 {
9576 op[6] = gen_label_rtx ();
9577 output_asm_insn ("agf\t%1,%6-%5(%4)", op);
9578 }
9579 }
9580
9581 /* Perform vcall adjustment. */
9582 if (vcall_offset)
9583 {
9584 if (DISP_IN_RANGE (vcall_offset))
9585 {
9586 output_asm_insn ("lg\t%4,0(%1)", op);
9587 output_asm_insn ("ag\t%1,%3(%4)", op);
9588 }
9589 else if (CONST_OK_FOR_K (vcall_offset))
9590 {
9591 output_asm_insn ("lghi\t%4,%3", op);
9592 output_asm_insn ("ag\t%4,0(%1)", op);
9593 output_asm_insn ("ag\t%1,0(%4)", op);
9594 }
9595 else if (CONST_OK_FOR_Os (vcall_offset))
9596 {
9597 output_asm_insn ("lgfi\t%4,%3", op);
9598 output_asm_insn ("ag\t%4,0(%1)", op);
9599 output_asm_insn ("ag\t%1,0(%4)", op);
9600 }
9601 else
9602 {
9603 op[7] = gen_label_rtx ();
9604 output_asm_insn ("llgf\t%4,%7-%5(%4)", op);
9605 output_asm_insn ("ag\t%4,0(%1)", op);
9606 output_asm_insn ("ag\t%1,0(%4)", op);
9607 }
9608 }
9609
9610 /* Jump to target. */
9611 output_asm_insn ("jg\t%0", op);
9612
9613 /* Output literal pool if required. */
9614 if (op[5])
9615 {
9616 output_asm_insn (".align\t4", op);
9617 targetm.asm_out.internal_label (file, "L",
9618 CODE_LABEL_NUMBER (op[5]));
9619 }
9620 if (op[6])
9621 {
9622 targetm.asm_out.internal_label (file, "L",
9623 CODE_LABEL_NUMBER (op[6]));
9624 output_asm_insn (".long\t%2", op);
9625 }
9626 if (op[7])
9627 {
9628 targetm.asm_out.internal_label (file, "L",
9629 CODE_LABEL_NUMBER (op[7]));
9630 output_asm_insn (".long\t%3", op);
9631 }
9632 }
9633 else
9634 {
9635 /* Setup base pointer if required. */
9636 if (!vcall_offset
9637 || (!DISP_IN_RANGE (delta)
9638 && !CONST_OK_FOR_K (delta)
9639 && !CONST_OK_FOR_Os (delta))
9640 || (!DISP_IN_RANGE (delta)
9641 && !CONST_OK_FOR_K (vcall_offset)
9642 && !CONST_OK_FOR_Os (vcall_offset)))
9643 {
9644 op[5] = gen_label_rtx ();
9645 output_asm_insn ("basr\t%4,0", op);
9646 targetm.asm_out.internal_label (file, "L",
9647 CODE_LABEL_NUMBER (op[5]));
9648 }
9649
9650 /* Add DELTA to this pointer. */
9651 if (delta)
9652 {
9653 if (CONST_OK_FOR_J (delta))
9654 output_asm_insn ("la\t%1,%2(%1)", op);
9655 else if (DISP_IN_RANGE (delta))
9656 output_asm_insn ("lay\t%1,%2(%1)", op);
9657 else if (CONST_OK_FOR_K (delta))
9658 output_asm_insn ("ahi\t%1,%2", op);
9659 else if (CONST_OK_FOR_Os (delta))
9660 output_asm_insn ("afi\t%1,%2", op);
9661 else
9662 {
9663 op[6] = gen_label_rtx ();
9664 output_asm_insn ("a\t%1,%6-%5(%4)", op);
9665 }
9666 }
9667
9668 /* Perform vcall adjustment. */
9669 if (vcall_offset)
9670 {
9671 if (CONST_OK_FOR_J (vcall_offset))
9672 {
9673 output_asm_insn ("l\t%4,0(%1)", op);
9674 output_asm_insn ("a\t%1,%3(%4)", op);
9675 }
9676 else if (DISP_IN_RANGE (vcall_offset))
9677 {
9678 output_asm_insn ("l\t%4,0(%1)", op);
9679 output_asm_insn ("ay\t%1,%3(%4)", op);
9680 }
9681 else if (CONST_OK_FOR_K (vcall_offset))
9682 {
9683 output_asm_insn ("lhi\t%4,%3", op);
9684 output_asm_insn ("a\t%4,0(%1)", op);
9685 output_asm_insn ("a\t%1,0(%4)", op);
9686 }
9687 else if (CONST_OK_FOR_Os (vcall_offset))
9688 {
9689 output_asm_insn ("iilf\t%4,%3", op);
9690 output_asm_insn ("a\t%4,0(%1)", op);
9691 output_asm_insn ("a\t%1,0(%4)", op);
9692 }
9693 else
9694 {
9695 op[7] = gen_label_rtx ();
9696 output_asm_insn ("l\t%4,%7-%5(%4)", op);
9697 output_asm_insn ("a\t%4,0(%1)", op);
9698 output_asm_insn ("a\t%1,0(%4)", op);
9699 }
9700
9701 /* We had to clobber the base pointer register.
9702 Re-setup the base pointer (with a different base). */
9703 op[5] = gen_label_rtx ();
9704 output_asm_insn ("basr\t%4,0", op);
9705 targetm.asm_out.internal_label (file, "L",
9706 CODE_LABEL_NUMBER (op[5]));
9707 }
9708
9709 /* Jump to target. */
9710 op[8] = gen_label_rtx ();
9711
9712 if (!flag_pic)
9713 output_asm_insn ("l\t%4,%8-%5(%4)", op);
9714 else if (!nonlocal)
9715 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9716 /* We cannot call through .plt, since .plt requires %r12 loaded. */
9717 else if (flag_pic == 1)
9718 {
9719 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9720 output_asm_insn ("l\t%4,%0(%4)", op);
9721 }
9722 else if (flag_pic == 2)
9723 {
9724 op[9] = gen_rtx_REG (Pmode, 0);
9725 output_asm_insn ("l\t%9,%8-4-%5(%4)", op);
9726 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9727 output_asm_insn ("ar\t%4,%9", op);
9728 output_asm_insn ("l\t%4,0(%4)", op);
9729 }
9730
9731 output_asm_insn ("br\t%4", op);
9732
9733 /* Output literal pool. */
9734 output_asm_insn (".align\t4", op);
9735
9736 if (nonlocal && flag_pic == 2)
9737 output_asm_insn (".long\t%0", op);
9738 if (nonlocal)
9739 {
9740 op[0] = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
9741 SYMBOL_REF_FLAGS (op[0]) = SYMBOL_FLAG_LOCAL;
9742 }
9743
9744 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[8]));
9745 if (!flag_pic)
9746 output_asm_insn (".long\t%0", op);
9747 else
9748 output_asm_insn (".long\t%0-%5", op);
9749
9750 if (op[6])
9751 {
9752 targetm.asm_out.internal_label (file, "L",
9753 CODE_LABEL_NUMBER (op[6]));
9754 output_asm_insn (".long\t%2", op);
9755 }
9756 if (op[7])
9757 {
9758 targetm.asm_out.internal_label (file, "L",
9759 CODE_LABEL_NUMBER (op[7]));
9760 output_asm_insn (".long\t%3", op);
9761 }
9762 }
9763 final_end_function ();
9764 }
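/* For illustration only: with a DELTA satisfying CONST_OK_FOR_J and a zero
   VCALL_OFFSET, the 64-bit path above reduces to something like

	la	%r2,DELTA(%r2)
	jg	function

   i.e. the 'this' pointer is adjusted in place and control is transferred
   with a single PC-relative jump; no literal pool is emitted in that
   case.  */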
9765
9766 static bool
9767 s390_valid_pointer_mode (enum machine_mode mode)
9768 {
9769 return (mode == SImode || (TARGET_64BIT && mode == DImode));
9770 }
9771
9772 /* Checks whether the given CALL_EXPR would use a call-saved
9773    register.  This is used to decide whether sibling call
9774 optimization could be performed on the respective function
9775 call. */
9776
9777 static bool
9778 s390_call_saved_register_used (tree call_expr)
9779 {
9780 CUMULATIVE_ARGS cum_v;
9781 cumulative_args_t cum;
9782 tree parameter;
9783 enum machine_mode mode;
9784 tree type;
9785 rtx parm_rtx;
9786 int reg, i;
9787
9788 INIT_CUMULATIVE_ARGS (cum_v, NULL, NULL, 0, 0);
9789 cum = pack_cumulative_args (&cum_v);
9790
9791 for (i = 0; i < call_expr_nargs (call_expr); i++)
9792 {
9793 parameter = CALL_EXPR_ARG (call_expr, i);
9794 gcc_assert (parameter);
9795
9796 /* For an undeclared variable passed as parameter we will get
9797 an ERROR_MARK node here. */
9798 if (TREE_CODE (parameter) == ERROR_MARK)
9799 return true;
9800
9801 type = TREE_TYPE (parameter);
9802 gcc_assert (type);
9803
9804 mode = TYPE_MODE (type);
9805 gcc_assert (mode);
9806
9807 if (pass_by_reference (&cum_v, mode, type, true))
9808 {
9809 mode = Pmode;
9810 type = build_pointer_type (type);
9811 }
9812
9813 parm_rtx = s390_function_arg (cum, mode, type, 0);
9814
9815 s390_function_arg_advance (cum, mode, type, 0);
9816
9817 if (!parm_rtx)
9818 continue;
9819
9820 if (REG_P (parm_rtx))
9821 {
9822 for (reg = 0;
9823 reg < HARD_REGNO_NREGS (REGNO (parm_rtx), GET_MODE (parm_rtx));
9824 reg++)
9825 if (!call_used_regs[reg + REGNO (parm_rtx)])
9826 return true;
9827 }
9828
9829 if (GET_CODE (parm_rtx) == PARALLEL)
9830 {
9831 int i;
9832
9833 for (i = 0; i < XVECLEN (parm_rtx, 0); i++)
9834 {
9835 rtx r = XEXP (XVECEXP (parm_rtx, 0, i), 0);
9836
9837 gcc_assert (REG_P (r));
9838
9839 for (reg = 0;
9840 reg < HARD_REGNO_NREGS (REGNO (r), GET_MODE (r));
9841 reg++)
9842 if (!call_used_regs[reg + REGNO (r)])
9843 return true;
9844 }
9845 }
9846
9847 }
9848 return false;
9849 }
9850
9851 /* Return true if the given call expression can be
9852 turned into a sibling call.
9853 DECL holds the declaration of the function to be called whereas
9854 EXP is the call expression itself. */
9855
9856 static bool
9857 s390_function_ok_for_sibcall (tree decl, tree exp)
9858 {
9859 /* The TPF epilogue uses register 1. */
9860 if (TARGET_TPF_PROFILING)
9861 return false;
9862
9863 /* The 31 bit PLT code uses register 12 (GOT pointer - caller saved)
9864 which would have to be restored before the sibcall. */
9865 if (!TARGET_64BIT && flag_pic && decl && !targetm.binds_local_p (decl))
9866 return false;
9867
9868   /* Register 6 on s390 is available as an argument register but is
9869      unfortunately call-saved.  This makes functions needing this register for arguments
9870 not suitable for sibcalls. */
9871 return !s390_call_saved_register_used (exp);
9872 }
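/* Example (assumes the usual s390 ELF ABI where %r2-%r6 are the integer
   argument registers and %r6 is call-saved; not taken from this file):

     extern int callee (int, int, int, int, int);
     int caller (int a, int b, int c, int d, int e)
     {
       return callee (a, b, c, d, e);
     }

   The fifth argument is passed in %r6, so s390_call_saved_register_used
   returns true and the call above is not turned into a sibling call.  */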
9873
9874 /* Return the fixed registers used for condition codes. */
9875
9876 static bool
9877 s390_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
9878 {
9879 *p1 = CC_REGNUM;
9880 *p2 = INVALID_REGNUM;
9881
9882 return true;
9883 }
9884
9885 /* This function is used by the call expanders of the machine description.
9886 It emits the call insn itself together with the necessary operations
9887 to adjust the target address and returns the emitted insn.
9888 ADDR_LOCATION is the target address rtx
9889 TLS_CALL the location of the thread-local symbol
9890 RESULT_REG the register where the result of the call should be stored
9891 RETADDR_REG the register where the return address should be stored
9892 If this parameter is NULL_RTX the call is considered
9893 to be a sibling call. */
9894
9895 rtx
9896 s390_emit_call (rtx addr_location, rtx tls_call, rtx result_reg,
9897 rtx retaddr_reg)
9898 {
9899 bool plt_call = false;
9900 rtx insn;
9901 rtx call;
9902 rtx clobber;
9903 rtvec vec;
9904
9905 /* Direct function calls need special treatment. */
9906 if (GET_CODE (addr_location) == SYMBOL_REF)
9907 {
9908 /* When calling a global routine in PIC mode, we must
9909 replace the symbol itself with the PLT stub. */
9910 if (flag_pic && !SYMBOL_REF_LOCAL_P (addr_location))
9911 {
9912 if (retaddr_reg != NULL_RTX)
9913 {
9914 addr_location = gen_rtx_UNSPEC (Pmode,
9915 gen_rtvec (1, addr_location),
9916 UNSPEC_PLT);
9917 addr_location = gen_rtx_CONST (Pmode, addr_location);
9918 plt_call = true;
9919 }
9920 else
9921 /* For -fpic code the PLT entries might use r12 which is
9922 call-saved. Therefore we cannot do a sibcall when
9923 calling directly using a symbol ref. When reaching
9924 this point we decided (in s390_function_ok_for_sibcall)
9925 to do a sibcall for a function pointer but one of the
9926 optimizers was able to get rid of the function pointer
9927 by propagating the symbol ref into the call. This
9928 optimization is illegal for S/390 so we turn the direct
9929 	     call into an indirect call again.  */
9930 addr_location = force_reg (Pmode, addr_location);
9931 }
9932
9933 /* Unless we can use the bras(l) insn, force the
9934 routine address into a register. */
9935 if (!TARGET_SMALL_EXEC && !TARGET_CPU_ZARCH)
9936 {
9937 if (flag_pic)
9938 addr_location = legitimize_pic_address (addr_location, 0);
9939 else
9940 addr_location = force_reg (Pmode, addr_location);
9941 }
9942 }
9943
9944 /* If it is already an indirect call or the code above moved the
9945 SYMBOL_REF to somewhere else make sure the address can be found in
9946 register 1. */
9947 if (retaddr_reg == NULL_RTX
9948 && GET_CODE (addr_location) != SYMBOL_REF
9949 && !plt_call)
9950 {
9951 emit_move_insn (gen_rtx_REG (Pmode, SIBCALL_REGNUM), addr_location);
9952 addr_location = gen_rtx_REG (Pmode, SIBCALL_REGNUM);
9953 }
9954
9955 addr_location = gen_rtx_MEM (QImode, addr_location);
9956 call = gen_rtx_CALL (VOIDmode, addr_location, const0_rtx);
9957
9958 if (result_reg != NULL_RTX)
9959 call = gen_rtx_SET (VOIDmode, result_reg, call);
9960
9961 if (retaddr_reg != NULL_RTX)
9962 {
9963 clobber = gen_rtx_CLOBBER (VOIDmode, retaddr_reg);
9964
9965 if (tls_call != NULL_RTX)
9966 vec = gen_rtvec (3, call, clobber,
9967 gen_rtx_USE (VOIDmode, tls_call));
9968 else
9969 vec = gen_rtvec (2, call, clobber);
9970
9971 call = gen_rtx_PARALLEL (VOIDmode, vec);
9972 }
9973
9974 insn = emit_call_insn (call);
9975
9976 /* 31-bit PLT stubs and tls calls use the GOT register implicitly. */
9977 if ((!TARGET_64BIT && plt_call) || tls_call != NULL_RTX)
9978 {
9979 /* s390_function_ok_for_sibcall should
9980 have denied sibcalls in this case. */
9981 gcc_assert (retaddr_reg != NULL_RTX);
9982 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, 12));
9983 }
9984 return insn;
9985 }
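/* For reference (shown schematically, derived from the code above): a call
   with a result and a return-address register is emitted as

     (parallel [(set (reg result) (call (mem:QI addr) (const_int 0)))
		(clobber (reg retaddr))])

   plus a (use ...) of the TLS symbol for TLS calls, while a sibling call
   (RETADDR_REG == NULL_RTX) is just the bare CALL with the target address
   forced into the sibcall register unless it is a plain SYMBOL_REF.  */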
9986
9987 /* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
9988
9989 static void
9990 s390_conditional_register_usage (void)
9991 {
9992 int i;
9993
9994 if (flag_pic)
9995 {
9996 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
9997 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
9998 }
9999 if (TARGET_CPU_ZARCH)
10000 {
10001 fixed_regs[BASE_REGNUM] = 0;
10002 call_used_regs[BASE_REGNUM] = 0;
10003 fixed_regs[RETURN_REGNUM] = 0;
10004 call_used_regs[RETURN_REGNUM] = 0;
10005 }
10006 if (TARGET_64BIT)
10007 {
10008 for (i = 24; i < 32; i++)
10009 call_used_regs[i] = call_really_used_regs[i] = 0;
10010 }
10011 else
10012 {
10013 for (i = 18; i < 20; i++)
10014 call_used_regs[i] = call_really_used_regs[i] = 0;
10015 }
10016
10017 if (TARGET_SOFT_FLOAT)
10018 {
10019 for (i = 16; i < 32; i++)
10020 call_used_regs[i] = fixed_regs[i] = 1;
10021 }
10022 }
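/* Note (assuming the usual s390 hard register layout where numbers 16-31
   are the FPRs): the loops above mark %f8-%f15 (regs 24-31) as call-saved
   for the 64-bit ABI and %f4/%f6 (regs 18-19) for the 31-bit ABI, and make
   all FPRs fixed when compiling with -msoft-float.  */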
10023
10024 /* Corresponding function to eh_return expander. */
10025
10026 static GTY(()) rtx s390_tpf_eh_return_symbol;
10027 void
10028 s390_emit_tpf_eh_return (rtx target)
10029 {
10030 rtx insn, reg;
10031
10032 if (!s390_tpf_eh_return_symbol)
10033 s390_tpf_eh_return_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tpf_eh_return");
10034
10035 reg = gen_rtx_REG (Pmode, 2);
10036
10037 emit_move_insn (reg, target);
10038 insn = s390_emit_call (s390_tpf_eh_return_symbol, NULL_RTX, reg,
10039 gen_rtx_REG (Pmode, RETURN_REGNUM));
10040 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
10041
10042 emit_move_insn (EH_RETURN_HANDLER_RTX, reg);
10043 }
10044
10045 /* Rework the prologue/epilogue to avoid saving/restoring
10046 registers unnecessarily. */
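/* For example (illustrative only): a conservatively emitted
   "stmg %r6,%r15,48(%r15)" can be narrowed here to
   "stmg %r14,%r15,112(%r15)" once the final frame layout shows that only
   %r14 and %r15 actually need to be saved; the displacement moves by
   (14 - 6) * UNITS_PER_LONG = 64 bytes, as computed below.  */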
10047
10048 static void
10049 s390_optimize_prologue (void)
10050 {
10051 rtx insn, new_insn, next_insn;
10052
10053 /* Do a final recompute of the frame-related data. */
10054
10055 s390_update_frame_layout ();
10056
10057 /* If all special registers are in fact used, there's nothing we
10058 can do, so no point in walking the insn list. */
10059
10060 if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
10061 && cfun_frame_layout.last_save_gpr >= BASE_REGNUM
10062 && (TARGET_CPU_ZARCH
10063 || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
10064 && cfun_frame_layout.last_save_gpr >= RETURN_REGNUM)))
10065 return;
10066
10067 /* Search for prologue/epilogue insns and replace them. */
10068
10069 for (insn = get_insns (); insn; insn = next_insn)
10070 {
10071 int first, last, off;
10072 rtx set, base, offset;
10073
10074 next_insn = NEXT_INSN (insn);
10075
10076 if (GET_CODE (insn) != INSN)
10077 continue;
10078
10079 if (GET_CODE (PATTERN (insn)) == PARALLEL
10080 && store_multiple_operation (PATTERN (insn), VOIDmode))
10081 {
10082 set = XVECEXP (PATTERN (insn), 0, 0);
10083 first = REGNO (SET_SRC (set));
10084 last = first + XVECLEN (PATTERN (insn), 0) - 1;
10085 offset = const0_rtx;
10086 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
10087 off = INTVAL (offset);
10088
10089 if (GET_CODE (base) != REG || off < 0)
10090 continue;
10091 if (cfun_frame_layout.first_save_gpr != -1
10092 && (cfun_frame_layout.first_save_gpr < first
10093 || cfun_frame_layout.last_save_gpr > last))
10094 continue;
10095 if (REGNO (base) != STACK_POINTER_REGNUM
10096 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10097 continue;
10098 if (first > BASE_REGNUM || last < BASE_REGNUM)
10099 continue;
10100
10101 if (cfun_frame_layout.first_save_gpr != -1)
10102 {
10103 new_insn = save_gprs (base,
10104 off + (cfun_frame_layout.first_save_gpr
10105 - first) * UNITS_PER_LONG,
10106 cfun_frame_layout.first_save_gpr,
10107 cfun_frame_layout.last_save_gpr);
10108 new_insn = emit_insn_before (new_insn, insn);
10109 INSN_ADDRESSES_NEW (new_insn, -1);
10110 }
10111
10112 remove_insn (insn);
10113 continue;
10114 }
10115
10116 if (cfun_frame_layout.first_save_gpr == -1
10117 && GET_CODE (PATTERN (insn)) == SET
10118 && GET_CODE (SET_SRC (PATTERN (insn))) == REG
10119 && (REGNO (SET_SRC (PATTERN (insn))) == BASE_REGNUM
10120 || (!TARGET_CPU_ZARCH
10121 && REGNO (SET_SRC (PATTERN (insn))) == RETURN_REGNUM))
10122 && GET_CODE (SET_DEST (PATTERN (insn))) == MEM)
10123 {
10124 set = PATTERN (insn);
10125 first = REGNO (SET_SRC (set));
10126 offset = const0_rtx;
10127 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
10128 off = INTVAL (offset);
10129
10130 if (GET_CODE (base) != REG || off < 0)
10131 continue;
10132 if (REGNO (base) != STACK_POINTER_REGNUM
10133 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10134 continue;
10135
10136 remove_insn (insn);
10137 continue;
10138 }
10139
10140 if (GET_CODE (PATTERN (insn)) == PARALLEL
10141 && load_multiple_operation (PATTERN (insn), VOIDmode))
10142 {
10143 set = XVECEXP (PATTERN (insn), 0, 0);
10144 first = REGNO (SET_DEST (set));
10145 last = first + XVECLEN (PATTERN (insn), 0) - 1;
10146 offset = const0_rtx;
10147 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
10148 off = INTVAL (offset);
10149
10150 if (GET_CODE (base) != REG || off < 0)
10151 continue;
10152 if (cfun_frame_layout.first_restore_gpr != -1
10153 && (cfun_frame_layout.first_restore_gpr < first
10154 || cfun_frame_layout.last_restore_gpr > last))
10155 continue;
10156 if (REGNO (base) != STACK_POINTER_REGNUM
10157 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10158 continue;
10159 if (first > BASE_REGNUM || last < BASE_REGNUM)
10160 continue;
10161
10162 if (cfun_frame_layout.first_restore_gpr != -1)
10163 {
10164 new_insn = restore_gprs (base,
10165 off + (cfun_frame_layout.first_restore_gpr
10166 - first) * UNITS_PER_LONG,
10167 cfun_frame_layout.first_restore_gpr,
10168 cfun_frame_layout.last_restore_gpr);
10169 new_insn = emit_insn_before (new_insn, insn);
10170 INSN_ADDRESSES_NEW (new_insn, -1);
10171 }
10172
10173 remove_insn (insn);
10174 continue;
10175 }
10176
10177 if (cfun_frame_layout.first_restore_gpr == -1
10178 && GET_CODE (PATTERN (insn)) == SET
10179 && GET_CODE (SET_DEST (PATTERN (insn))) == REG
10180 && (REGNO (SET_DEST (PATTERN (insn))) == BASE_REGNUM
10181 || (!TARGET_CPU_ZARCH
10182 && REGNO (SET_DEST (PATTERN (insn))) == RETURN_REGNUM))
10183 && GET_CODE (SET_SRC (PATTERN (insn))) == MEM)
10184 {
10185 set = PATTERN (insn);
10186 first = REGNO (SET_DEST (set));
10187 offset = const0_rtx;
10188 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
10189 off = INTVAL (offset);
10190
10191 if (GET_CODE (base) != REG || off < 0)
10192 continue;
10193 if (REGNO (base) != STACK_POINTER_REGNUM
10194 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10195 continue;
10196
10197 remove_insn (insn);
10198 continue;
10199 }
10200 }
10201 }
10202
10203 /* On z10 and later the dynamic branch prediction must see the
10204    backward jump within a certain window.  If not, it falls back to
10205 the static prediction. This function rearranges the loop backward
10206 branch in a way which makes the static prediction always correct.
10207 The function returns true if it added an instruction. */
10208 static bool
10209 s390_fix_long_loop_prediction (rtx insn)
10210 {
10211 rtx set = single_set (insn);
10212 rtx code_label, label_ref, new_label;
10213 rtx uncond_jump;
10214 rtx cur_insn;
10215 rtx tmp;
10216 int distance;
10217
10218 /* This will exclude branch on count and branch on index patterns
10219 since these are correctly statically predicted. */
10220 if (!set
10221 || SET_DEST (set) != pc_rtx
10222       || GET_CODE (SET_SRC (set)) != IF_THEN_ELSE)
10223 return false;
10224
10225 label_ref = (GET_CODE (XEXP (SET_SRC (set), 1)) == LABEL_REF ?
10226 XEXP (SET_SRC (set), 1) : XEXP (SET_SRC (set), 2));
10227
10228 gcc_assert (GET_CODE (label_ref) == LABEL_REF);
10229
10230 code_label = XEXP (label_ref, 0);
10231
10232 if (INSN_ADDRESSES (INSN_UID (code_label)) == -1
10233 || INSN_ADDRESSES (INSN_UID (insn)) == -1
10234 || (INSN_ADDRESSES (INSN_UID (insn))
10235 - INSN_ADDRESSES (INSN_UID (code_label)) < PREDICT_DISTANCE))
10236 return false;
10237
10238 for (distance = 0, cur_insn = PREV_INSN (insn);
10239 distance < PREDICT_DISTANCE - 6;
10240 distance += get_attr_length (cur_insn), cur_insn = PREV_INSN (cur_insn))
10241 if (!cur_insn || JUMP_P (cur_insn) || LABEL_P (cur_insn))
10242 return false;
10243
10244 new_label = gen_label_rtx ();
10245 uncond_jump = emit_jump_insn_after (
10246 gen_rtx_SET (VOIDmode, pc_rtx,
10247 gen_rtx_LABEL_REF (VOIDmode, code_label)),
10248 insn);
10249 emit_label_after (new_label, uncond_jump);
10250
10251 tmp = XEXP (SET_SRC (set), 1);
10252 XEXP (SET_SRC (set), 1) = XEXP (SET_SRC (set), 2);
10253 XEXP (SET_SRC (set), 2) = tmp;
10254 INSN_CODE (insn) = -1;
10255
10256 XEXP (label_ref, 0) = new_label;
10257 JUMP_LABEL (insn) = new_label;
10258 JUMP_LABEL (uncond_jump) = code_label;
10259
10260 return true;
10261 }
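/* Schematically, the transformation above turns

     INSN:   if (cond) jump LOOP_HEAD        ; far backward branch

   into

     INSN:   if (!cond) jump NEW_LABEL       ; short forward branch
	     jump LOOP_HEAD                  ; unconditional backward branch
     NEW_LABEL:

   so the backward branch becomes unconditional and is always predicted
   correctly by the static prediction rules.  */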
10262
10263 /* Returns 1 if INSN reads the value of REG for purposes not related
10264 to addressing of memory, and 0 otherwise. */
10265 static int
10266 s390_non_addr_reg_read_p (rtx reg, rtx insn)
10267 {
10268 return reg_referenced_p (reg, PATTERN (insn))
10269 && !reg_used_in_mem_p (REGNO (reg), PATTERN (insn));
10270 }
10271
10272 /* Starting from INSN find_cond_jump looks downwards in the insn
10273 stream for a single jump insn which is the last user of the
10274 condition code set in INSN. */
10275 static rtx
10276 find_cond_jump (rtx insn)
10277 {
10278 for (; insn; insn = NEXT_INSN (insn))
10279 {
10280 rtx ite, cc;
10281
10282 if (LABEL_P (insn))
10283 break;
10284
10285 if (!JUMP_P (insn))
10286 {
10287 if (reg_mentioned_p (gen_rtx_REG (CCmode, CC_REGNUM), insn))
10288 break;
10289 continue;
10290 }
10291
10292 /* This will be triggered by a return. */
10293 if (GET_CODE (PATTERN (insn)) != SET)
10294 break;
10295
10296 gcc_assert (SET_DEST (PATTERN (insn)) == pc_rtx);
10297 ite = SET_SRC (PATTERN (insn));
10298
10299 if (GET_CODE (ite) != IF_THEN_ELSE)
10300 break;
10301
10302 cc = XEXP (XEXP (ite, 0), 0);
10303 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc)))
10304 break;
10305
10306 if (find_reg_note (insn, REG_DEAD, cc))
10307 return insn;
10308 break;
10309 }
10310
10311 return NULL_RTX;
10312 }
10313
10314 /* Swap the condition in COND and the operands in OP0 and OP1 so that
10315 the semantics does not change. If NULL_RTX is passed as COND the
10316 function tries to find the conditional jump starting with INSN. */
10317 static void
10318 s390_swap_cmp (rtx cond, rtx *op0, rtx *op1, rtx insn)
10319 {
10320 rtx tmp = *op0;
10321
10322 if (cond == NULL_RTX)
10323 {
10324 rtx jump = find_cond_jump (NEXT_INSN (insn));
10325 jump = jump ? single_set (jump) : NULL_RTX;
10326
10327 if (jump == NULL_RTX)
10328 return;
10329
10330 cond = XEXP (XEXP (jump, 1), 0);
10331 }
10332
10333 *op0 = *op1;
10334 *op1 = tmp;
10335 PUT_CODE (cond, swap_condition (GET_CODE (cond)));
10336 }
10337
10338 /* On z10, instructions of the compare-and-branch family have the
10339    property of accessing the register occurring as the second operand with
10340 its bits complemented. If such a compare is grouped with a second
10341 instruction that accesses the same register non-complemented, and
10342 if that register's value is delivered via a bypass, then the
10343 pipeline recycles, thereby causing significant performance decline.
10344 This function locates such situations and exchanges the two
10345    operands of the compare.  The function returns true whenever it
10346    has added an insn.  */
10347 static bool
10348 s390_z10_optimize_cmp (rtx insn)
10349 {
10350 rtx prev_insn, next_insn;
10351 bool insn_added_p = false;
10352 rtx cond, *op0, *op1;
10353
10354 if (GET_CODE (PATTERN (insn)) == PARALLEL)
10355 {
10356 /* Handle compare and branch and branch on count
10357 instructions. */
10358 rtx pattern = single_set (insn);
10359
10360 if (!pattern
10361 || SET_DEST (pattern) != pc_rtx
10362 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE)
10363 return false;
10364
10365 cond = XEXP (SET_SRC (pattern), 0);
10366 op0 = &XEXP (cond, 0);
10367 op1 = &XEXP (cond, 1);
10368 }
10369 else if (GET_CODE (PATTERN (insn)) == SET)
10370 {
10371 rtx src, dest;
10372
10373 /* Handle normal compare instructions. */
10374 src = SET_SRC (PATTERN (insn));
10375 dest = SET_DEST (PATTERN (insn));
10376
10377 if (!REG_P (dest)
10378 || !CC_REGNO_P (REGNO (dest))
10379 || GET_CODE (src) != COMPARE)
10380 return false;
10381
10382 /* s390_swap_cmp will try to find the conditional
10383 jump when passing NULL_RTX as condition. */
10384 cond = NULL_RTX;
10385 op0 = &XEXP (src, 0);
10386 op1 = &XEXP (src, 1);
10387 }
10388 else
10389 return false;
10390
10391 if (!REG_P (*op0) || !REG_P (*op1))
10392 return false;
10393
10394 if (GET_MODE_CLASS (GET_MODE (*op0)) != MODE_INT)
10395 return false;
10396
10397 /* Swap the COMPARE arguments and its mask if there is a
10398 conflicting access in the previous insn. */
10399 prev_insn = prev_active_insn (insn);
10400 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
10401 && reg_referenced_p (*op1, PATTERN (prev_insn)))
10402 s390_swap_cmp (cond, op0, op1, insn);
10403
10404 /* Check if there is a conflict with the next insn. If there
10405 was no conflict with the previous insn, then swap the
10406 COMPARE arguments and its mask. If we already swapped
10407 the operands, or if swapping them would cause a conflict
10408 with the previous insn, issue a NOP after the COMPARE in
10409      order to separate the two instructions.  */
10410 next_insn = next_active_insn (insn);
10411 if (next_insn != NULL_RTX && INSN_P (next_insn)
10412 && s390_non_addr_reg_read_p (*op1, next_insn))
10413 {
10414 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
10415 && s390_non_addr_reg_read_p (*op0, prev_insn))
10416 {
10417 if (REGNO (*op1) == 0)
10418 emit_insn_after (gen_nop1 (), insn);
10419 else
10420 emit_insn_after (gen_nop (), insn);
10421 insn_added_p = true;
10422 }
10423 else
10424 s390_swap_cmp (cond, op0, op1, insn);
10425 }
10426 return insn_added_p;
10427 }
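/* Illustration (schematic, register numbers made up): for a compare such as
   "cgrj %r2,%r3,..." the hardware accesses the second operand %r3 in
   complemented form.  If a neighbouring instruction also uses %r3, the code
   above either swaps the operands and the condition (effectively yielding
   "cgrj %r3,%r2,..." with the reversed condition) or, when swapping would
   merely move the conflict to the other operand, inserts a NOP after the
   compare to separate the two instructions.  */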
10428
10429 /* Perform machine-dependent processing. */
10430
10431 static void
10432 s390_reorg (void)
10433 {
10434 bool pool_overflow = false;
10435
10436 /* Make sure all splits have been performed; splits after
10437 machine_dependent_reorg might confuse insn length counts. */
10438 split_all_insns_noflow ();
10439
10440 /* Install the main literal pool and the associated base
10441 register load insns.
10442
10443 In addition, there are two problematic situations we need
10444 to correct:
10445
10446 - the literal pool might be > 4096 bytes in size, so that
10447 some of its elements cannot be directly accessed
10448
10449 - a branch target might be > 64K away from the branch, so that
10450 it is not possible to use a PC-relative instruction.
10451
10452 To fix those, we split the single literal pool into multiple
10453 pool chunks, reloading the pool base register at various
10454 points throughout the function to ensure it always points to
10455 the pool chunk the following code expects, and / or replace
10456 PC-relative branches by absolute branches.
10457
10458 However, the two problems are interdependent: splitting the
10459 literal pool can move a branch further away from its target,
10460 causing the 64K limit to overflow, and on the other hand,
10461 replacing a PC-relative branch by an absolute branch means
10462 we need to put the branch target address into the literal
10463 pool, possibly causing it to overflow.
10464
10465 So, we loop trying to fix up both problems until we manage
10466 to satisfy both conditions at the same time. Note that the
10467 loop is guaranteed to terminate as every pass of the loop
10468 strictly decreases the total number of PC-relative branches
10469 in the function. (This is not completely true as there
10470 might be branch-over-pool insns introduced by chunkify_start.
10471 Those never need to be split however.) */
10472
10473 for (;;)
10474 {
10475 struct constant_pool *pool = NULL;
10476
10477 /* Collect the literal pool. */
10478 if (!pool_overflow)
10479 {
10480 pool = s390_mainpool_start ();
10481 if (!pool)
10482 pool_overflow = true;
10483 }
10484
10485 /* If literal pool overflowed, start to chunkify it. */
10486 if (pool_overflow)
10487 pool = s390_chunkify_start ();
10488
10489 /* Split out-of-range branches. If this has created new
10490 literal pool entries, cancel current chunk list and
10491 recompute it. zSeries machines have large branch
10492 instructions, so we never need to split a branch. */
10493 if (!TARGET_CPU_ZARCH && s390_split_branches ())
10494 {
10495 if (pool_overflow)
10496 s390_chunkify_cancel (pool);
10497 else
10498 s390_mainpool_cancel (pool);
10499
10500 continue;
10501 }
10502
10503 /* If we made it up to here, both conditions are satisfied.
10504 Finish up literal pool related changes. */
10505 if (pool_overflow)
10506 s390_chunkify_finish (pool);
10507 else
10508 s390_mainpool_finish (pool);
10509
10510 /* We're done splitting branches. */
10511 cfun->machine->split_branches_pending_p = false;
10512 break;
10513 }
10514
10515 /* Generate out-of-pool execute target insns. */
10516 if (TARGET_CPU_ZARCH)
10517 {
10518 rtx insn, label, target;
10519
10520 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10521 {
10522 label = s390_execute_label (insn);
10523 if (!label)
10524 continue;
10525
10526 gcc_assert (label != const0_rtx);
10527
10528 target = emit_label (XEXP (label, 0));
10529 INSN_ADDRESSES_NEW (target, -1);
10530
10531 target = emit_insn (s390_execute_target (insn));
10532 INSN_ADDRESSES_NEW (target, -1);
10533 }
10534 }
10535
10536 /* Try to optimize prologue and epilogue further. */
10537 s390_optimize_prologue ();
10538
10539 /* Walk over the insns and do some >=z10 specific changes. */
10540 if (s390_tune == PROCESSOR_2097_Z10
10541 || s390_tune == PROCESSOR_2817_Z196)
10542 {
10543 rtx insn;
10544 bool insn_added_p = false;
10545
10546 /* The insn lengths and addresses have to be up to date for the
10547 following manipulations. */
10548 shorten_branches (get_insns ());
10549
10550 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10551 {
10552 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
10553 continue;
10554
10555 if (JUMP_P (insn))
10556 insn_added_p |= s390_fix_long_loop_prediction (insn);
10557
10558 if ((GET_CODE (PATTERN (insn)) == PARALLEL
10559 || GET_CODE (PATTERN (insn)) == SET)
10560 && s390_tune == PROCESSOR_2097_Z10)
10561 insn_added_p |= s390_z10_optimize_cmp (insn);
10562 }
10563
10564 /* Adjust branches if we added new instructions. */
10565 if (insn_added_p)
10566 shorten_branches (get_insns ());
10567 }
10568 }
10569
10570 /* Return true if INSN is a fp load insn writing register REGNO. */
10571 static inline bool
10572 s390_fpload_toreg (rtx insn, unsigned int regno)
10573 {
10574 rtx set;
10575 enum attr_type flag = s390_safe_attr_type (insn);
10576
10577 if (flag != TYPE_FLOADSF && flag != TYPE_FLOADDF)
10578 return false;
10579
10580 set = single_set (insn);
10581
10582 if (set == NULL_RTX)
10583 return false;
10584
10585 if (!REG_P (SET_DEST (set)) || !MEM_P (SET_SRC (set)))
10586 return false;
10587
10588 if (REGNO (SET_DEST (set)) != regno)
10589 return false;
10590
10591 return true;
10592 }
10593
10594 /* This value describes the distance to be avoided between an
10595    arithmetic fp instruction and an fp load writing the same register.
10596    Z10_EARLYLOAD_DISTANCE - 1 as well as Z10_EARLYLOAD_DISTANCE + 1 are
10597    fine, but the exact value has to be avoided.  Otherwise the FP
10598 pipeline will throw an exception causing a major penalty. */
10599 #define Z10_EARLYLOAD_DISTANCE 7
10600
10601 /* Rearrange the ready list in order to avoid the situation described
10602 for Z10_EARLYLOAD_DISTANCE. A problematic load instruction is
10603 moved to the very end of the ready list. */
10604 static void
10605 s390_z10_prevent_earlyload_conflicts (rtx *ready, int *nready_p)
10606 {
10607 unsigned int regno;
10608 int nready = *nready_p;
10609 rtx tmp;
10610 int i;
10611 rtx insn;
10612 rtx set;
10613 enum attr_type flag;
10614 int distance;
10615
10616 /* Skip DISTANCE - 1 active insns. */
10617 for (insn = last_scheduled_insn, distance = Z10_EARLYLOAD_DISTANCE - 1;
10618 distance > 0 && insn != NULL_RTX;
10619 distance--, insn = prev_active_insn (insn))
10620 if (CALL_P (insn) || JUMP_P (insn))
10621 return;
10622
10623 if (insn == NULL_RTX)
10624 return;
10625
10626 set = single_set (insn);
10627
10628 if (set == NULL_RTX || !REG_P (SET_DEST (set))
10629 || GET_MODE_CLASS (GET_MODE (SET_DEST (set))) != MODE_FLOAT)
10630 return;
10631
10632 flag = s390_safe_attr_type (insn);
10633
10634 if (flag == TYPE_FLOADSF || flag == TYPE_FLOADDF)
10635 return;
10636
10637 regno = REGNO (SET_DEST (set));
10638 i = nready - 1;
10639
10640 while (!s390_fpload_toreg (ready[i], regno) && i > 0)
10641 i--;
10642
10643 if (!i)
10644 return;
10645
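  /* The scheduler keeps the highest-priority insn at the end of READY,
     so slot 0 is issued last; moving the conflicting load there
     maximizes its distance from the arithmetic fp instruction.  */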
10646 tmp = ready[i];
10647 memmove (&ready[1], &ready[0], sizeof (rtx) * i);
10648 ready[0] = tmp;
10649 }
10650
10651 /* This function is called via hook TARGET_SCHED_REORDER before
10652 issuing one insn from list READY which contains *NREADYP entries.
10653 For target z10 it reorders load instructions to avoid early load
10654    conflicts in the floating point pipeline.  */
10655 static int
10656 s390_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
10657 rtx *ready, int *nreadyp, int clock ATTRIBUTE_UNUSED)
10658 {
10659 if (s390_tune == PROCESSOR_2097_Z10)
10660 if (reload_completed && *nreadyp > 1)
10661 s390_z10_prevent_earlyload_conflicts (ready, nreadyp);
10662
10663 return s390_issue_rate ();
10664 }
10665
10666 /* This function is called via hook TARGET_SCHED_VARIABLE_ISSUE after
10667 the scheduler has issued INSN. It stores the last issued insn into
10668 last_scheduled_insn in order to make it available for
10669 s390_sched_reorder. */
10670 static int
10671 s390_sched_variable_issue (FILE *file ATTRIBUTE_UNUSED,
10672 int verbose ATTRIBUTE_UNUSED,
10673 rtx insn, int more)
10674 {
10675 last_scheduled_insn = insn;
10676
10677 if (GET_CODE (PATTERN (insn)) != USE
10678 && GET_CODE (PATTERN (insn)) != CLOBBER)
10679 return more - 1;
10680 else
10681 return more;
10682 }
10683
10684 static void
10685 s390_sched_init (FILE *file ATTRIBUTE_UNUSED,
10686 int verbose ATTRIBUTE_UNUSED,
10687 int max_ready ATTRIBUTE_UNUSED)
10688 {
10689 last_scheduled_insn = NULL_RTX;
10690 }
10691
10692 /* This function checks the whole of insn X for memory references. The
10693 function always returns zero because the framework it is called
10694 from would stop recursively analyzing the insn upon a return value
10695 other than zero. The real result of this function is updating
10696 counter variable MEM_COUNT. */
10697 static int
10698 check_dpu (rtx *x, unsigned *mem_count)
10699 {
10700 if (*x != NULL_RTX && MEM_P (*x))
10701 (*mem_count)++;
10702 return 0;
10703 }
10704
10705 /* This target hook implementation for TARGET_LOOP_UNROLL_ADJUST calculates
10706    the number of times struct loop *LOOP should be unrolled when tuning for
10707    cpus with a built-in stride prefetcher.
10708    The loop is analyzed for memory accesses by calling check_dpu for
10709    each rtx of the loop.  Depending on the loop depth and the number of
10710    memory accesses, a new count <= NUNROLL is returned to improve the
10711    behaviour of the hardware prefetch unit.  */
10712 static unsigned
10713 s390_loop_unroll_adjust (unsigned nunroll, struct loop *loop)
10714 {
10715 basic_block *bbs;
10716 rtx insn;
10717 unsigned i;
10718 unsigned mem_count = 0;
10719
10720 if (s390_tune != PROCESSOR_2097_Z10 && s390_tune != PROCESSOR_2817_Z196)
10721 return nunroll;
10722
10723 /* Count the number of memory references within the loop body. */
10724 bbs = get_loop_body (loop);
10725 for (i = 0; i < loop->num_nodes; i++)
10726 {
10727 for (insn = BB_HEAD (bbs[i]); insn != BB_END (bbs[i]); insn = NEXT_INSN (insn))
10728 if (INSN_P (insn) && INSN_CODE (insn) != -1)
10729 for_each_rtx (&insn, (rtx_function) check_dpu, &mem_count);
10730 }
10731 free (bbs);
10732
10733 /* Prevent division by zero, and we do not need to adjust nunroll in this case. */
10734 if (mem_count == 0)
10735 return nunroll;
10736
10737   switch (loop_depth (loop))
10738 {
10739 case 1:
10740 return MIN (nunroll, 28 / mem_count);
10741 case 2:
10742 return MIN (nunroll, 22 / mem_count);
10743 default:
10744 return MIN (nunroll, 16 / mem_count);
10745 }
10746 }
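/* Worked example (numbers purely illustrative): an innermost loop
   (loop_depth == 1) containing 7 memory references yields
   MIN (nunroll, 28 / 7) = MIN (nunroll, 4), i.e. it is unrolled at most
   four times no matter what the generic unroller requested.  */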
10747
10748 /* Initialize GCC target structure. */
10749
10750 #undef TARGET_ASM_ALIGNED_HI_OP
10751 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
10752 #undef TARGET_ASM_ALIGNED_DI_OP
10753 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
10754 #undef TARGET_ASM_INTEGER
10755 #define TARGET_ASM_INTEGER s390_assemble_integer
10756
10757 #undef TARGET_ASM_OPEN_PAREN
10758 #define TARGET_ASM_OPEN_PAREN ""
10759
10760 #undef TARGET_ASM_CLOSE_PAREN
10761 #define TARGET_ASM_CLOSE_PAREN ""
10762
10763 #undef TARGET_OPTION_OVERRIDE
10764 #define TARGET_OPTION_OVERRIDE s390_option_override
10765
10766 #undef TARGET_ENCODE_SECTION_INFO
10767 #define TARGET_ENCODE_SECTION_INFO s390_encode_section_info
10768
10769 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10770 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
10771
10772 #ifdef HAVE_AS_TLS
10773 #undef TARGET_HAVE_TLS
10774 #define TARGET_HAVE_TLS true
10775 #endif
10776 #undef TARGET_CANNOT_FORCE_CONST_MEM
10777 #define TARGET_CANNOT_FORCE_CONST_MEM s390_cannot_force_const_mem
10778
10779 #undef TARGET_DELEGITIMIZE_ADDRESS
10780 #define TARGET_DELEGITIMIZE_ADDRESS s390_delegitimize_address
10781
10782 #undef TARGET_LEGITIMIZE_ADDRESS
10783 #define TARGET_LEGITIMIZE_ADDRESS s390_legitimize_address
10784
10785 #undef TARGET_RETURN_IN_MEMORY
10786 #define TARGET_RETURN_IN_MEMORY s390_return_in_memory
10787
10788 #undef TARGET_INIT_BUILTINS
10789 #define TARGET_INIT_BUILTINS s390_init_builtins
10790 #undef TARGET_EXPAND_BUILTIN
10791 #define TARGET_EXPAND_BUILTIN s390_expand_builtin
10792
10793 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
10794 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA s390_output_addr_const_extra
10795
10796 #undef TARGET_ASM_OUTPUT_MI_THUNK
10797 #define TARGET_ASM_OUTPUT_MI_THUNK s390_output_mi_thunk
10798 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
10799 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
10800
10801 #undef TARGET_SCHED_ADJUST_PRIORITY
10802 #define TARGET_SCHED_ADJUST_PRIORITY s390_adjust_priority
10803 #undef TARGET_SCHED_ISSUE_RATE
10804 #define TARGET_SCHED_ISSUE_RATE s390_issue_rate
10805 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
10806 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD s390_first_cycle_multipass_dfa_lookahead
10807
10808 #undef TARGET_SCHED_VARIABLE_ISSUE
10809 #define TARGET_SCHED_VARIABLE_ISSUE s390_sched_variable_issue
10810 #undef TARGET_SCHED_REORDER
10811 #define TARGET_SCHED_REORDER s390_sched_reorder
10812 #undef TARGET_SCHED_INIT
10813 #define TARGET_SCHED_INIT s390_sched_init
10814
10815 #undef TARGET_CANNOT_COPY_INSN_P
10816 #define TARGET_CANNOT_COPY_INSN_P s390_cannot_copy_insn_p
10817 #undef TARGET_RTX_COSTS
10818 #define TARGET_RTX_COSTS s390_rtx_costs
10819 #undef TARGET_ADDRESS_COST
10820 #define TARGET_ADDRESS_COST s390_address_cost
10821 #undef TARGET_REGISTER_MOVE_COST
10822 #define TARGET_REGISTER_MOVE_COST s390_register_move_cost
10823 #undef TARGET_MEMORY_MOVE_COST
10824 #define TARGET_MEMORY_MOVE_COST s390_memory_move_cost
10825
10826 #undef TARGET_MACHINE_DEPENDENT_REORG
10827 #define TARGET_MACHINE_DEPENDENT_REORG s390_reorg
10828
10829 #undef TARGET_VALID_POINTER_MODE
10830 #define TARGET_VALID_POINTER_MODE s390_valid_pointer_mode
10831
10832 #undef TARGET_BUILD_BUILTIN_VA_LIST
10833 #define TARGET_BUILD_BUILTIN_VA_LIST s390_build_builtin_va_list
10834 #undef TARGET_EXPAND_BUILTIN_VA_START
10835 #define TARGET_EXPAND_BUILTIN_VA_START s390_va_start
10836 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
10837 #define TARGET_GIMPLIFY_VA_ARG_EXPR s390_gimplify_va_arg
10838
10839 #undef TARGET_PROMOTE_FUNCTION_MODE
10840 #define TARGET_PROMOTE_FUNCTION_MODE s390_promote_function_mode
10841 #undef TARGET_PASS_BY_REFERENCE
10842 #define TARGET_PASS_BY_REFERENCE s390_pass_by_reference
10843
10844 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
10845 #define TARGET_FUNCTION_OK_FOR_SIBCALL s390_function_ok_for_sibcall
10846 #undef TARGET_FUNCTION_ARG
10847 #define TARGET_FUNCTION_ARG s390_function_arg
10848 #undef TARGET_FUNCTION_ARG_ADVANCE
10849 #define TARGET_FUNCTION_ARG_ADVANCE s390_function_arg_advance
10850 #undef TARGET_FUNCTION_VALUE
10851 #define TARGET_FUNCTION_VALUE s390_function_value
10852 #undef TARGET_LIBCALL_VALUE
10853 #define TARGET_LIBCALL_VALUE s390_libcall_value
10854
10855 #undef TARGET_FIXED_CONDITION_CODE_REGS
10856 #define TARGET_FIXED_CONDITION_CODE_REGS s390_fixed_condition_code_regs
10857
10858 #undef TARGET_CC_MODES_COMPATIBLE
10859 #define TARGET_CC_MODES_COMPATIBLE s390_cc_modes_compatible
10860
10861 #undef TARGET_INVALID_WITHIN_DOLOOP
10862 #define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_null
10863
10864 #ifdef HAVE_AS_TLS
10865 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
10866 #define TARGET_ASM_OUTPUT_DWARF_DTPREL s390_output_dwarf_dtprel
10867 #endif
10868
10869 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
10870 #undef TARGET_MANGLE_TYPE
10871 #define TARGET_MANGLE_TYPE s390_mangle_type
10872 #endif
10873
10874 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10875 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
10876
10877 #undef TARGET_PREFERRED_RELOAD_CLASS
10878 #define TARGET_PREFERRED_RELOAD_CLASS s390_preferred_reload_class
10879
10880 #undef TARGET_SECONDARY_RELOAD
10881 #define TARGET_SECONDARY_RELOAD s390_secondary_reload
10882
10883 #undef TARGET_LIBGCC_CMP_RETURN_MODE
10884 #define TARGET_LIBGCC_CMP_RETURN_MODE s390_libgcc_cmp_return_mode
10885
10886 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
10887 #define TARGET_LIBGCC_SHIFT_COUNT_MODE s390_libgcc_shift_count_mode
10888
10889 #undef TARGET_LEGITIMATE_ADDRESS_P
10890 #define TARGET_LEGITIMATE_ADDRESS_P s390_legitimate_address_p
10891
10892 #undef TARGET_LEGITIMATE_CONSTANT_P
10893 #define TARGET_LEGITIMATE_CONSTANT_P s390_legitimate_constant_p
10894
10895 #undef TARGET_CAN_ELIMINATE
10896 #define TARGET_CAN_ELIMINATE s390_can_eliminate
10897
10898 #undef TARGET_CONDITIONAL_REGISTER_USAGE
10899 #define TARGET_CONDITIONAL_REGISTER_USAGE s390_conditional_register_usage
10900
10901 #undef TARGET_LOOP_UNROLL_ADJUST
10902 #define TARGET_LOOP_UNROLL_ADJUST s390_loop_unroll_adjust
10903
10904 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
10905 #define TARGET_ASM_TRAMPOLINE_TEMPLATE s390_asm_trampoline_template
10906 #undef TARGET_TRAMPOLINE_INIT
10907 #define TARGET_TRAMPOLINE_INIT s390_trampoline_init
10908
10909 #undef TARGET_UNWIND_WORD_MODE
10910 #define TARGET_UNWIND_WORD_MODE s390_unwind_word_mode
10911
10912 struct gcc_target targetm = TARGET_INITIALIZER;
10913
10914 #include "gt-s390.h"