s390: Rearrange temporary moves for use of CRJ
[gcc.git] / gcc / config / s390 / s390.c
1 /* Subroutines used for code generation on IBM S/390 and zSeries
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006,
3 2007, 2008, 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
4 Contributed by Hartmut Penner (hpenner@de.ibm.com) and
5 Ulrich Weigand (uweigand@de.ibm.com) and
6 Andreas Krebbel (Andreas.Krebbel@de.ibm.com).
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "tm_p.h"
31 #include "regs.h"
32 #include "hard-reg-set.h"
33 #include "insn-config.h"
34 #include "conditions.h"
35 #include "output.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "except.h"
39 #include "function.h"
40 #include "recog.h"
41 #include "expr.h"
42 #include "reload.h"
43 #include "diagnostic-core.h"
44 #include "basic-block.h"
45 #include "ggc.h"
46 #include "target.h"
47 #include "target-def.h"
48 #include "debug.h"
49 #include "langhooks.h"
50 #include "optabs.h"
51 #include "gimple.h"
52 #include "df.h"
53 #include "params.h"
54 #include "cfgloop.h"
55 #include "opts.h"
56
57 /* Define the specific costs for a given cpu. */
58
59 struct processor_costs
60 {
61 /* multiplication */
62 const int m; /* cost of an M instruction. */
63 const int mghi; /* cost of an MGHI instruction. */
64 const int mh; /* cost of an MH instruction. */
65 const int mhi; /* cost of an MHI instruction. */
66 const int ml; /* cost of an ML instruction. */
67 const int mr; /* cost of an MR instruction. */
68 const int ms; /* cost of an MS instruction. */
69 const int msg; /* cost of an MSG instruction. */
70 const int msgf; /* cost of an MSGF instruction. */
71 const int msgfr; /* cost of an MSGFR instruction. */
72 const int msgr; /* cost of an MSGR instruction. */
73 const int msr; /* cost of an MSR instruction. */
74 const int mult_df; /* cost of multiplication in DFmode. */
75 const int mxbr;
76 /* square root */
77 const int sqxbr; /* cost of square root in TFmode. */
78 const int sqdbr; /* cost of square root in DFmode. */
79 const int sqebr; /* cost of square root in SFmode. */
80 /* multiply and add */
81 const int madbr; /* cost of multiply and add in DFmode. */
82 const int maebr; /* cost of multiply and add in SFmode. */
83 /* division */
84 const int dxbr;
85 const int ddbr;
86 const int debr;
87 const int dlgr;
88 const int dlr;
89 const int dr;
90 const int dsgfr;
91 const int dsgr;
92 };
93
94 const struct processor_costs *s390_cost;
95
96 static const
97 struct processor_costs z900_cost =
98 {
99 COSTS_N_INSNS (5), /* M */
100 COSTS_N_INSNS (10), /* MGHI */
101 COSTS_N_INSNS (5), /* MH */
102 COSTS_N_INSNS (4), /* MHI */
103 COSTS_N_INSNS (5), /* ML */
104 COSTS_N_INSNS (5), /* MR */
105 COSTS_N_INSNS (4), /* MS */
106 COSTS_N_INSNS (15), /* MSG */
107 COSTS_N_INSNS (7), /* MSGF */
108 COSTS_N_INSNS (7), /* MSGFR */
109 COSTS_N_INSNS (10), /* MSGR */
110 COSTS_N_INSNS (4), /* MSR */
111 COSTS_N_INSNS (7), /* multiplication in DFmode */
112 COSTS_N_INSNS (13), /* MXBR */
113 COSTS_N_INSNS (136), /* SQXBR */
114 COSTS_N_INSNS (44), /* SQDBR */
115 COSTS_N_INSNS (35), /* SQEBR */
116 COSTS_N_INSNS (18), /* MADBR */
117 COSTS_N_INSNS (13), /* MAEBR */
118 COSTS_N_INSNS (134), /* DXBR */
119 COSTS_N_INSNS (30), /* DDBR */
120 COSTS_N_INSNS (27), /* DEBR */
121 COSTS_N_INSNS (220), /* DLGR */
122 COSTS_N_INSNS (34), /* DLR */
123 COSTS_N_INSNS (34), /* DR */
124 COSTS_N_INSNS (32), /* DSGFR */
125 COSTS_N_INSNS (32), /* DSGR */
126 };
127
128 static const
129 struct processor_costs z990_cost =
130 {
131 COSTS_N_INSNS (4), /* M */
132 COSTS_N_INSNS (2), /* MGHI */
133 COSTS_N_INSNS (2), /* MH */
134 COSTS_N_INSNS (2), /* MHI */
135 COSTS_N_INSNS (4), /* ML */
136 COSTS_N_INSNS (4), /* MR */
137 COSTS_N_INSNS (5), /* MS */
138 COSTS_N_INSNS (6), /* MSG */
139 COSTS_N_INSNS (4), /* MSGF */
140 COSTS_N_INSNS (4), /* MSGFR */
141 COSTS_N_INSNS (4), /* MSGR */
142 COSTS_N_INSNS (4), /* MSR */
143 COSTS_N_INSNS (1), /* multiplication in DFmode */
144 COSTS_N_INSNS (28), /* MXBR */
145 COSTS_N_INSNS (130), /* SQXBR */
146 COSTS_N_INSNS (66), /* SQDBR */
147 COSTS_N_INSNS (38), /* SQEBR */
148 COSTS_N_INSNS (1), /* MADBR */
149 COSTS_N_INSNS (1), /* MAEBR */
150 COSTS_N_INSNS (60), /* DXBR */
151 COSTS_N_INSNS (40), /* DDBR */
152 COSTS_N_INSNS (26), /* DEBR */
153 COSTS_N_INSNS (176), /* DLGR */
154 COSTS_N_INSNS (31), /* DLR */
155 COSTS_N_INSNS (31), /* DR */
156 COSTS_N_INSNS (31), /* DSGFR */
157 COSTS_N_INSNS (31), /* DSGR */
158 };
159
160 static const
161 struct processor_costs z9_109_cost =
162 {
163 COSTS_N_INSNS (4), /* M */
164 COSTS_N_INSNS (2), /* MGHI */
165 COSTS_N_INSNS (2), /* MH */
166 COSTS_N_INSNS (2), /* MHI */
167 COSTS_N_INSNS (4), /* ML */
168 COSTS_N_INSNS (4), /* MR */
169 COSTS_N_INSNS (5), /* MS */
170 COSTS_N_INSNS (6), /* MSG */
171 COSTS_N_INSNS (4), /* MSGF */
172 COSTS_N_INSNS (4), /* MSGFR */
173 COSTS_N_INSNS (4), /* MSGR */
174 COSTS_N_INSNS (4), /* MSR */
175 COSTS_N_INSNS (1), /* multiplication in DFmode */
176 COSTS_N_INSNS (28), /* MXBR */
177 COSTS_N_INSNS (130), /* SQXBR */
178 COSTS_N_INSNS (66), /* SQDBR */
179 COSTS_N_INSNS (38), /* SQEBR */
180 COSTS_N_INSNS (1), /* MADBR */
181 COSTS_N_INSNS (1), /* MAEBR */
182 COSTS_N_INSNS (60), /* DXBR */
183 COSTS_N_INSNS (40), /* DDBR */
184 COSTS_N_INSNS (26), /* DEBR */
185 COSTS_N_INSNS (30), /* DLGR */
186 COSTS_N_INSNS (23), /* DLR */
187 COSTS_N_INSNS (23), /* DR */
188 COSTS_N_INSNS (24), /* DSGFR */
189 COSTS_N_INSNS (24), /* DSGR */
190 };
191
192 static const
193 struct processor_costs z10_cost =
194 {
195 COSTS_N_INSNS (10), /* M */
196 COSTS_N_INSNS (10), /* MGHI */
197 COSTS_N_INSNS (10), /* MH */
198 COSTS_N_INSNS (10), /* MHI */
199 COSTS_N_INSNS (10), /* ML */
200 COSTS_N_INSNS (10), /* MR */
201 COSTS_N_INSNS (10), /* MS */
202 COSTS_N_INSNS (10), /* MSG */
203 COSTS_N_INSNS (10), /* MSGF */
204 COSTS_N_INSNS (10), /* MSGFR */
205 COSTS_N_INSNS (10), /* MSGR */
206 COSTS_N_INSNS (10), /* MSR */
207 COSTS_N_INSNS (1) , /* multiplication in DFmode */
208 COSTS_N_INSNS (50), /* MXBR */
209 COSTS_N_INSNS (120), /* SQXBR */
210 COSTS_N_INSNS (52), /* SQDBR */
211 COSTS_N_INSNS (38), /* SQEBR */
212 COSTS_N_INSNS (1), /* MADBR */
213 COSTS_N_INSNS (1), /* MAEBR */
214 COSTS_N_INSNS (111), /* DXBR */
215 COSTS_N_INSNS (39), /* DDBR */
216 COSTS_N_INSNS (32), /* DEBR */
217 COSTS_N_INSNS (160), /* DLGR */
218 COSTS_N_INSNS (71), /* DLR */
219 COSTS_N_INSNS (71), /* DR */
220 COSTS_N_INSNS (71), /* DSGFR */
221 COSTS_N_INSNS (71), /* DSGR */
222 };
223
224 static const
225 struct processor_costs z196_cost =
226 {
227 COSTS_N_INSNS (7), /* M */
228 COSTS_N_INSNS (5), /* MGHI */
229 COSTS_N_INSNS (5), /* MH */
230 COSTS_N_INSNS (5), /* MHI */
231 COSTS_N_INSNS (7), /* ML */
232 COSTS_N_INSNS (7), /* MR */
233 COSTS_N_INSNS (6), /* MS */
234 COSTS_N_INSNS (8), /* MSG */
235 COSTS_N_INSNS (6), /* MSGF */
236 COSTS_N_INSNS (6), /* MSGFR */
237 COSTS_N_INSNS (8), /* MSGR */
238 COSTS_N_INSNS (6), /* MSR */
239 COSTS_N_INSNS (1) , /* multiplication in DFmode */
240 COSTS_N_INSNS (40), /* MXBR B+40 */
241 COSTS_N_INSNS (100), /* SQXBR B+100 */
242 COSTS_N_INSNS (42), /* SQDBR B+42 */
243 COSTS_N_INSNS (28), /* SQEBR B+28 */
244 COSTS_N_INSNS (1), /* MADBR B */
245 COSTS_N_INSNS (1), /* MAEBR B */
246 COSTS_N_INSNS (101), /* DXBR B+101 */
247 COSTS_N_INSNS (29), /* DDBR */
248 COSTS_N_INSNS (22), /* DEBR */
249 COSTS_N_INSNS (160), /* DLGR cracked */
250 COSTS_N_INSNS (160), /* DLR cracked */
251 COSTS_N_INSNS (160), /* DR expanded */
252 COSTS_N_INSNS (160), /* DSGFR cracked */
253 COSTS_N_INSNS (160), /* DSGR cracked */
254 };
255
256 extern int reload_completed;
257
258 /* Kept up to date using the SCHED_VARIABLE_ISSUE hook. */
259 static rtx last_scheduled_insn;
260
261 /* Structure used to hold the components of a S/390 memory
262 address. A legitimate address on S/390 is of the general
263 form
264 base + index + displacement
265 where any of the components is optional.
266
267 base and index are registers of the class ADDR_REGS,
268 displacement is an unsigned 12-bit immediate constant. */
269
270 struct s390_address
271 {
272 rtx base;
273 rtx indx;
274 rtx disp;
275 bool pointer;
276 bool literal_pool;
277 };
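
/* For illustration, with arbitrarily chosen hard registers: the
   canonical address RTX
     (plus (plus (reg %r1) (reg %r2)) (const_int 40))
   is decomposed by s390_decompose_address below into indx = %r1,
   base = %r2 and disp = (const_int 40), i.e. the
   base + index + displacement form described above.  */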
278
279 /* The following structure is embedded in the machine
280 specific part of struct function. */
281
282 struct GTY (()) s390_frame_layout
283 {
284 /* Offset within stack frame. */
285 HOST_WIDE_INT gprs_offset;
286 HOST_WIDE_INT f0_offset;
287 HOST_WIDE_INT f4_offset;
288 HOST_WIDE_INT f8_offset;
289 HOST_WIDE_INT backchain_offset;
290
291 /* Number of the first and last gpr for which slots in the register
292 save area are reserved. */
293 int first_save_gpr_slot;
294 int last_save_gpr_slot;
295
296 /* Number of first and last gpr to be saved, restored. */
297 int first_save_gpr;
298 int first_restore_gpr;
299 int last_save_gpr;
300 int last_restore_gpr;
301
302 /* Bits standing for floating point registers. Set, if the
303 respective register has to be saved. Starting with reg 16 (f0)
304 at the rightmost bit.
305 Bit 15 - 8 7 6 5 4 3 2 1 0
306 fpr 15 - 8 7 5 3 1 6 4 2 0
307 reg 31 - 24 23 22 21 20 19 18 17 16 */
308 unsigned int fpr_bitmap;
309
310 /* Number of floating point registers f8-f15 which must be saved. */
311 int high_fprs;
312
313 /* Set if return address needs to be saved.
314 This flag is set by s390_return_addr_rtx if it could not use
315 the initial value of r14 and therefore depends on r14 saved
316 to the stack. */
317 bool save_return_addr_p;
318
319 /* Size of stack frame. */
320 HOST_WIDE_INT frame_size;
321 };
322
323 /* Define the structure for the machine field in struct function. */
324
325 struct GTY(()) machine_function
326 {
327 struct s390_frame_layout frame_layout;
328
329 /* Literal pool base register. */
330 rtx base_reg;
331
332 /* True if we may need to perform branch splitting. */
333 bool split_branches_pending_p;
334
335 /* Some local-dynamic TLS symbol name. */
336 const char *some_ld_name;
337
338 bool has_landing_pad_p;
339 };
340
341 /* A few accessor macros for struct cfun->machine->s390_frame_layout. */
342
343 #define cfun_frame_layout (cfun->machine->frame_layout)
344 #define cfun_save_high_fprs_p (!!cfun_frame_layout.high_fprs)
345 #define cfun_gprs_save_area_size ((cfun_frame_layout.last_save_gpr_slot - \
346 cfun_frame_layout.first_save_gpr_slot + 1) * UNITS_PER_LONG)
347 #define cfun_set_fpr_bit(BITNUM) (cfun->machine->frame_layout.fpr_bitmap |= \
348 (1 << (BITNUM)))
349 #define cfun_fpr_bit_p(BITNUM) (!!(cfun->machine->frame_layout.fpr_bitmap & \
350 (1 << (BITNUM))))
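
/* Example usage (bit numbers follow the fpr_bitmap layout documented in
   s390_frame_layout above): calling cfun_set_fpr_bit (0) for f0 and
   cfun_set_fpr_bit (2) for f4 leaves fpr_bitmap == 0x5, and
   cfun_fpr_bit_p (2) then yields true.  */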
351
352 /* Number of GPRs and FPRs used for argument passing. */
353 #define GP_ARG_NUM_REG 5
354 #define FP_ARG_NUM_REG (TARGET_64BIT? 4 : 2)
355
356 /* A couple of shortcuts. */
357 #define CONST_OK_FOR_J(x) \
358 CONST_OK_FOR_CONSTRAINT_P((x), 'J', "J")
359 #define CONST_OK_FOR_K(x) \
360 CONST_OK_FOR_CONSTRAINT_P((x), 'K', "K")
361 #define CONST_OK_FOR_Os(x) \
362 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
363 #define CONST_OK_FOR_Op(x) \
364 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
365 #define CONST_OK_FOR_On(x) \
366 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")
367
368 #define REGNO_PAIR_OK(REGNO, MODE) \
369 (HARD_REGNO_NREGS ((REGNO), (MODE)) == 1 || !((REGNO) & 1))
370
371 /* That's the read ahead of the dynamic branch prediction unit in
372 bytes on a z10 (or higher) CPU. */
373 #define PREDICT_DISTANCE (TARGET_Z10 ? 384 : 2048)
374
375 /* Return the alignment for LABEL. We default to the -falign-labels
376 value except for the literal pool base label. */
377 int
378 s390_label_align (rtx label)
379 {
380 rtx prev_insn = prev_active_insn (label);
381
382 if (prev_insn == NULL_RTX)
383 goto old;
384
385 prev_insn = single_set (prev_insn);
386
387 if (prev_insn == NULL_RTX)
388 goto old;
389
390 prev_insn = SET_SRC (prev_insn);
391
392 /* Don't align literal pool base labels. */
393 if (GET_CODE (prev_insn) == UNSPEC
394 && XINT (prev_insn, 1) == UNSPEC_MAIN_BASE)
395 return 0;
396
397 old:
398 return align_labels_log;
399 }
400
401 static enum machine_mode
402 s390_libgcc_cmp_return_mode (void)
403 {
404 return TARGET_64BIT ? DImode : SImode;
405 }
406
407 static enum machine_mode
408 s390_libgcc_shift_count_mode (void)
409 {
410 return TARGET_64BIT ? DImode : SImode;
411 }
412
413 static enum machine_mode
414 s390_unwind_word_mode (void)
415 {
416 return TARGET_64BIT ? DImode : SImode;
417 }
418
419 /* Return true if the back end supports mode MODE. */
420 static bool
421 s390_scalar_mode_supported_p (enum machine_mode mode)
422 {
423 /* In contrast to the default implementation, reject TImode constants on
424 31-bit TARGET_ZARCH for ABI compliance. */
425 if (!TARGET_64BIT && TARGET_ZARCH && mode == TImode)
426 return false;
427
428 if (DECIMAL_FLOAT_MODE_P (mode))
429 return default_decimal_float_supported_p ();
430
431 return default_scalar_mode_supported_p (mode);
432 }
433
434 /* Set the has_landing_pad_p flag in struct machine_function to VALUE. */
435
436 void
437 s390_set_has_landing_pad_p (bool value)
438 {
439 cfun->machine->has_landing_pad_p = value;
440 }
441
442 /* If two condition code modes are compatible, return a condition code
443 mode which is compatible with both. Otherwise, return
444 VOIDmode. */
445
446 static enum machine_mode
447 s390_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
448 {
449 if (m1 == m2)
450 return m1;
451
452 switch (m1)
453 {
454 case CCZmode:
455 if (m2 == CCUmode || m2 == CCTmode || m2 == CCZ1mode
456 || m2 == CCSmode || m2 == CCSRmode || m2 == CCURmode)
457 return m2;
458 return VOIDmode;
459
460 case CCSmode:
461 case CCUmode:
462 case CCTmode:
463 case CCSRmode:
464 case CCURmode:
465 case CCZ1mode:
466 if (m2 == CCZmode)
467 return m1;
468
469 return VOIDmode;
470
471 default:
472 return VOIDmode;
473 }
474 return VOIDmode;
475 }
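
/* For example: s390_cc_modes_compatible (CCZmode, CCUmode) yields CCUmode,
   since CCZmode is compatible with the more constrained CCUmode, whereas
   s390_cc_modes_compatible (CCSmode, CCUmode) yields VOIDmode.  */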
476
477 /* Return true if SET either doesn't set the CC register, or else
478 the source and destination have matching CC modes and that
479 CC mode is at least as constrained as REQ_MODE. */
480
481 static bool
482 s390_match_ccmode_set (rtx set, enum machine_mode req_mode)
483 {
484 enum machine_mode set_mode;
485
486 gcc_assert (GET_CODE (set) == SET);
487
488 if (GET_CODE (SET_DEST (set)) != REG || !CC_REGNO_P (REGNO (SET_DEST (set))))
489 return 1;
490
491 set_mode = GET_MODE (SET_DEST (set));
492 switch (set_mode)
493 {
494 case CCSmode:
495 case CCSRmode:
496 case CCUmode:
497 case CCURmode:
498 case CCLmode:
499 case CCL1mode:
500 case CCL2mode:
501 case CCL3mode:
502 case CCT1mode:
503 case CCT2mode:
504 case CCT3mode:
505 if (req_mode != set_mode)
506 return 0;
507 break;
508
509 case CCZmode:
510 if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode
511 && req_mode != CCSRmode && req_mode != CCURmode)
512 return 0;
513 break;
514
515 case CCAPmode:
516 case CCANmode:
517 if (req_mode != CCAmode)
518 return 0;
519 break;
520
521 default:
522 gcc_unreachable ();
523 }
524
525 return (GET_MODE (SET_SRC (set)) == set_mode);
526 }
527
528 /* Return true if every SET in INSN that sets the CC register
529 has source and destination with matching CC modes and that
530 CC mode is at least as constrained as REQ_MODE.
531 If REQ_MODE is VOIDmode, always return false. */
532
533 bool
534 s390_match_ccmode (rtx insn, enum machine_mode req_mode)
535 {
536 int i;
537
538 /* s390_tm_ccmode returns VOIDmode to indicate failure. */
539 if (req_mode == VOIDmode)
540 return false;
541
542 if (GET_CODE (PATTERN (insn)) == SET)
543 return s390_match_ccmode_set (PATTERN (insn), req_mode);
544
545 if (GET_CODE (PATTERN (insn)) == PARALLEL)
546 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
547 {
548 rtx set = XVECEXP (PATTERN (insn), 0, i);
549 if (GET_CODE (set) == SET)
550 if (!s390_match_ccmode_set (set, req_mode))
551 return false;
552 }
553
554 return true;
555 }
556
557 /* If a test-under-mask instruction can be used to implement
558 (compare (and ... OP1) OP2), return the CC mode required
559 to do that. Otherwise, return VOIDmode.
560 MIXED is true if the instruction can distinguish between
561 CC1 and CC2 for mixed selected bits (TMxx), it is false
562 if the instruction cannot (TM). */
563
564 enum machine_mode
565 s390_tm_ccmode (rtx op1, rtx op2, bool mixed)
566 {
567 int bit0, bit1;
568
569 /* ??? Fixme: should work on CONST_DOUBLE as well. */
570 if (GET_CODE (op1) != CONST_INT || GET_CODE (op2) != CONST_INT)
571 return VOIDmode;
572
573 /* Selected bits all zero: CC0.
574 e.g.: int a; if ((a & (16 + 128)) == 0) */
575 if (INTVAL (op2) == 0)
576 return CCTmode;
577
578 /* Selected bits all one: CC3.
579 e.g.: int a; if ((a & (16 + 128)) == 16 + 128) */
580 if (INTVAL (op2) == INTVAL (op1))
581 return CCT3mode;
582
583 /* Exactly two bits selected, mixed zeroes and ones: CC1 or CC2. e.g.:
584 int a;
585 if ((a & (16 + 128)) == 16) -> CCT1
586 if ((a & (16 + 128)) == 128) -> CCT2 */
587 if (mixed)
588 {
589 bit1 = exact_log2 (INTVAL (op2));
590 bit0 = exact_log2 (INTVAL (op1) ^ INTVAL (op2));
591 if (bit0 != -1 && bit1 != -1)
592 return bit0 > bit1 ? CCT1mode : CCT2mode;
593 }
594
595 return VOIDmode;
596 }
597
598 /* Given a comparison code OP (EQ, NE, etc.) and the operands
599 OP0 and OP1 of a COMPARE, return the mode to be used for the
600 comparison. */
601
602 enum machine_mode
603 s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
604 {
605 switch (code)
606 {
607 case EQ:
608 case NE:
609 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
610 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
611 return CCAPmode;
612 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
613 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
614 return CCAPmode;
615 if ((GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
616 || GET_CODE (op1) == NEG)
617 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
618 return CCLmode;
619
620 if (GET_CODE (op0) == AND)
621 {
622 /* Check whether we can potentially do it via TM. */
623 enum machine_mode ccmode;
624 ccmode = s390_tm_ccmode (XEXP (op0, 1), op1, 1);
625 if (ccmode != VOIDmode)
626 {
627 /* Relax CCTmode to CCZmode to allow fall-back to AND
628 if that turns out to be beneficial. */
629 return ccmode == CCTmode ? CCZmode : ccmode;
630 }
631 }
632
633 if (register_operand (op0, HImode)
634 && GET_CODE (op1) == CONST_INT
635 && (INTVAL (op1) == -1 || INTVAL (op1) == 65535))
636 return CCT3mode;
637 if (register_operand (op0, QImode)
638 && GET_CODE (op1) == CONST_INT
639 && (INTVAL (op1) == -1 || INTVAL (op1) == 255))
640 return CCT3mode;
641
642 return CCZmode;
643
644 case LE:
645 case LT:
646 case GE:
647 case GT:
648 /* The only overflow condition of NEG and ABS happens when
649 -INT_MAX is used as parameter, which stays negative. So
650 we have an overflow from a positive value to a negative.
651 Using CCAP mode the resulting cc can be used for comparisons. */
652 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
653 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
654 return CCAPmode;
655
656 /* If constants are involved in an add instruction it is possible to use
657 the resulting cc for comparisons with zero. Knowing the sign of the
658 constant the overflow behavior gets predictable. e.g.:
659 int a, b; if ((b = a + c) > 0)
660 with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP */
661 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
662 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
663 {
664 if (INTVAL (XEXP((op0), 1)) < 0)
665 return CCANmode;
666 else
667 return CCAPmode;
668 }
669 /* Fall through. */
670 case UNORDERED:
671 case ORDERED:
672 case UNEQ:
673 case UNLE:
674 case UNLT:
675 case UNGE:
676 case UNGT:
677 case LTGT:
678 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
679 && GET_CODE (op1) != CONST_INT)
680 return CCSRmode;
681 return CCSmode;
682
683 case LTU:
684 case GEU:
685 if (GET_CODE (op0) == PLUS
686 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
687 return CCL1mode;
688
689 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
690 && GET_CODE (op1) != CONST_INT)
691 return CCURmode;
692 return CCUmode;
693
694 case LEU:
695 case GTU:
696 if (GET_CODE (op0) == MINUS
697 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
698 return CCL2mode;
699
700 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
701 && GET_CODE (op1) != CONST_INT)
702 return CCURmode;
703 return CCUmode;
704
705 default:
706 gcc_unreachable ();
707 }
708 }
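
/* Two illustrative cases (operands chosen arbitrarily): for
     (gt (plus (reg:SI x) (const_int -1)) (const_int 0))
   the added constant is negative, so CCANmode is selected; with
   (const_int 1) instead, CCAPmode is selected, as described in the
   comment above.  */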
709
710 /* Replace the comparison OP0 CODE OP1 by a semantically equivalent one
711 that we can implement more efficiently. */
712
713 void
714 s390_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1)
715 {
716 /* Convert ZERO_EXTRACT back to AND to enable TM patterns. */
717 if ((*code == EQ || *code == NE)
718 && *op1 == const0_rtx
719 && GET_CODE (*op0) == ZERO_EXTRACT
720 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
721 && GET_CODE (XEXP (*op0, 2)) == CONST_INT
722 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
723 {
724 rtx inner = XEXP (*op0, 0);
725 HOST_WIDE_INT modesize = GET_MODE_BITSIZE (GET_MODE (inner));
726 HOST_WIDE_INT len = INTVAL (XEXP (*op0, 1));
727 HOST_WIDE_INT pos = INTVAL (XEXP (*op0, 2));
728
729 if (len > 0 && len < modesize
730 && pos >= 0 && pos + len <= modesize
731 && modesize <= HOST_BITS_PER_WIDE_INT)
732 {
733 unsigned HOST_WIDE_INT block;
734 block = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
735 block <<= modesize - pos - len;
736
737 *op0 = gen_rtx_AND (GET_MODE (inner), inner,
738 gen_int_mode (block, GET_MODE (inner)));
739 }
740 }
741
742 /* Narrow AND of memory against immediate to enable TM. */
743 if ((*code == EQ || *code == NE)
744 && *op1 == const0_rtx
745 && GET_CODE (*op0) == AND
746 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
747 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
748 {
749 rtx inner = XEXP (*op0, 0);
750 rtx mask = XEXP (*op0, 1);
751
752 /* Ignore paradoxical SUBREGs if all extra bits are masked out. */
753 if (GET_CODE (inner) == SUBREG
754 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (inner)))
755 && (GET_MODE_SIZE (GET_MODE (inner))
756 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
757 && ((INTVAL (mask)
758 & GET_MODE_MASK (GET_MODE (inner))
759 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
760 == 0))
761 inner = SUBREG_REG (inner);
762
763 /* Do not change volatile MEMs. */
764 if (MEM_P (inner) && !MEM_VOLATILE_P (inner))
765 {
766 int part = s390_single_part (XEXP (*op0, 1),
767 GET_MODE (inner), QImode, 0);
768 if (part >= 0)
769 {
770 mask = gen_int_mode (s390_extract_part (mask, QImode, 0), QImode);
771 inner = adjust_address_nv (inner, QImode, part);
772 *op0 = gen_rtx_AND (QImode, inner, mask);
773 }
774 }
775 }
776
777 /* Narrow comparisons against 0xffff to HImode if possible. */
778 if ((*code == EQ || *code == NE)
779 && GET_CODE (*op1) == CONST_INT
780 && INTVAL (*op1) == 0xffff
781 && SCALAR_INT_MODE_P (GET_MODE (*op0))
782 && (nonzero_bits (*op0, GET_MODE (*op0))
783 & ~(unsigned HOST_WIDE_INT) 0xffff) == 0)
784 {
785 *op0 = gen_lowpart (HImode, *op0);
786 *op1 = constm1_rtx;
787 }
788
789 /* Remove redundant UNSPEC_CCU_TO_INT conversions if possible. */
790 if (GET_CODE (*op0) == UNSPEC
791 && XINT (*op0, 1) == UNSPEC_CCU_TO_INT
792 && XVECLEN (*op0, 0) == 1
793 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCUmode
794 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
795 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
796 && *op1 == const0_rtx)
797 {
798 enum rtx_code new_code = UNKNOWN;
799 switch (*code)
800 {
801 case EQ: new_code = EQ; break;
802 case NE: new_code = NE; break;
803 case LT: new_code = GTU; break;
804 case GT: new_code = LTU; break;
805 case LE: new_code = GEU; break;
806 case GE: new_code = LEU; break;
807 default: break;
808 }
809
810 if (new_code != UNKNOWN)
811 {
812 *op0 = XVECEXP (*op0, 0, 0);
813 *code = new_code;
814 }
815 }
816
817 /* Remove redundant UNSPEC_CCZ_TO_INT conversions if possible. */
818 if (GET_CODE (*op0) == UNSPEC
819 && XINT (*op0, 1) == UNSPEC_CCZ_TO_INT
820 && XVECLEN (*op0, 0) == 1
821 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCZmode
822 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
823 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
824 && *op1 == const0_rtx)
825 {
826 enum rtx_code new_code = UNKNOWN;
827 switch (*code)
828 {
829 case EQ: new_code = EQ; break;
830 case NE: new_code = NE; break;
831 default: break;
832 }
833
834 if (new_code != UNKNOWN)
835 {
836 *op0 = XVECEXP (*op0, 0, 0);
837 *code = new_code;
838 }
839 }
840
841 /* Simplify cascaded EQ, NE with const0_rtx. */
842 if ((*code == NE || *code == EQ)
843 && (GET_CODE (*op0) == EQ || GET_CODE (*op0) == NE)
844 && GET_MODE (*op0) == SImode
845 && GET_MODE (XEXP (*op0, 0)) == CCZ1mode
846 && REG_P (XEXP (*op0, 0))
847 && XEXP (*op0, 1) == const0_rtx
848 && *op1 == const0_rtx)
849 {
850 if ((*code == EQ && GET_CODE (*op0) == NE)
851 || (*code == NE && GET_CODE (*op0) == EQ))
852 *code = EQ;
853 else
854 *code = NE;
855 *op0 = XEXP (*op0, 0);
856 }
857
858 /* Prefer register over memory as first operand. */
859 if (MEM_P (*op0) && REG_P (*op1))
860 {
861 rtx tem = *op0; *op0 = *op1; *op1 = tem;
862 *code = swap_condition (*code);
863 }
864 }
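
/* As an example of the 0xffff narrowing above: if *op0 is an SImode
   register whose nonzero_bits fit in the low 16 bits, the comparison
     (eq *op0 (const_int 65535))
   is rewritten into (eq (lowpart:HI *op0) (const_int -1)), which can
   then map onto the CCT3mode case handled in s390_select_ccmode.  */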
865
866 /* Emit a compare instruction suitable to implement the comparison
867 OP0 CODE OP1. Return the correct condition RTL to be placed in
868 the IF_THEN_ELSE of the conditional branch testing the result. */
869
870 rtx
871 s390_emit_compare (enum rtx_code code, rtx op0, rtx op1)
872 {
873 enum machine_mode mode = s390_select_ccmode (code, op0, op1);
874 rtx cc;
875
876 /* Do not output a redundant compare instruction if a compare_and_swap
877 pattern already computed the result and the machine modes are compatible. */
878 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
879 {
880 gcc_assert (s390_cc_modes_compatible (GET_MODE (op0), mode)
881 == GET_MODE (op0));
882 cc = op0;
883 }
884 else
885 {
886 cc = gen_rtx_REG (mode, CC_REGNUM);
887 emit_insn (gen_rtx_SET (VOIDmode, cc, gen_rtx_COMPARE (mode, op0, op1)));
888 }
889
890 return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
891 }
892
893 /* Emit a SImode compare and swap instruction setting MEM to NEW_RTX if OLD
894 matches CMP.
895 Return the correct condition RTL to be placed in the IF_THEN_ELSE of the
896 conditional branch testing the result. */
897
898 static rtx
899 s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem,
900 rtx cmp, rtx new_rtx)
901 {
902 emit_insn (gen_atomic_compare_and_swapsi_internal (old, mem, cmp, new_rtx));
903 return s390_emit_compare (code, gen_rtx_REG (CCZ1mode, CC_REGNUM),
904 const0_rtx);
905 }
906
907 /* Emit a jump instruction to TARGET. If COND is NULL_RTX, emit an
908 unconditional jump, else a conditional jump under condition COND. */
909
910 void
911 s390_emit_jump (rtx target, rtx cond)
912 {
913 rtx insn;
914
915 target = gen_rtx_LABEL_REF (VOIDmode, target);
916 if (cond)
917 target = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, target, pc_rtx);
918
919 insn = gen_rtx_SET (VOIDmode, pc_rtx, target);
920 emit_jump_insn (insn);
921 }
922
923 /* Return branch condition mask to implement a branch
924 specified by CODE. Return -1 for invalid comparisons. */
925
926 int
927 s390_branch_condition_mask (rtx code)
928 {
929 const int CC0 = 1 << 3;
930 const int CC1 = 1 << 2;
931 const int CC2 = 1 << 1;
932 const int CC3 = 1 << 0;
933
934 gcc_assert (GET_CODE (XEXP (code, 0)) == REG);
935 gcc_assert (REGNO (XEXP (code, 0)) == CC_REGNUM);
936 gcc_assert (XEXP (code, 1) == const0_rtx);
937
938 switch (GET_MODE (XEXP (code, 0)))
939 {
940 case CCZmode:
941 case CCZ1mode:
942 switch (GET_CODE (code))
943 {
944 case EQ: return CC0;
945 case NE: return CC1 | CC2 | CC3;
946 default: return -1;
947 }
948 break;
949
950 case CCT1mode:
951 switch (GET_CODE (code))
952 {
953 case EQ: return CC1;
954 case NE: return CC0 | CC2 | CC3;
955 default: return -1;
956 }
957 break;
958
959 case CCT2mode:
960 switch (GET_CODE (code))
961 {
962 case EQ: return CC2;
963 case NE: return CC0 | CC1 | CC3;
964 default: return -1;
965 }
966 break;
967
968 case CCT3mode:
969 switch (GET_CODE (code))
970 {
971 case EQ: return CC3;
972 case NE: return CC0 | CC1 | CC2;
973 default: return -1;
974 }
975 break;
976
977 case CCLmode:
978 switch (GET_CODE (code))
979 {
980 case EQ: return CC0 | CC2;
981 case NE: return CC1 | CC3;
982 default: return -1;
983 }
984 break;
985
986 case CCL1mode:
987 switch (GET_CODE (code))
988 {
989 case LTU: return CC2 | CC3; /* carry */
990 case GEU: return CC0 | CC1; /* no carry */
991 default: return -1;
992 }
993 break;
994
995 case CCL2mode:
996 switch (GET_CODE (code))
997 {
998 case GTU: return CC0 | CC1; /* borrow */
999 case LEU: return CC2 | CC3; /* no borrow */
1000 default: return -1;
1001 }
1002 break;
1003
1004 case CCL3mode:
1005 switch (GET_CODE (code))
1006 {
1007 case EQ: return CC0 | CC2;
1008 case NE: return CC1 | CC3;
1009 case LTU: return CC1;
1010 case GTU: return CC3;
1011 case LEU: return CC1 | CC2;
1012 case GEU: return CC2 | CC3;
1013 default: return -1;
1014 }
1015
1016 case CCUmode:
1017 switch (GET_CODE (code))
1018 {
1019 case EQ: return CC0;
1020 case NE: return CC1 | CC2 | CC3;
1021 case LTU: return CC1;
1022 case GTU: return CC2;
1023 case LEU: return CC0 | CC1;
1024 case GEU: return CC0 | CC2;
1025 default: return -1;
1026 }
1027 break;
1028
1029 case CCURmode:
1030 switch (GET_CODE (code))
1031 {
1032 case EQ: return CC0;
1033 case NE: return CC2 | CC1 | CC3;
1034 case LTU: return CC2;
1035 case GTU: return CC1;
1036 case LEU: return CC0 | CC2;
1037 case GEU: return CC0 | CC1;
1038 default: return -1;
1039 }
1040 break;
1041
1042 case CCAPmode:
1043 switch (GET_CODE (code))
1044 {
1045 case EQ: return CC0;
1046 case NE: return CC1 | CC2 | CC3;
1047 case LT: return CC1 | CC3;
1048 case GT: return CC2;
1049 case LE: return CC0 | CC1 | CC3;
1050 case GE: return CC0 | CC2;
1051 default: return -1;
1052 }
1053 break;
1054
1055 case CCANmode:
1056 switch (GET_CODE (code))
1057 {
1058 case EQ: return CC0;
1059 case NE: return CC1 | CC2 | CC3;
1060 case LT: return CC1;
1061 case GT: return CC2 | CC3;
1062 case LE: return CC0 | CC1;
1063 case GE: return CC0 | CC2 | CC3;
1064 default: return -1;
1065 }
1066 break;
1067
1068 case CCSmode:
1069 switch (GET_CODE (code))
1070 {
1071 case EQ: return CC0;
1072 case NE: return CC1 | CC2 | CC3;
1073 case LT: return CC1;
1074 case GT: return CC2;
1075 case LE: return CC0 | CC1;
1076 case GE: return CC0 | CC2;
1077 case UNORDERED: return CC3;
1078 case ORDERED: return CC0 | CC1 | CC2;
1079 case UNEQ: return CC0 | CC3;
1080 case UNLT: return CC1 | CC3;
1081 case UNGT: return CC2 | CC3;
1082 case UNLE: return CC0 | CC1 | CC3;
1083 case UNGE: return CC0 | CC2 | CC3;
1084 case LTGT: return CC1 | CC2;
1085 default: return -1;
1086 }
1087 break;
1088
1089 case CCSRmode:
1090 switch (GET_CODE (code))
1091 {
1092 case EQ: return CC0;
1093 case NE: return CC2 | CC1 | CC3;
1094 case LT: return CC2;
1095 case GT: return CC1;
1096 case LE: return CC0 | CC2;
1097 case GE: return CC0 | CC1;
1098 case UNORDERED: return CC3;
1099 case ORDERED: return CC0 | CC2 | CC1;
1100 case UNEQ: return CC0 | CC3;
1101 case UNLT: return CC2 | CC3;
1102 case UNGT: return CC1 | CC3;
1103 case UNLE: return CC0 | CC2 | CC3;
1104 case UNGE: return CC0 | CC1 | CC3;
1105 case LTGT: return CC2 | CC1;
1106 default: return -1;
1107 }
1108 break;
1109
1110 default:
1111 return -1;
1112 }
1113 }
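
/* Example: for (eq (reg:CCZ CC_REGNUM) (const_int 0)) this returns
   CC0, i.e. the mask 1 << 3 == 8; for NE it returns
   CC1 | CC2 | CC3 == 7.  */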
1114
1115
1116 /* Return branch condition mask to implement a compare and branch
1117 specified by CODE. Return -1 for invalid comparisons. */
1118
1119 int
1120 s390_compare_and_branch_condition_mask (rtx code)
1121 {
1122 const int CC0 = 1 << 3;
1123 const int CC1 = 1 << 2;
1124 const int CC2 = 1 << 1;
1125
1126 switch (GET_CODE (code))
1127 {
1128 case EQ:
1129 return CC0;
1130 case NE:
1131 return CC1 | CC2;
1132 case LT:
1133 case LTU:
1134 return CC1;
1135 case GT:
1136 case GTU:
1137 return CC2;
1138 case LE:
1139 case LEU:
1140 return CC0 | CC1;
1141 case GE:
1142 case GEU:
1143 return CC0 | CC2;
1144 default:
1145 gcc_unreachable ();
1146 }
1147 return -1;
1148 }
1149
1150 /* If INV is false, return assembler mnemonic string to implement
1151 a branch specified by CODE. If INV is true, return mnemonic
1152 for the corresponding inverted branch. */
1153
1154 static const char *
1155 s390_branch_condition_mnemonic (rtx code, int inv)
1156 {
1157 int mask;
1158
1159 static const char *const mnemonic[16] =
1160 {
1161 NULL, "o", "h", "nle",
1162 "l", "nhe", "lh", "ne",
1163 "e", "nlh", "he", "nl",
1164 "le", "nh", "no", NULL
1165 };
1166
1167 if (GET_CODE (XEXP (code, 0)) == REG
1168 && REGNO (XEXP (code, 0)) == CC_REGNUM
1169 && XEXP (code, 1) == const0_rtx)
1170 mask = s390_branch_condition_mask (code);
1171 else
1172 mask = s390_compare_and_branch_condition_mask (code);
1173
1174 gcc_assert (mask >= 0);
1175
1176 if (inv)
1177 mask ^= 15;
1178
1179 gcc_assert (mask >= 1 && mask <= 14);
1180
1181 return mnemonic[mask];
1182 }
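
/* Continuing the CCZmode example from s390_branch_condition_mask:
   an EQ test yields mask 8 and hence the mnemonic "e"; with INV set
   the mask becomes 8 ^ 15 == 7, giving "ne".  */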
1183
1184 /* Return the part of op which has a value different from def.
1185 The size of the part is determined by mode.
1186 Use this function only if you already know that op really
1187 contains such a part. */
1188
1189 unsigned HOST_WIDE_INT
1190 s390_extract_part (rtx op, enum machine_mode mode, int def)
1191 {
1192 unsigned HOST_WIDE_INT value = 0;
1193 int max_parts = HOST_BITS_PER_WIDE_INT / GET_MODE_BITSIZE (mode);
1194 int part_bits = GET_MODE_BITSIZE (mode);
1195 unsigned HOST_WIDE_INT part_mask
1196 = ((unsigned HOST_WIDE_INT)1 << part_bits) - 1;
1197 int i;
1198
1199 for (i = 0; i < max_parts; i++)
1200 {
1201 if (i == 0)
1202 value = (unsigned HOST_WIDE_INT) INTVAL (op);
1203 else
1204 value >>= part_bits;
1205
1206 if ((value & part_mask) != (def & part_mask))
1207 return value & part_mask;
1208 }
1209
1210 gcc_unreachable ();
1211 }
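
/* For illustration: s390_extract_part (GEN_INT (0x12340000), HImode, 0)
   returns 0x1234 -- the low halfword equals DEF (zero), so the next
   halfword is the part that differs.  */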
1212
1213 /* If OP is an integer constant of mode MODE with exactly one
1214 part of mode PART_MODE unequal to DEF, return the number of that
1215 part. Otherwise, return -1. */
1216
1217 int
1218 s390_single_part (rtx op,
1219 enum machine_mode mode,
1220 enum machine_mode part_mode,
1221 int def)
1222 {
1223 unsigned HOST_WIDE_INT value = 0;
1224 int n_parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (part_mode);
1225 unsigned HOST_WIDE_INT part_mask
1226 = ((unsigned HOST_WIDE_INT)1 << GET_MODE_BITSIZE (part_mode)) - 1;
1227 int i, part = -1;
1228
1229 if (GET_CODE (op) != CONST_INT)
1230 return -1;
1231
1232 for (i = 0; i < n_parts; i++)
1233 {
1234 if (i == 0)
1235 value = (unsigned HOST_WIDE_INT) INTVAL (op);
1236 else
1237 value >>= GET_MODE_BITSIZE (part_mode);
1238
1239 if ((value & part_mask) != (def & part_mask))
1240 {
1241 if (part != -1)
1242 return -1;
1243 else
1244 part = i;
1245 }
1246 }
1247 return part == -1 ? -1 : n_parts - 1 - part;
1248 }
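
/* For illustration: s390_single_part (GEN_INT (0x00ff0000), SImode,
   HImode, 0) returns 0, because only the most significant halfword
   differs from DEF and the returned part number counts from the most
   significant part downwards.  */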
1249
1250 /* Return true if IN contains a contiguous bitfield in the lower SIZE
1251 bits and no other bits are set in IN. POS and LENGTH can be used
1252 to obtain the start position and the length of the bitfield.
1253
1254 POS gives the position of the first bit of the bitfield counting
1255 from the lowest order bit starting with zero. In order to use this
1256 value for S/390 instructions this has to be converted to "bits big
1257 endian" style. */
1258
1259 bool
1260 s390_contiguous_bitmask_p (unsigned HOST_WIDE_INT in, int size,
1261 int *pos, int *length)
1262 {
1263 int tmp_pos = 0;
1264 int tmp_length = 0;
1265 int i;
1266 unsigned HOST_WIDE_INT mask = 1ULL;
1267 bool contiguous = false;
1268
1269 for (i = 0; i < size; mask <<= 1, i++)
1270 {
1271 if (contiguous)
1272 {
1273 if (mask & in)
1274 tmp_length++;
1275 else
1276 break;
1277 }
1278 else
1279 {
1280 if (mask & in)
1281 {
1282 contiguous = true;
1283 tmp_length++;
1284 }
1285 else
1286 tmp_pos++;
1287 }
1288 }
1289
1290 if (!tmp_length)
1291 return false;
1292
1293 /* Calculate a mask for all bits beyond the contiguous bits. */
1294 mask = (-1LL & ~(((1ULL << (tmp_length + tmp_pos - 1)) << 1) - 1));
1295
1296 if (mask & in)
1297 return false;
1298
1299 if (tmp_length + tmp_pos - 1 > size)
1300 return false;
1301
1302 if (length)
1303 *length = tmp_length;
1304
1305 if (pos)
1306 *pos = tmp_pos;
1307
1308 return true;
1309 }
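
/* For illustration: s390_contiguous_bitmask_p (0x7f0, 32, &pos, &len)
   returns true and stores 4 in *POS and 7 in *LENGTH, whereas 0x5 is
   rejected because its set bits are not contiguous.  */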
1310
1311 /* Check whether we can (and want to) split a double-word
1312 move in mode MODE from SRC to DST into two single-word
1313 moves, moving the subword FIRST_SUBWORD first. */
1314
1315 bool
1316 s390_split_ok_p (rtx dst, rtx src, enum machine_mode mode, int first_subword)
1317 {
1318 /* Floating point registers cannot be split. */
1319 if (FP_REG_P (src) || FP_REG_P (dst))
1320 return false;
1321
1322 /* We don't need to split if operands are directly accessible. */
1323 if (s_operand (src, mode) || s_operand (dst, mode))
1324 return false;
1325
1326 /* Non-offsettable memory references cannot be split. */
1327 if ((GET_CODE (src) == MEM && !offsettable_memref_p (src))
1328 || (GET_CODE (dst) == MEM && !offsettable_memref_p (dst)))
1329 return false;
1330
1331 /* Moving the first subword must not clobber a register
1332 needed to move the second subword. */
1333 if (register_operand (dst, mode))
1334 {
1335 rtx subreg = operand_subword (dst, first_subword, 0, mode);
1336 if (reg_overlap_mentioned_p (subreg, src))
1337 return false;
1338 }
1339
1340 return true;
1341 }
1342
1343 /* Return true if it can be proven that [MEM1, MEM1 + SIZE]
1344 and [MEM2, MEM2 + SIZE] overlap, and false
1345 otherwise. */
1346
1347 bool
1348 s390_overlap_p (rtx mem1, rtx mem2, HOST_WIDE_INT size)
1349 {
1350 rtx addr1, addr2, addr_delta;
1351 HOST_WIDE_INT delta;
1352
1353 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
1354 return true;
1355
1356 if (size == 0)
1357 return false;
1358
1359 addr1 = XEXP (mem1, 0);
1360 addr2 = XEXP (mem2, 0);
1361
1362 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
1363
1364 /* This overlapping check is used by peepholes merging memory block operations.
1365 Overlapping operations would otherwise be recognized by the S/390 hardware
1366 and would fall back to a slower implementation. Allowing overlapping
1367 operations would lead to slow code but not to wrong code. Therefore we are
1368 somewhat optimistic if we cannot prove that the memory blocks are
1369 overlapping.
1370 That's why we return false here although this may accept operations on
1371 overlapping memory areas. */
1372 if (!addr_delta || GET_CODE (addr_delta) != CONST_INT)
1373 return false;
1374
1375 delta = INTVAL (addr_delta);
1376
1377 if (delta == 0
1378 || (delta > 0 && delta < size)
1379 || (delta < 0 && -delta < size))
1380 return true;
1381
1382 return false;
1383 }
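
/* For illustration, assuming simplify_binary_operation reduces the
   address difference to a CONST_INT: with MEM1 at (reg %r2), MEM2 at
   (plus (reg %r2) (const_int 16)) and SIZE == 16, the blocks are
   disjoint and false is returned; with SIZE == 24 they provably
   overlap and true is returned.  */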
1384
1385 /* Check whether the address of memory reference MEM2 equals exactly
1386 the address of memory reference MEM1 plus DELTA. Return true if
1387 we can prove this to be the case, false otherwise. */
1388
1389 bool
1390 s390_offset_p (rtx mem1, rtx mem2, rtx delta)
1391 {
1392 rtx addr1, addr2, addr_delta;
1393
1394 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
1395 return false;
1396
1397 addr1 = XEXP (mem1, 0);
1398 addr2 = XEXP (mem2, 0);
1399
1400 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
1401 if (!addr_delta || !rtx_equal_p (addr_delta, delta))
1402 return false;
1403
1404 return true;
1405 }
1406
1407 /* Expand logical operator CODE in mode MODE with operands OPERANDS. */
1408
1409 void
1410 s390_expand_logical_operator (enum rtx_code code, enum machine_mode mode,
1411 rtx *operands)
1412 {
1413 enum machine_mode wmode = mode;
1414 rtx dst = operands[0];
1415 rtx src1 = operands[1];
1416 rtx src2 = operands[2];
1417 rtx op, clob, tem;
1418
1419 /* If we cannot handle the operation directly, use a temp register. */
1420 if (!s390_logical_operator_ok_p (operands))
1421 dst = gen_reg_rtx (mode);
1422
1423 /* QImode and HImode patterns make sense only if we have a destination
1424 in memory. Otherwise perform the operation in SImode. */
1425 if ((mode == QImode || mode == HImode) && GET_CODE (dst) != MEM)
1426 wmode = SImode;
1427
1428 /* Widen operands if required. */
1429 if (mode != wmode)
1430 {
1431 if (GET_CODE (dst) == SUBREG
1432 && (tem = simplify_subreg (wmode, dst, mode, 0)) != 0)
1433 dst = tem;
1434 else if (REG_P (dst))
1435 dst = gen_rtx_SUBREG (wmode, dst, 0);
1436 else
1437 dst = gen_reg_rtx (wmode);
1438
1439 if (GET_CODE (src1) == SUBREG
1440 && (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)
1441 src1 = tem;
1442 else if (GET_MODE (src1) != VOIDmode)
1443 src1 = gen_rtx_SUBREG (wmode, force_reg (mode, src1), 0);
1444
1445 if (GET_CODE (src2) == SUBREG
1446 && (tem = simplify_subreg (wmode, src2, mode, 0)) != 0)
1447 src2 = tem;
1448 else if (GET_MODE (src2) != VOIDmode)
1449 src2 = gen_rtx_SUBREG (wmode, force_reg (mode, src2), 0);
1450 }
1451
1452 /* Emit the instruction. */
1453 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, wmode, src1, src2));
1454 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
1455 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
1456
1457 /* Fix up the destination if needed. */
1458 if (dst != operands[0])
1459 emit_move_insn (operands[0], gen_lowpart (mode, dst));
1460 }
1461
1462 /* Check whether OPERANDS are OK for a logical operation (AND, IOR, XOR). */
1463
1464 bool
1465 s390_logical_operator_ok_p (rtx *operands)
1466 {
1467 /* If the destination operand is in memory, it needs to coincide
1468 with one of the source operands. After reload, it has to be
1469 the first source operand. */
1470 if (GET_CODE (operands[0]) == MEM)
1471 return rtx_equal_p (operands[0], operands[1])
1472 || (!reload_completed && rtx_equal_p (operands[0], operands[2]));
1473
1474 return true;
1475 }
1476
1477 /* Narrow logical operation CODE of memory operand MEMOP with immediate
1478 operand IMMOP to switch from SS to SI type instructions. */
1479
1480 void
1481 s390_narrow_logical_operator (enum rtx_code code, rtx *memop, rtx *immop)
1482 {
1483 int def = code == AND ? -1 : 0;
1484 HOST_WIDE_INT mask;
1485 int part;
1486
1487 gcc_assert (GET_CODE (*memop) == MEM);
1488 gcc_assert (!MEM_VOLATILE_P (*memop));
1489
1490 mask = s390_extract_part (*immop, QImode, def);
1491 part = s390_single_part (*immop, GET_MODE (*memop), QImode, def);
1492 gcc_assert (part >= 0);
1493
1494 *memop = adjust_address (*memop, QImode, part);
1495 *immop = gen_int_mode (mask, QImode);
1496 }
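
/* For illustration: narrowing an SImode AND against (const_int -256)
   (0xffffff00 in SImode) finds exactly one QImode part differing from
   the AND default of all ones; *MEMOP is re-addressed to that byte
   (offset 3) and *IMMOP becomes 0, so an SI-type instruction can
   operate on the single affected byte.  */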
1497
1498
1499 /* How to allocate a 'struct machine_function'. */
1500
1501 static struct machine_function *
1502 s390_init_machine_status (void)
1503 {
1504 return ggc_alloc_cleared_machine_function ();
1505 }
1506
1507 static void
1508 s390_option_override (void)
1509 {
1510 /* Set up function hooks. */
1511 init_machine_status = s390_init_machine_status;
1512
1513 /* Architecture mode defaults according to ABI. */
1514 if (!(target_flags_explicit & MASK_ZARCH))
1515 {
1516 if (TARGET_64BIT)
1517 target_flags |= MASK_ZARCH;
1518 else
1519 target_flags &= ~MASK_ZARCH;
1520 }
1521
1522 /* Set the march default in case it hasn't been specified on
1523 cmdline. */
1524 if (s390_arch == PROCESSOR_max)
1525 {
1526 s390_arch_string = TARGET_ZARCH? "z900" : "g5";
1527 s390_arch = TARGET_ZARCH ? PROCESSOR_2064_Z900 : PROCESSOR_9672_G5;
1528 s390_arch_flags = processor_flags_table[(int)s390_arch];
1529 }
1530
1531 /* Determine processor to tune for. */
1532 if (s390_tune == PROCESSOR_max)
1533 {
1534 s390_tune = s390_arch;
1535 s390_tune_flags = s390_arch_flags;
1536 }
1537
1538 /* Sanity checks. */
1539 if (TARGET_ZARCH && !TARGET_CPU_ZARCH)
1540 error ("z/Architecture mode not supported on %s", s390_arch_string);
1541 if (TARGET_64BIT && !TARGET_ZARCH)
1542 error ("64-bit ABI not supported in ESA/390 mode");
1543
1544 /* Use hardware DFP if available and not explicitly disabled by
1545 user. E.g. with -m31 -march=z10 -mzarch */
1546 if (!(target_flags_explicit & MASK_HARD_DFP) && TARGET_DFP)
1547 target_flags |= MASK_HARD_DFP;
1548
1549 if (TARGET_HARD_DFP && !TARGET_DFP)
1550 {
1551 if (target_flags_explicit & MASK_HARD_DFP)
1552 {
1553 if (!TARGET_CPU_DFP)
1554 error ("hardware decimal floating point instructions"
1555 " not available on %s", s390_arch_string);
1556 if (!TARGET_ZARCH)
1557 error ("hardware decimal floating point instructions"
1558 " not available in ESA/390 mode");
1559 }
1560 else
1561 target_flags &= ~MASK_HARD_DFP;
1562 }
1563
1564 if ((target_flags_explicit & MASK_SOFT_FLOAT) && TARGET_SOFT_FLOAT)
1565 {
1566 if ((target_flags_explicit & MASK_HARD_DFP) && TARGET_HARD_DFP)
1567 error ("-mhard-dfp can%'t be used in conjunction with -msoft-float");
1568
1569 target_flags &= ~MASK_HARD_DFP;
1570 }
1571
1572 /* Set processor cost function. */
1573 switch (s390_tune)
1574 {
1575 case PROCESSOR_2084_Z990:
1576 s390_cost = &z990_cost;
1577 break;
1578 case PROCESSOR_2094_Z9_109:
1579 s390_cost = &z9_109_cost;
1580 break;
1581 case PROCESSOR_2097_Z10:
1582 s390_cost = &z10_cost;
break;
1583 case PROCESSOR_2817_Z196:
1584 s390_cost = &z196_cost;
1585 break;
1586 default:
1587 s390_cost = &z900_cost;
1588 }
1589
1590 if (TARGET_BACKCHAIN && TARGET_PACKED_STACK && TARGET_HARD_FLOAT)
1591 error ("-mbackchain -mpacked-stack -mhard-float are not supported "
1592 "in combination");
1593
1594 if (s390_stack_size)
1595 {
1596 if (s390_stack_guard >= s390_stack_size)
1597 error ("stack size must be greater than the stack guard value");
1598 else if (s390_stack_size > 1 << 16)
1599 error ("stack size must not be greater than 64k");
1600 }
1601 else if (s390_stack_guard)
1602 error ("-mstack-guard implies use of -mstack-size");
1603
1604 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
1605 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
1606 target_flags |= MASK_LONG_DOUBLE_128;
1607 #endif
1608
1609 if (s390_tune == PROCESSOR_2097_Z10
1610 || s390_tune == PROCESSOR_2817_Z196)
1611 {
1612 maybe_set_param_value (PARAM_MAX_UNROLLED_INSNS, 100,
1613 global_options.x_param_values,
1614 global_options_set.x_param_values);
1615 maybe_set_param_value (PARAM_MAX_UNROLL_TIMES, 32,
1616 global_options.x_param_values,
1617 global_options_set.x_param_values);
1618 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 2000,
1619 global_options.x_param_values,
1620 global_options_set.x_param_values);
1621 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEEL_TIMES, 64,
1622 global_options.x_param_values,
1623 global_options_set.x_param_values);
1624 }
1625
1626 maybe_set_param_value (PARAM_MAX_PENDING_LIST_LENGTH, 256,
1627 global_options.x_param_values,
1628 global_options_set.x_param_values);
1629 /* values for loop prefetching */
1630 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, 256,
1631 global_options.x_param_values,
1632 global_options_set.x_param_values);
1633 maybe_set_param_value (PARAM_L1_CACHE_SIZE, 128,
1634 global_options.x_param_values,
1635 global_options_set.x_param_values);
1636 /* s390 has more than 2 cache levels and the sizes are much larger. Since
1637 we are always running virtualized, assume that we only get a small
1638 part of the caches above l1. */
1639 maybe_set_param_value (PARAM_L2_CACHE_SIZE, 1500,
1640 global_options.x_param_values,
1641 global_options_set.x_param_values);
1642 maybe_set_param_value (PARAM_PREFETCH_MIN_INSN_TO_MEM_RATIO, 2,
1643 global_options.x_param_values,
1644 global_options_set.x_param_values);
1645 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6,
1646 global_options.x_param_values,
1647 global_options_set.x_param_values);
1648
1649 /* This cannot reside in s390_option_optimization_table since HAVE_prefetch
1650 requires the arch flags to be evaluated already. Since prefetching
1651 is beneficial on s390, we enable it if available. */
1652 if (flag_prefetch_loop_arrays < 0 && HAVE_prefetch && optimize >= 3)
1653 flag_prefetch_loop_arrays = 1;
1654
1655 /* Use the alternative scheduling-pressure algorithm by default. */
1656 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM, 2,
1657 global_options.x_param_values,
1658 global_options_set.x_param_values);
1659
1660 if (TARGET_TPF)
1661 {
1662 /* Don't emit DWARF3/4 unless specifically selected. The TPF
1663 debuggers do not yet support DWARF 3/4. */
1664 if (!global_options_set.x_dwarf_strict)
1665 dwarf_strict = 1;
1666 if (!global_options_set.x_dwarf_version)
1667 dwarf_version = 2;
1668 }
1669 }
1670
1671 /* Map for smallest class containing reg regno. */
1672
1673 const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
1674 { GENERAL_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1675 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1676 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1677 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1678 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1679 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1680 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1681 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1682 ADDR_REGS, CC_REGS, ADDR_REGS, ADDR_REGS,
1683 ACCESS_REGS, ACCESS_REGS
1684 };
1685
1686 /* Return attribute type of insn. */
1687
1688 static enum attr_type
1689 s390_safe_attr_type (rtx insn)
1690 {
1691 if (recog_memoized (insn) >= 0)
1692 return get_attr_type (insn);
1693 else
1694 return TYPE_NONE;
1695 }
1696
1697 /* Return true if DISP is a valid short displacement. */
1698
1699 static bool
1700 s390_short_displacement (rtx disp)
1701 {
1702 /* No displacement is OK. */
1703 if (!disp)
1704 return true;
1705
1706 /* Without the long displacement facility we don't need to
1707 distinguish between long and short displacements. */
1708 if (!TARGET_LONG_DISPLACEMENT)
1709 return true;
1710
1711 /* Integer displacement in range. */
1712 if (GET_CODE (disp) == CONST_INT)
1713 return INTVAL (disp) >= 0 && INTVAL (disp) < 4096;
1714
1715 /* GOT offset is not OK, the GOT can be large. */
1716 if (GET_CODE (disp) == CONST
1717 && GET_CODE (XEXP (disp, 0)) == UNSPEC
1718 && (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
1719 || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
1720 return false;
1721
1722 /* All other symbolic constants are literal pool references,
1723 which are OK as the literal pool must be small. */
1724 if (GET_CODE (disp) == CONST)
1725 return true;
1726
1727 return false;
1728 }
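
/* For example: with the long displacement facility enabled,
   (const_int 4095) is a valid short displacement, while
   (const_int 4096) and any negative constant are not; without the
   facility the function always returns true, since no distinction
   needs to be made.  */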
1729
1730 /* Decompose a RTL expression ADDR for a memory address into
1731 its components, returned in OUT.
1732
1733 Returns false if ADDR is not a valid memory address, true
1734 otherwise. If OUT is NULL, don't return the components,
1735 but check for validity only.
1736
1737 Note: Only addresses in canonical form are recognized.
1738 LEGITIMIZE_ADDRESS should convert non-canonical forms to the
1739 canonical form so that they will be recognized. */
1740
1741 static int
1742 s390_decompose_address (rtx addr, struct s390_address *out)
1743 {
1744 HOST_WIDE_INT offset = 0;
1745 rtx base = NULL_RTX;
1746 rtx indx = NULL_RTX;
1747 rtx disp = NULL_RTX;
1748 rtx orig_disp;
1749 bool pointer = false;
1750 bool base_ptr = false;
1751 bool indx_ptr = false;
1752 bool literal_pool = false;
1753
1754 /* We may need to substitute the literal pool base register into the address
1755 below. However, at this point we do not know which register is going to
1756 be used as base, so we substitute the arg pointer register. This is going
1757 to be treated as holding a pointer below -- it shouldn't be used for any
1758 other purpose. */
1759 rtx fake_pool_base = gen_rtx_REG (Pmode, ARG_POINTER_REGNUM);
1760
1761 /* Decompose address into base + index + displacement. */
1762
1763 if (GET_CODE (addr) == REG || GET_CODE (addr) == UNSPEC)
1764 base = addr;
1765
1766 else if (GET_CODE (addr) == PLUS)
1767 {
1768 rtx op0 = XEXP (addr, 0);
1769 rtx op1 = XEXP (addr, 1);
1770 enum rtx_code code0 = GET_CODE (op0);
1771 enum rtx_code code1 = GET_CODE (op1);
1772
1773 if (code0 == REG || code0 == UNSPEC)
1774 {
1775 if (code1 == REG || code1 == UNSPEC)
1776 {
1777 indx = op0; /* index + base */
1778 base = op1;
1779 }
1780
1781 else
1782 {
1783 base = op0; /* base + displacement */
1784 disp = op1;
1785 }
1786 }
1787
1788 else if (code0 == PLUS)
1789 {
1790 indx = XEXP (op0, 0); /* index + base + disp */
1791 base = XEXP (op0, 1);
1792 disp = op1;
1793 }
1794
1795 else
1796 {
1797 return false;
1798 }
1799 }
1800
1801 else
1802 disp = addr; /* displacement */
1803
1804 /* Extract integer part of displacement. */
1805 orig_disp = disp;
1806 if (disp)
1807 {
1808 if (GET_CODE (disp) == CONST_INT)
1809 {
1810 offset = INTVAL (disp);
1811 disp = NULL_RTX;
1812 }
1813 else if (GET_CODE (disp) == CONST
1814 && GET_CODE (XEXP (disp, 0)) == PLUS
1815 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
1816 {
1817 offset = INTVAL (XEXP (XEXP (disp, 0), 1));
1818 disp = XEXP (XEXP (disp, 0), 0);
1819 }
1820 }
1821
1822 /* Strip off CONST here to avoid special case tests later. */
1823 if (disp && GET_CODE (disp) == CONST)
1824 disp = XEXP (disp, 0);
1825
1826 /* We can convert literal pool addresses to
1827 displacements by basing them off the base register. */
1828 if (disp && GET_CODE (disp) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (disp))
1829 {
1830 /* Either base or index must be free to hold the base register. */
1831 if (!base)
1832 base = fake_pool_base, literal_pool = true;
1833 else if (!indx)
1834 indx = fake_pool_base, literal_pool = true;
1835 else
1836 return false;
1837
1838 /* Mark up the displacement. */
1839 disp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, disp),
1840 UNSPEC_LTREL_OFFSET);
1841 }
1842
1843 /* Validate base register. */
1844 if (base)
1845 {
1846 if (GET_CODE (base) == UNSPEC)
1847 switch (XINT (base, 1))
1848 {
1849 case UNSPEC_LTREF:
1850 if (!disp)
1851 disp = gen_rtx_UNSPEC (Pmode,
1852 gen_rtvec (1, XVECEXP (base, 0, 0)),
1853 UNSPEC_LTREL_OFFSET);
1854 else
1855 return false;
1856
1857 base = XVECEXP (base, 0, 1);
1858 break;
1859
1860 case UNSPEC_LTREL_BASE:
1861 if (XVECLEN (base, 0) == 1)
1862 base = fake_pool_base, literal_pool = true;
1863 else
1864 base = XVECEXP (base, 0, 1);
1865 break;
1866
1867 default:
1868 return false;
1869 }
1870
1871 if (!REG_P (base)
1872 || (GET_MODE (base) != SImode
1873 && GET_MODE (base) != Pmode))
1874 return false;
1875
1876 if (REGNO (base) == STACK_POINTER_REGNUM
1877 || REGNO (base) == FRAME_POINTER_REGNUM
1878 || ((reload_completed || reload_in_progress)
1879 && frame_pointer_needed
1880 && REGNO (base) == HARD_FRAME_POINTER_REGNUM)
1881 || REGNO (base) == ARG_POINTER_REGNUM
1882 || (flag_pic
1883 && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
1884 pointer = base_ptr = true;
1885
1886 if ((reload_completed || reload_in_progress)
1887 && base == cfun->machine->base_reg)
1888 pointer = base_ptr = literal_pool = true;
1889 }
1890
1891 /* Validate index register. */
1892 if (indx)
1893 {
1894 if (GET_CODE (indx) == UNSPEC)
1895 switch (XINT (indx, 1))
1896 {
1897 case UNSPEC_LTREF:
1898 if (!disp)
1899 disp = gen_rtx_UNSPEC (Pmode,
1900 gen_rtvec (1, XVECEXP (indx, 0, 0)),
1901 UNSPEC_LTREL_OFFSET);
1902 else
1903 return false;
1904
1905 indx = XVECEXP (indx, 0, 1);
1906 break;
1907
1908 case UNSPEC_LTREL_BASE:
1909 if (XVECLEN (indx, 0) == 1)
1910 indx = fake_pool_base, literal_pool = true;
1911 else
1912 indx = XVECEXP (indx, 0, 1);
1913 break;
1914
1915 default:
1916 return false;
1917 }
1918
1919 if (!REG_P (indx)
1920 || (GET_MODE (indx) != SImode
1921 && GET_MODE (indx) != Pmode))
1922 return false;
1923
1924 if (REGNO (indx) == STACK_POINTER_REGNUM
1925 || REGNO (indx) == FRAME_POINTER_REGNUM
1926 || ((reload_completed || reload_in_progress)
1927 && frame_pointer_needed
1928 && REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
1929 || REGNO (indx) == ARG_POINTER_REGNUM
1930 || (flag_pic
1931 && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
1932 pointer = indx_ptr = true;
1933
1934 if ((reload_completed || reload_in_progress)
1935 && indx == cfun->machine->base_reg)
1936 pointer = indx_ptr = literal_pool = true;
1937 }
1938
1939 /* Prefer to use pointer as base, not index. */
1940 if (base && indx && !base_ptr
1941 && (indx_ptr || (!REG_POINTER (base) && REG_POINTER (indx))))
1942 {
1943 rtx tmp = base;
1944 base = indx;
1945 indx = tmp;
1946 }
1947
1948 /* Validate displacement. */
1949 if (!disp)
1950 {
1951 /* If virtual registers are involved, the displacement will change later
1952 anyway as the virtual registers get eliminated. This could make a
1953 valid displacement invalid, but it is more likely to make an invalid
1954 displacement valid, because we sometimes access the register save area
1955 via negative offsets to one of those registers.
1956 Thus we don't check the displacement for validity here. If after
1957 elimination the displacement turns out to be invalid after all,
1958 this is fixed up by reload in any case. */
1959 if (base != arg_pointer_rtx
1960 && indx != arg_pointer_rtx
1961 && base != return_address_pointer_rtx
1962 && indx != return_address_pointer_rtx
1963 && base != frame_pointer_rtx
1964 && indx != frame_pointer_rtx
1965 && base != virtual_stack_vars_rtx
1966 && indx != virtual_stack_vars_rtx)
1967 if (!DISP_IN_RANGE (offset))
1968 return false;
1969 }
1970 else
1971 {
1972 /* All the special cases are pointers. */
1973 pointer = true;
1974
1975 /* In the small-PIC case, the linker converts @GOT
1976 and @GOTNTPOFF offsets to possible displacements. */
1977 if (GET_CODE (disp) == UNSPEC
1978 && (XINT (disp, 1) == UNSPEC_GOT
1979 || XINT (disp, 1) == UNSPEC_GOTNTPOFF)
1980 && flag_pic == 1)
1981 {
1982 ;
1983 }
1984
1985 /* Accept pool label offsets. */
1986 else if (GET_CODE (disp) == UNSPEC
1987 && XINT (disp, 1) == UNSPEC_POOL_OFFSET)
1988 ;
1989
1990 /* Accept literal pool references. */
1991 else if (GET_CODE (disp) == UNSPEC
1992 && XINT (disp, 1) == UNSPEC_LTREL_OFFSET)
1993 {
1994 		  /* In case CSE pulled a non-literal-pool reference out of
1995 		     the pool, we have to reject the address.  This is
1996 		     especially important when loading the GOT pointer on
1997 		     non-zarch CPUs.  In this case the literal pool contains an
1998 		     lt-relative offset to the _GLOBAL_OFFSET_TABLE_ label which
1999 		     will most likely exceed the displacement range.  */
2000 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
2001 || !CONSTANT_POOL_ADDRESS_P (XVECEXP (disp, 0, 0)))
2002 return false;
2003
2004 orig_disp = gen_rtx_CONST (Pmode, disp);
2005 if (offset)
2006 {
2007 /* If we have an offset, make sure it does not
2008 exceed the size of the constant pool entry. */
2009 rtx sym = XVECEXP (disp, 0, 0);
2010 if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))
2011 return false;
2012
2013 orig_disp = plus_constant (Pmode, orig_disp, offset);
2014 }
2015 }
2016
2017 else
2018 return false;
2019 }
2020
2021 if (!base && !indx)
2022 pointer = true;
2023
2024 if (out)
2025 {
2026 out->base = base;
2027 out->indx = indx;
2028 out->disp = orig_disp;
2029 out->pointer = pointer;
2030 out->literal_pool = literal_pool;
2031 }
2032
2033 return true;
2034 }
2035
2036 /* Decompose an RTL expression OP for a shift count into its components,
2037 and return the base register in BASE and the offset in OFFSET.
2038
2039 Return true if OP is a valid shift count, false if not. */
2040
2041 bool
2042 s390_decompose_shift_count (rtx op, rtx *base, HOST_WIDE_INT *offset)
2043 {
2044 HOST_WIDE_INT off = 0;
2045
2046 /* We can have an integer constant, an address register,
2047 or a sum of the two. */
2048 if (GET_CODE (op) == CONST_INT)
2049 {
2050 off = INTVAL (op);
2051 op = NULL_RTX;
2052 }
2053 if (op && GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT)
2054 {
2055 off = INTVAL (XEXP (op, 1));
2056 op = XEXP (op, 0);
2057 }
2058 while (op && GET_CODE (op) == SUBREG)
2059 op = SUBREG_REG (op);
2060
2061 if (op && GET_CODE (op) != REG)
2062 return false;
2063
2064 if (offset)
2065 *offset = off;
2066 if (base)
2067 *base = op;
2068
2069 return true;
2070 }
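
/* For illustration, a sketch of two shift-count operands the function
   above accepts (the exact RTL shapes come from the shift patterns):

     (const_int 3)                 -> *base = NULL_RTX, *offset = 3
     (plus (reg 5) (const_int 7))  -> *base = (reg 5),  *offset = 7

   A SUBREG around the register is stripped first; anything that is
   neither a register nor a constant makes the function return false.  */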
2071
2072
2073 /* Return true if OP is a valid address without index.  */
2074
2075 bool
2076 s390_legitimate_address_without_index_p (rtx op)
2077 {
2078 struct s390_address addr;
2079
2080 if (!s390_decompose_address (XEXP (op, 0), &addr))
2081 return false;
2082 if (addr.indx)
2083 return false;
2084
2085 return true;
2086 }
2087
2088
2089 /* Return true if ADDR is of kind symbol_ref or symbol_ref + const_int
2090 and return these parts in SYMREF and ADDEND. You can pass NULL in
2091 SYMREF and/or ADDEND if you are not interested in these values.
2092 Literal pool references are *not* considered symbol references. */
2093
2094 static bool
2095 s390_symref_operand_p (rtx addr, rtx *symref, HOST_WIDE_INT *addend)
2096 {
2097 HOST_WIDE_INT tmpaddend = 0;
2098
2099 if (GET_CODE (addr) == CONST)
2100 addr = XEXP (addr, 0);
2101
2102 if (GET_CODE (addr) == PLUS)
2103 {
2104 if (GET_CODE (XEXP (addr, 0)) == SYMBOL_REF
2105 && !CONSTANT_POOL_ADDRESS_P (XEXP (addr, 0))
2106 && CONST_INT_P (XEXP (addr, 1)))
2107 {
2108 tmpaddend = INTVAL (XEXP (addr, 1));
2109 addr = XEXP (addr, 0);
2110 }
2111 else
2112 return false;
2113 }
2114 else
2115 if (GET_CODE (addr) != SYMBOL_REF || CONSTANT_POOL_ADDRESS_P (addr))
2116 return false;
2117
2118 if (symref)
2119 *symref = addr;
2120 if (addend)
2121 *addend = tmpaddend;
2122
2123 return true;
2124 }
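
/* For illustration, a sketch of typical inputs to the function above:

     (symbol_ref "foo")                                -> symref "foo", addend 0
     (const (plus (symbol_ref "foo") (const_int 12)))  -> symref "foo", addend 12

   SYMBOL_REFs that point into the literal pool are rejected, as is
   any other shape of address.  */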
2125
2126
2127 /* Return true if the address in OP is valid for constraint letter C
2128    if wrapped in a MEM rtx.  Set LIT_POOL_OK to true if literal
2129 pool MEMs should be accepted. Only the Q, R, S, T constraint
2130 letters are allowed for C. */
2131
2132 static int
2133 s390_check_qrst_address (char c, rtx op, bool lit_pool_ok)
2134 {
2135 struct s390_address addr;
2136 bool decomposed = false;
2137
2138 /* This check makes sure that no symbolic address (except literal
2139      pool references) is accepted by the R or T constraints.  */
2140 if (s390_symref_operand_p (op, NULL, NULL))
2141 return 0;
2142
2143 /* Ensure literal pool references are only accepted if LIT_POOL_OK. */
2144 if (!lit_pool_ok)
2145 {
2146 if (!s390_decompose_address (op, &addr))
2147 return 0;
2148 if (addr.literal_pool)
2149 return 0;
2150 decomposed = true;
2151 }
2152
2153 switch (c)
2154 {
2155 case 'Q': /* no index short displacement */
2156 if (!decomposed && !s390_decompose_address (op, &addr))
2157 return 0;
2158 if (addr.indx)
2159 return 0;
2160 if (!s390_short_displacement (addr.disp))
2161 return 0;
2162 break;
2163
2164 case 'R': /* with index short displacement */
2165 if (TARGET_LONG_DISPLACEMENT)
2166 {
2167 if (!decomposed && !s390_decompose_address (op, &addr))
2168 return 0;
2169 if (!s390_short_displacement (addr.disp))
2170 return 0;
2171 }
2172 /* Any invalid address here will be fixed up by reload,
2173 so accept it for the most generic constraint. */
2174 break;
2175
2176 case 'S': /* no index long displacement */
2177 if (!TARGET_LONG_DISPLACEMENT)
2178 return 0;
2179 if (!decomposed && !s390_decompose_address (op, &addr))
2180 return 0;
2181 if (addr.indx)
2182 return 0;
2183 if (s390_short_displacement (addr.disp))
2184 return 0;
2185 break;
2186
2187 case 'T': /* with index long displacement */
2188 if (!TARGET_LONG_DISPLACEMENT)
2189 return 0;
2190 /* Any invalid address here will be fixed up by reload,
2191 so accept it for the most generic constraint. */
2192 if ((decomposed || s390_decompose_address (op, &addr))
2193 && s390_short_displacement (addr.disp))
2194 return 0;
2195 break;
2196 default:
2197 return 0;
2198 }
2199 return 1;
2200 }
2201
2202
2203 /* Evaluates constraint strings described by the regular expression
2204 ([A|B|Z](Q|R|S|T))|U|W|Y and returns 1 if OP is a valid operand for
2205    the constraint given in STR, and 0 otherwise.  */
2206
2207 int
2208 s390_mem_constraint (const char *str, rtx op)
2209 {
2210 char c = str[0];
2211
2212 switch (c)
2213 {
2214 case 'A':
2215 /* Check for offsettable variants of memory constraints. */
2216 if (!MEM_P (op) || MEM_VOLATILE_P (op))
2217 return 0;
2218 if ((reload_completed || reload_in_progress)
2219 ? !offsettable_memref_p (op) : !offsettable_nonstrict_memref_p (op))
2220 return 0;
2221 return s390_check_qrst_address (str[1], XEXP (op, 0), true);
2222 case 'B':
2223 /* Check for non-literal-pool variants of memory constraints. */
2224 if (!MEM_P (op))
2225 return 0;
2226 return s390_check_qrst_address (str[1], XEXP (op, 0), false);
2227 case 'Q':
2228 case 'R':
2229 case 'S':
2230 case 'T':
2231 if (GET_CODE (op) != MEM)
2232 return 0;
2233 return s390_check_qrst_address (c, XEXP (op, 0), true);
2234 case 'U':
2235 return (s390_check_qrst_address ('Q', op, true)
2236 || s390_check_qrst_address ('R', op, true));
2237 case 'W':
2238 return (s390_check_qrst_address ('S', op, true)
2239 || s390_check_qrst_address ('T', op, true));
2240 case 'Y':
2241 /* Simply check for the basic form of a shift count. Reload will
2242 take care of making sure we have a proper base register. */
2243 if (!s390_decompose_shift_count (op, NULL, NULL))
2244 return 0;
2245 break;
2246 case 'Z':
2247 return s390_check_qrst_address (str[1], op, true);
2248 default:
2249 return 0;
2250 }
2251 return 1;
2252 }
2253
2254
2255 /* Evaluates constraint strings starting with letter O. Input
2256    parameter C is the letter following the "O" in the constraint
2257    string.  Returns 1 if VALUE meets the respective constraint and 0
2258 otherwise. */
2259
2260 int
2261 s390_O_constraint_str (const char c, HOST_WIDE_INT value)
2262 {
2263 if (!TARGET_EXTIMM)
2264 return 0;
2265
2266 switch (c)
2267 {
2268 case 's':
2269 return trunc_int_for_mode (value, SImode) == value;
2270
2271 case 'p':
2272 return value == 0
2273 || s390_single_part (GEN_INT (value), DImode, SImode, 0) == 1;
2274
2275 case 'n':
2276 return s390_single_part (GEN_INT (value - 1), DImode, SImode, -1) == 1;
2277
2278 default:
2279 gcc_unreachable ();
2280 }
2281 }
2282
2283
2284 /* Evaluates constraint strings starting with letter N. Parameter STR
2285 contains the letters following letter "N" in the constraint string.
2286 Returns true if VALUE matches the constraint. */
2287
2288 int
2289 s390_N_constraint_str (const char *str, HOST_WIDE_INT value)
2290 {
2291 enum machine_mode mode, part_mode;
2292 int def;
2293 int part, part_goal;
2294
2295
2296 if (str[0] == 'x')
2297 part_goal = -1;
2298 else
2299 part_goal = str[0] - '0';
2300
2301 switch (str[1])
2302 {
2303 case 'Q':
2304 part_mode = QImode;
2305 break;
2306 case 'H':
2307 part_mode = HImode;
2308 break;
2309 case 'S':
2310 part_mode = SImode;
2311 break;
2312 default:
2313 return 0;
2314 }
2315
2316 switch (str[2])
2317 {
2318 case 'H':
2319 mode = HImode;
2320 break;
2321 case 'S':
2322 mode = SImode;
2323 break;
2324 case 'D':
2325 mode = DImode;
2326 break;
2327 default:
2328 return 0;
2329 }
2330
2331 switch (str[3])
2332 {
2333 case '0':
2334 def = 0;
2335 break;
2336 case 'F':
2337 def = -1;
2338 break;
2339 default:
2340 return 0;
2341 }
2342
2343 if (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (part_mode))
2344 return 0;
2345
2346 part = s390_single_part (GEN_INT (value), mode, part_mode, def);
2347 if (part < 0)
2348 return 0;
2349 if (part_goal != -1 && part_goal != part)
2350 return 0;
2351
2352 return 1;
2353 }
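
/* For illustration, a sketch of how an "N" constraint string is decoded
   by the code above (part numbering is whatever s390_single_part uses):

     "xQH0"  ->  part_goal = -1 (any part), part_mode = QImode,
                 mode = HImode, def = 0

   i.e. VALUE, viewed in HImode, must have exactly one QImode part
   that differs from the background value 0.  */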
2354
2355
2356 /* Returns true if the input parameter VALUE is a float zero. */
2357
2358 int
2359 s390_float_const_zero_p (rtx value)
2360 {
2361 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
2362 && value == CONST0_RTX (GET_MODE (value)));
2363 }
2364
2365 /* Implement TARGET_REGISTER_MOVE_COST. */
2366
2367 static int
2368 s390_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2369 reg_class_t from, reg_class_t to)
2370 {
2371 /* On s390, copy between fprs and gprs is expensive. */
2372 if ((reg_classes_intersect_p (from, GENERAL_REGS)
2373 && reg_classes_intersect_p (to, FP_REGS))
2374 || (reg_classes_intersect_p (from, FP_REGS)
2375 && reg_classes_intersect_p (to, GENERAL_REGS)))
2376 return 10;
2377
2378 return 1;
2379 }
2380
2381 /* Implement TARGET_MEMORY_MOVE_COST. */
2382
2383 static int
2384 s390_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2385 reg_class_t rclass ATTRIBUTE_UNUSED,
2386 bool in ATTRIBUTE_UNUSED)
2387 {
2388 return 1;
2389 }
2390
2391 /* Compute a (partial) cost for rtx X. Return true if the complete
2392 cost has been computed, and false if subexpressions should be
2393 scanned. In either case, *TOTAL contains the cost result.
2394 CODE contains GET_CODE (x), OUTER_CODE contains the code
2395 of the superexpression of x. */
2396
2397 static bool
2398 s390_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
2399 int *total, bool speed ATTRIBUTE_UNUSED)
2400 {
2401 switch (code)
2402 {
2403 case CONST:
2404 case CONST_INT:
2405 case LABEL_REF:
2406 case SYMBOL_REF:
2407 case CONST_DOUBLE:
2408 case MEM:
2409 *total = 0;
2410 return true;
2411
2412 case ASHIFT:
2413 case ASHIFTRT:
2414 case LSHIFTRT:
2415 case ROTATE:
2416 case ROTATERT:
2417 case AND:
2418 case IOR:
2419 case XOR:
2420 case NEG:
2421 case NOT:
2422 *total = COSTS_N_INSNS (1);
2423 return false;
2424
2425 case PLUS:
2426 case MINUS:
2427 *total = COSTS_N_INSNS (1);
2428 return false;
2429
2430 case MULT:
2431 switch (GET_MODE (x))
2432 {
2433 case SImode:
2434 {
2435 rtx left = XEXP (x, 0);
2436 rtx right = XEXP (x, 1);
2437 if (GET_CODE (right) == CONST_INT
2438 && CONST_OK_FOR_K (INTVAL (right)))
2439 *total = s390_cost->mhi;
2440 else if (GET_CODE (left) == SIGN_EXTEND)
2441 *total = s390_cost->mh;
2442 else
2443 *total = s390_cost->ms; /* msr, ms, msy */
2444 break;
2445 }
2446 case DImode:
2447 {
2448 rtx left = XEXP (x, 0);
2449 rtx right = XEXP (x, 1);
2450 if (TARGET_ZARCH)
2451 {
2452 if (GET_CODE (right) == CONST_INT
2453 && CONST_OK_FOR_K (INTVAL (right)))
2454 *total = s390_cost->mghi;
2455 else if (GET_CODE (left) == SIGN_EXTEND)
2456 *total = s390_cost->msgf;
2457 else
2458 *total = s390_cost->msg; /* msgr, msg */
2459 }
2460 else /* TARGET_31BIT */
2461 {
2462 if (GET_CODE (left) == SIGN_EXTEND
2463 && GET_CODE (right) == SIGN_EXTEND)
2464 /* mulsidi case: mr, m */
2465 *total = s390_cost->m;
2466 else if (GET_CODE (left) == ZERO_EXTEND
2467 && GET_CODE (right) == ZERO_EXTEND
2468 && TARGET_CPU_ZARCH)
2469 /* umulsidi case: ml, mlr */
2470 *total = s390_cost->ml;
2471 else
2472 /* Complex calculation is required. */
2473 *total = COSTS_N_INSNS (40);
2474 }
2475 break;
2476 }
2477 case SFmode:
2478 case DFmode:
2479 *total = s390_cost->mult_df;
2480 break;
2481 case TFmode:
2482 *total = s390_cost->mxbr;
2483 break;
2484 default:
2485 return false;
2486 }
2487 return false;
2488
2489 case FMA:
2490 switch (GET_MODE (x))
2491 {
2492 case DFmode:
2493 *total = s390_cost->madbr;
2494 break;
2495 case SFmode:
2496 *total = s390_cost->maebr;
2497 break;
2498 default:
2499 return false;
2500 }
2501       /* Negation of the third argument is free: FMSUB.  */
2502 if (GET_CODE (XEXP (x, 2)) == NEG)
2503 {
2504 *total += (rtx_cost (XEXP (x, 0), FMA, 0, speed)
2505 + rtx_cost (XEXP (x, 1), FMA, 1, speed)
2506 + rtx_cost (XEXP (XEXP (x, 2), 0), FMA, 2, speed));
2507 return true;
2508 }
2509 return false;
2510
2511 case UDIV:
2512 case UMOD:
2513 if (GET_MODE (x) == TImode) /* 128 bit division */
2514 *total = s390_cost->dlgr;
2515 else if (GET_MODE (x) == DImode)
2516 {
2517 rtx right = XEXP (x, 1);
2518 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
2519 *total = s390_cost->dlr;
2520 else /* 64 by 64 bit division */
2521 *total = s390_cost->dlgr;
2522 }
2523 else if (GET_MODE (x) == SImode) /* 32 bit division */
2524 *total = s390_cost->dlr;
2525 return false;
2526
2527 case DIV:
2528 case MOD:
2529 if (GET_MODE (x) == DImode)
2530 {
2531 rtx right = XEXP (x, 1);
2532 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
2533 if (TARGET_ZARCH)
2534 *total = s390_cost->dsgfr;
2535 else
2536 *total = s390_cost->dr;
2537 else /* 64 by 64 bit division */
2538 *total = s390_cost->dsgr;
2539 }
2540 else if (GET_MODE (x) == SImode) /* 32 bit division */
2541 *total = s390_cost->dlr;
2542 else if (GET_MODE (x) == SFmode)
2543 {
2544 *total = s390_cost->debr;
2545 }
2546 else if (GET_MODE (x) == DFmode)
2547 {
2548 *total = s390_cost->ddbr;
2549 }
2550 else if (GET_MODE (x) == TFmode)
2551 {
2552 *total = s390_cost->dxbr;
2553 }
2554 return false;
2555
2556 case SQRT:
2557 if (GET_MODE (x) == SFmode)
2558 *total = s390_cost->sqebr;
2559 else if (GET_MODE (x) == DFmode)
2560 *total = s390_cost->sqdbr;
2561 else /* TFmode */
2562 *total = s390_cost->sqxbr;
2563 return false;
2564
2565 case SIGN_EXTEND:
2566 case ZERO_EXTEND:
2567 if (outer_code == MULT || outer_code == DIV || outer_code == MOD
2568 || outer_code == PLUS || outer_code == MINUS
2569 || outer_code == COMPARE)
2570 *total = 0;
2571 return false;
2572
2573 case COMPARE:
2574 *total = COSTS_N_INSNS (1);
2575 if (GET_CODE (XEXP (x, 0)) == AND
2576 && GET_CODE (XEXP (x, 1)) == CONST_INT
2577 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
2578 {
2579 rtx op0 = XEXP (XEXP (x, 0), 0);
2580 rtx op1 = XEXP (XEXP (x, 0), 1);
2581 rtx op2 = XEXP (x, 1);
2582
2583 if (memory_operand (op0, GET_MODE (op0))
2584 && s390_tm_ccmode (op1, op2, 0) != VOIDmode)
2585 return true;
2586 if (register_operand (op0, GET_MODE (op0))
2587 && s390_tm_ccmode (op1, op2, 1) != VOIDmode)
2588 return true;
2589 }
2590 return false;
2591
2592 default:
2593 return false;
2594 }
2595 }
2596
2597 /* Return the cost of an address rtx ADDR. */
2598
2599 static int
2600 s390_address_cost (rtx addr, bool speed ATTRIBUTE_UNUSED)
2601 {
2602 struct s390_address ad;
2603 if (!s390_decompose_address (addr, &ad))
2604 return 1000;
2605
2606   return ad.indx ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (1);
2607 }
2608
2609 /* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
2610 otherwise return 0. */
2611
2612 int
2613 tls_symbolic_operand (rtx op)
2614 {
2615 if (GET_CODE (op) != SYMBOL_REF)
2616 return 0;
2617 return SYMBOL_REF_TLS_MODEL (op);
2618 }
2619 \f
2620 /* Split DImode access register reference REG (on 64-bit) into its constituent
2621 low and high parts, and store them into LO and HI. Note that gen_lowpart/
2622 gen_highpart cannot be used as they assume all registers are word-sized,
2623 while our access registers have only half that size. */
2624
2625 void
2626 s390_split_access_reg (rtx reg, rtx *lo, rtx *hi)
2627 {
2628 gcc_assert (TARGET_64BIT);
2629 gcc_assert (ACCESS_REG_P (reg));
2630 gcc_assert (GET_MODE (reg) == DImode);
2631 gcc_assert (!(REGNO (reg) & 1));
2632
2633 *lo = gen_rtx_REG (SImode, REGNO (reg) + 1);
2634 *hi = gen_rtx_REG (SImode, REGNO (reg));
2635 }
2636
2637 /* Return true if OP contains a symbol reference.  */
2638
2639 bool
2640 symbolic_reference_mentioned_p (rtx op)
2641 {
2642 const char *fmt;
2643 int i;
2644
2645 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
2646 return 1;
2647
2648 fmt = GET_RTX_FORMAT (GET_CODE (op));
2649 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2650 {
2651 if (fmt[i] == 'E')
2652 {
2653 int j;
2654
2655 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2656 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2657 return 1;
2658 }
2659
2660 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
2661 return 1;
2662 }
2663
2664 return 0;
2665 }
2666
2667 /* Return true if OP contains a reference to a thread-local symbol. */
2668
2669 bool
2670 tls_symbolic_reference_mentioned_p (rtx op)
2671 {
2672 const char *fmt;
2673 int i;
2674
2675 if (GET_CODE (op) == SYMBOL_REF)
2676 return tls_symbolic_operand (op);
2677
2678 fmt = GET_RTX_FORMAT (GET_CODE (op));
2679 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2680 {
2681 if (fmt[i] == 'E')
2682 {
2683 int j;
2684
2685 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2686 if (tls_symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2687 return true;
2688 }
2689
2690 else if (fmt[i] == 'e' && tls_symbolic_reference_mentioned_p (XEXP (op, i)))
2691 return true;
2692 }
2693
2694 return false;
2695 }
2696
2697
2698 /* Return true if OP is a legitimate general operand when
2699 generating PIC code. It is given that flag_pic is on
2700 and that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2701
2702 int
2703 legitimate_pic_operand_p (rtx op)
2704 {
2705 /* Accept all non-symbolic constants. */
2706 if (!SYMBOLIC_CONST (op))
2707 return 1;
2708
2709 /* Reject everything else; must be handled
2710 via emit_symbolic_move. */
2711 return 0;
2712 }
2713
2714 /* Returns true if the constant value OP is a legitimate general operand.
2715 It is given that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2716
2717 static bool
2718 s390_legitimate_constant_p (enum machine_mode mode, rtx op)
2719 {
2720 /* Accept all non-symbolic constants. */
2721 if (!SYMBOLIC_CONST (op))
2722 return 1;
2723
2724 /* Accept immediate LARL operands. */
2725 if (TARGET_CPU_ZARCH && larl_operand (op, mode))
2726 return 1;
2727
2728 /* Thread-local symbols are never legal constants. This is
2729 so that emit_call knows that computing such addresses
2730 might require a function call. */
2731 if (TLS_SYMBOLIC_CONST (op))
2732 return 0;
2733
2734 /* In the PIC case, symbolic constants must *not* be
2735 forced into the literal pool. We accept them here,
2736 so that they will be handled by emit_symbolic_move. */
2737 if (flag_pic)
2738 return 1;
2739
2740 /* All remaining non-PIC symbolic constants are
2741 forced into the literal pool. */
2742 return 0;
2743 }
2744
2745 /* Determine if it's legal to put X into the constant pool. This
2746 is not possible if X contains the address of a symbol that is
2747 not constant (TLS) or not known at final link time (PIC). */
2748
2749 static bool
2750 s390_cannot_force_const_mem (enum machine_mode mode, rtx x)
2751 {
2752 switch (GET_CODE (x))
2753 {
2754 case CONST_INT:
2755 case CONST_DOUBLE:
2756 /* Accept all non-symbolic constants. */
2757 return false;
2758
2759 case LABEL_REF:
2760 /* Labels are OK iff we are non-PIC. */
2761 return flag_pic != 0;
2762
2763 case SYMBOL_REF:
2764 /* 'Naked' TLS symbol references are never OK,
2765 non-TLS symbols are OK iff we are non-PIC. */
2766 if (tls_symbolic_operand (x))
2767 return true;
2768 else
2769 return flag_pic != 0;
2770
2771 case CONST:
2772 return s390_cannot_force_const_mem (mode, XEXP (x, 0));
2773 case PLUS:
2774 case MINUS:
2775 return s390_cannot_force_const_mem (mode, XEXP (x, 0))
2776 || s390_cannot_force_const_mem (mode, XEXP (x, 1));
2777
2778 case UNSPEC:
2779 switch (XINT (x, 1))
2780 {
2781 /* Only lt-relative or GOT-relative UNSPECs are OK. */
2782 case UNSPEC_LTREL_OFFSET:
2783 case UNSPEC_GOT:
2784 case UNSPEC_GOTOFF:
2785 case UNSPEC_PLTOFF:
2786 case UNSPEC_TLSGD:
2787 case UNSPEC_TLSLDM:
2788 case UNSPEC_NTPOFF:
2789 case UNSPEC_DTPOFF:
2790 case UNSPEC_GOTNTPOFF:
2791 case UNSPEC_INDNTPOFF:
2792 return false;
2793
2794 	  /* If the literal pool shares the code section, execute
2795 	     template placeholders may be put into the pool as well.  */
2796 case UNSPEC_INSN:
2797 return TARGET_CPU_ZARCH;
2798
2799 default:
2800 return true;
2801 }
2802 break;
2803
2804 default:
2805 gcc_unreachable ();
2806 }
2807 }
2808
2809 /* Returns true if the constant value OP is a legitimate general
2810    operand during and after reload.  The difference from
2811    legitimate_constant_p is that this function will not accept
2812    a constant that would need to be forced to the literal pool
2813    before it can be used as an operand.
2814 This function accepts all constants which can be loaded directly
2815 into a GPR. */
2816
2817 bool
2818 legitimate_reload_constant_p (rtx op)
2819 {
2820 /* Accept la(y) operands. */
2821 if (GET_CODE (op) == CONST_INT
2822 && DISP_IN_RANGE (INTVAL (op)))
2823 return true;
2824
2825 /* Accept l(g)hi/l(g)fi operands. */
2826 if (GET_CODE (op) == CONST_INT
2827 && (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_Os (INTVAL (op))))
2828 return true;
2829
2830 /* Accept lliXX operands. */
2831 if (TARGET_ZARCH
2832 && GET_CODE (op) == CONST_INT
2833 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2834 && s390_single_part (op, word_mode, HImode, 0) >= 0)
2835 return true;
2836
2837 if (TARGET_EXTIMM
2838 && GET_CODE (op) == CONST_INT
2839 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2840 && s390_single_part (op, word_mode, SImode, 0) >= 0)
2841 return true;
2842
2843 /* Accept larl operands. */
2844 if (TARGET_CPU_ZARCH
2845 && larl_operand (op, VOIDmode))
2846 return true;
2847
2848 /* Accept floating-point zero operands that fit into a single GPR. */
2849 if (GET_CODE (op) == CONST_DOUBLE
2850 && s390_float_const_zero_p (op)
2851 && GET_MODE_SIZE (GET_MODE (op)) <= UNITS_PER_WORD)
2852 return true;
2853
2854 /* Accept double-word operands that can be split. */
2855 if (GET_CODE (op) == CONST_INT
2856 && trunc_int_for_mode (INTVAL (op), word_mode) != INTVAL (op))
2857 {
2858 enum machine_mode dword_mode = word_mode == SImode ? DImode : TImode;
2859 rtx hi = operand_subword (op, 0, 0, dword_mode);
2860 rtx lo = operand_subword (op, 1, 0, dword_mode);
2861 return legitimate_reload_constant_p (hi)
2862 && legitimate_reload_constant_p (lo);
2863 }
2864
2865 /* Everything else cannot be handled without reload. */
2866 return false;
2867 }
2868
2869 /* Returns true if the constant value OP is a legitimate fp operand
2870 during and after reload.
2871 This function accepts all constants which can be loaded directly
2872 into an FPR. */
2873
2874 static bool
2875 legitimate_reload_fp_constant_p (rtx op)
2876 {
2877 /* Accept floating-point zero operands if the load zero instruction
2878 can be used. */
2879 if (TARGET_Z196
2880 && GET_CODE (op) == CONST_DOUBLE
2881 && s390_float_const_zero_p (op))
2882 return true;
2883
2884 return false;
2885 }
2886
2887 /* Given an rtx OP being reloaded into a reg required to be in class RCLASS,
2888 return the class of reg to actually use. */
2889
2890 static reg_class_t
2891 s390_preferred_reload_class (rtx op, reg_class_t rclass)
2892 {
2893 switch (GET_CODE (op))
2894 {
2895 /* Constants we cannot reload into general registers
2896 must be forced into the literal pool. */
2897 case CONST_DOUBLE:
2898 case CONST_INT:
2899 if (reg_class_subset_p (GENERAL_REGS, rclass)
2900 && legitimate_reload_constant_p (op))
2901 return GENERAL_REGS;
2902 else if (reg_class_subset_p (ADDR_REGS, rclass)
2903 && legitimate_reload_constant_p (op))
2904 return ADDR_REGS;
2905 else if (reg_class_subset_p (FP_REGS, rclass)
2906 && legitimate_reload_fp_constant_p (op))
2907 return FP_REGS;
2908 return NO_REGS;
2909
2910 /* If a symbolic constant or a PLUS is reloaded,
2911 it is most likely being used as an address, so
2912 	 prefer ADDR_REGS.  If RCLASS is not a superset
2913 of ADDR_REGS, e.g. FP_REGS, reject this reload. */
2914 case LABEL_REF:
2915 case SYMBOL_REF:
2916 case CONST:
2917 if (!legitimate_reload_constant_p (op))
2918 return NO_REGS;
2919 /* fallthrough */
2920 case PLUS:
2921 /* load address will be used. */
2922 if (reg_class_subset_p (ADDR_REGS, rclass))
2923 return ADDR_REGS;
2924 else
2925 return NO_REGS;
2926
2927 default:
2928 break;
2929 }
2930
2931 return rclass;
2932 }
2933
2934 /* Return true if ADDR is SYMBOL_REF + addend with addend being a
2935 multiple of ALIGNMENT and the SYMBOL_REF being naturally
2936 aligned. */
2937
2938 bool
2939 s390_check_symref_alignment (rtx addr, HOST_WIDE_INT alignment)
2940 {
2941 HOST_WIDE_INT addend;
2942 rtx symref;
2943
2944 if (!s390_symref_operand_p (addr, &symref, &addend))
2945 return false;
2946
2947 return (!SYMBOL_REF_NOT_NATURALLY_ALIGNED_P (symref)
2948 && !(addend & (alignment - 1)));
2949 }
2950
2951 /* ADDR is moved into REG using larl.  If ADDR isn't a valid larl
2952    operand, SCRATCH is used to load the even part of the address,
2953    and one is then added.  */
2954
2955 void
2956 s390_reload_larl_operand (rtx reg, rtx addr, rtx scratch)
2957 {
2958 HOST_WIDE_INT addend;
2959 rtx symref;
2960
2961 if (!s390_symref_operand_p (addr, &symref, &addend))
2962 gcc_unreachable ();
2963
2964 if (!(addend & 1))
2965 /* Easy case. The addend is even so larl will do fine. */
2966 emit_move_insn (reg, addr);
2967 else
2968 {
2969 /* We can leave the scratch register untouched if the target
2970 register is a valid base register. */
2971 if (REGNO (reg) < FIRST_PSEUDO_REGISTER
2972 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS)
2973 scratch = reg;
2974
2975 gcc_assert (REGNO (scratch) < FIRST_PSEUDO_REGISTER);
2976 gcc_assert (REGNO_REG_CLASS (REGNO (scratch)) == ADDR_REGS);
2977
2978 if (addend != 1)
2979 emit_move_insn (scratch,
2980 gen_rtx_CONST (Pmode,
2981 gen_rtx_PLUS (Pmode, symref,
2982 GEN_INT (addend - 1))));
2983 else
2984 emit_move_insn (scratch, symref);
2985
2986 /* Increment the address using la in order to avoid clobbering cc. */
2987 emit_move_insn (reg, gen_rtx_PLUS (Pmode, scratch, const1_rtx));
2988 }
2989 }
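
/* For illustration, a sketch of the odd-addend path above: reloading the
   address  sym + 5  into REG first loads the even address  sym + 4  into
   SCRATCH (a valid larl operand) and then adds one with la, which leaves
   the condition code untouched.  */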
2990
2991 /* Generate what is necessary to move between REG and MEM using
2992 SCRATCH. The direction is given by TOMEM. */
2993
2994 void
2995 s390_reload_symref_address (rtx reg, rtx mem, rtx scratch, bool tomem)
2996 {
2997 /* Reload might have pulled a constant out of the literal pool.
2998 Force it back in. */
2999 if (CONST_INT_P (mem) || GET_CODE (mem) == CONST_DOUBLE
3000 || GET_CODE (mem) == CONST)
3001 mem = force_const_mem (GET_MODE (reg), mem);
3002
3003 gcc_assert (MEM_P (mem));
3004
3005 /* For a load from memory we can leave the scratch register
3006 untouched if the target register is a valid base register. */
3007 if (!tomem
3008 && REGNO (reg) < FIRST_PSEUDO_REGISTER
3009 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS
3010 && GET_MODE (reg) == GET_MODE (scratch))
3011 scratch = reg;
3012
3013 /* Load address into scratch register. Since we can't have a
3014 secondary reload for a secondary reload we have to cover the case
3015 where larl would need a secondary reload here as well. */
3016 s390_reload_larl_operand (scratch, XEXP (mem, 0), scratch);
3017
3018 /* Now we can use a standard load/store to do the move. */
3019 if (tomem)
3020 emit_move_insn (replace_equiv_address (mem, scratch), reg);
3021 else
3022 emit_move_insn (reg, replace_equiv_address (mem, scratch));
3023 }
3024
3025 /* Inform reload about cases where moving X with a mode MODE to a register in
3026 RCLASS requires an extra scratch or immediate register. Return the class
3027 needed for the immediate register. */
3028
3029 static reg_class_t
3030 s390_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
3031 enum machine_mode mode, secondary_reload_info *sri)
3032 {
3033 enum reg_class rclass = (enum reg_class) rclass_i;
3034
3035 /* Intermediate register needed. */
3036 if (reg_classes_intersect_p (CC_REGS, rclass))
3037 return GENERAL_REGS;
3038
3039 if (TARGET_Z10)
3040 {
3041 HOST_WIDE_INT offset;
3042 rtx symref;
3043
3044 /* On z10 several optimizer steps may generate larl operands with
3045 an odd addend. */
3046 if (in_p
3047 && s390_symref_operand_p (x, &symref, &offset)
3048 && mode == Pmode
3049 && !SYMBOL_REF_ALIGN1_P (symref)
3050 && (offset & 1) == 1)
3051 sri->icode = ((mode == DImode) ? CODE_FOR_reloaddi_larl_odd_addend_z10
3052 : CODE_FOR_reloadsi_larl_odd_addend_z10);
3053
3054 /* On z10 we need a scratch register when moving QI, TI or floating
3055 point mode values from or to a memory location with a SYMBOL_REF
3056 or if the symref addend of a SI or DI move is not aligned to the
3057 width of the access. */
3058 if (MEM_P (x)
3059 && s390_symref_operand_p (XEXP (x, 0), NULL, NULL)
3060 && (mode == QImode || mode == TImode || FLOAT_MODE_P (mode)
3061 || (!TARGET_ZARCH && mode == DImode)
3062 || ((mode == HImode || mode == SImode || mode == DImode)
3063 && (!s390_check_symref_alignment (XEXP (x, 0),
3064 GET_MODE_SIZE (mode))))))
3065 {
3066 #define __SECONDARY_RELOAD_CASE(M,m) \
3067 case M##mode: \
3068 if (TARGET_64BIT) \
3069 sri->icode = in_p ? CODE_FOR_reload##m##di_toreg_z10 : \
3070 CODE_FOR_reload##m##di_tomem_z10; \
3071 else \
3072 sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 : \
3073 CODE_FOR_reload##m##si_tomem_z10; \
3074 break;
3075
3076 switch (GET_MODE (x))
3077 {
3078 __SECONDARY_RELOAD_CASE (QI, qi);
3079 __SECONDARY_RELOAD_CASE (HI, hi);
3080 __SECONDARY_RELOAD_CASE (SI, si);
3081 __SECONDARY_RELOAD_CASE (DI, di);
3082 __SECONDARY_RELOAD_CASE (TI, ti);
3083 __SECONDARY_RELOAD_CASE (SF, sf);
3084 __SECONDARY_RELOAD_CASE (DF, df);
3085 __SECONDARY_RELOAD_CASE (TF, tf);
3086 __SECONDARY_RELOAD_CASE (SD, sd);
3087 __SECONDARY_RELOAD_CASE (DD, dd);
3088 __SECONDARY_RELOAD_CASE (TD, td);
3089
3090 default:
3091 gcc_unreachable ();
3092 }
3093 #undef __SECONDARY_RELOAD_CASE
3094 }
3095 }
3096
3097 /* We need a scratch register when loading a PLUS expression which
3098 is not a legitimate operand of the LOAD ADDRESS instruction. */
3099 if (in_p && s390_plus_operand (x, mode))
3100 sri->icode = (TARGET_64BIT ?
3101 CODE_FOR_reloaddi_plus : CODE_FOR_reloadsi_plus);
3102
3103   /* When performing a multiword move from or to memory, we have to make
3104      sure the second chunk in memory is addressable without causing a
3105      displacement overflow.  If that would be the case, we calculate the
3106      address in a scratch register.  */
3107 if (MEM_P (x)
3108 && GET_CODE (XEXP (x, 0)) == PLUS
3109 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3110 && !DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1))
3111 + GET_MODE_SIZE (mode) - 1))
3112 {
3113       /* For GENERAL_REGS a displacement overflow is no problem if it occurs
3114 	 in an s_operand address, since we may fall back to lm/stm.  So we
3115 	 only have to care about overflows in the b+i+d case.  */
3116 if ((reg_classes_intersect_p (GENERAL_REGS, rclass)
3117 && s390_class_max_nregs (GENERAL_REGS, mode) > 1
3118 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
3119 /* For FP_REGS no lm/stm is available so this check is triggered
3120 for displacement overflows in b+i+d and b+d like addresses. */
3121 || (reg_classes_intersect_p (FP_REGS, rclass)
3122 && s390_class_max_nregs (FP_REGS, mode) > 1))
3123 {
3124 if (in_p)
3125 sri->icode = (TARGET_64BIT ?
3126 CODE_FOR_reloaddi_nonoffmem_in :
3127 CODE_FOR_reloadsi_nonoffmem_in);
3128 else
3129 sri->icode = (TARGET_64BIT ?
3130 CODE_FOR_reloaddi_nonoffmem_out :
3131 CODE_FOR_reloadsi_nonoffmem_out);
3132 }
3133 }
3134
3135   /* A scratch address register is needed when a symbolic constant is
3136      copied to r0 while compiling with -fPIC.  In other cases the target
3137      register might be used as a temporary (see legitimize_pic_address).  */
3138 if (in_p && SYMBOLIC_CONST (x) && flag_pic == 2 && rclass != ADDR_REGS)
3139 sri->icode = (TARGET_64BIT ?
3140 CODE_FOR_reloaddi_PIC_addr :
3141 CODE_FOR_reloadsi_PIC_addr);
3142
3143 /* Either scratch or no register needed. */
3144 return NO_REGS;
3145 }
3146
3147 /* Generate code to load SRC, which is a PLUS that is not a
3148 legitimate operand for the LA instruction, into TARGET.
3149 SCRATCH may be used as scratch register. */
3150
3151 void
3152 s390_expand_plus_operand (rtx target, rtx src,
3153 rtx scratch)
3154 {
3155 rtx sum1, sum2;
3156 struct s390_address ad;
3157
3158 /* src must be a PLUS; get its two operands. */
3159 gcc_assert (GET_CODE (src) == PLUS);
3160 gcc_assert (GET_MODE (src) == Pmode);
3161
3162   /* Check if either of the two operands is already scheduled
3163 for replacement by reload. This can happen e.g. when
3164 float registers occur in an address. */
3165 sum1 = find_replacement (&XEXP (src, 0));
3166 sum2 = find_replacement (&XEXP (src, 1));
3167 src = gen_rtx_PLUS (Pmode, sum1, sum2);
3168
3169 /* If the address is already strictly valid, there's nothing to do. */
3170 if (!s390_decompose_address (src, &ad)
3171 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
3172 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
3173 {
3174 /* Otherwise, one of the operands cannot be an address register;
3175 we reload its value into the scratch register. */
3176 if (true_regnum (sum1) < 1 || true_regnum (sum1) > 15)
3177 {
3178 emit_move_insn (scratch, sum1);
3179 sum1 = scratch;
3180 }
3181 if (true_regnum (sum2) < 1 || true_regnum (sum2) > 15)
3182 {
3183 emit_move_insn (scratch, sum2);
3184 sum2 = scratch;
3185 }
3186
3187 /* According to the way these invalid addresses are generated
3188 in reload.c, it should never happen (at least on s390) that
3189 *neither* of the PLUS components, after find_replacements
3190 was applied, is an address register. */
3191 if (sum1 == scratch && sum2 == scratch)
3192 {
3193 debug_rtx (src);
3194 gcc_unreachable ();
3195 }
3196
3197 src = gen_rtx_PLUS (Pmode, sum1, sum2);
3198 }
3199
3200 /* Emit the LOAD ADDRESS pattern. Note that reload of PLUS
3201 is only ever performed on addresses, so we can mark the
3202 sum as legitimate for LA in any case. */
3203 s390_load_address (target, src);
3204 }
3205
3206
3207 /* Return true if ADDR is a valid memory address.
3208 STRICT specifies whether strict register checking applies. */
3209
3210 static bool
3211 s390_legitimate_address_p (enum machine_mode mode, rtx addr, bool strict)
3212 {
3213 struct s390_address ad;
3214
3215 if (TARGET_Z10
3216 && larl_operand (addr, VOIDmode)
3217 && (mode == VOIDmode
3218 || s390_check_symref_alignment (addr, GET_MODE_SIZE (mode))))
3219 return true;
3220
3221 if (!s390_decompose_address (addr, &ad))
3222 return false;
3223
3224 if (strict)
3225 {
3226 if (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
3227 return false;
3228
3229 if (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx)))
3230 return false;
3231 }
3232 else
3233 {
3234 if (ad.base
3235 && !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
3236 || REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
3237 return false;
3238
3239 if (ad.indx
3240 && !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
3241 || REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
3242 return false;
3243 }
3244 return true;
3245 }
3246
3247 /* Return true if OP is a valid operand for the LA instruction.
3248 In 31-bit, we need to prove that the result is used as an
3249 address, as LA performs only a 31-bit addition. */
3250
3251 bool
3252 legitimate_la_operand_p (rtx op)
3253 {
3254 struct s390_address addr;
3255 if (!s390_decompose_address (op, &addr))
3256 return false;
3257
3258 return (TARGET_64BIT || addr.pointer);
3259 }
3260
3261 /* Return true if it is valid *and* preferable to use LA to
3262 compute the sum of OP1 and OP2. */
3263
3264 bool
3265 preferred_la_operand_p (rtx op1, rtx op2)
3266 {
3267 struct s390_address addr;
3268
3269 if (op2 != const0_rtx)
3270 op1 = gen_rtx_PLUS (Pmode, op1, op2);
3271
3272 if (!s390_decompose_address (op1, &addr))
3273 return false;
3274 if (addr.base && !REGNO_OK_FOR_BASE_P (REGNO (addr.base)))
3275 return false;
3276 if (addr.indx && !REGNO_OK_FOR_INDEX_P (REGNO (addr.indx)))
3277 return false;
3278
3279 /* Avoid LA instructions with index register on z196; it is
3280 preferable to use regular add instructions when possible. */
3281 if (addr.indx && s390_tune == PROCESSOR_2817_Z196)
3282 return false;
3283
3284 if (!TARGET_64BIT && !addr.pointer)
3285 return false;
3286
3287 if (addr.pointer)
3288 return true;
3289
3290 if ((addr.base && REG_P (addr.base) && REG_POINTER (addr.base))
3291 || (addr.indx && REG_P (addr.indx) && REG_POINTER (addr.indx)))
3292 return true;
3293
3294 return false;
3295 }
3296
3297 /* Emit a forced load-address operation to load SRC into DST.
3298 This will use the LOAD ADDRESS instruction even in situations
3299 where legitimate_la_operand_p (SRC) returns false. */
3300
3301 void
3302 s390_load_address (rtx dst, rtx src)
3303 {
3304 if (TARGET_64BIT)
3305 emit_move_insn (dst, src);
3306 else
3307 emit_insn (gen_force_la_31 (dst, src));
3308 }
3309
3310 /* Return a legitimate reference for ORIG (an address) using the
3311 register REG. If REG is 0, a new pseudo is generated.
3312
3313 There are two types of references that must be handled:
3314
3315 1. Global data references must load the address from the GOT, via
3316 the PIC reg. An insn is emitted to do this load, and the reg is
3317 returned.
3318
3319 2. Static data references, constant pool addresses, and code labels
3320 compute the address as an offset from the GOT, whose base is in
3321 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
3322 differentiate them from global data objects. The returned
3323 address is the PIC reg + an unspec constant.
3324
3325 TARGET_LEGITIMIZE_ADDRESS_P rejects symbolic references unless the PIC
3326 reg also appears in the address. */
3327
3328 rtx
3329 legitimize_pic_address (rtx orig, rtx reg)
3330 {
3331 rtx addr = orig;
3332 rtx new_rtx = orig;
3333 rtx base;
3334
3335 gcc_assert (!TLS_SYMBOLIC_CONST (addr));
3336
3337 if (GET_CODE (addr) == LABEL_REF
3338 || (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (addr)))
3339 {
3340 /* This is a local symbol. */
3341 if (TARGET_CPU_ZARCH && larl_operand (addr, VOIDmode))
3342 {
3343 /* Access local symbols PC-relative via LARL.
3344 This is the same as in the non-PIC case, so it is
3345 handled automatically ... */
3346 }
3347 else
3348 {
3349 /* Access local symbols relative to the GOT. */
3350
3351 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3352
3353 if (reload_in_progress || reload_completed)
3354 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3355
3356 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
3357 addr = gen_rtx_CONST (Pmode, addr);
3358 addr = force_const_mem (Pmode, addr);
3359 emit_move_insn (temp, addr);
3360
3361 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3362 if (reg != 0)
3363 {
3364 s390_load_address (reg, new_rtx);
3365 new_rtx = reg;
3366 }
3367 }
3368 }
3369 else if (GET_CODE (addr) == SYMBOL_REF)
3370 {
3371 if (reg == 0)
3372 reg = gen_reg_rtx (Pmode);
3373
3374 if (flag_pic == 1)
3375 {
3376 /* Assume GOT offset < 4k. This is handled the same way
3377 in both 31- and 64-bit code (@GOT). */
3378
3379 if (reload_in_progress || reload_completed)
3380 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3381
3382 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3383 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3384 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
3385 new_rtx = gen_const_mem (Pmode, new_rtx);
3386 emit_move_insn (reg, new_rtx);
3387 new_rtx = reg;
3388 }
3389 else if (TARGET_CPU_ZARCH)
3390 {
3391 /* If the GOT offset might be >= 4k, we determine the position
3392 of the GOT entry via a PC-relative LARL (@GOTENT). */
3393
3394 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3395
3396 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
3397 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
3398
3399 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
3400 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3401 emit_move_insn (temp, new_rtx);
3402
3403 new_rtx = gen_const_mem (Pmode, temp);
3404 emit_move_insn (reg, new_rtx);
3405 new_rtx = reg;
3406 }
3407 else
3408 {
3409 /* If the GOT offset might be >= 4k, we have to load it
3410 from the literal pool (@GOT). */
3411
3412 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3413
3414 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
3415 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
3416
3417 if (reload_in_progress || reload_completed)
3418 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3419
3420 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3421 addr = gen_rtx_CONST (Pmode, addr);
3422 addr = force_const_mem (Pmode, addr);
3423 emit_move_insn (temp, addr);
3424
3425 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3426 new_rtx = gen_const_mem (Pmode, new_rtx);
3427 emit_move_insn (reg, new_rtx);
3428 new_rtx = reg;
3429 }
3430 }
3431 else
3432 {
3433 if (GET_CODE (addr) == CONST)
3434 {
3435 addr = XEXP (addr, 0);
3436 if (GET_CODE (addr) == UNSPEC)
3437 {
3438 gcc_assert (XVECLEN (addr, 0) == 1);
3439 switch (XINT (addr, 1))
3440 {
3441 /* If someone moved a GOT-relative UNSPEC
3442 out of the literal pool, force them back in. */
3443 case UNSPEC_GOTOFF:
3444 case UNSPEC_PLTOFF:
3445 new_rtx = force_const_mem (Pmode, orig);
3446 break;
3447
3448 /* @GOT is OK as is if small. */
3449 case UNSPEC_GOT:
3450 if (flag_pic == 2)
3451 new_rtx = force_const_mem (Pmode, orig);
3452 break;
3453
3454 /* @GOTENT is OK as is. */
3455 case UNSPEC_GOTENT:
3456 break;
3457
3458 	      /* @PLT is OK as is on 64-bit, but must be converted to
3459 GOT-relative @PLTOFF on 31-bit. */
3460 case UNSPEC_PLT:
3461 if (!TARGET_CPU_ZARCH)
3462 {
3463 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3464
3465 if (reload_in_progress || reload_completed)
3466 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3467
3468 addr = XVECEXP (addr, 0, 0);
3469 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr),
3470 UNSPEC_PLTOFF);
3471 addr = gen_rtx_CONST (Pmode, addr);
3472 addr = force_const_mem (Pmode, addr);
3473 emit_move_insn (temp, addr);
3474
3475 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3476 if (reg != 0)
3477 {
3478 s390_load_address (reg, new_rtx);
3479 new_rtx = reg;
3480 }
3481 }
3482 break;
3483
3484 /* Everything else cannot happen. */
3485 default:
3486 gcc_unreachable ();
3487 }
3488 }
3489 else
3490 gcc_assert (GET_CODE (addr) == PLUS);
3491 }
3492 if (GET_CODE (addr) == PLUS)
3493 {
3494 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
3495
3496 gcc_assert (!TLS_SYMBOLIC_CONST (op0));
3497 gcc_assert (!TLS_SYMBOLIC_CONST (op1));
3498
3499 /* Check first to see if this is a constant offset
3500 from a local symbol reference. */
3501 if ((GET_CODE (op0) == LABEL_REF
3502 || (GET_CODE (op0) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (op0)))
3503 && GET_CODE (op1) == CONST_INT)
3504 {
3505 if (TARGET_CPU_ZARCH
3506 && larl_operand (op0, VOIDmode)
3507 && INTVAL (op1) < (HOST_WIDE_INT)1 << 31
3508 && INTVAL (op1) >= -((HOST_WIDE_INT)1 << 31))
3509 {
3510 if (INTVAL (op1) & 1)
3511 {
3512 /* LARL can't handle odd offsets, so emit a
3513 pair of LARL and LA. */
3514 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3515
3516 if (!DISP_IN_RANGE (INTVAL (op1)))
3517 {
3518 HOST_WIDE_INT even = INTVAL (op1) - 1;
3519 op0 = gen_rtx_PLUS (Pmode, op0, GEN_INT (even));
3520 op0 = gen_rtx_CONST (Pmode, op0);
3521 op1 = const1_rtx;
3522 }
3523
3524 emit_move_insn (temp, op0);
3525 new_rtx = gen_rtx_PLUS (Pmode, temp, op1);
3526
3527 if (reg != 0)
3528 {
3529 s390_load_address (reg, new_rtx);
3530 new_rtx = reg;
3531 }
3532 }
3533 else
3534 {
3535 /* If the offset is even, we can just use LARL.
3536 This will happen automatically. */
3537 }
3538 }
3539 else
3540 {
3541 /* Access local symbols relative to the GOT. */
3542
3543 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3544
3545 if (reload_in_progress || reload_completed)
3546 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3547
3548 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
3549 UNSPEC_GOTOFF);
3550 addr = gen_rtx_PLUS (Pmode, addr, op1);
3551 addr = gen_rtx_CONST (Pmode, addr);
3552 addr = force_const_mem (Pmode, addr);
3553 emit_move_insn (temp, addr);
3554
3555 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3556 if (reg != 0)
3557 {
3558 s390_load_address (reg, new_rtx);
3559 new_rtx = reg;
3560 }
3561 }
3562 }
3563
3564 /* Now, check whether it is a GOT relative symbol plus offset
3565 that was pulled out of the literal pool. Force it back in. */
3566
3567 else if (GET_CODE (op0) == UNSPEC
3568 && GET_CODE (op1) == CONST_INT
3569 && XINT (op0, 1) == UNSPEC_GOTOFF)
3570 {
3571 gcc_assert (XVECLEN (op0, 0) == 1);
3572
3573 new_rtx = force_const_mem (Pmode, orig);
3574 }
3575
3576 /* Otherwise, compute the sum. */
3577 else
3578 {
3579 base = legitimize_pic_address (XEXP (addr, 0), reg);
3580 new_rtx = legitimize_pic_address (XEXP (addr, 1),
3581 base == reg ? NULL_RTX : reg);
3582 if (GET_CODE (new_rtx) == CONST_INT)
3583 new_rtx = plus_constant (Pmode, base, INTVAL (new_rtx));
3584 else
3585 {
3586 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
3587 {
3588 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
3589 new_rtx = XEXP (new_rtx, 1);
3590 }
3591 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
3592 }
3593
3594 if (GET_CODE (new_rtx) == CONST)
3595 new_rtx = XEXP (new_rtx, 0);
3596 new_rtx = force_operand (new_rtx, 0);
3597 }
3598 }
3599 }
3600 return new_rtx;
3601 }
3602
3603 /* Load the thread pointer into a register. */
3604
3605 rtx
3606 s390_get_thread_pointer (void)
3607 {
3608 rtx tp = gen_reg_rtx (Pmode);
3609
3610 emit_move_insn (tp, gen_rtx_REG (Pmode, TP_REGNUM));
3611 mark_reg_pointer (tp, BITS_PER_WORD);
3612
3613 return tp;
3614 }
3615
3616 /* Emit a TLS call insn.  The call target is the SYMBOL_REF stored
3617    in s390_tls_symbol, which always refers to __tls_get_offset.
3618    The returned offset is written to RESULT_REG and a USE rtx is
3619    generated for TLS_CALL.  */
3620
3621 static GTY(()) rtx s390_tls_symbol;
3622
3623 static void
3624 s390_emit_tls_call_insn (rtx result_reg, rtx tls_call)
3625 {
3626 rtx insn;
3627
3628 if (!flag_pic)
3629 emit_insn (s390_load_got ());
3630
3631 if (!s390_tls_symbol)
3632 s390_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_offset");
3633
3634 insn = s390_emit_call (s390_tls_symbol, tls_call, result_reg,
3635 gen_rtx_REG (Pmode, RETURN_REGNUM));
3636
3637 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), result_reg);
3638 RTL_CONST_CALL_P (insn) = 1;
3639 }
3640
3641 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3642 this (thread-local) address. REG may be used as temporary. */
3643
3644 static rtx
3645 legitimize_tls_address (rtx addr, rtx reg)
3646 {
3647 rtx new_rtx, tls_call, temp, base, r2, insn;
3648
3649 if (GET_CODE (addr) == SYMBOL_REF)
3650 switch (tls_symbolic_operand (addr))
3651 {
3652 case TLS_MODEL_GLOBAL_DYNAMIC:
3653 start_sequence ();
3654 r2 = gen_rtx_REG (Pmode, 2);
3655 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_TLSGD);
3656 new_rtx = gen_rtx_CONST (Pmode, tls_call);
3657 new_rtx = force_const_mem (Pmode, new_rtx);
3658 emit_move_insn (r2, new_rtx);
3659 s390_emit_tls_call_insn (r2, tls_call);
3660 insn = get_insns ();
3661 end_sequence ();
3662
3663 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3664 temp = gen_reg_rtx (Pmode);
3665 emit_libcall_block (insn, temp, r2, new_rtx);
3666
3667 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3668 if (reg != 0)
3669 {
3670 s390_load_address (reg, new_rtx);
3671 new_rtx = reg;
3672 }
3673 break;
3674
3675 case TLS_MODEL_LOCAL_DYNAMIC:
3676 start_sequence ();
3677 r2 = gen_rtx_REG (Pmode, 2);
3678 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM);
3679 new_rtx = gen_rtx_CONST (Pmode, tls_call);
3680 new_rtx = force_const_mem (Pmode, new_rtx);
3681 emit_move_insn (r2, new_rtx);
3682 s390_emit_tls_call_insn (r2, tls_call);
3683 insn = get_insns ();
3684 end_sequence ();
3685
3686 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM_NTPOFF);
3687 temp = gen_reg_rtx (Pmode);
3688 emit_libcall_block (insn, temp, r2, new_rtx);
3689
3690 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3691 base = gen_reg_rtx (Pmode);
3692 s390_load_address (base, new_rtx);
3693
3694 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_DTPOFF);
3695 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3696 new_rtx = force_const_mem (Pmode, new_rtx);
3697 temp = gen_reg_rtx (Pmode);
3698 emit_move_insn (temp, new_rtx);
3699
3700 new_rtx = gen_rtx_PLUS (Pmode, base, temp);
3701 if (reg != 0)
3702 {
3703 s390_load_address (reg, new_rtx);
3704 new_rtx = reg;
3705 }
3706 break;
3707
3708 case TLS_MODEL_INITIAL_EXEC:
3709 if (flag_pic == 1)
3710 {
3711 /* Assume GOT offset < 4k. This is handled the same way
3712 in both 31- and 64-bit code. */
3713
3714 if (reload_in_progress || reload_completed)
3715 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3716
3717 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3718 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3719 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
3720 new_rtx = gen_const_mem (Pmode, new_rtx);
3721 temp = gen_reg_rtx (Pmode);
3722 emit_move_insn (temp, new_rtx);
3723 }
3724 else if (TARGET_CPU_ZARCH)
3725 {
3726 /* If the GOT offset might be >= 4k, we determine the position
3727 of the GOT entry via a PC-relative LARL. */
3728
3729 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3730 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3731 temp = gen_reg_rtx (Pmode);
3732 emit_move_insn (temp, new_rtx);
3733
3734 new_rtx = gen_const_mem (Pmode, temp);
3735 temp = gen_reg_rtx (Pmode);
3736 emit_move_insn (temp, new_rtx);
3737 }
3738 else if (flag_pic)
3739 {
3740 /* If the GOT offset might be >= 4k, we have to load it
3741 from the literal pool. */
3742
3743 if (reload_in_progress || reload_completed)
3744 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3745
3746 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3747 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3748 new_rtx = force_const_mem (Pmode, new_rtx);
3749 temp = gen_reg_rtx (Pmode);
3750 emit_move_insn (temp, new_rtx);
3751
3752 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3753 new_rtx = gen_const_mem (Pmode, new_rtx);
3754
3755 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
3756 temp = gen_reg_rtx (Pmode);
3757 emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
3758 }
3759 else
3760 {
3761 /* In position-dependent code, load the absolute address of
3762 the GOT entry from the literal pool. */
3763
3764 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3765 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3766 new_rtx = force_const_mem (Pmode, new_rtx);
3767 temp = gen_reg_rtx (Pmode);
3768 emit_move_insn (temp, new_rtx);
3769
3770 new_rtx = temp;
3771 new_rtx = gen_const_mem (Pmode, new_rtx);
3772 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
3773 temp = gen_reg_rtx (Pmode);
3774 emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
3775 }
3776
3777 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3778 if (reg != 0)
3779 {
3780 s390_load_address (reg, new_rtx);
3781 new_rtx = reg;
3782 }
3783 break;
3784
3785 case TLS_MODEL_LOCAL_EXEC:
3786 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3787 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3788 new_rtx = force_const_mem (Pmode, new_rtx);
3789 temp = gen_reg_rtx (Pmode);
3790 emit_move_insn (temp, new_rtx);
3791
3792 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3793 if (reg != 0)
3794 {
3795 s390_load_address (reg, new_rtx);
3796 new_rtx = reg;
3797 }
3798 break;
3799
3800 default:
3801 gcc_unreachable ();
3802 }
3803
3804 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == UNSPEC)
3805 {
3806 switch (XINT (XEXP (addr, 0), 1))
3807 {
3808 case UNSPEC_INDNTPOFF:
3809 gcc_assert (TARGET_CPU_ZARCH);
3810 new_rtx = addr;
3811 break;
3812
3813 default:
3814 gcc_unreachable ();
3815 }
3816 }
3817
3818 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
3819 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
3820 {
3821 new_rtx = XEXP (XEXP (addr, 0), 0);
3822 if (GET_CODE (new_rtx) != SYMBOL_REF)
3823 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3824
3825 new_rtx = legitimize_tls_address (new_rtx, reg);
3826 new_rtx = plus_constant (Pmode, new_rtx,
3827 INTVAL (XEXP (XEXP (addr, 0), 1)));
3828 new_rtx = force_operand (new_rtx, 0);
3829 }
3830
3831 else
3832 gcc_unreachable (); /* for now ... */
3833
3834 return new_rtx;
3835 }
3836
3837 /* Emit insns making the address in operands[1] valid for a standard
3838 move to operands[0]. operands[1] is replaced by an address which
3839 should be used instead of the former RTX to emit the move
3840 pattern. */
3841
3842 void
3843 emit_symbolic_move (rtx *operands)
3844 {
3845 rtx temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
3846
3847 if (GET_CODE (operands[0]) == MEM)
3848 operands[1] = force_reg (Pmode, operands[1]);
3849 else if (TLS_SYMBOLIC_CONST (operands[1]))
3850 operands[1] = legitimize_tls_address (operands[1], temp);
3851 else if (flag_pic)
3852 operands[1] = legitimize_pic_address (operands[1], temp);
3853 }
3854
3855 /* Try machine-dependent ways of modifying an illegitimate address X
3856 to be legitimate. If we find one, return the new, valid address.
3857
3858 OLDX is the address as it was before break_out_memory_refs was called.
3859 In some cases it is useful to look at this to decide what needs to be done.
3860
3861 MODE is the mode of the operand pointed to by X.
3862
3863 When -fpic is used, special handling is needed for symbolic references.
3864 See comments by legitimize_pic_address for details. */
3865
3866 static rtx
3867 s390_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
3868 enum machine_mode mode ATTRIBUTE_UNUSED)
3869 {
3870 rtx constant_term = const0_rtx;
3871
3872 if (TLS_SYMBOLIC_CONST (x))
3873 {
3874 x = legitimize_tls_address (x, 0);
3875
3876 if (s390_legitimate_address_p (mode, x, FALSE))
3877 return x;
3878 }
3879 else if (GET_CODE (x) == PLUS
3880 && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
3881 || TLS_SYMBOLIC_CONST (XEXP (x, 1))))
3882 {
3883 return x;
3884 }
3885 else if (flag_pic)
3886 {
3887 if (SYMBOLIC_CONST (x)
3888 || (GET_CODE (x) == PLUS
3889 && (SYMBOLIC_CONST (XEXP (x, 0))
3890 || SYMBOLIC_CONST (XEXP (x, 1)))))
3891 x = legitimize_pic_address (x, 0);
3892
3893 if (s390_legitimate_address_p (mode, x, FALSE))
3894 return x;
3895 }
3896
3897 x = eliminate_constant_term (x, &constant_term);
3898
3899 /* Optimize loading of large displacements by splitting them
3900 into the multiple of 4K and the rest; this allows the
3901 former to be CSE'd if possible.
3902
3903 Don't do this if the displacement is added to a register
3904 pointing into the stack frame, as the offsets will
3905 change later anyway. */
3906
3907 if (GET_CODE (constant_term) == CONST_INT
3908 && !TARGET_LONG_DISPLACEMENT
3909 && !DISP_IN_RANGE (INTVAL (constant_term))
3910 && !(REG_P (x) && REGNO_PTR_FRAME_P (REGNO (x))))
3911 {
3912 HOST_WIDE_INT lower = INTVAL (constant_term) & 0xfff;
3913 HOST_WIDE_INT upper = INTVAL (constant_term) ^ lower;
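      /* For example, a displacement of 0x12345 is split into
         upper = 0x12000 (loaded into a register and subject to CSE)
         and lower = 0x345 (kept as an in-range displacement).  */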
3914
3915 rtx temp = gen_reg_rtx (Pmode);
3916 rtx val = force_operand (GEN_INT (upper), temp);
3917 if (val != temp)
3918 emit_move_insn (temp, val);
3919
3920 x = gen_rtx_PLUS (Pmode, x, temp);
3921 constant_term = GEN_INT (lower);
3922 }
3923
3924 if (GET_CODE (x) == PLUS)
3925 {
3926 if (GET_CODE (XEXP (x, 0)) == REG)
3927 {
3928 rtx temp = gen_reg_rtx (Pmode);
3929 rtx val = force_operand (XEXP (x, 1), temp);
3930 if (val != temp)
3931 emit_move_insn (temp, val);
3932
3933 x = gen_rtx_PLUS (Pmode, XEXP (x, 0), temp);
3934 }
3935
3936 else if (GET_CODE (XEXP (x, 1)) == REG)
3937 {
3938 rtx temp = gen_reg_rtx (Pmode);
3939 rtx val = force_operand (XEXP (x, 0), temp);
3940 if (val != temp)
3941 emit_move_insn (temp, val);
3942
3943 x = gen_rtx_PLUS (Pmode, temp, XEXP (x, 1));
3944 }
3945 }
3946
3947 if (constant_term != const0_rtx)
3948 x = gen_rtx_PLUS (Pmode, x, constant_term);
3949
3950 return x;
3951 }
3952
3953 /* Try a machine-dependent way of reloading an illegitimate address AD
3954 operand. If we find one, push the reload and return the new address.
3955
3956 MODE is the mode of the enclosing MEM. OPNUM is the operand number
3957 and TYPE is the reload type of the current reload. */
3958
3959 rtx
3960 legitimize_reload_address (rtx ad, enum machine_mode mode ATTRIBUTE_UNUSED,
3961 int opnum, int type)
3962 {
3963 if (!optimize || TARGET_LONG_DISPLACEMENT)
3964 return NULL_RTX;
3965
3966 if (GET_CODE (ad) == PLUS)
3967 {
3968 rtx tem = simplify_binary_operation (PLUS, Pmode,
3969 XEXP (ad, 0), XEXP (ad, 1));
3970 if (tem)
3971 ad = tem;
3972 }
3973
3974 if (GET_CODE (ad) == PLUS
3975 && GET_CODE (XEXP (ad, 0)) == REG
3976 && GET_CODE (XEXP (ad, 1)) == CONST_INT
3977 && !DISP_IN_RANGE (INTVAL (XEXP (ad, 1))))
3978 {
3979 HOST_WIDE_INT lower = INTVAL (XEXP (ad, 1)) & 0xfff;
3980 HOST_WIDE_INT upper = INTVAL (XEXP (ad, 1)) ^ lower;
3981 rtx cst, tem, new_rtx;
3982
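      /* Split the displacement: UPPER is reloaded into an address register
         (through the literal pool if it is not a legitimate reload
         constant), while LOWER stays as an in-range displacement.  */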
3983 cst = GEN_INT (upper);
3984 if (!legitimate_reload_constant_p (cst))
3985 cst = force_const_mem (Pmode, cst);
3986
3987 tem = gen_rtx_PLUS (Pmode, XEXP (ad, 0), cst);
3988 new_rtx = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));
3989
3990 push_reload (XEXP (tem, 1), 0, &XEXP (tem, 1), 0,
3991 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
3992 opnum, (enum reload_type) type);
3993 return new_rtx;
3994 }
3995
3996 return NULL_RTX;
3997 }
3998
3999 /* Emit code to move LEN bytes from SRC to DST. */
4000
4001 bool
4002 s390_expand_movmem (rtx dst, rtx src, rtx len)
4003 {
4004 /* When tuning for z10 or higher we rely on the Glibc functions to
4005    do the right thing.  Inline code is generated only for constant
4006    lengths below 64k. */
4007 if (s390_tune >= PROCESSOR_2097_Z10
4008 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
4009 return false;
4010
4011 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
4012 {
4013 if (INTVAL (len) > 0)
4014 emit_insn (gen_movmem_short (dst, src, GEN_INT (INTVAL (len) - 1)));
4015 }
4016
4017 else if (TARGET_MVCLE)
4018 {
4019 emit_insn (gen_movmem_long (dst, src, convert_to_mode (Pmode, len, 1)));
4020 }
4021
4022 else
4023 {
4024 rtx dst_addr, src_addr, count, blocks, temp;
4025 rtx loop_start_label = gen_label_rtx ();
4026 rtx loop_end_label = gen_label_rtx ();
4027 rtx end_label = gen_label_rtx ();
4028 enum machine_mode mode;
4029
4030 mode = GET_MODE (len);
4031 if (mode == VOIDmode)
4032 mode = Pmode;
4033
4034 dst_addr = gen_reg_rtx (Pmode);
4035 src_addr = gen_reg_rtx (Pmode);
4036 count = gen_reg_rtx (mode);
4037 blocks = gen_reg_rtx (mode);
4038
4039 convert_move (count, len, 1);
4040 emit_cmp_and_jump_insns (count, const0_rtx,
4041 EQ, NULL_RTX, mode, 1, end_label);
4042
4043 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
4044 emit_move_insn (src_addr, force_operand (XEXP (src, 0), NULL_RTX));
4045 dst = change_address (dst, VOIDmode, dst_addr);
4046 src = change_address (src, VOIDmode, src_addr);
4047
4048 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4049 OPTAB_DIRECT);
4050 if (temp != count)
4051 emit_move_insn (count, temp);
4052
4053 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4054 OPTAB_DIRECT);
4055 if (temp != blocks)
4056 emit_move_insn (blocks, temp);
4057
4058 emit_cmp_and_jump_insns (blocks, const0_rtx,
4059 EQ, NULL_RTX, mode, 1, loop_end_label);
4060
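      /* Each loop iteration copies one 256-byte block (MVC with a
         length-1 operand of 255) and advances both addresses;
         BLOCKS = (LEN - 1) >> 8 is the number of full blocks.  The
         movmem_short after the loop copies the remaining tail of at
         most 256 bytes.  */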
4061 emit_label (loop_start_label);
4062
4063 if (TARGET_Z10
4064 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 768))
4065 {
4066 rtx prefetch;
4067
4068 /* Issue a read prefetch for the +3 cache line. */
4069 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, src_addr, GEN_INT (768)),
4070 const0_rtx, const0_rtx);
4071 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4072 emit_insn (prefetch);
4073
4074 /* Issue a write prefetch for the +3 cache line. */
4075 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (768)),
4076 const1_rtx, const0_rtx);
4077 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4078 emit_insn (prefetch);
4079 }
4080
4081 emit_insn (gen_movmem_short (dst, src, GEN_INT (255)));
4082 s390_load_address (dst_addr,
4083 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
4084 s390_load_address (src_addr,
4085 gen_rtx_PLUS (Pmode, src_addr, GEN_INT (256)));
4086
4087 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4088 OPTAB_DIRECT);
4089 if (temp != blocks)
4090 emit_move_insn (blocks, temp);
4091
4092 emit_cmp_and_jump_insns (blocks, const0_rtx,
4093 EQ, NULL_RTX, mode, 1, loop_end_label);
4094
4095 emit_jump (loop_start_label);
4096 emit_label (loop_end_label);
4097
4098 emit_insn (gen_movmem_short (dst, src,
4099 convert_to_mode (Pmode, count, 1)));
4100 emit_label (end_label);
4101 }
4102 return true;
4103 }
4104
4105 /* Emit code to set LEN bytes at DST to VAL.
4106 Make use of clrmem if VAL is zero. */
4107
4108 void
4109 s390_expand_setmem (rtx dst, rtx len, rtx val)
4110 {
4111 if (GET_CODE (len) == CONST_INT && INTVAL (len) == 0)
4112 return;
4113
4114 gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);
4115
4116 if (GET_CODE (len) == CONST_INT && INTVAL (len) > 0 && INTVAL (len) <= 257)
4117 {
4118 if (val == const0_rtx && INTVAL (len) <= 256)
4119 emit_insn (gen_clrmem_short (dst, GEN_INT (INTVAL (len) - 1)));
4120 else
4121 {
4122 /* Initialize memory by storing the first byte. */
4123 emit_move_insn (adjust_address (dst, QImode, 0), val);
4124
4125 if (INTVAL (len) > 1)
4126 {
4127 /* Initiate 1 byte overlap move.
4128 The first byte of DST is propagated through DSTP1.
4129 Prepare a movmem for: DST+1 = DST (length = LEN - 1).
4130 DST is set to size 1 so the rest of the memory location
4131 	     does not count as a source operand. */
4132 rtx dstp1 = adjust_address (dst, VOIDmode, 1);
4133 set_mem_size (dst, 1);
4134
4135 emit_insn (gen_movmem_short (dstp1, dst,
4136 GEN_INT (INTVAL (len) - 2)));
4137 }
4138 }
4139 }
4140
4141 else if (TARGET_MVCLE)
4142 {
4143 val = force_not_mem (convert_modes (Pmode, QImode, val, 1));
4144 emit_insn (gen_setmem_long (dst, convert_to_mode (Pmode, len, 1), val));
4145 }
4146
4147 else
4148 {
4149 rtx dst_addr, count, blocks, temp, dstp1 = NULL_RTX;
4150 rtx loop_start_label = gen_label_rtx ();
4151 rtx loop_end_label = gen_label_rtx ();
4152 rtx end_label = gen_label_rtx ();
4153 enum machine_mode mode;
4154
4155 mode = GET_MODE (len);
4156 if (mode == VOIDmode)
4157 mode = Pmode;
4158
4159 dst_addr = gen_reg_rtx (Pmode);
4160 count = gen_reg_rtx (mode);
4161 blocks = gen_reg_rtx (mode);
4162
4163 convert_move (count, len, 1);
4164 emit_cmp_and_jump_insns (count, const0_rtx,
4165 EQ, NULL_RTX, mode, 1, end_label);
4166
4167 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
4168 dst = change_address (dst, VOIDmode, dst_addr);
4169
4170 if (val == const0_rtx)
4171 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4172 OPTAB_DIRECT);
4173 else
4174 {
4175 dstp1 = adjust_address (dst, VOIDmode, 1);
4176 set_mem_size (dst, 1);
4177
4178 /* Initialize memory by storing the first byte. */
4179 emit_move_insn (adjust_address (dst, QImode, 0), val);
4180
4181 /* If count is 1 we are done. */
4182 emit_cmp_and_jump_insns (count, const1_rtx,
4183 EQ, NULL_RTX, mode, 1, end_label);
4184
4185 temp = expand_binop (mode, add_optab, count, GEN_INT (-2), count, 1,
4186 OPTAB_DIRECT);
4187 }
4188 if (temp != count)
4189 emit_move_insn (count, temp);
4190
4191 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4192 OPTAB_DIRECT);
4193 if (temp != blocks)
4194 emit_move_insn (blocks, temp);
4195
4196 emit_cmp_and_jump_insns (blocks, const0_rtx,
4197 EQ, NULL_RTX, mode, 1, loop_end_label);
4198
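      /* As in the short case above: each iteration either clears one
         256-byte block or propagates the first byte through it via an
         overlapping move from DST to DST + 1; the remaining tail is
         handled after the loop.  */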
4199 emit_label (loop_start_label);
4200
4201 if (TARGET_Z10
4202 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 1024))
4203 {
4204 /* Issue a write prefetch for the +4 cache line. */
4205 rtx prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr,
4206 GEN_INT (1024)),
4207 const1_rtx, const0_rtx);
4208 emit_insn (prefetch);
4209 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4210 }
4211
4212 if (val == const0_rtx)
4213 emit_insn (gen_clrmem_short (dst, GEN_INT (255)));
4214 else
4215 emit_insn (gen_movmem_short (dstp1, dst, GEN_INT (255)));
4216 s390_load_address (dst_addr,
4217 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
4218
4219 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4220 OPTAB_DIRECT);
4221 if (temp != blocks)
4222 emit_move_insn (blocks, temp);
4223
4224 emit_cmp_and_jump_insns (blocks, const0_rtx,
4225 EQ, NULL_RTX, mode, 1, loop_end_label);
4226
4227 emit_jump (loop_start_label);
4228 emit_label (loop_end_label);
4229
4230 if (val == const0_rtx)
4231 emit_insn (gen_clrmem_short (dst, convert_to_mode (Pmode, count, 1)));
4232 else
4233 emit_insn (gen_movmem_short (dstp1, dst, convert_to_mode (Pmode, count, 1)));
4234 emit_label (end_label);
4235 }
4236 }
4237
4238 /* Emit code to compare LEN bytes at OP0 with those at OP1,
4239 and return the result in TARGET. */
4240
4241 bool
4242 s390_expand_cmpmem (rtx target, rtx op0, rtx op1, rtx len)
4243 {
4244 rtx ccreg = gen_rtx_REG (CCUmode, CC_REGNUM);
4245 rtx tmp;
4246
4247 /* When tuning for z10 or higher we rely on the Glibc functions to
4248    do the right thing.  Inline code is generated only for constant
4249    lengths below 64k. */
4250 if (s390_tune >= PROCESSOR_2097_Z10
4251 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
4252 return false;
4253
4254 /* As the result of CMPINT is inverted compared to what we need,
4255 we have to swap the operands. */
4256 tmp = op0; op0 = op1; op1 = tmp;
4257
4258 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
4259 {
4260 if (INTVAL (len) > 0)
4261 {
4262 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (INTVAL (len) - 1)));
4263 emit_insn (gen_cmpint (target, ccreg));
4264 }
4265 else
4266 emit_move_insn (target, const0_rtx);
4267 }
4268 else if (TARGET_MVCLE)
4269 {
4270 emit_insn (gen_cmpmem_long (op0, op1, convert_to_mode (Pmode, len, 1)));
4271 emit_insn (gen_cmpint (target, ccreg));
4272 }
4273 else
4274 {
4275 rtx addr0, addr1, count, blocks, temp;
4276 rtx loop_start_label = gen_label_rtx ();
4277 rtx loop_end_label = gen_label_rtx ();
4278 rtx end_label = gen_label_rtx ();
4279 enum machine_mode mode;
4280
4281 mode = GET_MODE (len);
4282 if (mode == VOIDmode)
4283 mode = Pmode;
4284
4285 addr0 = gen_reg_rtx (Pmode);
4286 addr1 = gen_reg_rtx (Pmode);
4287 count = gen_reg_rtx (mode);
4288 blocks = gen_reg_rtx (mode);
4289
4290 convert_move (count, len, 1);
4291 emit_cmp_and_jump_insns (count, const0_rtx,
4292 EQ, NULL_RTX, mode, 1, end_label);
4293
4294 emit_move_insn (addr0, force_operand (XEXP (op0, 0), NULL_RTX));
4295 emit_move_insn (addr1, force_operand (XEXP (op1, 0), NULL_RTX));
4296 op0 = change_address (op0, VOIDmode, addr0);
4297 op1 = change_address (op1, VOIDmode, addr1);
4298
4299 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4300 OPTAB_DIRECT);
4301 if (temp != count)
4302 emit_move_insn (count, temp);
4303
4304 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4305 OPTAB_DIRECT);
4306 if (temp != blocks)
4307 emit_move_insn (blocks, temp);
4308
4309 emit_cmp_and_jump_insns (blocks, const0_rtx,
4310 EQ, NULL_RTX, mode, 1, loop_end_label);
4311
4312 emit_label (loop_start_label);
4313
4314 if (TARGET_Z10
4315 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 512))
4316 {
4317 rtx prefetch;
4318
4319 /* Issue a read prefetch for the +2 cache line of operand 1. */
4320 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr0, GEN_INT (512)),
4321 const0_rtx, const0_rtx);
4322 emit_insn (prefetch);
4323 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4324
4325 /* Issue a read prefetch for the +2 cache line of operand 2. */
4326 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr1, GEN_INT (512)),
4327 const0_rtx, const0_rtx);
4328 emit_insn (prefetch);
4329 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4330 }
4331
4332 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (255)));
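      /* If this 256-byte chunk already differs (CC != 0), leave the loop;
         the CMPINT at END_LABEL turns the final condition code into the
         signed comparison result.  */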
4333 temp = gen_rtx_NE (VOIDmode, ccreg, const0_rtx);
4334 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
4335 gen_rtx_LABEL_REF (VOIDmode, end_label), pc_rtx);
4336 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
4337 emit_jump_insn (temp);
4338
4339 s390_load_address (addr0,
4340 gen_rtx_PLUS (Pmode, addr0, GEN_INT (256)));
4341 s390_load_address (addr1,
4342 gen_rtx_PLUS (Pmode, addr1, GEN_INT (256)));
4343
4344 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4345 OPTAB_DIRECT);
4346 if (temp != blocks)
4347 emit_move_insn (blocks, temp);
4348
4349 emit_cmp_and_jump_insns (blocks, const0_rtx,
4350 EQ, NULL_RTX, mode, 1, loop_end_label);
4351
4352 emit_jump (loop_start_label);
4353 emit_label (loop_end_label);
4354
4355 emit_insn (gen_cmpmem_short (op0, op1,
4356 convert_to_mode (Pmode, count, 1)));
4357 emit_label (end_label);
4358
4359 emit_insn (gen_cmpint (target, ccreg));
4360 }
4361 return true;
4362 }
4363
4364
4365 /* Expand conditional increment or decrement using alc/slb instructions.
4366 Should generate code setting DST to either SRC or SRC + INCREMENT,
4367 depending on the result of the comparison CMP_OP0 CMP_CODE CMP_OP1.
4368 Returns true if successful, false otherwise.
4369
4370 That makes it possible to implement some if-constructs without jumps e.g.:
4371 (borrow = CC0 | CC1 and carry = CC2 | CC3)
4372 unsigned int a, b, c;
4373 if (a < b) c++; -> CCU b > a -> CC2; c += carry;
4374 if (a < b) c--; -> CCL3 a - b -> borrow; c -= borrow;
4375 if (a <= b) c++; -> CCL3 b - a -> borrow; c += carry;
4376 if (a <= b) c--; -> CCU a <= b -> borrow; c -= borrow;
4377
4378 Checks for EQ and NE with a nonzero value need an additional xor e.g.:
4379 if (a == b) c++; -> CCL3 a ^= b; 0 - a -> borrow; c += carry;
4380 if (a == b) c--; -> CCU a ^= b; a <= 0 -> CC0 | CC1; c -= borrow;
4381 if (a != b) c++; -> CCU a ^= b; a > 0 -> CC2; c += carry;
4382 if (a != b) c--; -> CCL3 a ^= b; 0 - a -> borrow; c -= borrow; */
4383
4384 bool
4385 s390_expand_addcc (enum rtx_code cmp_code, rtx cmp_op0, rtx cmp_op1,
4386 rtx dst, rtx src, rtx increment)
4387 {
4388 enum machine_mode cmp_mode;
4389 enum machine_mode cc_mode;
4390 rtx op_res;
4391 rtx insn;
4392 rtvec p;
4393 int ret;
4394
4395 if ((GET_MODE (cmp_op0) == SImode || GET_MODE (cmp_op0) == VOIDmode)
4396 && (GET_MODE (cmp_op1) == SImode || GET_MODE (cmp_op1) == VOIDmode))
4397 cmp_mode = SImode;
4398 else if ((GET_MODE (cmp_op0) == DImode || GET_MODE (cmp_op0) == VOIDmode)
4399 && (GET_MODE (cmp_op1) == DImode || GET_MODE (cmp_op1) == VOIDmode))
4400 cmp_mode = DImode;
4401 else
4402 return false;
4403
4404 /* Try ADD LOGICAL WITH CARRY. */
4405 if (increment == const1_rtx)
4406 {
4407 /* Determine CC mode to use. */
4408 if (cmp_code == EQ || cmp_code == NE)
4409 {
4410 if (cmp_op1 != const0_rtx)
4411 {
4412 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
4413 NULL_RTX, 0, OPTAB_WIDEN);
4414 cmp_op1 = const0_rtx;
4415 }
4416
4417 cmp_code = cmp_code == EQ ? LEU : GTU;
4418 }
4419
4420 if (cmp_code == LTU || cmp_code == LEU)
4421 {
4422 rtx tem = cmp_op0;
4423 cmp_op0 = cmp_op1;
4424 cmp_op1 = tem;
4425 cmp_code = swap_condition (cmp_code);
4426 }
4427
4428 switch (cmp_code)
4429 {
4430 case GTU:
4431 cc_mode = CCUmode;
4432 break;
4433
4434 case GEU:
4435 cc_mode = CCL3mode;
4436 break;
4437
4438 default:
4439 return false;
4440 }
4441
4442 /* Emit comparison instruction pattern. */
4443 if (!register_operand (cmp_op0, cmp_mode))
4444 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4445
4446 insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
4447 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4448 /* We use insn_invalid_p here to add clobbers if required. */
4449 ret = insn_invalid_p (emit_insn (insn), false);
4450 gcc_assert (!ret);
4451
4452 /* Emit ALC instruction pattern. */
4453 op_res = gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4454 gen_rtx_REG (cc_mode, CC_REGNUM),
4455 const0_rtx);
4456
4457 if (src != const0_rtx)
4458 {
4459 if (!register_operand (src, GET_MODE (dst)))
4460 src = force_reg (GET_MODE (dst), src);
4461
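	  /* Build (carry + SRC) + 0; the extra zero addend is presumably
	     needed so the expression matches the three-addend
	     add-with-carry shape of the alc patterns in s390.md.  */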
4462 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, src);
4463 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, const0_rtx);
4464 }
4465
4466 p = rtvec_alloc (2);
4467 RTVEC_ELT (p, 0) =
4468 gen_rtx_SET (VOIDmode, dst, op_res);
4469 RTVEC_ELT (p, 1) =
4470 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4471 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
4472
4473 return true;
4474 }
4475
4476 /* Try SUBTRACT LOGICAL WITH BORROW. */
4477 if (increment == constm1_rtx)
4478 {
4479 /* Determine CC mode to use. */
4480 if (cmp_code == EQ || cmp_code == NE)
4481 {
4482 if (cmp_op1 != const0_rtx)
4483 {
4484 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
4485 NULL_RTX, 0, OPTAB_WIDEN);
4486 cmp_op1 = const0_rtx;
4487 }
4488
4489 cmp_code = cmp_code == EQ ? LEU : GTU;
4490 }
4491
4492 if (cmp_code == GTU || cmp_code == GEU)
4493 {
4494 rtx tem = cmp_op0;
4495 cmp_op0 = cmp_op1;
4496 cmp_op1 = tem;
4497 cmp_code = swap_condition (cmp_code);
4498 }
4499
4500 switch (cmp_code)
4501 {
4502 case LEU:
4503 cc_mode = CCUmode;
4504 break;
4505
4506 case LTU:
4507 cc_mode = CCL3mode;
4508 break;
4509
4510 default:
4511 return false;
4512 }
4513
4514 /* Emit comparison instruction pattern. */
4515 if (!register_operand (cmp_op0, cmp_mode))
4516 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4517
4518 insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
4519 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4520 /* We use insn_invalid_p here to add clobbers if required. */
4521 ret = insn_invalid_p (emit_insn (insn), false);
4522 gcc_assert (!ret);
4523
4524 /* Emit SLB instruction pattern. */
4525 if (!register_operand (src, GET_MODE (dst)))
4526 src = force_reg (GET_MODE (dst), src);
4527
4528 op_res = gen_rtx_MINUS (GET_MODE (dst),
4529 gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
4530 gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4531 gen_rtx_REG (cc_mode, CC_REGNUM),
4532 const0_rtx));
4533 p = rtvec_alloc (2);
4534 RTVEC_ELT (p, 0) =
4535 gen_rtx_SET (VOIDmode, dst, op_res);
4536 RTVEC_ELT (p, 1) =
4537 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4538 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
4539
4540 return true;
4541 }
4542
4543 return false;
4544 }
4545
4546 /* Expand code for the insv template. Return true if successful. */
4547
4548 bool
4549 s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
4550 {
4551 int bitsize = INTVAL (op1);
4552 int bitpos = INTVAL (op2);
4553 enum machine_mode mode = GET_MODE (dest);
4554 enum machine_mode smode;
4555 int smode_bsize, mode_bsize;
4556 rtx op, clobber;
4557
4558 /* Generate INSERT IMMEDIATE (IILL et al). */
4559 /* (set (ze (reg)) (const_int)). */
4560 if (TARGET_ZARCH
4561 && register_operand (dest, word_mode)
4562 && (bitpos % 16) == 0
4563 && (bitsize % 16) == 0
4564 && const_int_operand (src, VOIDmode))
4565 {
4566 HOST_WIDE_INT val = INTVAL (src);
4567 int regpos = bitpos + bitsize;
4568
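      /* Insert VAL chunk-wise, starting from the most significant end of
         the field: 16-bit chunks by default, or 32-bit chunks where the
         extended-immediate facility is available and the position
         allows it.  */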
4569 while (regpos > bitpos)
4570 {
4571 enum machine_mode putmode;
4572 int putsize;
4573
4574 if (TARGET_EXTIMM && (regpos % 32 == 0) && (regpos >= bitpos + 32))
4575 putmode = SImode;
4576 else
4577 putmode = HImode;
4578
4579 putsize = GET_MODE_BITSIZE (putmode);
4580 regpos -= putsize;
4581 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
4582 GEN_INT (putsize),
4583 GEN_INT (regpos)),
4584 gen_int_mode (val, putmode));
4585 val >>= putsize;
4586 }
4587 gcc_assert (regpos == bitpos);
4588 return true;
4589 }
4590
4591 smode = smallest_mode_for_size (bitsize, MODE_INT);
4592 smode_bsize = GET_MODE_BITSIZE (smode);
4593 mode_bsize = GET_MODE_BITSIZE (mode);
4594
4595 /* Generate STORE CHARACTERS UNDER MASK (STCM et al). */
4596 if (bitpos == 0
4597 && (bitsize % BITS_PER_UNIT) == 0
4598 && MEM_P (dest)
4599 && (register_operand (src, word_mode)
4600 || const_int_operand (src, VOIDmode)))
4601 {
4602 /* Emit standard pattern if possible. */
4603 if (smode_bsize == bitsize)
4604 {
4605 emit_move_insn (adjust_address (dest, smode, 0),
4606 gen_lowpart (smode, src));
4607 return true;
4608 }
4609
4610 /* (set (ze (mem)) (const_int)). */
4611 else if (const_int_operand (src, VOIDmode))
4612 {
4613 int size = bitsize / BITS_PER_UNIT;
4614 rtx src_mem = adjust_address (force_const_mem (word_mode, src),
4615 BLKmode,
4616 UNITS_PER_WORD - size);
4617
4618 dest = adjust_address (dest, BLKmode, 0);
4619 set_mem_size (dest, size);
4620 s390_expand_movmem (dest, src_mem, GEN_INT (size));
4621 return true;
4622 }
4623
4624 /* (set (ze (mem)) (reg)). */
4625 else if (register_operand (src, word_mode))
4626 {
4627 if (bitsize <= 32)
4628 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, op1,
4629 const0_rtx), src);
4630 else
4631 {
4632 /* Emit st,stcmh sequence. */
4633 int stcmh_width = bitsize - 32;
4634 int size = stcmh_width / BITS_PER_UNIT;
4635
4636 emit_move_insn (adjust_address (dest, SImode, size),
4637 gen_lowpart (SImode, src));
4638 set_mem_size (dest, size);
4639 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
4640 GEN_INT (stcmh_width),
4641 const0_rtx),
4642 gen_rtx_LSHIFTRT (word_mode, src, GEN_INT (32)));
4643 }
4644 return true;
4645 }
4646 }
4647
4648 /* Generate INSERT CHARACTERS UNDER MASK (IC, ICM et al). */
4649 if ((bitpos % BITS_PER_UNIT) == 0
4650 && (bitsize % BITS_PER_UNIT) == 0
4651 && (bitpos & 32) == ((bitpos + bitsize - 1) & 32)
4652 && MEM_P (src)
4653 && (mode == DImode || mode == SImode)
4654 && register_operand (dest, mode))
4655 {
4656 /* Emit a strict_low_part pattern if possible. */
4657 if (smode_bsize == bitsize && bitpos == mode_bsize - smode_bsize)
4658 {
4659 op = gen_rtx_STRICT_LOW_PART (VOIDmode, gen_lowpart (smode, dest));
4660 op = gen_rtx_SET (VOIDmode, op, gen_lowpart (smode, src));
4661 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4662 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
4663 return true;
4664 }
4665
4666 /* ??? There are more powerful versions of ICM that are not
4667 completely represented in the md file. */
4668 }
4669
4670 /* For z10, generate ROTATE THEN INSERT SELECTED BITS (RISBG et al). */
4671 if (TARGET_Z10 && (mode == DImode || mode == SImode))
4672 {
4673 enum machine_mode mode_s = GET_MODE (src);
4674
4675 if (mode_s == VOIDmode)
4676 {
4677 /* Assume const_int etc already in the proper mode. */
4678 src = force_reg (mode, src);
4679 }
4680 else if (mode_s != mode)
4681 {
4682 gcc_assert (GET_MODE_BITSIZE (mode_s) >= bitsize);
4683 src = force_reg (mode_s, src);
4684 src = gen_lowpart (mode, src);
4685 }
4686
4687 op = gen_rtx_SET (mode,
4688 gen_rtx_ZERO_EXTRACT (mode, dest, op1, op2),
4689 src);
4690 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4691 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
4692
4693 return true;
4694 }
4695
4696 return false;
4697 }
4698
4699 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic which returns a
4700 register that holds VAL of mode MODE shifted by COUNT bits. */
4701
4702 static inline rtx
4703 s390_expand_mask_and_shift (rtx val, enum machine_mode mode, rtx count)
4704 {
4705 val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
4706 NULL_RTX, 1, OPTAB_DIRECT);
4707 return expand_simple_binop (SImode, ASHIFT, val, count,
4708 NULL_RTX, 1, OPTAB_DIRECT);
4709 }
4710
4711 /* Structure to hold the initial parameters for a compare_and_swap operation
4712 in HImode and QImode. */
4713
4714 struct alignment_context
4715 {
4716 rtx memsi; /* SI aligned memory location. */
4717 rtx shift; /* Bit offset with regard to lsb. */
4718 rtx modemask; /* Mask of the HQImode shifted by SHIFT bits. */
4719 rtx modemaski; /* ~modemask */
4720   bool aligned;	  /* True if memory is aligned, false otherwise. */
4721 };
4722
4723 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize
4724    structure AC for transparent simplification, in case the memory alignment
4725    is known to be at least 32 bit.  MEM is the memory location for the actual
4726    operation and MODE its mode. */
4727
4728 static void
4729 init_alignment_context (struct alignment_context *ac, rtx mem,
4730 enum machine_mode mode)
4731 {
4732 ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
4733 ac->aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));
4734
4735 if (ac->aligned)
4736 ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned. */
4737 else
4738 {
4739 /* Alignment is unknown. */
4740 rtx byteoffset, addr, align;
4741
4742 /* Force the address into a register. */
4743 addr = force_reg (Pmode, XEXP (mem, 0));
4744
4745 /* Align it to SImode. */
4746 align = expand_simple_binop (Pmode, AND, addr,
4747 GEN_INT (-GET_MODE_SIZE (SImode)),
4748 NULL_RTX, 1, OPTAB_DIRECT);
4749 /* Generate MEM. */
4750 ac->memsi = gen_rtx_MEM (SImode, align);
4751 MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
4752 set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
4753 set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));
4754
4755 /* Calculate shiftcount. */
4756 byteoffset = expand_simple_binop (Pmode, AND, addr,
4757 GEN_INT (GET_MODE_SIZE (SImode) - 1),
4758 NULL_RTX, 1, OPTAB_DIRECT);
4759 /* As we already have some offset, evaluate the remaining distance. */
4760 ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
4761 NULL_RTX, 1, OPTAB_DIRECT);
4762 }
4763
4764 /* Shift is the byte count, but we need the bitcount. */
4765 ac->shift = expand_simple_binop (SImode, ASHIFT, ac->shift, GEN_INT (3),
4766 NULL_RTX, 1, OPTAB_DIRECT);
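  /* Example (s390 is big-endian): a QImode value at byte offset 1 within
     its word gets shift = ((4 - 1) - 1) * 8 = 16, i.e. the value sits in
     bits 16..23 counting from the least significant bit.  */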
4767
4768 /* Calculate masks. */
4769 ac->modemask = expand_simple_binop (SImode, ASHIFT,
4770 GEN_INT (GET_MODE_MASK (mode)),
4771 ac->shift, NULL_RTX, 1, OPTAB_DIRECT);
4772 ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask,
4773 NULL_RTX, 1);
4774 }
4775
4776 /* A subroutine of s390_expand_cs_hqi. Insert INS into VAL. If possible,
4777 use a single insv insn into SEQ2. Otherwise, put prep insns in SEQ1 and
4778 perform the merge in SEQ2. */
4779
4780 static rtx
4781 s390_two_part_insv (struct alignment_context *ac, rtx *seq1, rtx *seq2,
4782 enum machine_mode mode, rtx val, rtx ins)
4783 {
4784 rtx tmp;
4785
4786 if (ac->aligned)
4787 {
4788 start_sequence ();
4789 tmp = copy_to_mode_reg (SImode, val);
4790 if (s390_expand_insv (tmp, GEN_INT (GET_MODE_BITSIZE (mode)),
4791 const0_rtx, ins))
4792 {
4793 *seq1 = NULL;
4794 *seq2 = get_insns ();
4795 end_sequence ();
4796 return tmp;
4797 }
4798 end_sequence ();
4799 }
4800
4801 /* Failed to use insv. Generate a two part shift and mask. */
4802 start_sequence ();
4803 tmp = s390_expand_mask_and_shift (ins, mode, ac->shift);
4804 *seq1 = get_insns ();
4805 end_sequence ();
4806
4807 start_sequence ();
4808 tmp = expand_simple_binop (SImode, IOR, tmp, val, NULL_RTX, 1, OPTAB_DIRECT);
4809 *seq2 = get_insns ();
4810 end_sequence ();
4811
4812 return tmp;
4813 }
4814
4815 /* Expand an atomic compare and swap operation for HImode and QImode. MEM is
4816 the memory location, CMP the old value to compare MEM with and NEW_RTX the
4817 value to set if CMP == MEM. */
4818
4819 void
4820 s390_expand_cs_hqi (enum machine_mode mode, rtx btarget, rtx vtarget, rtx mem,
4821 rtx cmp, rtx new_rtx, bool is_weak)
4822 {
4823 struct alignment_context ac;
4824 rtx cmpv, newv, val, cc, seq0, seq1, seq2, seq3;
4825 rtx res = gen_reg_rtx (SImode);
4826 rtx csloop = NULL, csend = NULL;
4827
4828 gcc_assert (MEM_P (mem));
4829
4830 init_alignment_context (&ac, mem, mode);
4831
4832 /* Load full word. Subsequent loads are performed by CS. */
4833 val = expand_simple_binop (SImode, AND, ac.memsi, ac.modemaski,
4834 NULL_RTX, 1, OPTAB_DIRECT);
4835
4836 /* Prepare insertions of cmp and new_rtx into the loaded value. When
4837 possible, we try to use insv to make this happen efficiently. If
4838 that fails we'll generate code both inside and outside the loop. */
4839 cmpv = s390_two_part_insv (&ac, &seq0, &seq2, mode, val, cmp);
4840 newv = s390_two_part_insv (&ac, &seq1, &seq3, mode, val, new_rtx);
4841
4842 if (seq0)
4843 emit_insn (seq0);
4844 if (seq1)
4845 emit_insn (seq1);
4846
4847 /* Start CS loop. */
4848 if (!is_weak)
4849 {
4850 /* Begin assuming success. */
4851 emit_move_insn (btarget, const1_rtx);
4852
4853 csloop = gen_label_rtx ();
4854 csend = gen_label_rtx ();
4855 emit_label (csloop);
4856 }
4857
4858 /* val = "<mem>00..0<mem>"
4859 * cmp = "00..0<cmp>00..0"
4860 * new = "00..0<new>00..0"
4861 */
4862
4863 emit_insn (seq2);
4864 emit_insn (seq3);
4865
4866 cc = s390_emit_compare_and_swap (EQ, res, ac.memsi, cmpv, newv);
4867 if (is_weak)
4868 emit_insn (gen_cstorecc4 (btarget, cc, XEXP (cc, 0), XEXP (cc, 1)));
4869 else
4870 {
4871 rtx tmp;
4872
4873 /* Jump to end if we're done (likely?). */
4874 s390_emit_jump (csend, cc);
4875
4876 /* Check for changes outside mode, and loop internal if so.
4877 Arrange the moves so that the compare is adjacent to the
4878 branch so that we can generate CRJ. */
4879 tmp = copy_to_reg (val);
4880 force_expand_binop (SImode, and_optab, res, ac.modemaski, val,
4881 1, OPTAB_DIRECT);
4882 cc = s390_emit_compare (NE, val, tmp);
4883 s390_emit_jump (csloop, cc);
4884
4885 /* Failed. */
4886 emit_move_insn (btarget, const0_rtx);
4887 emit_label (csend);
4888 }
4889
4890 /* Return the correct part of the bitfield. */
4891 convert_move (vtarget, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
4892 NULL_RTX, 1, OPTAB_DIRECT), 1);
4893 }
4894
4895 /* Expand an atomic operation CODE of mode MODE. MEM is the memory location
4896 and VAL the value to play with. If AFTER is true then store the value
4897 MEM holds after the operation, if AFTER is false then store the value MEM
4898 holds before the operation. If TARGET is zero then discard that value, else
4899 store it to TARGET. */
4900
4901 void
4902 s390_expand_atomic (enum machine_mode mode, enum rtx_code code,
4903 rtx target, rtx mem, rtx val, bool after)
4904 {
4905 struct alignment_context ac;
4906 rtx cmp;
4907 rtx new_rtx = gen_reg_rtx (SImode);
4908 rtx orig = gen_reg_rtx (SImode);
4909 rtx csloop = gen_label_rtx ();
4910
4911 gcc_assert (!target || register_operand (target, VOIDmode));
4912 gcc_assert (MEM_P (mem));
4913
4914 init_alignment_context (&ac, mem, mode);
4915
4916 /* Shift val to the correct bit positions.
4917 Preserve "icm", but prevent "ex icm". */
4918 if (!(ac.aligned && code == SET && MEM_P (val)))
4919 val = s390_expand_mask_and_shift (val, mode, ac.shift);
4920
4921 /* Further preparation insns. */
4922 if (code == PLUS || code == MINUS)
4923 emit_move_insn (orig, val);
4924 else if (code == MULT || code == AND) /* val = "11..1<val>11..1" */
4925 val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
4926 NULL_RTX, 1, OPTAB_DIRECT);
4927
4928 /* Load full word. Subsequent loads are performed by CS. */
4929 cmp = force_reg (SImode, ac.memsi);
4930
4931 /* Start CS loop. */
4932 emit_label (csloop);
4933 emit_move_insn (new_rtx, cmp);
4934
4935 /* Patch new with val at correct position. */
4936 switch (code)
4937 {
4938 case PLUS:
4939 case MINUS:
4940 val = expand_simple_binop (SImode, code, new_rtx, orig,
4941 NULL_RTX, 1, OPTAB_DIRECT);
4942 val = expand_simple_binop (SImode, AND, val, ac.modemask,
4943 NULL_RTX, 1, OPTAB_DIRECT);
4944 /* FALLTHRU */
4945 case SET:
4946 if (ac.aligned && MEM_P (val))
4947 store_bit_field (new_rtx, GET_MODE_BITSIZE (mode), 0,
4948 0, 0, SImode, val);
4949 else
4950 {
4951 new_rtx = expand_simple_binop (SImode, AND, new_rtx, ac.modemaski,
4952 NULL_RTX, 1, OPTAB_DIRECT);
4953 new_rtx = expand_simple_binop (SImode, IOR, new_rtx, val,
4954 NULL_RTX, 1, OPTAB_DIRECT);
4955 }
4956 break;
4957 case AND:
4958 case IOR:
4959 case XOR:
4960 new_rtx = expand_simple_binop (SImode, code, new_rtx, val,
4961 NULL_RTX, 1, OPTAB_DIRECT);
4962 break;
4963 case MULT: /* NAND */
4964 new_rtx = expand_simple_binop (SImode, AND, new_rtx, val,
4965 NULL_RTX, 1, OPTAB_DIRECT);
4966 new_rtx = expand_simple_binop (SImode, XOR, new_rtx, ac.modemask,
4967 NULL_RTX, 1, OPTAB_DIRECT);
4968 break;
4969 default:
4970 gcc_unreachable ();
4971 }
4972
4973 s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, cmp,
4974 ac.memsi, cmp, new_rtx));
4975
4976 /* Return the correct part of the bitfield. */
4977 if (target)
4978 convert_move (target, expand_simple_binop (SImode, LSHIFTRT,
4979 after ? new_rtx : cmp, ac.shift,
4980 NULL_RTX, 1, OPTAB_DIRECT), 1);
4981 }
4982
4983 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
4984 We need to emit DTP-relative relocations. */
4985
4986 static void s390_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
4987
4988 static void
4989 s390_output_dwarf_dtprel (FILE *file, int size, rtx x)
4990 {
4991 switch (size)
4992 {
4993 case 4:
4994 fputs ("\t.long\t", file);
4995 break;
4996 case 8:
4997 fputs ("\t.quad\t", file);
4998 break;
4999 default:
5000 gcc_unreachable ();
5001 }
5002 output_addr_const (file, x);
5003 fputs ("@DTPOFF", file);
5004 }
5005
5006 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
5007 /* Implement TARGET_MANGLE_TYPE. */
5008
5009 static const char *
5010 s390_mangle_type (const_tree type)
5011 {
5012 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
5013 && TARGET_LONG_DOUBLE_128)
5014 return "g";
5015
5016 /* For all other types, use normal C++ mangling. */
5017 return NULL;
5018 }
5019 #endif
5020
5021 /* In the name of slightly smaller debug output, and to cater to
5022 general assembler lossage, recognize various UNSPEC sequences
5023 and turn them back into a direct symbol reference. */
5024
5025 static rtx
5026 s390_delegitimize_address (rtx orig_x)
5027 {
5028 rtx x, y;
5029
5030 orig_x = delegitimize_mem_from_attrs (orig_x);
5031 x = orig_x;
5032
5033 /* Extract the symbol ref from:
5034 (plus:SI (reg:SI 12 %r12)
5035 (const:SI (unspec:SI [(symbol_ref/f:SI ("*.LC0"))]
5036 UNSPEC_GOTOFF/PLTOFF)))
5037 and
5038 (plus:SI (reg:SI 12 %r12)
5039 (const:SI (plus:SI (unspec:SI [(symbol_ref:SI ("L"))]
5040 UNSPEC_GOTOFF/PLTOFF)
5041 (const_int 4 [0x4])))) */
5042 if (GET_CODE (x) == PLUS
5043 && REG_P (XEXP (x, 0))
5044 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
5045 && GET_CODE (XEXP (x, 1)) == CONST)
5046 {
5047 HOST_WIDE_INT offset = 0;
5048
5049 /* The const operand. */
5050 y = XEXP (XEXP (x, 1), 0);
5051
5052 if (GET_CODE (y) == PLUS
5053 && GET_CODE (XEXP (y, 1)) == CONST_INT)
5054 {
5055 offset = INTVAL (XEXP (y, 1));
5056 y = XEXP (y, 0);
5057 }
5058
5059 if (GET_CODE (y) == UNSPEC
5060 && (XINT (y, 1) == UNSPEC_GOTOFF
5061 || XINT (y, 1) == UNSPEC_PLTOFF))
5062 return plus_constant (Pmode, XVECEXP (y, 0, 0), offset);
5063 }
5064
5065 if (GET_CODE (x) != MEM)
5066 return orig_x;
5067
5068 x = XEXP (x, 0);
5069 if (GET_CODE (x) == PLUS
5070 && GET_CODE (XEXP (x, 1)) == CONST
5071 && GET_CODE (XEXP (x, 0)) == REG
5072 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
5073 {
5074 y = XEXP (XEXP (x, 1), 0);
5075 if (GET_CODE (y) == UNSPEC
5076 && XINT (y, 1) == UNSPEC_GOT)
5077 y = XVECEXP (y, 0, 0);
5078 else
5079 return orig_x;
5080 }
5081 else if (GET_CODE (x) == CONST)
5082 {
5083 /* Extract the symbol ref from:
5084 (mem:QI (const:DI (unspec:DI [(symbol_ref:DI ("foo"))]
5085 UNSPEC_PLT/GOTENT))) */
5086
5087 y = XEXP (x, 0);
5088 if (GET_CODE (y) == UNSPEC
5089 && (XINT (y, 1) == UNSPEC_GOTENT
5090 || XINT (y, 1) == UNSPEC_PLT))
5091 y = XVECEXP (y, 0, 0);
5092 else
5093 return orig_x;
5094 }
5095 else
5096 return orig_x;
5097
5098 if (GET_MODE (orig_x) != Pmode)
5099 {
5100 if (GET_MODE (orig_x) == BLKmode)
5101 return orig_x;
5102 y = lowpart_subreg (GET_MODE (orig_x), y, Pmode);
5103 if (y == NULL_RTX)
5104 return orig_x;
5105 }
5106 return y;
5107 }
5108
5109 /* Output operand OP to stdio stream FILE.
5110 OP is an address (register + offset) which is not used to address data;
5111 instead the rightmost bits are interpreted as the value. */
5112
5113 static void
5114 print_shift_count_operand (FILE *file, rtx op)
5115 {
5116 HOST_WIDE_INT offset;
5117 rtx base;
5118
5119 /* Extract base register and offset. */
5120 if (!s390_decompose_shift_count (op, &base, &offset))
5121 gcc_unreachable ();
5122
5123 /* Sanity check. */
5124 if (base)
5125 {
5126 gcc_assert (GET_CODE (base) == REG);
5127 gcc_assert (REGNO (base) < FIRST_PSEUDO_REGISTER);
5128 gcc_assert (REGNO_REG_CLASS (REGNO (base)) == ADDR_REGS);
5129 }
5130
5131 /* Offsets are constricted to twelve bits. */
5132 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset & ((1 << 12) - 1));
5133 if (base)
5134 fprintf (file, "(%s)", reg_names[REGNO (base)]);
5135 }
5136
5137 /* See 'get_some_local_dynamic_name'. */
5138
5139 static int
5140 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
5141 {
5142 rtx x = *px;
5143
5144 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
5145 {
5146 x = get_pool_constant (x);
5147 return for_each_rtx (&x, get_some_local_dynamic_name_1, 0);
5148 }
5149
5150 if (GET_CODE (x) == SYMBOL_REF
5151 && tls_symbolic_operand (x) == TLS_MODEL_LOCAL_DYNAMIC)
5152 {
5153 cfun->machine->some_ld_name = XSTR (x, 0);
5154 return 1;
5155 }
5156
5157 return 0;
5158 }
5159
5160 /* Locate some local-dynamic symbol still in use by this function
5161 so that we can print its name in local-dynamic base patterns. */
5162
5163 static const char *
5164 get_some_local_dynamic_name (void)
5165 {
5166 rtx insn;
5167
5168 if (cfun->machine->some_ld_name)
5169 return cfun->machine->some_ld_name;
5170
5171 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
5172 if (INSN_P (insn)
5173 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
5174 return cfun->machine->some_ld_name;
5175
5176 gcc_unreachable ();
5177 }
5178
5179 /* Output machine-dependent UNSPECs occurring in address constant X
5180 in assembler syntax to stdio stream FILE. Returns true if the
5181 constant X could be recognized, false otherwise. */
5182
5183 static bool
5184 s390_output_addr_const_extra (FILE *file, rtx x)
5185 {
5186 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
5187 switch (XINT (x, 1))
5188 {
5189 case UNSPEC_GOTENT:
5190 output_addr_const (file, XVECEXP (x, 0, 0));
5191 fprintf (file, "@GOTENT");
5192 return true;
5193 case UNSPEC_GOT:
5194 output_addr_const (file, XVECEXP (x, 0, 0));
5195 fprintf (file, "@GOT");
5196 return true;
5197 case UNSPEC_GOTOFF:
5198 output_addr_const (file, XVECEXP (x, 0, 0));
5199 fprintf (file, "@GOTOFF");
5200 return true;
5201 case UNSPEC_PLT:
5202 output_addr_const (file, XVECEXP (x, 0, 0));
5203 fprintf (file, "@PLT");
5204 return true;
5205 case UNSPEC_PLTOFF:
5206 output_addr_const (file, XVECEXP (x, 0, 0));
5207 fprintf (file, "@PLTOFF");
5208 return true;
5209 case UNSPEC_TLSGD:
5210 output_addr_const (file, XVECEXP (x, 0, 0));
5211 fprintf (file, "@TLSGD");
5212 return true;
5213 case UNSPEC_TLSLDM:
5214 assemble_name (file, get_some_local_dynamic_name ());
5215 fprintf (file, "@TLSLDM");
5216 return true;
5217 case UNSPEC_DTPOFF:
5218 output_addr_const (file, XVECEXP (x, 0, 0));
5219 fprintf (file, "@DTPOFF");
5220 return true;
5221 case UNSPEC_NTPOFF:
5222 output_addr_const (file, XVECEXP (x, 0, 0));
5223 fprintf (file, "@NTPOFF");
5224 return true;
5225 case UNSPEC_GOTNTPOFF:
5226 output_addr_const (file, XVECEXP (x, 0, 0));
5227 fprintf (file, "@GOTNTPOFF");
5228 return true;
5229 case UNSPEC_INDNTPOFF:
5230 output_addr_const (file, XVECEXP (x, 0, 0));
5231 fprintf (file, "@INDNTPOFF");
5232 return true;
5233 }
5234
5235 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 2)
5236 switch (XINT (x, 1))
5237 {
5238 case UNSPEC_POOL_OFFSET:
5239 x = gen_rtx_MINUS (GET_MODE (x), XVECEXP (x, 0, 0), XVECEXP (x, 0, 1));
5240 output_addr_const (file, x);
5241 return true;
5242 }
5243 return false;
5244 }
5245
5246 /* Output address operand ADDR in assembler syntax to
5247 stdio stream FILE. */
5248
5249 void
5250 print_operand_address (FILE *file, rtx addr)
5251 {
5252 struct s390_address ad;
5253
5254 if (s390_symref_operand_p (addr, NULL, NULL))
5255 {
5256 if (!TARGET_Z10)
5257 {
5258 output_operand_lossage ("symbolic memory references are "
5259 "only supported on z10 or later");
5260 return;
5261 }
5262 output_addr_const (file, addr);
5263 return;
5264 }
5265
5266 if (!s390_decompose_address (addr, &ad)
5267 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5268 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
5269 output_operand_lossage ("cannot decompose address");
5270
5271 if (ad.disp)
5272 output_addr_const (file, ad.disp);
5273 else
5274 fprintf (file, "0");
5275
5276 if (ad.base && ad.indx)
5277 fprintf (file, "(%s,%s)", reg_names[REGNO (ad.indx)],
5278 reg_names[REGNO (ad.base)]);
5279 else if (ad.base)
5280 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
5281 }
5282
5283 /* Output operand X in assembler syntax to stdio stream FILE.
5284 CODE specified the format flag. The following format flags
5285 are recognized:
5286
5287 'C': print opcode suffix for branch condition.
5288 'D': print opcode suffix for inverse branch condition.
5289 'E': print opcode suffix for branch on index instruction.
5290 'J': print tls_load/tls_gdcall/tls_ldcall suffix
5291 'G': print the size of the operand in bytes.
5292 'O': print only the displacement of a memory reference.
5293 'R': print only the base register of a memory reference.
5294 'S': print S-type memory reference (base+displacement).
5295 'N': print the second word of a DImode operand.
5296 'M': print the second word of a TImode operand.
5297 'Y': print shift count operand.
5298
5299 'b': print integer X as if it's an unsigned byte.
5300     'c': print integer X as if it's a signed byte.
5301 'x': print integer X as if it's an unsigned halfword.
5302 'h': print integer X as if it's a signed halfword.
5303 'i': print the first nonzero HImode part of X.
5304 'j': print the first HImode part unequal to -1 of X.
5305 'k': print the first nonzero SImode part of X.
5306 'm': print the first SImode part unequal to -1 of X.
5307 'o': print integer X as if it's an unsigned 32bit word. */
5308
5309 void
5310 print_operand (FILE *file, rtx x, int code)
5311 {
5312 switch (code)
5313 {
5314 case 'C':
5315 fprintf (file, s390_branch_condition_mnemonic (x, FALSE));
5316 return;
5317
5318 case 'D':
5319 fprintf (file, s390_branch_condition_mnemonic (x, TRUE));
5320 return;
5321
5322 case 'E':
5323 if (GET_CODE (x) == LE)
5324 fprintf (file, "l");
5325 else if (GET_CODE (x) == GT)
5326 fprintf (file, "h");
5327 else
5328 output_operand_lossage ("invalid comparison operator "
5329 "for 'E' output modifier");
5330 return;
5331
5332 case 'J':
5333 if (GET_CODE (x) == SYMBOL_REF)
5334 {
5335 fprintf (file, "%s", ":tls_load:");
5336 output_addr_const (file, x);
5337 }
5338 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
5339 {
5340 fprintf (file, "%s", ":tls_gdcall:");
5341 output_addr_const (file, XVECEXP (x, 0, 0));
5342 }
5343 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM)
5344 {
5345 fprintf (file, "%s", ":tls_ldcall:");
5346 assemble_name (file, get_some_local_dynamic_name ());
5347 }
5348 else
5349 output_operand_lossage ("invalid reference for 'J' output modifier");
5350 return;
5351
5352 case 'G':
5353 fprintf (file, "%u", GET_MODE_SIZE (GET_MODE (x)));
5354 return;
5355
5356 case 'O':
5357 {
5358 struct s390_address ad;
5359 int ret;
5360
5361 if (!MEM_P (x))
5362 {
5363 output_operand_lossage ("memory reference expected for "
5364 "'O' output modifier");
5365 return;
5366 }
5367
5368 ret = s390_decompose_address (XEXP (x, 0), &ad);
5369
5370 if (!ret
5371 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5372 || ad.indx)
5373 {
5374 output_operand_lossage ("invalid address for 'O' output modifier");
5375 return;
5376 }
5377
5378 if (ad.disp)
5379 output_addr_const (file, ad.disp);
5380 else
5381 fprintf (file, "0");
5382 }
5383 return;
5384
5385 case 'R':
5386 {
5387 struct s390_address ad;
5388 int ret;
5389
5390 if (!MEM_P (x))
5391 {
5392 output_operand_lossage ("memory reference expected for "
5393 "'R' output modifier");
5394 return;
5395 }
5396
5397 ret = s390_decompose_address (XEXP (x, 0), &ad);
5398
5399 if (!ret
5400 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5401 || ad.indx)
5402 {
5403 output_operand_lossage ("invalid address for 'R' output modifier");
5404 return;
5405 }
5406
5407 if (ad.base)
5408 fprintf (file, "%s", reg_names[REGNO (ad.base)]);
5409 else
5410 fprintf (file, "0");
5411 }
5412 return;
5413
5414 case 'S':
5415 {
5416 struct s390_address ad;
5417 int ret;
5418
5419 if (!MEM_P (x))
5420 {
5421 output_operand_lossage ("memory reference expected for "
5422 "'S' output modifier");
5423 return;
5424 }
5425 ret = s390_decompose_address (XEXP (x, 0), &ad);
5426
5427 if (!ret
5428 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5429 || ad.indx)
5430 {
5431 output_operand_lossage ("invalid address for 'S' output modifier");
5432 return;
5433 }
5434
5435 if (ad.disp)
5436 output_addr_const (file, ad.disp);
5437 else
5438 fprintf (file, "0");
5439
5440 if (ad.base)
5441 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
5442 }
5443 return;
5444
5445 case 'N':
5446 if (GET_CODE (x) == REG)
5447 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
5448 else if (GET_CODE (x) == MEM)
5449 x = change_address (x, VOIDmode,
5450 plus_constant (Pmode, XEXP (x, 0), 4));
5451 else
5452 output_operand_lossage ("register or memory expression expected "
5453 "for 'N' output modifier");
5454 break;
5455
5456 case 'M':
5457 if (GET_CODE (x) == REG)
5458 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
5459 else if (GET_CODE (x) == MEM)
5460 x = change_address (x, VOIDmode,
5461 plus_constant (Pmode, XEXP (x, 0), 8));
5462 else
5463 output_operand_lossage ("register or memory expression expected "
5464 "for 'M' output modifier");
5465 break;
5466
5467 case 'Y':
5468 print_shift_count_operand (file, x);
5469 return;
5470 }
5471
5472 switch (GET_CODE (x))
5473 {
5474 case REG:
5475 fprintf (file, "%s", reg_names[REGNO (x)]);
5476 break;
5477
5478 case MEM:
5479 output_address (XEXP (x, 0));
5480 break;
5481
5482 case CONST:
5483 case CODE_LABEL:
5484 case LABEL_REF:
5485 case SYMBOL_REF:
5486 output_addr_const (file, x);
5487 break;
5488
5489 case CONST_INT:
5490 if (code == 'b')
5491 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xff);
5492 else if (code == 'c')
5493 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ((INTVAL (x) & 0xff) ^ 0x80) - 0x80);
5494 else if (code == 'x')
5495 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xffff);
5496 else if (code == 'h')
5497 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
5498 else if (code == 'i')
5499 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5500 s390_extract_part (x, HImode, 0));
5501 else if (code == 'j')
5502 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5503 s390_extract_part (x, HImode, -1));
5504 else if (code == 'k')
5505 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5506 s390_extract_part (x, SImode, 0));
5507 else if (code == 'm')
5508 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5509 s390_extract_part (x, SImode, -1));
5510 else if (code == 'o')
5511 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xffffffff);
5512 else
5513 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
5514 break;
5515
5516 case CONST_DOUBLE:
5517 gcc_assert (GET_MODE (x) == VOIDmode);
5518 if (code == 'b')
5519 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xff);
5520 else if (code == 'x')
5521 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xffff);
5522 else if (code == 'h')
5523 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5524 ((CONST_DOUBLE_LOW (x) & 0xffff) ^ 0x8000) - 0x8000);
5525 else
5526 {
5527 if (code == 0)
5528 output_operand_lossage ("invalid constant - try using "
5529 "an output modifier");
5530 else
5531 output_operand_lossage ("invalid constant for output modifier '%c'",
5532 code);
5533 }
5534 break;
5535
5536 default:
5537 if (code == 0)
5538 output_operand_lossage ("invalid expression - try using "
5539 "an output modifier");
5540 else
5541 output_operand_lossage ("invalid expression for output "
5542 "modifier '%c'", code);
5543 break;
5544 }
5545 }
5546
5547 /* Target hook for assembling integer objects. We need to define it
5548    here to work around a bug in some versions of GAS, which couldn't
5549 handle values smaller than INT_MIN when printed in decimal. */
5550
5551 static bool
5552 s390_assemble_integer (rtx x, unsigned int size, int aligned_p)
5553 {
5554 if (size == 8 && aligned_p
5555 && GET_CODE (x) == CONST_INT && INTVAL (x) < INT_MIN)
5556 {
5557 fprintf (asm_out_file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n",
5558 INTVAL (x));
5559 return true;
5560 }
5561 return default_assemble_integer (x, size, aligned_p);
5562 }
5563
5564 /* Returns true if register REGNO is used for forming
5565 a memory address in expression X. */
5566
5567 static bool
5568 reg_used_in_mem_p (int regno, rtx x)
5569 {
5570 enum rtx_code code = GET_CODE (x);
5571 int i, j;
5572 const char *fmt;
5573
5574 if (code == MEM)
5575 {
5576 if (refers_to_regno_p (regno, regno+1,
5577 XEXP (x, 0), 0))
5578 return true;
5579 }
5580 else if (code == SET
5581 && GET_CODE (SET_DEST (x)) == PC)
5582 {
5583 if (refers_to_regno_p (regno, regno+1,
5584 SET_SRC (x), 0))
5585 return true;
5586 }
5587
5588 fmt = GET_RTX_FORMAT (code);
5589 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5590 {
5591 if (fmt[i] == 'e'
5592 && reg_used_in_mem_p (regno, XEXP (x, i)))
5593 return true;
5594
5595 else if (fmt[i] == 'E')
5596 for (j = 0; j < XVECLEN (x, i); j++)
5597 if (reg_used_in_mem_p (regno, XVECEXP (x, i, j)))
5598 return true;
5599 }
5600 return false;
5601 }
5602
5603 /* Returns true if expression DEP_RTX sets an address register
5604 used by instruction INSN to address memory. */
5605
5606 static bool
5607 addr_generation_dependency_p (rtx dep_rtx, rtx insn)
5608 {
5609 rtx target, pat;
5610
5611 if (GET_CODE (dep_rtx) == INSN)
5612 dep_rtx = PATTERN (dep_rtx);
5613
5614 if (GET_CODE (dep_rtx) == SET)
5615 {
5616 target = SET_DEST (dep_rtx);
5617 if (GET_CODE (target) == STRICT_LOW_PART)
5618 target = XEXP (target, 0);
5619 while (GET_CODE (target) == SUBREG)
5620 target = SUBREG_REG (target);
5621
5622 if (GET_CODE (target) == REG)
5623 {
5624 int regno = REGNO (target);
5625
5626 if (s390_safe_attr_type (insn) == TYPE_LA)
5627 {
5628 pat = PATTERN (insn);
5629 if (GET_CODE (pat) == PARALLEL)
5630 {
5631 gcc_assert (XVECLEN (pat, 0) == 2);
5632 pat = XVECEXP (pat, 0, 0);
5633 }
5634 gcc_assert (GET_CODE (pat) == SET);
5635 return refers_to_regno_p (regno, regno+1, SET_SRC (pat), 0);
5636 }
5637 else if (get_attr_atype (insn) == ATYPE_AGEN)
5638 return reg_used_in_mem_p (regno, PATTERN (insn));
5639 }
5640 }
5641 return false;
5642 }
5643
5644 /* Return 1 if DEP_INSN sets a register used by INSN in the agen unit. */
5645
5646 int
5647 s390_agen_dep_p (rtx dep_insn, rtx insn)
5648 {
5649 rtx dep_rtx = PATTERN (dep_insn);
5650 int i;
5651
5652 if (GET_CODE (dep_rtx) == SET
5653 && addr_generation_dependency_p (dep_rtx, insn))
5654 return 1;
5655 else if (GET_CODE (dep_rtx) == PARALLEL)
5656 {
5657 for (i = 0; i < XVECLEN (dep_rtx, 0); i++)
5658 {
5659 if (addr_generation_dependency_p (XVECEXP (dep_rtx, 0, i), insn))
5660 return 1;
5661 }
5662 }
5663 return 0;
5664 }
5665
5666
5667 /* A C statement (sans semicolon) to update the integer scheduling priority
5668 INSN_PRIORITY (INSN). Increase the priority to execute the INSN earlier,
5669 reduce the priority to execute INSN later. Do not define this macro if
5670 you do not need to adjust the scheduling priorities of insns.
5671
5672 A STD instruction should be scheduled earlier,
5673 in order to use the bypass. */
5674 static int
5675 s390_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
5676 {
5677 if (! INSN_P (insn))
5678 return priority;
5679
5680 if (s390_tune != PROCESSOR_2084_Z990
5681 && s390_tune != PROCESSOR_2094_Z9_109
5682 && s390_tune != PROCESSOR_2097_Z10
5683 && s390_tune != PROCESSOR_2817_Z196)
5684 return priority;
5685
5686 switch (s390_safe_attr_type (insn))
5687 {
5688 case TYPE_FSTOREDF:
5689 case TYPE_FSTORESF:
5690 priority = priority << 3;
5691 break;
5692 case TYPE_STORE:
5693 case TYPE_STM:
5694 priority = priority << 1;
5695 break;
5696 default:
5697 break;
5698 }
5699 return priority;
5700 }
5701
5702
5703 /* The number of instructions that can be issued per cycle. */
5704
5705 static int
5706 s390_issue_rate (void)
5707 {
5708 switch (s390_tune)
5709 {
5710 case PROCESSOR_2084_Z990:
5711 case PROCESSOR_2094_Z9_109:
5712 case PROCESSOR_2817_Z196:
5713 return 3;
5714 case PROCESSOR_2097_Z10:
5715 return 2;
5716 default:
5717 return 1;
5718 }
5719 }
5720
5721 static int
5722 s390_first_cycle_multipass_dfa_lookahead (void)
5723 {
5724 return 4;
5725 }
5726
5727 /* Annotate every literal pool reference in X by an UNSPEC_LTREF expression.
5728 Fix up MEMs as required. */
5729
5730 static void
5731 annotate_constant_pool_refs (rtx *x)
5732 {
5733 int i, j;
5734 const char *fmt;
5735
5736 gcc_assert (GET_CODE (*x) != SYMBOL_REF
5737 || !CONSTANT_POOL_ADDRESS_P (*x));
5738
5739 /* Literal pool references can only occur inside a MEM ... */
5740 if (GET_CODE (*x) == MEM)
5741 {
5742 rtx memref = XEXP (*x, 0);
5743
5744 if (GET_CODE (memref) == SYMBOL_REF
5745 && CONSTANT_POOL_ADDRESS_P (memref))
5746 {
5747 rtx base = cfun->machine->base_reg;
5748 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, memref, base),
5749 UNSPEC_LTREF);
5750
5751 *x = replace_equiv_address (*x, addr);
5752 return;
5753 }
5754
5755 if (GET_CODE (memref) == CONST
5756 && GET_CODE (XEXP (memref, 0)) == PLUS
5757 && GET_CODE (XEXP (XEXP (memref, 0), 1)) == CONST_INT
5758 && GET_CODE (XEXP (XEXP (memref, 0), 0)) == SYMBOL_REF
5759 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (memref, 0), 0)))
5760 {
5761 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (memref, 0), 1));
5762 rtx sym = XEXP (XEXP (memref, 0), 0);
5763 rtx base = cfun->machine->base_reg;
5764 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5765 UNSPEC_LTREF);
5766
5767 *x = replace_equiv_address (*x, plus_constant (Pmode, addr, off));
5768 return;
5769 }
5770 }
5771
5772 /* ... or a load-address type pattern. */
5773 if (GET_CODE (*x) == SET)
5774 {
5775 rtx addrref = SET_SRC (*x);
5776
5777 if (GET_CODE (addrref) == SYMBOL_REF
5778 && CONSTANT_POOL_ADDRESS_P (addrref))
5779 {
5780 rtx base = cfun->machine->base_reg;
5781 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addrref, base),
5782 UNSPEC_LTREF);
5783
5784 SET_SRC (*x) = addr;
5785 return;
5786 }
5787
5788 if (GET_CODE (addrref) == CONST
5789 && GET_CODE (XEXP (addrref, 0)) == PLUS
5790 && GET_CODE (XEXP (XEXP (addrref, 0), 1)) == CONST_INT
5791 && GET_CODE (XEXP (XEXP (addrref, 0), 0)) == SYMBOL_REF
5792 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (addrref, 0), 0)))
5793 {
5794 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (addrref, 0), 1));
5795 rtx sym = XEXP (XEXP (addrref, 0), 0);
5796 rtx base = cfun->machine->base_reg;
5797 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5798 UNSPEC_LTREF);
5799
5800 SET_SRC (*x) = plus_constant (Pmode, addr, off);
5801 return;
5802 }
5803 }
5804
5805 /* Annotate LTREL_BASE as well. */
5806 if (GET_CODE (*x) == UNSPEC
5807 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
5808 {
5809 rtx base = cfun->machine->base_reg;
5810 *x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XVECEXP (*x, 0, 0), base),
5811 UNSPEC_LTREL_BASE);
5812 return;
5813 }
5814
5815 fmt = GET_RTX_FORMAT (GET_CODE (*x));
5816 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
5817 {
5818 if (fmt[i] == 'e')
5819 {
5820 annotate_constant_pool_refs (&XEXP (*x, i));
5821 }
5822 else if (fmt[i] == 'E')
5823 {
5824 for (j = 0; j < XVECLEN (*x, i); j++)
5825 annotate_constant_pool_refs (&XVECEXP (*x, i, j));
5826 }
5827 }
5828 }
5829
5830 /* Split all branches that exceed the maximum distance.
5831 Returns true if this created a new literal pool entry. */
5832
5833 static int
5834 s390_split_branches (void)
5835 {
5836 rtx temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
5837 int new_literal = 0, ret;
5838 rtx insn, pat, tmp, target;
5839 rtx *label;
5840
5841 /* We need correct insn addresses. */
5842
5843 shorten_branches (get_insns ());
5844
5845 /* Find all branches that exceed 64KB, and split them. */
5846
5847 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5848 {
5849 if (GET_CODE (insn) != JUMP_INSN)
5850 continue;
5851
5852 pat = PATTERN (insn);
5853 if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 2)
5854 pat = XVECEXP (pat, 0, 0);
5855 if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
5856 continue;
5857
5858 if (GET_CODE (SET_SRC (pat)) == LABEL_REF)
5859 {
5860 label = &SET_SRC (pat);
5861 }
5862 else if (GET_CODE (SET_SRC (pat)) == IF_THEN_ELSE)
5863 {
5864 if (GET_CODE (XEXP (SET_SRC (pat), 1)) == LABEL_REF)
5865 label = &XEXP (SET_SRC (pat), 1);
5866 else if (GET_CODE (XEXP (SET_SRC (pat), 2)) == LABEL_REF)
5867 label = &XEXP (SET_SRC (pat), 2);
5868 else
5869 continue;
5870 }
5871 else
5872 continue;
5873
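      /* A branch of at most 4 bytes uses the 16-bit relative form, which
	 reaches targets within +-64KB; only longer, out-of-range branches
	 need to load their target address from the literal pool.  */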
5874 if (get_attr_length (insn) <= 4)
5875 continue;
5876
5877 /* We are going to use the return register as scratch register,
5878 make sure it will be saved/restored by the prologue/epilogue. */
5879 cfun_frame_layout.save_return_addr_p = 1;
5880
5881 if (!flag_pic)
5882 {
5883 new_literal = 1;
5884 tmp = force_const_mem (Pmode, *label);
5885 tmp = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, tmp), insn);
5886 INSN_ADDRESSES_NEW (tmp, -1);
5887 annotate_constant_pool_refs (&PATTERN (tmp));
5888
5889 target = temp_reg;
5890 }
5891 else
5892 {
5893 new_literal = 1;
5894 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, *label),
5895 UNSPEC_LTREL_OFFSET);
5896 target = gen_rtx_CONST (Pmode, target);
5897 target = force_const_mem (Pmode, target);
5898 tmp = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, target), insn);
5899 INSN_ADDRESSES_NEW (tmp, -1);
5900 annotate_constant_pool_refs (&PATTERN (tmp));
5901
5902 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XEXP (target, 0),
5903 cfun->machine->base_reg),
5904 UNSPEC_LTREL_BASE);
5905 target = gen_rtx_PLUS (Pmode, temp_reg, target);
5906 }
5907
5908 ret = validate_change (insn, label, target, 0);
5909 gcc_assert (ret);
5910 }
5911
5912 return new_literal;
5913 }
5914
5915
5916 /* Find an annotated literal pool symbol referenced in RTX X,
5917 and store it at REF. Will abort if X contains references to
5918 more than one such pool symbol; multiple references to the same
5919 symbol are allowed, however.
5920
5921 The rtx pointed to by REF must be initialized to NULL_RTX
5922 by the caller before calling this routine. */
5923
5924 static void
5925 find_constant_pool_ref (rtx x, rtx *ref)
5926 {
5927 int i, j;
5928 const char *fmt;
5929
5930 /* Ignore LTREL_BASE references. */
5931 if (GET_CODE (x) == UNSPEC
5932 && XINT (x, 1) == UNSPEC_LTREL_BASE)
5933 return;
5934 /* Likewise POOL_ENTRY insns. */
5935 if (GET_CODE (x) == UNSPEC_VOLATILE
5936 && XINT (x, 1) == UNSPECV_POOL_ENTRY)
5937 return;
5938
5939 gcc_assert (GET_CODE (x) != SYMBOL_REF
5940 || !CONSTANT_POOL_ADDRESS_P (x));
5941
5942 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_LTREF)
5943 {
5944 rtx sym = XVECEXP (x, 0, 0);
5945 gcc_assert (GET_CODE (sym) == SYMBOL_REF
5946 && CONSTANT_POOL_ADDRESS_P (sym));
5947
5948 if (*ref == NULL_RTX)
5949 *ref = sym;
5950 else
5951 gcc_assert (*ref == sym);
5952
5953 return;
5954 }
5955
5956 fmt = GET_RTX_FORMAT (GET_CODE (x));
5957 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5958 {
5959 if (fmt[i] == 'e')
5960 {
5961 find_constant_pool_ref (XEXP (x, i), ref);
5962 }
5963 else if (fmt[i] == 'E')
5964 {
5965 for (j = 0; j < XVECLEN (x, i); j++)
5966 find_constant_pool_ref (XVECEXP (x, i, j), ref);
5967 }
5968 }
5969 }
5970
5971 /* Replace every reference to the annotated literal pool
5972 symbol REF in X by its base plus OFFSET. */
5973
5974 static void
5975 replace_constant_pool_ref (rtx *x, rtx ref, rtx offset)
5976 {
5977 int i, j;
5978 const char *fmt;
5979
5980 gcc_assert (*x != ref);
5981
5982 if (GET_CODE (*x) == UNSPEC
5983 && XINT (*x, 1) == UNSPEC_LTREF
5984 && XVECEXP (*x, 0, 0) == ref)
5985 {
5986 *x = gen_rtx_PLUS (Pmode, XVECEXP (*x, 0, 1), offset);
5987 return;
5988 }
5989
5990 if (GET_CODE (*x) == PLUS
5991 && GET_CODE (XEXP (*x, 1)) == CONST_INT
5992 && GET_CODE (XEXP (*x, 0)) == UNSPEC
5993 && XINT (XEXP (*x, 0), 1) == UNSPEC_LTREF
5994 && XVECEXP (XEXP (*x, 0), 0, 0) == ref)
5995 {
5996 rtx addr = gen_rtx_PLUS (Pmode, XVECEXP (XEXP (*x, 0), 0, 1), offset);
5997 *x = plus_constant (Pmode, addr, INTVAL (XEXP (*x, 1)));
5998 return;
5999 }
6000
6001 fmt = GET_RTX_FORMAT (GET_CODE (*x));
6002 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
6003 {
6004 if (fmt[i] == 'e')
6005 {
6006 replace_constant_pool_ref (&XEXP (*x, i), ref, offset);
6007 }
6008 else if (fmt[i] == 'E')
6009 {
6010 for (j = 0; j < XVECLEN (*x, i); j++)
6011 replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, offset);
6012 }
6013 }
6014 }
6015
6016 /* Check whether X contains an UNSPEC_LTREL_BASE.
6017 Return its constant pool symbol if found, NULL_RTX otherwise. */
6018
6019 static rtx
6020 find_ltrel_base (rtx x)
6021 {
6022 int i, j;
6023 const char *fmt;
6024
6025 if (GET_CODE (x) == UNSPEC
6026 && XINT (x, 1) == UNSPEC_LTREL_BASE)
6027 return XVECEXP (x, 0, 0);
6028
6029 fmt = GET_RTX_FORMAT (GET_CODE (x));
6030 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6031 {
6032 if (fmt[i] == 'e')
6033 {
6034 rtx fnd = find_ltrel_base (XEXP (x, i));
6035 if (fnd)
6036 return fnd;
6037 }
6038 else if (fmt[i] == 'E')
6039 {
6040 for (j = 0; j < XVECLEN (x, i); j++)
6041 {
6042 rtx fnd = find_ltrel_base (XVECEXP (x, i, j));
6043 if (fnd)
6044 return fnd;
6045 }
6046 }
6047 }
6048
6049 return NULL_RTX;
6050 }
6051
6052 /* Replace any occurrence of UNSPEC_LTREL_BASE in X with its base. */
6053
6054 static void
6055 replace_ltrel_base (rtx *x)
6056 {
6057 int i, j;
6058 const char *fmt;
6059
6060 if (GET_CODE (*x) == UNSPEC
6061 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
6062 {
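      /* Operand 1 is the base register that annotate_constant_pool_refs
	 recorded in the UNSPEC.  */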
6063 *x = XVECEXP (*x, 0, 1);
6064 return;
6065 }
6066
6067 fmt = GET_RTX_FORMAT (GET_CODE (*x));
6068 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
6069 {
6070 if (fmt[i] == 'e')
6071 {
6072 replace_ltrel_base (&XEXP (*x, i));
6073 }
6074 else if (fmt[i] == 'E')
6075 {
6076 for (j = 0; j < XVECLEN (*x, i); j++)
6077 replace_ltrel_base (&XVECEXP (*x, i, j));
6078 }
6079 }
6080 }
6081
6082
6083 /* We keep a list of constants which we have to add to internal
6084 constant tables in the middle of large functions. */
6085
6086 #define NR_C_MODES 11
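/* Listed in order of decreasing alignment requirement; s390_dump_pool
   emits the constants in this order so that each entry ends up properly
   aligned.  */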
6087 enum machine_mode constant_modes[NR_C_MODES] =
6088 {
6089 TFmode, TImode, TDmode,
6090 DFmode, DImode, DDmode,
6091 SFmode, SImode, SDmode,
6092 HImode,
6093 QImode
6094 };
6095
6096 struct constant
6097 {
6098 struct constant *next;
6099 rtx value;
6100 rtx label;
6101 };
6102
6103 struct constant_pool
6104 {
6105 struct constant_pool *next;
6106 rtx first_insn;
6107 rtx pool_insn;
6108 bitmap insns;
6109 rtx emit_pool_after;
6110
6111 struct constant *constants[NR_C_MODES];
6112 struct constant *execute;
6113 rtx label;
6114 int size;
6115 };
6116
6117 /* Allocate new constant_pool structure. */
6118
6119 static struct constant_pool *
6120 s390_alloc_pool (void)
6121 {
6122 struct constant_pool *pool;
6123 int i;
6124
6125 pool = (struct constant_pool *) xmalloc (sizeof *pool);
6126 pool->next = NULL;
6127 for (i = 0; i < NR_C_MODES; i++)
6128 pool->constants[i] = NULL;
6129
6130 pool->execute = NULL;
6131 pool->label = gen_label_rtx ();
6132 pool->first_insn = NULL_RTX;
6133 pool->pool_insn = NULL_RTX;
6134 pool->insns = BITMAP_ALLOC (NULL);
6135 pool->size = 0;
6136 pool->emit_pool_after = NULL_RTX;
6137
6138 return pool;
6139 }
6140
6141 /* Create new constant pool covering instructions starting at INSN
6142 and chain it to the end of POOL_LIST. */
6143
6144 static struct constant_pool *
6145 s390_start_pool (struct constant_pool **pool_list, rtx insn)
6146 {
6147 struct constant_pool *pool, **prev;
6148
6149 pool = s390_alloc_pool ();
6150 pool->first_insn = insn;
6151
6152 for (prev = pool_list; *prev; prev = &(*prev)->next)
6153 ;
6154 *prev = pool;
6155
6156 return pool;
6157 }
6158
6159 /* End range of instructions covered by POOL at INSN and emit
6160 placeholder insn representing the pool. */
6161
6162 static void
6163 s390_end_pool (struct constant_pool *pool, rtx insn)
6164 {
6165 rtx pool_size = GEN_INT (pool->size + 8 /* alignment slop */);
6166
6167 if (!insn)
6168 insn = get_last_insn ();
6169
6170 pool->pool_insn = emit_insn_after (gen_pool (pool_size), insn);
6171 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6172 }
6173
6174 /* Add INSN to the list of insns covered by POOL. */
6175
6176 static void
6177 s390_add_pool_insn (struct constant_pool *pool, rtx insn)
6178 {
6179 bitmap_set_bit (pool->insns, INSN_UID (insn));
6180 }
6181
6182 /* Return pool out of POOL_LIST that covers INSN. */
6183
6184 static struct constant_pool *
6185 s390_find_pool (struct constant_pool *pool_list, rtx insn)
6186 {
6187 struct constant_pool *pool;
6188
6189 for (pool = pool_list; pool; pool = pool->next)
6190 if (bitmap_bit_p (pool->insns, INSN_UID (insn)))
6191 break;
6192
6193 return pool;
6194 }
6195
6196 /* Add constant VAL of mode MODE to the constant pool POOL. */
6197
6198 static void
6199 s390_add_constant (struct constant_pool *pool, rtx val, enum machine_mode mode)
6200 {
6201 struct constant *c;
6202 int i;
6203
6204 for (i = 0; i < NR_C_MODES; i++)
6205 if (constant_modes[i] == mode)
6206 break;
6207 gcc_assert (i != NR_C_MODES);
6208
6209 for (c = pool->constants[i]; c != NULL; c = c->next)
6210 if (rtx_equal_p (val, c->value))
6211 break;
6212
6213 if (c == NULL)
6214 {
6215 c = (struct constant *) xmalloc (sizeof *c);
6216 c->value = val;
6217 c->label = gen_label_rtx ();
6218 c->next = pool->constants[i];
6219 pool->constants[i] = c;
6220 pool->size += GET_MODE_SIZE (mode);
6221 }
6222 }
6223
6224 /* Return an rtx that represents the offset of X from the start of
6225 pool POOL. */
6226
6227 static rtx
6228 s390_pool_offset (struct constant_pool *pool, rtx x)
6229 {
6230 rtx label;
6231
6232 label = gen_rtx_LABEL_REF (GET_MODE (x), pool->label);
6233 x = gen_rtx_UNSPEC (GET_MODE (x), gen_rtvec (2, x, label),
6234 UNSPEC_POOL_OFFSET);
6235 return gen_rtx_CONST (GET_MODE (x), x);
6236 }
6237
6238 /* Find constant VAL of mode MODE in the constant pool POOL.
6239 Return an RTX describing the distance from the start of
6240 the pool to the location of the new constant. */
6241
6242 static rtx
6243 s390_find_constant (struct constant_pool *pool, rtx val,
6244 enum machine_mode mode)
6245 {
6246 struct constant *c;
6247 int i;
6248
6249 for (i = 0; i < NR_C_MODES; i++)
6250 if (constant_modes[i] == mode)
6251 break;
6252 gcc_assert (i != NR_C_MODES);
6253
6254 for (c = pool->constants[i]; c != NULL; c = c->next)
6255 if (rtx_equal_p (val, c->value))
6256 break;
6257
6258 gcc_assert (c);
6259
6260 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
6261 }
6262
6263 /* Check whether INSN is an execute. Return the label_ref to its
6264 execute target template if so, NULL_RTX otherwise. */
6265
6266 static rtx
6267 s390_execute_label (rtx insn)
6268 {
6269 if (GET_CODE (insn) == INSN
6270 && GET_CODE (PATTERN (insn)) == PARALLEL
6271 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == UNSPEC
6272 && XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_EXECUTE)
6273 return XVECEXP (XVECEXP (PATTERN (insn), 0, 0), 0, 2);
6274
6275 return NULL_RTX;
6276 }
6277
6278 /* Add execute target for INSN to the constant pool POOL. */
6279
6280 static void
6281 s390_add_execute (struct constant_pool *pool, rtx insn)
6282 {
6283 struct constant *c;
6284
6285 for (c = pool->execute; c != NULL; c = c->next)
6286 if (INSN_UID (insn) == INSN_UID (c->value))
6287 break;
6288
6289 if (c == NULL)
6290 {
6291 c = (struct constant *) xmalloc (sizeof *c);
6292 c->value = insn;
6293 c->label = gen_label_rtx ();
6294 c->next = pool->execute;
6295 pool->execute = c;
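	  /* Reserve space for the target template; 6 bytes is the maximum
	     length of an s390 instruction.  */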
6296 pool->size += 6;
6297 }
6298 }
6299
6300 /* Find execute target for INSN in the constant pool POOL.
6301 Return an RTX describing the distance from the start of
6302 the pool to the location of the execute target. */
6303
6304 static rtx
6305 s390_find_execute (struct constant_pool *pool, rtx insn)
6306 {
6307 struct constant *c;
6308
6309 for (c = pool->execute; c != NULL; c = c->next)
6310 if (INSN_UID (insn) == INSN_UID (c->value))
6311 break;
6312
6313 gcc_assert (c);
6314
6315 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
6316 }
6317
6318 /* For an execute INSN, extract the execute target template. */
6319
6320 static rtx
6321 s390_execute_target (rtx insn)
6322 {
6323 rtx pattern = PATTERN (insn);
6324 gcc_assert (s390_execute_label (insn));
6325
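  /* Element 0 of the PARALLEL is the UNSPEC_EXECUTE itself (see
     s390_execute_label); the remaining elements form the target
     template.  */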
6326 if (XVECLEN (pattern, 0) == 2)
6327 {
6328 pattern = copy_rtx (XVECEXP (pattern, 0, 1));
6329 }
6330 else
6331 {
6332 rtvec vec = rtvec_alloc (XVECLEN (pattern, 0) - 1);
6333 int i;
6334
6335 for (i = 0; i < XVECLEN (pattern, 0) - 1; i++)
6336 RTVEC_ELT (vec, i) = copy_rtx (XVECEXP (pattern, 0, i + 1));
6337
6338 pattern = gen_rtx_PARALLEL (VOIDmode, vec);
6339 }
6340
6341 return pattern;
6342 }
6343
6344 /* Indicate that INSN cannot be duplicated. This is the case for
6345 execute insns that carry a unique label. */
6346
6347 static bool
6348 s390_cannot_copy_insn_p (rtx insn)
6349 {
6350 rtx label = s390_execute_label (insn);
6351 return label && label != const0_rtx;
6352 }
6353
6354 /* Dump out the constants in POOL. If REMOTE_LABEL is true,
6355 do not emit the pool base label. */
6356
6357 static void
6358 s390_dump_pool (struct constant_pool *pool, bool remote_label)
6359 {
6360 struct constant *c;
6361 rtx insn = pool->pool_insn;
6362 int i;
6363
6364 /* Switch to rodata section. */
6365 if (TARGET_CPU_ZARCH)
6366 {
6367 insn = emit_insn_after (gen_pool_section_start (), insn);
6368 INSN_ADDRESSES_NEW (insn, -1);
6369 }
6370
6371 /* Ensure minimum pool alignment. */
6372 if (TARGET_CPU_ZARCH)
6373 insn = emit_insn_after (gen_pool_align (GEN_INT (8)), insn);
6374 else
6375 insn = emit_insn_after (gen_pool_align (GEN_INT (4)), insn);
6376 INSN_ADDRESSES_NEW (insn, -1);
6377
6378 /* Emit pool base label. */
6379 if (!remote_label)
6380 {
6381 insn = emit_label_after (pool->label, insn);
6382 INSN_ADDRESSES_NEW (insn, -1);
6383 }
6384
6385 /* Dump constants in descending alignment requirement order,
6386 ensuring proper alignment for every constant. */
6387 for (i = 0; i < NR_C_MODES; i++)
6388 for (c = pool->constants[i]; c; c = c->next)
6389 {
6390 /* Convert UNSPEC_LTREL_OFFSET unspecs to pool-relative references. */
6391 rtx value = copy_rtx (c->value);
6392 if (GET_CODE (value) == CONST
6393 && GET_CODE (XEXP (value, 0)) == UNSPEC
6394 && XINT (XEXP (value, 0), 1) == UNSPEC_LTREL_OFFSET
6395 && XVECLEN (XEXP (value, 0), 0) == 1)
6396 value = s390_pool_offset (pool, XVECEXP (XEXP (value, 0), 0, 0));
6397
6398 insn = emit_label_after (c->label, insn);
6399 INSN_ADDRESSES_NEW (insn, -1);
6400
6401 value = gen_rtx_UNSPEC_VOLATILE (constant_modes[i],
6402 gen_rtvec (1, value),
6403 UNSPECV_POOL_ENTRY);
6404 insn = emit_insn_after (value, insn);
6405 INSN_ADDRESSES_NEW (insn, -1);
6406 }
6407
6408 /* Ensure minimum alignment for instructions. */
6409 insn = emit_insn_after (gen_pool_align (GEN_INT (2)), insn);
6410 INSN_ADDRESSES_NEW (insn, -1);
6411
6412 /* Output in-pool execute template insns. */
6413 for (c = pool->execute; c; c = c->next)
6414 {
6415 insn = emit_label_after (c->label, insn);
6416 INSN_ADDRESSES_NEW (insn, -1);
6417
6418 insn = emit_insn_after (s390_execute_target (c->value), insn);
6419 INSN_ADDRESSES_NEW (insn, -1);
6420 }
6421
6422 /* Switch back to previous section. */
6423 if (TARGET_CPU_ZARCH)
6424 {
6425 insn = emit_insn_after (gen_pool_section_end (), insn);
6426 INSN_ADDRESSES_NEW (insn, -1);
6427 }
6428
6429 insn = emit_barrier_after (insn);
6430 INSN_ADDRESSES_NEW (insn, -1);
6431
6432 /* Remove placeholder insn. */
6433 remove_insn (pool->pool_insn);
6434 }
6435
6436 /* Free all memory used by POOL. */
6437
6438 static void
6439 s390_free_pool (struct constant_pool *pool)
6440 {
6441 struct constant *c, *next;
6442 int i;
6443
6444 for (i = 0; i < NR_C_MODES; i++)
6445 for (c = pool->constants[i]; c; c = next)
6446 {
6447 next = c->next;
6448 free (c);
6449 }
6450
6451 for (c = pool->execute; c; c = next)
6452 {
6453 next = c->next;
6454 free (c);
6455 }
6456
6457 BITMAP_FREE (pool->insns);
6458 free (pool);
6459 }
6460
6461
6462 /* Collect main literal pool. Return NULL on overflow. */
6463
6464 static struct constant_pool *
6465 s390_mainpool_start (void)
6466 {
6467 struct constant_pool *pool;
6468 rtx insn;
6469
6470 pool = s390_alloc_pool ();
6471
6472 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6473 {
6474 if (GET_CODE (insn) == INSN
6475 && GET_CODE (PATTERN (insn)) == SET
6476 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC_VOLATILE
6477 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPECV_MAIN_POOL)
6478 {
6479 gcc_assert (!pool->pool_insn);
6480 pool->pool_insn = insn;
6481 }
6482
6483 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
6484 {
6485 s390_add_execute (pool, insn);
6486 }
6487 else if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6488 {
6489 rtx pool_ref = NULL_RTX;
6490 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6491 if (pool_ref)
6492 {
6493 rtx constant = get_pool_constant (pool_ref);
6494 enum machine_mode mode = get_pool_mode (pool_ref);
6495 s390_add_constant (pool, constant, mode);
6496 }
6497 }
6498
6499 /* If hot/cold partitioning is enabled, we have to make sure that
6500 the literal pool is emitted in the same section where the
6501 initialization of the literal pool base pointer takes place.
6502 emit_pool_after is only used in the non-overflow case on
6503 non-z CPUs, where we can emit the literal pool at the end of
6504 the function body within the text section. */
6505 if (NOTE_P (insn)
6506 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS
6507 && !pool->emit_pool_after)
6508 pool->emit_pool_after = PREV_INSN (insn);
6509 }
6510
6511 gcc_assert (pool->pool_insn || pool->size == 0);
6512
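  /* All constants must be reachable from the pool base register via a
     12-bit unsigned displacement, i.e. within 4096 bytes.  */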
6513 if (pool->size >= 4096)
6514 {
6515 /* We're going to chunkify the pool, so remove the main
6516 pool placeholder insn. */
6517 remove_insn (pool->pool_insn);
6518
6519 s390_free_pool (pool);
6520 pool = NULL;
6521 }
6522
6523 /* If the function ends with the section where the literal pool
6524 should be emitted, set the marker to its end. */
6525 if (pool && !pool->emit_pool_after)
6526 pool->emit_pool_after = get_last_insn ();
6527
6528 return pool;
6529 }
6530
6531 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6532 Modify the current function to output the pool constants as well as
6533 the pool register setup instruction. */
6534
6535 static void
6536 s390_mainpool_finish (struct constant_pool *pool)
6537 {
6538 rtx base_reg = cfun->machine->base_reg;
6539 rtx insn;
6540
6541 /* If the pool is empty, we're done. */
6542 if (pool->size == 0)
6543 {
6544 /* We don't actually need a base register after all. */
6545 cfun->machine->base_reg = NULL_RTX;
6546
6547 if (pool->pool_insn)
6548 remove_insn (pool->pool_insn);
6549 s390_free_pool (pool);
6550 return;
6551 }
6552
6553 /* We need correct insn addresses. */
6554 shorten_branches (get_insns ());
6555
6556 /* On zSeries, we use a LARL to load the pool register. The pool is
6557 located in the .rodata section, so we emit it after the function. */
6558 if (TARGET_CPU_ZARCH)
6559 {
6560 insn = gen_main_base_64 (base_reg, pool->label);
6561 insn = emit_insn_after (insn, pool->pool_insn);
6562 INSN_ADDRESSES_NEW (insn, -1);
6563 remove_insn (pool->pool_insn);
6564
6565 insn = get_last_insn ();
6566 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6567 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6568
6569 s390_dump_pool (pool, 0);
6570 }
6571
6572 /* On S/390, if the total size of the function's code plus literal pool
6573 does not exceed 4096 bytes, we use BASR to set up a function base
6574 pointer, and emit the literal pool at the end of the function. */
6575 else if (INSN_ADDRESSES (INSN_UID (pool->emit_pool_after))
6576 + pool->size + 8 /* alignment slop */ < 4096)
6577 {
6578 insn = gen_main_base_31_small (base_reg, pool->label);
6579 insn = emit_insn_after (insn, pool->pool_insn);
6580 INSN_ADDRESSES_NEW (insn, -1);
6581 remove_insn (pool->pool_insn);
6582
6583 insn = emit_label_after (pool->label, insn);
6584 INSN_ADDRESSES_NEW (insn, -1);
6585
6586 /* emit_pool_after will be set by s390_mainpool_start to the
6587 last insn of the section where the literal pool should be
6588 emitted. */
6589 insn = pool->emit_pool_after;
6590
6591 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6592 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6593
6594 s390_dump_pool (pool, 1);
6595 }
6596
6597 /* Otherwise, we emit an inline literal pool and use BASR to branch
6598 over it, setting up the pool register at the same time. */
6599 else
6600 {
6601 rtx pool_end = gen_label_rtx ();
6602
6603 insn = gen_main_base_31_large (base_reg, pool->label, pool_end);
6604 insn = emit_jump_insn_after (insn, pool->pool_insn);
6605 JUMP_LABEL (insn) = pool_end;
6606 INSN_ADDRESSES_NEW (insn, -1);
6607 remove_insn (pool->pool_insn);
6608
6609 insn = emit_label_after (pool->label, insn);
6610 INSN_ADDRESSES_NEW (insn, -1);
6611
6612 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6613 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6614
6615 insn = emit_label_after (pool_end, pool->pool_insn);
6616 INSN_ADDRESSES_NEW (insn, -1);
6617
6618 s390_dump_pool (pool, 1);
6619 }
6620
6621
6622 /* Replace all literal pool references. */
6623
6624 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6625 {
6626 if (INSN_P (insn))
6627 replace_ltrel_base (&PATTERN (insn));
6628
6629 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6630 {
6631 rtx addr, pool_ref = NULL_RTX;
6632 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6633 if (pool_ref)
6634 {
6635 if (s390_execute_label (insn))
6636 addr = s390_find_execute (pool, insn);
6637 else
6638 addr = s390_find_constant (pool, get_pool_constant (pool_ref),
6639 get_pool_mode (pool_ref));
6640
6641 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
6642 INSN_CODE (insn) = -1;
6643 }
6644 }
6645 }
6646
6647
6648 /* Free the pool. */
6649 s390_free_pool (pool);
6650 }
6651
6652 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6653 We have decided we cannot use this pool, so revert all changes
6654 to the current function that were done by s390_mainpool_start. */
6655 static void
6656 s390_mainpool_cancel (struct constant_pool *pool)
6657 {
6658 /* We didn't actually change the instruction stream, so simply
6659 free the pool memory. */
6660 s390_free_pool (pool);
6661 }
6662
6663
6664 /* Chunkify the literal pool. */
6665
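/* Lower and upper bounds on the size of a pool chunk.  A chunk is ended
   at the next BARRIER once it has reached the minimum size, and is
   forced to end once it exceeds the maximum; both stay well below the
   4 KB a 12-bit displacement can address, leaving headroom for alignment
   padding and for the base reload insns added later.  */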
6666 #define S390_POOL_CHUNK_MIN 0xc00
6667 #define S390_POOL_CHUNK_MAX 0xe00
6668
6669 static struct constant_pool *
6670 s390_chunkify_start (void)
6671 {
6672 struct constant_pool *curr_pool = NULL, *pool_list = NULL;
6673 int extra_size = 0;
6674 bitmap far_labels;
6675 rtx pending_ltrel = NULL_RTX;
6676 rtx insn;
6677
6678 rtx (*gen_reload_base) (rtx, rtx) =
6679 TARGET_CPU_ZARCH? gen_reload_base_64 : gen_reload_base_31;
6680
6681
6682 /* We need correct insn addresses. */
6683
6684 shorten_branches (get_insns ());
6685
6686 /* Scan all insns and move literals to pool chunks. */
6687
6688 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6689 {
6690 bool section_switch_p = false;
6691
6692 /* Check for pending LTREL_BASE. */
6693 if (INSN_P (insn))
6694 {
6695 rtx ltrel_base = find_ltrel_base (PATTERN (insn));
6696 if (ltrel_base)
6697 {
6698 gcc_assert (ltrel_base == pending_ltrel);
6699 pending_ltrel = NULL_RTX;
6700 }
6701 }
6702
6703 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
6704 {
6705 if (!curr_pool)
6706 curr_pool = s390_start_pool (&pool_list, insn);
6707
6708 s390_add_execute (curr_pool, insn);
6709 s390_add_pool_insn (curr_pool, insn);
6710 }
6711 else if (GET_CODE (insn) == INSN || CALL_P (insn))
6712 {
6713 rtx pool_ref = NULL_RTX;
6714 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6715 if (pool_ref)
6716 {
6717 rtx constant = get_pool_constant (pool_ref);
6718 enum machine_mode mode = get_pool_mode (pool_ref);
6719
6720 if (!curr_pool)
6721 curr_pool = s390_start_pool (&pool_list, insn);
6722
6723 s390_add_constant (curr_pool, constant, mode);
6724 s390_add_pool_insn (curr_pool, insn);
6725
6726 /* Don't split the pool chunk between a LTREL_OFFSET load
6727 and the corresponding LTREL_BASE. */
6728 if (GET_CODE (constant) == CONST
6729 && GET_CODE (XEXP (constant, 0)) == UNSPEC
6730 && XINT (XEXP (constant, 0), 1) == UNSPEC_LTREL_OFFSET)
6731 {
6732 gcc_assert (!pending_ltrel);
6733 pending_ltrel = pool_ref;
6734 }
6735 }
6736 }
6737
6738 if (GET_CODE (insn) == JUMP_INSN || GET_CODE (insn) == CODE_LABEL)
6739 {
6740 if (curr_pool)
6741 s390_add_pool_insn (curr_pool, insn);
6742 /* An LTREL_BASE must follow within the same basic block. */
6743 gcc_assert (!pending_ltrel);
6744 }
6745
6746 if (NOTE_P (insn))
6747 switch (NOTE_KIND (insn))
6748 {
6749 case NOTE_INSN_SWITCH_TEXT_SECTIONS:
6750 section_switch_p = true;
6751 break;
6752 case NOTE_INSN_VAR_LOCATION:
6753 case NOTE_INSN_CALL_ARG_LOCATION:
6754 continue;
6755 default:
6756 break;
6757 }
6758
6759 if (!curr_pool
6760 || INSN_ADDRESSES_SIZE () <= (size_t) INSN_UID (insn)
6761 || INSN_ADDRESSES (INSN_UID (insn)) == -1)
6762 continue;
6763
6764 if (TARGET_CPU_ZARCH)
6765 {
6766 if (curr_pool->size < S390_POOL_CHUNK_MAX)
6767 continue;
6768
6769 s390_end_pool (curr_pool, NULL_RTX);
6770 curr_pool = NULL;
6771 }
6772 else
6773 {
6774 int chunk_size = INSN_ADDRESSES (INSN_UID (insn))
6775 - INSN_ADDRESSES (INSN_UID (curr_pool->first_insn))
6776 + extra_size;
6777
6778 /* We will later have to insert base register reload insns.
6779 Those will have an effect on code size, which we need to
6780 consider here. This calculation makes rather pessimistic
6781 worst-case assumptions. */
6782 if (GET_CODE (insn) == CODE_LABEL)
6783 extra_size += 6;
6784
6785 if (chunk_size < S390_POOL_CHUNK_MIN
6786 && curr_pool->size < S390_POOL_CHUNK_MIN
6787 && !section_switch_p)
6788 continue;
6789
6790 /* Pool chunks can only be inserted after BARRIERs ... */
6791 if (GET_CODE (insn) == BARRIER)
6792 {
6793 s390_end_pool (curr_pool, insn);
6794 curr_pool = NULL;
6795 extra_size = 0;
6796 }
6797
6798 /* ... so if we don't find one in time, create one. */
6799 else if (chunk_size > S390_POOL_CHUNK_MAX
6800 || curr_pool->size > S390_POOL_CHUNK_MAX
6801 || section_switch_p)
6802 {
6803 rtx label, jump, barrier, next, prev;
6804
6805 if (!section_switch_p)
6806 {
6807 /* We can insert the barrier only after a 'real' insn. */
6808 if (GET_CODE (insn) != INSN && GET_CODE (insn) != CALL_INSN)
6809 continue;
6810 if (get_attr_length (insn) == 0)
6811 continue;
6812 /* Don't separate LTREL_BASE from the corresponding
6813 LTREL_OFFSET load. */
6814 if (pending_ltrel)
6815 continue;
6816 next = insn;
6817 do
6818 {
6819 insn = next;
6820 next = NEXT_INSN (insn);
6821 }
6822 while (next
6823 && NOTE_P (next)
6824 && (NOTE_KIND (next) == NOTE_INSN_VAR_LOCATION
6825 || NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION));
6826 }
6827 else
6828 {
6829 gcc_assert (!pending_ltrel);
6830
6831 /* The old pool has to end before the section switch
6832 note in order to make it part of the current
6833 section. */
6834 insn = PREV_INSN (insn);
6835 }
6836
6837 label = gen_label_rtx ();
6838 prev = insn;
6839 if (prev && NOTE_P (prev))
6840 prev = prev_nonnote_insn (prev);
6841 if (prev)
6842 jump = emit_jump_insn_after_setloc (gen_jump (label), insn,
6843 INSN_LOCATOR (prev));
6844 else
6845 jump = emit_jump_insn_after_noloc (gen_jump (label), insn);
6846 barrier = emit_barrier_after (jump);
6847 insn = emit_label_after (label, barrier);
6848 JUMP_LABEL (jump) = label;
6849 LABEL_NUSES (label) = 1;
6850
6851 INSN_ADDRESSES_NEW (jump, -1);
6852 INSN_ADDRESSES_NEW (barrier, -1);
6853 INSN_ADDRESSES_NEW (insn, -1);
6854
6855 s390_end_pool (curr_pool, barrier);
6856 curr_pool = NULL;
6857 extra_size = 0;
6858 }
6859 }
6860 }
6861
6862 if (curr_pool)
6863 s390_end_pool (curr_pool, NULL_RTX);
6864 gcc_assert (!pending_ltrel);
6865
6866 /* Find all labels that are branched into
6867 from an insn belonging to a different chunk. */
6868
6869 far_labels = BITMAP_ALLOC (NULL);
6870
6871 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6872 {
6873 /* Labels marked with LABEL_PRESERVE_P can be the target
6874 of non-local jumps, so we have to mark them.
6875 The same holds for named labels.
6876
6877 Don't do that, however, if it is the label before
6878 a jump table. */
6879
6880 if (GET_CODE (insn) == CODE_LABEL
6881 && (LABEL_PRESERVE_P (insn) || LABEL_NAME (insn)))
6882 {
6883 rtx vec_insn = next_real_insn (insn);
6884 rtx vec_pat = vec_insn && GET_CODE (vec_insn) == JUMP_INSN ?
6885 PATTERN (vec_insn) : NULL_RTX;
6886 if (!vec_pat
6887 || !(GET_CODE (vec_pat) == ADDR_VEC
6888 || GET_CODE (vec_pat) == ADDR_DIFF_VEC))
6889 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (insn));
6890 }
6891
6892 /* If we have a direct jump (conditional or unconditional)
6893 or a casesi jump, check all potential targets. */
6894 else if (GET_CODE (insn) == JUMP_INSN)
6895 {
6896 rtx pat = PATTERN (insn);
6897 if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 2)
6898 pat = XVECEXP (pat, 0, 0);
6899
6900 if (GET_CODE (pat) == SET)
6901 {
6902 rtx label = JUMP_LABEL (insn);
6903 if (label)
6904 {
6905 if (s390_find_pool (pool_list, label)
6906 != s390_find_pool (pool_list, insn))
6907 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
6908 }
6909 }
6910 else if (GET_CODE (pat) == PARALLEL
6911 && XVECLEN (pat, 0) == 2
6912 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
6913 && GET_CODE (XVECEXP (pat, 0, 1)) == USE
6914 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == LABEL_REF)
6915 {
6916 /* Find the jump table used by this casesi jump. */
6917 rtx vec_label = XEXP (XEXP (XVECEXP (pat, 0, 1), 0), 0);
6918 rtx vec_insn = next_real_insn (vec_label);
6919 rtx vec_pat = vec_insn && GET_CODE (vec_insn) == JUMP_INSN ?
6920 PATTERN (vec_insn) : NULL_RTX;
6921 if (vec_pat
6922 && (GET_CODE (vec_pat) == ADDR_VEC
6923 || GET_CODE (vec_pat) == ADDR_DIFF_VEC))
6924 {
6925 int i, diff_p = GET_CODE (vec_pat) == ADDR_DIFF_VEC;
6926
6927 for (i = 0; i < XVECLEN (vec_pat, diff_p); i++)
6928 {
6929 rtx label = XEXP (XVECEXP (vec_pat, diff_p, i), 0);
6930
6931 if (s390_find_pool (pool_list, label)
6932 != s390_find_pool (pool_list, insn))
6933 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
6934 }
6935 }
6936 }
6937 }
6938 }
6939
6940 /* Insert base register reload insns before every pool. */
6941
6942 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
6943 {
6944 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
6945 curr_pool->label);
6946 rtx insn = curr_pool->first_insn;
6947 INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
6948 }
6949
6950 /* Insert base register reload insns at every far label. */
6951
6952 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6953 if (GET_CODE (insn) == CODE_LABEL
6954 && bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
6955 {
6956 struct constant_pool *pool = s390_find_pool (pool_list, insn);
6957 if (pool)
6958 {
6959 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
6960 pool->label);
6961 INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
6962 }
6963 }
6964
6965
6966 BITMAP_FREE (far_labels);
6967
6968
6969 /* Recompute insn addresses. */
6970
6971 init_insn_lengths ();
6972 shorten_branches (get_insns ());
6973
6974 return pool_list;
6975 }
6976
6977 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
6978 After we have decided to use this list, finish implementing
6979 all changes to the current function as required. */
6980
6981 static void
6982 s390_chunkify_finish (struct constant_pool *pool_list)
6983 {
6984 struct constant_pool *curr_pool = NULL;
6985 rtx insn;
6986
6987
6988 /* Replace all literal pool references. */
6989
6990 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6991 {
6992 if (INSN_P (insn))
6993 replace_ltrel_base (&PATTERN (insn));
6994
6995 curr_pool = s390_find_pool (pool_list, insn);
6996 if (!curr_pool)
6997 continue;
6998
6999 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
7000 {
7001 rtx addr, pool_ref = NULL_RTX;
7002 find_constant_pool_ref (PATTERN (insn), &pool_ref);
7003 if (pool_ref)
7004 {
7005 if (s390_execute_label (insn))
7006 addr = s390_find_execute (curr_pool, insn);
7007 else
7008 addr = s390_find_constant (curr_pool,
7009 get_pool_constant (pool_ref),
7010 get_pool_mode (pool_ref));
7011
7012 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
7013 INSN_CODE (insn) = -1;
7014 }
7015 }
7016 }
7017
7018 /* Dump out all literal pools. */
7019
7020 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
7021 s390_dump_pool (curr_pool, 0);
7022
7023 /* Free pool list. */
7024
7025 while (pool_list)
7026 {
7027 struct constant_pool *next = pool_list->next;
7028 s390_free_pool (pool_list);
7029 pool_list = next;
7030 }
7031 }
7032
7033 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
7034 We have decided we cannot use this list, so revert all changes
7035 to the current function that were done by s390_chunkify_start. */
7036
7037 static void
7038 s390_chunkify_cancel (struct constant_pool *pool_list)
7039 {
7040 struct constant_pool *curr_pool = NULL;
7041 rtx insn;
7042
7043 /* Remove all pool placeholder insns. */
7044
7045 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
7046 {
7047 /* Did we insert an extra barrier? Remove it. */
7048 rtx barrier = PREV_INSN (curr_pool->pool_insn);
7049 rtx jump = barrier? PREV_INSN (barrier) : NULL_RTX;
7050 rtx label = NEXT_INSN (curr_pool->pool_insn);
7051
7052 if (jump && GET_CODE (jump) == JUMP_INSN
7053 && barrier && GET_CODE (barrier) == BARRIER
7054 && label && GET_CODE (label) == CODE_LABEL
7055 && GET_CODE (PATTERN (jump)) == SET
7056 && SET_DEST (PATTERN (jump)) == pc_rtx
7057 && GET_CODE (SET_SRC (PATTERN (jump))) == LABEL_REF
7058 && XEXP (SET_SRC (PATTERN (jump)), 0) == label)
7059 {
7060 remove_insn (jump);
7061 remove_insn (barrier);
7062 remove_insn (label);
7063 }
7064
7065 remove_insn (curr_pool->pool_insn);
7066 }
7067
7068 /* Remove all base register reload insns. */
7069
7070 for (insn = get_insns (); insn; )
7071 {
7072 rtx next_insn = NEXT_INSN (insn);
7073
7074 if (GET_CODE (insn) == INSN
7075 && GET_CODE (PATTERN (insn)) == SET
7076 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
7077 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_RELOAD_BASE)
7078 remove_insn (insn);
7079
7080 insn = next_insn;
7081 }
7082
7083 /* Free pool list. */
7084
7085 while (pool_list)
7086 {
7087 struct constant_pool *next = pool_list->next;
7088 s390_free_pool (pool_list);
7089 pool_list = next;
7090 }
7091 }
7092
7093 /* Output the constant pool entry EXP in mode MODE with alignment ALIGN. */
7094
7095 void
7096 s390_output_pool_entry (rtx exp, enum machine_mode mode, unsigned int align)
7097 {
7098 REAL_VALUE_TYPE r;
7099
7100 switch (GET_MODE_CLASS (mode))
7101 {
7102 case MODE_FLOAT:
7103 case MODE_DECIMAL_FLOAT:
7104 gcc_assert (GET_CODE (exp) == CONST_DOUBLE);
7105
7106 REAL_VALUE_FROM_CONST_DOUBLE (r, exp);
7107 assemble_real (r, mode, align);
7108 break;
7109
7110 case MODE_INT:
7111 assemble_integer (exp, GET_MODE_SIZE (mode), align, 1);
7112 mark_symbol_refs_as_used (exp);
7113 break;
7114
7115 default:
7116 gcc_unreachable ();
7117 }
7118 }
7119
7120
7121 /* Return an RTL expression representing the value of the return address
7122 for the frame COUNT steps up from the current frame. FRAME is the
7123 frame pointer of that frame. */
7124
7125 rtx
7126 s390_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
7127 {
7128 int offset;
7129 rtx addr;
7130
7131 /* Without backchain, we fail for all but the current frame. */
7132
7133 if (!TARGET_BACKCHAIN && count > 0)
7134 return NULL_RTX;
7135
7136 /* For the current frame, we need to make sure the initial
7137 value of RETURN_REGNUM is actually saved. */
7138
7139 if (count == 0)
7140 {
7141 /* On non-z architectures branch splitting could overwrite r14. */
7142 if (TARGET_CPU_ZARCH)
7143 return get_hard_reg_initial_val (Pmode, RETURN_REGNUM);
7144 else
7145 {
7146 cfun_frame_layout.save_return_addr_p = true;
7147 return gen_rtx_MEM (Pmode, return_address_pointer_rtx);
7148 }
7149 }
7150
7151 if (TARGET_PACKED_STACK)
7152 offset = -2 * UNITS_PER_LONG;
7153 else
7154 offset = RETURN_REGNUM * UNITS_PER_LONG;
7155
7156 addr = plus_constant (Pmode, frame, offset);
7157 addr = memory_address (Pmode, addr);
7158 return gen_rtx_MEM (Pmode, addr);
7159 }
7160
7161 /* Return an RTL expression representing the back chain stored in
7162 the current stack frame. */
7163
7164 rtx
7165 s390_back_chain_rtx (void)
7166 {
7167 rtx chain;
7168
7169 gcc_assert (TARGET_BACKCHAIN);
7170
7171 if (TARGET_PACKED_STACK)
7172 chain = plus_constant (Pmode, stack_pointer_rtx,
7173 STACK_POINTER_OFFSET - UNITS_PER_LONG);
7174 else
7175 chain = stack_pointer_rtx;
7176
7177 chain = gen_rtx_MEM (Pmode, chain);
7178 return chain;
7179 }
7180
7181 /* Find the first call-clobbered register unused in a function.
7182 This could be used as base register in a leaf function
7183 or for holding the return address before the epilogue. */
7184
7185 static int
7186 find_unused_clobbered_reg (void)
7187 {
7188 int i;
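  /* Only r0..r5 qualify: they are call-clobbered, while r6..r15 are
     either call-saved or have a dedicated use (literal pool base,
     return address, stack pointer).  */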
7189 for (i = 0; i < 6; i++)
7190 if (!df_regs_ever_live_p (i))
7191 return i;
7192 return 0;
7193 }
7194
7195
7196 /* Helper function for s390_regs_ever_clobbered. Sets the fields in DATA for all
7197 clobbered hard regs in SETREG. */
7198
7199 static void
7200 s390_reg_clobbered_rtx (rtx setreg, const_rtx set_insn ATTRIBUTE_UNUSED, void *data)
7201 {
7202 int *regs_ever_clobbered = (int *)data;
7203 unsigned int i, regno;
7204 enum machine_mode mode = GET_MODE (setreg);
7205
7206 if (GET_CODE (setreg) == SUBREG)
7207 {
7208 rtx inner = SUBREG_REG (setreg);
7209 if (!GENERAL_REG_P (inner))
7210 return;
7211 regno = subreg_regno (setreg);
7212 }
7213 else if (GENERAL_REG_P (setreg))
7214 regno = REGNO (setreg);
7215 else
7216 return;
7217
7218 for (i = regno;
7219 i < regno + HARD_REGNO_NREGS (regno, mode);
7220 i++)
7221 regs_ever_clobbered[i] = 1;
7222 }
7223
7224 /* Walks through all basic blocks of the current function looking
7225 for clobbered hard regs using s390_reg_clobbered_rtx. The fields
7226 of the passed integer array REGS_EVER_CLOBBERED are set to one for
7227 each of those regs. */
7228
7229 static void
7230 s390_regs_ever_clobbered (int *regs_ever_clobbered)
7231 {
7232 basic_block cur_bb;
7233 rtx cur_insn;
7234 unsigned int i;
7235
7236 memset (regs_ever_clobbered, 0, 16 * sizeof (int));
7237
7238 /* For non-leaf functions we have to consider all call clobbered regs to be
7239 clobbered. */
7240 if (!crtl->is_leaf)
7241 {
7242 for (i = 0; i < 16; i++)
7243 regs_ever_clobbered[i] = call_really_used_regs[i];
7244 }
7245
7246 /* Make the "magic" eh_return registers live if necessary. For regs_ever_live
7247 this work is done by liveness analysis (mark_regs_live_at_end).
7248 Special care is needed for functions containing landing pads. Landing pads
7249 may use the eh registers, but the code which sets these registers is not
7250 contained in that function. Hence s390_regs_ever_clobbered is not able to
7251 deal with this automatically. */
7252 if (crtl->calls_eh_return || cfun->machine->has_landing_pad_p)
7253 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM ; i++)
7254 if (crtl->calls_eh_return
7255 || (cfun->machine->has_landing_pad_p
7256 && df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
7257 regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;
7258
7259 /* For nonlocal gotos all call-saved registers have to be saved.
7260 This flag is also set for the unwinding code in libgcc.
7261 See expand_builtin_unwind_init. For regs_ever_live this is done by
7262 reload. */
7263 if (cfun->has_nonlocal_label)
7264 for (i = 0; i < 16; i++)
7265 if (!call_really_used_regs[i])
7266 regs_ever_clobbered[i] = 1;
7267
7268 FOR_EACH_BB (cur_bb)
7269 {
7270 FOR_BB_INSNS (cur_bb, cur_insn)
7271 {
7272 if (INSN_P (cur_insn))
7273 note_stores (PATTERN (cur_insn),
7274 s390_reg_clobbered_rtx,
7275 regs_ever_clobbered);
7276 }
7277 }
7278 }
7279
7280 /* Determine the frame area which actually has to be accessed
7281 in the function epilogue. The values are stored at the
7282 given pointers AREA_BOTTOM (address of the lowest used stack
7283 address) and AREA_TOP (address of the first item which does
7284 not belong to the stack frame). */
7285
7286 static void
7287 s390_frame_area (int *area_bottom, int *area_top)
7288 {
7289 int b, t;
7290 int i;
7291
7292 b = INT_MAX;
7293 t = INT_MIN;
7294
7295 if (cfun_frame_layout.first_restore_gpr != -1)
7296 {
7297 b = (cfun_frame_layout.gprs_offset
7298 + cfun_frame_layout.first_restore_gpr * UNITS_PER_LONG);
7299 t = b + (cfun_frame_layout.last_restore_gpr
7300 - cfun_frame_layout.first_restore_gpr + 1) * UNITS_PER_LONG;
7301 }
7302
7303 if (TARGET_64BIT && cfun_save_high_fprs_p)
7304 {
7305 b = MIN (b, cfun_frame_layout.f8_offset);
7306 t = MAX (t, (cfun_frame_layout.f8_offset
7307 + cfun_frame_layout.high_fprs * 8));
7308 }
7309
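  /* In 31-bit mode, fpr bits 2 and 3 stand for f4 and f6, the
     call-saved FPRs of that ABI (see s390_register_info).  */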
7310 if (!TARGET_64BIT)
7311 for (i = 2; i < 4; i++)
7312 if (cfun_fpr_bit_p (i))
7313 {
7314 b = MIN (b, cfun_frame_layout.f4_offset + (i - 2) * 8);
7315 t = MAX (t, cfun_frame_layout.f4_offset + (i - 1) * 8);
7316 }
7317
7318 *area_bottom = b;
7319 *area_top = t;
7320 }
7321
7322 /* Fill cfun->machine with info about register usage of current function.
7323 Return in CLOBBERED_REGS which GPRs are currently considered set. */
7324
7325 static void
7326 s390_register_info (int clobbered_regs[])
7327 {
7328 int i, j;
7329
7330 /* FPRs 8-15 are call-saved in the 64-bit ABI. */
7331 cfun_frame_layout.fpr_bitmap = 0;
7332 cfun_frame_layout.high_fprs = 0;
7333 if (TARGET_64BIT)
7334 for (i = 24; i < 32; i++)
7335 if (df_regs_ever_live_p (i) && !global_regs[i])
7336 {
7337 cfun_set_fpr_bit (i - 16);
7338 cfun_frame_layout.high_fprs++;
7339 }
7340
7341 /* Find first and last gpr to be saved. We trust regs_ever_live
7342 data, except that we don't save and restore global registers.
7343
7344 Also, all registers with special meaning to the compiler need
7345 extra handling. */
7346
7347 s390_regs_ever_clobbered (clobbered_regs);
7348
7349 for (i = 0; i < 16; i++)
7350 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i] && !fixed_regs[i];
7351
7352 if (frame_pointer_needed)
7353 clobbered_regs[HARD_FRAME_POINTER_REGNUM] = 1;
7354
7355 if (flag_pic)
7356 clobbered_regs[PIC_OFFSET_TABLE_REGNUM]
7357 |= df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM);
7358
7359 clobbered_regs[BASE_REGNUM]
7360 |= (cfun->machine->base_reg
7361 && REGNO (cfun->machine->base_reg) == BASE_REGNUM);
7362
7363 clobbered_regs[RETURN_REGNUM]
7364 |= (!crtl->is_leaf
7365 || TARGET_TPF_PROFILING
7366 || cfun->machine->split_branches_pending_p
7367 || cfun_frame_layout.save_return_addr_p
7368 || crtl->calls_eh_return
7369 || cfun->stdarg);
7370
7371 clobbered_regs[STACK_POINTER_REGNUM]
7372 |= (!crtl->is_leaf
7373 || TARGET_TPF_PROFILING
7374 || cfun_save_high_fprs_p
7375 || get_frame_size () > 0
7376 || cfun->calls_alloca
7377 || cfun->stdarg);
7378
7379 for (i = 6; i < 16; i++)
7380 if (df_regs_ever_live_p (i) || clobbered_regs[i])
7381 break;
7382 for (j = 15; j > i; j--)
7383 if (df_regs_ever_live_p (j) || clobbered_regs[j])
7384 break;
7385
7386 if (i == 16)
7387 {
7388 /* Nothing to save/restore. */
7389 cfun_frame_layout.first_save_gpr_slot = -1;
7390 cfun_frame_layout.last_save_gpr_slot = -1;
7391 cfun_frame_layout.first_save_gpr = -1;
7392 cfun_frame_layout.first_restore_gpr = -1;
7393 cfun_frame_layout.last_save_gpr = -1;
7394 cfun_frame_layout.last_restore_gpr = -1;
7395 }
7396 else
7397 {
7398 /* Save slots for gprs from i to j. */
7399 cfun_frame_layout.first_save_gpr_slot = i;
7400 cfun_frame_layout.last_save_gpr_slot = j;
7401
7402 for (i = cfun_frame_layout.first_save_gpr_slot;
7403 i < cfun_frame_layout.last_save_gpr_slot + 1;
7404 i++)
7405 if (clobbered_regs[i])
7406 break;
7407
7408 for (j = cfun_frame_layout.last_save_gpr_slot; j > i; j--)
7409 if (clobbered_regs[j])
7410 break;
7411
7412 if (i == cfun_frame_layout.last_save_gpr_slot + 1)
7413 {
7414 /* Nothing to save/restore. */
7415 cfun_frame_layout.first_save_gpr = -1;
7416 cfun_frame_layout.first_restore_gpr = -1;
7417 cfun_frame_layout.last_save_gpr = -1;
7418 cfun_frame_layout.last_restore_gpr = -1;
7419 }
7420 else
7421 {
7422 /* Save / Restore from gpr i to j. */
7423 cfun_frame_layout.first_save_gpr = i;
7424 cfun_frame_layout.first_restore_gpr = i;
7425 cfun_frame_layout.last_save_gpr = j;
7426 cfun_frame_layout.last_restore_gpr = j;
7427 }
7428 }
7429
7430 if (cfun->stdarg)
7431 {
7432 /* Varargs functions need to save gprs 2 to 6. */
7433 if (cfun->va_list_gpr_size
7434 && crtl->args.info.gprs < GP_ARG_NUM_REG)
7435 {
7436 int min_gpr = crtl->args.info.gprs;
7437 int max_gpr = min_gpr + cfun->va_list_gpr_size;
7438 if (max_gpr > GP_ARG_NUM_REG)
7439 max_gpr = GP_ARG_NUM_REG;
7440
7441 if (cfun_frame_layout.first_save_gpr == -1
7442 || cfun_frame_layout.first_save_gpr > 2 + min_gpr)
7443 {
7444 cfun_frame_layout.first_save_gpr = 2 + min_gpr;
7445 cfun_frame_layout.first_save_gpr_slot = 2 + min_gpr;
7446 }
7447
7448 if (cfun_frame_layout.last_save_gpr == -1
7449 || cfun_frame_layout.last_save_gpr < 2 + max_gpr - 1)
7450 {
7451 cfun_frame_layout.last_save_gpr = 2 + max_gpr - 1;
7452 cfun_frame_layout.last_save_gpr_slot = 2 + max_gpr - 1;
7453 }
7454 }
7455
7456 /* Mark f0, f2 for 31 bit and f0-f4 for 64 bit to be saved. */
7457 if (TARGET_HARD_FLOAT && cfun->va_list_fpr_size
7458 && crtl->args.info.fprs < FP_ARG_NUM_REG)
7459 {
7460 int min_fpr = crtl->args.info.fprs;
7461 int max_fpr = min_fpr + cfun->va_list_fpr_size;
7462 if (max_fpr > FP_ARG_NUM_REG)
7463 max_fpr = FP_ARG_NUM_REG;
7464
7465 /* ??? This is currently required to ensure proper location
7466 of the fpr save slots within the va_list save area. */
7467 if (TARGET_PACKED_STACK)
7468 min_fpr = 0;
7469
7470 for (i = min_fpr; i < max_fpr; i++)
7471 cfun_set_fpr_bit (i);
7472 }
7473 }
7474
7475 if (!TARGET_64BIT)
7476 for (i = 2; i < 4; i++)
7477 if (df_regs_ever_live_p (i + 16) && !global_regs[i + 16])
7478 cfun_set_fpr_bit (i);
7479 }
7480
7481 /* Fill cfun->machine with info about frame of current function. */
7482
7483 static void
7484 s390_frame_info (void)
7485 {
7486 int i;
7487
7488 cfun_frame_layout.frame_size = get_frame_size ();
7489 if (!TARGET_64BIT && cfun_frame_layout.frame_size > 0x7fff0000)
7490 fatal_error ("total size of local variables exceeds architecture limit");
7491
7492 if (!TARGET_PACKED_STACK)
7493 {
7494 cfun_frame_layout.backchain_offset = 0;
7495 cfun_frame_layout.f0_offset = 16 * UNITS_PER_LONG;
7496 cfun_frame_layout.f4_offset = cfun_frame_layout.f0_offset + 2 * 8;
7497 cfun_frame_layout.f8_offset = -cfun_frame_layout.high_fprs * 8;
7498 cfun_frame_layout.gprs_offset = (cfun_frame_layout.first_save_gpr_slot
7499 * UNITS_PER_LONG);
7500 }
7501 else if (TARGET_BACKCHAIN) /* kernel stack layout */
7502 {
7503 cfun_frame_layout.backchain_offset = (STACK_POINTER_OFFSET
7504 - UNITS_PER_LONG);
7505 cfun_frame_layout.gprs_offset
7506 = (cfun_frame_layout.backchain_offset
7507 - (STACK_POINTER_REGNUM - cfun_frame_layout.first_save_gpr_slot + 1)
7508 * UNITS_PER_LONG);
7509
7510 if (TARGET_64BIT)
7511 {
7512 cfun_frame_layout.f4_offset
7513 = (cfun_frame_layout.gprs_offset
7514 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7515
7516 cfun_frame_layout.f0_offset
7517 = (cfun_frame_layout.f4_offset
7518 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7519 }
7520 else
7521 {
7522 /* On 31 bit we have to take care of the alignment of the
7523 floating point regs to provide fastest access. */
7524 cfun_frame_layout.f0_offset
7525 = ((cfun_frame_layout.gprs_offset
7526 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1))
7527 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7528
7529 cfun_frame_layout.f4_offset
7530 = (cfun_frame_layout.f0_offset
7531 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7532 }
7533 }
7534 else /* no backchain */
7535 {
7536 cfun_frame_layout.f4_offset
7537 = (STACK_POINTER_OFFSET
7538 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7539
7540 cfun_frame_layout.f0_offset
7541 = (cfun_frame_layout.f4_offset
7542 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7543
7544 cfun_frame_layout.gprs_offset
7545 = cfun_frame_layout.f0_offset - cfun_gprs_save_area_size;
7546 }
7547
7548 if (crtl->is_leaf
7549 && !TARGET_TPF_PROFILING
7550 && cfun_frame_layout.frame_size == 0
7551 && !cfun_save_high_fprs_p
7552 && !cfun->calls_alloca
7553 && !cfun->stdarg)
7554 return;
7555
7556 if (!TARGET_PACKED_STACK)
7557 cfun_frame_layout.frame_size += (STACK_POINTER_OFFSET
7558 + crtl->outgoing_args_size
7559 + cfun_frame_layout.high_fprs * 8);
7560 else
7561 {
7562 if (TARGET_BACKCHAIN)
7563 cfun_frame_layout.frame_size += UNITS_PER_LONG;
7564
7565 /* No alignment trouble here because f8-f15 are only saved under
7566 64 bit. */
7567 cfun_frame_layout.f8_offset = (MIN (MIN (cfun_frame_layout.f0_offset,
7568 cfun_frame_layout.f4_offset),
7569 cfun_frame_layout.gprs_offset)
7570 - cfun_frame_layout.high_fprs * 8);
7571
7572 cfun_frame_layout.frame_size += cfun_frame_layout.high_fprs * 8;
7573
7574 for (i = 0; i < 8; i++)
7575 if (cfun_fpr_bit_p (i))
7576 cfun_frame_layout.frame_size += 8;
7577
7578 cfun_frame_layout.frame_size += cfun_gprs_save_area_size;
7579
7580 /* If, under 31 bit, an odd number of GPRs has to be saved, we have to
7581 adjust the frame size to sustain 8-byte alignment of stack frames. */
7582 cfun_frame_layout.frame_size = ((cfun_frame_layout.frame_size +
7583 STACK_BOUNDARY / BITS_PER_UNIT - 1)
7584 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1));
7585
7586 cfun_frame_layout.frame_size += crtl->outgoing_args_size;
7587 }
7588 }
7589
7590 /* Generate frame layout. Fills in register and frame data for the current
7591 function in cfun->machine. This routine can be called multiple times;
7592 it will re-do the complete frame layout every time. */
7593
7594 static void
7595 s390_init_frame_layout (void)
7596 {
7597 HOST_WIDE_INT frame_size;
7598 int base_used;
7599 int clobbered_regs[16];
7600
7601 /* On S/390 machines, we may need to perform branch splitting, which
7602 will require both the base and the return address register. We have
7603 no choice but to assume we're going to need them until right at the
7604 end of the machine-dependent reorg phase. */
7605 if (!TARGET_CPU_ZARCH)
7606 cfun->machine->split_branches_pending_p = true;
7607
7608 do
7609 {
7610 frame_size = cfun_frame_layout.frame_size;
7611
7612 /* Try to predict whether we'll need the base register. */
7613 base_used = cfun->machine->split_branches_pending_p
7614 || crtl->uses_const_pool
7615 || (!DISP_IN_RANGE (frame_size)
7616 && !CONST_OK_FOR_K (frame_size));
7617
7618 /* Decide which register to use as literal pool base. In small
7619 leaf functions, try to use an unused call-clobbered register
7620 as base register to avoid save/restore overhead. */
7621 if (!base_used)
7622 cfun->machine->base_reg = NULL_RTX;
7623 else if (crtl->is_leaf && !df_regs_ever_live_p (5))
7624 cfun->machine->base_reg = gen_rtx_REG (Pmode, 5);
7625 else
7626 cfun->machine->base_reg = gen_rtx_REG (Pmode, BASE_REGNUM);
7627
7628 s390_register_info (clobbered_regs);
7629 s390_frame_info ();
7630 }
7631 while (frame_size != cfun_frame_layout.frame_size);
7632 }
7633
7634 /* Update frame layout. Recompute actual register save data based on
7635 current info and update regs_ever_live for the special registers.
7636 May be called multiple times, but may never cause *more* registers
7637 to be saved than s390_init_frame_layout allocated room for. */
7638
7639 static void
7640 s390_update_frame_layout (void)
7641 {
7642 int clobbered_regs[16];
7643
7644 s390_register_info (clobbered_regs);
7645
7646 df_set_regs_ever_live (BASE_REGNUM,
7647 clobbered_regs[BASE_REGNUM] ? true : false);
7648 df_set_regs_ever_live (RETURN_REGNUM,
7649 clobbered_regs[RETURN_REGNUM] ? true : false);
7650 df_set_regs_ever_live (STACK_POINTER_REGNUM,
7651 clobbered_regs[STACK_POINTER_REGNUM] ? true : false);
7652
7653 if (cfun->machine->base_reg)
7654 df_set_regs_ever_live (REGNO (cfun->machine->base_reg), true);
7655 }
7656
7657 /* Return true if it is legal to put a value with MODE into REGNO. */
7658
7659 bool
7660 s390_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
7661 {
7662 switch (REGNO_REG_CLASS (regno))
7663 {
7664 case FP_REGS:
7665 if (REGNO_PAIR_OK (regno, mode))
7666 {
7667 if (mode == SImode || mode == DImode)
7668 return true;
7669
7670 if (FLOAT_MODE_P (mode) && GET_MODE_CLASS (mode) != MODE_VECTOR_FLOAT)
7671 return true;
7672 }
7673 break;
7674 case ADDR_REGS:
7675 if (FRAME_REGNO_P (regno) && mode == Pmode)
7676 return true;
7677
7678 /* fallthrough */
7679 case GENERAL_REGS:
7680 if (REGNO_PAIR_OK (regno, mode))
7681 {
7682 if (TARGET_ZARCH
7683 || (mode != TFmode && mode != TCmode && mode != TDmode))
7684 return true;
7685 }
7686 break;
7687 case CC_REGS:
7688 if (GET_MODE_CLASS (mode) == MODE_CC)
7689 return true;
7690 break;
7691 case ACCESS_REGS:
7692 if (REGNO_PAIR_OK (regno, mode))
7693 {
7694 if (mode == SImode || mode == Pmode)
7695 return true;
7696 }
7697 break;
7698 default:
7699 return false;
7700 }
7701
7702 return false;
7703 }
7704
7705 /* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */
7706
7707 bool
7708 s390_hard_regno_rename_ok (unsigned int old_reg, unsigned int new_reg)
7709 {
7710 /* Once we've decided upon a register to use as base register, it must
7711 no longer be used for any other purpose. */
7712 if (cfun->machine->base_reg)
7713 if (REGNO (cfun->machine->base_reg) == old_reg
7714 || REGNO (cfun->machine->base_reg) == new_reg)
7715 return false;
7716
7717 return true;
7718 }
7719
7720 /* Maximum number of registers to represent a value of mode MODE
7721 in a register of class RCLASS. */
7722
7723 int
7724 s390_class_max_nregs (enum reg_class rclass, enum machine_mode mode)
7725 {
7726 switch (rclass)
7727 {
7728 case FP_REGS:
7729 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
7730 return 2 * ((GET_MODE_SIZE (mode) / 2 + 8 - 1) / 8);
7731 else
7732 return (GET_MODE_SIZE (mode) + 8 - 1) / 8;
7733 case ACCESS_REGS:
7734 return (GET_MODE_SIZE (mode) + 4 - 1) / 4;
7735 default:
7736 break;
7737 }
7738 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
7739 }
7740
7741 /* Return true if register FROM can be eliminated via register TO. */
7742
7743 static bool
7744 s390_can_eliminate (const int from, const int to)
7745 {
7746 /* On zSeries machines, we have not marked the base register as fixed.
7747 Instead, we have an elimination rule BASE_REGNUM -> BASE_REGNUM.
7748 If a function requires the base register, we say here that this
7749 elimination cannot be performed. This will cause reload to free
7750 up the base register (as if it were fixed). On the other hand,
7751 if the current function does *not* require the base register, we
7752 say here the elimination succeeds, which in turn allows reload
7753 to allocate the base register for any other purpose. */
7754 if (from == BASE_REGNUM && to == BASE_REGNUM)
7755 {
7756 if (TARGET_CPU_ZARCH)
7757 {
7758 s390_init_frame_layout ();
7759 return cfun->machine->base_reg == NULL_RTX;
7760 }
7761
7762 return false;
7763 }
7764
7765 /* Everything else must point into the stack frame. */
7766 gcc_assert (to == STACK_POINTER_REGNUM
7767 || to == HARD_FRAME_POINTER_REGNUM);
7768
7769 gcc_assert (from == FRAME_POINTER_REGNUM
7770 || from == ARG_POINTER_REGNUM
7771 || from == RETURN_ADDRESS_POINTER_REGNUM);
7772
7773 /* Make sure we actually saved the return address. */
7774 if (from == RETURN_ADDRESS_POINTER_REGNUM)
7775 if (!crtl->calls_eh_return
7776 && !cfun->stdarg
7777 && !cfun_frame_layout.save_return_addr_p)
7778 return false;
7779
7780 return true;
7781 }
7782
7783 /* Return the offset between registers FROM and TO immediately after the prologue. */
7784
7785 HOST_WIDE_INT
7786 s390_initial_elimination_offset (int from, int to)
7787 {
7788 HOST_WIDE_INT offset;
7789 int index;
7790
7791 /* ??? Why are we called for non-eliminable pairs? */
7792 if (!s390_can_eliminate (from, to))
7793 return 0;
7794
7795 switch (from)
7796 {
7797 case FRAME_POINTER_REGNUM:
7798 offset = (get_frame_size()
7799 + STACK_POINTER_OFFSET
7800 + crtl->outgoing_args_size);
7801 break;
7802
7803 case ARG_POINTER_REGNUM:
7804 s390_init_frame_layout ();
7805 offset = cfun_frame_layout.frame_size + STACK_POINTER_OFFSET;
7806 break;
7807
7808 case RETURN_ADDRESS_POINTER_REGNUM:
7809 s390_init_frame_layout ();
7810 index = RETURN_REGNUM - cfun_frame_layout.first_save_gpr_slot;
7811 gcc_assert (index >= 0);
7812 offset = cfun_frame_layout.frame_size + cfun_frame_layout.gprs_offset;
7813 offset += index * UNITS_PER_LONG;
7814 break;
7815
7816 case BASE_REGNUM:
7817 offset = 0;
7818 break;
7819
7820 default:
7821 gcc_unreachable ();
7822 }
7823
7824 return offset;
7825 }
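/* Illustration of the ARG_POINTER_REGNUM case above, assuming the
   64-bit ABI where STACK_POINTER_OFFSET is 160: a function with a
   360-byte frame gets an elimination offset of 360 + 160 == 520, so
   incoming stack arguments are addressed at that displacement from the
   stack pointer once the prologue has allocated the frame.  */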
7826
7827 /* Emit insn to save fpr REGNUM at offset OFFSET relative
7828 to register BASE. Return generated insn. */
7829
7830 static rtx
7831 save_fpr (rtx base, int offset, int regnum)
7832 {
7833 rtx addr;
7834 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
7835
7836 if (regnum >= 16 && regnum <= (16 + FP_ARG_NUM_REG))
7837 set_mem_alias_set (addr, get_varargs_alias_set ());
7838 else
7839 set_mem_alias_set (addr, get_frame_alias_set ());
7840
7841 return emit_move_insn (addr, gen_rtx_REG (DFmode, regnum));
7842 }
7843
7844 /* Emit insn to restore fpr REGNUM from offset OFFSET relative
7845 to register BASE. Return generated insn. */
7846
7847 static rtx
7848 restore_fpr (rtx base, int offset, int regnum)
7849 {
7850 rtx addr;
7851 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
7852 set_mem_alias_set (addr, get_frame_alias_set ());
7853
7854 return emit_move_insn (gen_rtx_REG (DFmode, regnum), addr);
7855 }
7856
7857 /* Return true if REGNO is a global register, but not one
7858 of the special ones that need to be saved/restored anyway. */
7859
7860 static inline bool
7861 global_not_special_regno_p (int regno)
7862 {
7863 return (global_regs[regno]
7864 /* These registers are special and need to be
7865 restored in any case. */
7866 && !(regno == STACK_POINTER_REGNUM
7867 || regno == RETURN_REGNUM
7868 || regno == BASE_REGNUM
7869 || (flag_pic && regno == (int)PIC_OFFSET_TABLE_REGNUM)));
7870 }
7871
7872 /* Generate insn to save registers FIRST to LAST into
7873 the register save area located at offset OFFSET
7874 relative to register BASE. */
7875
7876 static rtx
7877 save_gprs (rtx base, int offset, int first, int last)
7878 {
7879 rtx addr, insn, note;
7880 int i;
7881
7882 addr = plus_constant (Pmode, base, offset);
7883 addr = gen_rtx_MEM (Pmode, addr);
7884
7885 set_mem_alias_set (addr, get_frame_alias_set ());
7886
7887 /* Special-case single register. */
7888 if (first == last)
7889 {
7890 if (TARGET_64BIT)
7891 insn = gen_movdi (addr, gen_rtx_REG (Pmode, first));
7892 else
7893 insn = gen_movsi (addr, gen_rtx_REG (Pmode, first));
7894
7895 if (!global_not_special_regno_p (first))
7896 RTX_FRAME_RELATED_P (insn) = 1;
7897 return insn;
7898 }
7899
7900
7901 insn = gen_store_multiple (addr,
7902 gen_rtx_REG (Pmode, first),
7903 GEN_INT (last - first + 1));
7904
7905 if (first <= 6 && cfun->stdarg)
7906 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
7907 {
7908 rtx mem = XEXP (XVECEXP (PATTERN (insn), 0, i), 0);
7909
7910 if (first + i <= 6)
7911 set_mem_alias_set (mem, get_varargs_alias_set ());
7912 }
7913
7914 /* We need to set the FRAME_RELATED flag on all SETs
7915 inside the store-multiple pattern.
7916
7917 However, we must not emit DWARF records for registers 2..5
7918 if they are stored for use by variable arguments ...
7919
7920 ??? Unfortunately, it is not enough to simply not set the
7921 FRAME_RELATED flags for those SETs, because the first SET
7922 of the PARALLEL is always treated as if it had the flag
7923 set, even if it does not. Therefore we emit a new pattern
7924 without those registers as REG_FRAME_RELATED_EXPR note. */
7925
7926 if (first >= 6 && !global_not_special_regno_p (first))
7927 {
7928 rtx pat = PATTERN (insn);
7929
7930 for (i = 0; i < XVECLEN (pat, 0); i++)
7931 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
7932 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (pat,
7933 0, i)))))
7934 RTX_FRAME_RELATED_P (XVECEXP (pat, 0, i)) = 1;
7935
7936 RTX_FRAME_RELATED_P (insn) = 1;
7937 }
7938 else if (last >= 6)
7939 {
7940 int start;
7941
7942 for (start = first >= 6 ? first : 6; start <= last; start++)
7943 if (!global_not_special_regno_p (start))
7944 break;
7945
7946 if (start > last)
7947 return insn;
7948
7949 addr = plus_constant (Pmode, base,
7950 offset + (start - first) * UNITS_PER_LONG);
7951 note = gen_store_multiple (gen_rtx_MEM (Pmode, addr),
7952 gen_rtx_REG (Pmode, start),
7953 GEN_INT (last - start + 1));
7954 note = PATTERN (note);
7955
7956 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
7957
7958 for (i = 0; i < XVECLEN (note, 0); i++)
7959 if (GET_CODE (XVECEXP (note, 0, i)) == SET
7960 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (note,
7961 0, i)))))
7962 RTX_FRAME_RELATED_P (XVECEXP (note, 0, i)) = 1;
7963
7964 RTX_FRAME_RELATED_P (insn) = 1;
7965 }
7966
7967 return insn;
7968 }
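/* For illustration: on 64-bit the store-multiple built above is
   typically emitted as a single stmg instruction, e.g. saving GPRs
   6-15 into the standard save area comes out as something like
   "stmg %r6,%r15,48(%r15)"; the exact displacement depends on the
   frame layout computed earlier.  */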
7969
7970 /* Generate insn to restore registers FIRST to LAST from
7971 the register save area located at offset OFFSET
7972 relative to register BASE. */
7973
7974 static rtx
7975 restore_gprs (rtx base, int offset, int first, int last)
7976 {
7977 rtx addr, insn;
7978
7979 addr = plus_constant (Pmode, base, offset);
7980 addr = gen_rtx_MEM (Pmode, addr);
7981 set_mem_alias_set (addr, get_frame_alias_set ());
7982
7983 /* Special-case single register. */
7984 if (first == last)
7985 {
7986 if (TARGET_64BIT)
7987 insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
7988 else
7989 insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);
7990
7991 return insn;
7992 }
7993
7994 insn = gen_load_multiple (gen_rtx_REG (Pmode, first),
7995 addr,
7996 GEN_INT (last - first + 1));
7997 return insn;
7998 }
7999
8000 /* Return insn sequence to load the GOT register. */
8001
8002 static GTY(()) rtx got_symbol;
8003 rtx
8004 s390_load_got (void)
8005 {
8006 rtx insns;
8007
8008 /* We cannot use pic_offset_table_rtx here, since this function is
8009 also used for non-PIC code when __tls_get_offset is called; in
8010 that case neither PIC_OFFSET_TABLE_REGNUM nor pic_offset_table_rtx
8011 is usable. */
8012 rtx got_rtx = gen_rtx_REG (Pmode, 12);
8013
8014 if (!got_symbol)
8015 {
8016 got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
8017 SYMBOL_REF_FLAGS (got_symbol) = SYMBOL_FLAG_LOCAL;
8018 }
8019
8020 start_sequence ();
8021
8022 if (TARGET_CPU_ZARCH)
8023 {
8024 emit_move_insn (got_rtx, got_symbol);
8025 }
8026 else
8027 {
8028 rtx offset;
8029
8030 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got_symbol),
8031 UNSPEC_LTREL_OFFSET);
8032 offset = gen_rtx_CONST (Pmode, offset);
8033 offset = force_const_mem (Pmode, offset);
8034
8035 emit_move_insn (got_rtx, offset);
8036
8037 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (offset, 0)),
8038 UNSPEC_LTREL_BASE);
8039 offset = gen_rtx_PLUS (Pmode, got_rtx, offset);
8040
8041 emit_move_insn (got_rtx, offset);
8042 }
8043
8044 insns = get_insns ();
8045 end_sequence ();
8046 return insns;
8047 }
8048
8049 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
8050 and the change to the stack pointer. */
8051
8052 static void
8053 s390_emit_stack_tie (void)
8054 {
8055 rtx mem = gen_frame_mem (BLKmode,
8056 gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
8057
8058 emit_insn (gen_stack_tie (mem));
8059 }
8060
8061 /* Expand the prologue into a bunch of separate insns. */
8062
8063 void
8064 s390_emit_prologue (void)
8065 {
8066 rtx insn, addr;
8067 rtx temp_reg;
8068 int i;
8069 int offset;
8070 int next_fpr = 0;
8071
8072 /* Complete frame layout. */
8073
8074 s390_update_frame_layout ();
8075
8076 /* Annotate all constant pool references to let the scheduler know
8077 they implicitly use the base register. */
8078
8079 push_topmost_sequence ();
8080
8081 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8082 if (INSN_P (insn))
8083 {
8084 annotate_constant_pool_refs (&PATTERN (insn));
8085 df_insn_rescan (insn);
8086 }
8087
8088 pop_topmost_sequence ();
8089
8090 /* Choose the best register to use as a temporary within the prologue.
8091 See below for why TPF must use register 1. */
8092
8093 if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
8094 && !crtl->is_leaf
8095 && !TARGET_TPF_PROFILING)
8096 temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
8097 else
8098 temp_reg = gen_rtx_REG (Pmode, 1);
8099
8100 /* Save call saved gprs. */
8101 if (cfun_frame_layout.first_save_gpr != -1)
8102 {
8103 insn = save_gprs (stack_pointer_rtx,
8104 cfun_frame_layout.gprs_offset +
8105 UNITS_PER_LONG * (cfun_frame_layout.first_save_gpr
8106 - cfun_frame_layout.first_save_gpr_slot),
8107 cfun_frame_layout.first_save_gpr,
8108 cfun_frame_layout.last_save_gpr);
8109 emit_insn (insn);
8110 }
8111
8112 /* Dummy insn to mark literal pool slot. */
8113
8114 if (cfun->machine->base_reg)
8115 emit_insn (gen_main_pool (cfun->machine->base_reg));
8116
8117 offset = cfun_frame_layout.f0_offset;
8118
8119 /* Save f0 and f2. */
8120 for (i = 0; i < 2; i++)
8121 {
8122 if (cfun_fpr_bit_p (i))
8123 {
8124 save_fpr (stack_pointer_rtx, offset, i + 16);
8125 offset += 8;
8126 }
8127 else if (!TARGET_PACKED_STACK)
8128 offset += 8;
8129 }
8130
8131 /* Save f4 and f6. */
8132 offset = cfun_frame_layout.f4_offset;
8133 for (i = 2; i < 4; i++)
8134 {
8135 if (cfun_fpr_bit_p (i))
8136 {
8137 insn = save_fpr (stack_pointer_rtx, offset, i + 16);
8138 offset += 8;
8139
8140 /* If f4 and f6 are call clobbered they are saved due to stdargs and
8141 therefore are not frame related. */
8142 if (!call_really_used_regs[i + 16])
8143 RTX_FRAME_RELATED_P (insn) = 1;
8144 }
8145 else if (!TARGET_PACKED_STACK)
8146 offset += 8;
8147 }
8148
8149 if (TARGET_PACKED_STACK
8150 && cfun_save_high_fprs_p
8151 && cfun_frame_layout.f8_offset + cfun_frame_layout.high_fprs * 8 > 0)
8152 {
8153 offset = (cfun_frame_layout.f8_offset
8154 + (cfun_frame_layout.high_fprs - 1) * 8);
8155
8156 for (i = 15; i > 7 && offset >= 0; i--)
8157 if (cfun_fpr_bit_p (i))
8158 {
8159 insn = save_fpr (stack_pointer_rtx, offset, i + 16);
8160
8161 RTX_FRAME_RELATED_P (insn) = 1;
8162 offset -= 8;
8163 }
8164 if (offset >= cfun_frame_layout.f8_offset)
8165 next_fpr = i + 16;
8166 }
8167
8168 if (!TARGET_PACKED_STACK)
8169 next_fpr = cfun_save_high_fprs_p ? 31 : 0;
8170
8171 if (flag_stack_usage_info)
8172 current_function_static_stack_size = cfun_frame_layout.frame_size;
8173
8174 /* Decrement stack pointer. */
8175
8176 if (cfun_frame_layout.frame_size > 0)
8177 {
8178 rtx frame_off = GEN_INT (-cfun_frame_layout.frame_size);
8179 rtx real_frame_off;
8180
8181 if (s390_stack_size)
8182 {
8183 HOST_WIDE_INT stack_guard;
8184
8185 if (s390_stack_guard)
8186 stack_guard = s390_stack_guard;
8187 else
8188 {
8189 /* If no stack guard value is provided, the smallest power of 2
8190 that is at least the current frame size is chosen. */
8191 stack_guard = 1;
8192 while (stack_guard < cfun_frame_layout.frame_size)
8193 stack_guard <<= 1;
8194 }
8195
8196 if (cfun_frame_layout.frame_size >= s390_stack_size)
8197 {
8198 warning (0, "frame size of function %qs is %wd"
8199 " bytes exceeding user provided stack limit of "
8200 "%d bytes. "
8201 "An unconditional trap is added.",
8202 current_function_name(), cfun_frame_layout.frame_size,
8203 s390_stack_size);
8204 emit_insn (gen_trap ());
8205 }
8206 else
8207 {
8208 /* stack_guard has to be smaller than s390_stack_size.
8209 Otherwise we would emit an AND with zero which would
8210 not match the test under mask pattern. */
8211 if (stack_guard >= s390_stack_size)
8212 {
8213 warning (0, "frame size of function %qs is %wd"
8214 " bytes which is more than half the stack size. "
8215 "The dynamic check would not be reliable. "
8216 "No check emitted for this function.",
8217 current_function_name(),
8218 cfun_frame_layout.frame_size);
8219 }
8220 else
8221 {
8222 HOST_WIDE_INT stack_check_mask = ((s390_stack_size - 1)
8223 & ~(stack_guard - 1));
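/* For illustration, with -mstack-size=65536 and -mstack-guard=4096
   the mask is (65536 - 1) & ~(4096 - 1) == 0xf000, so the
   conditional trap emitted below fires once the stack pointer
   drops into the lowest 4096 bytes of the 64K stack area.  */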
8224
8225 rtx t = gen_rtx_AND (Pmode, stack_pointer_rtx,
8226 GEN_INT (stack_check_mask));
8227 if (TARGET_64BIT)
8228 emit_insn (gen_ctrapdi4 (gen_rtx_EQ (VOIDmode,
8229 t, const0_rtx),
8230 t, const0_rtx, const0_rtx));
8231 else
8232 emit_insn (gen_ctrapsi4 (gen_rtx_EQ (VOIDmode,
8233 t, const0_rtx),
8234 t, const0_rtx, const0_rtx));
8235 }
8236 }
8237 }
8238
8239 if (s390_warn_framesize > 0
8240 && cfun_frame_layout.frame_size >= s390_warn_framesize)
8241 warning (0, "frame size of %qs is %wd bytes",
8242 current_function_name (), cfun_frame_layout.frame_size);
8243
8244 if (s390_warn_dynamicstack_p && cfun->calls_alloca)
8245 warning (0, "%qs uses dynamic stack allocation", current_function_name ());
8246
8247 /* Save incoming stack pointer into temp reg. */
8248 if (TARGET_BACKCHAIN || next_fpr)
8249 insn = emit_insn (gen_move_insn (temp_reg, stack_pointer_rtx));
8250
8251 /* Subtract frame size from stack pointer. */
8252
8253 if (DISP_IN_RANGE (INTVAL (frame_off)))
8254 {
8255 insn = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8256 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8257 frame_off));
8258 insn = emit_insn (insn);
8259 }
8260 else
8261 {
8262 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
8263 frame_off = force_const_mem (Pmode, frame_off);
8264
8265 insn = emit_insn (gen_add2_insn (stack_pointer_rtx, frame_off));
8266 annotate_constant_pool_refs (&PATTERN (insn));
8267 }
8268
8269 RTX_FRAME_RELATED_P (insn) = 1;
8270 real_frame_off = GEN_INT (-cfun_frame_layout.frame_size);
8271 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
8272 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8273 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8274 real_frame_off)));
8275
8276 /* Set backchain. */
8277
8278 if (TARGET_BACKCHAIN)
8279 {
8280 if (cfun_frame_layout.backchain_offset)
8281 addr = gen_rtx_MEM (Pmode,
8282 plus_constant (Pmode, stack_pointer_rtx,
8283 cfun_frame_layout.backchain_offset));
8284 else
8285 addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
8286 set_mem_alias_set (addr, get_frame_alias_set ());
8287 insn = emit_insn (gen_move_insn (addr, temp_reg));
8288 }
8289
8290 /* If we support non-call exceptions (e.g. for Java),
8291 we need to make sure the backchain pointer is set up
8292 before any possibly trapping memory access. */
8293 if (TARGET_BACKCHAIN && cfun->can_throw_non_call_exceptions)
8294 {
8295 addr = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode));
8296 emit_clobber (addr);
8297 }
8298 }
8299
8300 /* Save fprs 8 - 15 (64 bit ABI). */
8301
8302 if (cfun_save_high_fprs_p && next_fpr)
8303 {
8304 /* If the stack might be accessed through a different register
8305 we have to make sure that the stack pointer decrement is not
8306 moved below the use of the stack slots. */
8307 s390_emit_stack_tie ();
8308
8309 insn = emit_insn (gen_add2_insn (temp_reg,
8310 GEN_INT (cfun_frame_layout.f8_offset)));
8311
8312 offset = 0;
8313
8314 for (i = 24; i <= next_fpr; i++)
8315 if (cfun_fpr_bit_p (i - 16))
8316 {
8317 rtx addr = plus_constant (Pmode, stack_pointer_rtx,
8318 cfun_frame_layout.frame_size
8319 + cfun_frame_layout.f8_offset
8320 + offset);
8321
8322 insn = save_fpr (temp_reg, offset, i);
8323 offset += 8;
8324 RTX_FRAME_RELATED_P (insn) = 1;
8325 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
8326 gen_rtx_SET (VOIDmode,
8327 gen_rtx_MEM (DFmode, addr),
8328 gen_rtx_REG (DFmode, i)));
8329 }
8330 }
8331
8332 /* Set frame pointer, if needed. */
8333
8334 if (frame_pointer_needed)
8335 {
8336 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
8337 RTX_FRAME_RELATED_P (insn) = 1;
8338 }
8339
8340 /* Set up got pointer, if needed. */
8341
8342 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
8343 {
8344 rtx insns = s390_load_got ();
8345
8346 for (insn = insns; insn; insn = NEXT_INSN (insn))
8347 annotate_constant_pool_refs (&PATTERN (insn));
8348
8349 emit_insn (insns);
8350 }
8351
8352 if (TARGET_TPF_PROFILING)
8353 {
8354 /* Generate a BAS instruction to serve as a function
8355 entry intercept to facilitate the use of tracing
8356 algorithms located at the branch target. */
8357 emit_insn (gen_prologue_tpf ());
8358
8359 /* Emit a blockage here so that all code
8360 lies between the profiling mechanisms. */
8361 emit_insn (gen_blockage ());
8362 }
8363 }
8364
8365 /* Expand the epilogue into a bunch of separate insns. */
8366
8367 void
8368 s390_emit_epilogue (bool sibcall)
8369 {
8370 rtx frame_pointer, return_reg, cfa_restores = NULL_RTX;
8371 int area_bottom, area_top, offset = 0;
8372 int next_offset;
8373 rtvec p;
8374 int i;
8375
8376 if (TARGET_TPF_PROFILING)
8377 {
8378
8379 /* Generate a BAS instruction to serve as a function
8380 entry intercept to facilitate the use of tracing
8381 algorithms located at the branch target. */
8382
8383 /* Emit a blockage here so that all code
8384 lies between the profiling mechanisms. */
8385 emit_insn (gen_blockage ());
8386
8387 emit_insn (gen_epilogue_tpf ());
8388 }
8389
8390 /* Check whether to use frame or stack pointer for restore. */
8391
8392 frame_pointer = (frame_pointer_needed
8393 ? hard_frame_pointer_rtx : stack_pointer_rtx);
8394
8395 s390_frame_area (&area_bottom, &area_top);
8396
8397 /* Check whether we can access the register save area.
8398 If not, increment the frame pointer as required. */
8399
8400 if (area_top <= area_bottom)
8401 {
8402 /* Nothing to restore. */
8403 }
8404 else if (DISP_IN_RANGE (cfun_frame_layout.frame_size + area_bottom)
8405 && DISP_IN_RANGE (cfun_frame_layout.frame_size + area_top - 1))
8406 {
8407 /* Area is in range. */
8408 offset = cfun_frame_layout.frame_size;
8409 }
8410 else
8411 {
8412 rtx insn, frame_off, cfa;
8413
8414 offset = area_bottom < 0 ? -area_bottom : 0;
8415 frame_off = GEN_INT (cfun_frame_layout.frame_size - offset);
8416
8417 cfa = gen_rtx_SET (VOIDmode, frame_pointer,
8418 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
8419 if (DISP_IN_RANGE (INTVAL (frame_off)))
8420 {
8421 insn = gen_rtx_SET (VOIDmode, frame_pointer,
8422 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
8423 insn = emit_insn (insn);
8424 }
8425 else
8426 {
8427 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
8428 frame_off = force_const_mem (Pmode, frame_off);
8429
8430 insn = emit_insn (gen_add2_insn (frame_pointer, frame_off));
8431 annotate_constant_pool_refs (&PATTERN (insn));
8432 }
8433 add_reg_note (insn, REG_CFA_ADJUST_CFA, cfa);
8434 RTX_FRAME_RELATED_P (insn) = 1;
8435 }
8436
8437 /* Restore call saved fprs. */
8438
8439 if (TARGET_64BIT)
8440 {
8441 if (cfun_save_high_fprs_p)
8442 {
8443 next_offset = cfun_frame_layout.f8_offset;
8444 for (i = 24; i < 32; i++)
8445 {
8446 if (cfun_fpr_bit_p (i - 16))
8447 {
8448 restore_fpr (frame_pointer,
8449 offset + next_offset, i);
8450 cfa_restores
8451 = alloc_reg_note (REG_CFA_RESTORE,
8452 gen_rtx_REG (DFmode, i), cfa_restores);
8453 next_offset += 8;
8454 }
8455 }
8456 }
8457
8458 }
8459 else
8460 {
8461 next_offset = cfun_frame_layout.f4_offset;
8462 for (i = 18; i < 20; i++)
8463 {
8464 if (cfun_fpr_bit_p (i - 16))
8465 {
8466 restore_fpr (frame_pointer,
8467 offset + next_offset, i);
8468 cfa_restores
8469 = alloc_reg_note (REG_CFA_RESTORE,
8470 gen_rtx_REG (DFmode, i), cfa_restores);
8471 next_offset += 8;
8472 }
8473 else if (!TARGET_PACKED_STACK)
8474 next_offset += 8;
8475 }
8476
8477 }
8478
8479 /* Return register. */
8480
8481 return_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
8482
8483 /* Restore call saved gprs. */
8484
8485 if (cfun_frame_layout.first_restore_gpr != -1)
8486 {
8487 rtx insn, addr;
8488 int i;
8489
8490 /* Check for global registers and store their current values
8491 into the stack slots from which they will be reloaded below. */
8492
8493 for (i = cfun_frame_layout.first_restore_gpr;
8494 i <= cfun_frame_layout.last_restore_gpr;
8495 i++)
8496 {
8497 if (global_not_special_regno_p (i))
8498 {
8499 addr = plus_constant (Pmode, frame_pointer,
8500 offset + cfun_frame_layout.gprs_offset
8501 + (i - cfun_frame_layout.first_save_gpr_slot)
8502 * UNITS_PER_LONG);
8503 addr = gen_rtx_MEM (Pmode, addr);
8504 set_mem_alias_set (addr, get_frame_alias_set ());
8505 emit_move_insn (addr, gen_rtx_REG (Pmode, i));
8506 }
8507 else
8508 cfa_restores
8509 = alloc_reg_note (REG_CFA_RESTORE,
8510 gen_rtx_REG (Pmode, i), cfa_restores);
8511 }
8512
8513 if (! sibcall)
8514 {
8515 /* Fetch the return address from the stack before the load multiple;
8516 this helps scheduling. */
8517
8518 if (cfun_frame_layout.save_return_addr_p
8519 || (cfun_frame_layout.first_restore_gpr < BASE_REGNUM
8520 && cfun_frame_layout.last_restore_gpr > RETURN_REGNUM))
8521 {
8522 int return_regnum = find_unused_clobbered_reg();
8523 if (!return_regnum)
8524 return_regnum = 4;
8525 return_reg = gen_rtx_REG (Pmode, return_regnum);
8526
8527 addr = plus_constant (Pmode, frame_pointer,
8528 offset + cfun_frame_layout.gprs_offset
8529 + (RETURN_REGNUM
8530 - cfun_frame_layout.first_save_gpr_slot)
8531 * UNITS_PER_LONG);
8532 addr = gen_rtx_MEM (Pmode, addr);
8533 set_mem_alias_set (addr, get_frame_alias_set ());
8534 emit_move_insn (return_reg, addr);
8535 }
8536 }
8537
8538 insn = restore_gprs (frame_pointer,
8539 offset + cfun_frame_layout.gprs_offset
8540 + (cfun_frame_layout.first_restore_gpr
8541 - cfun_frame_layout.first_save_gpr_slot)
8542 * UNITS_PER_LONG,
8543 cfun_frame_layout.first_restore_gpr,
8544 cfun_frame_layout.last_restore_gpr);
8545 insn = emit_insn (insn);
8546 REG_NOTES (insn) = cfa_restores;
8547 add_reg_note (insn, REG_CFA_DEF_CFA,
8548 plus_constant (Pmode, stack_pointer_rtx,
8549 STACK_POINTER_OFFSET));
8550 RTX_FRAME_RELATED_P (insn) = 1;
8551 }
8552
8553 if (! sibcall)
8554 {
8555
8556 /* Return to caller. */
8557
8558 p = rtvec_alloc (2);
8559
8560 RTVEC_ELT (p, 0) = ret_rtx;
8561 RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode, return_reg);
8562 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
8563 }
8564 }
8565
8566
8567 /* Return the size in bytes of a function argument of
8568 type TYPE and/or mode MODE. At least one of TYPE or
8569 MODE must be specified. */
8570
8571 static int
8572 s390_function_arg_size (enum machine_mode mode, const_tree type)
8573 {
8574 if (type)
8575 return int_size_in_bytes (type);
8576
8577 /* No type info available for some library calls ... */
8578 if (mode != BLKmode)
8579 return GET_MODE_SIZE (mode);
8580
8581 /* If we have neither type nor mode, abort. */
8582 gcc_unreachable ();
8583 }
8584
8585 /* Return true if a function argument of type TYPE and mode MODE
8586 is to be passed in a floating-point register, if available. */
8587
8588 static bool
8589 s390_function_arg_float (enum machine_mode mode, const_tree type)
8590 {
8591 int size = s390_function_arg_size (mode, type);
8592 if (size > 8)
8593 return false;
8594
8595 /* Soft-float changes the ABI: no floating-point registers are used. */
8596 if (TARGET_SOFT_FLOAT)
8597 return false;
8598
8599 /* No type info available for some library calls ... */
8600 if (!type)
8601 return mode == SFmode || mode == DFmode || mode == SDmode || mode == DDmode;
8602
8603 /* The ABI says that record types with a single member are treated
8604 just like that member would be. */
8605 while (TREE_CODE (type) == RECORD_TYPE)
8606 {
8607 tree field, single = NULL_TREE;
8608
8609 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
8610 {
8611 if (TREE_CODE (field) != FIELD_DECL)
8612 continue;
8613
8614 if (single == NULL_TREE)
8615 single = TREE_TYPE (field);
8616 else
8617 return false;
8618 }
8619
8620 if (single == NULL_TREE)
8621 return false;
8622 else
8623 type = single;
8624 }
8625
8626 return TREE_CODE (type) == REAL_TYPE;
8627 }
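/* For illustration: with hard float, "double" and
   "struct { double d; }" are both classified as floating point here
   (the single-member record rule above), while
   "struct { double d; int i; }" or any record with more than one
   field is not.  */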
8628
8629 /* Return true if a function argument of type TYPE and mode MODE
8630 is to be passed in an integer register, or a pair of integer
8631 registers, if available. */
8632
8633 static bool
8634 s390_function_arg_integer (enum machine_mode mode, const_tree type)
8635 {
8636 int size = s390_function_arg_size (mode, type);
8637 if (size > 8)
8638 return false;
8639
8640 /* No type info available for some library calls ... */
8641 if (!type)
8642 return GET_MODE_CLASS (mode) == MODE_INT
8643 || (TARGET_SOFT_FLOAT && SCALAR_FLOAT_MODE_P (mode));
8644
8645 /* We accept small integral (and similar) types. */
8646 if (INTEGRAL_TYPE_P (type)
8647 || POINTER_TYPE_P (type)
8648 || TREE_CODE (type) == NULLPTR_TYPE
8649 || TREE_CODE (type) == OFFSET_TYPE
8650 || (TARGET_SOFT_FLOAT && TREE_CODE (type) == REAL_TYPE))
8651 return true;
8652
8653 /* We also accept structs of size 1, 2, 4, 8 that are not
8654 passed in floating-point registers. */
8655 if (AGGREGATE_TYPE_P (type)
8656 && exact_log2 (size) >= 0
8657 && !s390_function_arg_float (mode, type))
8658 return true;
8659
8660 return false;
8661 }
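/* For illustration: an int, a pointer, or an 8-byte struct such as
   "struct { int a, b; }" qualifies for integer registers under the
   rules above, while a 3-byte or 12-byte struct does not (its size is
   not a power of two) and is instead passed by reference, see
   s390_pass_by_reference below.  */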
8662
8663 /* Return 1 if a function argument of type TYPE and mode MODE
8664 is to be passed by reference. The ABI specifies that only
8665 structures of size 1, 2, 4, or 8 bytes are passed by value,
8666 all other structures (and complex numbers) are passed by
8667 reference. */
8668
8669 static bool
8670 s390_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
8671 enum machine_mode mode, const_tree type,
8672 bool named ATTRIBUTE_UNUSED)
8673 {
8674 int size = s390_function_arg_size (mode, type);
8675 if (size > 8)
8676 return true;
8677
8678 if (type)
8679 {
8680 if (AGGREGATE_TYPE_P (type) && exact_log2 (size) < 0)
8681 return 1;
8682
8683 if (TREE_CODE (type) == COMPLEX_TYPE
8684 || TREE_CODE (type) == VECTOR_TYPE)
8685 return 1;
8686 }
8687
8688 return 0;
8689 }
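/* For illustration: only aggregates of exactly 1, 2, 4 or 8 bytes are
   passed by value; a 6-byte struct, any struct larger than 8 bytes,
   and all complex and vector values are passed by reference through a
   hidden pointer.  */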
8690
8691 /* Update the data in CUM to advance over an argument of mode MODE and
8692 data type TYPE. (TYPE is null for libcalls where that information
8693 may not be available.) The boolean NAMED specifies whether the
8694 argument is a named argument (as opposed to an unnamed argument
8695 matching an ellipsis). */
8696
8697 static void
8698 s390_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
8699 const_tree type, bool named ATTRIBUTE_UNUSED)
8700 {
8701 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
8702
8703 if (s390_function_arg_float (mode, type))
8704 {
8705 cum->fprs += 1;
8706 }
8707 else if (s390_function_arg_integer (mode, type))
8708 {
8709 int size = s390_function_arg_size (mode, type);
8710 cum->gprs += ((size + UNITS_PER_LONG - 1) / UNITS_PER_LONG);
8711 }
8712 else
8713 gcc_unreachable ();
8714 }
8715
8716 /* Define where to put the arguments to a function.
8717 Value is zero to push the argument on the stack,
8718 or a hard register in which to store the argument.
8719
8720 MODE is the argument's machine mode.
8721 TYPE is the data type of the argument (as a tree).
8722 This is null for libcalls where that information may
8723 not be available.
8724 CUM is a variable of type CUMULATIVE_ARGS which gives info about
8725 the preceding args and about the function being called.
8726 NAMED is nonzero if this argument is a named parameter
8727 (otherwise it is an extra parameter matching an ellipsis).
8728
8729 On S/390, we use general purpose registers 2 through 6 to
8730 pass integer, pointer, and certain structure arguments, and
8731 floating point registers 0 and 2 (0, 2, 4, and 6 on 64-bit)
8732 to pass floating point arguments. All remaining arguments
8733 are pushed to the stack. */
8734
8735 static rtx
8736 s390_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
8737 const_tree type, bool named ATTRIBUTE_UNUSED)
8738 {
8739 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
8740
8741 if (s390_function_arg_float (mode, type))
8742 {
8743 if (cum->fprs + 1 > FP_ARG_NUM_REG)
8744 return 0;
8745 else
8746 return gen_rtx_REG (mode, cum->fprs + 16);
8747 }
8748 else if (s390_function_arg_integer (mode, type))
8749 {
8750 int size = s390_function_arg_size (mode, type);
8751 int n_gprs = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
8752
8753 if (cum->gprs + n_gprs > GP_ARG_NUM_REG)
8754 return 0;
8755 else if (n_gprs == 1 || UNITS_PER_WORD == UNITS_PER_LONG)
8756 return gen_rtx_REG (mode, cum->gprs + 2);
8757 else if (n_gprs == 2)
8758 {
8759 rtvec p = rtvec_alloc (2);
8760
8761 RTVEC_ELT (p, 0)
8762 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 2),
8763 const0_rtx);
8764 RTVEC_ELT (p, 1)
8765 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 3),
8766 GEN_INT (4));
8767
8768 return gen_rtx_PARALLEL (mode, p);
8769 }
8770 }
8771
8772 /* After the real arguments, expand_call calls us once again
8773 with a void_type_node type. Whatever we return here is
8774 passed as operand 2 to the call expanders.
8775
8776 We don't need this feature ... */
8777 else if (type == void_type_node)
8778 return const0_rtx;
8779
8780 gcc_unreachable ();
8781 }
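/* A sketch of how the conventions above assign registers on 64-bit,
   using a hypothetical prototype

     void f (int a, double b, long c, double d);

   a is passed in %r2, b in %f0, c in %r3 and d in %f2; further integer
   arguments use %r4-%r6 and further FP arguments %f4 and %f6, after
   which arguments are passed on the stack.  */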
8782
8783 /* Return true if return values of type TYPE should be returned
8784 in a memory buffer whose address is passed by the caller as
8785 hidden first argument. */
8786
8787 static bool
8788 s390_return_in_memory (const_tree type, const_tree fundecl ATTRIBUTE_UNUSED)
8789 {
8790 /* We accept small integral (and similar) types. */
8791 if (INTEGRAL_TYPE_P (type)
8792 || POINTER_TYPE_P (type)
8793 || TREE_CODE (type) == OFFSET_TYPE
8794 || TREE_CODE (type) == REAL_TYPE)
8795 return int_size_in_bytes (type) > 8;
8796
8797 /* Aggregates and similar constructs are always returned
8798 in memory. */
8799 if (AGGREGATE_TYPE_P (type)
8800 || TREE_CODE (type) == COMPLEX_TYPE
8801 || TREE_CODE (type) == VECTOR_TYPE)
8802 return true;
8803
8804 /* ??? We get called on all sorts of random stuff from
8805 aggregate_value_p. We can't abort, but it's not clear
8806 what's safe to return. Pretend it's a struct I guess. */
8807 return true;
8808 }
8809
8810 /* Function arguments and return values are promoted to word size. */
8811
8812 static enum machine_mode
8813 s390_promote_function_mode (const_tree type, enum machine_mode mode,
8814 int *punsignedp,
8815 const_tree fntype ATTRIBUTE_UNUSED,
8816 int for_return ATTRIBUTE_UNUSED)
8817 {
8818 if (INTEGRAL_MODE_P (mode)
8819 && GET_MODE_SIZE (mode) < UNITS_PER_LONG)
8820 {
8821 if (type != NULL_TREE && POINTER_TYPE_P (type))
8822 *punsignedp = POINTERS_EXTEND_UNSIGNED;
8823 return Pmode;
8824 }
8825
8826 return mode;
8827 }
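/* For example, on a 64-bit target a "short" or "int" argument is
   promoted to Pmode (DImode) here, and a pointer argument is in
   addition marked for zero extension via POINTERS_EXTEND_UNSIGNED.  */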
8828
8829 /* Define where to return a (scalar) value of type RET_TYPE.
8830 If RET_TYPE is null, define where to return a (scalar)
8831 value of mode MODE from a libcall. */
8832
8833 static rtx
8834 s390_function_and_libcall_value (enum machine_mode mode,
8835 const_tree ret_type,
8836 const_tree fntype_or_decl,
8837 bool outgoing ATTRIBUTE_UNUSED)
8838 {
8839 /* For normal functions perform the promotion as
8840 promote_function_mode would do. */
8841 if (ret_type)
8842 {
8843 int unsignedp = TYPE_UNSIGNED (ret_type);
8844 mode = promote_function_mode (ret_type, mode, &unsignedp,
8845 fntype_or_decl, 1);
8846 }
8847
8848 gcc_assert (GET_MODE_CLASS (mode) == MODE_INT || SCALAR_FLOAT_MODE_P (mode));
8849 gcc_assert (GET_MODE_SIZE (mode) <= 8);
8850
8851 if (TARGET_HARD_FLOAT && SCALAR_FLOAT_MODE_P (mode))
8852 return gen_rtx_REG (mode, 16);
8853 else if (GET_MODE_SIZE (mode) <= UNITS_PER_LONG
8854 || UNITS_PER_LONG == UNITS_PER_WORD)
8855 return gen_rtx_REG (mode, 2);
8856 else if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_LONG)
8857 {
8858 /* This case is triggered when returning a 64 bit value with
8859 -m31 -mzarch. Although the value would fit into a single
8860 register it has to be forced into a 32 bit register pair in
8861 order to match the ABI. */
8862 rtvec p = rtvec_alloc (2);
8863
8864 RTVEC_ELT (p, 0)
8865 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 2), const0_rtx);
8866 RTVEC_ELT (p, 1)
8867 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 3), GEN_INT (4));
8868
8869 return gen_rtx_PARALLEL (mode, p);
8870 }
8871
8872 gcc_unreachable ();
8873 }
8874
8875 /* Define where to return a scalar return value of type RET_TYPE. */
8876
8877 static rtx
8878 s390_function_value (const_tree ret_type, const_tree fn_decl_or_type,
8879 bool outgoing)
8880 {
8881 return s390_function_and_libcall_value (TYPE_MODE (ret_type), ret_type,
8882 fn_decl_or_type, outgoing);
8883 }
8884
8885 /* Define where to return a scalar libcall return value of mode
8886 MODE. */
8887
8888 static rtx
8889 s390_libcall_value (enum machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
8890 {
8891 return s390_function_and_libcall_value (mode, NULL_TREE,
8892 NULL_TREE, true);
8893 }
8894
8895
8896 /* Create and return the va_list datatype.
8897
8898 On S/390, va_list is an array type equivalent to
8899
8900 typedef struct __va_list_tag
8901 {
8902 long __gpr;
8903 long __fpr;
8904 void *__overflow_arg_area;
8905 void *__reg_save_area;
8906 } va_list[1];
8907
8908 where __gpr and __fpr hold the number of general purpose
8909 or floating point arguments used up to now, respectively,
8910 __overflow_arg_area points to the stack location of the
8911 next argument passed on the stack, and __reg_save_area
8912 always points to the start of the register area in the
8913 call frame of the current function. The function prologue
8914 saves all registers used for argument passing into this
8915 area if the function uses variable arguments. */
8916
8917 static tree
8918 s390_build_builtin_va_list (void)
8919 {
8920 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
8921
8922 record = lang_hooks.types.make_type (RECORD_TYPE);
8923
8924 type_decl =
8925 build_decl (BUILTINS_LOCATION,
8926 TYPE_DECL, get_identifier ("__va_list_tag"), record);
8927
8928 f_gpr = build_decl (BUILTINS_LOCATION,
8929 FIELD_DECL, get_identifier ("__gpr"),
8930 long_integer_type_node);
8931 f_fpr = build_decl (BUILTINS_LOCATION,
8932 FIELD_DECL, get_identifier ("__fpr"),
8933 long_integer_type_node);
8934 f_ovf = build_decl (BUILTINS_LOCATION,
8935 FIELD_DECL, get_identifier ("__overflow_arg_area"),
8936 ptr_type_node);
8937 f_sav = build_decl (BUILTINS_LOCATION,
8938 FIELD_DECL, get_identifier ("__reg_save_area"),
8939 ptr_type_node);
8940
8941 va_list_gpr_counter_field = f_gpr;
8942 va_list_fpr_counter_field = f_fpr;
8943
8944 DECL_FIELD_CONTEXT (f_gpr) = record;
8945 DECL_FIELD_CONTEXT (f_fpr) = record;
8946 DECL_FIELD_CONTEXT (f_ovf) = record;
8947 DECL_FIELD_CONTEXT (f_sav) = record;
8948
8949 TYPE_STUB_DECL (record) = type_decl;
8950 TYPE_NAME (record) = type_decl;
8951 TYPE_FIELDS (record) = f_gpr;
8952 DECL_CHAIN (f_gpr) = f_fpr;
8953 DECL_CHAIN (f_fpr) = f_ovf;
8954 DECL_CHAIN (f_ovf) = f_sav;
8955
8956 layout_type (record);
8957
8958 /* The correct type is an array type of one element. */
8959 return build_array_type (record, build_index_type (size_zero_node));
8960 }
8961
8962 /* Implement va_start by filling the va_list structure VALIST.
8963 STDARG_P is always true, and ignored.
8964 NEXTARG points to the first anonymous stack argument.
8965
8966 The following global variables are used to initialize
8967 the va_list structure:
8968
8969 crtl->args.info:
8970 holds number of gprs and fprs used for named arguments.
8971 crtl->args.arg_offset_rtx:
8972 holds the offset of the first anonymous stack argument
8973 (relative to the virtual arg pointer). */
8974
8975 static void
8976 s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
8977 {
8978 HOST_WIDE_INT n_gpr, n_fpr;
8979 int off;
8980 tree f_gpr, f_fpr, f_ovf, f_sav;
8981 tree gpr, fpr, ovf, sav, t;
8982
8983 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
8984 f_fpr = DECL_CHAIN (f_gpr);
8985 f_ovf = DECL_CHAIN (f_fpr);
8986 f_sav = DECL_CHAIN (f_ovf);
8987
8988 valist = build_simple_mem_ref (valist);
8989 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
8990 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
8991 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
8992 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
8993
8994 /* Count number of gp and fp argument registers used. */
8995
8996 n_gpr = crtl->args.info.gprs;
8997 n_fpr = crtl->args.info.fprs;
8998
8999 if (cfun->va_list_gpr_size)
9000 {
9001 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
9002 build_int_cst (NULL_TREE, n_gpr));
9003 TREE_SIDE_EFFECTS (t) = 1;
9004 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9005 }
9006
9007 if (cfun->va_list_fpr_size)
9008 {
9009 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
9010 build_int_cst (NULL_TREE, n_fpr));
9011 TREE_SIDE_EFFECTS (t) = 1;
9012 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9013 }
9014
9015 /* Find the overflow area. */
9016 if (n_gpr + cfun->va_list_gpr_size > GP_ARG_NUM_REG
9017 || n_fpr + cfun->va_list_fpr_size > FP_ARG_NUM_REG)
9018 {
9019 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
9020
9021 off = INTVAL (crtl->args.arg_offset_rtx);
9022 off = off < 0 ? 0 : off;
9023 if (TARGET_DEBUG_ARG)
9024 fprintf (stderr, "va_start: n_gpr = %d, n_fpr = %d off %d\n",
9025 (int)n_gpr, (int)n_fpr, off);
9026
9027 t = fold_build_pointer_plus_hwi (t, off);
9028
9029 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
9030 TREE_SIDE_EFFECTS (t) = 1;
9031 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9032 }
9033
9034 /* Find the register save area. */
9035 if ((cfun->va_list_gpr_size && n_gpr < GP_ARG_NUM_REG)
9036 || (cfun->va_list_fpr_size && n_fpr < FP_ARG_NUM_REG))
9037 {
9038 t = make_tree (TREE_TYPE (sav), return_address_pointer_rtx);
9039 t = fold_build_pointer_plus_hwi (t, -RETURN_REGNUM * UNITS_PER_LONG);
9040
9041 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
9042 TREE_SIDE_EFFECTS (t) = 1;
9043 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9044 }
9045 }
9046
9047 /* Implement va_arg by updating the va_list structure
9048 VALIST as required to retrieve an argument of type
9049 TYPE, and returning that argument.
9050
9051 Generates code equivalent to:
9052
9053 if (integral value) {
9054 if (size <= 4 && args.gpr < 5 ||
9055 size > 4 && args.gpr < 4 )
9056 ret = args.reg_save_area[args.gpr+8]
9057 else
9058 ret = *args.overflow_arg_area++;
9059 } else if (float value) {
9060 if (args.fpr < 2)
9061 ret = args.reg_save_area[args.fpr+64]
9062 else
9063 ret = *args.overflow_arg_area++;
9064 } else if (aggregate value) {
9065 if (args.gpr < 5)
9066 ret = *args.reg_save_area[args.gpr]
9067 else
9068 ret = **args.overflow_arg_area++;
9069 } */
9070
9071 static tree
9072 s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
9073 gimple_seq *post_p ATTRIBUTE_UNUSED)
9074 {
9075 tree f_gpr, f_fpr, f_ovf, f_sav;
9076 tree gpr, fpr, ovf, sav, reg, t, u;
9077 int indirect_p, size, n_reg, sav_ofs, sav_scale, max_reg;
9078 tree lab_false, lab_over, addr;
9079
9080 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
9081 f_fpr = DECL_CHAIN (f_gpr);
9082 f_ovf = DECL_CHAIN (f_fpr);
9083 f_sav = DECL_CHAIN (f_ovf);
9084
9085 valist = build_va_arg_indirect_ref (valist);
9086 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
9087 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
9088 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
9089
9090 /* The tree for args* cannot be shared between gpr/fpr and ovf since
9091 both appear on a lhs. */
9092 valist = unshare_expr (valist);
9093 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
9094
9095 size = int_size_in_bytes (type);
9096
9097 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
9098 {
9099 if (TARGET_DEBUG_ARG)
9100 {
9101 fprintf (stderr, "va_arg: aggregate type");
9102 debug_tree (type);
9103 }
9104
9105 /* Aggregates are passed by reference. */
9106 indirect_p = 1;
9107 reg = gpr;
9108 n_reg = 1;
9109
9110 /* Kernel stack layout on 31 bit: it is assumed here that no padding
9111 will be added by s390_frame_info because for va_args an even
9112 number of GPRs always has to be saved (r15-r2 = 14 regs). */
9113 sav_ofs = 2 * UNITS_PER_LONG;
9114 sav_scale = UNITS_PER_LONG;
9115 size = UNITS_PER_LONG;
9116 max_reg = GP_ARG_NUM_REG - n_reg;
9117 }
9118 else if (s390_function_arg_float (TYPE_MODE (type), type))
9119 {
9120 if (TARGET_DEBUG_ARG)
9121 {
9122 fprintf (stderr, "va_arg: float type");
9123 debug_tree (type);
9124 }
9125
9126 /* FP args go in FP registers, if present. */
9127 indirect_p = 0;
9128 reg = fpr;
9129 n_reg = 1;
9130 sav_ofs = 16 * UNITS_PER_LONG;
9131 sav_scale = 8;
9132 max_reg = FP_ARG_NUM_REG - n_reg;
9133 }
9134 else
9135 {
9136 if (TARGET_DEBUG_ARG)
9137 {
9138 fprintf (stderr, "va_arg: other type");
9139 debug_tree (type);
9140 }
9141
9142 /* Otherwise into GP registers. */
9143 indirect_p = 0;
9144 reg = gpr;
9145 n_reg = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
9146
9147 /* Kernel stack layout on 31 bit: it is assumed here that no padding
9148 will be added by s390_frame_info because for va_args an even
9149 number of GPRs always has to be saved (r15-r2 = 14 regs). */
9150 sav_ofs = 2 * UNITS_PER_LONG;
9151
9152 if (size < UNITS_PER_LONG)
9153 sav_ofs += UNITS_PER_LONG - size;
9154
9155 sav_scale = UNITS_PER_LONG;
9156 max_reg = GP_ARG_NUM_REG - n_reg;
9157 }
9158
9159 /* Pull the value out of the saved registers ... */
9160
9161 lab_false = create_artificial_label (UNKNOWN_LOCATION);
9162 lab_over = create_artificial_label (UNKNOWN_LOCATION);
9163 addr = create_tmp_var (ptr_type_node, "addr");
9164
9165 t = fold_convert (TREE_TYPE (reg), size_int (max_reg));
9166 t = build2 (GT_EXPR, boolean_type_node, reg, t);
9167 u = build1 (GOTO_EXPR, void_type_node, lab_false);
9168 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
9169 gimplify_and_add (t, pre_p);
9170
9171 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
9172 u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
9173 fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
9174 t = fold_build_pointer_plus (t, u);
9175
9176 gimplify_assign (addr, t, pre_p);
9177
9178 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
9179
9180 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
9181
9182
9183 /* ... Otherwise out of the overflow area. */
9184
9185 t = ovf;
9186 if (size < UNITS_PER_LONG)
9187 t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG - size);
9188
9189 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
9190
9191 gimplify_assign (addr, t, pre_p);
9192
9193 t = fold_build_pointer_plus_hwi (t, size);
9194 gimplify_assign (ovf, t, pre_p);
9195
9196 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
9197
9198
9199 /* Increment register save count. */
9200
9201 u = build2 (PREINCREMENT_EXPR, TREE_TYPE (reg), reg,
9202 fold_convert (TREE_TYPE (reg), size_int (n_reg)));
9203 gimplify_and_add (u, pre_p);
9204
9205 if (indirect_p)
9206 {
9207 t = build_pointer_type_for_mode (build_pointer_type (type),
9208 ptr_mode, true);
9209 addr = fold_convert (t, addr);
9210 addr = build_va_arg_indirect_ref (addr);
9211 }
9212 else
9213 {
9214 t = build_pointer_type_for_mode (type, ptr_mode, true);
9215 addr = fold_convert (t, addr);
9216 }
9217
9218 return build_va_arg_indirect_ref (addr);
9219 }
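/* Worked example of the constants above for a "double" argument on
   64-bit: sav_ofs is 16 * UNITS_PER_LONG == 128, sav_scale is 8 and
   max_reg is FP_ARG_NUM_REG - 1 == 3, so while __fpr <= 3 the value is
   fetched from __reg_save_area + 128 + __fpr * 8, otherwise from the
   overflow area.  */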
9220
9221
9222 /* Builtins. */
9223
9224 enum s390_builtin
9225 {
9226 S390_BUILTIN_THREAD_POINTER,
9227 S390_BUILTIN_SET_THREAD_POINTER,
9228
9229 S390_BUILTIN_max
9230 };
9231
9232 static enum insn_code const code_for_builtin_64[S390_BUILTIN_max] = {
9233 CODE_FOR_get_tp_64,
9234 CODE_FOR_set_tp_64
9235 };
9236
9237 static enum insn_code const code_for_builtin_31[S390_BUILTIN_max] = {
9238 CODE_FOR_get_tp_31,
9239 CODE_FOR_set_tp_31
9240 };
9241
9242 static void
9243 s390_init_builtins (void)
9244 {
9245 tree ftype;
9246
9247 ftype = build_function_type_list (ptr_type_node, NULL_TREE);
9248 add_builtin_function ("__builtin_thread_pointer", ftype,
9249 S390_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
9250 NULL, NULL_TREE);
9251
9252 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
9253 add_builtin_function ("__builtin_set_thread_pointer", ftype,
9254 S390_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
9255 NULL, NULL_TREE);
9256 }
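/* The builtins registered above are used from C like this
   (illustrative snippet only):

     void *tp = __builtin_thread_pointer ();
     __builtin_set_thread_pointer (tp);  */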
9257
9258 /* Expand an expression EXP that calls a built-in function,
9259 with result going to TARGET if that's convenient
9260 (and in mode MODE if that's convenient).
9261 SUBTARGET may be used as the target for computing one of EXP's operands.
9262 IGNORE is nonzero if the value is to be ignored. */
9263
9264 static rtx
9265 s390_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
9266 enum machine_mode mode ATTRIBUTE_UNUSED,
9267 int ignore ATTRIBUTE_UNUSED)
9268 {
9269 #define MAX_ARGS 2
9270
9271 enum insn_code const *code_for_builtin =
9272 TARGET_64BIT ? code_for_builtin_64 : code_for_builtin_31;
9273
9274 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
9275 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
9276 enum insn_code icode;
9277 rtx op[MAX_ARGS], pat;
9278 int arity;
9279 bool nonvoid;
9280 tree arg;
9281 call_expr_arg_iterator iter;
9282
9283 if (fcode >= S390_BUILTIN_max)
9284 internal_error ("bad builtin fcode");
9285 icode = code_for_builtin[fcode];
9286 if (icode == 0)
9287 internal_error ("bad builtin fcode");
9288
9289 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
9290
9291 arity = 0;
9292 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
9293 {
9294 const struct insn_operand_data *insn_op;
9295
9296 if (arg == error_mark_node)
9297 return NULL_RTX;
9298 if (arity > MAX_ARGS)
9299 return NULL_RTX;
9300
9301 insn_op = &insn_data[icode].operand[arity + nonvoid];
9302
9303 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
9304
9305 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
9306 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
9307 arity++;
9308 }
9309
9310 if (nonvoid)
9311 {
9312 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9313 if (!target
9314 || GET_MODE (target) != tmode
9315 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
9316 target = gen_reg_rtx (tmode);
9317 }
9318
9319 switch (arity)
9320 {
9321 case 0:
9322 pat = GEN_FCN (icode) (target);
9323 break;
9324 case 1:
9325 if (nonvoid)
9326 pat = GEN_FCN (icode) (target, op[0]);
9327 else
9328 pat = GEN_FCN (icode) (op[0]);
9329 break;
9330 case 2:
9331 pat = GEN_FCN (icode) (target, op[0], op[1]);
9332 break;
9333 default:
9334 gcc_unreachable ();
9335 }
9336 if (!pat)
9337 return NULL_RTX;
9338 emit_insn (pat);
9339
9340 if (nonvoid)
9341 return target;
9342 else
9343 return const0_rtx;
9344 }
9345
9346
9347 /* Output assembly code for the trampoline template to
9348 stdio stream FILE.
9349
9350 On S/390, we use gpr 1 internally in the trampoline code;
9351 gpr 0 is used to hold the static chain. */
9352
9353 static void
9354 s390_asm_trampoline_template (FILE *file)
9355 {
9356 rtx op[2];
9357 op[0] = gen_rtx_REG (Pmode, 0);
9358 op[1] = gen_rtx_REG (Pmode, 1);
9359
9360 if (TARGET_64BIT)
9361 {
9362 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
9363 output_asm_insn ("lmg\t%0,%1,14(%1)", op); /* 6 byte */
9364 output_asm_insn ("br\t%1", op); /* 2 byte */
9365 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 10));
9366 }
9367 else
9368 {
9369 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
9370 output_asm_insn ("lm\t%0,%1,6(%1)", op); /* 4 byte */
9371 output_asm_insn ("br\t%1", op); /* 2 byte */
9372 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 8));
9373 }
9374 }
9375
9376 /* Emit RTL insns to initialize the variable parts of a trampoline.
9377 FNADDR is an RTX for the address of the function's pure code.
9378 CXT is an RTX for the static chain value for the function. */
9379
9380 static void
9381 s390_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
9382 {
9383 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
9384 rtx mem;
9385
9386 emit_block_move (m_tramp, assemble_trampoline_template (),
9387 GEN_INT (2 * UNITS_PER_LONG), BLOCK_OP_NORMAL);
9388
9389 mem = adjust_address (m_tramp, Pmode, 2 * UNITS_PER_LONG);
9390 emit_move_insn (mem, cxt);
9391 mem = adjust_address (m_tramp, Pmode, 3 * UNITS_PER_LONG);
9392 emit_move_insn (mem, fnaddr);
9393 }
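/* Putting the template and the initialization above together for the
   64-bit case, the trampoline looks like:

     offset  0: basr %r1,0              (%r1 := trampoline address + 2)
     offset  2: lmg  %r0,%r1,14(%r1)    (loads from trampoline + 16)
     offset  8: br   %r1
     offset 16: static chain value      (2 * UNITS_PER_LONG)
     offset 24: target function address (3 * UNITS_PER_LONG)

   so on entry %r0 holds the static chain and control transfers to the
   nested function's code.  */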
9394
9395 /* Output assembler code to FILE to increment profiler label # LABELNO
9396 for profiling a function entry. */
9397
9398 void
9399 s390_function_profiler (FILE *file, int labelno)
9400 {
9401 rtx op[7];
9402
9403 char label[128];
9404 ASM_GENERATE_INTERNAL_LABEL (label, "LP", labelno);
9405
9406 fprintf (file, "# function profiler \n");
9407
9408 op[0] = gen_rtx_REG (Pmode, RETURN_REGNUM);
9409 op[1] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
9410 op[1] = gen_rtx_MEM (Pmode, plus_constant (Pmode, op[1], UNITS_PER_LONG));
9411
9412 op[2] = gen_rtx_REG (Pmode, 1);
9413 op[3] = gen_rtx_SYMBOL_REF (Pmode, label);
9414 SYMBOL_REF_FLAGS (op[3]) = SYMBOL_FLAG_LOCAL;
9415
9416 op[4] = gen_rtx_SYMBOL_REF (Pmode, "_mcount");
9417 if (flag_pic)
9418 {
9419 op[4] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[4]), UNSPEC_PLT);
9420 op[4] = gen_rtx_CONST (Pmode, op[4]);
9421 }
9422
9423 if (TARGET_64BIT)
9424 {
9425 output_asm_insn ("stg\t%0,%1", op);
9426 output_asm_insn ("larl\t%2,%3", op);
9427 output_asm_insn ("brasl\t%0,%4", op);
9428 output_asm_insn ("lg\t%0,%1", op);
9429 }
9430 else if (!flag_pic)
9431 {
9432 op[6] = gen_label_rtx ();
9433
9434 output_asm_insn ("st\t%0,%1", op);
9435 output_asm_insn ("bras\t%2,%l6", op);
9436 output_asm_insn (".long\t%4", op);
9437 output_asm_insn (".long\t%3", op);
9438 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
9439 output_asm_insn ("l\t%0,0(%2)", op);
9440 output_asm_insn ("l\t%2,4(%2)", op);
9441 output_asm_insn ("basr\t%0,%0", op);
9442 output_asm_insn ("l\t%0,%1", op);
9443 }
9444 else
9445 {
9446 op[5] = gen_label_rtx ();
9447 op[6] = gen_label_rtx ();
9448
9449 output_asm_insn ("st\t%0,%1", op);
9450 output_asm_insn ("bras\t%2,%l6", op);
9451 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[5]));
9452 output_asm_insn (".long\t%4-%l5", op);
9453 output_asm_insn (".long\t%3-%l5", op);
9454 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
9455 output_asm_insn ("lr\t%0,%2", op);
9456 output_asm_insn ("a\t%0,0(%2)", op);
9457 output_asm_insn ("a\t%2,4(%2)", op);
9458 output_asm_insn ("basr\t%0,%0", op);
9459 output_asm_insn ("l\t%0,%1", op);
9460 }
9461 }
9462
9463 /* Encode symbol attributes (local vs. global, tls model) of a SYMBOL_REF
9464 into its SYMBOL_REF_FLAGS. */
9465
9466 static void
9467 s390_encode_section_info (tree decl, rtx rtl, int first)
9468 {
9469 default_encode_section_info (decl, rtl, first);
9470
9471 if (TREE_CODE (decl) == VAR_DECL)
9472 {
9473 /* If a variable has a forced alignment to < 2 bytes, mark it
9474 with SYMBOL_FLAG_ALIGN1 to prevent it from being used as a LARL
9475 operand. */
9476 if (DECL_USER_ALIGN (decl) && DECL_ALIGN (decl) < 16)
9477 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_ALIGN1;
9478 if (!DECL_SIZE (decl)
9479 || !DECL_ALIGN (decl)
9480 || !host_integerp (DECL_SIZE (decl), 0)
9481 || (DECL_ALIGN (decl) <= 64
9482 && DECL_ALIGN (decl) != tree_low_cst (DECL_SIZE (decl), 0)))
9483 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
9484 }
9485
9486 /* Literal pool references don't have a decl so they are handled
9487 differently here. We rely on the information in the MEM_ALIGN
9488 entry to decide upon natural alignment. */
9489 if (MEM_P (rtl)
9490 && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
9491 && TREE_CONSTANT_POOL_ADDRESS_P (XEXP (rtl, 0))
9492 && (MEM_ALIGN (rtl) == 0
9493 || GET_MODE_BITSIZE (GET_MODE (rtl)) == 0
9494 || MEM_ALIGN (rtl) < GET_MODE_BITSIZE (GET_MODE (rtl))))
9495 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
9496 }
9497
9498 /* Output thunk to FILE that implements a C++ virtual function call (with
9499 multiple inheritance) to FUNCTION. The thunk adjusts the this pointer
9500 by DELTA, and unless VCALL_OFFSET is zero, applies an additional adjustment
9501 stored at VCALL_OFFSET in the vtable whose address is located at offset 0
9502 relative to the resulting this pointer. */
9503
9504 static void
9505 s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
9506 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
9507 tree function)
9508 {
9509 rtx op[10];
9510 int nonlocal = 0;
9511
9512 /* Make sure unwind info is emitted for the thunk if needed. */
9513 final_start_function (emit_barrier (), file, 1);
9514
9515 /* Operand 0 is the target function. */
9516 op[0] = XEXP (DECL_RTL (function), 0);
9517 if (flag_pic && !SYMBOL_REF_LOCAL_P (op[0]))
9518 {
9519 nonlocal = 1;
9520 op[0] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[0]),
9521 TARGET_64BIT ? UNSPEC_PLT : UNSPEC_GOT);
9522 op[0] = gen_rtx_CONST (Pmode, op[0]);
9523 }
9524
9525 /* Operand 1 is the 'this' pointer. */
9526 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
9527 op[1] = gen_rtx_REG (Pmode, 3);
9528 else
9529 op[1] = gen_rtx_REG (Pmode, 2);
9530
9531 /* Operand 2 is the delta. */
9532 op[2] = GEN_INT (delta);
9533
9534 /* Operand 3 is the vcall_offset. */
9535 op[3] = GEN_INT (vcall_offset);
9536
9537 /* Operand 4 is the temporary register. */
9538 op[4] = gen_rtx_REG (Pmode, 1);
9539
9540 /* Operands 5 to 8 can be used as labels. */
9541 op[5] = NULL_RTX;
9542 op[6] = NULL_RTX;
9543 op[7] = NULL_RTX;
9544 op[8] = NULL_RTX;
9545
9546 /* Operand 9 can be used for temporary register. */
9547 op[9] = NULL_RTX;
9548
9549 /* Generate code. */
9550 if (TARGET_64BIT)
9551 {
9552 /* Setup literal pool pointer if required. */
9553 if ((!DISP_IN_RANGE (delta)
9554 && !CONST_OK_FOR_K (delta)
9555 && !CONST_OK_FOR_Os (delta))
9556 || (!DISP_IN_RANGE (vcall_offset)
9557 && !CONST_OK_FOR_K (vcall_offset)
9558 && !CONST_OK_FOR_Os (vcall_offset)))
9559 {
9560 op[5] = gen_label_rtx ();
9561 output_asm_insn ("larl\t%4,%5", op);
9562 }
9563
9564 /* Add DELTA to this pointer. */
9565 if (delta)
9566 {
9567 if (CONST_OK_FOR_J (delta))
9568 output_asm_insn ("la\t%1,%2(%1)", op);
9569 else if (DISP_IN_RANGE (delta))
9570 output_asm_insn ("lay\t%1,%2(%1)", op);
9571 else if (CONST_OK_FOR_K (delta))
9572 output_asm_insn ("aghi\t%1,%2", op);
9573 else if (CONST_OK_FOR_Os (delta))
9574 output_asm_insn ("agfi\t%1,%2", op);
9575 else
9576 {
9577 op[6] = gen_label_rtx ();
9578 output_asm_insn ("agf\t%1,%6-%5(%4)", op);
9579 }
9580 }
9581
9582 /* Perform vcall adjustment. */
9583 if (vcall_offset)
9584 {
9585 if (DISP_IN_RANGE (vcall_offset))
9586 {
9587 output_asm_insn ("lg\t%4,0(%1)", op);
9588 output_asm_insn ("ag\t%1,%3(%4)", op);
9589 }
9590 else if (CONST_OK_FOR_K (vcall_offset))
9591 {
9592 output_asm_insn ("lghi\t%4,%3", op);
9593 output_asm_insn ("ag\t%4,0(%1)", op);
9594 output_asm_insn ("ag\t%1,0(%4)", op);
9595 }
9596 else if (CONST_OK_FOR_Os (vcall_offset))
9597 {
9598 output_asm_insn ("lgfi\t%4,%3", op);
9599 output_asm_insn ("ag\t%4,0(%1)", op);
9600 output_asm_insn ("ag\t%1,0(%4)", op);
9601 }
9602 else
9603 {
9604 op[7] = gen_label_rtx ();
9605 output_asm_insn ("llgf\t%4,%7-%5(%4)", op);
9606 output_asm_insn ("ag\t%4,0(%1)", op);
9607 output_asm_insn ("ag\t%1,0(%4)", op);
9608 }
9609 }
9610
9611 /* Jump to target. */
9612 output_asm_insn ("jg\t%0", op);
9613
9614 /* Output literal pool if required. */
9615 if (op[5])
9616 {
9617 output_asm_insn (".align\t4", op);
9618 targetm.asm_out.internal_label (file, "L",
9619 CODE_LABEL_NUMBER (op[5]));
9620 }
9621 if (op[6])
9622 {
9623 targetm.asm_out.internal_label (file, "L",
9624 CODE_LABEL_NUMBER (op[6]));
9625 output_asm_insn (".long\t%2", op);
9626 }
9627 if (op[7])
9628 {
9629 targetm.asm_out.internal_label (file, "L",
9630 CODE_LABEL_NUMBER (op[7]));
9631 output_asm_insn (".long\t%3", op);
9632 }
9633 }
9634 else
9635 {
9636 /* Setup base pointer if required. */
9637 if (!vcall_offset
9638 || (!DISP_IN_RANGE (delta)
9639 && !CONST_OK_FOR_K (delta)
9640 && !CONST_OK_FOR_Os (delta))
9641 || (!DISP_IN_RANGE (delta)
9642 && !CONST_OK_FOR_K (vcall_offset)
9643 && !CONST_OK_FOR_Os (vcall_offset)))
9644 {
9645 op[5] = gen_label_rtx ();
9646 output_asm_insn ("basr\t%4,0", op);
9647 targetm.asm_out.internal_label (file, "L",
9648 CODE_LABEL_NUMBER (op[5]));
9649 }
9650
9651 /* Add DELTA to this pointer. */
9652 if (delta)
9653 {
9654 if (CONST_OK_FOR_J (delta))
9655 output_asm_insn ("la\t%1,%2(%1)", op);
9656 else if (DISP_IN_RANGE (delta))
9657 output_asm_insn ("lay\t%1,%2(%1)", op);
9658 else if (CONST_OK_FOR_K (delta))
9659 output_asm_insn ("ahi\t%1,%2", op);
9660 else if (CONST_OK_FOR_Os (delta))
9661 output_asm_insn ("afi\t%1,%2", op);
9662 else
9663 {
9664 op[6] = gen_label_rtx ();
9665 output_asm_insn ("a\t%1,%6-%5(%4)", op);
9666 }
9667 }
9668
9669 /* Perform vcall adjustment. */
9670 if (vcall_offset)
9671 {
9672 if (CONST_OK_FOR_J (vcall_offset))
9673 {
9674 output_asm_insn ("l\t%4,0(%1)", op);
9675 output_asm_insn ("a\t%1,%3(%4)", op);
9676 }
9677 else if (DISP_IN_RANGE (vcall_offset))
9678 {
9679 output_asm_insn ("l\t%4,0(%1)", op);
9680 output_asm_insn ("ay\t%1,%3(%4)", op);
9681 }
9682 else if (CONST_OK_FOR_K (vcall_offset))
9683 {
9684 output_asm_insn ("lhi\t%4,%3", op);
9685 output_asm_insn ("a\t%4,0(%1)", op);
9686 output_asm_insn ("a\t%1,0(%4)", op);
9687 }
9688 else if (CONST_OK_FOR_Os (vcall_offset))
9689 {
9690 output_asm_insn ("iilf\t%4,%3", op);
9691 output_asm_insn ("a\t%4,0(%1)", op);
9692 output_asm_insn ("a\t%1,0(%4)", op);
9693 }
9694 else
9695 {
9696 op[7] = gen_label_rtx ();
9697 output_asm_insn ("l\t%4,%7-%5(%4)", op);
9698 output_asm_insn ("a\t%4,0(%1)", op);
9699 output_asm_insn ("a\t%1,0(%4)", op);
9700 }
9701
9702 /* We had to clobber the base pointer register.
9703 Re-setup the base pointer (with a different base). */
9704 op[5] = gen_label_rtx ();
9705 output_asm_insn ("basr\t%4,0", op);
9706 targetm.asm_out.internal_label (file, "L",
9707 CODE_LABEL_NUMBER (op[5]));
9708 }
9709
9710 /* Jump to target. */
9711 op[8] = gen_label_rtx ();
9712
9713 if (!flag_pic)
9714 output_asm_insn ("l\t%4,%8-%5(%4)", op);
9715 else if (!nonlocal)
9716 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9717 /* We cannot call through .plt, since .plt requires %r12 loaded. */
9718 else if (flag_pic == 1)
9719 {
9720 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9721 output_asm_insn ("l\t%4,%0(%4)", op);
9722 }
9723 else if (flag_pic == 2)
9724 {
9725 op[9] = gen_rtx_REG (Pmode, 0);
9726 output_asm_insn ("l\t%9,%8-4-%5(%4)", op);
9727 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9728 output_asm_insn ("ar\t%4,%9", op);
9729 output_asm_insn ("l\t%4,0(%4)", op);
9730 }
9731
9732 output_asm_insn ("br\t%4", op);
9733
9734 /* Output literal pool. */
9735 output_asm_insn (".align\t4", op);
9736
9737 if (nonlocal && flag_pic == 2)
9738 output_asm_insn (".long\t%0", op);
9739 if (nonlocal)
9740 {
9741 op[0] = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
9742 SYMBOL_REF_FLAGS (op[0]) = SYMBOL_FLAG_LOCAL;
9743 }
9744
9745 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[8]));
9746 if (!flag_pic)
9747 output_asm_insn (".long\t%0", op);
9748 else
9749 output_asm_insn (".long\t%0-%5", op);
9750
9751 if (op[6])
9752 {
9753 targetm.asm_out.internal_label (file, "L",
9754 CODE_LABEL_NUMBER (op[6]));
9755 output_asm_insn (".long\t%2", op);
9756 }
9757 if (op[7])
9758 {
9759 targetm.asm_out.internal_label (file, "L",
9760 CODE_LABEL_NUMBER (op[7]));
9761 output_asm_insn (".long\t%3", op);
9762 }
9763 }
9764 final_end_function ();
9765 }
9766
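/* As an illustration of the thunk code generated above (a sketch, not the
   output of any particular build): for a 64-bit thunk with a small DELTA of
   16, no VCALL_OFFSET and a locally bound target, only the pointer
   adjustment and the tail jump assembled from the templates above remain:

       la   %r2,16(%r2)          # add DELTA to the 'this' pointer
       jg   target               # tail-jump to the target function

   Larger values of DELTA or VCALL_OFFSET fall back to lay/aghi/agfi, to an
   extra dereference through %r1, or to a literal-pool entry addressed via
   the label in operand 5, exactly as the cascade of cases above shows.  */
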
9767 static bool
9768 s390_valid_pointer_mode (enum machine_mode mode)
9769 {
9770 return (mode == SImode || (TARGET_64BIT && mode == DImode));
9771 }
9772
9773 /* Checks whether the given CALL_EXPR would use a call-saved
9774    register.  This is used to decide whether sibling call
9775    optimization could be performed on the respective function
9776    call.  */
9777
9778 static bool
9779 s390_call_saved_register_used (tree call_expr)
9780 {
9781 CUMULATIVE_ARGS cum_v;
9782 cumulative_args_t cum;
9783 tree parameter;
9784 enum machine_mode mode;
9785 tree type;
9786 rtx parm_rtx;
9787 int reg, i;
9788
9789 INIT_CUMULATIVE_ARGS (cum_v, NULL, NULL, 0, 0);
9790 cum = pack_cumulative_args (&cum_v);
9791
9792 for (i = 0; i < call_expr_nargs (call_expr); i++)
9793 {
9794 parameter = CALL_EXPR_ARG (call_expr, i);
9795 gcc_assert (parameter);
9796
9797 /* For an undeclared variable passed as parameter we will get
9798 an ERROR_MARK node here. */
9799 if (TREE_CODE (parameter) == ERROR_MARK)
9800 return true;
9801
9802 type = TREE_TYPE (parameter);
9803 gcc_assert (type);
9804
9805 mode = TYPE_MODE (type);
9806 gcc_assert (mode);
9807
9808 if (pass_by_reference (&cum_v, mode, type, true))
9809 {
9810 mode = Pmode;
9811 type = build_pointer_type (type);
9812 }
9813
9814 parm_rtx = s390_function_arg (cum, mode, type, 0);
9815
9816 s390_function_arg_advance (cum, mode, type, 0);
9817
9818 if (!parm_rtx)
9819 continue;
9820
9821 if (REG_P (parm_rtx))
9822 {
9823 for (reg = 0;
9824 reg < HARD_REGNO_NREGS (REGNO (parm_rtx), GET_MODE (parm_rtx));
9825 reg++)
9826 if (!call_used_regs[reg + REGNO (parm_rtx)])
9827 return true;
9828 }
9829
9830 if (GET_CODE (parm_rtx) == PARALLEL)
9831 {
9832 int i;
9833
9834 for (i = 0; i < XVECLEN (parm_rtx, 0); i++)
9835 {
9836 rtx r = XEXP (XVECEXP (parm_rtx, 0, i), 0);
9837
9838 gcc_assert (REG_P (r));
9839
9840 for (reg = 0;
9841 reg < HARD_REGNO_NREGS (REGNO (r), GET_MODE (r));
9842 reg++)
9843 if (!call_used_regs[reg + REGNO (r)])
9844 return true;
9845 }
9846 }
9847
9848 }
9849 return false;
9850 }
9851
9852 /* Return true if the given call expression can be
9853 turned into a sibling call.
9854 DECL holds the declaration of the function to be called whereas
9855 EXP is the call expression itself. */
9856
9857 static bool
9858 s390_function_ok_for_sibcall (tree decl, tree exp)
9859 {
9860 /* The TPF epilogue uses register 1. */
9861 if (TARGET_TPF_PROFILING)
9862 return false;
9863
9864 /* The 31 bit PLT code uses register 12 (GOT pointer - caller saved)
9865 which would have to be restored before the sibcall. */
9866 if (!TARGET_64BIT && flag_pic && decl && !targetm.binds_local_p (decl))
9867 return false;
9868
9869   /* Register 6 on s390 is available as an argument register but unfortunately
9870      call-saved.  This makes functions needing this register for arguments
9871      not suitable for sibcalls.  */
9872 return !s390_call_saved_register_used (exp);
9873 }
9874
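/* A sketch of the restriction above (the C code is made up for illustration
   and is not taken from the testsuite).  With the standard s390 calling
   convention the fifth integer argument is passed in %r6, which is
   call-saved, so

       extern int callee (int, int, int, int, int);

       int
       caller (int a, int b, int c, int d, int e)
       {
         return callee (a, b, c, d, e);
       }

   is not turned into a sibling call: s390_call_saved_register_used finds
   the argument living in %r6 and s390_function_ok_for_sibcall rejects the
   optimization.  */
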
9875 /* Return the fixed registers used for condition codes. */
9876
9877 static bool
9878 s390_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
9879 {
9880 *p1 = CC_REGNUM;
9881 *p2 = INVALID_REGNUM;
9882
9883 return true;
9884 }
9885
9886 /* This function is used by the call expanders of the machine description.
9887 It emits the call insn itself together with the necessary operations
9888 to adjust the target address and returns the emitted insn.
9889 ADDR_LOCATION is the target address rtx
9890 TLS_CALL the location of the thread-local symbol
9891 RESULT_REG the register where the result of the call should be stored
9892 RETADDR_REG the register where the return address should be stored
9893 If this parameter is NULL_RTX the call is considered
9894 to be a sibling call. */
9895
9896 rtx
9897 s390_emit_call (rtx addr_location, rtx tls_call, rtx result_reg,
9898 rtx retaddr_reg)
9899 {
9900 bool plt_call = false;
9901 rtx insn;
9902 rtx call;
9903 rtx clobber;
9904 rtvec vec;
9905
9906 /* Direct function calls need special treatment. */
9907 if (GET_CODE (addr_location) == SYMBOL_REF)
9908 {
9909 /* When calling a global routine in PIC mode, we must
9910 replace the symbol itself with the PLT stub. */
9911 if (flag_pic && !SYMBOL_REF_LOCAL_P (addr_location))
9912 {
9913 if (retaddr_reg != NULL_RTX)
9914 {
9915 addr_location = gen_rtx_UNSPEC (Pmode,
9916 gen_rtvec (1, addr_location),
9917 UNSPEC_PLT);
9918 addr_location = gen_rtx_CONST (Pmode, addr_location);
9919 plt_call = true;
9920 }
9921 else
9922 /* For -fpic code the PLT entries might use r12 which is
9923 call-saved. Therefore we cannot do a sibcall when
9924 calling directly using a symbol ref. When reaching
9925 this point we decided (in s390_function_ok_for_sibcall)
9926 to do a sibcall for a function pointer but one of the
9927 optimizers was able to get rid of the function pointer
9928 by propagating the symbol ref into the call. This
9929 optimization is illegal for S/390 so we turn the direct
9930              call into an indirect call again. */
9931 addr_location = force_reg (Pmode, addr_location);
9932 }
9933
9934 /* Unless we can use the bras(l) insn, force the
9935 routine address into a register. */
9936 if (!TARGET_SMALL_EXEC && !TARGET_CPU_ZARCH)
9937 {
9938 if (flag_pic)
9939 addr_location = legitimize_pic_address (addr_location, 0);
9940 else
9941 addr_location = force_reg (Pmode, addr_location);
9942 }
9943 }
9944
9945 /* If it is already an indirect call or the code above moved the
9946 SYMBOL_REF to somewhere else make sure the address can be found in
9947 register 1. */
9948 if (retaddr_reg == NULL_RTX
9949 && GET_CODE (addr_location) != SYMBOL_REF
9950 && !plt_call)
9951 {
9952 emit_move_insn (gen_rtx_REG (Pmode, SIBCALL_REGNUM), addr_location);
9953 addr_location = gen_rtx_REG (Pmode, SIBCALL_REGNUM);
9954 }
9955
9956 addr_location = gen_rtx_MEM (QImode, addr_location);
9957 call = gen_rtx_CALL (VOIDmode, addr_location, const0_rtx);
9958
9959 if (result_reg != NULL_RTX)
9960 call = gen_rtx_SET (VOIDmode, result_reg, call);
9961
9962 if (retaddr_reg != NULL_RTX)
9963 {
9964 clobber = gen_rtx_CLOBBER (VOIDmode, retaddr_reg);
9965
9966 if (tls_call != NULL_RTX)
9967 vec = gen_rtvec (3, call, clobber,
9968 gen_rtx_USE (VOIDmode, tls_call));
9969 else
9970 vec = gen_rtvec (2, call, clobber);
9971
9972 call = gen_rtx_PARALLEL (VOIDmode, vec);
9973 }
9974
9975 insn = emit_call_insn (call);
9976
9977 /* 31-bit PLT stubs and tls calls use the GOT register implicitly. */
9978 if ((!TARGET_64BIT && plt_call) || tls_call != NULL_RTX)
9979 {
9980 /* s390_function_ok_for_sibcall should
9981 have denied sibcalls in this case. */
9982 gcc_assert (retaddr_reg != NULL_RTX);
9983 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, 12));
9984 }
9985 return insn;
9986 }
9987
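/* For reference, a rough sketch of the RTL built above: a normal call with
   a result register and a TLS operand ends up wrapped as

       (parallel [(set (reg result) (call (mem:QI addr) (const_int 0)))
                  (clobber (reg retaddr))
                  (use tls_call)])

   while a sibling call (RETADDR_REG == NULL_RTX) is just the bare CALL,
   with an indirect target address forced into register 1 (SIBCALL_REGNUM)
   beforehand.  */
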
9988 /* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
9989
9990 static void
9991 s390_conditional_register_usage (void)
9992 {
9993 int i;
9994
9995 if (flag_pic)
9996 {
9997 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
9998 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
9999 }
10000 if (TARGET_CPU_ZARCH)
10001 {
10002 fixed_regs[BASE_REGNUM] = 0;
10003 call_used_regs[BASE_REGNUM] = 0;
10004 fixed_regs[RETURN_REGNUM] = 0;
10005 call_used_regs[RETURN_REGNUM] = 0;
10006 }
10007 if (TARGET_64BIT)
10008 {
10009 for (i = 24; i < 32; i++)
10010 call_used_regs[i] = call_really_used_regs[i] = 0;
10011 }
10012 else
10013 {
10014 for (i = 18; i < 20; i++)
10015 call_used_regs[i] = call_really_used_regs[i] = 0;
10016 }
10017
10018 if (TARGET_SOFT_FLOAT)
10019 {
10020 for (i = 16; i < 32; i++)
10021 call_used_regs[i] = fixed_regs[i] = 1;
10022 }
10023 }
10024
10025 /* Corresponding function to eh_return expander. */
10026
10027 static GTY(()) rtx s390_tpf_eh_return_symbol;
10028 void
10029 s390_emit_tpf_eh_return (rtx target)
10030 {
10031 rtx insn, reg;
10032
10033 if (!s390_tpf_eh_return_symbol)
10034 s390_tpf_eh_return_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tpf_eh_return");
10035
10036 reg = gen_rtx_REG (Pmode, 2);
10037
10038 emit_move_insn (reg, target);
10039 insn = s390_emit_call (s390_tpf_eh_return_symbol, NULL_RTX, reg,
10040 gen_rtx_REG (Pmode, RETURN_REGNUM));
10041 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
10042
10043 emit_move_insn (EH_RETURN_HANDLER_RTX, reg);
10044 }
10045
10046 /* Rework the prologue/epilogue to avoid saving/restoring
10047 registers unnecessarily. */
10048
10049 static void
10050 s390_optimize_prologue (void)
10051 {
10052 rtx insn, new_insn, next_insn;
10053
10054 /* Do a final recompute of the frame-related data. */
10055
10056 s390_update_frame_layout ();
10057
10058 /* If all special registers are in fact used, there's nothing we
10059 can do, so no point in walking the insn list. */
10060
10061 if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
10062 && cfun_frame_layout.last_save_gpr >= BASE_REGNUM
10063 && (TARGET_CPU_ZARCH
10064 || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
10065 && cfun_frame_layout.last_save_gpr >= RETURN_REGNUM)))
10066 return;
10067
10068 /* Search for prologue/epilogue insns and replace them. */
10069
10070 for (insn = get_insns (); insn; insn = next_insn)
10071 {
10072 int first, last, off;
10073 rtx set, base, offset;
10074
10075 next_insn = NEXT_INSN (insn);
10076
10077 if (GET_CODE (insn) != INSN)
10078 continue;
10079
10080 if (GET_CODE (PATTERN (insn)) == PARALLEL
10081 && store_multiple_operation (PATTERN (insn), VOIDmode))
10082 {
10083 set = XVECEXP (PATTERN (insn), 0, 0);
10084 first = REGNO (SET_SRC (set));
10085 last = first + XVECLEN (PATTERN (insn), 0) - 1;
10086 offset = const0_rtx;
10087 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
10088 off = INTVAL (offset);
10089
10090 if (GET_CODE (base) != REG || off < 0)
10091 continue;
10092 if (cfun_frame_layout.first_save_gpr != -1
10093 && (cfun_frame_layout.first_save_gpr < first
10094 || cfun_frame_layout.last_save_gpr > last))
10095 continue;
10096 if (REGNO (base) != STACK_POINTER_REGNUM
10097 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10098 continue;
10099 if (first > BASE_REGNUM || last < BASE_REGNUM)
10100 continue;
10101
10102 if (cfun_frame_layout.first_save_gpr != -1)
10103 {
10104 new_insn = save_gprs (base,
10105 off + (cfun_frame_layout.first_save_gpr
10106 - first) * UNITS_PER_LONG,
10107 cfun_frame_layout.first_save_gpr,
10108 cfun_frame_layout.last_save_gpr);
10109 new_insn = emit_insn_before (new_insn, insn);
10110 INSN_ADDRESSES_NEW (new_insn, -1);
10111 }
10112
10113 remove_insn (insn);
10114 continue;
10115 }
10116
10117 if (cfun_frame_layout.first_save_gpr == -1
10118 && GET_CODE (PATTERN (insn)) == SET
10119 && GET_CODE (SET_SRC (PATTERN (insn))) == REG
10120 && (REGNO (SET_SRC (PATTERN (insn))) == BASE_REGNUM
10121 || (!TARGET_CPU_ZARCH
10122 && REGNO (SET_SRC (PATTERN (insn))) == RETURN_REGNUM))
10123 && GET_CODE (SET_DEST (PATTERN (insn))) == MEM)
10124 {
10125 set = PATTERN (insn);
10126 first = REGNO (SET_SRC (set));
10127 offset = const0_rtx;
10128 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
10129 off = INTVAL (offset);
10130
10131 if (GET_CODE (base) != REG || off < 0)
10132 continue;
10133 if (REGNO (base) != STACK_POINTER_REGNUM
10134 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10135 continue;
10136
10137 remove_insn (insn);
10138 continue;
10139 }
10140
10141 if (GET_CODE (PATTERN (insn)) == PARALLEL
10142 && load_multiple_operation (PATTERN (insn), VOIDmode))
10143 {
10144 set = XVECEXP (PATTERN (insn), 0, 0);
10145 first = REGNO (SET_DEST (set));
10146 last = first + XVECLEN (PATTERN (insn), 0) - 1;
10147 offset = const0_rtx;
10148 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
10149 off = INTVAL (offset);
10150
10151 if (GET_CODE (base) != REG || off < 0)
10152 continue;
10153 if (cfun_frame_layout.first_restore_gpr != -1
10154 && (cfun_frame_layout.first_restore_gpr < first
10155 || cfun_frame_layout.last_restore_gpr > last))
10156 continue;
10157 if (REGNO (base) != STACK_POINTER_REGNUM
10158 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10159 continue;
10160 if (first > BASE_REGNUM || last < BASE_REGNUM)
10161 continue;
10162
10163 if (cfun_frame_layout.first_restore_gpr != -1)
10164 {
10165 new_insn = restore_gprs (base,
10166 off + (cfun_frame_layout.first_restore_gpr
10167 - first) * UNITS_PER_LONG,
10168 cfun_frame_layout.first_restore_gpr,
10169 cfun_frame_layout.last_restore_gpr);
10170 new_insn = emit_insn_before (new_insn, insn);
10171 INSN_ADDRESSES_NEW (new_insn, -1);
10172 }
10173
10174 remove_insn (insn);
10175 continue;
10176 }
10177
10178 if (cfun_frame_layout.first_restore_gpr == -1
10179 && GET_CODE (PATTERN (insn)) == SET
10180 && GET_CODE (SET_DEST (PATTERN (insn))) == REG
10181 && (REGNO (SET_DEST (PATTERN (insn))) == BASE_REGNUM
10182 || (!TARGET_CPU_ZARCH
10183 && REGNO (SET_DEST (PATTERN (insn))) == RETURN_REGNUM))
10184 && GET_CODE (SET_SRC (PATTERN (insn))) == MEM)
10185 {
10186 set = PATTERN (insn);
10187 first = REGNO (SET_DEST (set));
10188 offset = const0_rtx;
10189 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
10190 off = INTVAL (offset);
10191
10192 if (GET_CODE (base) != REG || off < 0)
10193 continue;
10194 if (REGNO (base) != STACK_POINTER_REGNUM
10195 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10196 continue;
10197
10198 remove_insn (insn);
10199 continue;
10200 }
10201 }
10202 }
10203
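/* A concrete instance of the rewrite above (offsets follow the usual 64-bit
   register save area starting at 48(%r15) and are shown for illustration
   only).  If the generic prologue stored

       stmg %r6,%r15,48(%r15)

   but the final frame layout says only %r14 and %r15 really need saving,
   the store-multiple is replaced by

       stmg %r14,%r15,112(%r15)     # 48 + (14 - 6) * UNITS_PER_LONG

   and the matching load-multiple in the epilogue is narrowed in the same
   way.  */
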
10204 /* On z10 and later the dynamic branch prediction must see the
10205    backward jump within a certain window.  If it does not, it falls back
10206    to the static prediction.  This function rearranges the loop backward
10207    branch in a way which makes the static prediction always correct.
10208    The function returns true if it added an instruction. */
10209 static bool
10210 s390_fix_long_loop_prediction (rtx insn)
10211 {
10212 rtx set = single_set (insn);
10213 rtx code_label, label_ref, new_label;
10214 rtx uncond_jump;
10215 rtx cur_insn;
10216 rtx tmp;
10217 int distance;
10218
10219 /* This will exclude branch on count and branch on index patterns
10220 since these are correctly statically predicted. */
10221 if (!set
10222 || SET_DEST (set) != pc_rtx
10223 || GET_CODE (SET_SRC(set)) != IF_THEN_ELSE)
10224 return false;
10225
10226 label_ref = (GET_CODE (XEXP (SET_SRC (set), 1)) == LABEL_REF ?
10227 XEXP (SET_SRC (set), 1) : XEXP (SET_SRC (set), 2));
10228
10229 gcc_assert (GET_CODE (label_ref) == LABEL_REF);
10230
10231 code_label = XEXP (label_ref, 0);
10232
10233 if (INSN_ADDRESSES (INSN_UID (code_label)) == -1
10234 || INSN_ADDRESSES (INSN_UID (insn)) == -1
10235 || (INSN_ADDRESSES (INSN_UID (insn))
10236 - INSN_ADDRESSES (INSN_UID (code_label)) < PREDICT_DISTANCE))
10237 return false;
10238
10239 for (distance = 0, cur_insn = PREV_INSN (insn);
10240 distance < PREDICT_DISTANCE - 6;
10241 distance += get_attr_length (cur_insn), cur_insn = PREV_INSN (cur_insn))
10242 if (!cur_insn || JUMP_P (cur_insn) || LABEL_P (cur_insn))
10243 return false;
10244
10245 new_label = gen_label_rtx ();
10246 uncond_jump = emit_jump_insn_after (
10247 gen_rtx_SET (VOIDmode, pc_rtx,
10248 gen_rtx_LABEL_REF (VOIDmode, code_label)),
10249 insn);
10250 emit_label_after (new_label, uncond_jump);
10251
10252 tmp = XEXP (SET_SRC (set), 1);
10253 XEXP (SET_SRC (set), 1) = XEXP (SET_SRC (set), 2);
10254 XEXP (SET_SRC (set), 2) = tmp;
10255 INSN_CODE (insn) = -1;
10256
10257 XEXP (label_ref, 0) = new_label;
10258 JUMP_LABEL (insn) = new_label;
10259 JUMP_LABEL (uncond_jump) = code_label;
10260
10261 return true;
10262 }
10263
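/* The effect of the transformation above, sketched with invented labels: a
   conditional backward branch whose target is too far away for the dynamic
   predictor, e.g.

       jne  .Lloop_top              # backward conditional branch

   is rewritten as a short forward branch around an unconditional jump:

       je   .Lskip                  # inverted condition, forward
       j    .Lloop_top              # unconditional backward jump
   .Lskip:

   The taken backward control transfer is now an unconditional jump and the
   conditional branch has become a short forward branch, leaving only
   branches the static prediction handles well, as described above.  */
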
10264 /* Returns 1 if INSN reads the value of REG for purposes not related
10265 to addressing of memory, and 0 otherwise. */
10266 static int
10267 s390_non_addr_reg_read_p (rtx reg, rtx insn)
10268 {
10269 return reg_referenced_p (reg, PATTERN (insn))
10270 && !reg_used_in_mem_p (REGNO (reg), PATTERN (insn));
10271 }
10272
10273 /* Starting from INSN find_cond_jump looks downwards in the insn
10274 stream for a single jump insn which is the last user of the
10275 condition code set in INSN. */
10276 static rtx
10277 find_cond_jump (rtx insn)
10278 {
10279 for (; insn; insn = NEXT_INSN (insn))
10280 {
10281 rtx ite, cc;
10282
10283 if (LABEL_P (insn))
10284 break;
10285
10286 if (!JUMP_P (insn))
10287 {
10288 if (reg_mentioned_p (gen_rtx_REG (CCmode, CC_REGNUM), insn))
10289 break;
10290 continue;
10291 }
10292
10293 /* This will be triggered by a return. */
10294 if (GET_CODE (PATTERN (insn)) != SET)
10295 break;
10296
10297 gcc_assert (SET_DEST (PATTERN (insn)) == pc_rtx);
10298 ite = SET_SRC (PATTERN (insn));
10299
10300 if (GET_CODE (ite) != IF_THEN_ELSE)
10301 break;
10302
10303 cc = XEXP (XEXP (ite, 0), 0);
10304 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc)))
10305 break;
10306
10307 if (find_reg_note (insn, REG_DEAD, cc))
10308 return insn;
10309 break;
10310 }
10311
10312 return NULL_RTX;
10313 }
10314
10315 /* Swap the condition in COND and the operands in OP0 and OP1 so that
10316 the semantics does not change. If NULL_RTX is passed as COND the
10317 function tries to find the conditional jump starting with INSN. */
10318 static void
10319 s390_swap_cmp (rtx cond, rtx *op0, rtx *op1, rtx insn)
10320 {
10321 rtx tmp = *op0;
10322
10323 if (cond == NULL_RTX)
10324 {
10325 rtx jump = find_cond_jump (NEXT_INSN (insn));
10326 jump = jump ? single_set (jump) : NULL_RTX;
10327
10328 if (jump == NULL_RTX)
10329 return;
10330
10331 cond = XEXP (XEXP (jump, 1), 0);
10332 }
10333
10334 *op0 = *op1;
10335 *op1 = tmp;
10336 PUT_CODE (cond, swap_condition (GET_CODE (cond)));
10337 }
10338
10339 /* On z10, instructions of the compare-and-branch family have the
10340    property of accessing the register occurring as second operand with
10341    its bits complemented.  If such a compare is grouped with a second
10342    instruction that accesses the same register non-complemented, and
10343    if that register's value is delivered via a bypass, then the
10344    pipeline recycles, thereby causing a significant performance decline.
10345    This function locates such situations and exchanges the two
10346    operands of the compare.  The function returns true whenever it
10347    added an insn. */
10348 static bool
10349 s390_z10_optimize_cmp (rtx insn)
10350 {
10351 rtx prev_insn, next_insn;
10352 bool insn_added_p = false;
10353 rtx cond, *op0, *op1;
10354
10355 if (GET_CODE (PATTERN (insn)) == PARALLEL)
10356 {
10357 /* Handle compare and branch and branch on count
10358 instructions. */
10359 rtx pattern = single_set (insn);
10360
10361 if (!pattern
10362 || SET_DEST (pattern) != pc_rtx
10363 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE)
10364 return false;
10365
10366 cond = XEXP (SET_SRC (pattern), 0);
10367 op0 = &XEXP (cond, 0);
10368 op1 = &XEXP (cond, 1);
10369 }
10370 else if (GET_CODE (PATTERN (insn)) == SET)
10371 {
10372 rtx src, dest;
10373
10374 /* Handle normal compare instructions. */
10375 src = SET_SRC (PATTERN (insn));
10376 dest = SET_DEST (PATTERN (insn));
10377
10378 if (!REG_P (dest)
10379 || !CC_REGNO_P (REGNO (dest))
10380 || GET_CODE (src) != COMPARE)
10381 return false;
10382
10383 /* s390_swap_cmp will try to find the conditional
10384 jump when passing NULL_RTX as condition. */
10385 cond = NULL_RTX;
10386 op0 = &XEXP (src, 0);
10387 op1 = &XEXP (src, 1);
10388 }
10389 else
10390 return false;
10391
10392 if (!REG_P (*op0) || !REG_P (*op1))
10393 return false;
10394
10395 if (GET_MODE_CLASS (GET_MODE (*op0)) != MODE_INT)
10396 return false;
10397
10398 /* Swap the COMPARE arguments and its mask if there is a
10399 conflicting access in the previous insn. */
10400 prev_insn = prev_active_insn (insn);
10401 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
10402 && reg_referenced_p (*op1, PATTERN (prev_insn)))
10403 s390_swap_cmp (cond, op0, op1, insn);
10404
10405 /* Check if there is a conflict with the next insn. If there
10406 was no conflict with the previous insn, then swap the
10407 COMPARE arguments and its mask. If we already swapped
10408 the operands, or if swapping them would cause a conflict
10409 with the previous insn, issue a NOP after the COMPARE in
10410      order to separate the two instructions. */
10411 next_insn = next_active_insn (insn);
10412 if (next_insn != NULL_RTX && INSN_P (next_insn)
10413 && s390_non_addr_reg_read_p (*op1, next_insn))
10414 {
10415 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
10416 && s390_non_addr_reg_read_p (*op0, prev_insn))
10417 {
10418 if (REGNO (*op1) == 0)
10419 emit_insn_after (gen_nop1 (), insn);
10420 else
10421 emit_insn_after (gen_nop (), insn);
10422 insn_added_p = true;
10423 }
10424 else
10425 s390_swap_cmp (cond, op0, op1, insn);
10426 }
10427 return insn_added_p;
10428 }
10429
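/* A sketch of the transformation performed above (register numbers and
   extended mnemonics are chosen for illustration only).  If the insn
   grouped before a compare-and-branch also accesses the second compare
   operand, e.g.

       lr   %r3,%r9
       crjh %r2,%r3,label         # branch if %r2 > %r3

   then the operands and the condition are exchanged:

       lr   %r3,%r9
       crjl %r3,%r2,label         # branch if %r3 < %r2, same semantics

   If swapping would merely move the conflict to the other neighbouring
   insn, a NOP is emitted after the compare instead, separating the two
   conflicting instructions.  */
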
10430 /* Perform machine-dependent processing. */
10431
10432 static void
10433 s390_reorg (void)
10434 {
10435 bool pool_overflow = false;
10436
10437 /* Make sure all splits have been performed; splits after
10438 machine_dependent_reorg might confuse insn length counts. */
10439 split_all_insns_noflow ();
10440
10441 /* Install the main literal pool and the associated base
10442 register load insns.
10443
10444 In addition, there are two problematic situations we need
10445 to correct:
10446
10447 - the literal pool might be > 4096 bytes in size, so that
10448 some of its elements cannot be directly accessed
10449
10450 - a branch target might be > 64K away from the branch, so that
10451 it is not possible to use a PC-relative instruction.
10452
10453 To fix those, we split the single literal pool into multiple
10454 pool chunks, reloading the pool base register at various
10455 points throughout the function to ensure it always points to
10456 the pool chunk the following code expects, and / or replace
10457 PC-relative branches by absolute branches.
10458
10459 However, the two problems are interdependent: splitting the
10460 literal pool can move a branch further away from its target,
10461 causing the 64K limit to overflow, and on the other hand,
10462 replacing a PC-relative branch by an absolute branch means
10463 we need to put the branch target address into the literal
10464 pool, possibly causing it to overflow.
10465
10466 So, we loop trying to fix up both problems until we manage
10467 to satisfy both conditions at the same time. Note that the
10468 loop is guaranteed to terminate as every pass of the loop
10469 strictly decreases the total number of PC-relative branches
10470 in the function. (This is not completely true as there
10471 might be branch-over-pool insns introduced by chunkify_start.
10472 Those never need to be split however.) */
10473
10474 for (;;)
10475 {
10476 struct constant_pool *pool = NULL;
10477
10478 /* Collect the literal pool. */
10479 if (!pool_overflow)
10480 {
10481 pool = s390_mainpool_start ();
10482 if (!pool)
10483 pool_overflow = true;
10484 }
10485
10486 /* If literal pool overflowed, start to chunkify it. */
10487 if (pool_overflow)
10488 pool = s390_chunkify_start ();
10489
10490 /* Split out-of-range branches. If this has created new
10491 literal pool entries, cancel current chunk list and
10492 recompute it. zSeries machines have large branch
10493 instructions, so we never need to split a branch. */
10494 if (!TARGET_CPU_ZARCH && s390_split_branches ())
10495 {
10496 if (pool_overflow)
10497 s390_chunkify_cancel (pool);
10498 else
10499 s390_mainpool_cancel (pool);
10500
10501 continue;
10502 }
10503
10504 /* If we made it up to here, both conditions are satisfied.
10505 Finish up literal pool related changes. */
10506 if (pool_overflow)
10507 s390_chunkify_finish (pool);
10508 else
10509 s390_mainpool_finish (pool);
10510
10511 /* We're done splitting branches. */
10512 cfun->machine->split_branches_pending_p = false;
10513 break;
10514 }
10515
10516 /* Generate out-of-pool execute target insns. */
10517 if (TARGET_CPU_ZARCH)
10518 {
10519 rtx insn, label, target;
10520
10521 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10522 {
10523 label = s390_execute_label (insn);
10524 if (!label)
10525 continue;
10526
10527 gcc_assert (label != const0_rtx);
10528
10529 target = emit_label (XEXP (label, 0));
10530 INSN_ADDRESSES_NEW (target, -1);
10531
10532 target = emit_insn (s390_execute_target (insn));
10533 INSN_ADDRESSES_NEW (target, -1);
10534 }
10535 }
10536
10537 /* Try to optimize prologue and epilogue further. */
10538 s390_optimize_prologue ();
10539
10540 /* Walk over the insns and do some >=z10 specific changes. */
10541 if (s390_tune == PROCESSOR_2097_Z10
10542 || s390_tune == PROCESSOR_2817_Z196)
10543 {
10544 rtx insn;
10545 bool insn_added_p = false;
10546
10547 /* The insn lengths and addresses have to be up to date for the
10548 following manipulations. */
10549 shorten_branches (get_insns ());
10550
10551 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10552 {
10553 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
10554 continue;
10555
10556 if (JUMP_P (insn))
10557 insn_added_p |= s390_fix_long_loop_prediction (insn);
10558
10559 if ((GET_CODE (PATTERN (insn)) == PARALLEL
10560 || GET_CODE (PATTERN (insn)) == SET)
10561 && s390_tune == PROCESSOR_2097_Z10)
10562 insn_added_p |= s390_z10_optimize_cmp (insn);
10563 }
10564
10565 /* Adjust branches if we added new instructions. */
10566 if (insn_added_p)
10567 shorten_branches (get_insns ());
10568 }
10569 }
10570
10571 /* Return true if INSN is a fp load insn writing register REGNO. */
10572 static inline bool
10573 s390_fpload_toreg (rtx insn, unsigned int regno)
10574 {
10575 rtx set;
10576 enum attr_type flag = s390_safe_attr_type (insn);
10577
10578 if (flag != TYPE_FLOADSF && flag != TYPE_FLOADDF)
10579 return false;
10580
10581 set = single_set (insn);
10582
10583 if (set == NULL_RTX)
10584 return false;
10585
10586 if (!REG_P (SET_DEST (set)) || !MEM_P (SET_SRC (set)))
10587 return false;
10588
10589 if (REGNO (SET_DEST (set)) != regno)
10590 return false;
10591
10592 return true;
10593 }
10594
10595 /* This value describes the distance to be avoided between an
10596    arithmetic fp instruction and an fp load writing the same register.
10597    Z10_EARLYLOAD_DISTANCE - 1 as well as Z10_EARLYLOAD_DISTANCE + 1 are
10598    fine, but the exact value has to be avoided.  Otherwise the FP
10599    pipeline will throw an exception causing a major penalty. */
10600 #define Z10_EARLYLOAD_DISTANCE 7
10601
10602 /* Rearrange the ready list in order to avoid the situation described
10603 for Z10_EARLYLOAD_DISTANCE. A problematic load instruction is
10604 moved to the very end of the ready list. */
10605 static void
10606 s390_z10_prevent_earlyload_conflicts (rtx *ready, int *nready_p)
10607 {
10608 unsigned int regno;
10609 int nready = *nready_p;
10610 rtx tmp;
10611 int i;
10612 rtx insn;
10613 rtx set;
10614 enum attr_type flag;
10615 int distance;
10616
10617 /* Skip DISTANCE - 1 active insns. */
10618 for (insn = last_scheduled_insn, distance = Z10_EARLYLOAD_DISTANCE - 1;
10619 distance > 0 && insn != NULL_RTX;
10620 distance--, insn = prev_active_insn (insn))
10621 if (CALL_P (insn) || JUMP_P (insn))
10622 return;
10623
10624 if (insn == NULL_RTX)
10625 return;
10626
10627 set = single_set (insn);
10628
10629 if (set == NULL_RTX || !REG_P (SET_DEST (set))
10630 || GET_MODE_CLASS (GET_MODE (SET_DEST (set))) != MODE_FLOAT)
10631 return;
10632
10633 flag = s390_safe_attr_type (insn);
10634
10635 if (flag == TYPE_FLOADSF || flag == TYPE_FLOADDF)
10636 return;
10637
10638 regno = REGNO (SET_DEST (set));
10639 i = nready - 1;
10640
10641 while (!s390_fpload_toreg (ready[i], regno) && i > 0)
10642 i--;
10643
10644 if (!i)
10645 return;
10646
10647 tmp = ready[i];
10648 memmove (&ready[1], &ready[0], sizeof (rtx) * i);
10649 ready[0] = tmp;
10650 }
10651
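/* Illustration of the reordering above (the element at the highest index of
   READY is the one issued next, as the surrounding code assumes).  If an
   arithmetic fp instruction writing %f4 was issued roughly
   Z10_EARLYLOAD_DISTANCE - 1 active insns ago and READY is

       { other_1, other_2, load-into-%f4 }

   then issuing the load now would hit the forbidden distance, so it is
   moved to index 0 and READY becomes

       { load-into-%f4, other_1, other_2 }

   making other_2 the next insn to be issued instead.  */
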
10652 /* This function is called via hook TARGET_SCHED_REORDER before
10653 issuing one insn from list READY which contains *NREADYP entries.
10654 For target z10 it reorders load instructions to avoid early load
10655    conflicts in the floating point pipeline.  */
10656 static int
10657 s390_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
10658 rtx *ready, int *nreadyp, int clock ATTRIBUTE_UNUSED)
10659 {
10660 if (s390_tune == PROCESSOR_2097_Z10)
10661 if (reload_completed && *nreadyp > 1)
10662 s390_z10_prevent_earlyload_conflicts (ready, nreadyp);
10663
10664 return s390_issue_rate ();
10665 }
10666
10667 /* This function is called via hook TARGET_SCHED_VARIABLE_ISSUE after
10668 the scheduler has issued INSN. It stores the last issued insn into
10669 last_scheduled_insn in order to make it available for
10670 s390_sched_reorder. */
10671 static int
10672 s390_sched_variable_issue (FILE *file ATTRIBUTE_UNUSED,
10673 int verbose ATTRIBUTE_UNUSED,
10674 rtx insn, int more)
10675 {
10676 last_scheduled_insn = insn;
10677
10678 if (GET_CODE (PATTERN (insn)) != USE
10679 && GET_CODE (PATTERN (insn)) != CLOBBER)
10680 return more - 1;
10681 else
10682 return more;
10683 }
10684
10685 static void
10686 s390_sched_init (FILE *file ATTRIBUTE_UNUSED,
10687 int verbose ATTRIBUTE_UNUSED,
10688 int max_ready ATTRIBUTE_UNUSED)
10689 {
10690 last_scheduled_insn = NULL_RTX;
10691 }
10692
10693 /* This function checks the whole of insn X for memory references. The
10694 function always returns zero because the framework it is called
10695 from would stop recursively analyzing the insn upon a return value
10696 other than zero. The real result of this function is updating
10697 counter variable MEM_COUNT. */
10698 static int
10699 check_dpu (rtx *x, unsigned *mem_count)
10700 {
10701 if (*x != NULL_RTX && MEM_P (*x))
10702 (*mem_count)++;
10703 return 0;
10704 }
10705
10706 /* This target hook implementation for TARGET_LOOP_UNROLL_ADJUST calculates
10707    the number of times struct loop *loop should be unrolled when tuning for
10708    cpus with a built-in stride prefetcher.
10709    The loop is analyzed for memory accesses by calling check_dpu for
10710    each rtx of the loop.  Depending on the loop_depth and the amount of
10711    memory accesses a new number <= nunroll is returned to improve the
10712    behaviour of the hardware prefetch unit. */
10713 static unsigned
10714 s390_loop_unroll_adjust (unsigned nunroll, struct loop *loop)
10715 {
10716 basic_block *bbs;
10717 rtx insn;
10718 unsigned i;
10719 unsigned mem_count = 0;
10720
10721 if (s390_tune != PROCESSOR_2097_Z10 && s390_tune != PROCESSOR_2817_Z196)
10722 return nunroll;
10723
10724 /* Count the number of memory references within the loop body. */
10725 bbs = get_loop_body (loop);
10726 for (i = 0; i < loop->num_nodes; i++)
10727 {
10728 for (insn = BB_HEAD (bbs[i]); insn != BB_END (bbs[i]); insn = NEXT_INSN (insn))
10729 if (INSN_P (insn) && INSN_CODE (insn) != -1)
10730 for_each_rtx (&insn, (rtx_function) check_dpu, &mem_count);
10731 }
10732 free (bbs);
10733
10734   /* Prevent division by zero; with no memory references there is no need to adjust nunroll.  */
10735 if (mem_count == 0)
10736 return nunroll;
10737
10738 switch (loop_depth(loop))
10739 {
10740 case 1:
10741 return MIN (nunroll, 28 / mem_count);
10742 case 2:
10743 return MIN (nunroll, 22 / mem_count);
10744 default:
10745 return MIN (nunroll, 16 / mem_count);
10746 }
10747 }
10748
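/* Worked example for the heuristic above: a loop at depth 1 containing 7
   memory references and a requested unroll factor of 8 is limited to
   MIN (8, 28 / 7) = 4 copies; the same loop nested one level deeper would
   be limited to MIN (8, 22 / 7) = 3.  The divisors are tuning numbers meant
   to keep the access pattern friendly to the hardware prefetch unit.  */
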
10749 /* Initialize GCC target structure. */
10750
10751 #undef TARGET_ASM_ALIGNED_HI_OP
10752 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
10753 #undef TARGET_ASM_ALIGNED_DI_OP
10754 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
10755 #undef TARGET_ASM_INTEGER
10756 #define TARGET_ASM_INTEGER s390_assemble_integer
10757
10758 #undef TARGET_ASM_OPEN_PAREN
10759 #define TARGET_ASM_OPEN_PAREN ""
10760
10761 #undef TARGET_ASM_CLOSE_PAREN
10762 #define TARGET_ASM_CLOSE_PAREN ""
10763
10764 #undef TARGET_OPTION_OVERRIDE
10765 #define TARGET_OPTION_OVERRIDE s390_option_override
10766
10767 #undef TARGET_ENCODE_SECTION_INFO
10768 #define TARGET_ENCODE_SECTION_INFO s390_encode_section_info
10769
10770 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10771 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
10772
10773 #ifdef HAVE_AS_TLS
10774 #undef TARGET_HAVE_TLS
10775 #define TARGET_HAVE_TLS true
10776 #endif
10777 #undef TARGET_CANNOT_FORCE_CONST_MEM
10778 #define TARGET_CANNOT_FORCE_CONST_MEM s390_cannot_force_const_mem
10779
10780 #undef TARGET_DELEGITIMIZE_ADDRESS
10781 #define TARGET_DELEGITIMIZE_ADDRESS s390_delegitimize_address
10782
10783 #undef TARGET_LEGITIMIZE_ADDRESS
10784 #define TARGET_LEGITIMIZE_ADDRESS s390_legitimize_address
10785
10786 #undef TARGET_RETURN_IN_MEMORY
10787 #define TARGET_RETURN_IN_MEMORY s390_return_in_memory
10788
10789 #undef TARGET_INIT_BUILTINS
10790 #define TARGET_INIT_BUILTINS s390_init_builtins
10791 #undef TARGET_EXPAND_BUILTIN
10792 #define TARGET_EXPAND_BUILTIN s390_expand_builtin
10793
10794 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
10795 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA s390_output_addr_const_extra
10796
10797 #undef TARGET_ASM_OUTPUT_MI_THUNK
10798 #define TARGET_ASM_OUTPUT_MI_THUNK s390_output_mi_thunk
10799 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
10800 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
10801
10802 #undef TARGET_SCHED_ADJUST_PRIORITY
10803 #define TARGET_SCHED_ADJUST_PRIORITY s390_adjust_priority
10804 #undef TARGET_SCHED_ISSUE_RATE
10805 #define TARGET_SCHED_ISSUE_RATE s390_issue_rate
10806 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
10807 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD s390_first_cycle_multipass_dfa_lookahead
10808
10809 #undef TARGET_SCHED_VARIABLE_ISSUE
10810 #define TARGET_SCHED_VARIABLE_ISSUE s390_sched_variable_issue
10811 #undef TARGET_SCHED_REORDER
10812 #define TARGET_SCHED_REORDER s390_sched_reorder
10813 #undef TARGET_SCHED_INIT
10814 #define TARGET_SCHED_INIT s390_sched_init
10815
10816 #undef TARGET_CANNOT_COPY_INSN_P
10817 #define TARGET_CANNOT_COPY_INSN_P s390_cannot_copy_insn_p
10818 #undef TARGET_RTX_COSTS
10819 #define TARGET_RTX_COSTS s390_rtx_costs
10820 #undef TARGET_ADDRESS_COST
10821 #define TARGET_ADDRESS_COST s390_address_cost
10822 #undef TARGET_REGISTER_MOVE_COST
10823 #define TARGET_REGISTER_MOVE_COST s390_register_move_cost
10824 #undef TARGET_MEMORY_MOVE_COST
10825 #define TARGET_MEMORY_MOVE_COST s390_memory_move_cost
10826
10827 #undef TARGET_MACHINE_DEPENDENT_REORG
10828 #define TARGET_MACHINE_DEPENDENT_REORG s390_reorg
10829
10830 #undef TARGET_VALID_POINTER_MODE
10831 #define TARGET_VALID_POINTER_MODE s390_valid_pointer_mode
10832
10833 #undef TARGET_BUILD_BUILTIN_VA_LIST
10834 #define TARGET_BUILD_BUILTIN_VA_LIST s390_build_builtin_va_list
10835 #undef TARGET_EXPAND_BUILTIN_VA_START
10836 #define TARGET_EXPAND_BUILTIN_VA_START s390_va_start
10837 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
10838 #define TARGET_GIMPLIFY_VA_ARG_EXPR s390_gimplify_va_arg
10839
10840 #undef TARGET_PROMOTE_FUNCTION_MODE
10841 #define TARGET_PROMOTE_FUNCTION_MODE s390_promote_function_mode
10842 #undef TARGET_PASS_BY_REFERENCE
10843 #define TARGET_PASS_BY_REFERENCE s390_pass_by_reference
10844
10845 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
10846 #define TARGET_FUNCTION_OK_FOR_SIBCALL s390_function_ok_for_sibcall
10847 #undef TARGET_FUNCTION_ARG
10848 #define TARGET_FUNCTION_ARG s390_function_arg
10849 #undef TARGET_FUNCTION_ARG_ADVANCE
10850 #define TARGET_FUNCTION_ARG_ADVANCE s390_function_arg_advance
10851 #undef TARGET_FUNCTION_VALUE
10852 #define TARGET_FUNCTION_VALUE s390_function_value
10853 #undef TARGET_LIBCALL_VALUE
10854 #define TARGET_LIBCALL_VALUE s390_libcall_value
10855
10856 #undef TARGET_FIXED_CONDITION_CODE_REGS
10857 #define TARGET_FIXED_CONDITION_CODE_REGS s390_fixed_condition_code_regs
10858
10859 #undef TARGET_CC_MODES_COMPATIBLE
10860 #define TARGET_CC_MODES_COMPATIBLE s390_cc_modes_compatible
10861
10862 #undef TARGET_INVALID_WITHIN_DOLOOP
10863 #define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_null
10864
10865 #ifdef HAVE_AS_TLS
10866 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
10867 #define TARGET_ASM_OUTPUT_DWARF_DTPREL s390_output_dwarf_dtprel
10868 #endif
10869
10870 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
10871 #undef TARGET_MANGLE_TYPE
10872 #define TARGET_MANGLE_TYPE s390_mangle_type
10873 #endif
10874
10875 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10876 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
10877
10878 #undef TARGET_PREFERRED_RELOAD_CLASS
10879 #define TARGET_PREFERRED_RELOAD_CLASS s390_preferred_reload_class
10880
10881 #undef TARGET_SECONDARY_RELOAD
10882 #define TARGET_SECONDARY_RELOAD s390_secondary_reload
10883
10884 #undef TARGET_LIBGCC_CMP_RETURN_MODE
10885 #define TARGET_LIBGCC_CMP_RETURN_MODE s390_libgcc_cmp_return_mode
10886
10887 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
10888 #define TARGET_LIBGCC_SHIFT_COUNT_MODE s390_libgcc_shift_count_mode
10889
10890 #undef TARGET_LEGITIMATE_ADDRESS_P
10891 #define TARGET_LEGITIMATE_ADDRESS_P s390_legitimate_address_p
10892
10893 #undef TARGET_LEGITIMATE_CONSTANT_P
10894 #define TARGET_LEGITIMATE_CONSTANT_P s390_legitimate_constant_p
10895
10896 #undef TARGET_CAN_ELIMINATE
10897 #define TARGET_CAN_ELIMINATE s390_can_eliminate
10898
10899 #undef TARGET_CONDITIONAL_REGISTER_USAGE
10900 #define TARGET_CONDITIONAL_REGISTER_USAGE s390_conditional_register_usage
10901
10902 #undef TARGET_LOOP_UNROLL_ADJUST
10903 #define TARGET_LOOP_UNROLL_ADJUST s390_loop_unroll_adjust
10904
10905 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
10906 #define TARGET_ASM_TRAMPOLINE_TEMPLATE s390_asm_trampoline_template
10907 #undef TARGET_TRAMPOLINE_INIT
10908 #define TARGET_TRAMPOLINE_INIT s390_trampoline_init
10909
10910 #undef TARGET_UNWIND_WORD_MODE
10911 #define TARGET_UNWIND_WORD_MODE s390_unwind_word_mode
10912
10913 struct gcc_target targetm = TARGET_INITIALIZER;
10914
10915 #include "gt-s390.h"