s390: Convert from sync to atomic optabs
gcc/config/s390/s390.c
1 /* Subroutines used for code generation on IBM S/390 and zSeries
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006,
3 2007, 2008, 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
4 Contributed by Hartmut Penner (hpenner@de.ibm.com) and
5 Ulrich Weigand (uweigand@de.ibm.com) and
6 Andreas Krebbel (Andreas.Krebbel@de.ibm.com).
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "tm_p.h"
31 #include "regs.h"
32 #include "hard-reg-set.h"
33 #include "insn-config.h"
34 #include "conditions.h"
35 #include "output.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "except.h"
39 #include "function.h"
40 #include "recog.h"
41 #include "expr.h"
42 #include "reload.h"
43 #include "diagnostic-core.h"
44 #include "basic-block.h"
45 #include "ggc.h"
46 #include "target.h"
47 #include "target-def.h"
48 #include "debug.h"
49 #include "langhooks.h"
50 #include "optabs.h"
51 #include "gimple.h"
52 #include "df.h"
53 #include "params.h"
54 #include "cfgloop.h"
55 #include "opts.h"
56
57 /* Define the specific costs for a given cpu. */
58
59 struct processor_costs
60 {
61 /* multiplication */
62 const int m; /* cost of an M instruction. */
63 const int mghi; /* cost of an MGHI instruction. */
64 const int mh; /* cost of an MH instruction. */
65 const int mhi; /* cost of an MHI instruction. */
66 const int ml; /* cost of an ML instruction. */
67 const int mr; /* cost of an MR instruction. */
68 const int ms; /* cost of an MS instruction. */
69 const int msg; /* cost of an MSG instruction. */
70 const int msgf; /* cost of an MSGF instruction. */
71 const int msgfr; /* cost of an MSGFR instruction. */
72 const int msgr; /* cost of an MSGR instruction. */
73 const int msr; /* cost of an MSR instruction. */
74 const int mult_df; /* cost of multiplication in DFmode. */
75 const int mxbr;
76 /* square root */
77 const int sqxbr; /* cost of square root in TFmode. */
78 const int sqdbr; /* cost of square root in DFmode. */
79 const int sqebr; /* cost of square root in SFmode. */
80 /* multiply and add */
81 const int madbr; /* cost of multiply and add in DFmode. */
82 const int maebr; /* cost of multiply and add in SFmode. */
83 /* division */
84 const int dxbr;
85 const int ddbr;
86 const int debr;
87 const int dlgr;
88 const int dlr;
89 const int dr;
90 const int dsgfr;
91 const int dsgr;
92 };
93
94 const struct processor_costs *s390_cost;
95
96 static const
97 struct processor_costs z900_cost =
98 {
99 COSTS_N_INSNS (5), /* M */
100 COSTS_N_INSNS (10), /* MGHI */
101 COSTS_N_INSNS (5), /* MH */
102 COSTS_N_INSNS (4), /* MHI */
103 COSTS_N_INSNS (5), /* ML */
104 COSTS_N_INSNS (5), /* MR */
105 COSTS_N_INSNS (4), /* MS */
106 COSTS_N_INSNS (15), /* MSG */
107 COSTS_N_INSNS (7), /* MSGF */
108 COSTS_N_INSNS (7), /* MSGFR */
109 COSTS_N_INSNS (10), /* MSGR */
110 COSTS_N_INSNS (4), /* MSR */
111 COSTS_N_INSNS (7), /* multiplication in DFmode */
112 COSTS_N_INSNS (13), /* MXBR */
113 COSTS_N_INSNS (136), /* SQXBR */
114 COSTS_N_INSNS (44), /* SQDBR */
115 COSTS_N_INSNS (35), /* SQEBR */
116 COSTS_N_INSNS (18), /* MADBR */
117 COSTS_N_INSNS (13), /* MAEBR */
118 COSTS_N_INSNS (134), /* DXBR */
119 COSTS_N_INSNS (30), /* DDBR */
120 COSTS_N_INSNS (27), /* DEBR */
121 COSTS_N_INSNS (220), /* DLGR */
122 COSTS_N_INSNS (34), /* DLR */
123 COSTS_N_INSNS (34), /* DR */
124 COSTS_N_INSNS (32), /* DSGFR */
125 COSTS_N_INSNS (32), /* DSGR */
126 };
127
128 static const
129 struct processor_costs z990_cost =
130 {
131 COSTS_N_INSNS (4), /* M */
132 COSTS_N_INSNS (2), /* MGHI */
133 COSTS_N_INSNS (2), /* MH */
134 COSTS_N_INSNS (2), /* MHI */
135 COSTS_N_INSNS (4), /* ML */
136 COSTS_N_INSNS (4), /* MR */
137 COSTS_N_INSNS (5), /* MS */
138 COSTS_N_INSNS (6), /* MSG */
139 COSTS_N_INSNS (4), /* MSGF */
140 COSTS_N_INSNS (4), /* MSGFR */
141 COSTS_N_INSNS (4), /* MSGR */
142 COSTS_N_INSNS (4), /* MSR */
143 COSTS_N_INSNS (1), /* multiplication in DFmode */
144 COSTS_N_INSNS (28), /* MXBR */
145 COSTS_N_INSNS (130), /* SQXBR */
146 COSTS_N_INSNS (66), /* SQDBR */
147 COSTS_N_INSNS (38), /* SQEBR */
148 COSTS_N_INSNS (1), /* MADBR */
149 COSTS_N_INSNS (1), /* MAEBR */
150 COSTS_N_INSNS (60), /* DXBR */
151 COSTS_N_INSNS (40), /* DDBR */
152 COSTS_N_INSNS (26), /* DEBR */
153 COSTS_N_INSNS (176), /* DLGR */
154 COSTS_N_INSNS (31), /* DLR */
155 COSTS_N_INSNS (31), /* DR */
156 COSTS_N_INSNS (31), /* DSGFR */
157 COSTS_N_INSNS (31), /* DSGR */
158 };
159
160 static const
161 struct processor_costs z9_109_cost =
162 {
163 COSTS_N_INSNS (4), /* M */
164 COSTS_N_INSNS (2), /* MGHI */
165 COSTS_N_INSNS (2), /* MH */
166 COSTS_N_INSNS (2), /* MHI */
167 COSTS_N_INSNS (4), /* ML */
168 COSTS_N_INSNS (4), /* MR */
169 COSTS_N_INSNS (5), /* MS */
170 COSTS_N_INSNS (6), /* MSG */
171 COSTS_N_INSNS (4), /* MSGF */
172 COSTS_N_INSNS (4), /* MSGFR */
173 COSTS_N_INSNS (4), /* MSGR */
174 COSTS_N_INSNS (4), /* MSR */
175 COSTS_N_INSNS (1), /* multiplication in DFmode */
176 COSTS_N_INSNS (28), /* MXBR */
177 COSTS_N_INSNS (130), /* SQXBR */
178 COSTS_N_INSNS (66), /* SQDBR */
179 COSTS_N_INSNS (38), /* SQEBR */
180 COSTS_N_INSNS (1), /* MADBR */
181 COSTS_N_INSNS (1), /* MAEBR */
182 COSTS_N_INSNS (60), /* DXBR */
183 COSTS_N_INSNS (40), /* DDBR */
184 COSTS_N_INSNS (26), /* DEBR */
185 COSTS_N_INSNS (30), /* DLGR */
186 COSTS_N_INSNS (23), /* DLR */
187 COSTS_N_INSNS (23), /* DR */
188 COSTS_N_INSNS (24), /* DSGFR */
189 COSTS_N_INSNS (24), /* DSGR */
190 };
191
192 static const
193 struct processor_costs z10_cost =
194 {
195 COSTS_N_INSNS (10), /* M */
196 COSTS_N_INSNS (10), /* MGHI */
197 COSTS_N_INSNS (10), /* MH */
198 COSTS_N_INSNS (10), /* MHI */
199 COSTS_N_INSNS (10), /* ML */
200 COSTS_N_INSNS (10), /* MR */
201 COSTS_N_INSNS (10), /* MS */
202 COSTS_N_INSNS (10), /* MSG */
203 COSTS_N_INSNS (10), /* MSGF */
204 COSTS_N_INSNS (10), /* MSGFR */
205 COSTS_N_INSNS (10), /* MSGR */
206 COSTS_N_INSNS (10), /* MSR */
207 COSTS_N_INSNS (1) , /* multiplication in DFmode */
208 COSTS_N_INSNS (50), /* MXBR */
209 COSTS_N_INSNS (120), /* SQXBR */
210 COSTS_N_INSNS (52), /* SQDBR */
211 COSTS_N_INSNS (38), /* SQEBR */
212 COSTS_N_INSNS (1), /* MADBR */
213 COSTS_N_INSNS (1), /* MAEBR */
214 COSTS_N_INSNS (111), /* DXBR */
215 COSTS_N_INSNS (39), /* DDBR */
216 COSTS_N_INSNS (32), /* DEBR */
217 COSTS_N_INSNS (160), /* DLGR */
218 COSTS_N_INSNS (71), /* DLR */
219 COSTS_N_INSNS (71), /* DR */
220 COSTS_N_INSNS (71), /* DSGFR */
221 COSTS_N_INSNS (71), /* DSGR */
222 };
223
224 static const
225 struct processor_costs z196_cost =
226 {
227 COSTS_N_INSNS (7), /* M */
228 COSTS_N_INSNS (5), /* MGHI */
229 COSTS_N_INSNS (5), /* MH */
230 COSTS_N_INSNS (5), /* MHI */
231 COSTS_N_INSNS (7), /* ML */
232 COSTS_N_INSNS (7), /* MR */
233 COSTS_N_INSNS (6), /* MS */
234 COSTS_N_INSNS (8), /* MSG */
235 COSTS_N_INSNS (6), /* MSGF */
236 COSTS_N_INSNS (6), /* MSGFR */
237 COSTS_N_INSNS (8), /* MSGR */
238 COSTS_N_INSNS (6), /* MSR */
239 COSTS_N_INSNS (1) , /* multiplication in DFmode */
240 COSTS_N_INSNS (40), /* MXBR B+40 */
241 COSTS_N_INSNS (100), /* SQXBR B+100 */
242 COSTS_N_INSNS (42), /* SQDBR B+42 */
243 COSTS_N_INSNS (28), /* SQEBR B+28 */
244 COSTS_N_INSNS (1), /* MADBR B */
245 COSTS_N_INSNS (1), /* MAEBR B */
246 COSTS_N_INSNS (101), /* DXBR B+101 */
247 COSTS_N_INSNS (29), /* DDBR */
248 COSTS_N_INSNS (22), /* DEBR */
249 COSTS_N_INSNS (160), /* DLGR cracked */
250 COSTS_N_INSNS (160), /* DLR cracked */
251 COSTS_N_INSNS (160), /* DR expanded */
252 COSTS_N_INSNS (160), /* DSGFR cracked */
253 COSTS_N_INSNS (160), /* DSGR cracked */
254 };
255
256 extern int reload_completed;
257
258 /* Kept up to date using the SCHED_VARIABLE_ISSUE hook. */
259 static rtx last_scheduled_insn;
260
 261 /* Structure used to hold the components of an S/390 memory
262 address. A legitimate address on S/390 is of the general
263 form
264 base + index + displacement
265 where any of the components is optional.
266
267 base and index are registers of the class ADDR_REGS,
268 displacement is an unsigned 12-bit immediate constant. */
269
270 struct s390_address
271 {
272 rtx base;
273 rtx indx;
274 rtx disp;
275 bool pointer;
276 bool literal_pool;
277 };
278
279 /* The following structure is embedded in the machine
280 specific part of struct function. */
281
282 struct GTY (()) s390_frame_layout
283 {
284 /* Offset within stack frame. */
285 HOST_WIDE_INT gprs_offset;
286 HOST_WIDE_INT f0_offset;
287 HOST_WIDE_INT f4_offset;
288 HOST_WIDE_INT f8_offset;
289 HOST_WIDE_INT backchain_offset;
290
 291 /* Numbers of the first and last GPRs for which slots in the
 292 register save area are reserved. */
293 int first_save_gpr_slot;
294 int last_save_gpr_slot;
295
296 /* Number of first and last gpr to be saved, restored. */
297 int first_save_gpr;
298 int first_restore_gpr;
299 int last_save_gpr;
300 int last_restore_gpr;
301
 302 /* Bits standing for floating point registers. Set if the
 303 respective register has to be saved. Starting with reg 16 (f0)
304 at the rightmost bit.
305 Bit 15 - 8 7 6 5 4 3 2 1 0
306 fpr 15 - 8 7 5 3 1 6 4 2 0
307 reg 31 - 24 23 22 21 20 19 18 17 16 */
308 unsigned int fpr_bitmap;
309
310 /* Number of floating point registers f8-f15 which must be saved. */
311 int high_fprs;
312
 313 /* Set if the return address needs to be saved.
 314 This flag is set by s390_return_addr_rtx if it could not use
 315 the initial value of r14 and therefore depends on r14 being
 316 saved to the stack. */
317 bool save_return_addr_p;
318
319 /* Size of stack frame. */
320 HOST_WIDE_INT frame_size;
321 };
322
323 /* Define the structure for the machine field in struct function. */
324
325 struct GTY(()) machine_function
326 {
327 struct s390_frame_layout frame_layout;
328
329 /* Literal pool base register. */
330 rtx base_reg;
331
332 /* True if we may need to perform branch splitting. */
333 bool split_branches_pending_p;
334
335 /* Some local-dynamic TLS symbol name. */
336 const char *some_ld_name;
337
338 bool has_landing_pad_p;
339 };
340
 341 /* A few accessor macros for struct cfun->machine->s390_frame_layout. */
342
343 #define cfun_frame_layout (cfun->machine->frame_layout)
344 #define cfun_save_high_fprs_p (!!cfun_frame_layout.high_fprs)
345 #define cfun_gprs_save_area_size ((cfun_frame_layout.last_save_gpr_slot - \
346 cfun_frame_layout.first_save_gpr_slot + 1) * UNITS_PER_LONG)
347 #define cfun_set_fpr_bit(BITNUM) (cfun->machine->frame_layout.fpr_bitmap |= \
348 (1 << (BITNUM)))
349 #define cfun_fpr_bit_p(BITNUM) (!!(cfun->machine->frame_layout.fpr_bitmap & \
350 (1 << (BITNUM))))
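/* Illustrative sketch, not part of the original file and guarded by #if 0
   so it is never compiled: how the fpr_bitmap accessors above behave under
   the bit layout documented for fpr_bitmap.  The function name is made up
   for illustration only.  */
#if 0
static void
s390_fpr_bitmap_example (void)
{
  cfun_set_fpr_bit (0);		/* f0, i.e. hard reg 16, maps to bit 0.  */
  cfun_set_fpr_bit (2);		/* f4, i.e. hard reg 18, maps to bit 2.  */
  gcc_assert (cfun_fpr_bit_p (0) && cfun_fpr_bit_p (2));
  gcc_assert (!cfun_fpr_bit_p (1));	/* f2 was not marked.  */
}
#endif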
351
352 /* Number of GPRs and FPRs used for argument passing. */
353 #define GP_ARG_NUM_REG 5
354 #define FP_ARG_NUM_REG (TARGET_64BIT? 4 : 2)
355
356 /* A couple of shortcuts. */
357 #define CONST_OK_FOR_J(x) \
358 CONST_OK_FOR_CONSTRAINT_P((x), 'J', "J")
359 #define CONST_OK_FOR_K(x) \
360 CONST_OK_FOR_CONSTRAINT_P((x), 'K', "K")
361 #define CONST_OK_FOR_Os(x) \
362 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
363 #define CONST_OK_FOR_Op(x) \
364 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
365 #define CONST_OK_FOR_On(x) \
366 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")
367
368 #define REGNO_PAIR_OK(REGNO, MODE) \
369 (HARD_REGNO_NREGS ((REGNO), (MODE)) == 1 || !((REGNO) & 1))
370
 371 /* The read-ahead distance, in bytes, of the dynamic branch
 372 prediction unit on a z10 (or newer) CPU. */
373 #define PREDICT_DISTANCE (TARGET_Z10 ? 384 : 2048)
374
375 /* Return the alignment for LABEL. We default to the -falign-labels
376 value except for the literal pool base label. */
377 int
378 s390_label_align (rtx label)
379 {
380 rtx prev_insn = prev_active_insn (label);
381
382 if (prev_insn == NULL_RTX)
383 goto old;
384
385 prev_insn = single_set (prev_insn);
386
387 if (prev_insn == NULL_RTX)
388 goto old;
389
390 prev_insn = SET_SRC (prev_insn);
391
392 /* Don't align literal pool base labels. */
393 if (GET_CODE (prev_insn) == UNSPEC
394 && XINT (prev_insn, 1) == UNSPEC_MAIN_BASE)
395 return 0;
396
397 old:
398 return align_labels_log;
399 }
400
401 static enum machine_mode
402 s390_libgcc_cmp_return_mode (void)
403 {
404 return TARGET_64BIT ? DImode : SImode;
405 }
406
407 static enum machine_mode
408 s390_libgcc_shift_count_mode (void)
409 {
410 return TARGET_64BIT ? DImode : SImode;
411 }
412
413 static enum machine_mode
414 s390_unwind_word_mode (void)
415 {
416 return TARGET_64BIT ? DImode : SImode;
417 }
418
419 /* Return true if the back end supports mode MODE. */
420 static bool
421 s390_scalar_mode_supported_p (enum machine_mode mode)
422 {
 423 /* In contrast to the default implementation, reject TImode constants
 424 on 31-bit TARGET_ZARCH for ABI compliance. */
425 if (!TARGET_64BIT && TARGET_ZARCH && mode == TImode)
426 return false;
427
428 if (DECIMAL_FLOAT_MODE_P (mode))
429 return default_decimal_float_supported_p ();
430
431 return default_scalar_mode_supported_p (mode);
432 }
433
434 /* Set the has_landing_pad_p flag in struct machine_function to VALUE. */
435
436 void
437 s390_set_has_landing_pad_p (bool value)
438 {
439 cfun->machine->has_landing_pad_p = value;
440 }
441
442 /* If two condition code modes are compatible, return a condition code
443 mode which is compatible with both. Otherwise, return
444 VOIDmode. */
445
446 static enum machine_mode
447 s390_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
448 {
449 if (m1 == m2)
450 return m1;
451
452 switch (m1)
453 {
454 case CCZmode:
455 if (m2 == CCUmode || m2 == CCTmode || m2 == CCZ1mode
456 || m2 == CCSmode || m2 == CCSRmode || m2 == CCURmode)
457 return m2;
458 return VOIDmode;
459
460 case CCSmode:
461 case CCUmode:
462 case CCTmode:
463 case CCSRmode:
464 case CCURmode:
465 case CCZ1mode:
466 if (m2 == CCZmode)
467 return m1;
468
469 return VOIDmode;
470
471 default:
472 return VOIDmode;
473 }
474 return VOIDmode;
475 }
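/* Illustrative sketch, not part of the original file and guarded by #if 0
   so it is never compiled: CCZmode only encodes equal/not-equal, so
   combining it with a richer mode yields the richer mode, while two
   different rich modes are incompatible.  The function name is made up
   for illustration only.  */
#if 0
static void
s390_cc_modes_compatible_example (void)
{
  gcc_assert (s390_cc_modes_compatible (CCZmode, CCUmode) == CCUmode);
  gcc_assert (s390_cc_modes_compatible (CCUmode, CCZmode) == CCUmode);
  gcc_assert (s390_cc_modes_compatible (CCSmode, CCUmode) == VOIDmode);
}
#endif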
476
477 /* Return true if SET either doesn't set the CC register, or else
478 the source and destination have matching CC modes and that
479 CC mode is at least as constrained as REQ_MODE. */
480
481 static bool
482 s390_match_ccmode_set (rtx set, enum machine_mode req_mode)
483 {
484 enum machine_mode set_mode;
485
486 gcc_assert (GET_CODE (set) == SET);
487
488 if (GET_CODE (SET_DEST (set)) != REG || !CC_REGNO_P (REGNO (SET_DEST (set))))
489 return 1;
490
491 set_mode = GET_MODE (SET_DEST (set));
492 switch (set_mode)
493 {
494 case CCSmode:
495 case CCSRmode:
496 case CCUmode:
497 case CCURmode:
498 case CCLmode:
499 case CCL1mode:
500 case CCL2mode:
501 case CCL3mode:
502 case CCT1mode:
503 case CCT2mode:
504 case CCT3mode:
505 if (req_mode != set_mode)
506 return 0;
507 break;
508
509 case CCZmode:
510 if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode
511 && req_mode != CCSRmode && req_mode != CCURmode)
512 return 0;
513 break;
514
515 case CCAPmode:
516 case CCANmode:
517 if (req_mode != CCAmode)
518 return 0;
519 break;
520
521 default:
522 gcc_unreachable ();
523 }
524
525 return (GET_MODE (SET_SRC (set)) == set_mode);
526 }
527
528 /* Return true if every SET in INSN that sets the CC register
529 has source and destination with matching CC modes and that
530 CC mode is at least as constrained as REQ_MODE.
531 If REQ_MODE is VOIDmode, always return false. */
532
533 bool
534 s390_match_ccmode (rtx insn, enum machine_mode req_mode)
535 {
536 int i;
537
538 /* s390_tm_ccmode returns VOIDmode to indicate failure. */
539 if (req_mode == VOIDmode)
540 return false;
541
542 if (GET_CODE (PATTERN (insn)) == SET)
543 return s390_match_ccmode_set (PATTERN (insn), req_mode);
544
545 if (GET_CODE (PATTERN (insn)) == PARALLEL)
546 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
547 {
548 rtx set = XVECEXP (PATTERN (insn), 0, i);
549 if (GET_CODE (set) == SET)
550 if (!s390_match_ccmode_set (set, req_mode))
551 return false;
552 }
553
554 return true;
555 }
556
557 /* If a test-under-mask instruction can be used to implement
558 (compare (and ... OP1) OP2), return the CC mode required
559 to do that. Otherwise, return VOIDmode.
560 MIXED is true if the instruction can distinguish between
 561 CC1 and CC2 for mixed selected bits (TMxx); it is false
562 if the instruction cannot (TM). */
563
564 enum machine_mode
565 s390_tm_ccmode (rtx op1, rtx op2, bool mixed)
566 {
567 int bit0, bit1;
568
569 /* ??? Fixme: should work on CONST_DOUBLE as well. */
570 if (GET_CODE (op1) != CONST_INT || GET_CODE (op2) != CONST_INT)
571 return VOIDmode;
572
573 /* Selected bits all zero: CC0.
574 e.g.: int a; if ((a & (16 + 128)) == 0) */
575 if (INTVAL (op2) == 0)
576 return CCTmode;
577
578 /* Selected bits all one: CC3.
579 e.g.: int a; if ((a & (16 + 128)) == 16 + 128) */
580 if (INTVAL (op2) == INTVAL (op1))
581 return CCT3mode;
582
583 /* Exactly two bits selected, mixed zeroes and ones: CC1 or CC2. e.g.:
584 int a;
585 if ((a & (16 + 128)) == 16) -> CCT1
586 if ((a & (16 + 128)) == 128) -> CCT2 */
587 if (mixed)
588 {
589 bit1 = exact_log2 (INTVAL (op2));
590 bit0 = exact_log2 (INTVAL (op1) ^ INTVAL (op2));
591 if (bit0 != -1 && bit1 != -1)
592 return bit0 > bit1 ? CCT1mode : CCT2mode;
593 }
594
595 return VOIDmode;
596 }
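/* Illustrative sketch, not part of the original file and guarded by #if 0
   so it is never compiled: the examples from the comment above worked
   through s390_tm_ccmode.  The function name is made up for illustration
   only.  */
#if 0
static void
s390_tm_ccmode_example (void)
{
  /* (a & (16 + 128)) == 16: only the low selected bit set -> CC1.  */
  gcc_assert (s390_tm_ccmode (GEN_INT (16 + 128), GEN_INT (16), true)
	      == CCT1mode);
  /* (a & (16 + 128)) == 128: only the high selected bit set -> CC2.  */
  gcc_assert (s390_tm_ccmode (GEN_INT (16 + 128), GEN_INT (128), true)
	      == CCT2mode);
  /* (a & (16 + 128)) == 0 needs no TMxx variant.  */
  gcc_assert (s390_tm_ccmode (GEN_INT (16 + 128), const0_rtx, false)
	      == CCTmode);
}
#endif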
597
598 /* Given a comparison code OP (EQ, NE, etc.) and the operands
599 OP0 and OP1 of a COMPARE, return the mode to be used for the
600 comparison. */
601
602 enum machine_mode
603 s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
604 {
605 switch (code)
606 {
607 case EQ:
608 case NE:
609 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
610 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
611 return CCAPmode;
612 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
613 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
614 return CCAPmode;
615 if ((GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
616 || GET_CODE (op1) == NEG)
617 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
618 return CCLmode;
619
620 if (GET_CODE (op0) == AND)
621 {
622 /* Check whether we can potentially do it via TM. */
623 enum machine_mode ccmode;
624 ccmode = s390_tm_ccmode (XEXP (op0, 1), op1, 1);
625 if (ccmode != VOIDmode)
626 {
627 /* Relax CCTmode to CCZmode to allow fall-back to AND
628 if that turns out to be beneficial. */
629 return ccmode == CCTmode ? CCZmode : ccmode;
630 }
631 }
632
633 if (register_operand (op0, HImode)
634 && GET_CODE (op1) == CONST_INT
635 && (INTVAL (op1) == -1 || INTVAL (op1) == 65535))
636 return CCT3mode;
637 if (register_operand (op0, QImode)
638 && GET_CODE (op1) == CONST_INT
639 && (INTVAL (op1) == -1 || INTVAL (op1) == 255))
640 return CCT3mode;
641
642 return CCZmode;
643
644 case LE:
645 case LT:
646 case GE:
647 case GT:
648 /* The only overflow condition of NEG and ABS happens when
649 -INT_MAX is used as parameter, which stays negative. So
650 we have an overflow from a positive value to a negative.
651 Using CCAP mode the resulting cc can be used for comparisons. */
652 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
653 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
654 return CCAPmode;
655
656 /* If constants are involved in an add instruction it is possible to use
657 the resulting cc for comparisons with zero. Knowing the sign of the
658 constant the overflow behavior gets predictable. e.g.:
659 int a, b; if ((b = a + c) > 0)
660 with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP */
661 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
662 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
663 {
664 if (INTVAL (XEXP((op0), 1)) < 0)
665 return CCANmode;
666 else
667 return CCAPmode;
668 }
669 /* Fall through. */
670 case UNORDERED:
671 case ORDERED:
672 case UNEQ:
673 case UNLE:
674 case UNLT:
675 case UNGE:
676 case UNGT:
677 case LTGT:
678 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
679 && GET_CODE (op1) != CONST_INT)
680 return CCSRmode;
681 return CCSmode;
682
683 case LTU:
684 case GEU:
685 if (GET_CODE (op0) == PLUS
686 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
687 return CCL1mode;
688
689 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
690 && GET_CODE (op1) != CONST_INT)
691 return CCURmode;
692 return CCUmode;
693
694 case LEU:
695 case GTU:
696 if (GET_CODE (op0) == MINUS
697 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
698 return CCL2mode;
699
700 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
701 && GET_CODE (op1) != CONST_INT)
702 return CCURmode;
703 return CCUmode;
704
705 default:
706 gcc_unreachable ();
707 }
708 }
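/* Illustrative sketch, not part of the original file and guarded by #if 0
   so it is never compiled: two simple cases of the mode selection above.
   The function name and the hard register number are made up for
   illustration only.  */
#if 0
static void
s390_select_ccmode_example (void)
{
  rtx reg = gen_rtx_REG (SImode, 1);

  /* A plain register compared against zero only needs EQ/NE: CCZmode.  */
  gcc_assert (s390_select_ccmode (EQ, reg, const0_rtx) == CCZmode);

  /* (reg + 1) == 0 with a small constant can reuse the CC set by the
     addition: CCAPmode.  */
  gcc_assert (s390_select_ccmode (EQ,
				  gen_rtx_PLUS (SImode, reg, const1_rtx),
				  const0_rtx)
	      == CCAPmode);
}
#endif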
709
710 /* Replace the comparison OP0 CODE OP1 by a semantically equivalent one
711 that we can implement more efficiently. */
712
713 void
714 s390_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1)
715 {
716 /* Convert ZERO_EXTRACT back to AND to enable TM patterns. */
717 if ((*code == EQ || *code == NE)
718 && *op1 == const0_rtx
719 && GET_CODE (*op0) == ZERO_EXTRACT
720 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
721 && GET_CODE (XEXP (*op0, 2)) == CONST_INT
722 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
723 {
724 rtx inner = XEXP (*op0, 0);
725 HOST_WIDE_INT modesize = GET_MODE_BITSIZE (GET_MODE (inner));
726 HOST_WIDE_INT len = INTVAL (XEXP (*op0, 1));
727 HOST_WIDE_INT pos = INTVAL (XEXP (*op0, 2));
728
729 if (len > 0 && len < modesize
730 && pos >= 0 && pos + len <= modesize
731 && modesize <= HOST_BITS_PER_WIDE_INT)
732 {
733 unsigned HOST_WIDE_INT block;
734 block = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
735 block <<= modesize - pos - len;
736
737 *op0 = gen_rtx_AND (GET_MODE (inner), inner,
738 gen_int_mode (block, GET_MODE (inner)));
739 }
740 }
741
742 /* Narrow AND of memory against immediate to enable TM. */
743 if ((*code == EQ || *code == NE)
744 && *op1 == const0_rtx
745 && GET_CODE (*op0) == AND
746 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
747 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
748 {
749 rtx inner = XEXP (*op0, 0);
750 rtx mask = XEXP (*op0, 1);
751
752 /* Ignore paradoxical SUBREGs if all extra bits are masked out. */
753 if (GET_CODE (inner) == SUBREG
754 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (inner)))
755 && (GET_MODE_SIZE (GET_MODE (inner))
756 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
757 && ((INTVAL (mask)
758 & GET_MODE_MASK (GET_MODE (inner))
759 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
760 == 0))
761 inner = SUBREG_REG (inner);
762
763 /* Do not change volatile MEMs. */
764 if (MEM_P (inner) && !MEM_VOLATILE_P (inner))
765 {
766 int part = s390_single_part (XEXP (*op0, 1),
767 GET_MODE (inner), QImode, 0);
768 if (part >= 0)
769 {
770 mask = gen_int_mode (s390_extract_part (mask, QImode, 0), QImode);
771 inner = adjust_address_nv (inner, QImode, part);
772 *op0 = gen_rtx_AND (QImode, inner, mask);
773 }
774 }
775 }
776
777 /* Narrow comparisons against 0xffff to HImode if possible. */
778 if ((*code == EQ || *code == NE)
779 && GET_CODE (*op1) == CONST_INT
780 && INTVAL (*op1) == 0xffff
781 && SCALAR_INT_MODE_P (GET_MODE (*op0))
782 && (nonzero_bits (*op0, GET_MODE (*op0))
783 & ~(unsigned HOST_WIDE_INT) 0xffff) == 0)
784 {
785 *op0 = gen_lowpart (HImode, *op0);
786 *op1 = constm1_rtx;
787 }
788
789 /* Remove redundant UNSPEC_CCU_TO_INT conversions if possible. */
790 if (GET_CODE (*op0) == UNSPEC
791 && XINT (*op0, 1) == UNSPEC_CCU_TO_INT
792 && XVECLEN (*op0, 0) == 1
793 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCUmode
794 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
795 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
796 && *op1 == const0_rtx)
797 {
798 enum rtx_code new_code = UNKNOWN;
799 switch (*code)
800 {
801 case EQ: new_code = EQ; break;
802 case NE: new_code = NE; break;
803 case LT: new_code = GTU; break;
804 case GT: new_code = LTU; break;
805 case LE: new_code = GEU; break;
806 case GE: new_code = LEU; break;
807 default: break;
808 }
809
810 if (new_code != UNKNOWN)
811 {
812 *op0 = XVECEXP (*op0, 0, 0);
813 *code = new_code;
814 }
815 }
816
817 /* Remove redundant UNSPEC_CCZ_TO_INT conversions if possible. */
818 if (GET_CODE (*op0) == UNSPEC
819 && XINT (*op0, 1) == UNSPEC_CCZ_TO_INT
820 && XVECLEN (*op0, 0) == 1
821 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCZmode
822 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
823 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
824 && *op1 == const0_rtx)
825 {
826 enum rtx_code new_code = UNKNOWN;
827 switch (*code)
828 {
829 case EQ: new_code = EQ; break;
830 case NE: new_code = NE; break;
831 default: break;
832 }
833
834 if (new_code != UNKNOWN)
835 {
836 *op0 = XVECEXP (*op0, 0, 0);
837 *code = new_code;
838 }
839 }
840
841 /* Simplify cascaded EQ, NE with const0_rtx. */
842 if ((*code == NE || *code == EQ)
843 && (GET_CODE (*op0) == EQ || GET_CODE (*op0) == NE)
844 && GET_MODE (*op0) == SImode
845 && GET_MODE (XEXP (*op0, 0)) == CCZ1mode
846 && REG_P (XEXP (*op0, 0))
847 && XEXP (*op0, 1) == const0_rtx
848 && *op1 == const0_rtx)
849 {
850 if ((*code == EQ && GET_CODE (*op0) == NE)
851 || (*code == NE && GET_CODE (*op0) == EQ))
852 *code = EQ;
853 else
854 *code = NE;
855 *op0 = XEXP (*op0, 0);
856 }
857
858 /* Prefer register over memory as first operand. */
859 if (MEM_P (*op0) && REG_P (*op1))
860 {
861 rtx tem = *op0; *op0 = *op1; *op1 = tem;
862 *code = swap_condition (*code);
863 }
864 }
865
866 /* Emit a compare instruction suitable to implement the comparison
867 OP0 CODE OP1. Return the correct condition RTL to be placed in
868 the IF_THEN_ELSE of the conditional branch testing the result. */
869
870 rtx
871 s390_emit_compare (enum rtx_code code, rtx op0, rtx op1)
872 {
873 enum machine_mode mode = s390_select_ccmode (code, op0, op1);
874 rtx cc;
875
876 /* Do not output a redundant compare instruction if a compare_and_swap
877 pattern already computed the result and the machine modes are compatible. */
878 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
879 {
880 gcc_assert (s390_cc_modes_compatible (GET_MODE (op0), mode)
881 == GET_MODE (op0));
882 cc = op0;
883 }
884 else
885 {
886 cc = gen_rtx_REG (mode, CC_REGNUM);
887 emit_insn (gen_rtx_SET (VOIDmode, cc, gen_rtx_COMPARE (mode, op0, op1)));
888 }
889
890 return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
891 }
892
893 /* Emit a SImode compare and swap instruction setting MEM to NEW_RTX if OLD
894 matches CMP.
895 Return the correct condition RTL to be placed in the IF_THEN_ELSE of the
896 conditional branch testing the result. */
897
898 static rtx
899 s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem,
900 rtx cmp, rtx new_rtx)
901 {
902 emit_insn (gen_atomic_compare_and_swapsi_internal (old, mem, cmp, new_rtx));
903 return s390_emit_compare (code, gen_rtx_REG (CCZ1mode, CC_REGNUM),
904 const0_rtx);
905 }
906
907 /* Emit a jump instruction to TARGET. If COND is NULL_RTX, emit an
908 unconditional jump, else a conditional jump under condition COND. */
909
910 void
911 s390_emit_jump (rtx target, rtx cond)
912 {
913 rtx insn;
914
915 target = gen_rtx_LABEL_REF (VOIDmode, target);
916 if (cond)
917 target = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, target, pc_rtx);
918
919 insn = gen_rtx_SET (VOIDmode, pc_rtx, target);
920 emit_jump_insn (insn);
921 }
922
923 /* Return branch condition mask to implement a branch
924 specified by CODE. Return -1 for invalid comparisons. */
925
926 int
927 s390_branch_condition_mask (rtx code)
928 {
929 const int CC0 = 1 << 3;
930 const int CC1 = 1 << 2;
931 const int CC2 = 1 << 1;
932 const int CC3 = 1 << 0;
933
934 gcc_assert (GET_CODE (XEXP (code, 0)) == REG);
935 gcc_assert (REGNO (XEXP (code, 0)) == CC_REGNUM);
936 gcc_assert (XEXP (code, 1) == const0_rtx);
937
938 switch (GET_MODE (XEXP (code, 0)))
939 {
940 case CCZmode:
941 case CCZ1mode:
942 switch (GET_CODE (code))
943 {
944 case EQ: return CC0;
945 case NE: return CC1 | CC2 | CC3;
946 default: return -1;
947 }
948 break;
949
950 case CCT1mode:
951 switch (GET_CODE (code))
952 {
953 case EQ: return CC1;
954 case NE: return CC0 | CC2 | CC3;
955 default: return -1;
956 }
957 break;
958
959 case CCT2mode:
960 switch (GET_CODE (code))
961 {
962 case EQ: return CC2;
963 case NE: return CC0 | CC1 | CC3;
964 default: return -1;
965 }
966 break;
967
968 case CCT3mode:
969 switch (GET_CODE (code))
970 {
971 case EQ: return CC3;
972 case NE: return CC0 | CC1 | CC2;
973 default: return -1;
974 }
975 break;
976
977 case CCLmode:
978 switch (GET_CODE (code))
979 {
980 case EQ: return CC0 | CC2;
981 case NE: return CC1 | CC3;
982 default: return -1;
983 }
984 break;
985
986 case CCL1mode:
987 switch (GET_CODE (code))
988 {
989 case LTU: return CC2 | CC3; /* carry */
990 case GEU: return CC0 | CC1; /* no carry */
991 default: return -1;
992 }
993 break;
994
995 case CCL2mode:
996 switch (GET_CODE (code))
997 {
998 case GTU: return CC0 | CC1; /* borrow */
999 case LEU: return CC2 | CC3; /* no borrow */
1000 default: return -1;
1001 }
1002 break;
1003
1004 case CCL3mode:
1005 switch (GET_CODE (code))
1006 {
1007 case EQ: return CC0 | CC2;
1008 case NE: return CC1 | CC3;
1009 case LTU: return CC1;
1010 case GTU: return CC3;
1011 case LEU: return CC1 | CC2;
1012 case GEU: return CC2 | CC3;
1013 default: return -1;
1014 }
1015
1016 case CCUmode:
1017 switch (GET_CODE (code))
1018 {
1019 case EQ: return CC0;
1020 case NE: return CC1 | CC2 | CC3;
1021 case LTU: return CC1;
1022 case GTU: return CC2;
1023 case LEU: return CC0 | CC1;
1024 case GEU: return CC0 | CC2;
1025 default: return -1;
1026 }
1027 break;
1028
1029 case CCURmode:
1030 switch (GET_CODE (code))
1031 {
1032 case EQ: return CC0;
1033 case NE: return CC2 | CC1 | CC3;
1034 case LTU: return CC2;
1035 case GTU: return CC1;
1036 case LEU: return CC0 | CC2;
1037 case GEU: return CC0 | CC1;
1038 default: return -1;
1039 }
1040 break;
1041
1042 case CCAPmode:
1043 switch (GET_CODE (code))
1044 {
1045 case EQ: return CC0;
1046 case NE: return CC1 | CC2 | CC3;
1047 case LT: return CC1 | CC3;
1048 case GT: return CC2;
1049 case LE: return CC0 | CC1 | CC3;
1050 case GE: return CC0 | CC2;
1051 default: return -1;
1052 }
1053 break;
1054
1055 case CCANmode:
1056 switch (GET_CODE (code))
1057 {
1058 case EQ: return CC0;
1059 case NE: return CC1 | CC2 | CC3;
1060 case LT: return CC1;
1061 case GT: return CC2 | CC3;
1062 case LE: return CC0 | CC1;
1063 case GE: return CC0 | CC2 | CC3;
1064 default: return -1;
1065 }
1066 break;
1067
1068 case CCSmode:
1069 switch (GET_CODE (code))
1070 {
1071 case EQ: return CC0;
1072 case NE: return CC1 | CC2 | CC3;
1073 case LT: return CC1;
1074 case GT: return CC2;
1075 case LE: return CC0 | CC1;
1076 case GE: return CC0 | CC2;
1077 case UNORDERED: return CC3;
1078 case ORDERED: return CC0 | CC1 | CC2;
1079 case UNEQ: return CC0 | CC3;
1080 case UNLT: return CC1 | CC3;
1081 case UNGT: return CC2 | CC3;
1082 case UNLE: return CC0 | CC1 | CC3;
1083 case UNGE: return CC0 | CC2 | CC3;
1084 case LTGT: return CC1 | CC2;
1085 default: return -1;
1086 }
1087 break;
1088
1089 case CCSRmode:
1090 switch (GET_CODE (code))
1091 {
1092 case EQ: return CC0;
1093 case NE: return CC2 | CC1 | CC3;
1094 case LT: return CC2;
1095 case GT: return CC1;
1096 case LE: return CC0 | CC2;
1097 case GE: return CC0 | CC1;
1098 case UNORDERED: return CC3;
1099 case ORDERED: return CC0 | CC2 | CC1;
1100 case UNEQ: return CC0 | CC3;
1101 case UNLT: return CC2 | CC3;
1102 case UNGT: return CC1 | CC3;
1103 case UNLE: return CC0 | CC2 | CC3;
1104 case UNGE: return CC0 | CC1 | CC3;
1105 case LTGT: return CC2 | CC1;
1106 default: return -1;
1107 }
1108 break;
1109
1110 default:
1111 return -1;
1112 }
1113 }
1114
1115
1116 /* Return branch condition mask to implement a compare and branch
1117 specified by CODE. Return -1 for invalid comparisons. */
1118
1119 int
1120 s390_compare_and_branch_condition_mask (rtx code)
1121 {
1122 const int CC0 = 1 << 3;
1123 const int CC1 = 1 << 2;
1124 const int CC2 = 1 << 1;
1125
1126 switch (GET_CODE (code))
1127 {
1128 case EQ:
1129 return CC0;
1130 case NE:
1131 return CC1 | CC2;
1132 case LT:
1133 case LTU:
1134 return CC1;
1135 case GT:
1136 case GTU:
1137 return CC2;
1138 case LE:
1139 case LEU:
1140 return CC0 | CC1;
1141 case GE:
1142 case GEU:
1143 return CC0 | CC2;
1144 default:
1145 gcc_unreachable ();
1146 }
1147 return -1;
1148 }
1149
1150 /* If INV is false, return assembler mnemonic string to implement
1151 a branch specified by CODE. If INV is true, return mnemonic
1152 for the corresponding inverted branch. */
1153
1154 static const char *
1155 s390_branch_condition_mnemonic (rtx code, int inv)
1156 {
1157 int mask;
1158
1159 static const char *const mnemonic[16] =
1160 {
1161 NULL, "o", "h", "nle",
1162 "l", "nhe", "lh", "ne",
1163 "e", "nlh", "he", "nl",
1164 "le", "nh", "no", NULL
1165 };
1166
1167 if (GET_CODE (XEXP (code, 0)) == REG
1168 && REGNO (XEXP (code, 0)) == CC_REGNUM
1169 && XEXP (code, 1) == const0_rtx)
1170 mask = s390_branch_condition_mask (code);
1171 else
1172 mask = s390_compare_and_branch_condition_mask (code);
1173
1174 gcc_assert (mask >= 0);
1175
1176 if (inv)
1177 mask ^= 15;
1178
1179 gcc_assert (mask >= 1 && mask <= 14);
1180
1181 return mnemonic[mask];
1182 }
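/* Illustrative sketch, not part of the original file and guarded by #if 0
   so it is never compiled: for an EQ test of the CC register in CCZmode
   the mask is CC0 (8), which the table above prints as "e"; the inverted
   branch uses mask 7 and prints as "ne".  The function name is made up
   for illustration only.  */
#if 0
static void
s390_branch_mnemonic_example (void)
{
  rtx cond = gen_rtx_EQ (VOIDmode,
			 gen_rtx_REG (CCZmode, CC_REGNUM), const0_rtx);

  gcc_assert (strcmp (s390_branch_condition_mnemonic (cond, 0), "e") == 0);
  gcc_assert (strcmp (s390_branch_condition_mnemonic (cond, 1), "ne") == 0);
}
#endif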
1183
 1184 /* Return the part of OP which has a value different from DEF.
 1185 The size of the part is determined by MODE.
 1186 Use this function only if you already know that OP really
 1187 contains such a part. */
1188
1189 unsigned HOST_WIDE_INT
1190 s390_extract_part (rtx op, enum machine_mode mode, int def)
1191 {
1192 unsigned HOST_WIDE_INT value = 0;
1193 int max_parts = HOST_BITS_PER_WIDE_INT / GET_MODE_BITSIZE (mode);
1194 int part_bits = GET_MODE_BITSIZE (mode);
1195 unsigned HOST_WIDE_INT part_mask
1196 = ((unsigned HOST_WIDE_INT)1 << part_bits) - 1;
1197 int i;
1198
1199 for (i = 0; i < max_parts; i++)
1200 {
1201 if (i == 0)
1202 value = (unsigned HOST_WIDE_INT) INTVAL (op);
1203 else
1204 value >>= part_bits;
1205
1206 if ((value & part_mask) != (def & part_mask))
1207 return value & part_mask;
1208 }
1209
1210 gcc_unreachable ();
1211 }
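/* Illustrative sketch, not part of the original file and guarded by #if 0
   so it is never compiled (it also assumes a 64-bit HOST_WIDE_INT): for
   0x12340000 the only HImode part different from a default of 0 is the
   high halfword, so 0x1234 is returned.  The function name is made up for
   illustration only.  */
#if 0
static void
s390_extract_part_example (void)
{
  gcc_assert (s390_extract_part (GEN_INT (0x12340000), HImode, 0) == 0x1234);
}
#endif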
1212
1213 /* If OP is an integer constant of mode MODE with exactly one
1214 part of mode PART_MODE unequal to DEF, return the number of that
1215 part. Otherwise, return -1. */
1216
1217 int
1218 s390_single_part (rtx op,
1219 enum machine_mode mode,
1220 enum machine_mode part_mode,
1221 int def)
1222 {
1223 unsigned HOST_WIDE_INT value = 0;
1224 int n_parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (part_mode);
1225 unsigned HOST_WIDE_INT part_mask
1226 = ((unsigned HOST_WIDE_INT)1 << GET_MODE_BITSIZE (part_mode)) - 1;
1227 int i, part = -1;
1228
1229 if (GET_CODE (op) != CONST_INT)
1230 return -1;
1231
1232 for (i = 0; i < n_parts; i++)
1233 {
1234 if (i == 0)
1235 value = (unsigned HOST_WIDE_INT) INTVAL (op);
1236 else
1237 value >>= GET_MODE_BITSIZE (part_mode);
1238
1239 if ((value & part_mask) != (def & part_mask))
1240 {
1241 if (part != -1)
1242 return -1;
1243 else
1244 part = i;
1245 }
1246 }
1247 return part == -1 ? -1 : n_parts - 1 - part;
1248 }
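/* Illustrative sketch, not part of the original file and guarded by #if 0
   so it is never compiled: parts are numbered starting from the most
   significant part, so the single non-zero byte of 0x00ff0000 is part 1;
   a constant with two differing bytes has no single part.  The function
   name is made up for illustration only.  */
#if 0
static void
s390_single_part_example (void)
{
  gcc_assert (s390_single_part (GEN_INT (0x00ff0000), SImode, QImode, 0) == 1);
  gcc_assert (s390_single_part (GEN_INT (0x00ff00ff), SImode, QImode, 0) == -1);
}
#endif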
1249
1250 /* Return true if IN contains a contiguous bitfield in the lower SIZE
1251 bits and no other bits are set in IN. POS and LENGTH can be used
1252 to obtain the start position and the length of the bitfield.
1253
1254 POS gives the position of the first bit of the bitfield counting
1255 from the lowest order bit starting with zero. In order to use this
1256 value for S/390 instructions this has to be converted to "bits big
1257 endian" style. */
1258
1259 bool
1260 s390_contiguous_bitmask_p (unsigned HOST_WIDE_INT in, int size,
1261 int *pos, int *length)
1262 {
1263 int tmp_pos = 0;
1264 int tmp_length = 0;
1265 int i;
1266 unsigned HOST_WIDE_INT mask = 1ULL;
1267 bool contiguous = false;
1268
1269 for (i = 0; i < size; mask <<= 1, i++)
1270 {
1271 if (contiguous)
1272 {
1273 if (mask & in)
1274 tmp_length++;
1275 else
1276 break;
1277 }
1278 else
1279 {
1280 if (mask & in)
1281 {
1282 contiguous = true;
1283 tmp_length++;
1284 }
1285 else
1286 tmp_pos++;
1287 }
1288 }
1289
1290 if (!tmp_length)
1291 return false;
1292
1293 /* Calculate a mask for all bits beyond the contiguous bits. */
1294 mask = (-1LL & ~(((1ULL << (tmp_length + tmp_pos - 1)) << 1) - 1));
1295
1296 if (mask & in)
1297 return false;
1298
1299 if (tmp_length + tmp_pos - 1 > size)
1300 return false;
1301
1302 if (length)
1303 *length = tmp_length;
1304
1305 if (pos)
1306 *pos = tmp_pos;
1307
1308 return true;
1309 }
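/* Illustrative sketch, not part of the original file and guarded by #if 0
   so it is never compiled: 0x1e0 has bits 5..8 set, so it is a contiguous
   bitmask with POS 5 and LENGTH 4, whereas 0x5 is not contiguous.  The
   function name is made up for illustration only.  */
#if 0
static void
s390_contiguous_bitmask_example (void)
{
  int pos, len;

  gcc_assert (s390_contiguous_bitmask_p (0x1e0, 32, &pos, &len));
  gcc_assert (pos == 5 && len == 4);
  gcc_assert (!s390_contiguous_bitmask_p (0x5, 32, NULL, NULL));
}
#endif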
1310
1311 /* Check whether we can (and want to) split a double-word
1312 move in mode MODE from SRC to DST into two single-word
1313 moves, moving the subword FIRST_SUBWORD first. */
1314
1315 bool
1316 s390_split_ok_p (rtx dst, rtx src, enum machine_mode mode, int first_subword)
1317 {
1318 /* Floating point registers cannot be split. */
1319 if (FP_REG_P (src) || FP_REG_P (dst))
1320 return false;
1321
1322 /* We don't need to split if operands are directly accessible. */
1323 if (s_operand (src, mode) || s_operand (dst, mode))
1324 return false;
1325
1326 /* Non-offsettable memory references cannot be split. */
1327 if ((GET_CODE (src) == MEM && !offsettable_memref_p (src))
1328 || (GET_CODE (dst) == MEM && !offsettable_memref_p (dst)))
1329 return false;
1330
1331 /* Moving the first subword must not clobber a register
1332 needed to move the second subword. */
1333 if (register_operand (dst, mode))
1334 {
1335 rtx subreg = operand_subword (dst, first_subword, 0, mode);
1336 if (reg_overlap_mentioned_p (subreg, src))
1337 return false;
1338 }
1339
1340 return true;
1341 }
1342
1343 /* Return true if it can be proven that [MEM1, MEM1 + SIZE]
1344 and [MEM2, MEM2 + SIZE] do overlap and false
1345 otherwise. */
1346
1347 bool
1348 s390_overlap_p (rtx mem1, rtx mem2, HOST_WIDE_INT size)
1349 {
1350 rtx addr1, addr2, addr_delta;
1351 HOST_WIDE_INT delta;
1352
1353 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
1354 return true;
1355
1356 if (size == 0)
1357 return false;
1358
1359 addr1 = XEXP (mem1, 0);
1360 addr2 = XEXP (mem2, 0);
1361
1362 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
1363
1364 /* This overlapping check is used by peepholes merging memory block operations.
1365 Overlapping operations would otherwise be recognized by the S/390 hardware
1366 and would fall back to a slower implementation. Allowing overlapping
1367 operations would lead to slow code but not to wrong code. Therefore we are
1368 somewhat optimistic if we cannot prove that the memory blocks are
1369 overlapping.
1370 That's why we return false here although this may accept operations on
1371 overlapping memory areas. */
1372 if (!addr_delta || GET_CODE (addr_delta) != CONST_INT)
1373 return false;
1374
1375 delta = INTVAL (addr_delta);
1376
1377 if (delta == 0
1378 || (delta > 0 && delta < size)
1379 || (delta < 0 && -delta < size))
1380 return true;
1381
1382 return false;
1383 }
1384
1385 /* Check whether the address of memory reference MEM2 equals exactly
1386 the address of memory reference MEM1 plus DELTA. Return true if
1387 we can prove this to be the case, false otherwise. */
1388
1389 bool
1390 s390_offset_p (rtx mem1, rtx mem2, rtx delta)
1391 {
1392 rtx addr1, addr2, addr_delta;
1393
1394 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
1395 return false;
1396
1397 addr1 = XEXP (mem1, 0);
1398 addr2 = XEXP (mem2, 0);
1399
1400 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
1401 if (!addr_delta || !rtx_equal_p (addr_delta, delta))
1402 return false;
1403
1404 return true;
1405 }
1406
1407 /* Expand logical operator CODE in mode MODE with operands OPERANDS. */
1408
1409 void
1410 s390_expand_logical_operator (enum rtx_code code, enum machine_mode mode,
1411 rtx *operands)
1412 {
1413 enum machine_mode wmode = mode;
1414 rtx dst = operands[0];
1415 rtx src1 = operands[1];
1416 rtx src2 = operands[2];
1417 rtx op, clob, tem;
1418
1419 /* If we cannot handle the operation directly, use a temp register. */
1420 if (!s390_logical_operator_ok_p (operands))
1421 dst = gen_reg_rtx (mode);
1422
1423 /* QImode and HImode patterns make sense only if we have a destination
1424 in memory. Otherwise perform the operation in SImode. */
1425 if ((mode == QImode || mode == HImode) && GET_CODE (dst) != MEM)
1426 wmode = SImode;
1427
1428 /* Widen operands if required. */
1429 if (mode != wmode)
1430 {
1431 if (GET_CODE (dst) == SUBREG
1432 && (tem = simplify_subreg (wmode, dst, mode, 0)) != 0)
1433 dst = tem;
1434 else if (REG_P (dst))
1435 dst = gen_rtx_SUBREG (wmode, dst, 0);
1436 else
1437 dst = gen_reg_rtx (wmode);
1438
1439 if (GET_CODE (src1) == SUBREG
1440 && (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)
1441 src1 = tem;
1442 else if (GET_MODE (src1) != VOIDmode)
1443 src1 = gen_rtx_SUBREG (wmode, force_reg (mode, src1), 0);
1444
1445 if (GET_CODE (src2) == SUBREG
1446 && (tem = simplify_subreg (wmode, src2, mode, 0)) != 0)
1447 src2 = tem;
1448 else if (GET_MODE (src2) != VOIDmode)
1449 src2 = gen_rtx_SUBREG (wmode, force_reg (mode, src2), 0);
1450 }
1451
1452 /* Emit the instruction. */
1453 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, wmode, src1, src2));
1454 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
1455 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
1456
1457 /* Fix up the destination if needed. */
1458 if (dst != operands[0])
1459 emit_move_insn (operands[0], gen_lowpart (mode, dst));
1460 }
1461
1462 /* Check whether OPERANDS are OK for a logical operation (AND, IOR, XOR). */
1463
1464 bool
1465 s390_logical_operator_ok_p (rtx *operands)
1466 {
1467 /* If the destination operand is in memory, it needs to coincide
1468 with one of the source operands. After reload, it has to be
1469 the first source operand. */
1470 if (GET_CODE (operands[0]) == MEM)
1471 return rtx_equal_p (operands[0], operands[1])
1472 || (!reload_completed && rtx_equal_p (operands[0], operands[2]));
1473
1474 return true;
1475 }
1476
1477 /* Narrow logical operation CODE of memory operand MEMOP with immediate
1478 operand IMMOP to switch from SS to SI type instructions. */
1479
1480 void
1481 s390_narrow_logical_operator (enum rtx_code code, rtx *memop, rtx *immop)
1482 {
1483 int def = code == AND ? -1 : 0;
1484 HOST_WIDE_INT mask;
1485 int part;
1486
1487 gcc_assert (GET_CODE (*memop) == MEM);
1488 gcc_assert (!MEM_VOLATILE_P (*memop));
1489
1490 mask = s390_extract_part (*immop, QImode, def);
1491 part = s390_single_part (*immop, GET_MODE (*memop), QImode, def);
1492 gcc_assert (part >= 0);
1493
1494 *memop = adjust_address (*memop, QImode, part);
1495 *immop = gen_int_mode (mask, QImode);
1496 }
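/* Illustrative sketch, not part of the original file and guarded by #if 0
   so it is never compiled: an SImode AND with 0xffffff00 only changes the
   least significant byte, which on this big-endian target is the QImode
   part at offset 3, so the operands are narrowed accordingly.  The
   function name and the use of the stack pointer as a dummy address are
   made up for illustration only.  */
#if 0
static void
s390_narrow_logical_example (void)
{
  rtx mem = gen_rtx_MEM (SImode, stack_pointer_rtx);
  rtx imm = gen_int_mode (0xffffff00, SImode);

  s390_narrow_logical_operator (AND, &mem, &imm);
  gcc_assert (GET_MODE (mem) == QImode && INTVAL (imm) == 0);
}
#endif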
1497
1498
1499 /* How to allocate a 'struct machine_function'. */
1500
1501 static struct machine_function *
1502 s390_init_machine_status (void)
1503 {
1504 return ggc_alloc_cleared_machine_function ();
1505 }
1506
1507 static void
1508 s390_option_override (void)
1509 {
1510 /* Set up function hooks. */
1511 init_machine_status = s390_init_machine_status;
1512
1513 /* Architecture mode defaults according to ABI. */
1514 if (!(target_flags_explicit & MASK_ZARCH))
1515 {
1516 if (TARGET_64BIT)
1517 target_flags |= MASK_ZARCH;
1518 else
1519 target_flags &= ~MASK_ZARCH;
1520 }
1521
 1522 /* Set the -march default in case it hasn't been specified on the
 1523 command line. */
1524 if (s390_arch == PROCESSOR_max)
1525 {
1526 s390_arch_string = TARGET_ZARCH? "z900" : "g5";
1527 s390_arch = TARGET_ZARCH ? PROCESSOR_2064_Z900 : PROCESSOR_9672_G5;
1528 s390_arch_flags = processor_flags_table[(int)s390_arch];
1529 }
1530
1531 /* Determine processor to tune for. */
1532 if (s390_tune == PROCESSOR_max)
1533 {
1534 s390_tune = s390_arch;
1535 s390_tune_flags = s390_arch_flags;
1536 }
1537
1538 /* Sanity checks. */
1539 if (TARGET_ZARCH && !TARGET_CPU_ZARCH)
1540 error ("z/Architecture mode not supported on %s", s390_arch_string);
1541 if (TARGET_64BIT && !TARGET_ZARCH)
1542 error ("64-bit ABI not supported in ESA/390 mode");
1543
1544 /* Use hardware DFP if available and not explicitly disabled by
1545 user. E.g. with -m31 -march=z10 -mzarch */
1546 if (!(target_flags_explicit & MASK_HARD_DFP) && TARGET_DFP)
1547 target_flags |= MASK_HARD_DFP;
1548
1549 if (TARGET_HARD_DFP && !TARGET_DFP)
1550 {
1551 if (target_flags_explicit & MASK_HARD_DFP)
1552 {
1553 if (!TARGET_CPU_DFP)
1554 error ("hardware decimal floating point instructions"
1555 " not available on %s", s390_arch_string);
1556 if (!TARGET_ZARCH)
1557 error ("hardware decimal floating point instructions"
1558 " not available in ESA/390 mode");
1559 }
1560 else
1561 target_flags &= ~MASK_HARD_DFP;
1562 }
1563
1564 if ((target_flags_explicit & MASK_SOFT_FLOAT) && TARGET_SOFT_FLOAT)
1565 {
1566 if ((target_flags_explicit & MASK_HARD_DFP) && TARGET_HARD_DFP)
1567 error ("-mhard-dfp can%'t be used in conjunction with -msoft-float");
1568
1569 target_flags &= ~MASK_HARD_DFP;
1570 }
1571
1572 /* Set processor cost function. */
1573 switch (s390_tune)
1574 {
1575 case PROCESSOR_2084_Z990:
1576 s390_cost = &z990_cost;
1577 break;
1578 case PROCESSOR_2094_Z9_109:
1579 s390_cost = &z9_109_cost;
1580 break;
1581 case PROCESSOR_2097_Z10:
 1582 s390_cost = &z10_cost;
      break;
1583 case PROCESSOR_2817_Z196:
1584 s390_cost = &z196_cost;
1585 break;
1586 default:
1587 s390_cost = &z900_cost;
1588 }
1589
1590 if (TARGET_BACKCHAIN && TARGET_PACKED_STACK && TARGET_HARD_FLOAT)
1591 error ("-mbackchain -mpacked-stack -mhard-float are not supported "
1592 "in combination");
1593
1594 if (s390_stack_size)
1595 {
1596 if (s390_stack_guard >= s390_stack_size)
1597 error ("stack size must be greater than the stack guard value");
1598 else if (s390_stack_size > 1 << 16)
1599 error ("stack size must not be greater than 64k");
1600 }
1601 else if (s390_stack_guard)
1602 error ("-mstack-guard implies use of -mstack-size");
1603
1604 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
1605 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
1606 target_flags |= MASK_LONG_DOUBLE_128;
1607 #endif
1608
1609 if (s390_tune == PROCESSOR_2097_Z10
1610 || s390_tune == PROCESSOR_2817_Z196)
1611 {
1612 maybe_set_param_value (PARAM_MAX_UNROLLED_INSNS, 100,
1613 global_options.x_param_values,
1614 global_options_set.x_param_values);
1615 maybe_set_param_value (PARAM_MAX_UNROLL_TIMES, 32,
1616 global_options.x_param_values,
1617 global_options_set.x_param_values);
1618 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 2000,
1619 global_options.x_param_values,
1620 global_options_set.x_param_values);
1621 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEEL_TIMES, 64,
1622 global_options.x_param_values,
1623 global_options_set.x_param_values);
1624 }
1625
1626 maybe_set_param_value (PARAM_MAX_PENDING_LIST_LENGTH, 256,
1627 global_options.x_param_values,
1628 global_options_set.x_param_values);
1629 /* values for loop prefetching */
1630 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, 256,
1631 global_options.x_param_values,
1632 global_options_set.x_param_values);
1633 maybe_set_param_value (PARAM_L1_CACHE_SIZE, 128,
1634 global_options.x_param_values,
1635 global_options_set.x_param_values);
 1636 /* s390 has more than 2 cache levels and their size is much larger.
 1637 Since we are always running virtualized, assume that we only get a
 1638 small part of the caches above L1. */
1639 maybe_set_param_value (PARAM_L2_CACHE_SIZE, 1500,
1640 global_options.x_param_values,
1641 global_options_set.x_param_values);
1642 maybe_set_param_value (PARAM_PREFETCH_MIN_INSN_TO_MEM_RATIO, 2,
1643 global_options.x_param_values,
1644 global_options_set.x_param_values);
1645 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6,
1646 global_options.x_param_values,
1647 global_options_set.x_param_values);
1648
1649 /* This cannot reside in s390_option_optimization_table since HAVE_prefetch
1650 requires the arch flags to be evaluated already. Since prefetching
1651 is beneficial on s390, we enable it if available. */
1652 if (flag_prefetch_loop_arrays < 0 && HAVE_prefetch && optimize >= 3)
1653 flag_prefetch_loop_arrays = 1;
1654
1655 /* Use the alternative scheduling-pressure algorithm by default. */
1656 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM, 2,
1657 global_options.x_param_values,
1658 global_options_set.x_param_values);
1659
1660 if (TARGET_TPF)
1661 {
1662 /* Don't emit DWARF3/4 unless specifically selected. The TPF
1663 debuggers do not yet support DWARF 3/4. */
1664 if (!global_options_set.x_dwarf_strict)
1665 dwarf_strict = 1;
1666 if (!global_options_set.x_dwarf_version)
1667 dwarf_version = 2;
1668 }
1669 }
1670
1671 /* Map for smallest class containing reg regno. */
1672
1673 const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
1674 { GENERAL_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1675 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1676 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1677 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1678 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1679 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1680 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1681 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1682 ADDR_REGS, CC_REGS, ADDR_REGS, ADDR_REGS,
1683 ACCESS_REGS, ACCESS_REGS
1684 };
1685
1686 /* Return attribute type of insn. */
1687
1688 static enum attr_type
1689 s390_safe_attr_type (rtx insn)
1690 {
1691 if (recog_memoized (insn) >= 0)
1692 return get_attr_type (insn);
1693 else
1694 return TYPE_NONE;
1695 }
1696
1697 /* Return true if DISP is a valid short displacement. */
1698
1699 static bool
1700 s390_short_displacement (rtx disp)
1701 {
1702 /* No displacement is OK. */
1703 if (!disp)
1704 return true;
1705
1706 /* Without the long displacement facility we don't need to
 1707 distinguish between long and short displacements. */
1708 if (!TARGET_LONG_DISPLACEMENT)
1709 return true;
1710
1711 /* Integer displacement in range. */
1712 if (GET_CODE (disp) == CONST_INT)
1713 return INTVAL (disp) >= 0 && INTVAL (disp) < 4096;
1714
1715 /* GOT offset is not OK, the GOT can be large. */
1716 if (GET_CODE (disp) == CONST
1717 && GET_CODE (XEXP (disp, 0)) == UNSPEC
1718 && (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
1719 || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
1720 return false;
1721
1722 /* All other symbolic constants are literal pool references,
1723 which are OK as the literal pool must be small. */
1724 if (GET_CODE (disp) == CONST)
1725 return true;
1726
1727 return false;
1728 }
1729
1730 /* Decompose a RTL expression ADDR for a memory address into
1731 its components, returned in OUT.
1732
1733 Returns false if ADDR is not a valid memory address, true
1734 otherwise. If OUT is NULL, don't return the components,
1735 but check for validity only.
1736
1737 Note: Only addresses in canonical form are recognized.
1738 LEGITIMIZE_ADDRESS should convert non-canonical forms to the
1739 canonical form so that they will be recognized. */
1740
1741 static int
1742 s390_decompose_address (rtx addr, struct s390_address *out)
1743 {
1744 HOST_WIDE_INT offset = 0;
1745 rtx base = NULL_RTX;
1746 rtx indx = NULL_RTX;
1747 rtx disp = NULL_RTX;
1748 rtx orig_disp;
1749 bool pointer = false;
1750 bool base_ptr = false;
1751 bool indx_ptr = false;
1752 bool literal_pool = false;
1753
1754 /* We may need to substitute the literal pool base register into the address
1755 below. However, at this point we do not know which register is going to
1756 be used as base, so we substitute the arg pointer register. This is going
1757 to be treated as holding a pointer below -- it shouldn't be used for any
1758 other purpose. */
1759 rtx fake_pool_base = gen_rtx_REG (Pmode, ARG_POINTER_REGNUM);
1760
1761 /* Decompose address into base + index + displacement. */
1762
1763 if (GET_CODE (addr) == REG || GET_CODE (addr) == UNSPEC)
1764 base = addr;
1765
1766 else if (GET_CODE (addr) == PLUS)
1767 {
1768 rtx op0 = XEXP (addr, 0);
1769 rtx op1 = XEXP (addr, 1);
1770 enum rtx_code code0 = GET_CODE (op0);
1771 enum rtx_code code1 = GET_CODE (op1);
1772
1773 if (code0 == REG || code0 == UNSPEC)
1774 {
1775 if (code1 == REG || code1 == UNSPEC)
1776 {
1777 indx = op0; /* index + base */
1778 base = op1;
1779 }
1780
1781 else
1782 {
1783 base = op0; /* base + displacement */
1784 disp = op1;
1785 }
1786 }
1787
1788 else if (code0 == PLUS)
1789 {
1790 indx = XEXP (op0, 0); /* index + base + disp */
1791 base = XEXP (op0, 1);
1792 disp = op1;
1793 }
1794
1795 else
1796 {
1797 return false;
1798 }
1799 }
1800
1801 else
1802 disp = addr; /* displacement */
1803
1804 /* Extract integer part of displacement. */
1805 orig_disp = disp;
1806 if (disp)
1807 {
1808 if (GET_CODE (disp) == CONST_INT)
1809 {
1810 offset = INTVAL (disp);
1811 disp = NULL_RTX;
1812 }
1813 else if (GET_CODE (disp) == CONST
1814 && GET_CODE (XEXP (disp, 0)) == PLUS
1815 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
1816 {
1817 offset = INTVAL (XEXP (XEXP (disp, 0), 1));
1818 disp = XEXP (XEXP (disp, 0), 0);
1819 }
1820 }
1821
1822 /* Strip off CONST here to avoid special case tests later. */
1823 if (disp && GET_CODE (disp) == CONST)
1824 disp = XEXP (disp, 0);
1825
1826 /* We can convert literal pool addresses to
1827 displacements by basing them off the base register. */
1828 if (disp && GET_CODE (disp) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (disp))
1829 {
1830 /* Either base or index must be free to hold the base register. */
1831 if (!base)
1832 base = fake_pool_base, literal_pool = true;
1833 else if (!indx)
1834 indx = fake_pool_base, literal_pool = true;
1835 else
1836 return false;
1837
1838 /* Mark up the displacement. */
1839 disp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, disp),
1840 UNSPEC_LTREL_OFFSET);
1841 }
1842
1843 /* Validate base register. */
1844 if (base)
1845 {
1846 if (GET_CODE (base) == UNSPEC)
1847 switch (XINT (base, 1))
1848 {
1849 case UNSPEC_LTREF:
1850 if (!disp)
1851 disp = gen_rtx_UNSPEC (Pmode,
1852 gen_rtvec (1, XVECEXP (base, 0, 0)),
1853 UNSPEC_LTREL_OFFSET);
1854 else
1855 return false;
1856
1857 base = XVECEXP (base, 0, 1);
1858 break;
1859
1860 case UNSPEC_LTREL_BASE:
1861 if (XVECLEN (base, 0) == 1)
1862 base = fake_pool_base, literal_pool = true;
1863 else
1864 base = XVECEXP (base, 0, 1);
1865 break;
1866
1867 default:
1868 return false;
1869 }
1870
1871 if (!REG_P (base)
1872 || (GET_MODE (base) != SImode
1873 && GET_MODE (base) != Pmode))
1874 return false;
1875
1876 if (REGNO (base) == STACK_POINTER_REGNUM
1877 || REGNO (base) == FRAME_POINTER_REGNUM
1878 || ((reload_completed || reload_in_progress)
1879 && frame_pointer_needed
1880 && REGNO (base) == HARD_FRAME_POINTER_REGNUM)
1881 || REGNO (base) == ARG_POINTER_REGNUM
1882 || (flag_pic
1883 && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
1884 pointer = base_ptr = true;
1885
1886 if ((reload_completed || reload_in_progress)
1887 && base == cfun->machine->base_reg)
1888 pointer = base_ptr = literal_pool = true;
1889 }
1890
1891 /* Validate index register. */
1892 if (indx)
1893 {
1894 if (GET_CODE (indx) == UNSPEC)
1895 switch (XINT (indx, 1))
1896 {
1897 case UNSPEC_LTREF:
1898 if (!disp)
1899 disp = gen_rtx_UNSPEC (Pmode,
1900 gen_rtvec (1, XVECEXP (indx, 0, 0)),
1901 UNSPEC_LTREL_OFFSET);
1902 else
1903 return false;
1904
1905 indx = XVECEXP (indx, 0, 1);
1906 break;
1907
1908 case UNSPEC_LTREL_BASE:
1909 if (XVECLEN (indx, 0) == 1)
1910 indx = fake_pool_base, literal_pool = true;
1911 else
1912 indx = XVECEXP (indx, 0, 1);
1913 break;
1914
1915 default:
1916 return false;
1917 }
1918
1919 if (!REG_P (indx)
1920 || (GET_MODE (indx) != SImode
1921 && GET_MODE (indx) != Pmode))
1922 return false;
1923
1924 if (REGNO (indx) == STACK_POINTER_REGNUM
1925 || REGNO (indx) == FRAME_POINTER_REGNUM
1926 || ((reload_completed || reload_in_progress)
1927 && frame_pointer_needed
1928 && REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
1929 || REGNO (indx) == ARG_POINTER_REGNUM
1930 || (flag_pic
1931 && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
1932 pointer = indx_ptr = true;
1933
1934 if ((reload_completed || reload_in_progress)
1935 && indx == cfun->machine->base_reg)
1936 pointer = indx_ptr = literal_pool = true;
1937 }
1938
1939 /* Prefer to use pointer as base, not index. */
1940 if (base && indx && !base_ptr
1941 && (indx_ptr || (!REG_POINTER (base) && REG_POINTER (indx))))
1942 {
1943 rtx tmp = base;
1944 base = indx;
1945 indx = tmp;
1946 }
1947
1948 /* Validate displacement. */
1949 if (!disp)
1950 {
1951 /* If virtual registers are involved, the displacement will change later
1952 anyway as the virtual registers get eliminated. This could make a
1953 valid displacement invalid, but it is more likely to make an invalid
1954 displacement valid, because we sometimes access the register save area
1955 via negative offsets to one of those registers.
1956 Thus we don't check the displacement for validity here. If after
1957 elimination the displacement turns out to be invalid after all,
1958 this is fixed up by reload in any case. */
1959 if (base != arg_pointer_rtx
1960 && indx != arg_pointer_rtx
1961 && base != return_address_pointer_rtx
1962 && indx != return_address_pointer_rtx
1963 && base != frame_pointer_rtx
1964 && indx != frame_pointer_rtx
1965 && base != virtual_stack_vars_rtx
1966 && indx != virtual_stack_vars_rtx)
1967 if (!DISP_IN_RANGE (offset))
1968 return false;
1969 }
1970 else
1971 {
1972 /* All the special cases are pointers. */
1973 pointer = true;
1974
1975 /* In the small-PIC case, the linker converts @GOT
1976 and @GOTNTPOFF offsets to possible displacements. */
1977 if (GET_CODE (disp) == UNSPEC
1978 && (XINT (disp, 1) == UNSPEC_GOT
1979 || XINT (disp, 1) == UNSPEC_GOTNTPOFF)
1980 && flag_pic == 1)
1981 {
1982 ;
1983 }
1984
1985 /* Accept pool label offsets. */
1986 else if (GET_CODE (disp) == UNSPEC
1987 && XINT (disp, 1) == UNSPEC_POOL_OFFSET)
1988 ;
1989
1990 /* Accept literal pool references. */
1991 else if (GET_CODE (disp) == UNSPEC
1992 && XINT (disp, 1) == UNSPEC_LTREL_OFFSET)
1993 {
1994 /* In case CSE pulled a non-literal-pool reference out of
1995 the pool we have to reject the address. This is
1996 especially important when loading the GOT pointer on
1997 non-zarch CPUs. In this case the literal pool contains
1998 an lt-relative offset to the _GLOBAL_OFFSET_TABLE_ label
1999 which will most likely exceed the displacement range. */
2000 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
2001 || !CONSTANT_POOL_ADDRESS_P (XVECEXP (disp, 0, 0)))
2002 return false;
2003
2004 orig_disp = gen_rtx_CONST (Pmode, disp);
2005 if (offset)
2006 {
2007 /* If we have an offset, make sure it does not
2008 exceed the size of the constant pool entry. */
2009 rtx sym = XVECEXP (disp, 0, 0);
2010 if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))
2011 return false;
2012
2013 orig_disp = plus_constant (Pmode, orig_disp, offset);
2014 }
2015 }
2016
2017 else
2018 return false;
2019 }
2020
2021 if (!base && !indx)
2022 pointer = true;
2023
2024 if (out)
2025 {
2026 out->base = base;
2027 out->indx = indx;
2028 out->disp = orig_disp;
2029 out->pointer = pointer;
2030 out->literal_pool = literal_pool;
2031 }
2032
2033 return true;
2034 }
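
/* For illustration (hypothetical register numbers): an address such as

     (plus:SI (plus:SI (reg:SI 2) (reg:SI 3)) (const_int 4000))

   is expected to decompose into one base register, one index register
   and the displacement 4000, i.e. the classic b+x+d operand form;
   which of the two registers ends up as base depends on the pointer
   heuristics above.  */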
2035
2036 /* Decompose an RTL expression OP for a shift count into its components,
2037 and return the base register in BASE and the offset in OFFSET.
2038
2039 Return true if OP is a valid shift count, false if not. */
2040
2041 bool
2042 s390_decompose_shift_count (rtx op, rtx *base, HOST_WIDE_INT *offset)
2043 {
2044 HOST_WIDE_INT off = 0;
2045
2046 /* We can have an integer constant, an address register,
2047 or a sum of the two. */
2048 if (GET_CODE (op) == CONST_INT)
2049 {
2050 off = INTVAL (op);
2051 op = NULL_RTX;
2052 }
2053 if (op && GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT)
2054 {
2055 off = INTVAL (XEXP (op, 1));
2056 op = XEXP (op, 0);
2057 }
2058 while (op && GET_CODE (op) == SUBREG)
2059 op = SUBREG_REG (op);
2060
2061 if (op && GET_CODE (op) != REG)
2062 return false;
2063
2064 if (offset)
2065 *offset = off;
2066 if (base)
2067 *base = op;
2068
2069 return true;
2070 }
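
/* For illustration, a hypothetical caller of s390_decompose_shift_count:

     rtx base;
     HOST_WIDE_INT off;

     if (s390_decompose_shift_count (op, &base, &off))
       ...

   For op == (plus:SI (reg:SI 1) (const_int 7)) this yields
   base == (reg:SI 1) and off == 7; for op == (const_int 3) it
   yields base == NULL_RTX and off == 3.  */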
2071
2072
2073 /* Return true if OP is a valid address without index. */
2074
2075 bool
2076 s390_legitimate_address_without_index_p (rtx op)
2077 {
2078 struct s390_address addr;
2079
2080 if (!s390_decompose_address (XEXP (op, 0), &addr))
2081 return false;
2082 if (addr.indx)
2083 return false;
2084
2085 return true;
2086 }
2087
2088
2089 /* Return true if ADDR is of kind symbol_ref or symbol_ref + const_int
2090 and return these parts in SYMREF and ADDEND. You can pass NULL in
2091 SYMREF and/or ADDEND if you are not interested in these values.
2092 Literal pool references are *not* considered symbol references. */
2093
2094 static bool
2095 s390_symref_operand_p (rtx addr, rtx *symref, HOST_WIDE_INT *addend)
2096 {
2097 HOST_WIDE_INT tmpaddend = 0;
2098
2099 if (GET_CODE (addr) == CONST)
2100 addr = XEXP (addr, 0);
2101
2102 if (GET_CODE (addr) == PLUS)
2103 {
2104 if (GET_CODE (XEXP (addr, 0)) == SYMBOL_REF
2105 && !CONSTANT_POOL_ADDRESS_P (XEXP (addr, 0))
2106 && CONST_INT_P (XEXP (addr, 1)))
2107 {
2108 tmpaddend = INTVAL (XEXP (addr, 1));
2109 addr = XEXP (addr, 0);
2110 }
2111 else
2112 return false;
2113 }
2114 else
2115 if (GET_CODE (addr) != SYMBOL_REF || CONSTANT_POOL_ADDRESS_P (addr))
2116 return false;
2117
2118 if (symref)
2119 *symref = addr;
2120 if (addend)
2121 *addend = tmpaddend;
2122
2123 return true;
2124 }
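
/* For illustration: for ADDR == (const (plus (symbol_ref "foo")
   (const_int 8))) the routine above returns true with *SYMREF set
   to the symbol_ref and *ADDEND set to 8, provided "foo" is not a
   literal pool entry; a plain (symbol_ref "foo") yields an addend
   of 0.  The symbol name is of course only an example.  */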
2125
2126
2127 /* Return true if the address in OP is valid for constraint letter C
2128 if wrapped in a MEM rtx. Set LIT_POOL_OK to true if literal
2129 pool MEMs should be accepted. Only the Q, R, S, T constraint
2130 letters are allowed for C. */
2131
2132 static int
2133 s390_check_qrst_address (char c, rtx op, bool lit_pool_ok)
2134 {
2135 struct s390_address addr;
2136 bool decomposed = false;
2137
2138 /* This check makes sure that no symbolic addresses (except literal
2139 pool references) are accepted by the R or T constraints. */
2140 if (s390_symref_operand_p (op, NULL, NULL))
2141 return 0;
2142
2143 /* Ensure literal pool references are only accepted if LIT_POOL_OK. */
2144 if (!lit_pool_ok)
2145 {
2146 if (!s390_decompose_address (op, &addr))
2147 return 0;
2148 if (addr.literal_pool)
2149 return 0;
2150 decomposed = true;
2151 }
2152
2153 switch (c)
2154 {
2155 case 'Q': /* no index short displacement */
2156 if (!decomposed && !s390_decompose_address (op, &addr))
2157 return 0;
2158 if (addr.indx)
2159 return 0;
2160 if (!s390_short_displacement (addr.disp))
2161 return 0;
2162 break;
2163
2164 case 'R': /* with index short displacement */
2165 if (TARGET_LONG_DISPLACEMENT)
2166 {
2167 if (!decomposed && !s390_decompose_address (op, &addr))
2168 return 0;
2169 if (!s390_short_displacement (addr.disp))
2170 return 0;
2171 }
2172 /* Any invalid address here will be fixed up by reload,
2173 so accept it for the most generic constraint. */
2174 break;
2175
2176 case 'S': /* no index long displacement */
2177 if (!TARGET_LONG_DISPLACEMENT)
2178 return 0;
2179 if (!decomposed && !s390_decompose_address (op, &addr))
2180 return 0;
2181 if (addr.indx)
2182 return 0;
2183 if (s390_short_displacement (addr.disp))
2184 return 0;
2185 break;
2186
2187 case 'T': /* with index long displacement */
2188 if (!TARGET_LONG_DISPLACEMENT)
2189 return 0;
2190 /* Any invalid address here will be fixed up by reload,
2191 so accept it for the most generic constraint. */
2192 if ((decomposed || s390_decompose_address (op, &addr))
2193 && s390_short_displacement (addr.disp))
2194 return 0;
2195 break;
2196 default:
2197 return 0;
2198 }
2199 return 1;
2200 }
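
/* To summarize the four letters handled above:

     Q  base + displacement in the short form, no index register
     R  base + index + displacement in the short form; without
        TARGET_LONG_DISPLACEMENT it acts as the most generic
        constraint and leaves fix-ups to reload
     S  base + displacement requiring the long form, no index register
     T  base + index + displacement requiring the long form

   "short" refers to the 12-bit unsigned displacement checked by
   s390_short_displacement, "long" to the 20-bit signed form.  */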
2201
2202
2203 /* Evaluates constraint strings described by the regular expression
2204 ([A|B|Z](Q|R|S|T))|U|W|Y and returns 1 if OP is a valid operand for
2205 the constraint given in STR, and 0 otherwise. */
2206
2207 int
2208 s390_mem_constraint (const char *str, rtx op)
2209 {
2210 char c = str[0];
2211
2212 switch (c)
2213 {
2214 case 'A':
2215 /* Check for offsettable variants of memory constraints. */
2216 if (!MEM_P (op) || MEM_VOLATILE_P (op))
2217 return 0;
2218 if ((reload_completed || reload_in_progress)
2219 ? !offsettable_memref_p (op) : !offsettable_nonstrict_memref_p (op))
2220 return 0;
2221 return s390_check_qrst_address (str[1], XEXP (op, 0), true);
2222 case 'B':
2223 /* Check for non-literal-pool variants of memory constraints. */
2224 if (!MEM_P (op))
2225 return 0;
2226 return s390_check_qrst_address (str[1], XEXP (op, 0), false);
2227 case 'Q':
2228 case 'R':
2229 case 'S':
2230 case 'T':
2231 if (GET_CODE (op) != MEM)
2232 return 0;
2233 return s390_check_qrst_address (c, XEXP (op, 0), true);
2234 case 'U':
2235 return (s390_check_qrst_address ('Q', op, true)
2236 || s390_check_qrst_address ('R', op, true));
2237 case 'W':
2238 return (s390_check_qrst_address ('S', op, true)
2239 || s390_check_qrst_address ('T', op, true));
2240 case 'Y':
2241 /* Simply check for the basic form of a shift count. Reload will
2242 take care of making sure we have a proper base register. */
2243 if (!s390_decompose_shift_count (op, NULL, NULL))
2244 return 0;
2245 break;
2246 case 'Z':
2247 return s390_check_qrst_address (str[1], op, true);
2248 default:
2249 return 0;
2250 }
2251 return 1;
2252 }
2253
2254
2255 /* Evaluates constraint strings starting with letter O. Input
2256 parameter C is the second letter following the "O" in the constraint
2257 string. Returns 1 if VALUE meets the respective constraint and 0
2258 otherwise. */
2259
2260 int
2261 s390_O_constraint_str (const char c, HOST_WIDE_INT value)
2262 {
2263 if (!TARGET_EXTIMM)
2264 return 0;
2265
2266 switch (c)
2267 {
2268 case 's':
2269 return trunc_int_for_mode (value, SImode) == value;
2270
2271 case 'p':
2272 return value == 0
2273 || s390_single_part (GEN_INT (value), DImode, SImode, 0) == 1;
2274
2275 case 'n':
2276 return s390_single_part (GEN_INT (value - 1), DImode, SImode, -1) == 1;
2277
2278 default:
2279 gcc_unreachable ();
2280 }
2281 }
2282
2283
2284 /* Evaluates constraint strings starting with letter N. Parameter STR
2285 contains the letters following letter "N" in the constraint string.
2286 Returns true if VALUE matches the constraint. */
2287
2288 int
2289 s390_N_constraint_str (const char *str, HOST_WIDE_INT value)
2290 {
2291 enum machine_mode mode, part_mode;
2292 int def;
2293 int part, part_goal;
2294
2295
2296 if (str[0] == 'x')
2297 part_goal = -1;
2298 else
2299 part_goal = str[0] - '0';
2300
2301 switch (str[1])
2302 {
2303 case 'Q':
2304 part_mode = QImode;
2305 break;
2306 case 'H':
2307 part_mode = HImode;
2308 break;
2309 case 'S':
2310 part_mode = SImode;
2311 break;
2312 default:
2313 return 0;
2314 }
2315
2316 switch (str[2])
2317 {
2318 case 'H':
2319 mode = HImode;
2320 break;
2321 case 'S':
2322 mode = SImode;
2323 break;
2324 case 'D':
2325 mode = DImode;
2326 break;
2327 default:
2328 return 0;
2329 }
2330
2331 switch (str[3])
2332 {
2333 case '0':
2334 def = 0;
2335 break;
2336 case 'F':
2337 def = -1;
2338 break;
2339 default:
2340 return 0;
2341 }
2342
2343 if (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (part_mode))
2344 return 0;
2345
2346 part = s390_single_part (GEN_INT (value), mode, part_mode, def);
2347 if (part < 0)
2348 return 0;
2349 if (part_goal != -1 && part_goal != part)
2350 return 0;
2351
2352 return 1;
2353 }
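
/* For illustration of the decoding above (the strings are made-up
   examples, not necessarily ones used in s390.md): STR == "0HD0"
   accepts a DImode VALUE in which exactly one HImode part differs
   from the all-zero default and that part is part number 0, while
   STR == "xQH0" accepts an HImode value with exactly one nonzero
   QImode part, at any position.  */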
2354
2355
2356 /* Returns true if the input parameter VALUE is a float zero. */
2357
2358 int
2359 s390_float_const_zero_p (rtx value)
2360 {
2361 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
2362 && value == CONST0_RTX (GET_MODE (value)));
2363 }
2364
2365 /* Implement TARGET_REGISTER_MOVE_COST. */
2366
2367 static int
2368 s390_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2369 reg_class_t from, reg_class_t to)
2370 {
2371 /* On s390, copy between fprs and gprs is expensive. */
2372 if ((reg_classes_intersect_p (from, GENERAL_REGS)
2373 && reg_classes_intersect_p (to, FP_REGS))
2374 || (reg_classes_intersect_p (from, FP_REGS)
2375 && reg_classes_intersect_p (to, GENERAL_REGS)))
2376 return 10;
2377
2378 return 1;
2379 }
2380
2381 /* Implement TARGET_MEMORY_MOVE_COST. */
2382
2383 static int
2384 s390_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2385 reg_class_t rclass ATTRIBUTE_UNUSED,
2386 bool in ATTRIBUTE_UNUSED)
2387 {
2388 return 1;
2389 }
2390
2391 /* Compute a (partial) cost for rtx X. Return true if the complete
2392 cost has been computed, and false if subexpressions should be
2393 scanned. In either case, *TOTAL contains the cost result.
2394 CODE contains GET_CODE (x), OUTER_CODE contains the code
2395 of the superexpression of x. */
2396
2397 static bool
2398 s390_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
2399 int *total, bool speed ATTRIBUTE_UNUSED)
2400 {
2401 switch (code)
2402 {
2403 case CONST:
2404 case CONST_INT:
2405 case LABEL_REF:
2406 case SYMBOL_REF:
2407 case CONST_DOUBLE:
2408 case MEM:
2409 *total = 0;
2410 return true;
2411
2412 case ASHIFT:
2413 case ASHIFTRT:
2414 case LSHIFTRT:
2415 case ROTATE:
2416 case ROTATERT:
2417 case AND:
2418 case IOR:
2419 case XOR:
2420 case NEG:
2421 case NOT:
2422 *total = COSTS_N_INSNS (1);
2423 return false;
2424
2425 case PLUS:
2426 case MINUS:
2427 *total = COSTS_N_INSNS (1);
2428 return false;
2429
2430 case MULT:
2431 switch (GET_MODE (x))
2432 {
2433 case SImode:
2434 {
2435 rtx left = XEXP (x, 0);
2436 rtx right = XEXP (x, 1);
2437 if (GET_CODE (right) == CONST_INT
2438 && CONST_OK_FOR_K (INTVAL (right)))
2439 *total = s390_cost->mhi;
2440 else if (GET_CODE (left) == SIGN_EXTEND)
2441 *total = s390_cost->mh;
2442 else
2443 *total = s390_cost->ms; /* msr, ms, msy */
2444 break;
2445 }
2446 case DImode:
2447 {
2448 rtx left = XEXP (x, 0);
2449 rtx right = XEXP (x, 1);
2450 if (TARGET_ZARCH)
2451 {
2452 if (GET_CODE (right) == CONST_INT
2453 && CONST_OK_FOR_K (INTVAL (right)))
2454 *total = s390_cost->mghi;
2455 else if (GET_CODE (left) == SIGN_EXTEND)
2456 *total = s390_cost->msgf;
2457 else
2458 *total = s390_cost->msg; /* msgr, msg */
2459 }
2460 else /* TARGET_31BIT */
2461 {
2462 if (GET_CODE (left) == SIGN_EXTEND
2463 && GET_CODE (right) == SIGN_EXTEND)
2464 /* mulsidi case: mr, m */
2465 *total = s390_cost->m;
2466 else if (GET_CODE (left) == ZERO_EXTEND
2467 && GET_CODE (right) == ZERO_EXTEND
2468 && TARGET_CPU_ZARCH)
2469 /* umulsidi case: ml, mlr */
2470 *total = s390_cost->ml;
2471 else
2472 /* Complex calculation is required. */
2473 *total = COSTS_N_INSNS (40);
2474 }
2475 break;
2476 }
2477 case SFmode:
2478 case DFmode:
2479 *total = s390_cost->mult_df;
2480 break;
2481 case TFmode:
2482 *total = s390_cost->mxbr;
2483 break;
2484 default:
2485 return false;
2486 }
2487 return false;
2488
2489 case FMA:
2490 switch (GET_MODE (x))
2491 {
2492 case DFmode:
2493 *total = s390_cost->madbr;
2494 break;
2495 case SFmode:
2496 *total = s390_cost->maebr;
2497 break;
2498 default:
2499 return false;
2500 }
2501 /* A negation in the third argument is free: FMSUB. */
2502 if (GET_CODE (XEXP (x, 2)) == NEG)
2503 {
2504 *total += (rtx_cost (XEXP (x, 0), FMA, 0, speed)
2505 + rtx_cost (XEXP (x, 1), FMA, 1, speed)
2506 + rtx_cost (XEXP (XEXP (x, 2), 0), FMA, 2, speed));
2507 return true;
2508 }
2509 return false;
2510
2511 case UDIV:
2512 case UMOD:
2513 if (GET_MODE (x) == TImode) /* 128 bit division */
2514 *total = s390_cost->dlgr;
2515 else if (GET_MODE (x) == DImode)
2516 {
2517 rtx right = XEXP (x, 1);
2518 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
2519 *total = s390_cost->dlr;
2520 else /* 64 by 64 bit division */
2521 *total = s390_cost->dlgr;
2522 }
2523 else if (GET_MODE (x) == SImode) /* 32 bit division */
2524 *total = s390_cost->dlr;
2525 return false;
2526
2527 case DIV:
2528 case MOD:
2529 if (GET_MODE (x) == DImode)
2530 {
2531 rtx right = XEXP (x, 1);
2532 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
2533 if (TARGET_ZARCH)
2534 *total = s390_cost->dsgfr;
2535 else
2536 *total = s390_cost->dr;
2537 else /* 64 by 64 bit division */
2538 *total = s390_cost->dsgr;
2539 }
2540 else if (GET_MODE (x) == SImode) /* 32 bit division */
2541 *total = s390_cost->dlr;
2542 else if (GET_MODE (x) == SFmode)
2543 {
2544 *total = s390_cost->debr;
2545 }
2546 else if (GET_MODE (x) == DFmode)
2547 {
2548 *total = s390_cost->ddbr;
2549 }
2550 else if (GET_MODE (x) == TFmode)
2551 {
2552 *total = s390_cost->dxbr;
2553 }
2554 return false;
2555
2556 case SQRT:
2557 if (GET_MODE (x) == SFmode)
2558 *total = s390_cost->sqebr;
2559 else if (GET_MODE (x) == DFmode)
2560 *total = s390_cost->sqdbr;
2561 else /* TFmode */
2562 *total = s390_cost->sqxbr;
2563 return false;
2564
2565 case SIGN_EXTEND:
2566 case ZERO_EXTEND:
2567 if (outer_code == MULT || outer_code == DIV || outer_code == MOD
2568 || outer_code == PLUS || outer_code == MINUS
2569 || outer_code == COMPARE)
2570 *total = 0;
2571 return false;
2572
2573 case COMPARE:
2574 *total = COSTS_N_INSNS (1);
2575 if (GET_CODE (XEXP (x, 0)) == AND
2576 && GET_CODE (XEXP (x, 1)) == CONST_INT
2577 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
2578 {
2579 rtx op0 = XEXP (XEXP (x, 0), 0);
2580 rtx op1 = XEXP (XEXP (x, 0), 1);
2581 rtx op2 = XEXP (x, 1);
2582
2583 if (memory_operand (op0, GET_MODE (op0))
2584 && s390_tm_ccmode (op1, op2, 0) != VOIDmode)
2585 return true;
2586 if (register_operand (op0, GET_MODE (op0))
2587 && s390_tm_ccmode (op1, op2, 1) != VOIDmode)
2588 return true;
2589 }
2590 return false;
2591
2592 default:
2593 return false;
2594 }
2595 }
2596
2597 /* Return the cost of an address rtx ADDR. */
2598
2599 static int
2600 s390_address_cost (rtx addr, bool speed ATTRIBUTE_UNUSED)
2601 {
2602 struct s390_address ad;
2603 if (!s390_decompose_address (addr, &ad))
2604 return 1000;
2605
2606 return ad.indx? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (1);
2607 }
2608
2609 /* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
2610 otherwise return 0. */
2611
2612 int
2613 tls_symbolic_operand (rtx op)
2614 {
2615 if (GET_CODE (op) != SYMBOL_REF)
2616 return 0;
2617 return SYMBOL_REF_TLS_MODEL (op);
2618 }
2619 \f
2620 /* Split DImode access register reference REG (on 64-bit) into its constituent
2621 low and high parts, and store them into LO and HI. Note that gen_lowpart/
2622 gen_highpart cannot be used as they assume all registers are word-sized,
2623 while our access registers have only half that size. */
2624
2625 void
2626 s390_split_access_reg (rtx reg, rtx *lo, rtx *hi)
2627 {
2628 gcc_assert (TARGET_64BIT);
2629 gcc_assert (ACCESS_REG_P (reg));
2630 gcc_assert (GET_MODE (reg) == DImode);
2631 gcc_assert (!(REGNO (reg) & 1));
2632
2633 *lo = gen_rtx_REG (SImode, REGNO (reg) + 1);
2634 *hi = gen_rtx_REG (SImode, REGNO (reg));
2635 }
2636
2637 /* Return true if OP contains a symbol reference. */
2638
2639 bool
2640 symbolic_reference_mentioned_p (rtx op)
2641 {
2642 const char *fmt;
2643 int i;
2644
2645 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
2646 return 1;
2647
2648 fmt = GET_RTX_FORMAT (GET_CODE (op));
2649 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2650 {
2651 if (fmt[i] == 'E')
2652 {
2653 int j;
2654
2655 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2656 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2657 return 1;
2658 }
2659
2660 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
2661 return 1;
2662 }
2663
2664 return 0;
2665 }
2666
2667 /* Return true if OP contains a reference to a thread-local symbol. */
2668
2669 bool
2670 tls_symbolic_reference_mentioned_p (rtx op)
2671 {
2672 const char *fmt;
2673 int i;
2674
2675 if (GET_CODE (op) == SYMBOL_REF)
2676 return tls_symbolic_operand (op);
2677
2678 fmt = GET_RTX_FORMAT (GET_CODE (op));
2679 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2680 {
2681 if (fmt[i] == 'E')
2682 {
2683 int j;
2684
2685 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2686 if (tls_symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2687 return true;
2688 }
2689
2690 else if (fmt[i] == 'e' && tls_symbolic_reference_mentioned_p (XEXP (op, i)))
2691 return true;
2692 }
2693
2694 return false;
2695 }
2696
2697
2698 /* Return true if OP is a legitimate general operand when
2699 generating PIC code. It is given that flag_pic is on
2700 and that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2701
2702 int
2703 legitimate_pic_operand_p (rtx op)
2704 {
2705 /* Accept all non-symbolic constants. */
2706 if (!SYMBOLIC_CONST (op))
2707 return 1;
2708
2709 /* Reject everything else; must be handled
2710 via emit_symbolic_move. */
2711 return 0;
2712 }
2713
2714 /* Returns true if the constant value OP is a legitimate general operand.
2715 It is given that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2716
2717 static bool
2718 s390_legitimate_constant_p (enum machine_mode mode, rtx op)
2719 {
2720 /* Accept all non-symbolic constants. */
2721 if (!SYMBOLIC_CONST (op))
2722 return 1;
2723
2724 /* Accept immediate LARL operands. */
2725 if (TARGET_CPU_ZARCH && larl_operand (op, mode))
2726 return 1;
2727
2728 /* Thread-local symbols are never legal constants. This is
2729 so that emit_call knows that computing such addresses
2730 might require a function call. */
2731 if (TLS_SYMBOLIC_CONST (op))
2732 return 0;
2733
2734 /* In the PIC case, symbolic constants must *not* be
2735 forced into the literal pool. We accept them here,
2736 so that they will be handled by emit_symbolic_move. */
2737 if (flag_pic)
2738 return 1;
2739
2740 /* All remaining non-PIC symbolic constants are
2741 forced into the literal pool. */
2742 return 0;
2743 }
2744
2745 /* Determine if it's legal to put X into the constant pool. This
2746 is not possible if X contains the address of a symbol that is
2747 not constant (TLS) or not known at final link time (PIC). */
2748
2749 static bool
2750 s390_cannot_force_const_mem (enum machine_mode mode, rtx x)
2751 {
2752 switch (GET_CODE (x))
2753 {
2754 case CONST_INT:
2755 case CONST_DOUBLE:
2756 /* Accept all non-symbolic constants. */
2757 return false;
2758
2759 case LABEL_REF:
2760 /* Labels are OK iff we are non-PIC. */
2761 return flag_pic != 0;
2762
2763 case SYMBOL_REF:
2764 /* 'Naked' TLS symbol references are never OK,
2765 non-TLS symbols are OK iff we are non-PIC. */
2766 if (tls_symbolic_operand (x))
2767 return true;
2768 else
2769 return flag_pic != 0;
2770
2771 case CONST:
2772 return s390_cannot_force_const_mem (mode, XEXP (x, 0));
2773 case PLUS:
2774 case MINUS:
2775 return s390_cannot_force_const_mem (mode, XEXP (x, 0))
2776 || s390_cannot_force_const_mem (mode, XEXP (x, 1));
2777
2778 case UNSPEC:
2779 switch (XINT (x, 1))
2780 {
2781 /* Only lt-relative or GOT-relative UNSPECs are OK. */
2782 case UNSPEC_LTREL_OFFSET:
2783 case UNSPEC_GOT:
2784 case UNSPEC_GOTOFF:
2785 case UNSPEC_PLTOFF:
2786 case UNSPEC_TLSGD:
2787 case UNSPEC_TLSLDM:
2788 case UNSPEC_NTPOFF:
2789 case UNSPEC_DTPOFF:
2790 case UNSPEC_GOTNTPOFF:
2791 case UNSPEC_INDNTPOFF:
2792 return false;
2793
2794 /* If the literal pool shares the code section, execute template
2795 placeholders may be put into the pool as well. */
2796 case UNSPEC_INSN:
2797 return TARGET_CPU_ZARCH;
2798
2799 default:
2800 return true;
2801 }
2802 break;
2803
2804 default:
2805 gcc_unreachable ();
2806 }
2807 }
2808
2809 /* Returns true if the constant value OP is a legitimate general
2810 operand during and after reload. The difference from
2811 legitimate_constant_p is that this function will not accept
2812 a constant that would need to be forced to the literal pool
2813 before it can be used as an operand.
2814 This function accepts all constants which can be loaded directly
2815 into a GPR. */
2816
2817 bool
2818 legitimate_reload_constant_p (rtx op)
2819 {
2820 /* Accept la(y) operands. */
2821 if (GET_CODE (op) == CONST_INT
2822 && DISP_IN_RANGE (INTVAL (op)))
2823 return true;
2824
2825 /* Accept l(g)hi/l(g)fi operands. */
2826 if (GET_CODE (op) == CONST_INT
2827 && (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_Os (INTVAL (op))))
2828 return true;
2829
2830 /* Accept lliXX operands. */
2831 if (TARGET_ZARCH
2832 && GET_CODE (op) == CONST_INT
2833 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2834 && s390_single_part (op, word_mode, HImode, 0) >= 0)
2835 return true;
2836
2837 if (TARGET_EXTIMM
2838 && GET_CODE (op) == CONST_INT
2839 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2840 && s390_single_part (op, word_mode, SImode, 0) >= 0)
2841 return true;
2842
2843 /* Accept larl operands. */
2844 if (TARGET_CPU_ZARCH
2845 && larl_operand (op, VOIDmode))
2846 return true;
2847
2848 /* Accept floating-point zero operands that fit into a single GPR. */
2849 if (GET_CODE (op) == CONST_DOUBLE
2850 && s390_float_const_zero_p (op)
2851 && GET_MODE_SIZE (GET_MODE (op)) <= UNITS_PER_WORD)
2852 return true;
2853
2854 /* Accept double-word operands that can be split. */
2855 if (GET_CODE (op) == CONST_INT
2856 && trunc_int_for_mode (INTVAL (op), word_mode) != INTVAL (op))
2857 {
2858 enum machine_mode dword_mode = word_mode == SImode ? DImode : TImode;
2859 rtx hi = operand_subword (op, 0, 0, dword_mode);
2860 rtx lo = operand_subword (op, 1, 0, dword_mode);
2861 return legitimate_reload_constant_p (hi)
2862 && legitimate_reload_constant_p (lo);
2863 }
2864
2865 /* Everything else cannot be handled without reload. */
2866 return false;
2867 }
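
/* For illustration: small values such as 42 pass the la(y) and
   l(g)hi checks above, while a constant that does not fit into a
   single word is accepted only if both of its word-sized halves are
   themselves legitimate reload constants.  */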
2868
2869 /* Returns true if the constant value OP is a legitimate fp operand
2870 during and after reload.
2871 This function accepts all constants which can be loaded directly
2872 into an FPR. */
2873
2874 static bool
2875 legitimate_reload_fp_constant_p (rtx op)
2876 {
2877 /* Accept floating-point zero operands if the load zero instruction
2878 can be used. */
2879 if (TARGET_Z196
2880 && GET_CODE (op) == CONST_DOUBLE
2881 && s390_float_const_zero_p (op))
2882 return true;
2883
2884 return false;
2885 }
2886
2887 /* Given an rtx OP being reloaded into a reg required to be in class RCLASS,
2888 return the class of reg to actually use. */
2889
2890 static reg_class_t
2891 s390_preferred_reload_class (rtx op, reg_class_t rclass)
2892 {
2893 switch (GET_CODE (op))
2894 {
2895 /* Constants we cannot reload into general registers
2896 must be forced into the literal pool. */
2897 case CONST_DOUBLE:
2898 case CONST_INT:
2899 if (reg_class_subset_p (GENERAL_REGS, rclass)
2900 && legitimate_reload_constant_p (op))
2901 return GENERAL_REGS;
2902 else if (reg_class_subset_p (ADDR_REGS, rclass)
2903 && legitimate_reload_constant_p (op))
2904 return ADDR_REGS;
2905 else if (reg_class_subset_p (FP_REGS, rclass)
2906 && legitimate_reload_fp_constant_p (op))
2907 return FP_REGS;
2908 return NO_REGS;
2909
2910 /* If a symbolic constant or a PLUS is reloaded,
2911 it is most likely being used as an address, so
2912 prefer ADDR_REGS. If 'rclass' is not a superset
2913 of ADDR_REGS, e.g. FP_REGS, reject this reload. */
2914 case LABEL_REF:
2915 case SYMBOL_REF:
2916 case CONST:
2917 if (!legitimate_reload_constant_p (op))
2918 return NO_REGS;
2919 /* fallthrough */
2920 case PLUS:
2921 /* load address will be used. */
2922 if (reg_class_subset_p (ADDR_REGS, rclass))
2923 return ADDR_REGS;
2924 else
2925 return NO_REGS;
2926
2927 default:
2928 break;
2929 }
2930
2931 return rclass;
2932 }
2933
2934 /* Return true if ADDR is SYMBOL_REF + addend with addend being a
2935 multiple of ALIGNMENT and the SYMBOL_REF being naturally
2936 aligned. */
2937
2938 bool
2939 s390_check_symref_alignment (rtx addr, HOST_WIDE_INT alignment)
2940 {
2941 HOST_WIDE_INT addend;
2942 rtx symref;
2943
2944 if (!s390_symref_operand_p (addr, &symref, &addend))
2945 return false;
2946
2947 return (!SYMBOL_REF_NOT_NATURALLY_ALIGNED_P (symref)
2948 && !(addend & (alignment - 1)));
2949 }
2950
2951 /* ADDR is moved into REG using larl. If ADDR isn't a valid larl
2952 operand, SCRATCH is used to reload the even part of the address,
2953 and one is then added to form the final value. */
2954
2955 void
2956 s390_reload_larl_operand (rtx reg, rtx addr, rtx scratch)
2957 {
2958 HOST_WIDE_INT addend;
2959 rtx symref;
2960
2961 if (!s390_symref_operand_p (addr, &symref, &addend))
2962 gcc_unreachable ();
2963
2964 if (!(addend & 1))
2965 /* Easy case. The addend is even so larl will do fine. */
2966 emit_move_insn (reg, addr);
2967 else
2968 {
2969 /* We can leave the scratch register untouched if the target
2970 register is a valid base register. */
2971 if (REGNO (reg) < FIRST_PSEUDO_REGISTER
2972 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS)
2973 scratch = reg;
2974
2975 gcc_assert (REGNO (scratch) < FIRST_PSEUDO_REGISTER);
2976 gcc_assert (REGNO_REG_CLASS (REGNO (scratch)) == ADDR_REGS);
2977
2978 if (addend != 1)
2979 emit_move_insn (scratch,
2980 gen_rtx_CONST (Pmode,
2981 gen_rtx_PLUS (Pmode, symref,
2982 GEN_INT (addend - 1))));
2983 else
2984 emit_move_insn (scratch, symref);
2985
2986 /* Increment the address using la in order to avoid clobbering cc. */
2987 emit_move_insn (reg, gen_rtx_PLUS (Pmode, scratch, const1_rtx));
2988 }
2989 }
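
/* For illustration (hypothetical register numbers): reloading the
   address sym+5, whose odd addend larl cannot encode, the routine
   above emits roughly

     larl  %r1, sym+4
     la    %r2, 1(%r1)

   i.e. the even part is loaded first and 1 is added with la so
   that the condition code is left untouched.  */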
2990
2991 /* Generate what is necessary to move between REG and MEM using
2992 SCRATCH. The direction is given by TOMEM. */
2993
2994 void
2995 s390_reload_symref_address (rtx reg, rtx mem, rtx scratch, bool tomem)
2996 {
2997 /* Reload might have pulled a constant out of the literal pool.
2998 Force it back in. */
2999 if (CONST_INT_P (mem) || GET_CODE (mem) == CONST_DOUBLE
3000 || GET_CODE (mem) == CONST)
3001 mem = force_const_mem (GET_MODE (reg), mem);
3002
3003 gcc_assert (MEM_P (mem));
3004
3005 /* For a load from memory we can leave the scratch register
3006 untouched if the target register is a valid base register. */
3007 if (!tomem
3008 && REGNO (reg) < FIRST_PSEUDO_REGISTER
3009 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS
3010 && GET_MODE (reg) == GET_MODE (scratch))
3011 scratch = reg;
3012
3013 /* Load address into scratch register. Since we can't have a
3014 secondary reload for a secondary reload we have to cover the case
3015 where larl would need a secondary reload here as well. */
3016 s390_reload_larl_operand (scratch, XEXP (mem, 0), scratch);
3017
3018 /* Now we can use a standard load/store to do the move. */
3019 if (tomem)
3020 emit_move_insn (replace_equiv_address (mem, scratch), reg);
3021 else
3022 emit_move_insn (reg, replace_equiv_address (mem, scratch));
3023 }
3024
3025 /* Inform reload about cases where moving X with a mode MODE to a register in
3026 RCLASS requires an extra scratch or immediate register. Return the class
3027 needed for the immediate register. */
3028
3029 static reg_class_t
3030 s390_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
3031 enum machine_mode mode, secondary_reload_info *sri)
3032 {
3033 enum reg_class rclass = (enum reg_class) rclass_i;
3034
3035 /* Intermediate register needed. */
3036 if (reg_classes_intersect_p (CC_REGS, rclass))
3037 return GENERAL_REGS;
3038
3039 if (TARGET_Z10)
3040 {
3041 HOST_WIDE_INT offset;
3042 rtx symref;
3043
3044 /* On z10 several optimizer steps may generate larl operands with
3045 an odd addend. */
3046 if (in_p
3047 && s390_symref_operand_p (x, &symref, &offset)
3048 && mode == Pmode
3049 && !SYMBOL_REF_ALIGN1_P (symref)
3050 && (offset & 1) == 1)
3051 sri->icode = ((mode == DImode) ? CODE_FOR_reloaddi_larl_odd_addend_z10
3052 : CODE_FOR_reloadsi_larl_odd_addend_z10);
3053
3054 /* On z10 we need a scratch register when moving QI, TI or floating
3055 point mode values from or to a memory location with a SYMBOL_REF
3056 or if the symref addend of a SI or DI move is not aligned to the
3057 width of the access. */
3058 if (MEM_P (x)
3059 && s390_symref_operand_p (XEXP (x, 0), NULL, NULL)
3060 && (mode == QImode || mode == TImode || FLOAT_MODE_P (mode)
3061 || (!TARGET_ZARCH && mode == DImode)
3062 || ((mode == HImode || mode == SImode || mode == DImode)
3063 && (!s390_check_symref_alignment (XEXP (x, 0),
3064 GET_MODE_SIZE (mode))))))
3065 {
3066 #define __SECONDARY_RELOAD_CASE(M,m) \
3067 case M##mode: \
3068 if (TARGET_64BIT) \
3069 sri->icode = in_p ? CODE_FOR_reload##m##di_toreg_z10 : \
3070 CODE_FOR_reload##m##di_tomem_z10; \
3071 else \
3072 sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 : \
3073 CODE_FOR_reload##m##si_tomem_z10; \
3074 break;
3075
3076 switch (GET_MODE (x))
3077 {
3078 __SECONDARY_RELOAD_CASE (QI, qi);
3079 __SECONDARY_RELOAD_CASE (HI, hi);
3080 __SECONDARY_RELOAD_CASE (SI, si);
3081 __SECONDARY_RELOAD_CASE (DI, di);
3082 __SECONDARY_RELOAD_CASE (TI, ti);
3083 __SECONDARY_RELOAD_CASE (SF, sf);
3084 __SECONDARY_RELOAD_CASE (DF, df);
3085 __SECONDARY_RELOAD_CASE (TF, tf);
3086 __SECONDARY_RELOAD_CASE (SD, sd);
3087 __SECONDARY_RELOAD_CASE (DD, dd);
3088 __SECONDARY_RELOAD_CASE (TD, td);
3089
3090 default:
3091 gcc_unreachable ();
3092 }
3093 #undef __SECONDARY_RELOAD_CASE
3094 }
3095 }
3096
3097 /* We need a scratch register when loading a PLUS expression which
3098 is not a legitimate operand of the LOAD ADDRESS instruction. */
3099 if (in_p && s390_plus_operand (x, mode))
3100 sri->icode = (TARGET_64BIT ?
3101 CODE_FOR_reloaddi_plus : CODE_FOR_reloadsi_plus);
3102
3103 /* When performing a multiword move from or to memory we have to make
3104 sure the second chunk in memory is addressable without causing a
3105 displacement overflow. If it would overflow, we calculate the address
3106 in a scratch register. */
3107 if (MEM_P (x)
3108 && GET_CODE (XEXP (x, 0)) == PLUS
3109 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3110 && !DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1))
3111 + GET_MODE_SIZE (mode) - 1))
3112 {
3113 /* For GENERAL_REGS a displacement overflow is no problem if occurring
3114 in an s_operand address since we may fall back to lm/stm. So we only
3115 have to care about overflows in the b+i+d case. */
3116 if ((reg_classes_intersect_p (GENERAL_REGS, rclass)
3117 && s390_class_max_nregs (GENERAL_REGS, mode) > 1
3118 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
3119 /* For FP_REGS no lm/stm is available so this check is triggered
3120 for displacement overflows in b+i+d and b+d like addresses. */
3121 || (reg_classes_intersect_p (FP_REGS, rclass)
3122 && s390_class_max_nregs (FP_REGS, mode) > 1))
3123 {
3124 if (in_p)
3125 sri->icode = (TARGET_64BIT ?
3126 CODE_FOR_reloaddi_nonoffmem_in :
3127 CODE_FOR_reloadsi_nonoffmem_in);
3128 else
3129 sri->icode = (TARGET_64BIT ?
3130 CODE_FOR_reloaddi_nonoffmem_out :
3131 CODE_FOR_reloadsi_nonoffmem_out);
3132 }
3133 }
3134
3135 /* A scratch address register is needed when a symbolic constant is
3136 copied to r0 while compiling with -fPIC. In other cases the target
3137 register might be used as a temporary (see legitimize_pic_address). */
3138 if (in_p && SYMBOLIC_CONST (x) && flag_pic == 2 && rclass != ADDR_REGS)
3139 sri->icode = (TARGET_64BIT ?
3140 CODE_FOR_reloaddi_PIC_addr :
3141 CODE_FOR_reloadsi_PIC_addr);
3142
3143 /* Either scratch or no register needed. */
3144 return NO_REGS;
3145 }
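
/* For illustration, the __SECONDARY_RELOAD_CASE macro above expands,
   for QImode on a 64-bit target, to

     case QImode:
       if (TARGET_64BIT)
         sri->icode = in_p ? CODE_FOR_reloadqidi_toreg_z10
                           : CODE_FOR_reloadqidi_tomem_z10;
       ...

   selecting the z10 reload pattern that supplies the required
   scratch address register.  */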
3146
3147 /* Generate code to load SRC, which is a PLUS that is not a
3148 legitimate operand for the LA instruction, into TARGET.
3149 SCRATCH may be used as a scratch register. */
3150
3151 void
3152 s390_expand_plus_operand (rtx target, rtx src,
3153 rtx scratch)
3154 {
3155 rtx sum1, sum2;
3156 struct s390_address ad;
3157
3158 /* src must be a PLUS; get its two operands. */
3159 gcc_assert (GET_CODE (src) == PLUS);
3160 gcc_assert (GET_MODE (src) == Pmode);
3161
3162 /* Check if either of the two operands is already scheduled
3163 for replacement by reload. This can happen e.g. when
3164 float registers occur in an address. */
3165 sum1 = find_replacement (&XEXP (src, 0));
3166 sum2 = find_replacement (&XEXP (src, 1));
3167 src = gen_rtx_PLUS (Pmode, sum1, sum2);
3168
3169 /* If the address is already strictly valid, there's nothing to do. */
3170 if (!s390_decompose_address (src, &ad)
3171 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
3172 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
3173 {
3174 /* Otherwise, one of the operands cannot be an address register;
3175 we reload its value into the scratch register. */
3176 if (true_regnum (sum1) < 1 || true_regnum (sum1) > 15)
3177 {
3178 emit_move_insn (scratch, sum1);
3179 sum1 = scratch;
3180 }
3181 if (true_regnum (sum2) < 1 || true_regnum (sum2) > 15)
3182 {
3183 emit_move_insn (scratch, sum2);
3184 sum2 = scratch;
3185 }
3186
3187 /* According to the way these invalid addresses are generated
3188 in reload.c, it should never happen (at least on s390) that
3189 *neither* of the PLUS components, after find_replacements
3190 was applied, is an address register. */
3191 if (sum1 == scratch && sum2 == scratch)
3192 {
3193 debug_rtx (src);
3194 gcc_unreachable ();
3195 }
3196
3197 src = gen_rtx_PLUS (Pmode, sum1, sum2);
3198 }
3199
3200 /* Emit the LOAD ADDRESS pattern. Note that reload of PLUS
3201 is only ever performed on addresses, so we can mark the
3202 sum as legitimate for LA in any case. */
3203 s390_load_address (target, src);
3204 }
3205
3206
3207 /* Return true if ADDR is a valid memory address.
3208 STRICT specifies whether strict register checking applies. */
3209
3210 static bool
3211 s390_legitimate_address_p (enum machine_mode mode, rtx addr, bool strict)
3212 {
3213 struct s390_address ad;
3214
3215 if (TARGET_Z10
3216 && larl_operand (addr, VOIDmode)
3217 && (mode == VOIDmode
3218 || s390_check_symref_alignment (addr, GET_MODE_SIZE (mode))))
3219 return true;
3220
3221 if (!s390_decompose_address (addr, &ad))
3222 return false;
3223
3224 if (strict)
3225 {
3226 if (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
3227 return false;
3228
3229 if (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx)))
3230 return false;
3231 }
3232 else
3233 {
3234 if (ad.base
3235 && !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
3236 || REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
3237 return false;
3238
3239 if (ad.indx
3240 && !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
3241 || REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
3242 return false;
3243 }
3244 return true;
3245 }
3246
3247 /* Return true if OP is a valid operand for the LA instruction.
3248 In 31-bit, we need to prove that the result is used as an
3249 address, as LA performs only a 31-bit addition. */
3250
3251 bool
3252 legitimate_la_operand_p (rtx op)
3253 {
3254 struct s390_address addr;
3255 if (!s390_decompose_address (op, &addr))
3256 return false;
3257
3258 return (TARGET_64BIT || addr.pointer);
3259 }
3260
3261 /* Return true if it is valid *and* preferable to use LA to
3262 compute the sum of OP1 and OP2. */
3263
3264 bool
3265 preferred_la_operand_p (rtx op1, rtx op2)
3266 {
3267 struct s390_address addr;
3268
3269 if (op2 != const0_rtx)
3270 op1 = gen_rtx_PLUS (Pmode, op1, op2);
3271
3272 if (!s390_decompose_address (op1, &addr))
3273 return false;
3274 if (addr.base && !REGNO_OK_FOR_BASE_P (REGNO (addr.base)))
3275 return false;
3276 if (addr.indx && !REGNO_OK_FOR_INDEX_P (REGNO (addr.indx)))
3277 return false;
3278
3279 /* Avoid LA instructions with index register on z196; it is
3280 preferable to use regular add instructions when possible. */
3281 if (addr.indx && s390_tune == PROCESSOR_2817_Z196)
3282 return false;
3283
3284 if (!TARGET_64BIT && !addr.pointer)
3285 return false;
3286
3287 if (addr.pointer)
3288 return true;
3289
3290 if ((addr.base && REG_P (addr.base) && REG_POINTER (addr.base))
3291 || (addr.indx && REG_P (addr.indx) && REG_POINTER (addr.indx)))
3292 return true;
3293
3294 return false;
3295 }
3296
3297 /* Emit a forced load-address operation to load SRC into DST.
3298 This will use the LOAD ADDRESS instruction even in situations
3299 where legitimate_la_operand_p (SRC) returns false. */
3300
3301 void
3302 s390_load_address (rtx dst, rtx src)
3303 {
3304 if (TARGET_64BIT)
3305 emit_move_insn (dst, src);
3306 else
3307 emit_insn (gen_force_la_31 (dst, src));
3308 }
3309
3310 /* Return a legitimate reference for ORIG (an address) using the
3311 register REG. If REG is 0, a new pseudo is generated.
3312
3313 There are two types of references that must be handled:
3314
3315 1. Global data references must load the address from the GOT, via
3316 the PIC reg. An insn is emitted to do this load, and the reg is
3317 returned.
3318
3319 2. Static data references, constant pool addresses, and code labels
3320 compute the address as an offset from the GOT, whose base is in
3321 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
3322 differentiate them from global data objects. The returned
3323 address is the PIC reg + an unspec constant.
3324
3325 TARGET_LEGITIMIZE_ADDRESS_P rejects symbolic references unless the PIC
3326 reg also appears in the address. */
3327
3328 rtx
3329 legitimize_pic_address (rtx orig, rtx reg)
3330 {
3331 rtx addr = orig;
3332 rtx new_rtx = orig;
3333 rtx base;
3334
3335 gcc_assert (!TLS_SYMBOLIC_CONST (addr));
3336
3337 if (GET_CODE (addr) == LABEL_REF
3338 || (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (addr)))
3339 {
3340 /* This is a local symbol. */
3341 if (TARGET_CPU_ZARCH && larl_operand (addr, VOIDmode))
3342 {
3343 /* Access local symbols PC-relative via LARL.
3344 This is the same as in the non-PIC case, so it is
3345 handled automatically ... */
3346 }
3347 else
3348 {
3349 /* Access local symbols relative to the GOT. */
3350
3351 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3352
3353 if (reload_in_progress || reload_completed)
3354 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3355
3356 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
3357 addr = gen_rtx_CONST (Pmode, addr);
3358 addr = force_const_mem (Pmode, addr);
3359 emit_move_insn (temp, addr);
3360
3361 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3362 if (reg != 0)
3363 {
3364 s390_load_address (reg, new_rtx);
3365 new_rtx = reg;
3366 }
3367 }
3368 }
3369 else if (GET_CODE (addr) == SYMBOL_REF)
3370 {
3371 if (reg == 0)
3372 reg = gen_reg_rtx (Pmode);
3373
3374 if (flag_pic == 1)
3375 {
3376 /* Assume GOT offset < 4k. This is handled the same way
3377 in both 31- and 64-bit code (@GOT). */
3378
3379 if (reload_in_progress || reload_completed)
3380 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3381
3382 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3383 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3384 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
3385 new_rtx = gen_const_mem (Pmode, new_rtx);
3386 emit_move_insn (reg, new_rtx);
3387 new_rtx = reg;
3388 }
3389 else if (TARGET_CPU_ZARCH)
3390 {
3391 /* If the GOT offset might be >= 4k, we determine the position
3392 of the GOT entry via a PC-relative LARL (@GOTENT). */
3393
3394 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3395
3396 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
3397 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
3398
3399 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
3400 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3401 emit_move_insn (temp, new_rtx);
3402
3403 new_rtx = gen_const_mem (Pmode, temp);
3404 emit_move_insn (reg, new_rtx);
3405 new_rtx = reg;
3406 }
3407 else
3408 {
3409 /* If the GOT offset might be >= 4k, we have to load it
3410 from the literal pool (@GOT). */
3411
3412 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3413
3414 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
3415 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
3416
3417 if (reload_in_progress || reload_completed)
3418 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3419
3420 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3421 addr = gen_rtx_CONST (Pmode, addr);
3422 addr = force_const_mem (Pmode, addr);
3423 emit_move_insn (temp, addr);
3424
3425 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3426 new_rtx = gen_const_mem (Pmode, new_rtx);
3427 emit_move_insn (reg, new_rtx);
3428 new_rtx = reg;
3429 }
3430 }
3431 else
3432 {
3433 if (GET_CODE (addr) == CONST)
3434 {
3435 addr = XEXP (addr, 0);
3436 if (GET_CODE (addr) == UNSPEC)
3437 {
3438 gcc_assert (XVECLEN (addr, 0) == 1);
3439 switch (XINT (addr, 1))
3440 {
3441 /* If someone moved a GOT-relative UNSPEC
3442 out of the literal pool, force it back in. */
3443 case UNSPEC_GOTOFF:
3444 case UNSPEC_PLTOFF:
3445 new_rtx = force_const_mem (Pmode, orig);
3446 break;
3447
3448 /* @GOT is OK as is if small. */
3449 case UNSPEC_GOT:
3450 if (flag_pic == 2)
3451 new_rtx = force_const_mem (Pmode, orig);
3452 break;
3453
3454 /* @GOTENT is OK as is. */
3455 case UNSPEC_GOTENT:
3456 break;
3457
3458 /* @PLT is OK as is on 64-bit, must be converted to
3459 GOT-relative @PLTOFF on 31-bit. */
3460 case UNSPEC_PLT:
3461 if (!TARGET_CPU_ZARCH)
3462 {
3463 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3464
3465 if (reload_in_progress || reload_completed)
3466 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3467
3468 addr = XVECEXP (addr, 0, 0);
3469 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr),
3470 UNSPEC_PLTOFF);
3471 addr = gen_rtx_CONST (Pmode, addr);
3472 addr = force_const_mem (Pmode, addr);
3473 emit_move_insn (temp, addr);
3474
3475 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3476 if (reg != 0)
3477 {
3478 s390_load_address (reg, new_rtx);
3479 new_rtx = reg;
3480 }
3481 }
3482 break;
3483
3484 /* Everything else cannot happen. */
3485 default:
3486 gcc_unreachable ();
3487 }
3488 }
3489 else
3490 gcc_assert (GET_CODE (addr) == PLUS);
3491 }
3492 if (GET_CODE (addr) == PLUS)
3493 {
3494 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
3495
3496 gcc_assert (!TLS_SYMBOLIC_CONST (op0));
3497 gcc_assert (!TLS_SYMBOLIC_CONST (op1));
3498
3499 /* Check first to see if this is a constant offset
3500 from a local symbol reference. */
3501 if ((GET_CODE (op0) == LABEL_REF
3502 || (GET_CODE (op0) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (op0)))
3503 && GET_CODE (op1) == CONST_INT)
3504 {
3505 if (TARGET_CPU_ZARCH
3506 && larl_operand (op0, VOIDmode)
3507 && INTVAL (op1) < (HOST_WIDE_INT)1 << 31
3508 && INTVAL (op1) >= -((HOST_WIDE_INT)1 << 31))
3509 {
3510 if (INTVAL (op1) & 1)
3511 {
3512 /* LARL can't handle odd offsets, so emit a
3513 pair of LARL and LA. */
3514 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3515
3516 if (!DISP_IN_RANGE (INTVAL (op1)))
3517 {
3518 HOST_WIDE_INT even = INTVAL (op1) - 1;
3519 op0 = gen_rtx_PLUS (Pmode, op0, GEN_INT (even));
3520 op0 = gen_rtx_CONST (Pmode, op0);
3521 op1 = const1_rtx;
3522 }
3523
3524 emit_move_insn (temp, op0);
3525 new_rtx = gen_rtx_PLUS (Pmode, temp, op1);
3526
3527 if (reg != 0)
3528 {
3529 s390_load_address (reg, new_rtx);
3530 new_rtx = reg;
3531 }
3532 }
3533 else
3534 {
3535 /* If the offset is even, we can just use LARL.
3536 This will happen automatically. */
3537 }
3538 }
3539 else
3540 {
3541 /* Access local symbols relative to the GOT. */
3542
3543 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3544
3545 if (reload_in_progress || reload_completed)
3546 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3547
3548 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
3549 UNSPEC_GOTOFF);
3550 addr = gen_rtx_PLUS (Pmode, addr, op1);
3551 addr = gen_rtx_CONST (Pmode, addr);
3552 addr = force_const_mem (Pmode, addr);
3553 emit_move_insn (temp, addr);
3554
3555 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3556 if (reg != 0)
3557 {
3558 s390_load_address (reg, new_rtx);
3559 new_rtx = reg;
3560 }
3561 }
3562 }
3563
3564 /* Now, check whether it is a GOT relative symbol plus offset
3565 that was pulled out of the literal pool. Force it back in. */
3566
3567 else if (GET_CODE (op0) == UNSPEC
3568 && GET_CODE (op1) == CONST_INT
3569 && XINT (op0, 1) == UNSPEC_GOTOFF)
3570 {
3571 gcc_assert (XVECLEN (op0, 0) == 1);
3572
3573 new_rtx = force_const_mem (Pmode, orig);
3574 }
3575
3576 /* Otherwise, compute the sum. */
3577 else
3578 {
3579 base = legitimize_pic_address (XEXP (addr, 0), reg);
3580 new_rtx = legitimize_pic_address (XEXP (addr, 1),
3581 base == reg ? NULL_RTX : reg);
3582 if (GET_CODE (new_rtx) == CONST_INT)
3583 new_rtx = plus_constant (Pmode, base, INTVAL (new_rtx));
3584 else
3585 {
3586 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
3587 {
3588 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
3589 new_rtx = XEXP (new_rtx, 1);
3590 }
3591 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
3592 }
3593
3594 if (GET_CODE (new_rtx) == CONST)
3595 new_rtx = XEXP (new_rtx, 0);
3596 new_rtx = force_operand (new_rtx, 0);
3597 }
3598 }
3599 }
3600 return new_rtx;
3601 }
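
/* For illustration: for a global symbol sym the code above emits,
   schematically,

     lg   <reg>, sym@GOT(%r12)          # flag_pic == 1, 64-bit

   or, when the GOT offset may exceed 4k on a zarch CPU,

     larl <tmp>, sym@GOTENT
     lg   <reg>, 0(<tmp>)

   with %r12 being the PIC register.  The exact instructions depend
   on the target; this is only a sketch.  */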
3602
3603 /* Load the thread pointer into a register. */
3604
3605 rtx
3606 s390_get_thread_pointer (void)
3607 {
3608 rtx tp = gen_reg_rtx (Pmode);
3609
3610 emit_move_insn (tp, gen_rtx_REG (Pmode, TP_REGNUM));
3611 mark_reg_pointer (tp, BITS_PER_WORD);
3612
3613 return tp;
3614 }
3615
3616 /* Emit a TLS call insn. The call target is the SYMBOL_REF stored
3617 in s390_tls_symbol, which always refers to __tls_get_offset.
3618 The returned offset is written to RESULT_REG and a USE rtx is
3619 generated for TLS_CALL. */
3620
3621 static GTY(()) rtx s390_tls_symbol;
3622
3623 static void
3624 s390_emit_tls_call_insn (rtx result_reg, rtx tls_call)
3625 {
3626 rtx insn;
3627
3628 if (!flag_pic)
3629 emit_insn (s390_load_got ());
3630
3631 if (!s390_tls_symbol)
3632 s390_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_offset");
3633
3634 insn = s390_emit_call (s390_tls_symbol, tls_call, result_reg,
3635 gen_rtx_REG (Pmode, RETURN_REGNUM));
3636
3637 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), result_reg);
3638 RTL_CONST_CALL_P (insn) = 1;
3639 }
3640
3641 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3642 this (thread-local) address. REG may be used as temporary. */
3643
3644 static rtx
3645 legitimize_tls_address (rtx addr, rtx reg)
3646 {
3647 rtx new_rtx, tls_call, temp, base, r2, insn;
3648
3649 if (GET_CODE (addr) == SYMBOL_REF)
3650 switch (tls_symbolic_operand (addr))
3651 {
3652 case TLS_MODEL_GLOBAL_DYNAMIC:
3653 start_sequence ();
3654 r2 = gen_rtx_REG (Pmode, 2);
3655 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_TLSGD);
3656 new_rtx = gen_rtx_CONST (Pmode, tls_call);
3657 new_rtx = force_const_mem (Pmode, new_rtx);
3658 emit_move_insn (r2, new_rtx);
3659 s390_emit_tls_call_insn (r2, tls_call);
3660 insn = get_insns ();
3661 end_sequence ();
3662
3663 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3664 temp = gen_reg_rtx (Pmode);
3665 emit_libcall_block (insn, temp, r2, new_rtx);
3666
3667 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3668 if (reg != 0)
3669 {
3670 s390_load_address (reg, new_rtx);
3671 new_rtx = reg;
3672 }
3673 break;
3674
3675 case TLS_MODEL_LOCAL_DYNAMIC:
3676 start_sequence ();
3677 r2 = gen_rtx_REG (Pmode, 2);
3678 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM);
3679 new_rtx = gen_rtx_CONST (Pmode, tls_call);
3680 new_rtx = force_const_mem (Pmode, new_rtx);
3681 emit_move_insn (r2, new_rtx);
3682 s390_emit_tls_call_insn (r2, tls_call);
3683 insn = get_insns ();
3684 end_sequence ();
3685
3686 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM_NTPOFF);
3687 temp = gen_reg_rtx (Pmode);
3688 emit_libcall_block (insn, temp, r2, new_rtx);
3689
3690 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3691 base = gen_reg_rtx (Pmode);
3692 s390_load_address (base, new_rtx);
3693
3694 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_DTPOFF);
3695 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3696 new_rtx = force_const_mem (Pmode, new_rtx);
3697 temp = gen_reg_rtx (Pmode);
3698 emit_move_insn (temp, new_rtx);
3699
3700 new_rtx = gen_rtx_PLUS (Pmode, base, temp);
3701 if (reg != 0)
3702 {
3703 s390_load_address (reg, new_rtx);
3704 new_rtx = reg;
3705 }
3706 break;
3707
3708 case TLS_MODEL_INITIAL_EXEC:
3709 if (flag_pic == 1)
3710 {
3711 /* Assume GOT offset < 4k. This is handled the same way
3712 in both 31- and 64-bit code. */
3713
3714 if (reload_in_progress || reload_completed)
3715 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3716
3717 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3718 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3719 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
3720 new_rtx = gen_const_mem (Pmode, new_rtx);
3721 temp = gen_reg_rtx (Pmode);
3722 emit_move_insn (temp, new_rtx);
3723 }
3724 else if (TARGET_CPU_ZARCH)
3725 {
3726 /* If the GOT offset might be >= 4k, we determine the position
3727 of the GOT entry via a PC-relative LARL. */
3728
3729 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3730 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3731 temp = gen_reg_rtx (Pmode);
3732 emit_move_insn (temp, new_rtx);
3733
3734 new_rtx = gen_const_mem (Pmode, temp);
3735 temp = gen_reg_rtx (Pmode);
3736 emit_move_insn (temp, new_rtx);
3737 }
3738 else if (flag_pic)
3739 {
3740 /* If the GOT offset might be >= 4k, we have to load it
3741 from the literal pool. */
3742
3743 if (reload_in_progress || reload_completed)
3744 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3745
3746 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3747 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3748 new_rtx = force_const_mem (Pmode, new_rtx);
3749 temp = gen_reg_rtx (Pmode);
3750 emit_move_insn (temp, new_rtx);
3751
3752 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3753 new_rtx = gen_const_mem (Pmode, new_rtx);
3754
3755 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
3756 temp = gen_reg_rtx (Pmode);
3757 emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
3758 }
3759 else
3760 {
3761 /* In position-dependent code, load the absolute address of
3762 the GOT entry from the literal pool. */
3763
3764 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3765 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3766 new_rtx = force_const_mem (Pmode, new_rtx);
3767 temp = gen_reg_rtx (Pmode);
3768 emit_move_insn (temp, new_rtx);
3769
3770 new_rtx = temp;
3771 new_rtx = gen_const_mem (Pmode, new_rtx);
3772 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
3773 temp = gen_reg_rtx (Pmode);
3774 emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
3775 }
3776
3777 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3778 if (reg != 0)
3779 {
3780 s390_load_address (reg, new_rtx);
3781 new_rtx = reg;
3782 }
3783 break;
3784
3785 case TLS_MODEL_LOCAL_EXEC:
3786 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3787 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3788 new_rtx = force_const_mem (Pmode, new_rtx);
3789 temp = gen_reg_rtx (Pmode);
3790 emit_move_insn (temp, new_rtx);
3791
3792 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3793 if (reg != 0)
3794 {
3795 s390_load_address (reg, new_rtx);
3796 new_rtx = reg;
3797 }
3798 break;
3799
3800 default:
3801 gcc_unreachable ();
3802 }
3803
3804 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == UNSPEC)
3805 {
3806 switch (XINT (XEXP (addr, 0), 1))
3807 {
3808 case UNSPEC_INDNTPOFF:
3809 gcc_assert (TARGET_CPU_ZARCH);
3810 new_rtx = addr;
3811 break;
3812
3813 default:
3814 gcc_unreachable ();
3815 }
3816 }
3817
3818 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
3819 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
3820 {
3821 new_rtx = XEXP (XEXP (addr, 0), 0);
3822 if (GET_CODE (new_rtx) != SYMBOL_REF)
3823 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3824
3825 new_rtx = legitimize_tls_address (new_rtx, reg);
3826 new_rtx = plus_constant (Pmode, new_rtx,
3827 INTVAL (XEXP (XEXP (addr, 0), 1)));
3828 new_rtx = force_operand (new_rtx, 0);
3829 }
3830
3831 else
3832 gcc_unreachable (); /* for now ... */
3833
3834 return new_rtx;
3835 }
3836
3837 /* Emit insns making the address in operands[1] valid for a standard
3838 move to operands[0]. operands[1] is replaced by an address which
3839 should be used instead of the former RTX to emit the move
3840 pattern. */
3841
3842 void
3843 emit_symbolic_move (rtx *operands)
3844 {
3845 rtx temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
3846
3847 if (GET_CODE (operands[0]) == MEM)
3848 operands[1] = force_reg (Pmode, operands[1]);
3849 else if (TLS_SYMBOLIC_CONST (operands[1]))
3850 operands[1] = legitimize_tls_address (operands[1], temp);
3851 else if (flag_pic)
3852 operands[1] = legitimize_pic_address (operands[1], temp);
3853 }
3854
3855 /* Try machine-dependent ways of modifying an illegitimate address X
3856 to be legitimate. If we find one, return the new, valid address.
3857
3858 OLDX is the address as it was before break_out_memory_refs was called.
3859 In some cases it is useful to look at this to decide what needs to be done.
3860
3861 MODE is the mode of the operand pointed to by X.
3862
3863 When -fpic is used, special handling is needed for symbolic references.
3864 See comments by legitimize_pic_address for details. */
3865
3866 static rtx
3867 s390_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
3868 enum machine_mode mode ATTRIBUTE_UNUSED)
3869 {
3870 rtx constant_term = const0_rtx;
3871
3872 if (TLS_SYMBOLIC_CONST (x))
3873 {
3874 x = legitimize_tls_address (x, 0);
3875
3876 if (s390_legitimate_address_p (mode, x, FALSE))
3877 return x;
3878 }
3879 else if (GET_CODE (x) == PLUS
3880 && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
3881 || TLS_SYMBOLIC_CONST (XEXP (x, 1))))
3882 {
3883 return x;
3884 }
3885 else if (flag_pic)
3886 {
3887 if (SYMBOLIC_CONST (x)
3888 || (GET_CODE (x) == PLUS
3889 && (SYMBOLIC_CONST (XEXP (x, 0))
3890 || SYMBOLIC_CONST (XEXP (x, 1)))))
3891 x = legitimize_pic_address (x, 0);
3892
3893 if (s390_legitimate_address_p (mode, x, FALSE))
3894 return x;
3895 }
3896
3897 x = eliminate_constant_term (x, &constant_term);
3898
3899 /* Optimize loading of large displacements by splitting them
3900 into the multiple of 4K and the rest; this allows the
3901 former to be CSE'd if possible.
3902
3903 Don't do this if the displacement is added to a register
3904 pointing into the stack frame, as the offsets will
3905 change later anyway. */
3906
3907 if (GET_CODE (constant_term) == CONST_INT
3908 && !TARGET_LONG_DISPLACEMENT
3909 && !DISP_IN_RANGE (INTVAL (constant_term))
3910 && !(REG_P (x) && REGNO_PTR_FRAME_P (REGNO (x))))
3911 {
3912 HOST_WIDE_INT lower = INTVAL (constant_term) & 0xfff;
3913 HOST_WIDE_INT upper = INTVAL (constant_term) ^ lower;
3914
3915 rtx temp = gen_reg_rtx (Pmode);
3916 rtx val = force_operand (GEN_INT (upper), temp);
3917 if (val != temp)
3918 emit_move_insn (temp, val);
3919
3920 x = gen_rtx_PLUS (Pmode, x, temp);
3921 constant_term = GEN_INT (lower);
3922 }
3923
3924 if (GET_CODE (x) == PLUS)
3925 {
3926 if (GET_CODE (XEXP (x, 0)) == REG)
3927 {
3928 rtx temp = gen_reg_rtx (Pmode);
3929 rtx val = force_operand (XEXP (x, 1), temp);
3930 if (val != temp)
3931 emit_move_insn (temp, val);
3932
3933 x = gen_rtx_PLUS (Pmode, XEXP (x, 0), temp);
3934 }
3935
3936 else if (GET_CODE (XEXP (x, 1)) == REG)
3937 {
3938 rtx temp = gen_reg_rtx (Pmode);
3939 rtx val = force_operand (XEXP (x, 0), temp);
3940 if (val != temp)
3941 emit_move_insn (temp, val);
3942
3943 x = gen_rtx_PLUS (Pmode, temp, XEXP (x, 1));
3944 }
3945 }
3946
3947 if (constant_term != const0_rtx)
3948 x = gen_rtx_PLUS (Pmode, x, constant_term);
3949
3950 return x;
3951 }
3952
3953 /* Try a machine-dependent way of reloading an illegitimate address AD
3954 operand. If we find one, push the reload and return the new address.
3955
3956 MODE is the mode of the enclosing MEM. OPNUM is the operand number
3957 and TYPE is the reload type of the current reload. */
3958
3959 rtx
3960 legitimize_reload_address (rtx ad, enum machine_mode mode ATTRIBUTE_UNUSED,
3961 int opnum, int type)
3962 {
3963 if (!optimize || TARGET_LONG_DISPLACEMENT)
3964 return NULL_RTX;
3965
3966 if (GET_CODE (ad) == PLUS)
3967 {
3968 rtx tem = simplify_binary_operation (PLUS, Pmode,
3969 XEXP (ad, 0), XEXP (ad, 1));
3970 if (tem)
3971 ad = tem;
3972 }
3973
3974 if (GET_CODE (ad) == PLUS
3975 && GET_CODE (XEXP (ad, 0)) == REG
3976 && GET_CODE (XEXP (ad, 1)) == CONST_INT
3977 && !DISP_IN_RANGE (INTVAL (XEXP (ad, 1))))
3978 {
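/* The displacement does not fit the 12-bit displacement field.  Split it
   into LOWER (the 12 bits that fit directly) and UPPER, which is added to
   the base register via a reload.  */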
3979 HOST_WIDE_INT lower = INTVAL (XEXP (ad, 1)) & 0xfff;
3980 HOST_WIDE_INT upper = INTVAL (XEXP (ad, 1)) ^ lower;
3981 rtx cst, tem, new_rtx;
3982
3983 cst = GEN_INT (upper);
3984 if (!legitimate_reload_constant_p (cst))
3985 cst = force_const_mem (Pmode, cst);
3986
3987 tem = gen_rtx_PLUS (Pmode, XEXP (ad, 0), cst);
3988 new_rtx = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));
3989
3990 push_reload (XEXP (tem, 1), 0, &XEXP (tem, 1), 0,
3991 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
3992 opnum, (enum reload_type) type);
3993 return new_rtx;
3994 }
3995
3996 return NULL_RTX;
3997 }
3998
3999 /* Emit code to move LEN bytes from SRC to DST. */
4000
4001 bool
4002 s390_expand_movmem (rtx dst, rtx src, rtx len)
4003 {
4004 /* When tuning for z10 or higher we rely on the Glibc functions to
4005 do the right thing. Only for constant lengths below 64k do we
4006 generate inline code. */
4007 if (s390_tune >= PROCESSOR_2097_Z10
4008 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
4009 return false;
4010
4011 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
4012 {
4013 if (INTVAL (len) > 0)
4014 emit_insn (gen_movmem_short (dst, src, GEN_INT (INTVAL (len) - 1)));
4015 }
4016
4017 else if (TARGET_MVCLE)
4018 {
4019 emit_insn (gen_movmem_long (dst, src, convert_to_mode (Pmode, len, 1)));
4020 }
4021
4022 else
4023 {
4024 rtx dst_addr, src_addr, count, blocks, temp;
4025 rtx loop_start_label = gen_label_rtx ();
4026 rtx loop_end_label = gen_label_rtx ();
4027 rtx end_label = gen_label_rtx ();
4028 enum machine_mode mode;
4029
4030 mode = GET_MODE (len);
4031 if (mode == VOIDmode)
4032 mode = Pmode;
4033
4034 dst_addr = gen_reg_rtx (Pmode);
4035 src_addr = gen_reg_rtx (Pmode);
4036 count = gen_reg_rtx (mode);
4037 blocks = gen_reg_rtx (mode);
4038
4039 convert_move (count, len, 1);
4040 emit_cmp_and_jump_insns (count, const0_rtx,
4041 EQ, NULL_RTX, mode, 1, end_label);
4042
4043 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
4044 emit_move_insn (src_addr, force_operand (XEXP (src, 0), NULL_RTX));
4045 dst = change_address (dst, VOIDmode, dst_addr);
4046 src = change_address (src, VOIDmode, src_addr);
4047
4048 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4049 OPTAB_DIRECT);
4050 if (temp != count)
4051 emit_move_insn (count, temp);
4052
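/* COUNT now holds LEN - 1.  BLOCKS = COUNT >> 8 is the number of full
   256-byte chunks for the MVC loop below; the leftover bytes are moved
   by the final movmem_short after the loop.  */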
4053 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4054 OPTAB_DIRECT);
4055 if (temp != blocks)
4056 emit_move_insn (blocks, temp);
4057
4058 emit_cmp_and_jump_insns (blocks, const0_rtx,
4059 EQ, NULL_RTX, mode, 1, loop_end_label);
4060
4061 emit_label (loop_start_label);
4062
4063 if (TARGET_Z10
4064 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 768))
4065 {
4066 rtx prefetch;
4067
4068 /* Issue a read prefetch for the +3 cache line. */
4069 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, src_addr, GEN_INT (768)),
4070 const0_rtx, const0_rtx);
4071 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4072 emit_insn (prefetch);
4073
4074 /* Issue a write prefetch for the +3 cache line. */
4075 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (768)),
4076 const1_rtx, const0_rtx);
4077 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4078 emit_insn (prefetch);
4079 }
4080
4081 emit_insn (gen_movmem_short (dst, src, GEN_INT (255)));
4082 s390_load_address (dst_addr,
4083 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
4084 s390_load_address (src_addr,
4085 gen_rtx_PLUS (Pmode, src_addr, GEN_INT (256)));
4086
4087 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4088 OPTAB_DIRECT);
4089 if (temp != blocks)
4090 emit_move_insn (blocks, temp);
4091
4092 emit_cmp_and_jump_insns (blocks, const0_rtx,
4093 EQ, NULL_RTX, mode, 1, loop_end_label);
4094
4095 emit_jump (loop_start_label);
4096 emit_label (loop_end_label);
4097
4098 emit_insn (gen_movmem_short (dst, src,
4099 convert_to_mode (Pmode, count, 1)));
4100 emit_label (end_label);
4101 }
4102 return true;
4103 }
4104
4105 /* Emit code to set LEN bytes at DST to VAL.
4106 Make use of clrmem if VAL is zero. */
4107
4108 void
4109 s390_expand_setmem (rtx dst, rtx len, rtx val)
4110 {
4111 if (GET_CODE (len) == CONST_INT && INTVAL (len) == 0)
4112 return;
4113
4114 gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);
4115
4116 if (GET_CODE (len) == CONST_INT && INTVAL (len) > 0 && INTVAL (len) <= 257)
4117 {
4118 if (val == const0_rtx && INTVAL (len) <= 256)
4119 emit_insn (gen_clrmem_short (dst, GEN_INT (INTVAL (len) - 1)));
4120 else
4121 {
4122 /* Initialize memory by storing the first byte. */
4123 emit_move_insn (adjust_address (dst, QImode, 0), val);
4124
4125 if (INTVAL (len) > 1)
4126 {
4127 /* Initiate 1 byte overlap move.
4128 The first byte of DST is propagated through DSTP1.
4129 Prepare a movmem for: DST+1 = DST (length = LEN - 1).
4130 DST is set to size 1 so the rest of the memory location
4131 does not count as source operand. */
4132 rtx dstp1 = adjust_address (dst, VOIDmode, 1);
4133 set_mem_size (dst, 1);
4134
4135 emit_insn (gen_movmem_short (dstp1, dst,
4136 GEN_INT (INTVAL (len) - 2)));
4137 }
4138 }
4139 }
4140
4141 else if (TARGET_MVCLE)
4142 {
4143 val = force_not_mem (convert_modes (Pmode, QImode, val, 1));
4144 emit_insn (gen_setmem_long (dst, convert_to_mode (Pmode, len, 1), val));
4145 }
4146
4147 else
4148 {
4149 rtx dst_addr, count, blocks, temp, dstp1 = NULL_RTX;
4150 rtx loop_start_label = gen_label_rtx ();
4151 rtx loop_end_label = gen_label_rtx ();
4152 rtx end_label = gen_label_rtx ();
4153 enum machine_mode mode;
4154
4155 mode = GET_MODE (len);
4156 if (mode == VOIDmode)
4157 mode = Pmode;
4158
4159 dst_addr = gen_reg_rtx (Pmode);
4160 count = gen_reg_rtx (mode);
4161 blocks = gen_reg_rtx (mode);
4162
4163 convert_move (count, len, 1);
4164 emit_cmp_and_jump_insns (count, const0_rtx,
4165 EQ, NULL_RTX, mode, 1, end_label);
4166
4167 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
4168 dst = change_address (dst, VOIDmode, dst_addr);
4169
4170 if (val == const0_rtx)
4171 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4172 OPTAB_DIRECT);
4173 else
4174 {
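/* As in the constant-length case above, propagate the first byte via an
   overlapping move: DST is given size 1 so that only the byte just stored
   acts as the source of the MVC.  */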
4175 dstp1 = adjust_address (dst, VOIDmode, 1);
4176 set_mem_size (dst, 1);
4177
4178 /* Initialize memory by storing the first byte. */
4179 emit_move_insn (adjust_address (dst, QImode, 0), val);
4180
4181 /* If count is 1 we are done. */
4182 emit_cmp_and_jump_insns (count, const1_rtx,
4183 EQ, NULL_RTX, mode, 1, end_label);
4184
4185 temp = expand_binop (mode, add_optab, count, GEN_INT (-2), count, 1,
4186 OPTAB_DIRECT);
4187 }
4188 if (temp != count)
4189 emit_move_insn (count, temp);
4190
4191 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4192 OPTAB_DIRECT);
4193 if (temp != blocks)
4194 emit_move_insn (blocks, temp);
4195
4196 emit_cmp_and_jump_insns (blocks, const0_rtx,
4197 EQ, NULL_RTX, mode, 1, loop_end_label);
4198
4199 emit_label (loop_start_label);
4200
4201 if (TARGET_Z10
4202 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 1024))
4203 {
4204 /* Issue a write prefetch for the +4 cache line. */
4205 rtx prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr,
4206 GEN_INT (1024)),
4207 const1_rtx, const0_rtx);
4208 emit_insn (prefetch);
4209 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4210 }
4211
4212 if (val == const0_rtx)
4213 emit_insn (gen_clrmem_short (dst, GEN_INT (255)));
4214 else
4215 emit_insn (gen_movmem_short (dstp1, dst, GEN_INT (255)));
4216 s390_load_address (dst_addr,
4217 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
4218
4219 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4220 OPTAB_DIRECT);
4221 if (temp != blocks)
4222 emit_move_insn (blocks, temp);
4223
4224 emit_cmp_and_jump_insns (blocks, const0_rtx,
4225 EQ, NULL_RTX, mode, 1, loop_end_label);
4226
4227 emit_jump (loop_start_label);
4228 emit_label (loop_end_label);
4229
4230 if (val == const0_rtx)
4231 emit_insn (gen_clrmem_short (dst, convert_to_mode (Pmode, count, 1)));
4232 else
4233 emit_insn (gen_movmem_short (dstp1, dst, convert_to_mode (Pmode, count, 1)));
4234 emit_label (end_label);
4235 }
4236 }
4237
4238 /* Emit code to compare LEN bytes at OP0 with those at OP1,
4239 and return the result in TARGET. */
4240
4241 bool
4242 s390_expand_cmpmem (rtx target, rtx op0, rtx op1, rtx len)
4243 {
4244 rtx ccreg = gen_rtx_REG (CCUmode, CC_REGNUM);
4245 rtx tmp;
4246
4247 /* When tuning for z10 or higher we rely on the Glibc functions to
4248 do the right thing. Only for constant lengths below 64k do we
4249 generate inline code. */
4250 if (s390_tune >= PROCESSOR_2097_Z10
4251 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
4252 return false;
4253
4254 /* As the result of CMPINT is inverted compared to what we need,
4255 we have to swap the operands. */
4256 tmp = op0; op0 = op1; op1 = tmp;
4257
4258 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
4259 {
4260 if (INTVAL (len) > 0)
4261 {
4262 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (INTVAL (len) - 1)));
4263 emit_insn (gen_cmpint (target, ccreg));
4264 }
4265 else
4266 emit_move_insn (target, const0_rtx);
4267 }
4268 else if (TARGET_MVCLE)
4269 {
4270 emit_insn (gen_cmpmem_long (op0, op1, convert_to_mode (Pmode, len, 1)));
4271 emit_insn (gen_cmpint (target, ccreg));
4272 }
4273 else
4274 {
4275 rtx addr0, addr1, count, blocks, temp;
4276 rtx loop_start_label = gen_label_rtx ();
4277 rtx loop_end_label = gen_label_rtx ();
4278 rtx end_label = gen_label_rtx ();
4279 enum machine_mode mode;
4280
4281 mode = GET_MODE (len);
4282 if (mode == VOIDmode)
4283 mode = Pmode;
4284
4285 addr0 = gen_reg_rtx (Pmode);
4286 addr1 = gen_reg_rtx (Pmode);
4287 count = gen_reg_rtx (mode);
4288 blocks = gen_reg_rtx (mode);
4289
4290 convert_move (count, len, 1);
4291 emit_cmp_and_jump_insns (count, const0_rtx,
4292 EQ, NULL_RTX, mode, 1, end_label);
4293
4294 emit_move_insn (addr0, force_operand (XEXP (op0, 0), NULL_RTX));
4295 emit_move_insn (addr1, force_operand (XEXP (op1, 0), NULL_RTX));
4296 op0 = change_address (op0, VOIDmode, addr0);
4297 op1 = change_address (op1, VOIDmode, addr1);
4298
4299 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4300 OPTAB_DIRECT);
4301 if (temp != count)
4302 emit_move_insn (count, temp);
4303
4304 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4305 OPTAB_DIRECT);
4306 if (temp != blocks)
4307 emit_move_insn (blocks, temp);
4308
4309 emit_cmp_and_jump_insns (blocks, const0_rtx,
4310 EQ, NULL_RTX, mode, 1, loop_end_label);
4311
4312 emit_label (loop_start_label);
4313
4314 if (TARGET_Z10
4315 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 512))
4316 {
4317 rtx prefetch;
4318
4319 /* Issue a read prefetch for the +2 cache line of operand 1. */
4320 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr0, GEN_INT (512)),
4321 const0_rtx, const0_rtx);
4322 emit_insn (prefetch);
4323 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4324
4325 /* Issue a read prefetch for the +2 cache line of operand 2. */
4326 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr1, GEN_INT (512)),
4327 const0_rtx, const0_rtx);
4328 emit_insn (prefetch);
4329 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4330 }
4331
4332 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (255)));
4333 temp = gen_rtx_NE (VOIDmode, ccreg, const0_rtx);
4334 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
4335 gen_rtx_LABEL_REF (VOIDmode, end_label), pc_rtx);
4336 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
4337 emit_jump_insn (temp);
4338
4339 s390_load_address (addr0,
4340 gen_rtx_PLUS (Pmode, addr0, GEN_INT (256)));
4341 s390_load_address (addr1,
4342 gen_rtx_PLUS (Pmode, addr1, GEN_INT (256)));
4343
4344 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4345 OPTAB_DIRECT);
4346 if (temp != blocks)
4347 emit_move_insn (blocks, temp);
4348
4349 emit_cmp_and_jump_insns (blocks, const0_rtx,
4350 EQ, NULL_RTX, mode, 1, loop_end_label);
4351
4352 emit_jump (loop_start_label);
4353 emit_label (loop_end_label);
4354
4355 emit_insn (gen_cmpmem_short (op0, op1,
4356 convert_to_mode (Pmode, count, 1)));
4357 emit_label (end_label);
4358
4359 emit_insn (gen_cmpint (target, ccreg));
4360 }
4361 return true;
4362 }
4363
4364
4365 /* Expand conditional increment or decrement using alc/slb instructions.
4366 Should generate code setting DST to either SRC or SRC + INCREMENT,
4367 depending on the result of the comparison CMP_OP0 CMP_CODE CMP_OP1.
4368 Returns true if successful, false otherwise.
4369
4370 That makes it possible to implement some if-constructs without jumps e.g.:
4371 (borrow = CC0 | CC1 and carry = CC2 | CC3)
4372 unsigned int a, b, c;
4373 if (a < b) c++; -> CCU b > a -> CC2; c += carry;
4374 if (a < b) c--; -> CCL3 a - b -> borrow; c -= borrow;
4375 if (a <= b) c++; -> CCL3 b - a -> borrow; c += carry;
4376 if (a <= b) c--; -> CCU a <= b -> borrow; c -= borrow;
4377
4378 Checks for EQ and NE with a nonzero value need an additional xor e.g.:
4379 if (a == b) c++; -> CCL3 a ^= b; 0 - a -> borrow; c += carry;
4380 if (a == b) c--; -> CCU a ^= b; a <= 0 -> CC0 | CC1; c -= borrow;
4381 if (a != b) c++; -> CCU a ^= b; a > 0 -> CC2; c += carry;
4382 if (a != b) c--; -> CCL3 a ^= b; 0 - a -> borrow; c -= borrow; */
4383
4384 bool
4385 s390_expand_addcc (enum rtx_code cmp_code, rtx cmp_op0, rtx cmp_op1,
4386 rtx dst, rtx src, rtx increment)
4387 {
4388 enum machine_mode cmp_mode;
4389 enum machine_mode cc_mode;
4390 rtx op_res;
4391 rtx insn;
4392 rtvec p;
4393 int ret;
4394
4395 if ((GET_MODE (cmp_op0) == SImode || GET_MODE (cmp_op0) == VOIDmode)
4396 && (GET_MODE (cmp_op1) == SImode || GET_MODE (cmp_op1) == VOIDmode))
4397 cmp_mode = SImode;
4398 else if ((GET_MODE (cmp_op0) == DImode || GET_MODE (cmp_op0) == VOIDmode)
4399 && (GET_MODE (cmp_op1) == DImode || GET_MODE (cmp_op1) == VOIDmode))
4400 cmp_mode = DImode;
4401 else
4402 return false;
4403
4404 /* Try ADD LOGICAL WITH CARRY. */
4405 if (increment == const1_rtx)
4406 {
4407 /* Determine CC mode to use. */
4408 if (cmp_code == EQ || cmp_code == NE)
4409 {
4410 if (cmp_op1 != const0_rtx)
4411 {
4412 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
4413 NULL_RTX, 0, OPTAB_WIDEN);
4414 cmp_op1 = const0_rtx;
4415 }
4416
4417 cmp_code = cmp_code == EQ ? LEU : GTU;
4418 }
4419
4420 if (cmp_code == LTU || cmp_code == LEU)
4421 {
4422 rtx tem = cmp_op0;
4423 cmp_op0 = cmp_op1;
4424 cmp_op1 = tem;
4425 cmp_code = swap_condition (cmp_code);
4426 }
4427
4428 switch (cmp_code)
4429 {
4430 case GTU:
4431 cc_mode = CCUmode;
4432 break;
4433
4434 case GEU:
4435 cc_mode = CCL3mode;
4436 break;
4437
4438 default:
4439 return false;
4440 }
4441
4442 /* Emit comparison instruction pattern. */
4443 if (!register_operand (cmp_op0, cmp_mode))
4444 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4445
4446 insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
4447 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4448 /* We use insn_invalid_p here to add clobbers if required. */
4449 ret = insn_invalid_p (emit_insn (insn), false);
4450 gcc_assert (!ret);
4451
4452 /* Emit ALC instruction pattern. */
4453 op_res = gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4454 gen_rtx_REG (cc_mode, CC_REGNUM),
4455 const0_rtx);
4456
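/* OP_RES is the carry extracted from the CC register.  For a nonzero SRC
   it is wrapped below into SRC + carry + 0, the three-operand shape
   expected by the add-with-carry (ALC) insn patterns.  */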
4457 if (src != const0_rtx)
4458 {
4459 if (!register_operand (src, GET_MODE (dst)))
4460 src = force_reg (GET_MODE (dst), src);
4461
4462 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, src);
4463 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, const0_rtx);
4464 }
4465
4466 p = rtvec_alloc (2);
4467 RTVEC_ELT (p, 0) =
4468 gen_rtx_SET (VOIDmode, dst, op_res);
4469 RTVEC_ELT (p, 1) =
4470 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4471 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
4472
4473 return true;
4474 }
4475
4476 /* Try SUBTRACT LOGICAL WITH BORROW. */
4477 if (increment == constm1_rtx)
4478 {
4479 /* Determine CC mode to use. */
4480 if (cmp_code == EQ || cmp_code == NE)
4481 {
4482 if (cmp_op1 != const0_rtx)
4483 {
4484 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
4485 NULL_RTX, 0, OPTAB_WIDEN);
4486 cmp_op1 = const0_rtx;
4487 }
4488
4489 cmp_code = cmp_code == EQ ? LEU : GTU;
4490 }
4491
4492 if (cmp_code == GTU || cmp_code == GEU)
4493 {
4494 rtx tem = cmp_op0;
4495 cmp_op0 = cmp_op1;
4496 cmp_op1 = tem;
4497 cmp_code = swap_condition (cmp_code);
4498 }
4499
4500 switch (cmp_code)
4501 {
4502 case LEU:
4503 cc_mode = CCUmode;
4504 break;
4505
4506 case LTU:
4507 cc_mode = CCL3mode;
4508 break;
4509
4510 default:
4511 return false;
4512 }
4513
4514 /* Emit comparison instruction pattern. */
4515 if (!register_operand (cmp_op0, cmp_mode))
4516 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4517
4518 insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
4519 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4520 /* We use insn_invalid_p here to add clobbers if required. */
4521 ret = insn_invalid_p (emit_insn (insn), false);
4522 gcc_assert (!ret);
4523
4524 /* Emit SLB instruction pattern. */
4525 if (!register_operand (src, GET_MODE (dst)))
4526 src = force_reg (GET_MODE (dst), src);
4527
4528 op_res = gen_rtx_MINUS (GET_MODE (dst),
4529 gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
4530 gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4531 gen_rtx_REG (cc_mode, CC_REGNUM),
4532 const0_rtx));
4533 p = rtvec_alloc (2);
4534 RTVEC_ELT (p, 0) =
4535 gen_rtx_SET (VOIDmode, dst, op_res);
4536 RTVEC_ELT (p, 1) =
4537 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4538 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
4539
4540 return true;
4541 }
4542
4543 return false;
4544 }
4545
4546 /* Expand code for the insv template. Return true if successful. */
4547
4548 bool
4549 s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
4550 {
4551 int bitsize = INTVAL (op1);
4552 int bitpos = INTVAL (op2);
4553 enum machine_mode mode = GET_MODE (dest);
4554 enum machine_mode smode;
4555 int smode_bsize, mode_bsize;
4556 rtx op, clobber;
4557
4558 /* Generate INSERT IMMEDIATE (IILL et al). */
4559 /* (set (ze (reg)) (const_int)). */
4560 if (TARGET_ZARCH
4561 && register_operand (dest, word_mode)
4562 && (bitpos % 16) == 0
4563 && (bitsize % 16) == 0
4564 && const_int_operand (src, VOIDmode))
4565 {
4566 HOST_WIDE_INT val = INTVAL (src);
4567 int regpos = bitpos + bitsize;
4568
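/* Store the constant 16 bits at a time (32 bits at a time when EXTIMM is
   available), walking REGPOS down from the end of the field while
   consuming VAL from its low-order chunks.  */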
4569 while (regpos > bitpos)
4570 {
4571 enum machine_mode putmode;
4572 int putsize;
4573
4574 if (TARGET_EXTIMM && (regpos % 32 == 0) && (regpos >= bitpos + 32))
4575 putmode = SImode;
4576 else
4577 putmode = HImode;
4578
4579 putsize = GET_MODE_BITSIZE (putmode);
4580 regpos -= putsize;
4581 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
4582 GEN_INT (putsize),
4583 GEN_INT (regpos)),
4584 gen_int_mode (val, putmode));
4585 val >>= putsize;
4586 }
4587 gcc_assert (regpos == bitpos);
4588 return true;
4589 }
4590
4591 smode = smallest_mode_for_size (bitsize, MODE_INT);
4592 smode_bsize = GET_MODE_BITSIZE (smode);
4593 mode_bsize = GET_MODE_BITSIZE (mode);
4594
4595 /* Generate STORE CHARACTERS UNDER MASK (STCM et al). */
4596 if (bitpos == 0
4597 && (bitsize % BITS_PER_UNIT) == 0
4598 && MEM_P (dest)
4599 && (register_operand (src, word_mode)
4600 || const_int_operand (src, VOIDmode)))
4601 {
4602 /* Emit standard pattern if possible. */
4603 if (smode_bsize == bitsize)
4604 {
4605 emit_move_insn (adjust_address (dest, smode, 0),
4606 gen_lowpart (smode, src));
4607 return true;
4608 }
4609
4610 /* (set (ze (mem)) (const_int)). */
4611 else if (const_int_operand (src, VOIDmode))
4612 {
4613 int size = bitsize / BITS_PER_UNIT;
4614 rtx src_mem = adjust_address (force_const_mem (word_mode, src),
4615 BLKmode,
4616 UNITS_PER_WORD - size);
4617
4618 dest = adjust_address (dest, BLKmode, 0);
4619 set_mem_size (dest, size);
4620 s390_expand_movmem (dest, src_mem, GEN_INT (size));
4621 return true;
4622 }
4623
4624 /* (set (ze (mem)) (reg)). */
4625 else if (register_operand (src, word_mode))
4626 {
4627 if (bitsize <= 32)
4628 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, op1,
4629 const0_rtx), src);
4630 else
4631 {
4632 /* Emit st,stcmh sequence. */
4633 int stcmh_width = bitsize - 32;
4634 int size = stcmh_width / BITS_PER_UNIT;
4635
4636 emit_move_insn (adjust_address (dest, SImode, size),
4637 gen_lowpart (SImode, src));
4638 set_mem_size (dest, size);
4639 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
4640 GEN_INT (stcmh_width),
4641 const0_rtx),
4642 gen_rtx_LSHIFTRT (word_mode, src, GEN_INT (32)));
4643 }
4644 return true;
4645 }
4646 }
4647
4648 /* Generate INSERT CHARACTERS UNDER MASK (IC, ICM et al). */
4649 if ((bitpos % BITS_PER_UNIT) == 0
4650 && (bitsize % BITS_PER_UNIT) == 0
4651 && (bitpos & 32) == ((bitpos + bitsize - 1) & 32)
4652 && MEM_P (src)
4653 && (mode == DImode || mode == SImode)
4654 && register_operand (dest, mode))
4655 {
4656 /* Emit a strict_low_part pattern if possible. */
4657 if (smode_bsize == bitsize && bitpos == mode_bsize - smode_bsize)
4658 {
4659 op = gen_rtx_STRICT_LOW_PART (VOIDmode, gen_lowpart (smode, dest));
4660 op = gen_rtx_SET (VOIDmode, op, gen_lowpart (smode, src));
4661 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4662 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
4663 return true;
4664 }
4665
4666 /* ??? There are more powerful versions of ICM that are not
4667 completely represented in the md file. */
4668 }
4669
4670 /* For z10, generate ROTATE THEN INSERT SELECTED BITS (RISBG et al). */
4671 if (TARGET_Z10 && (mode == DImode || mode == SImode))
4672 {
4673 enum machine_mode mode_s = GET_MODE (src);
4674
4675 if (mode_s == VOIDmode)
4676 {
4677 /* Assume const_int etc already in the proper mode. */
4678 src = force_reg (mode, src);
4679 }
4680 else if (mode_s != mode)
4681 {
4682 gcc_assert (GET_MODE_BITSIZE (mode_s) >= bitsize);
4683 src = force_reg (mode_s, src);
4684 src = gen_lowpart (mode, src);
4685 }
4686
4687 op = gen_rtx_SET (mode,
4688 gen_rtx_ZERO_EXTRACT (mode, dest, op1, op2),
4689 src);
4690 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4691 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
4692
4693 return true;
4694 }
4695
4696 return false;
4697 }
4698
4699 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic which returns a
4700 register that holds VAL of mode MODE shifted by COUNT bits. */
4701
4702 static inline rtx
4703 s390_expand_mask_and_shift (rtx val, enum machine_mode mode, rtx count)
4704 {
4705 val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
4706 NULL_RTX, 1, OPTAB_DIRECT);
4707 return expand_simple_binop (SImode, ASHIFT, val, count,
4708 NULL_RTX, 1, OPTAB_DIRECT);
4709 }
4710
4711 /* Structure to hold the initial parameters for a compare_and_swap operation
4712 in HImode and QImode. */
4713
4714 struct alignment_context
4715 {
4716 rtx memsi; /* SI aligned memory location. */
4717 rtx shift; /* Bit offset with regard to lsb. */
4718 rtx modemask; /* Mask of the HQImode shifted by SHIFT bits. */
4719 rtx modemaski; /* ~modemask */
4720 bool aligned; /* True if memory is aligned, false otherwise. */
4721 };
4722
4723 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize
4724 structure AC for transparent simplification, if the memory alignment is known
4725 to be at least 32 bits. MEM is the memory location for the actual operation
4726 and MODE its mode. */
4727
4728 static void
4729 init_alignment_context (struct alignment_context *ac, rtx mem,
4730 enum machine_mode mode)
4731 {
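/* SHIFT starts out as the distance, in bytes, between the HQI value and the
   least significant byte of the containing SImode word, assuming the value
   sits at the start of an aligned word (big-endian).  It is adjusted for the
   real byte offset below and converted to a bit count at the end.  */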
4732 ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
4733 ac->aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));
4734
4735 if (ac->aligned)
4736 ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned. */
4737 else
4738 {
4739 /* Alignment is unknown. */
4740 rtx byteoffset, addr, align;
4741
4742 /* Force the address into a register. */
4743 addr = force_reg (Pmode, XEXP (mem, 0));
4744
4745 /* Align it to SImode. */
4746 align = expand_simple_binop (Pmode, AND, addr,
4747 GEN_INT (-GET_MODE_SIZE (SImode)),
4748 NULL_RTX, 1, OPTAB_DIRECT);
4749 /* Generate MEM. */
4750 ac->memsi = gen_rtx_MEM (SImode, align);
4751 MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
4752 set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
4753 set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));
4754
4755 /* Calculate shiftcount. */
4756 byteoffset = expand_simple_binop (Pmode, AND, addr,
4757 GEN_INT (GET_MODE_SIZE (SImode) - 1),
4758 NULL_RTX, 1, OPTAB_DIRECT);
4759 /* As we already have some offset, evaluate the remaining distance. */
4760 ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
4761 NULL_RTX, 1, OPTAB_DIRECT);
4762 }
4763
4764 /* Shift is the byte count, but we need the bitcount. */
4765 ac->shift = expand_simple_binop (SImode, ASHIFT, ac->shift, GEN_INT (3),
4766 NULL_RTX, 1, OPTAB_DIRECT);
4767
4768 /* Calculate masks. */
4769 ac->modemask = expand_simple_binop (SImode, ASHIFT,
4770 GEN_INT (GET_MODE_MASK (mode)),
4771 ac->shift, NULL_RTX, 1, OPTAB_DIRECT);
4772 ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask,
4773 NULL_RTX, 1);
4774 }
4775
4776 /* A subroutine of s390_expand_cs_hqi. Insert INS into VAL. If possible,
4777 use a single insv insn into SEQ2. Otherwise, put prep insns in SEQ1 and
4778 perform the merge in SEQ2. */
4779
4780 static rtx
4781 s390_two_part_insv (struct alignment_context *ac, rtx *seq1, rtx *seq2,
4782 enum machine_mode mode, rtx val, rtx ins)
4783 {
4784 rtx tmp;
4785
4786 if (ac->aligned)
4787 {
4788 start_sequence ();
4789 tmp = copy_to_mode_reg (SImode, val);
4790 if (s390_expand_insv (tmp, GEN_INT (GET_MODE_BITSIZE (mode)),
4791 const0_rtx, ins))
4792 {
4793 *seq1 = NULL;
4794 *seq2 = get_insns ();
4795 end_sequence ();
4796 return tmp;
4797 }
4798 end_sequence ();
4799 }
4800
4801 /* Failed to use insv. Generate a two part shift and mask. */
4802 start_sequence ();
4803 tmp = s390_expand_mask_and_shift (ins, mode, ac->shift);
4804 *seq1 = get_insns ();
4805 end_sequence ();
4806
4807 start_sequence ();
4808 tmp = expand_simple_binop (SImode, IOR, tmp, val, NULL_RTX, 1, OPTAB_DIRECT);
4809 *seq2 = get_insns ();
4810 end_sequence ();
4811
4812 return tmp;
4813 }
4814
4815 /* Expand an atomic compare and swap operation for HImode and QImode. MEM is
4816 the memory location, CMP the old value to compare MEM with and NEW_RTX the
4817 value to set if CMP == MEM. */
4818
4819 void
4820 s390_expand_cs_hqi (enum machine_mode mode, rtx btarget, rtx vtarget, rtx mem,
4821 rtx cmp, rtx new_rtx, bool is_weak)
4822 {
4823 struct alignment_context ac;
4824 rtx cmpv, newv, val, resv, cc, seq0, seq1, seq2, seq3;
4825 rtx res = gen_reg_rtx (SImode);
4826 rtx csloop = NULL, csend = NULL;
4827
4828 gcc_assert (register_operand (vtarget, VOIDmode));
4829 gcc_assert (MEM_P (mem));
4830
4831 init_alignment_context (&ac, mem, mode);
4832
4833 /* Load full word. Subsequent loads are performed by CS. */
4834 val = expand_simple_binop (SImode, AND, ac.memsi, ac.modemaski,
4835 NULL_RTX, 1, OPTAB_DIRECT);
4836
4837 /* Prepare insertions of cmp and new_rtx into the loaded value. When
4838 possible, we try to use insv to make this happen efficiently. If
4839 that fails we'll generate code both inside and outside the loop. */
4840 cmpv = s390_two_part_insv (&ac, &seq0, &seq2, mode, val, cmp);
4841 newv = s390_two_part_insv (&ac, &seq1, &seq3, mode, val, new_rtx);
4842
4843 if (seq0)
4844 emit_insn (seq0);
4845 if (seq1)
4846 emit_insn (seq1);
4847
4848 /* Start CS loop. */
4849 if (!is_weak)
4850 {
4851 /* Begin assuming success. */
4852 emit_move_insn (btarget, const1_rtx);
4853
4854 csloop = gen_label_rtx ();
4855 csend = gen_label_rtx ();
4856 emit_label (csloop);
4857 }
4858
4859 /* val = "<mem>00..0<mem>"
4860 * cmp = "00..0<cmp>00..0"
4861 * new = "00..0<new>00..0"
4862 */
4863
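/* Merge CMP and NEW_RTX into the background word VAL using the insns
   prepared by s390_two_part_insv.  They are placed inside the CS loop
   because VAL is refreshed on every retry.  */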
4864 emit_insn (seq2);
4865 emit_insn (seq3);
4866
4867 cc = s390_emit_compare_and_swap (EQ, res, ac.memsi, cmpv, newv);
4868 if (is_weak)
4869 emit_insn (gen_cstorecc4 (btarget, cc, XEXP (cc, 0), XEXP (cc, 1)));
4870 else
4871 {
4872 /* Jump to end if we're done (likely?). */
4873 s390_emit_jump (csend, cc);
4874
4875 /* Check for changes outside the mode, and loop internally if so. */
4876 resv = expand_simple_binop (SImode, AND, res, ac.modemaski,
4877 NULL_RTX, 1, OPTAB_DIRECT);
4878 cc = s390_emit_compare (NE, resv, val);
4879 emit_move_insn (val, resv);
4880 s390_emit_jump (csloop, cc);
4881
4882 /* Failed. */
4883 emit_move_insn (btarget, const0_rtx);
4884 emit_label (csend);
4885 }
4886
4887 /* Return the correct part of the bitfield. */
4888 convert_move (vtarget, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
4889 NULL_RTX, 1, OPTAB_DIRECT), 1);
4890 }
4891
4892 /* Expand an atomic operation CODE of mode MODE. MEM is the memory location
4893 and VAL the value to play with. If AFTER is true then store the value
4894 MEM holds after the operation, if AFTER is false then store the value MEM
4895 holds before the operation. If TARGET is zero then discard that value, else
4896 store it to TARGET. */
4897
4898 void
4899 s390_expand_atomic (enum machine_mode mode, enum rtx_code code,
4900 rtx target, rtx mem, rtx val, bool after)
4901 {
4902 struct alignment_context ac;
4903 rtx cmp;
4904 rtx new_rtx = gen_reg_rtx (SImode);
4905 rtx orig = gen_reg_rtx (SImode);
4906 rtx csloop = gen_label_rtx ();
4907
4908 gcc_assert (!target || register_operand (target, VOIDmode));
4909 gcc_assert (MEM_P (mem));
4910
4911 init_alignment_context (&ac, mem, mode);
4912
4913 /* Shift val to the correct bit positions.
4914 Preserve "icm", but prevent "ex icm". */
4915 if (!(ac.aligned && code == SET && MEM_P (val)))
4916 val = s390_expand_mask_and_shift (val, mode, ac.shift);
4917
4918 /* Further preparation insns. */
4919 if (code == PLUS || code == MINUS)
4920 emit_move_insn (orig, val);
4921 else if (code == MULT || code == AND) /* val = "11..1<val>11..1" */
4922 val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
4923 NULL_RTX, 1, OPTAB_DIRECT);
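/* For AND and NAND, the bits outside the HQI field are set to one here so
   that the full-word operation below leaves them unchanged.  */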
4924
4925 /* Load full word. Subsequent loads are performed by CS. */
4926 cmp = force_reg (SImode, ac.memsi);
4927
4928 /* Start CS loop. */
4929 emit_label (csloop);
4930 emit_move_insn (new_rtx, cmp);
4931
4932 /* Patch new with val at correct position. */
4933 switch (code)
4934 {
4935 case PLUS:
4936 case MINUS:
4937 val = expand_simple_binop (SImode, code, new_rtx, orig,
4938 NULL_RTX, 1, OPTAB_DIRECT);
4939 val = expand_simple_binop (SImode, AND, val, ac.modemask,
4940 NULL_RTX, 1, OPTAB_DIRECT);
4941 /* FALLTHRU */
4942 case SET:
4943 if (ac.aligned && MEM_P (val))
4944 store_bit_field (new_rtx, GET_MODE_BITSIZE (mode), 0,
4945 0, 0, SImode, val);
4946 else
4947 {
4948 new_rtx = expand_simple_binop (SImode, AND, new_rtx, ac.modemaski,
4949 NULL_RTX, 1, OPTAB_DIRECT);
4950 new_rtx = expand_simple_binop (SImode, IOR, new_rtx, val,
4951 NULL_RTX, 1, OPTAB_DIRECT);
4952 }
4953 break;
4954 case AND:
4955 case IOR:
4956 case XOR:
4957 new_rtx = expand_simple_binop (SImode, code, new_rtx, val,
4958 NULL_RTX, 1, OPTAB_DIRECT);
4959 break;
4960 case MULT: /* NAND */
4961 new_rtx = expand_simple_binop (SImode, AND, new_rtx, val,
4962 NULL_RTX, 1, OPTAB_DIRECT);
4963 new_rtx = expand_simple_binop (SImode, XOR, new_rtx, ac.modemask,
4964 NULL_RTX, 1, OPTAB_DIRECT);
4965 break;
4966 default:
4967 gcc_unreachable ();
4968 }
4969
4970 s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, cmp,
4971 ac.memsi, cmp, new_rtx));
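/* If the CS failed, CMP has been updated with the current memory contents;
   branch back and retry with that value.  */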
4972
4973 /* Return the correct part of the bitfield. */
4974 if (target)
4975 convert_move (target, expand_simple_binop (SImode, LSHIFTRT,
4976 after ? new_rtx : cmp, ac.shift,
4977 NULL_RTX, 1, OPTAB_DIRECT), 1);
4978 }
4979
4980 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
4981 We need to emit DTP-relative relocations. */
4982
4983 static void s390_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
4984
4985 static void
4986 s390_output_dwarf_dtprel (FILE *file, int size, rtx x)
4987 {
4988 switch (size)
4989 {
4990 case 4:
4991 fputs ("\t.long\t", file);
4992 break;
4993 case 8:
4994 fputs ("\t.quad\t", file);
4995 break;
4996 default:
4997 gcc_unreachable ();
4998 }
4999 output_addr_const (file, x);
5000 fputs ("@DTPOFF", file);
5001 }
5002
5003 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
5004 /* Implement TARGET_MANGLE_TYPE. */
5005
5006 static const char *
5007 s390_mangle_type (const_tree type)
5008 {
5009 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
5010 && TARGET_LONG_DOUBLE_128)
5011 return "g";
5012
5013 /* For all other types, use normal C++ mangling. */
5014 return NULL;
5015 }
5016 #endif
5017
5018 /* In the name of slightly smaller debug output, and to cater to
5019 general assembler lossage, recognize various UNSPEC sequences
5020 and turn them back into a direct symbol reference. */
5021
5022 static rtx
5023 s390_delegitimize_address (rtx orig_x)
5024 {
5025 rtx x, y;
5026
5027 orig_x = delegitimize_mem_from_attrs (orig_x);
5028 x = orig_x;
5029
5030 /* Extract the symbol ref from:
5031 (plus:SI (reg:SI 12 %r12)
5032 (const:SI (unspec:SI [(symbol_ref/f:SI ("*.LC0"))]
5033 UNSPEC_GOTOFF/PLTOFF)))
5034 and
5035 (plus:SI (reg:SI 12 %r12)
5036 (const:SI (plus:SI (unspec:SI [(symbol_ref:SI ("L"))]
5037 UNSPEC_GOTOFF/PLTOFF)
5038 (const_int 4 [0x4])))) */
5039 if (GET_CODE (x) == PLUS
5040 && REG_P (XEXP (x, 0))
5041 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
5042 && GET_CODE (XEXP (x, 1)) == CONST)
5043 {
5044 HOST_WIDE_INT offset = 0;
5045
5046 /* The const operand. */
5047 y = XEXP (XEXP (x, 1), 0);
5048
5049 if (GET_CODE (y) == PLUS
5050 && GET_CODE (XEXP (y, 1)) == CONST_INT)
5051 {
5052 offset = INTVAL (XEXP (y, 1));
5053 y = XEXP (y, 0);
5054 }
5055
5056 if (GET_CODE (y) == UNSPEC
5057 && (XINT (y, 1) == UNSPEC_GOTOFF
5058 || XINT (y, 1) == UNSPEC_PLTOFF))
5059 return plus_constant (Pmode, XVECEXP (y, 0, 0), offset);
5060 }
5061
5062 if (GET_CODE (x) != MEM)
5063 return orig_x;
5064
5065 x = XEXP (x, 0);
5066 if (GET_CODE (x) == PLUS
5067 && GET_CODE (XEXP (x, 1)) == CONST
5068 && GET_CODE (XEXP (x, 0)) == REG
5069 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
5070 {
5071 y = XEXP (XEXP (x, 1), 0);
5072 if (GET_CODE (y) == UNSPEC
5073 && XINT (y, 1) == UNSPEC_GOT)
5074 y = XVECEXP (y, 0, 0);
5075 else
5076 return orig_x;
5077 }
5078 else if (GET_CODE (x) == CONST)
5079 {
5080 /* Extract the symbol ref from:
5081 (mem:QI (const:DI (unspec:DI [(symbol_ref:DI ("foo"))]
5082 UNSPEC_PLT/GOTENT))) */
5083
5084 y = XEXP (x, 0);
5085 if (GET_CODE (y) == UNSPEC
5086 && (XINT (y, 1) == UNSPEC_GOTENT
5087 || XINT (y, 1) == UNSPEC_PLT))
5088 y = XVECEXP (y, 0, 0);
5089 else
5090 return orig_x;
5091 }
5092 else
5093 return orig_x;
5094
5095 if (GET_MODE (orig_x) != Pmode)
5096 {
5097 if (GET_MODE (orig_x) == BLKmode)
5098 return orig_x;
5099 y = lowpart_subreg (GET_MODE (orig_x), y, Pmode);
5100 if (y == NULL_RTX)
5101 return orig_x;
5102 }
5103 return y;
5104 }
5105
5106 /* Output operand OP to stdio stream FILE.
5107 OP is an address (register + offset) which is not used to address data;
5108 instead the rightmost bits are interpreted as the value. */
5109
5110 static void
5111 print_shift_count_operand (FILE *file, rtx op)
5112 {
5113 HOST_WIDE_INT offset;
5114 rtx base;
5115
5116 /* Extract base register and offset. */
5117 if (!s390_decompose_shift_count (op, &base, &offset))
5118 gcc_unreachable ();
5119
5120 /* Sanity check. */
5121 if (base)
5122 {
5123 gcc_assert (GET_CODE (base) == REG);
5124 gcc_assert (REGNO (base) < FIRST_PSEUDO_REGISTER);
5125 gcc_assert (REGNO_REG_CLASS (REGNO (base)) == ADDR_REGS);
5126 }
5127
5128 /* Offsets are restricted to twelve bits. */
5129 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset & ((1 << 12) - 1));
5130 if (base)
5131 fprintf (file, "(%s)", reg_names[REGNO (base)]);
5132 }
5133
5134 /* See 'get_some_local_dynamic_name'. */
5135
5136 static int
5137 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
5138 {
5139 rtx x = *px;
5140
5141 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
5142 {
5143 x = get_pool_constant (x);
5144 return for_each_rtx (&x, get_some_local_dynamic_name_1, 0);
5145 }
5146
5147 if (GET_CODE (x) == SYMBOL_REF
5148 && tls_symbolic_operand (x) == TLS_MODEL_LOCAL_DYNAMIC)
5149 {
5150 cfun->machine->some_ld_name = XSTR (x, 0);
5151 return 1;
5152 }
5153
5154 return 0;
5155 }
5156
5157 /* Locate some local-dynamic symbol still in use by this function
5158 so that we can print its name in local-dynamic base patterns. */
5159
5160 static const char *
5161 get_some_local_dynamic_name (void)
5162 {
5163 rtx insn;
5164
5165 if (cfun->machine->some_ld_name)
5166 return cfun->machine->some_ld_name;
5167
5168 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
5169 if (INSN_P (insn)
5170 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
5171 return cfun->machine->some_ld_name;
5172
5173 gcc_unreachable ();
5174 }
5175
5176 /* Output machine-dependent UNSPECs occurring in address constant X
5177 in assembler syntax to stdio stream FILE. Returns true if the
5178 constant X could be recognized, false otherwise. */
5179
5180 static bool
5181 s390_output_addr_const_extra (FILE *file, rtx x)
5182 {
5183 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
5184 switch (XINT (x, 1))
5185 {
5186 case UNSPEC_GOTENT:
5187 output_addr_const (file, XVECEXP (x, 0, 0));
5188 fprintf (file, "@GOTENT");
5189 return true;
5190 case UNSPEC_GOT:
5191 output_addr_const (file, XVECEXP (x, 0, 0));
5192 fprintf (file, "@GOT");
5193 return true;
5194 case UNSPEC_GOTOFF:
5195 output_addr_const (file, XVECEXP (x, 0, 0));
5196 fprintf (file, "@GOTOFF");
5197 return true;
5198 case UNSPEC_PLT:
5199 output_addr_const (file, XVECEXP (x, 0, 0));
5200 fprintf (file, "@PLT");
5201 return true;
5202 case UNSPEC_PLTOFF:
5203 output_addr_const (file, XVECEXP (x, 0, 0));
5204 fprintf (file, "@PLTOFF");
5205 return true;
5206 case UNSPEC_TLSGD:
5207 output_addr_const (file, XVECEXP (x, 0, 0));
5208 fprintf (file, "@TLSGD");
5209 return true;
5210 case UNSPEC_TLSLDM:
5211 assemble_name (file, get_some_local_dynamic_name ());
5212 fprintf (file, "@TLSLDM");
5213 return true;
5214 case UNSPEC_DTPOFF:
5215 output_addr_const (file, XVECEXP (x, 0, 0));
5216 fprintf (file, "@DTPOFF");
5217 return true;
5218 case UNSPEC_NTPOFF:
5219 output_addr_const (file, XVECEXP (x, 0, 0));
5220 fprintf (file, "@NTPOFF");
5221 return true;
5222 case UNSPEC_GOTNTPOFF:
5223 output_addr_const (file, XVECEXP (x, 0, 0));
5224 fprintf (file, "@GOTNTPOFF");
5225 return true;
5226 case UNSPEC_INDNTPOFF:
5227 output_addr_const (file, XVECEXP (x, 0, 0));
5228 fprintf (file, "@INDNTPOFF");
5229 return true;
5230 }
5231
5232 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 2)
5233 switch (XINT (x, 1))
5234 {
5235 case UNSPEC_POOL_OFFSET:
5236 x = gen_rtx_MINUS (GET_MODE (x), XVECEXP (x, 0, 0), XVECEXP (x, 0, 1));
5237 output_addr_const (file, x);
5238 return true;
5239 }
5240 return false;
5241 }
5242
5243 /* Output address operand ADDR in assembler syntax to
5244 stdio stream FILE. */
5245
5246 void
5247 print_operand_address (FILE *file, rtx addr)
5248 {
5249 struct s390_address ad;
5250
5251 if (s390_symref_operand_p (addr, NULL, NULL))
5252 {
5253 if (!TARGET_Z10)
5254 {
5255 output_operand_lossage ("symbolic memory references are "
5256 "only supported on z10 or later");
5257 return;
5258 }
5259 output_addr_const (file, addr);
5260 return;
5261 }
5262
5263 if (!s390_decompose_address (addr, &ad)
5264 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5265 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
5266 output_operand_lossage ("cannot decompose address");
5267
5268 if (ad.disp)
5269 output_addr_const (file, ad.disp);
5270 else
5271 fprintf (file, "0");
5272
5273 if (ad.base && ad.indx)
5274 fprintf (file, "(%s,%s)", reg_names[REGNO (ad.indx)],
5275 reg_names[REGNO (ad.base)]);
5276 else if (ad.base)
5277 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
5278 }
5279
5280 /* Output operand X in assembler syntax to stdio stream FILE.
5281 CODE specifies the format flag. The following format flags
5282 are recognized:
5283
5284 'C': print opcode suffix for branch condition.
5285 'D': print opcode suffix for inverse branch condition.
5286 'E': print opcode suffix for branch on index instruction.
5287 'J': print tls_load/tls_gdcall/tls_ldcall suffix
5288 'G': print the size of the operand in bytes.
5289 'O': print only the displacement of a memory reference.
5290 'R': print only the base register of a memory reference.
5291 'S': print S-type memory reference (base+displacement).
5292 'N': print the second word of a DImode operand.
5293 'M': print the second word of a TImode operand.
5294 'Y': print shift count operand.
5295
5296 'b': print integer X as if it's an unsigned byte.
5297 'c': print integer X as if it's a signed byte.
5298 'x': print integer X as if it's an unsigned halfword.
5299 'h': print integer X as if it's a signed halfword.
5300 'i': print the first nonzero HImode part of X.
5301 'j': print the first HImode part unequal to -1 of X.
5302 'k': print the first nonzero SImode part of X.
5303 'm': print the first SImode part unequal to -1 of X.
5304 'o': print integer X as if it's an unsigned 32bit word. */
5305
5306 void
5307 print_operand (FILE *file, rtx x, int code)
5308 {
5309 switch (code)
5310 {
5311 case 'C':
5312 fprintf (file, s390_branch_condition_mnemonic (x, FALSE));
5313 return;
5314
5315 case 'D':
5316 fprintf (file, s390_branch_condition_mnemonic (x, TRUE));
5317 return;
5318
5319 case 'E':
5320 if (GET_CODE (x) == LE)
5321 fprintf (file, "l");
5322 else if (GET_CODE (x) == GT)
5323 fprintf (file, "h");
5324 else
5325 output_operand_lossage ("invalid comparison operator "
5326 "for 'E' output modifier");
5327 return;
5328
5329 case 'J':
5330 if (GET_CODE (x) == SYMBOL_REF)
5331 {
5332 fprintf (file, "%s", ":tls_load:");
5333 output_addr_const (file, x);
5334 }
5335 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
5336 {
5337 fprintf (file, "%s", ":tls_gdcall:");
5338 output_addr_const (file, XVECEXP (x, 0, 0));
5339 }
5340 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM)
5341 {
5342 fprintf (file, "%s", ":tls_ldcall:");
5343 assemble_name (file, get_some_local_dynamic_name ());
5344 }
5345 else
5346 output_operand_lossage ("invalid reference for 'J' output modifier");
5347 return;
5348
5349 case 'G':
5350 fprintf (file, "%u", GET_MODE_SIZE (GET_MODE (x)));
5351 return;
5352
5353 case 'O':
5354 {
5355 struct s390_address ad;
5356 int ret;
5357
5358 if (!MEM_P (x))
5359 {
5360 output_operand_lossage ("memory reference expected for "
5361 "'O' output modifier");
5362 return;
5363 }
5364
5365 ret = s390_decompose_address (XEXP (x, 0), &ad);
5366
5367 if (!ret
5368 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5369 || ad.indx)
5370 {
5371 output_operand_lossage ("invalid address for 'O' output modifier");
5372 return;
5373 }
5374
5375 if (ad.disp)
5376 output_addr_const (file, ad.disp);
5377 else
5378 fprintf (file, "0");
5379 }
5380 return;
5381
5382 case 'R':
5383 {
5384 struct s390_address ad;
5385 int ret;
5386
5387 if (!MEM_P (x))
5388 {
5389 output_operand_lossage ("memory reference expected for "
5390 "'R' output modifier");
5391 return;
5392 }
5393
5394 ret = s390_decompose_address (XEXP (x, 0), &ad);
5395
5396 if (!ret
5397 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5398 || ad.indx)
5399 {
5400 output_operand_lossage ("invalid address for 'R' output modifier");
5401 return;
5402 }
5403
5404 if (ad.base)
5405 fprintf (file, "%s", reg_names[REGNO (ad.base)]);
5406 else
5407 fprintf (file, "0");
5408 }
5409 return;
5410
5411 case 'S':
5412 {
5413 struct s390_address ad;
5414 int ret;
5415
5416 if (!MEM_P (x))
5417 {
5418 output_operand_lossage ("memory reference expected for "
5419 "'S' output modifier");
5420 return;
5421 }
5422 ret = s390_decompose_address (XEXP (x, 0), &ad);
5423
5424 if (!ret
5425 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5426 || ad.indx)
5427 {
5428 output_operand_lossage ("invalid address for 'S' output modifier");
5429 return;
5430 }
5431
5432 if (ad.disp)
5433 output_addr_const (file, ad.disp);
5434 else
5435 fprintf (file, "0");
5436
5437 if (ad.base)
5438 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
5439 }
5440 return;
5441
5442 case 'N':
5443 if (GET_CODE (x) == REG)
5444 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
5445 else if (GET_CODE (x) == MEM)
5446 x = change_address (x, VOIDmode,
5447 plus_constant (Pmode, XEXP (x, 0), 4));
5448 else
5449 output_operand_lossage ("register or memory expression expected "
5450 "for 'N' output modifier");
5451 break;
5452
5453 case 'M':
5454 if (GET_CODE (x) == REG)
5455 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
5456 else if (GET_CODE (x) == MEM)
5457 x = change_address (x, VOIDmode,
5458 plus_constant (Pmode, XEXP (x, 0), 8));
5459 else
5460 output_operand_lossage ("register or memory expression expected "
5461 "for 'M' output modifier");
5462 break;
5463
5464 case 'Y':
5465 print_shift_count_operand (file, x);
5466 return;
5467 }
5468
5469 switch (GET_CODE (x))
5470 {
5471 case REG:
5472 fprintf (file, "%s", reg_names[REGNO (x)]);
5473 break;
5474
5475 case MEM:
5476 output_address (XEXP (x, 0));
5477 break;
5478
5479 case CONST:
5480 case CODE_LABEL:
5481 case LABEL_REF:
5482 case SYMBOL_REF:
5483 output_addr_const (file, x);
5484 break;
5485
5486 case CONST_INT:
5487 if (code == 'b')
5488 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xff);
5489 else if (code == 'c')
5490 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ((INTVAL (x) & 0xff) ^ 0x80) - 0x80);
5491 else if (code == 'x')
5492 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xffff);
5493 else if (code == 'h')
5494 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
5495 else if (code == 'i')
5496 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5497 s390_extract_part (x, HImode, 0));
5498 else if (code == 'j')
5499 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5500 s390_extract_part (x, HImode, -1));
5501 else if (code == 'k')
5502 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5503 s390_extract_part (x, SImode, 0));
5504 else if (code == 'm')
5505 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5506 s390_extract_part (x, SImode, -1));
5507 else if (code == 'o')
5508 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xffffffff);
5509 else
5510 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
5511 break;
5512
5513 case CONST_DOUBLE:
5514 gcc_assert (GET_MODE (x) == VOIDmode);
5515 if (code == 'b')
5516 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xff);
5517 else if (code == 'x')
5518 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xffff);
5519 else if (code == 'h')
5520 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5521 ((CONST_DOUBLE_LOW (x) & 0xffff) ^ 0x8000) - 0x8000);
5522 else
5523 {
5524 if (code == 0)
5525 output_operand_lossage ("invalid constant - try using "
5526 "an output modifier");
5527 else
5528 output_operand_lossage ("invalid constant for output modifier '%c'",
5529 code);
5530 }
5531 break;
5532
5533 default:
5534 if (code == 0)
5535 output_operand_lossage ("invalid expression - try using "
5536 "an output modifier");
5537 else
5538 output_operand_lossage ("invalid expression for output "
5539 "modifier '%c'", code);
5540 break;
5541 }
5542 }
5543
5544 /* Target hook for assembling integer objects. We need to define it
5545 here to work around a bug in some versions of GAS, which couldn't
5546 handle values smaller than INT_MIN when printed in decimal. */
5547
5548 static bool
5549 s390_assemble_integer (rtx x, unsigned int size, int aligned_p)
5550 {
5551 if (size == 8 && aligned_p
5552 && GET_CODE (x) == CONST_INT && INTVAL (x) < INT_MIN)
5553 {
5554 fprintf (asm_out_file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n",
5555 INTVAL (x));
5556 return true;
5557 }
5558 return default_assemble_integer (x, size, aligned_p);
5559 }
5560
5561 /* Returns true if register REGNO is used for forming
5562 a memory address in expression X. */
5563
5564 static bool
5565 reg_used_in_mem_p (int regno, rtx x)
5566 {
5567 enum rtx_code code = GET_CODE (x);
5568 int i, j;
5569 const char *fmt;
5570
5571 if (code == MEM)
5572 {
5573 if (refers_to_regno_p (regno, regno+1,
5574 XEXP (x, 0), 0))
5575 return true;
5576 }
5577 else if (code == SET
5578 && GET_CODE (SET_DEST (x)) == PC)
5579 {
5580 if (refers_to_regno_p (regno, regno+1,
5581 SET_SRC (x), 0))
5582 return true;
5583 }
5584
5585 fmt = GET_RTX_FORMAT (code);
5586 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5587 {
5588 if (fmt[i] == 'e'
5589 && reg_used_in_mem_p (regno, XEXP (x, i)))
5590 return true;
5591
5592 else if (fmt[i] == 'E')
5593 for (j = 0; j < XVECLEN (x, i); j++)
5594 if (reg_used_in_mem_p (regno, XVECEXP (x, i, j)))
5595 return true;
5596 }
5597 return false;
5598 }
5599
5600 /* Returns true if expression DEP_RTX sets an address register
5601 used by instruction INSN to address memory. */
5602
5603 static bool
5604 addr_generation_dependency_p (rtx dep_rtx, rtx insn)
5605 {
5606 rtx target, pat;
5607
5608 if (GET_CODE (dep_rtx) == INSN)
5609 dep_rtx = PATTERN (dep_rtx);
5610
5611 if (GET_CODE (dep_rtx) == SET)
5612 {
5613 target = SET_DEST (dep_rtx);
5614 if (GET_CODE (target) == STRICT_LOW_PART)
5615 target = XEXP (target, 0);
5616 while (GET_CODE (target) == SUBREG)
5617 target = SUBREG_REG (target);
5618
5619 if (GET_CODE (target) == REG)
5620 {
5621 int regno = REGNO (target);
5622
5623 if (s390_safe_attr_type (insn) == TYPE_LA)
5624 {
5625 pat = PATTERN (insn);
5626 if (GET_CODE (pat) == PARALLEL)
5627 {
5628 gcc_assert (XVECLEN (pat, 0) == 2);
5629 pat = XVECEXP (pat, 0, 0);
5630 }
5631 gcc_assert (GET_CODE (pat) == SET);
5632 return refers_to_regno_p (regno, regno+1, SET_SRC (pat), 0);
5633 }
5634 else if (get_attr_atype (insn) == ATYPE_AGEN)
5635 return reg_used_in_mem_p (regno, PATTERN (insn));
5636 }
5637 }
5638 return false;
5639 }
5640
5641 /* Return 1 if DEP_INSN sets a register used by INSN in the address generation (agen) unit. */
5642
5643 int
5644 s390_agen_dep_p (rtx dep_insn, rtx insn)
5645 {
5646 rtx dep_rtx = PATTERN (dep_insn);
5647 int i;
5648
5649 if (GET_CODE (dep_rtx) == SET
5650 && addr_generation_dependency_p (dep_rtx, insn))
5651 return 1;
5652 else if (GET_CODE (dep_rtx) == PARALLEL)
5653 {
5654 for (i = 0; i < XVECLEN (dep_rtx, 0); i++)
5655 {
5656 if (addr_generation_dependency_p (XVECEXP (dep_rtx, 0, i), insn))
5657 return 1;
5658 }
5659 }
5660 return 0;
5661 }
5662
5663
5664 /* A C statement (sans semicolon) to update the integer scheduling priority
5665 INSN_PRIORITY (INSN). Increase the priority to execute INSN earlier;
5666 reduce the priority to execute INSN later. Do not define this macro if
5667 you do not need to adjust the scheduling priorities of insns.
5668
5669 A STD instruction should be scheduled earlier,
5670 in order to use the bypass. */
5671 static int
5672 s390_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
5673 {
5674 if (! INSN_P (insn))
5675 return priority;
5676
5677 if (s390_tune != PROCESSOR_2084_Z990
5678 && s390_tune != PROCESSOR_2094_Z9_109
5679 && s390_tune != PROCESSOR_2097_Z10
5680 && s390_tune != PROCESSOR_2817_Z196)
5681 return priority;
5682
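/* Boost the priority of store-type insns so they are issued early enough to
   make use of the store bypass; floating-point stores get the largest boost.  */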
5683 switch (s390_safe_attr_type (insn))
5684 {
5685 case TYPE_FSTOREDF:
5686 case TYPE_FSTORESF:
5687 priority = priority << 3;
5688 break;
5689 case TYPE_STORE:
5690 case TYPE_STM:
5691 priority = priority << 1;
5692 break;
5693 default:
5694 break;
5695 }
5696 return priority;
5697 }
5698
5699
5700 /* The number of instructions that can be issued per cycle. */
5701
5702 static int
5703 s390_issue_rate (void)
5704 {
5705 switch (s390_tune)
5706 {
5707 case PROCESSOR_2084_Z990:
5708 case PROCESSOR_2094_Z9_109:
5709 case PROCESSOR_2817_Z196:
5710 return 3;
5711 case PROCESSOR_2097_Z10:
5712 return 2;
5713 default:
5714 return 1;
5715 }
5716 }
5717
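/* The number of insns the multipass scheduler may look ahead when choosing
   an insn for the current cycle (first-cycle multipass DFA lookahead hook).  */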
5718 static int
5719 s390_first_cycle_multipass_dfa_lookahead (void)
5720 {
5721 return 4;
5722 }
5723
5724 /* Annotate every literal pool reference in X by an UNSPEC_LTREF expression.
5725 Fix up MEMs as required. */
5726
5727 static void
5728 annotate_constant_pool_refs (rtx *x)
5729 {
5730 int i, j;
5731 const char *fmt;
5732
5733 gcc_assert (GET_CODE (*x) != SYMBOL_REF
5734 || !CONSTANT_POOL_ADDRESS_P (*x));
5735
5736 /* Literal pool references can only occur inside a MEM ... */
5737 if (GET_CODE (*x) == MEM)
5738 {
5739 rtx memref = XEXP (*x, 0);
5740
5741 if (GET_CODE (memref) == SYMBOL_REF
5742 && CONSTANT_POOL_ADDRESS_P (memref))
5743 {
5744 rtx base = cfun->machine->base_reg;
5745 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, memref, base),
5746 UNSPEC_LTREF);
5747
5748 *x = replace_equiv_address (*x, addr);
5749 return;
5750 }
5751
5752 if (GET_CODE (memref) == CONST
5753 && GET_CODE (XEXP (memref, 0)) == PLUS
5754 && GET_CODE (XEXP (XEXP (memref, 0), 1)) == CONST_INT
5755 && GET_CODE (XEXP (XEXP (memref, 0), 0)) == SYMBOL_REF
5756 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (memref, 0), 0)))
5757 {
5758 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (memref, 0), 1));
5759 rtx sym = XEXP (XEXP (memref, 0), 0);
5760 rtx base = cfun->machine->base_reg;
5761 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5762 UNSPEC_LTREF);
5763
5764 *x = replace_equiv_address (*x, plus_constant (Pmode, addr, off));
5765 return;
5766 }
5767 }
5768
5769 /* ... or a load-address type pattern. */
5770 if (GET_CODE (*x) == SET)
5771 {
5772 rtx addrref = SET_SRC (*x);
5773
5774 if (GET_CODE (addrref) == SYMBOL_REF
5775 && CONSTANT_POOL_ADDRESS_P (addrref))
5776 {
5777 rtx base = cfun->machine->base_reg;
5778 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addrref, base),
5779 UNSPEC_LTREF);
5780
5781 SET_SRC (*x) = addr;
5782 return;
5783 }
5784
5785 if (GET_CODE (addrref) == CONST
5786 && GET_CODE (XEXP (addrref, 0)) == PLUS
5787 && GET_CODE (XEXP (XEXP (addrref, 0), 1)) == CONST_INT
5788 && GET_CODE (XEXP (XEXP (addrref, 0), 0)) == SYMBOL_REF
5789 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (addrref, 0), 0)))
5790 {
5791 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (addrref, 0), 1));
5792 rtx sym = XEXP (XEXP (addrref, 0), 0);
5793 rtx base = cfun->machine->base_reg;
5794 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5795 UNSPEC_LTREF);
5796
5797 SET_SRC (*x) = plus_constant (Pmode, addr, off);
5798 return;
5799 }
5800 }
5801
5802 /* Annotate LTREL_BASE as well. */
5803 if (GET_CODE (*x) == UNSPEC
5804 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
5805 {
5806 rtx base = cfun->machine->base_reg;
5807 *x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XVECEXP (*x, 0, 0), base),
5808 UNSPEC_LTREL_BASE);
5809 return;
5810 }
5811
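  /* Otherwise recurse into every operand and vector element so that
     references nested deeper inside *X are annotated as well.  */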
5812 fmt = GET_RTX_FORMAT (GET_CODE (*x));
5813 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
5814 {
5815 if (fmt[i] == 'e')
5816 {
5817 annotate_constant_pool_refs (&XEXP (*x, i));
5818 }
5819 else if (fmt[i] == 'E')
5820 {
5821 for (j = 0; j < XVECLEN (*x, i); j++)
5822 annotate_constant_pool_refs (&XVECEXP (*x, i, j));
5823 }
5824 }
5825 }
5826
5827 /* Split all branches that exceed the maximum distance.
5828 Returns true if this created a new literal pool entry. */
5829
5830 static int
5831 s390_split_branches (void)
5832 {
5833 rtx temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
5834 int new_literal = 0, ret;
5835 rtx insn, pat, tmp, target;
5836 rtx *label;
5837
5838 /* We need correct insn addresses. */
5839
5840 shorten_branches (get_insns ());
5841
5842 /* Find all branches that exceed 64KB, and split them. */
5843
5844 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5845 {
5846 if (GET_CODE (insn) != JUMP_INSN)
5847 continue;
5848
5849 pat = PATTERN (insn);
5850 if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 2)
5851 pat = XVECEXP (pat, 0, 0);
5852 if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
5853 continue;
5854
5855 if (GET_CODE (SET_SRC (pat)) == LABEL_REF)
5856 {
5857 label = &SET_SRC (pat);
5858 }
5859 else if (GET_CODE (SET_SRC (pat)) == IF_THEN_ELSE)
5860 {
5861 if (GET_CODE (XEXP (SET_SRC (pat), 1)) == LABEL_REF)
5862 label = &XEXP (SET_SRC (pat), 1);
5863 else if (GET_CODE (XEXP (SET_SRC (pat), 2)) == LABEL_REF)
5864 label = &XEXP (SET_SRC (pat), 2);
5865 else
5866 continue;
5867 }
5868 else
5869 continue;
5870
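      /* A branch whose computed length is still 4 bytes uses the short
	 relative form and is therefore in range; only branches for which
	 shorten_branches selected a longer form need to be split here.  */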
5871 if (get_attr_length (insn) <= 4)
5872 continue;
5873
5874 /* We are going to use the return register as a scratch register;
5875 make sure it will be saved/restored by the prologue/epilogue. */
5876 cfun_frame_layout.save_return_addr_p = 1;
5877
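      /* Materialize the branch target address in the scratch register.
	 Without PIC the absolute address of the label is placed in the
	 literal pool; with PIC a pool-relative LTREL_OFFSET entry is used
	 instead and the pool base (LTREL_BASE) is added back at run time.  */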
5878 if (!flag_pic)
5879 {
5880 new_literal = 1;
5881 tmp = force_const_mem (Pmode, *label);
5882 tmp = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, tmp), insn);
5883 INSN_ADDRESSES_NEW (tmp, -1);
5884 annotate_constant_pool_refs (&PATTERN (tmp));
5885
5886 target = temp_reg;
5887 }
5888 else
5889 {
5890 new_literal = 1;
5891 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, *label),
5892 UNSPEC_LTREL_OFFSET);
5893 target = gen_rtx_CONST (Pmode, target);
5894 target = force_const_mem (Pmode, target);
5895 tmp = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, target), insn);
5896 INSN_ADDRESSES_NEW (tmp, -1);
5897 annotate_constant_pool_refs (&PATTERN (tmp));
5898
5899 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XEXP (target, 0),
5900 cfun->machine->base_reg),
5901 UNSPEC_LTREL_BASE);
5902 target = gen_rtx_PLUS (Pmode, temp_reg, target);
5903 }
5904
5905 ret = validate_change (insn, label, target, 0);
5906 gcc_assert (ret);
5907 }
5908
5909 return new_literal;
5910 }
5911
5912
5913 /* Find an annotated literal pool symbol referenced in RTX X,
5914 and store it at REF. Will abort if X contains references to
5915 more than one such pool symbol; multiple references to the same
5916 symbol are allowed, however.
5917
5918 The rtx pointed to by REF must be initialized to NULL_RTX
5919 by the caller before calling this routine. */
5920
5921 static void
5922 find_constant_pool_ref (rtx x, rtx *ref)
5923 {
5924 int i, j;
5925 const char *fmt;
5926
5927 /* Ignore LTREL_BASE references. */
5928 if (GET_CODE (x) == UNSPEC
5929 && XINT (x, 1) == UNSPEC_LTREL_BASE)
5930 return;
5931 /* Likewise POOL_ENTRY insns. */
5932 if (GET_CODE (x) == UNSPEC_VOLATILE
5933 && XINT (x, 1) == UNSPECV_POOL_ENTRY)
5934 return;
5935
5936 gcc_assert (GET_CODE (x) != SYMBOL_REF
5937 || !CONSTANT_POOL_ADDRESS_P (x));
5938
5939 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_LTREF)
5940 {
5941 rtx sym = XVECEXP (x, 0, 0);
5942 gcc_assert (GET_CODE (sym) == SYMBOL_REF
5943 && CONSTANT_POOL_ADDRESS_P (sym));
5944
5945 if (*ref == NULL_RTX)
5946 *ref = sym;
5947 else
5948 gcc_assert (*ref == sym);
5949
5950 return;
5951 }
5952
5953 fmt = GET_RTX_FORMAT (GET_CODE (x));
5954 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5955 {
5956 if (fmt[i] == 'e')
5957 {
5958 find_constant_pool_ref (XEXP (x, i), ref);
5959 }
5960 else if (fmt[i] == 'E')
5961 {
5962 for (j = 0; j < XVECLEN (x, i); j++)
5963 find_constant_pool_ref (XVECEXP (x, i, j), ref);
5964 }
5965 }
5966 }
5967
5968 /* Replace every reference to the annotated literal pool
5969 symbol REF in X by its base plus OFFSET. */
5970
5971 static void
5972 replace_constant_pool_ref (rtx *x, rtx ref, rtx offset)
5973 {
5974 int i, j;
5975 const char *fmt;
5976
5977 gcc_assert (*x != ref);
5978
5979 if (GET_CODE (*x) == UNSPEC
5980 && XINT (*x, 1) == UNSPEC_LTREF
5981 && XVECEXP (*x, 0, 0) == ref)
5982 {
5983 *x = gen_rtx_PLUS (Pmode, XVECEXP (*x, 0, 1), offset);
5984 return;
5985 }
5986
5987 if (GET_CODE (*x) == PLUS
5988 && GET_CODE (XEXP (*x, 1)) == CONST_INT
5989 && GET_CODE (XEXP (*x, 0)) == UNSPEC
5990 && XINT (XEXP (*x, 0), 1) == UNSPEC_LTREF
5991 && XVECEXP (XEXP (*x, 0), 0, 0) == ref)
5992 {
5993 rtx addr = gen_rtx_PLUS (Pmode, XVECEXP (XEXP (*x, 0), 0, 1), offset);
5994 *x = plus_constant (Pmode, addr, INTVAL (XEXP (*x, 1)));
5995 return;
5996 }
5997
5998 fmt = GET_RTX_FORMAT (GET_CODE (*x));
5999 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
6000 {
6001 if (fmt[i] == 'e')
6002 {
6003 replace_constant_pool_ref (&XEXP (*x, i), ref, offset);
6004 }
6005 else if (fmt[i] == 'E')
6006 {
6007 for (j = 0; j < XVECLEN (*x, i); j++)
6008 replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, offset);
6009 }
6010 }
6011 }
6012
6013 /* Check whether X contains an UNSPEC_LTREL_BASE.
6014 Return its constant pool symbol if found, NULL_RTX otherwise. */
6015
6016 static rtx
6017 find_ltrel_base (rtx x)
6018 {
6019 int i, j;
6020 const char *fmt;
6021
6022 if (GET_CODE (x) == UNSPEC
6023 && XINT (x, 1) == UNSPEC_LTREL_BASE)
6024 return XVECEXP (x, 0, 0);
6025
6026 fmt = GET_RTX_FORMAT (GET_CODE (x));
6027 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6028 {
6029 if (fmt[i] == 'e')
6030 {
6031 rtx fnd = find_ltrel_base (XEXP (x, i));
6032 if (fnd)
6033 return fnd;
6034 }
6035 else if (fmt[i] == 'E')
6036 {
6037 for (j = 0; j < XVECLEN (x, i); j++)
6038 {
6039 rtx fnd = find_ltrel_base (XVECEXP (x, i, j));
6040 if (fnd)
6041 return fnd;
6042 }
6043 }
6044 }
6045
6046 return NULL_RTX;
6047 }
6048
6049 /* Replace any occurrence of UNSPEC_LTREL_BASE in X with its base. */
6050
6051 static void
6052 replace_ltrel_base (rtx *x)
6053 {
6054 int i, j;
6055 const char *fmt;
6056
6057 if (GET_CODE (*x) == UNSPEC
6058 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
6059 {
6060 *x = XVECEXP (*x, 0, 1);
6061 return;
6062 }
6063
6064 fmt = GET_RTX_FORMAT (GET_CODE (*x));
6065 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
6066 {
6067 if (fmt[i] == 'e')
6068 {
6069 replace_ltrel_base (&XEXP (*x, i));
6070 }
6071 else if (fmt[i] == 'E')
6072 {
6073 for (j = 0; j < XVECLEN (*x, i); j++)
6074 replace_ltrel_base (&XVECEXP (*x, i, j));
6075 }
6076 }
6077 }
6078
6079
6080 /* We keep a list of constants which we have to add to internal
6081 constant tables in the middle of large functions. */
6082
6083 #define NR_C_MODES 11
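/* The modes are listed in order of decreasing size (and hence alignment
   requirement), so that s390_dump_pool can emit the entries front to back
   with each entry naturally aligned and no padding in between.  */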
6084 enum machine_mode constant_modes[NR_C_MODES] =
6085 {
6086 TFmode, TImode, TDmode,
6087 DFmode, DImode, DDmode,
6088 SFmode, SImode, SDmode,
6089 HImode,
6090 QImode
6091 };
6092
6093 struct constant
6094 {
6095 struct constant *next;
6096 rtx value;
6097 rtx label;
6098 };
6099
6100 struct constant_pool
6101 {
6102 struct constant_pool *next;
6103 rtx first_insn;
6104 rtx pool_insn;
6105 bitmap insns;
6106 rtx emit_pool_after;
6107
6108 struct constant *constants[NR_C_MODES];
6109 struct constant *execute;
6110 rtx label;
6111 int size;
6112 };
6113
6114 /* Allocate new constant_pool structure. */
6115
6116 static struct constant_pool *
6117 s390_alloc_pool (void)
6118 {
6119 struct constant_pool *pool;
6120 int i;
6121
6122 pool = (struct constant_pool *) xmalloc (sizeof *pool);
6123 pool->next = NULL;
6124 for (i = 0; i < NR_C_MODES; i++)
6125 pool->constants[i] = NULL;
6126
6127 pool->execute = NULL;
6128 pool->label = gen_label_rtx ();
6129 pool->first_insn = NULL_RTX;
6130 pool->pool_insn = NULL_RTX;
6131 pool->insns = BITMAP_ALLOC (NULL);
6132 pool->size = 0;
6133 pool->emit_pool_after = NULL_RTX;
6134
6135 return pool;
6136 }
6137
6138 /* Create new constant pool covering instructions starting at INSN
6139 and chain it to the end of POOL_LIST. */
6140
6141 static struct constant_pool *
6142 s390_start_pool (struct constant_pool **pool_list, rtx insn)
6143 {
6144 struct constant_pool *pool, **prev;
6145
6146 pool = s390_alloc_pool ();
6147 pool->first_insn = insn;
6148
6149 for (prev = pool_list; *prev; prev = &(*prev)->next)
6150 ;
6151 *prev = pool;
6152
6153 return pool;
6154 }
6155
6156 /* End range of instructions covered by POOL at INSN and emit
6157 placeholder insn representing the pool. */
6158
6159 static void
6160 s390_end_pool (struct constant_pool *pool, rtx insn)
6161 {
6162 rtx pool_size = GEN_INT (pool->size + 8 /* alignment slop */);
6163
6164 if (!insn)
6165 insn = get_last_insn ();
6166
6167 pool->pool_insn = emit_insn_after (gen_pool (pool_size), insn);
6168 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6169 }
6170
6171 /* Add INSN to the list of insns covered by POOL. */
6172
6173 static void
6174 s390_add_pool_insn (struct constant_pool *pool, rtx insn)
6175 {
6176 bitmap_set_bit (pool->insns, INSN_UID (insn));
6177 }
6178
6179 /* Return pool out of POOL_LIST that covers INSN. */
6180
6181 static struct constant_pool *
6182 s390_find_pool (struct constant_pool *pool_list, rtx insn)
6183 {
6184 struct constant_pool *pool;
6185
6186 for (pool = pool_list; pool; pool = pool->next)
6187 if (bitmap_bit_p (pool->insns, INSN_UID (insn)))
6188 break;
6189
6190 return pool;
6191 }
6192
6193 /* Add constant VAL of mode MODE to the constant pool POOL. */
6194
6195 static void
6196 s390_add_constant (struct constant_pool *pool, rtx val, enum machine_mode mode)
6197 {
6198 struct constant *c;
6199 int i;
6200
6201 for (i = 0; i < NR_C_MODES; i++)
6202 if (constant_modes[i] == mode)
6203 break;
6204 gcc_assert (i != NR_C_MODES);
6205
6206 for (c = pool->constants[i]; c != NULL; c = c->next)
6207 if (rtx_equal_p (val, c->value))
6208 break;
6209
6210 if (c == NULL)
6211 {
6212 c = (struct constant *) xmalloc (sizeof *c);
6213 c->value = val;
6214 c->label = gen_label_rtx ();
6215 c->next = pool->constants[i];
6216 pool->constants[i] = c;
6217 pool->size += GET_MODE_SIZE (mode);
6218 }
6219 }
6220
6221 /* Return an rtx that represents the offset of X from the start of
6222 pool POOL. */
6223
6224 static rtx
6225 s390_pool_offset (struct constant_pool *pool, rtx x)
6226 {
6227 rtx label;
6228
6229 label = gen_rtx_LABEL_REF (GET_MODE (x), pool->label);
6230 x = gen_rtx_UNSPEC (GET_MODE (x), gen_rtvec (2, x, label),
6231 UNSPEC_POOL_OFFSET);
6232 return gen_rtx_CONST (GET_MODE (x), x);
6233 }
6234
6235 /* Find constant VAL of mode MODE in the constant pool POOL.
6236 Return an RTX describing the distance from the start of
6237 the pool to the location of the constant. */
6238
6239 static rtx
6240 s390_find_constant (struct constant_pool *pool, rtx val,
6241 enum machine_mode mode)
6242 {
6243 struct constant *c;
6244 int i;
6245
6246 for (i = 0; i < NR_C_MODES; i++)
6247 if (constant_modes[i] == mode)
6248 break;
6249 gcc_assert (i != NR_C_MODES);
6250
6251 for (c = pool->constants[i]; c != NULL; c = c->next)
6252 if (rtx_equal_p (val, c->value))
6253 break;
6254
6255 gcc_assert (c);
6256
6257 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
6258 }
6259
6260 /* Check whether INSN is an execute. Return the label_ref to its
6261 execute target template if so, NULL_RTX otherwise. */
6262
6263 static rtx
6264 s390_execute_label (rtx insn)
6265 {
6266 if (GET_CODE (insn) == INSN
6267 && GET_CODE (PATTERN (insn)) == PARALLEL
6268 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == UNSPEC
6269 && XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_EXECUTE)
6270 return XVECEXP (XVECEXP (PATTERN (insn), 0, 0), 0, 2);
6271
6272 return NULL_RTX;
6273 }
6274
6275 /* Add execute target for INSN to the constant pool POOL. */
6276
6277 static void
6278 s390_add_execute (struct constant_pool *pool, rtx insn)
6279 {
6280 struct constant *c;
6281
6282 for (c = pool->execute; c != NULL; c = c->next)
6283 if (INSN_UID (insn) == INSN_UID (c->value))
6284 break;
6285
6286 if (c == NULL)
6287 {
6288 c = (struct constant *) xmalloc (sizeof *c);
6289 c->value = insn;
6290 c->label = gen_label_rtx ();
6291 c->next = pool->execute;
6292 pool->execute = c;
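      /* Reserve space for the execute target template; 6 bytes is the
	 maximum length of a single s390 instruction.  */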
6293 pool->size += 6;
6294 }
6295 }
6296
6297 /* Find execute target for INSN in the constant pool POOL.
6298 Return an RTX describing the distance from the start of
6299 the pool to the location of the execute target. */
6300
6301 static rtx
6302 s390_find_execute (struct constant_pool *pool, rtx insn)
6303 {
6304 struct constant *c;
6305
6306 for (c = pool->execute; c != NULL; c = c->next)
6307 if (INSN_UID (insn) == INSN_UID (c->value))
6308 break;
6309
6310 gcc_assert (c);
6311
6312 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
6313 }
6314
6315 /* For an execute INSN, extract the execute target template. */
6316
6317 static rtx
6318 s390_execute_target (rtx insn)
6319 {
6320 rtx pattern = PATTERN (insn);
6321 gcc_assert (s390_execute_label (insn));
6322
6323 if (XVECLEN (pattern, 0) == 2)
6324 {
6325 pattern = copy_rtx (XVECEXP (pattern, 0, 1));
6326 }
6327 else
6328 {
6329 rtvec vec = rtvec_alloc (XVECLEN (pattern, 0) - 1);
6330 int i;
6331
6332 for (i = 0; i < XVECLEN (pattern, 0) - 1; i++)
6333 RTVEC_ELT (vec, i) = copy_rtx (XVECEXP (pattern, 0, i + 1));
6334
6335 pattern = gen_rtx_PARALLEL (VOIDmode, vec);
6336 }
6337
6338 return pattern;
6339 }
6340
6341 /* Indicate that INSN cannot be duplicated. This is the case for
6342 execute insns that carry a unique label. */
6343
6344 static bool
6345 s390_cannot_copy_insn_p (rtx insn)
6346 {
6347 rtx label = s390_execute_label (insn);
6348 return label && label != const0_rtx;
6349 }
6350
6351 /* Dump out the constants in POOL. If REMOTE_LABEL is true,
6352 do not emit the pool base label. */
6353
6354 static void
6355 s390_dump_pool (struct constant_pool *pool, bool remote_label)
6356 {
6357 struct constant *c;
6358 rtx insn = pool->pool_insn;
6359 int i;
6360
6361 /* Switch to rodata section. */
6362 if (TARGET_CPU_ZARCH)
6363 {
6364 insn = emit_insn_after (gen_pool_section_start (), insn);
6365 INSN_ADDRESSES_NEW (insn, -1);
6366 }
6367
6368 /* Ensure minimum pool alignment. */
6369 if (TARGET_CPU_ZARCH)
6370 insn = emit_insn_after (gen_pool_align (GEN_INT (8)), insn);
6371 else
6372 insn = emit_insn_after (gen_pool_align (GEN_INT (4)), insn);
6373 INSN_ADDRESSES_NEW (insn, -1);
6374
6375 /* Emit pool base label. */
6376 if (!remote_label)
6377 {
6378 insn = emit_label_after (pool->label, insn);
6379 INSN_ADDRESSES_NEW (insn, -1);
6380 }
6381
6382 /* Dump constants in descending alignment requirement order,
6383 ensuring proper alignment for every constant. */
6384 for (i = 0; i < NR_C_MODES; i++)
6385 for (c = pool->constants[i]; c; c = c->next)
6386 {
6387 /* Convert UNSPEC_LTREL_OFFSET unspecs to pool-relative references. */
6388 rtx value = copy_rtx (c->value);
6389 if (GET_CODE (value) == CONST
6390 && GET_CODE (XEXP (value, 0)) == UNSPEC
6391 && XINT (XEXP (value, 0), 1) == UNSPEC_LTREL_OFFSET
6392 && XVECLEN (XEXP (value, 0), 0) == 1)
6393 value = s390_pool_offset (pool, XVECEXP (XEXP (value, 0), 0, 0));
6394
6395 insn = emit_label_after (c->label, insn);
6396 INSN_ADDRESSES_NEW (insn, -1);
6397
6398 value = gen_rtx_UNSPEC_VOLATILE (constant_modes[i],
6399 gen_rtvec (1, value),
6400 UNSPECV_POOL_ENTRY);
6401 insn = emit_insn_after (value, insn);
6402 INSN_ADDRESSES_NEW (insn, -1);
6403 }
6404
6405 /* Ensure minimum alignment for instructions. */
6406 insn = emit_insn_after (gen_pool_align (GEN_INT (2)), insn);
6407 INSN_ADDRESSES_NEW (insn, -1);
6408
6409 /* Output in-pool execute template insns. */
6410 for (c = pool->execute; c; c = c->next)
6411 {
6412 insn = emit_label_after (c->label, insn);
6413 INSN_ADDRESSES_NEW (insn, -1);
6414
6415 insn = emit_insn_after (s390_execute_target (c->value), insn);
6416 INSN_ADDRESSES_NEW (insn, -1);
6417 }
6418
6419 /* Switch back to previous section. */
6420 if (TARGET_CPU_ZARCH)
6421 {
6422 insn = emit_insn_after (gen_pool_section_end (), insn);
6423 INSN_ADDRESSES_NEW (insn, -1);
6424 }
6425
6426 insn = emit_barrier_after (insn);
6427 INSN_ADDRESSES_NEW (insn, -1);
6428
6429 /* Remove placeholder insn. */
6430 remove_insn (pool->pool_insn);
6431 }
6432
6433 /* Free all memory used by POOL. */
6434
6435 static void
6436 s390_free_pool (struct constant_pool *pool)
6437 {
6438 struct constant *c, *next;
6439 int i;
6440
6441 for (i = 0; i < NR_C_MODES; i++)
6442 for (c = pool->constants[i]; c; c = next)
6443 {
6444 next = c->next;
6445 free (c);
6446 }
6447
6448 for (c = pool->execute; c; c = next)
6449 {
6450 next = c->next;
6451 free (c);
6452 }
6453
6454 BITMAP_FREE (pool->insns);
6455 free (pool);
6456 }
6457
6458
6459 /* Collect main literal pool. Return NULL on overflow. */
6460
6461 static struct constant_pool *
6462 s390_mainpool_start (void)
6463 {
6464 struct constant_pool *pool;
6465 rtx insn;
6466
6467 pool = s390_alloc_pool ();
6468
6469 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6470 {
6471 if (GET_CODE (insn) == INSN
6472 && GET_CODE (PATTERN (insn)) == SET
6473 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC_VOLATILE
6474 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPECV_MAIN_POOL)
6475 {
6476 gcc_assert (!pool->pool_insn);
6477 pool->pool_insn = insn;
6478 }
6479
6480 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
6481 {
6482 s390_add_execute (pool, insn);
6483 }
6484 else if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6485 {
6486 rtx pool_ref = NULL_RTX;
6487 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6488 if (pool_ref)
6489 {
6490 rtx constant = get_pool_constant (pool_ref);
6491 enum machine_mode mode = get_pool_mode (pool_ref);
6492 s390_add_constant (pool, constant, mode);
6493 }
6494 }
6495
6496 /* If hot/cold partitioning is enabled, we have to make sure that
6497 the literal pool is emitted in the same section where the
6498 initialization of the literal pool base pointer takes place.
6499 emit_pool_after is only used in the non-overflow case on
6500 non-Z CPUs, where we can emit the literal pool at the end of the
6501 function body within the text section. */
6502 if (NOTE_P (insn)
6503 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS
6504 && !pool->emit_pool_after)
6505 pool->emit_pool_after = PREV_INSN (insn);
6506 }
6507
6508 gcc_assert (pool->pool_insn || pool->size == 0);
6509
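  /* A single literal pool addressed via 12-bit displacements can span at
     most 4096 bytes; anything larger has to be split into chunks instead.  */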
6510 if (pool->size >= 4096)
6511 {
6512 /* We're going to chunkify the pool, so remove the main
6513 pool placeholder insn. */
6514 remove_insn (pool->pool_insn);
6515
6516 s390_free_pool (pool);
6517 pool = NULL;
6518 }
6519
6520 /* If the function ends with the section where the literal pool
6521 should be emitted, set the marker to its end. */
6522 if (pool && !pool->emit_pool_after)
6523 pool->emit_pool_after = get_last_insn ();
6524
6525 return pool;
6526 }
6527
6528 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6529 Modify the current function to output the pool constants as well as
6530 the pool register setup instruction. */
6531
6532 static void
6533 s390_mainpool_finish (struct constant_pool *pool)
6534 {
6535 rtx base_reg = cfun->machine->base_reg;
6536 rtx insn;
6537
6538 /* If the pool is empty, we're done. */
6539 if (pool->size == 0)
6540 {
6541 /* We don't actually need a base register after all. */
6542 cfun->machine->base_reg = NULL_RTX;
6543
6544 if (pool->pool_insn)
6545 remove_insn (pool->pool_insn);
6546 s390_free_pool (pool);
6547 return;
6548 }
6549
6550 /* We need correct insn addresses. */
6551 shorten_branches (get_insns ());
6552
6553 /* On zSeries, we use a LARL to load the pool register. The pool is
6554 located in the .rodata section, so we emit it after the function. */
6555 if (TARGET_CPU_ZARCH)
6556 {
6557 insn = gen_main_base_64 (base_reg, pool->label);
6558 insn = emit_insn_after (insn, pool->pool_insn);
6559 INSN_ADDRESSES_NEW (insn, -1);
6560 remove_insn (pool->pool_insn);
6561
6562 insn = get_last_insn ();
6563 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6564 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6565
6566 s390_dump_pool (pool, 0);
6567 }
6568
6569 /* On S/390, if the total size of the function's code plus literal pool
6570 does not exceed 4096 bytes, we use BASR to set up a function base
6571 pointer, and emit the literal pool at the end of the function. */
6572 else if (INSN_ADDRESSES (INSN_UID (pool->emit_pool_after))
6573 + pool->size + 8 /* alignment slop */ < 4096)
6574 {
6575 insn = gen_main_base_31_small (base_reg, pool->label);
6576 insn = emit_insn_after (insn, pool->pool_insn);
6577 INSN_ADDRESSES_NEW (insn, -1);
6578 remove_insn (pool->pool_insn);
6579
6580 insn = emit_label_after (pool->label, insn);
6581 INSN_ADDRESSES_NEW (insn, -1);
6582
6583 /* emit_pool_after will be set by s390_mainpool_start to the
6584 last insn of the section where the literal pool should be
6585 emitted. */
6586 insn = pool->emit_pool_after;
6587
6588 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6589 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6590
6591 s390_dump_pool (pool, 1);
6592 }
6593
6594 /* Otherwise, we emit an inline literal pool and use BASR to branch
6595 over it, setting up the pool register at the same time. */
6596 else
6597 {
6598 rtx pool_end = gen_label_rtx ();
6599
6600 insn = gen_main_base_31_large (base_reg, pool->label, pool_end);
6601 insn = emit_jump_insn_after (insn, pool->pool_insn);
6602 JUMP_LABEL (insn) = pool_end;
6603 INSN_ADDRESSES_NEW (insn, -1);
6604 remove_insn (pool->pool_insn);
6605
6606 insn = emit_label_after (pool->label, insn);
6607 INSN_ADDRESSES_NEW (insn, -1);
6608
6609 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6610 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6611
6612 insn = emit_label_after (pool_end, pool->pool_insn);
6613 INSN_ADDRESSES_NEW (insn, -1);
6614
6615 s390_dump_pool (pool, 1);
6616 }
6617
6618
6619 /* Replace all literal pool references. */
6620
6621 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6622 {
6623 if (INSN_P (insn))
6624 replace_ltrel_base (&PATTERN (insn));
6625
6626 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6627 {
6628 rtx addr, pool_ref = NULL_RTX;
6629 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6630 if (pool_ref)
6631 {
6632 if (s390_execute_label (insn))
6633 addr = s390_find_execute (pool, insn);
6634 else
6635 addr = s390_find_constant (pool, get_pool_constant (pool_ref),
6636 get_pool_mode (pool_ref));
6637
6638 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
6639 INSN_CODE (insn) = -1;
6640 }
6641 }
6642 }
6643
6644
6645 /* Free the pool. */
6646 s390_free_pool (pool);
6647 }
6648
6649 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6650 We have decided we cannot use this pool, so revert all changes
6651 to the current function that were done by s390_mainpool_start. */
6652 static void
6653 s390_mainpool_cancel (struct constant_pool *pool)
6654 {
6655 /* We didn't actually change the instruction stream, so simply
6656 free the pool memory. */
6657 s390_free_pool (pool);
6658 }
6659
6660
6661 /* Chunkify the literal pool. */
6662
6663 #define S390_POOL_CHUNK_MIN 0xc00
6664 #define S390_POOL_CHUNK_MAX 0xe00
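/* Both limits are kept well below 4096 bytes, the reach of a 12-bit
   displacement, which leaves headroom for alignment padding and for the
   base reload and branch insns that are inserted around each chunk.  */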
6665
6666 static struct constant_pool *
6667 s390_chunkify_start (void)
6668 {
6669 struct constant_pool *curr_pool = NULL, *pool_list = NULL;
6670 int extra_size = 0;
6671 bitmap far_labels;
6672 rtx pending_ltrel = NULL_RTX;
6673 rtx insn;
6674
6675 rtx (*gen_reload_base) (rtx, rtx) =
6676 TARGET_CPU_ZARCH? gen_reload_base_64 : gen_reload_base_31;
6677
6678
6679 /* We need correct insn addresses. */
6680
6681 shorten_branches (get_insns ());
6682
6683 /* Scan all insns and move literals to pool chunks. */
6684
6685 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6686 {
6687 bool section_switch_p = false;
6688
6689 /* Check for pending LTREL_BASE. */
6690 if (INSN_P (insn))
6691 {
6692 rtx ltrel_base = find_ltrel_base (PATTERN (insn));
6693 if (ltrel_base)
6694 {
6695 gcc_assert (ltrel_base == pending_ltrel);
6696 pending_ltrel = NULL_RTX;
6697 }
6698 }
6699
6700 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
6701 {
6702 if (!curr_pool)
6703 curr_pool = s390_start_pool (&pool_list, insn);
6704
6705 s390_add_execute (curr_pool, insn);
6706 s390_add_pool_insn (curr_pool, insn);
6707 }
6708 else if (GET_CODE (insn) == INSN || CALL_P (insn))
6709 {
6710 rtx pool_ref = NULL_RTX;
6711 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6712 if (pool_ref)
6713 {
6714 rtx constant = get_pool_constant (pool_ref);
6715 enum machine_mode mode = get_pool_mode (pool_ref);
6716
6717 if (!curr_pool)
6718 curr_pool = s390_start_pool (&pool_list, insn);
6719
6720 s390_add_constant (curr_pool, constant, mode);
6721 s390_add_pool_insn (curr_pool, insn);
6722
6723 /* Don't split the pool chunk between a LTREL_OFFSET load
6724 and the corresponding LTREL_BASE. */
6725 if (GET_CODE (constant) == CONST
6726 && GET_CODE (XEXP (constant, 0)) == UNSPEC
6727 && XINT (XEXP (constant, 0), 1) == UNSPEC_LTREL_OFFSET)
6728 {
6729 gcc_assert (!pending_ltrel);
6730 pending_ltrel = pool_ref;
6731 }
6732 }
6733 }
6734
6735 if (GET_CODE (insn) == JUMP_INSN || GET_CODE (insn) == CODE_LABEL)
6736 {
6737 if (curr_pool)
6738 s390_add_pool_insn (curr_pool, insn);
6739 /* An LTREL_BASE must follow within the same basic block. */
6740 gcc_assert (!pending_ltrel);
6741 }
6742
6743 if (NOTE_P (insn))
6744 switch (NOTE_KIND (insn))
6745 {
6746 case NOTE_INSN_SWITCH_TEXT_SECTIONS:
6747 section_switch_p = true;
6748 break;
6749 case NOTE_INSN_VAR_LOCATION:
6750 case NOTE_INSN_CALL_ARG_LOCATION:
6751 continue;
6752 default:
6753 break;
6754 }
6755
6756 if (!curr_pool
6757 || INSN_ADDRESSES_SIZE () <= (size_t) INSN_UID (insn)
6758 || INSN_ADDRESSES (INSN_UID (insn)) == -1)
6759 continue;
6760
6761 if (TARGET_CPU_ZARCH)
6762 {
6763 if (curr_pool->size < S390_POOL_CHUNK_MAX)
6764 continue;
6765
6766 s390_end_pool (curr_pool, NULL_RTX);
6767 curr_pool = NULL;
6768 }
6769 else
6770 {
6771 int chunk_size = INSN_ADDRESSES (INSN_UID (insn))
6772 - INSN_ADDRESSES (INSN_UID (curr_pool->first_insn))
6773 + extra_size;
6774
6775 /* We will later have to insert base register reload insns.
6776 Those will have an effect on code size, which we need to
6777 consider here. This calculation makes rather pessimistic
6778 worst-case assumptions. */
6779 if (GET_CODE (insn) == CODE_LABEL)
6780 extra_size += 6;
6781
6782 if (chunk_size < S390_POOL_CHUNK_MIN
6783 && curr_pool->size < S390_POOL_CHUNK_MIN
6784 && !section_switch_p)
6785 continue;
6786
6787 /* Pool chunks can only be inserted after BARRIERs ... */
6788 if (GET_CODE (insn) == BARRIER)
6789 {
6790 s390_end_pool (curr_pool, insn);
6791 curr_pool = NULL;
6792 extra_size = 0;
6793 }
6794
6795 /* ... so if we don't find one in time, create one. */
6796 else if (chunk_size > S390_POOL_CHUNK_MAX
6797 || curr_pool->size > S390_POOL_CHUNK_MAX
6798 || section_switch_p)
6799 {
6800 rtx label, jump, barrier, next, prev;
6801
6802 if (!section_switch_p)
6803 {
6804 /* We can insert the barrier only after a 'real' insn. */
6805 if (GET_CODE (insn) != INSN && GET_CODE (insn) != CALL_INSN)
6806 continue;
6807 if (get_attr_length (insn) == 0)
6808 continue;
6809 /* Don't separate LTREL_BASE from the corresponding
6810 LTREL_OFFSET load. */
6811 if (pending_ltrel)
6812 continue;
6813 next = insn;
6814 do
6815 {
6816 insn = next;
6817 next = NEXT_INSN (insn);
6818 }
6819 while (next
6820 && NOTE_P (next)
6821 && (NOTE_KIND (next) == NOTE_INSN_VAR_LOCATION
6822 || NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION));
6823 }
6824 else
6825 {
6826 gcc_assert (!pending_ltrel);
6827
6828 /* The old pool has to end before the section switch
6829 note in order to make it part of the current
6830 section. */
6831 insn = PREV_INSN (insn);
6832 }
6833
6834 label = gen_label_rtx ();
6835 prev = insn;
6836 if (prev && NOTE_P (prev))
6837 prev = prev_nonnote_insn (prev);
6838 if (prev)
6839 jump = emit_jump_insn_after_setloc (gen_jump (label), insn,
6840 INSN_LOCATOR (prev));
6841 else
6842 jump = emit_jump_insn_after_noloc (gen_jump (label), insn);
6843 barrier = emit_barrier_after (jump);
6844 insn = emit_label_after (label, barrier);
6845 JUMP_LABEL (jump) = label;
6846 LABEL_NUSES (label) = 1;
6847
6848 INSN_ADDRESSES_NEW (jump, -1);
6849 INSN_ADDRESSES_NEW (barrier, -1);
6850 INSN_ADDRESSES_NEW (insn, -1);
6851
6852 s390_end_pool (curr_pool, barrier);
6853 curr_pool = NULL;
6854 extra_size = 0;
6855 }
6856 }
6857 }
6858
6859 if (curr_pool)
6860 s390_end_pool (curr_pool, NULL_RTX);
6861 gcc_assert (!pending_ltrel);
6862
6863 /* Find all labels that are branched into
6864 from an insn belonging to a different chunk. */
6865
6866 far_labels = BITMAP_ALLOC (NULL);
6867
6868 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6869 {
6870 /* Labels marked with LABEL_PRESERVE_P can be the target
6871 of non-local jumps, so we have to mark them.
6872 The same holds for named labels.
6873
6874 Don't do that, however, if it is the label before
6875 a jump table. */
6876
6877 if (GET_CODE (insn) == CODE_LABEL
6878 && (LABEL_PRESERVE_P (insn) || LABEL_NAME (insn)))
6879 {
6880 rtx vec_insn = next_real_insn (insn);
6881 rtx vec_pat = vec_insn && GET_CODE (vec_insn) == JUMP_INSN ?
6882 PATTERN (vec_insn) : NULL_RTX;
6883 if (!vec_pat
6884 || !(GET_CODE (vec_pat) == ADDR_VEC
6885 || GET_CODE (vec_pat) == ADDR_DIFF_VEC))
6886 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (insn));
6887 }
6888
6889 /* If we have a direct jump (conditional or unconditional)
6890 or a casesi jump, check all potential targets. */
6891 else if (GET_CODE (insn) == JUMP_INSN)
6892 {
6893 rtx pat = PATTERN (insn);
6894 if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 2)
6895 pat = XVECEXP (pat, 0, 0);
6896
6897 if (GET_CODE (pat) == SET)
6898 {
6899 rtx label = JUMP_LABEL (insn);
6900 if (label)
6901 {
6902 if (s390_find_pool (pool_list, label)
6903 != s390_find_pool (pool_list, insn))
6904 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
6905 }
6906 }
6907 else if (GET_CODE (pat) == PARALLEL
6908 && XVECLEN (pat, 0) == 2
6909 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
6910 && GET_CODE (XVECEXP (pat, 0, 1)) == USE
6911 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == LABEL_REF)
6912 {
6913 /* Find the jump table used by this casesi jump. */
6914 rtx vec_label = XEXP (XEXP (XVECEXP (pat, 0, 1), 0), 0);
6915 rtx vec_insn = next_real_insn (vec_label);
6916 rtx vec_pat = vec_insn && GET_CODE (vec_insn) == JUMP_INSN ?
6917 PATTERN (vec_insn) : NULL_RTX;
6918 if (vec_pat
6919 && (GET_CODE (vec_pat) == ADDR_VEC
6920 || GET_CODE (vec_pat) == ADDR_DIFF_VEC))
6921 {
6922 int i, diff_p = GET_CODE (vec_pat) == ADDR_DIFF_VEC;
6923
6924 for (i = 0; i < XVECLEN (vec_pat, diff_p); i++)
6925 {
6926 rtx label = XEXP (XVECEXP (vec_pat, diff_p, i), 0);
6927
6928 if (s390_find_pool (pool_list, label)
6929 != s390_find_pool (pool_list, insn))
6930 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
6931 }
6932 }
6933 }
6934 }
6935 }
6936
6937 /* Insert base register reload insns before every pool. */
6938
6939 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
6940 {
6941 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
6942 curr_pool->label);
6943 rtx insn = curr_pool->first_insn;
6944 INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
6945 }
6946
6947 /* Insert base register reload insns at every far label. */
6948
6949 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6950 if (GET_CODE (insn) == CODE_LABEL
6951 && bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
6952 {
6953 struct constant_pool *pool = s390_find_pool (pool_list, insn);
6954 if (pool)
6955 {
6956 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
6957 pool->label);
6958 INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
6959 }
6960 }
6961
6962
6963 BITMAP_FREE (far_labels);
6964
6965
6966 /* Recompute insn addresses. */
6967
6968 init_insn_lengths ();
6969 shorten_branches (get_insns ());
6970
6971 return pool_list;
6972 }
6973
6974 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
6975 After we have decided to use this list, finish implementing
6976 all changes to the current function as required. */
6977
6978 static void
6979 s390_chunkify_finish (struct constant_pool *pool_list)
6980 {
6981 struct constant_pool *curr_pool = NULL;
6982 rtx insn;
6983
6984
6985 /* Replace all literal pool references. */
6986
6987 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6988 {
6989 if (INSN_P (insn))
6990 replace_ltrel_base (&PATTERN (insn));
6991
6992 curr_pool = s390_find_pool (pool_list, insn);
6993 if (!curr_pool)
6994 continue;
6995
6996 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6997 {
6998 rtx addr, pool_ref = NULL_RTX;
6999 find_constant_pool_ref (PATTERN (insn), &pool_ref);
7000 if (pool_ref)
7001 {
7002 if (s390_execute_label (insn))
7003 addr = s390_find_execute (curr_pool, insn);
7004 else
7005 addr = s390_find_constant (curr_pool,
7006 get_pool_constant (pool_ref),
7007 get_pool_mode (pool_ref));
7008
7009 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
7010 INSN_CODE (insn) = -1;
7011 }
7012 }
7013 }
7014
7015 /* Dump out all literal pools. */
7016
7017 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
7018 s390_dump_pool (curr_pool, 0);
7019
7020 /* Free pool list. */
7021
7022 while (pool_list)
7023 {
7024 struct constant_pool *next = pool_list->next;
7025 s390_free_pool (pool_list);
7026 pool_list = next;
7027 }
7028 }
7029
7030 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
7031 We have decided we cannot use this list, so revert all changes
7032 to the current function that were done by s390_chunkify_start. */
7033
7034 static void
7035 s390_chunkify_cancel (struct constant_pool *pool_list)
7036 {
7037 struct constant_pool *curr_pool = NULL;
7038 rtx insn;
7039
7040 /* Remove all pool placeholder insns. */
7041
7042 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
7043 {
7044 /* Did we insert an extra barrier? Remove it. */
7045 rtx barrier = PREV_INSN (curr_pool->pool_insn);
7046 rtx jump = barrier? PREV_INSN (barrier) : NULL_RTX;
7047 rtx label = NEXT_INSN (curr_pool->pool_insn);
7048
7049 if (jump && GET_CODE (jump) == JUMP_INSN
7050 && barrier && GET_CODE (barrier) == BARRIER
7051 && label && GET_CODE (label) == CODE_LABEL
7052 && GET_CODE (PATTERN (jump)) == SET
7053 && SET_DEST (PATTERN (jump)) == pc_rtx
7054 && GET_CODE (SET_SRC (PATTERN (jump))) == LABEL_REF
7055 && XEXP (SET_SRC (PATTERN (jump)), 0) == label)
7056 {
7057 remove_insn (jump);
7058 remove_insn (barrier);
7059 remove_insn (label);
7060 }
7061
7062 remove_insn (curr_pool->pool_insn);
7063 }
7064
7065 /* Remove all base register reload insns. */
7066
7067 for (insn = get_insns (); insn; )
7068 {
7069 rtx next_insn = NEXT_INSN (insn);
7070
7071 if (GET_CODE (insn) == INSN
7072 && GET_CODE (PATTERN (insn)) == SET
7073 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
7074 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_RELOAD_BASE)
7075 remove_insn (insn);
7076
7077 insn = next_insn;
7078 }
7079
7080 /* Free pool list. */
7081
7082 while (pool_list)
7083 {
7084 struct constant_pool *next = pool_list->next;
7085 s390_free_pool (pool_list);
7086 pool_list = next;
7087 }
7088 }
7089
7090 /* Output the constant pool entry EXP in mode MODE with alignment ALIGN. */
7091
7092 void
7093 s390_output_pool_entry (rtx exp, enum machine_mode mode, unsigned int align)
7094 {
7095 REAL_VALUE_TYPE r;
7096
7097 switch (GET_MODE_CLASS (mode))
7098 {
7099 case MODE_FLOAT:
7100 case MODE_DECIMAL_FLOAT:
7101 gcc_assert (GET_CODE (exp) == CONST_DOUBLE);
7102
7103 REAL_VALUE_FROM_CONST_DOUBLE (r, exp);
7104 assemble_real (r, mode, align);
7105 break;
7106
7107 case MODE_INT:
7108 assemble_integer (exp, GET_MODE_SIZE (mode), align, 1);
7109 mark_symbol_refs_as_used (exp);
7110 break;
7111
7112 default:
7113 gcc_unreachable ();
7114 }
7115 }
7116
7117
7118 /* Return an RTL expression representing the value of the return address
7119 for the frame COUNT steps up from the current frame. FRAME is the
7120 frame pointer of that frame. */
7121
7122 rtx
7123 s390_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
7124 {
7125 int offset;
7126 rtx addr;
7127
7128 /* Without backchain, we fail for all but the current frame. */
7129
7130 if (!TARGET_BACKCHAIN && count > 0)
7131 return NULL_RTX;
7132
7133 /* For the current frame, we need to make sure the initial
7134 value of RETURN_REGNUM is actually saved. */
7135
7136 if (count == 0)
7137 {
7138 /* On non-z architectures branch splitting could overwrite r14. */
7139 if (TARGET_CPU_ZARCH)
7140 return get_hard_reg_initial_val (Pmode, RETURN_REGNUM);
7141 else
7142 {
7143 cfun_frame_layout.save_return_addr_p = true;
7144 return gen_rtx_MEM (Pmode, return_address_pointer_rtx);
7145 }
7146 }
7147
7148 if (TARGET_PACKED_STACK)
7149 offset = -2 * UNITS_PER_LONG;
7150 else
7151 offset = RETURN_REGNUM * UNITS_PER_LONG;
7152
7153 addr = plus_constant (Pmode, frame, offset);
7154 addr = memory_address (Pmode, addr);
7155 return gen_rtx_MEM (Pmode, addr);
7156 }
7157
7158 /* Return an RTL expression representing the back chain stored in
7159 the current stack frame. */
7160
7161 rtx
7162 s390_back_chain_rtx (void)
7163 {
7164 rtx chain;
7165
7166 gcc_assert (TARGET_BACKCHAIN);
7167
7168 if (TARGET_PACKED_STACK)
7169 chain = plus_constant (Pmode, stack_pointer_rtx,
7170 STACK_POINTER_OFFSET - UNITS_PER_LONG);
7171 else
7172 chain = stack_pointer_rtx;
7173
7174 chain = gen_rtx_MEM (Pmode, chain);
7175 return chain;
7176 }
7177
7178 /* Find the first call-clobbered register unused in a function.
7179 It could be used as a base register in a leaf function
7180 or for holding the return address before the epilogue. */
7181
7182 static int
7183 find_unused_clobbered_reg (void)
7184 {
7185 int i;
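  /* GPRs 0-5 are call-clobbered in the s390 ABI; take the first one that
     is not live anywhere in the function, returning 0 if all are used.  */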
7186 for (i = 0; i < 6; i++)
7187 if (!df_regs_ever_live_p (i))
7188 return i;
7189 return 0;
7190 }
7191
7192
7193 /* Helper function for s390_regs_ever_clobbered. Sets the fields in DATA for all
7194 clobbered hard regs in SETREG. */
7195
7196 static void
7197 s390_reg_clobbered_rtx (rtx setreg, const_rtx set_insn ATTRIBUTE_UNUSED, void *data)
7198 {
7199 int *regs_ever_clobbered = (int *)data;
7200 unsigned int i, regno;
7201 enum machine_mode mode = GET_MODE (setreg);
7202
7203 if (GET_CODE (setreg) == SUBREG)
7204 {
7205 rtx inner = SUBREG_REG (setreg);
7206 if (!GENERAL_REG_P (inner))
7207 return;
7208 regno = subreg_regno (setreg);
7209 }
7210 else if (GENERAL_REG_P (setreg))
7211 regno = REGNO (setreg);
7212 else
7213 return;
7214
7215 for (i = regno;
7216 i < regno + HARD_REGNO_NREGS (regno, mode);
7217 i++)
7218 regs_ever_clobbered[i] = 1;
7219 }
7220
7221 /* Walks through all basic blocks of the current function looking
7222 for clobbered hard regs using s390_reg_clobbered_rtx. The fields
7223 of the passed integer array REGS_EVER_CLOBBERED are set to one for
7224 each of those regs. */
7225
7226 static void
7227 s390_regs_ever_clobbered (int *regs_ever_clobbered)
7228 {
7229 basic_block cur_bb;
7230 rtx cur_insn;
7231 unsigned int i;
7232
7233 memset (regs_ever_clobbered, 0, 16 * sizeof (int));
7234
7235 /* For non-leaf functions we have to consider all call clobbered regs to be
7236 clobbered. */
7237 if (!crtl->is_leaf)
7238 {
7239 for (i = 0; i < 16; i++)
7240 regs_ever_clobbered[i] = call_really_used_regs[i];
7241 }
7242
7243 /* Make the "magic" eh_return registers live if necessary. For regs_ever_live
7244 this work is done by liveness analysis (mark_regs_live_at_end).
7245 Special care is needed for functions containing landing pads. Landing pads
7246 may use the eh registers, but the code which sets these registers is not
7247 contained in that function. Hence s390_regs_ever_clobbered is not able to
7248 deal with this automatically. */
7249 if (crtl->calls_eh_return || cfun->machine->has_landing_pad_p)
7250 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM ; i++)
7251 if (crtl->calls_eh_return
7252 || (cfun->machine->has_landing_pad_p
7253 && df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
7254 regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;
7255
7256 /* For nonlocal gotos all call-saved registers have to be saved.
7257 This flag is also set for the unwinding code in libgcc.
7258 See expand_builtin_unwind_init. For regs_ever_live this is done by
7259 reload. */
7260 if (cfun->has_nonlocal_label)
7261 for (i = 0; i < 16; i++)
7262 if (!call_really_used_regs[i])
7263 regs_ever_clobbered[i] = 1;
7264
7265 FOR_EACH_BB (cur_bb)
7266 {
7267 FOR_BB_INSNS (cur_bb, cur_insn)
7268 {
7269 if (INSN_P (cur_insn))
7270 note_stores (PATTERN (cur_insn),
7271 s390_reg_clobbered_rtx,
7272 regs_ever_clobbered);
7273 }
7274 }
7275 }
7276
7277 /* Determine the frame area which actually has to be accessed
7278 in the function epilogue. The values are stored at the
7279 given pointers AREA_BOTTOM (address of the lowest used stack
7280 address) and AREA_TOP (address of the first item which does
7281 not belong to the stack frame). */
7282
7283 static void
7284 s390_frame_area (int *area_bottom, int *area_top)
7285 {
7286 int b, t;
7287 int i;
7288
7289 b = INT_MAX;
7290 t = INT_MIN;
7291
7292 if (cfun_frame_layout.first_restore_gpr != -1)
7293 {
7294 b = (cfun_frame_layout.gprs_offset
7295 + cfun_frame_layout.first_restore_gpr * UNITS_PER_LONG);
7296 t = b + (cfun_frame_layout.last_restore_gpr
7297 - cfun_frame_layout.first_restore_gpr + 1) * UNITS_PER_LONG;
7298 }
7299
7300 if (TARGET_64BIT && cfun_save_high_fprs_p)
7301 {
7302 b = MIN (b, cfun_frame_layout.f8_offset);
7303 t = MAX (t, (cfun_frame_layout.f8_offset
7304 + cfun_frame_layout.high_fprs * 8));
7305 }
7306
7307 if (!TARGET_64BIT)
7308 for (i = 2; i < 4; i++)
7309 if (cfun_fpr_bit_p (i))
7310 {
7311 b = MIN (b, cfun_frame_layout.f4_offset + (i - 2) * 8);
7312 t = MAX (t, cfun_frame_layout.f4_offset + (i - 1) * 8);
7313 }
7314
7315 *area_bottom = b;
7316 *area_top = t;
7317 }
7318
7319 /* Fill cfun->machine with info about register usage of current function.
7320 Return in CLOBBERED_REGS which GPRs are currently considered set. */
7321
7322 static void
7323 s390_register_info (int clobbered_regs[])
7324 {
7325 int i, j;
7326
7327 /* FPRs 8-15 are call-saved in the 64-bit ABI. */
7328 cfun_frame_layout.fpr_bitmap = 0;
7329 cfun_frame_layout.high_fprs = 0;
7330 if (TARGET_64BIT)
7331 for (i = 24; i < 32; i++)
7332 if (df_regs_ever_live_p (i) && !global_regs[i])
7333 {
7334 cfun_set_fpr_bit (i - 16);
7335 cfun_frame_layout.high_fprs++;
7336 }
7337
7338 /* Find first and last gpr to be saved. We trust regs_ever_live
7339 data, except that we don't save and restore global registers.
7340
7341 Also, all registers with special meaning to the compiler need
7342 extra handling. */
7343
7344 s390_regs_ever_clobbered (clobbered_regs);
7345
7346 for (i = 0; i < 16; i++)
7347 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i] && !fixed_regs[i];
7348
7349 if (frame_pointer_needed)
7350 clobbered_regs[HARD_FRAME_POINTER_REGNUM] = 1;
7351
7352 if (flag_pic)
7353 clobbered_regs[PIC_OFFSET_TABLE_REGNUM]
7354 |= df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM);
7355
7356 clobbered_regs[BASE_REGNUM]
7357 |= (cfun->machine->base_reg
7358 && REGNO (cfun->machine->base_reg) == BASE_REGNUM);
7359
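  /* The return register and the stack pointer have to be treated as
     clobbered (and hence saved) whenever the function is not a simple
     leaf: calls, TPF profiling, pending branch splitting, EH returns,
     alloca, a non-empty frame and varargs handling all require them.  */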
7360 clobbered_regs[RETURN_REGNUM]
7361 |= (!crtl->is_leaf
7362 || TARGET_TPF_PROFILING
7363 || cfun->machine->split_branches_pending_p
7364 || cfun_frame_layout.save_return_addr_p
7365 || crtl->calls_eh_return
7366 || cfun->stdarg);
7367
7368 clobbered_regs[STACK_POINTER_REGNUM]
7369 |= (!crtl->is_leaf
7370 || TARGET_TPF_PROFILING
7371 || cfun_save_high_fprs_p
7372 || get_frame_size () > 0
7373 || cfun->calls_alloca
7374 || cfun->stdarg);
7375
7376 for (i = 6; i < 16; i++)
7377 if (df_regs_ever_live_p (i) || clobbered_regs[i])
7378 break;
7379 for (j = 15; j > i; j--)
7380 if (df_regs_ever_live_p (j) || clobbered_regs[j])
7381 break;
7382
7383 if (i == 16)
7384 {
7385 /* Nothing to save/restore. */
7386 cfun_frame_layout.first_save_gpr_slot = -1;
7387 cfun_frame_layout.last_save_gpr_slot = -1;
7388 cfun_frame_layout.first_save_gpr = -1;
7389 cfun_frame_layout.first_restore_gpr = -1;
7390 cfun_frame_layout.last_save_gpr = -1;
7391 cfun_frame_layout.last_restore_gpr = -1;
7392 }
7393 else
7394 {
7395 /* Save slots for gprs from i to j. */
7396 cfun_frame_layout.first_save_gpr_slot = i;
7397 cfun_frame_layout.last_save_gpr_slot = j;
7398
7399 for (i = cfun_frame_layout.first_save_gpr_slot;
7400 i < cfun_frame_layout.last_save_gpr_slot + 1;
7401 i++)
7402 if (clobbered_regs[i])
7403 break;
7404
7405 for (j = cfun_frame_layout.last_save_gpr_slot; j > i; j--)
7406 if (clobbered_regs[j])
7407 break;
7408
7409 if (i == cfun_frame_layout.last_save_gpr_slot + 1)
7410 {
7411 /* Nothing to save/restore. */
7412 cfun_frame_layout.first_save_gpr = -1;
7413 cfun_frame_layout.first_restore_gpr = -1;
7414 cfun_frame_layout.last_save_gpr = -1;
7415 cfun_frame_layout.last_restore_gpr = -1;
7416 }
7417 else
7418 {
7419 /* Save / Restore from gpr i to j. */
7420 cfun_frame_layout.first_save_gpr = i;
7421 cfun_frame_layout.first_restore_gpr = i;
7422 cfun_frame_layout.last_save_gpr = j;
7423 cfun_frame_layout.last_restore_gpr = j;
7424 }
7425 }
7426
7427 if (cfun->stdarg)
7428 {
7429 /* Varargs functions need to save gprs 2 to 6. */
7430 if (cfun->va_list_gpr_size
7431 && crtl->args.info.gprs < GP_ARG_NUM_REG)
7432 {
7433 int min_gpr = crtl->args.info.gprs;
7434 int max_gpr = min_gpr + cfun->va_list_gpr_size;
7435 if (max_gpr > GP_ARG_NUM_REG)
7436 max_gpr = GP_ARG_NUM_REG;
7437
7438 if (cfun_frame_layout.first_save_gpr == -1
7439 || cfun_frame_layout.first_save_gpr > 2 + min_gpr)
7440 {
7441 cfun_frame_layout.first_save_gpr = 2 + min_gpr;
7442 cfun_frame_layout.first_save_gpr_slot = 2 + min_gpr;
7443 }
7444
7445 if (cfun_frame_layout.last_save_gpr == -1
7446 || cfun_frame_layout.last_save_gpr < 2 + max_gpr - 1)
7447 {
7448 cfun_frame_layout.last_save_gpr = 2 + max_gpr - 1;
7449 cfun_frame_layout.last_save_gpr_slot = 2 + max_gpr - 1;
7450 }
7451 }
7452
7453 /* Mark f0, f2 for 31 bit and f0-f6 for 64 bit to be saved. */
7454 if (TARGET_HARD_FLOAT && cfun->va_list_fpr_size
7455 && crtl->args.info.fprs < FP_ARG_NUM_REG)
7456 {
7457 int min_fpr = crtl->args.info.fprs;
7458 int max_fpr = min_fpr + cfun->va_list_fpr_size;
7459 if (max_fpr > FP_ARG_NUM_REG)
7460 max_fpr = FP_ARG_NUM_REG;
7461
7462 /* ??? This is currently required to ensure proper location
7463 of the fpr save slots within the va_list save area. */
7464 if (TARGET_PACKED_STACK)
7465 min_fpr = 0;
7466
7467 for (i = min_fpr; i < max_fpr; i++)
7468 cfun_set_fpr_bit (i);
7469 }
7470 }
7471
7472 if (!TARGET_64BIT)
7473 for (i = 2; i < 4; i++)
7474 if (df_regs_ever_live_p (i + 16) && !global_regs[i + 16])
7475 cfun_set_fpr_bit (i);
7476 }
7477
7478 /* Fill cfun->machine with info about frame of current function. */
7479
7480 static void
7481 s390_frame_info (void)
7482 {
7483 int i;
7484
7485 cfun_frame_layout.frame_size = get_frame_size ();
7486 if (!TARGET_64BIT && cfun_frame_layout.frame_size > 0x7fff0000)
7487 fatal_error ("total size of local variables exceeds architecture limit");
7488
7489 if (!TARGET_PACKED_STACK)
7490 {
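      /* Standard (non-packed) layout: GPR save slot N sits at byte offset
	 N * UNITS_PER_LONG within the register save area (e.g. the r6 slot
	 at offset 48 with 8-byte longs), and the FPR argument save slots
	 follow at 16 * UNITS_PER_LONG.  */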
7491 cfun_frame_layout.backchain_offset = 0;
7492 cfun_frame_layout.f0_offset = 16 * UNITS_PER_LONG;
7493 cfun_frame_layout.f4_offset = cfun_frame_layout.f0_offset + 2 * 8;
7494 cfun_frame_layout.f8_offset = -cfun_frame_layout.high_fprs * 8;
7495 cfun_frame_layout.gprs_offset = (cfun_frame_layout.first_save_gpr_slot
7496 * UNITS_PER_LONG);
7497 }
7498 else if (TARGET_BACKCHAIN) /* kernel stack layout */
7499 {
7500 cfun_frame_layout.backchain_offset = (STACK_POINTER_OFFSET
7501 - UNITS_PER_LONG);
7502 cfun_frame_layout.gprs_offset
7503 = (cfun_frame_layout.backchain_offset
7504 - (STACK_POINTER_REGNUM - cfun_frame_layout.first_save_gpr_slot + 1)
7505 * UNITS_PER_LONG);
7506
7507 if (TARGET_64BIT)
7508 {
7509 cfun_frame_layout.f4_offset
7510 = (cfun_frame_layout.gprs_offset
7511 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7512
7513 cfun_frame_layout.f0_offset
7514 = (cfun_frame_layout.f4_offset
7515 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7516 }
7517 else
7518 {
7519 /* On 31 bit we have to take care of the alignment of the
7520 floating point regs in order to provide the fastest access. */
7521 cfun_frame_layout.f0_offset
7522 = ((cfun_frame_layout.gprs_offset
7523 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1))
7524 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7525
7526 cfun_frame_layout.f4_offset
7527 = (cfun_frame_layout.f0_offset
7528 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7529 }
7530 }
7531 else /* no backchain */
7532 {
7533 cfun_frame_layout.f4_offset
7534 = (STACK_POINTER_OFFSET
7535 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7536
7537 cfun_frame_layout.f0_offset
7538 = (cfun_frame_layout.f4_offset
7539 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7540
7541 cfun_frame_layout.gprs_offset
7542 = cfun_frame_layout.f0_offset - cfun_gprs_save_area_size;
7543 }
7544
7545 if (crtl->is_leaf
7546 && !TARGET_TPF_PROFILING
7547 && cfun_frame_layout.frame_size == 0
7548 && !cfun_save_high_fprs_p
7549 && !cfun->calls_alloca
7550 && !cfun->stdarg)
7551 return;
7552
7553 if (!TARGET_PACKED_STACK)
7554 cfun_frame_layout.frame_size += (STACK_POINTER_OFFSET
7555 + crtl->outgoing_args_size
7556 + cfun_frame_layout.high_fprs * 8);
7557 else
7558 {
7559 if (TARGET_BACKCHAIN)
7560 cfun_frame_layout.frame_size += UNITS_PER_LONG;
7561
7562 /* No alignment trouble here because f8-f15 are only saved under
7563 64 bit. */
7564 cfun_frame_layout.f8_offset = (MIN (MIN (cfun_frame_layout.f0_offset,
7565 cfun_frame_layout.f4_offset),
7566 cfun_frame_layout.gprs_offset)
7567 - cfun_frame_layout.high_fprs * 8);
7568
7569 cfun_frame_layout.frame_size += cfun_frame_layout.high_fprs * 8;
7570
7571 for (i = 0; i < 8; i++)
7572 if (cfun_fpr_bit_p (i))
7573 cfun_frame_layout.frame_size += 8;
7574
7575 cfun_frame_layout.frame_size += cfun_gprs_save_area_size;
7576
7577 /* If, under 31 bit, an odd number of GPRs has to be saved, we have to
7578 adjust the frame size to maintain 8-byte alignment of stack frames. */
7579 cfun_frame_layout.frame_size = ((cfun_frame_layout.frame_size +
7580 STACK_BOUNDARY / BITS_PER_UNIT - 1)
7581 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1));
7582
7583 cfun_frame_layout.frame_size += crtl->outgoing_args_size;
7584 }
7585 }
7586
7587 /* Generate frame layout. Fills in register and frame data for the current
7588 function in cfun->machine. This routine can be called multiple times;
7589 it will re-do the complete frame layout every time. */
7590
7591 static void
7592 s390_init_frame_layout (void)
7593 {
7594 HOST_WIDE_INT frame_size;
7595 int base_used;
7596 int clobbered_regs[16];
7597
7598 /* On S/390 machines, we may need to perform branch splitting, which
7599 will require both the base and the return address register. We have
7600 no choice but to assume we're going to need them until right at the
7601 end of the machine-dependent reorg phase. */
7602 if (!TARGET_CPU_ZARCH)
7603 cfun->machine->split_branches_pending_p = true;
7604
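  /* The frame size and the choice of base register depend on each other:
     whether a literal pool base register is needed depends (via
     DISP_IN_RANGE) on the frame size, and reserving that register in turn
     changes the register save area and thus the frame size.  Iterate
     until the layout reaches a fixed point.  */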
7605 do
7606 {
7607 frame_size = cfun_frame_layout.frame_size;
7608
7609 /* Try to predict whether we'll need the base register. */
7610 base_used = cfun->machine->split_branches_pending_p
7611 || crtl->uses_const_pool
7612 || (!DISP_IN_RANGE (frame_size)
7613 && !CONST_OK_FOR_K (frame_size));
7614
7615 /* Decide which register to use as literal pool base. In small
7616 leaf functions, try to use an unused call-clobbered register
7617 as base register to avoid save/restore overhead. */
7618 if (!base_used)
7619 cfun->machine->base_reg = NULL_RTX;
7620 else if (crtl->is_leaf && !df_regs_ever_live_p (5))
7621 cfun->machine->base_reg = gen_rtx_REG (Pmode, 5);
7622 else
7623 cfun->machine->base_reg = gen_rtx_REG (Pmode, BASE_REGNUM);
7624
7625 s390_register_info (clobbered_regs);
7626 s390_frame_info ();
7627 }
7628 while (frame_size != cfun_frame_layout.frame_size);
7629 }
7630
7631 /* Update frame layout. Recompute actual register save data based on
7632 current info and update regs_ever_live for the special registers.
7633 May be called multiple times, but may never cause *more* registers
7634 to be saved than s390_init_frame_layout allocated room for. */
7635
7636 static void
7637 s390_update_frame_layout (void)
7638 {
7639 int clobbered_regs[16];
7640
7641 s390_register_info (clobbered_regs);
7642
7643 df_set_regs_ever_live (BASE_REGNUM,
7644 clobbered_regs[BASE_REGNUM] ? true : false);
7645 df_set_regs_ever_live (RETURN_REGNUM,
7646 clobbered_regs[RETURN_REGNUM] ? true : false);
7647 df_set_regs_ever_live (STACK_POINTER_REGNUM,
7648 clobbered_regs[STACK_POINTER_REGNUM] ? true : false);
7649
7650 if (cfun->machine->base_reg)
7651 df_set_regs_ever_live (REGNO (cfun->machine->base_reg), true);
7652 }
7653
7654 /* Return true if it is legal to put a value with MODE into REGNO. */
7655
7656 bool
7657 s390_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
7658 {
7659 switch (REGNO_REG_CLASS (regno))
7660 {
7661 case FP_REGS:
7662 if (REGNO_PAIR_OK (regno, mode))
7663 {
7664 if (mode == SImode || mode == DImode)
7665 return true;
7666
7667 if (FLOAT_MODE_P (mode) && GET_MODE_CLASS (mode) != MODE_VECTOR_FLOAT)
7668 return true;
7669 }
7670 break;
7671 case ADDR_REGS:
7672 if (FRAME_REGNO_P (regno) && mode == Pmode)
7673 return true;
7674
7675 /* fallthrough */
7676 case GENERAL_REGS:
7677 if (REGNO_PAIR_OK (regno, mode))
7678 {
7679 if (TARGET_ZARCH
7680 || (mode != TFmode && mode != TCmode && mode != TDmode))
7681 return true;
7682 }
7683 break;
7684 case CC_REGS:
7685 if (GET_MODE_CLASS (mode) == MODE_CC)
7686 return true;
7687 break;
7688 case ACCESS_REGS:
7689 if (REGNO_PAIR_OK (regno, mode))
7690 {
7691 if (mode == SImode || mode == Pmode)
7692 return true;
7693 }
7694 break;
7695 default:
7696 return false;
7697 }
7698
7699 return false;
7700 }
7701
7702 /* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */
7703
7704 bool
7705 s390_hard_regno_rename_ok (unsigned int old_reg, unsigned int new_reg)
7706 {
7707 /* Once we've decided upon a register to use as base register, it must
7708 no longer be used for any other purpose. */
7709 if (cfun->machine->base_reg)
7710 if (REGNO (cfun->machine->base_reg) == old_reg
7711 || REGNO (cfun->machine->base_reg) == new_reg)
7712 return false;
7713
7714 return true;
7715 }
7716
7717 /* Maximum number of registers to represent a value of mode MODE
7718 in a register of class RCLASS. */
7719
7720 int
7721 s390_class_max_nregs (enum reg_class rclass, enum machine_mode mode)
7722 {
7723 switch (rclass)
7724 {
7725 case FP_REGS:
7726 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
7727 return 2 * ((GET_MODE_SIZE (mode) / 2 + 8 - 1) / 8);
7728 else
7729 return (GET_MODE_SIZE (mode) + 8 - 1) / 8;
7730 case ACCESS_REGS:
7731 return (GET_MODE_SIZE (mode) + 4 - 1) / 4;
7732 default:
7733 break;
7734 }
7735 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
7736 }
7737
7738 /* Return true if register FROM can be eliminated via register TO. */
7739
7740 static bool
7741 s390_can_eliminate (const int from, const int to)
7742 {
7743 /* On zSeries machines, we have not marked the base register as fixed.
7744 Instead, we have an elimination rule BASE_REGNUM -> BASE_REGNUM.
7745 If a function requires the base register, we say here that this
7746 elimination cannot be performed. This will cause reload to free
7747 up the base register (as if it were fixed). On the other hand,
7748 if the current function does *not* require the base register, we
7749 say here the elimination succeeds, which in turn allows reload
7750 to allocate the base register for any other purpose. */
7751 if (from == BASE_REGNUM && to == BASE_REGNUM)
7752 {
7753 if (TARGET_CPU_ZARCH)
7754 {
7755 s390_init_frame_layout ();
7756 return cfun->machine->base_reg == NULL_RTX;
7757 }
7758
7759 return false;
7760 }
7761
7762 /* Everything else must point into the stack frame. */
7763 gcc_assert (to == STACK_POINTER_REGNUM
7764 || to == HARD_FRAME_POINTER_REGNUM);
7765
7766 gcc_assert (from == FRAME_POINTER_REGNUM
7767 || from == ARG_POINTER_REGNUM
7768 || from == RETURN_ADDRESS_POINTER_REGNUM);
7769
7770 /* Make sure we actually saved the return address. */
7771 if (from == RETURN_ADDRESS_POINTER_REGNUM)
7772 if (!crtl->calls_eh_return
7773 && !cfun->stdarg
7774 && !cfun_frame_layout.save_return_addr_p)
7775 return false;
7776
7777 return true;
7778 }
7779
7780 /* Return the offset between registers FROM and TO immediately after the prologue.  */
7781
7782 HOST_WIDE_INT
7783 s390_initial_elimination_offset (int from, int to)
7784 {
7785 HOST_WIDE_INT offset;
7786 int index;
7787
7788 /* ??? Why are we called for non-eliminable pairs? */
7789 if (!s390_can_eliminate (from, to))
7790 return 0;
7791
7792 switch (from)
7793 {
7794 case FRAME_POINTER_REGNUM:
7795 offset = (get_frame_size()
7796 + STACK_POINTER_OFFSET
7797 + crtl->outgoing_args_size);
7798 break;
7799
7800 case ARG_POINTER_REGNUM:
7801 s390_init_frame_layout ();
7802 offset = cfun_frame_layout.frame_size + STACK_POINTER_OFFSET;
7803 break;
7804
7805 case RETURN_ADDRESS_POINTER_REGNUM:
7806 s390_init_frame_layout ();
7807 index = RETURN_REGNUM - cfun_frame_layout.first_save_gpr_slot;
7808 gcc_assert (index >= 0);
7809 offset = cfun_frame_layout.frame_size + cfun_frame_layout.gprs_offset;
7810 offset += index * UNITS_PER_LONG;
7811 break;
7812
7813 case BASE_REGNUM:
7814 offset = 0;
7815 break;
7816
7817 default:
7818 gcc_unreachable ();
7819 }
7820
7821 return offset;
7822 }
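
/* A purely illustrative example of the offsets computed above, assuming
   RETURN_REGNUM == 14, UNITS_PER_LONG == 8 and made-up layout values
   frame_size = 344, gprs_offset = 48, first_save_gpr_slot = 6,
   STACK_POINTER_OFFSET = 160:

     ARG_POINTER_REGNUM            -> 344 + 160               = 504
     RETURN_ADDRESS_POINTER_REGNUM -> 344 + 48 + (14 - 6) * 8 = 456
     BASE_REGNUM                   -> 0                               */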
7823
7824 /* Emit insn to save fpr REGNUM at offset OFFSET relative
7825 to register BASE. Return generated insn. */
7826
7827 static rtx
7828 save_fpr (rtx base, int offset, int regnum)
7829 {
7830 rtx addr;
7831 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
7832
7833 if (regnum >= 16 && regnum <= (16 + FP_ARG_NUM_REG))
7834 set_mem_alias_set (addr, get_varargs_alias_set ());
7835 else
7836 set_mem_alias_set (addr, get_frame_alias_set ());
7837
7838 return emit_move_insn (addr, gen_rtx_REG (DFmode, regnum));
7839 }
7840
7841 /* Emit insn to restore fpr REGNUM from offset OFFSET relative
7842 to register BASE. Return generated insn. */
7843
7844 static rtx
7845 restore_fpr (rtx base, int offset, int regnum)
7846 {
7847 rtx addr;
7848 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
7849 set_mem_alias_set (addr, get_frame_alias_set ());
7850
7851 return emit_move_insn (gen_rtx_REG (DFmode, regnum), addr);
7852 }
7853
7854 /* Return true if REGNO is a global register, but not one
7855    of the special ones that need to be saved/restored anyway.  */
7856
7857 static inline bool
7858 global_not_special_regno_p (int regno)
7859 {
7860 return (global_regs[regno]
7861 /* These registers are special and need to be
7862 restored in any case. */
7863 && !(regno == STACK_POINTER_REGNUM
7864 || regno == RETURN_REGNUM
7865 || regno == BASE_REGNUM
7866 || (flag_pic && regno == (int)PIC_OFFSET_TABLE_REGNUM)));
7867 }
7868
7869 /* Generate insn to save registers FIRST to LAST into
7870 the register save area located at offset OFFSET
7871 relative to register BASE. */
7872
7873 static rtx
7874 save_gprs (rtx base, int offset, int first, int last)
7875 {
7876 rtx addr, insn, note;
7877 int i;
7878
7879 addr = plus_constant (Pmode, base, offset);
7880 addr = gen_rtx_MEM (Pmode, addr);
7881
7882 set_mem_alias_set (addr, get_frame_alias_set ());
7883
7884 /* Special-case single register. */
7885 if (first == last)
7886 {
7887 if (TARGET_64BIT)
7888 insn = gen_movdi (addr, gen_rtx_REG (Pmode, first));
7889 else
7890 insn = gen_movsi (addr, gen_rtx_REG (Pmode, first));
7891
7892 if (!global_not_special_regno_p (first))
7893 RTX_FRAME_RELATED_P (insn) = 1;
7894 return insn;
7895 }
7896
7897
7898 insn = gen_store_multiple (addr,
7899 gen_rtx_REG (Pmode, first),
7900 GEN_INT (last - first + 1));
7901
7902 if (first <= 6 && cfun->stdarg)
7903 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
7904 {
7905 rtx mem = XEXP (XVECEXP (PATTERN (insn), 0, i), 0);
7906
7907 if (first + i <= 6)
7908 set_mem_alias_set (mem, get_varargs_alias_set ());
7909 }
7910
7911 /* We need to set the FRAME_RELATED flag on all SETs
7912 inside the store-multiple pattern.
7913
7914 However, we must not emit DWARF records for registers 2..5
7915 if they are stored for use by variable arguments ...
7916
7917      ??? Unfortunately, it is not enough to simply not set the
7918 FRAME_RELATED flags for those SETs, because the first SET
7919 of the PARALLEL is always treated as if it had the flag
7920 set, even if it does not. Therefore we emit a new pattern
7921 without those registers as REG_FRAME_RELATED_EXPR note. */
7922
7923 if (first >= 6 && !global_not_special_regno_p (first))
7924 {
7925 rtx pat = PATTERN (insn);
7926
7927 for (i = 0; i < XVECLEN (pat, 0); i++)
7928 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
7929 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (pat,
7930 0, i)))))
7931 RTX_FRAME_RELATED_P (XVECEXP (pat, 0, i)) = 1;
7932
7933 RTX_FRAME_RELATED_P (insn) = 1;
7934 }
7935 else if (last >= 6)
7936 {
7937 int start;
7938
7939 for (start = first >= 6 ? first : 6; start <= last; start++)
7940 if (!global_not_special_regno_p (start))
7941 break;
7942
7943 if (start > last)
7944 return insn;
7945
7946 addr = plus_constant (Pmode, base,
7947 offset + (start - first) * UNITS_PER_LONG);
7948 note = gen_store_multiple (gen_rtx_MEM (Pmode, addr),
7949 gen_rtx_REG (Pmode, start),
7950 GEN_INT (last - start + 1));
7951 note = PATTERN (note);
7952
7953 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
7954
7955 for (i = 0; i < XVECLEN (note, 0); i++)
7956 if (GET_CODE (XVECEXP (note, 0, i)) == SET
7957 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (note,
7958 0, i)))))
7959 RTX_FRAME_RELATED_P (XVECEXP (note, 0, i)) = 1;
7960
7961 RTX_FRAME_RELATED_P (insn) = 1;
7962 }
7963
7964 return insn;
7965 }
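
/* Illustrative sketch of what the code above emits for a hypothetical
   stdarg function that saves r2..r15: the insn itself stores all of
   r2..r15, while the attached REG_FRAME_RELATED_EXPR note only describes
   the stores of r6..r15, so no CFI is emitted for the varargs slots of
   r2..r5:

     insn:  (parallel [(set (mem) (reg 2)) ... (set (mem) (reg 15))])
     note:  (parallel [(set (mem) (reg 6)) ... (set (mem) (reg 15))])  */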
7966
7967 /* Generate insn to restore registers FIRST to LAST from
7968 the register save area located at offset OFFSET
7969 relative to register BASE. */
7970
7971 static rtx
7972 restore_gprs (rtx base, int offset, int first, int last)
7973 {
7974 rtx addr, insn;
7975
7976 addr = plus_constant (Pmode, base, offset);
7977 addr = gen_rtx_MEM (Pmode, addr);
7978 set_mem_alias_set (addr, get_frame_alias_set ());
7979
7980 /* Special-case single register. */
7981 if (first == last)
7982 {
7983 if (TARGET_64BIT)
7984 insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
7985 else
7986 insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);
7987
7988 return insn;
7989 }
7990
7991 insn = gen_load_multiple (gen_rtx_REG (Pmode, first),
7992 addr,
7993 GEN_INT (last - first + 1));
7994 return insn;
7995 }
7996
7997 /* Return insn sequence to load the GOT register. */
7998
7999 static GTY(()) rtx got_symbol;
8000 rtx
8001 s390_load_got (void)
8002 {
8003 rtx insns;
8004
8005 /* We cannot use pic_offset_table_rtx here since we use this
8006 function also for non-pic if __tls_get_offset is called and in
8007 that case PIC_OFFSET_TABLE_REGNUM as well as pic_offset_table_rtx
8008 aren't usable. */
8009 rtx got_rtx = gen_rtx_REG (Pmode, 12);
8010
8011 if (!got_symbol)
8012 {
8013 got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
8014 SYMBOL_REF_FLAGS (got_symbol) = SYMBOL_FLAG_LOCAL;
8015 }
8016
8017 start_sequence ();
8018
8019 if (TARGET_CPU_ZARCH)
8020 {
8021 emit_move_insn (got_rtx, got_symbol);
8022 }
8023 else
8024 {
8025 rtx offset;
8026
8027 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got_symbol),
8028 UNSPEC_LTREL_OFFSET);
8029 offset = gen_rtx_CONST (Pmode, offset);
8030 offset = force_const_mem (Pmode, offset);
8031
8032 emit_move_insn (got_rtx, offset);
8033
8034 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (offset, 0)),
8035 UNSPEC_LTREL_BASE);
8036 offset = gen_rtx_PLUS (Pmode, got_rtx, offset);
8037
8038 emit_move_insn (got_rtx, offset);
8039 }
8040
8041 insns = get_insns ();
8042 end_sequence ();
8043 return insns;
8044 }
8045
8046 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
8047 and the change to the stack pointer. */
8048
8049 static void
8050 s390_emit_stack_tie (void)
8051 {
8052 rtx mem = gen_frame_mem (BLKmode,
8053 gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
8054
8055 emit_insn (gen_stack_tie (mem));
8056 }
8057
8058 /* Expand the prologue into a bunch of separate insns. */
8059
8060 void
8061 s390_emit_prologue (void)
8062 {
8063 rtx insn, addr;
8064 rtx temp_reg;
8065 int i;
8066 int offset;
8067 int next_fpr = 0;
8068
8069 /* Complete frame layout. */
8070
8071 s390_update_frame_layout ();
8072
8073 /* Annotate all constant pool references to let the scheduler know
8074 they implicitly use the base register. */
8075
8076 push_topmost_sequence ();
8077
8078 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8079 if (INSN_P (insn))
8080 {
8081 annotate_constant_pool_refs (&PATTERN (insn));
8082 df_insn_rescan (insn);
8083 }
8084
8085 pop_topmost_sequence ();
8086
8087 /* Choose best register to use for temp use within prologue.
8088      See below for why TPF must use register 1.  */
8089
8090 if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
8091 && !crtl->is_leaf
8092 && !TARGET_TPF_PROFILING)
8093 temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
8094 else
8095 temp_reg = gen_rtx_REG (Pmode, 1);
8096
8097 /* Save call saved gprs. */
8098 if (cfun_frame_layout.first_save_gpr != -1)
8099 {
8100 insn = save_gprs (stack_pointer_rtx,
8101 cfun_frame_layout.gprs_offset +
8102 UNITS_PER_LONG * (cfun_frame_layout.first_save_gpr
8103 - cfun_frame_layout.first_save_gpr_slot),
8104 cfun_frame_layout.first_save_gpr,
8105 cfun_frame_layout.last_save_gpr);
8106 emit_insn (insn);
8107 }
8108
8109 /* Dummy insn to mark literal pool slot. */
8110
8111 if (cfun->machine->base_reg)
8112 emit_insn (gen_main_pool (cfun->machine->base_reg));
8113
8114 offset = cfun_frame_layout.f0_offset;
8115
8116 /* Save f0 and f2. */
8117 for (i = 0; i < 2; i++)
8118 {
8119 if (cfun_fpr_bit_p (i))
8120 {
8121 save_fpr (stack_pointer_rtx, offset, i + 16);
8122 offset += 8;
8123 }
8124 else if (!TARGET_PACKED_STACK)
8125 offset += 8;
8126 }
8127
8128 /* Save f4 and f6. */
8129 offset = cfun_frame_layout.f4_offset;
8130 for (i = 2; i < 4; i++)
8131 {
8132 if (cfun_fpr_bit_p (i))
8133 {
8134 insn = save_fpr (stack_pointer_rtx, offset, i + 16);
8135 offset += 8;
8136
8137          /* If f4 and f6 are call clobbered, they are saved due to stdargs and
8138             therefore are not frame related.  */
8139 if (!call_really_used_regs[i + 16])
8140 RTX_FRAME_RELATED_P (insn) = 1;
8141 }
8142 else if (!TARGET_PACKED_STACK)
8143 offset += 8;
8144 }
8145
8146 if (TARGET_PACKED_STACK
8147 && cfun_save_high_fprs_p
8148 && cfun_frame_layout.f8_offset + cfun_frame_layout.high_fprs * 8 > 0)
8149 {
8150 offset = (cfun_frame_layout.f8_offset
8151 + (cfun_frame_layout.high_fprs - 1) * 8);
8152
8153 for (i = 15; i > 7 && offset >= 0; i--)
8154 if (cfun_fpr_bit_p (i))
8155 {
8156 insn = save_fpr (stack_pointer_rtx, offset, i + 16);
8157
8158 RTX_FRAME_RELATED_P (insn) = 1;
8159 offset -= 8;
8160 }
8161 if (offset >= cfun_frame_layout.f8_offset)
8162 next_fpr = i + 16;
8163 }
8164
8165 if (!TARGET_PACKED_STACK)
8166 next_fpr = cfun_save_high_fprs_p ? 31 : 0;
8167
8168 if (flag_stack_usage_info)
8169 current_function_static_stack_size = cfun_frame_layout.frame_size;
8170
8171 /* Decrement stack pointer. */
8172
8173 if (cfun_frame_layout.frame_size > 0)
8174 {
8175 rtx frame_off = GEN_INT (-cfun_frame_layout.frame_size);
8176 rtx real_frame_off;
8177
8178 if (s390_stack_size)
8179 {
8180 HOST_WIDE_INT stack_guard;
8181
8182 if (s390_stack_guard)
8183 stack_guard = s390_stack_guard;
8184 else
8185 {
8186              /* If no value for the stack guard is provided, the smallest power of 2
8187                 larger than the current frame size is chosen.  */
8188 stack_guard = 1;
8189 while (stack_guard < cfun_frame_layout.frame_size)
8190 stack_guard <<= 1;
8191 }
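
          /* Worked example of the rounding above (hypothetical numbers):
             without -mstack-guard and with a frame size of 5000 bytes the
             loop yields stack_guard = 8192, the smallest power of 2 that
             is not below the frame size.  */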
8192
8193 if (cfun_frame_layout.frame_size >= s390_stack_size)
8194 {
8195 warning (0, "frame size of function %qs is %wd"
8196 " bytes exceeding user provided stack limit of "
8197 "%d bytes. "
8198 "An unconditional trap is added.",
8199 current_function_name(), cfun_frame_layout.frame_size,
8200 s390_stack_size);
8201 emit_insn (gen_trap ());
8202 }
8203 else
8204 {
8205 /* stack_guard has to be smaller than s390_stack_size.
8206 Otherwise we would emit an AND with zero which would
8207 not match the test under mask pattern. */
8208 if (stack_guard >= s390_stack_size)
8209 {
8210 warning (0, "frame size of function %qs is %wd"
8211 " bytes which is more than half the stack size. "
8212 "The dynamic check would not be reliable. "
8213 "No check emitted for this function.",
8214 current_function_name(),
8215 cfun_frame_layout.frame_size);
8216 }
8217 else
8218 {
8219 HOST_WIDE_INT stack_check_mask = ((s390_stack_size - 1)
8220 & ~(stack_guard - 1));
8221
8222 rtx t = gen_rtx_AND (Pmode, stack_pointer_rtx,
8223 GEN_INT (stack_check_mask));
8224 if (TARGET_64BIT)
8225 emit_insn (gen_ctrapdi4 (gen_rtx_EQ (VOIDmode,
8226 t, const0_rtx),
8227 t, const0_rtx, const0_rtx));
8228 else
8229 emit_insn (gen_ctrapsi4 (gen_rtx_EQ (VOIDmode,
8230 t, const0_rtx),
8231 t, const0_rtx, const0_rtx));
8232 }
8233 }
8234 }
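
      /* Worked example of the check emitted above (hypothetical option
         values): with -mstack-size=65536 and -mstack-guard=4096,

           stack_check_mask = (65536 - 1) & ~(4096 - 1)
                            = 0xffff & 0xfffff000
                            = 0xf000

         and the compare-and-trap fires when (%r15 & 0xf000) == 0, i.e.
         when the stack pointer has dropped into the lowest guard-sized
         band of the (suitably aligned) stack area.  */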
8235
8236 if (s390_warn_framesize > 0
8237 && cfun_frame_layout.frame_size >= s390_warn_framesize)
8238 warning (0, "frame size of %qs is %wd bytes",
8239 current_function_name (), cfun_frame_layout.frame_size);
8240
8241 if (s390_warn_dynamicstack_p && cfun->calls_alloca)
8242 warning (0, "%qs uses dynamic stack allocation", current_function_name ());
8243
8244 /* Save incoming stack pointer into temp reg. */
8245 if (TARGET_BACKCHAIN || next_fpr)
8246 insn = emit_insn (gen_move_insn (temp_reg, stack_pointer_rtx));
8247
8248 /* Subtract frame size from stack pointer. */
8249
8250 if (DISP_IN_RANGE (INTVAL (frame_off)))
8251 {
8252 insn = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8253 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8254 frame_off));
8255 insn = emit_insn (insn);
8256 }
8257 else
8258 {
8259 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
8260 frame_off = force_const_mem (Pmode, frame_off);
8261
8262 insn = emit_insn (gen_add2_insn (stack_pointer_rtx, frame_off));
8263 annotate_constant_pool_refs (&PATTERN (insn));
8264 }
8265
8266 RTX_FRAME_RELATED_P (insn) = 1;
8267 real_frame_off = GEN_INT (-cfun_frame_layout.frame_size);
8268 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
8269 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8270 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8271 real_frame_off)));
8272
8273 /* Set backchain. */
8274
8275 if (TARGET_BACKCHAIN)
8276 {
8277 if (cfun_frame_layout.backchain_offset)
8278 addr = gen_rtx_MEM (Pmode,
8279 plus_constant (Pmode, stack_pointer_rtx,
8280 cfun_frame_layout.backchain_offset));
8281 else
8282 addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
8283 set_mem_alias_set (addr, get_frame_alias_set ());
8284 insn = emit_insn (gen_move_insn (addr, temp_reg));
8285 }
8286
8287 /* If we support non-call exceptions (e.g. for Java),
8288 we need to make sure the backchain pointer is set up
8289 before any possibly trapping memory access. */
8290 if (TARGET_BACKCHAIN && cfun->can_throw_non_call_exceptions)
8291 {
8292 addr = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode));
8293 emit_clobber (addr);
8294 }
8295 }
8296
8297 /* Save fprs 8 - 15 (64 bit ABI). */
8298
8299 if (cfun_save_high_fprs_p && next_fpr)
8300 {
8301 /* If the stack might be accessed through a different register
8302 we have to make sure that the stack pointer decrement is not
8303 moved below the use of the stack slots. */
8304 s390_emit_stack_tie ();
8305
8306 insn = emit_insn (gen_add2_insn (temp_reg,
8307 GEN_INT (cfun_frame_layout.f8_offset)));
8308
8309 offset = 0;
8310
8311 for (i = 24; i <= next_fpr; i++)
8312 if (cfun_fpr_bit_p (i - 16))
8313 {
8314 rtx addr = plus_constant (Pmode, stack_pointer_rtx,
8315 cfun_frame_layout.frame_size
8316 + cfun_frame_layout.f8_offset
8317 + offset);
8318
8319 insn = save_fpr (temp_reg, offset, i);
8320 offset += 8;
8321 RTX_FRAME_RELATED_P (insn) = 1;
8322 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
8323 gen_rtx_SET (VOIDmode,
8324 gen_rtx_MEM (DFmode, addr),
8325 gen_rtx_REG (DFmode, i)));
8326 }
8327 }
8328
8329 /* Set frame pointer, if needed. */
8330
8331 if (frame_pointer_needed)
8332 {
8333 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
8334 RTX_FRAME_RELATED_P (insn) = 1;
8335 }
8336
8337 /* Set up got pointer, if needed. */
8338
8339 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
8340 {
8341 rtx insns = s390_load_got ();
8342
8343 for (insn = insns; insn; insn = NEXT_INSN (insn))
8344 annotate_constant_pool_refs (&PATTERN (insn));
8345
8346 emit_insn (insns);
8347 }
8348
8349 if (TARGET_TPF_PROFILING)
8350 {
8351 /* Generate a BAS instruction to serve as a function
8352 entry intercept to facilitate the use of tracing
8353 algorithms located at the branch target. */
8354 emit_insn (gen_prologue_tpf ());
8355
8356 /* Emit a blockage here so that all code
8357 lies between the profiling mechanisms. */
8358 emit_insn (gen_blockage ());
8359 }
8360 }
8361
8362 /* Expand the epilogue into a bunch of separate insns. */
8363
8364 void
8365 s390_emit_epilogue (bool sibcall)
8366 {
8367 rtx frame_pointer, return_reg, cfa_restores = NULL_RTX;
8368 int area_bottom, area_top, offset = 0;
8369 int next_offset;
8370 rtvec p;
8371 int i;
8372
8373 if (TARGET_TPF_PROFILING)
8374 {
8375
8376 /* Generate a BAS instruction to serve as a function
8377 entry intercept to facilitate the use of tracing
8378 algorithms located at the branch target. */
8379
8380 /* Emit a blockage here so that all code
8381 lies between the profiling mechanisms. */
8382 emit_insn (gen_blockage ());
8383
8384 emit_insn (gen_epilogue_tpf ());
8385 }
8386
8387 /* Check whether to use frame or stack pointer for restore. */
8388
8389 frame_pointer = (frame_pointer_needed
8390 ? hard_frame_pointer_rtx : stack_pointer_rtx);
8391
8392 s390_frame_area (&area_bottom, &area_top);
8393
8394 /* Check whether we can access the register save area.
8395 If not, increment the frame pointer as required. */
8396
8397 if (area_top <= area_bottom)
8398 {
8399 /* Nothing to restore. */
8400 }
8401 else if (DISP_IN_RANGE (cfun_frame_layout.frame_size + area_bottom)
8402 && DISP_IN_RANGE (cfun_frame_layout.frame_size + area_top - 1))
8403 {
8404 /* Area is in range. */
8405 offset = cfun_frame_layout.frame_size;
8406 }
8407 else
8408 {
8409 rtx insn, frame_off, cfa;
8410
8411 offset = area_bottom < 0 ? -area_bottom : 0;
8412 frame_off = GEN_INT (cfun_frame_layout.frame_size - offset);
8413
8414 cfa = gen_rtx_SET (VOIDmode, frame_pointer,
8415 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
8416 if (DISP_IN_RANGE (INTVAL (frame_off)))
8417 {
8418 insn = gen_rtx_SET (VOIDmode, frame_pointer,
8419 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
8420 insn = emit_insn (insn);
8421 }
8422 else
8423 {
8424 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
8425 frame_off = force_const_mem (Pmode, frame_off);
8426
8427 insn = emit_insn (gen_add2_insn (frame_pointer, frame_off));
8428 annotate_constant_pool_refs (&PATTERN (insn));
8429 }
8430 add_reg_note (insn, REG_CFA_ADJUST_CFA, cfa);
8431 RTX_FRAME_RELATED_P (insn) = 1;
8432 }
8433
8434 /* Restore call saved fprs. */
8435
8436 if (TARGET_64BIT)
8437 {
8438 if (cfun_save_high_fprs_p)
8439 {
8440 next_offset = cfun_frame_layout.f8_offset;
8441 for (i = 24; i < 32; i++)
8442 {
8443 if (cfun_fpr_bit_p (i - 16))
8444 {
8445 restore_fpr (frame_pointer,
8446 offset + next_offset, i);
8447 cfa_restores
8448 = alloc_reg_note (REG_CFA_RESTORE,
8449 gen_rtx_REG (DFmode, i), cfa_restores);
8450 next_offset += 8;
8451 }
8452 }
8453 }
8454
8455 }
8456 else
8457 {
8458 next_offset = cfun_frame_layout.f4_offset;
8459 for (i = 18; i < 20; i++)
8460 {
8461 if (cfun_fpr_bit_p (i - 16))
8462 {
8463 restore_fpr (frame_pointer,
8464 offset + next_offset, i);
8465 cfa_restores
8466 = alloc_reg_note (REG_CFA_RESTORE,
8467 gen_rtx_REG (DFmode, i), cfa_restores);
8468 next_offset += 8;
8469 }
8470 else if (!TARGET_PACKED_STACK)
8471 next_offset += 8;
8472 }
8473
8474 }
8475
8476 /* Return register. */
8477
8478 return_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
8479
8480 /* Restore call saved gprs. */
8481
8482 if (cfun_frame_layout.first_restore_gpr != -1)
8483 {
8484 rtx insn, addr;
8485 int i;
8486
8487      /* Check for global registers and store them back to the stack
8488         slots they will be restored from, so they keep their current values.  */
8489
8490 for (i = cfun_frame_layout.first_restore_gpr;
8491 i <= cfun_frame_layout.last_restore_gpr;
8492 i++)
8493 {
8494 if (global_not_special_regno_p (i))
8495 {
8496 addr = plus_constant (Pmode, frame_pointer,
8497 offset + cfun_frame_layout.gprs_offset
8498 + (i - cfun_frame_layout.first_save_gpr_slot)
8499 * UNITS_PER_LONG);
8500 addr = gen_rtx_MEM (Pmode, addr);
8501 set_mem_alias_set (addr, get_frame_alias_set ());
8502 emit_move_insn (addr, gen_rtx_REG (Pmode, i));
8503 }
8504 else
8505 cfa_restores
8506 = alloc_reg_note (REG_CFA_RESTORE,
8507 gen_rtx_REG (Pmode, i), cfa_restores);
8508 }
8509
8510 if (! sibcall)
8511 {
8512          /* Fetch the return address from the stack before the load multiple;
8513             this helps scheduling.  */
8514
8515 if (cfun_frame_layout.save_return_addr_p
8516 || (cfun_frame_layout.first_restore_gpr < BASE_REGNUM
8517 && cfun_frame_layout.last_restore_gpr > RETURN_REGNUM))
8518 {
8519 int return_regnum = find_unused_clobbered_reg();
8520 if (!return_regnum)
8521 return_regnum = 4;
8522 return_reg = gen_rtx_REG (Pmode, return_regnum);
8523
8524 addr = plus_constant (Pmode, frame_pointer,
8525 offset + cfun_frame_layout.gprs_offset
8526 + (RETURN_REGNUM
8527 - cfun_frame_layout.first_save_gpr_slot)
8528 * UNITS_PER_LONG);
8529 addr = gen_rtx_MEM (Pmode, addr);
8530 set_mem_alias_set (addr, get_frame_alias_set ());
8531 emit_move_insn (return_reg, addr);
8532 }
8533 }
8534
8535 insn = restore_gprs (frame_pointer,
8536 offset + cfun_frame_layout.gprs_offset
8537 + (cfun_frame_layout.first_restore_gpr
8538 - cfun_frame_layout.first_save_gpr_slot)
8539 * UNITS_PER_LONG,
8540 cfun_frame_layout.first_restore_gpr,
8541 cfun_frame_layout.last_restore_gpr);
8542 insn = emit_insn (insn);
8543 REG_NOTES (insn) = cfa_restores;
8544 add_reg_note (insn, REG_CFA_DEF_CFA,
8545 plus_constant (Pmode, stack_pointer_rtx,
8546 STACK_POINTER_OFFSET));
8547 RTX_FRAME_RELATED_P (insn) = 1;
8548 }
8549
8550 if (! sibcall)
8551 {
8552
8553 /* Return to caller. */
8554
8555 p = rtvec_alloc (2);
8556
8557 RTVEC_ELT (p, 0) = ret_rtx;
8558 RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode, return_reg);
8559 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
8560 }
8561 }
8562
8563
8564 /* Return the size in bytes of a function argument of
8565 type TYPE and/or mode MODE. At least one of TYPE or
8566 MODE must be specified. */
8567
8568 static int
8569 s390_function_arg_size (enum machine_mode mode, const_tree type)
8570 {
8571 if (type)
8572 return int_size_in_bytes (type);
8573
8574 /* No type info available for some library calls ... */
8575 if (mode != BLKmode)
8576 return GET_MODE_SIZE (mode);
8577
8578    /* If we have neither type nor mode, abort.  */
8579 gcc_unreachable ();
8580 }
8581
8582 /* Return true if a function argument of type TYPE and mode MODE
8583 is to be passed in a floating-point register, if available. */
8584
8585 static bool
8586 s390_function_arg_float (enum machine_mode mode, const_tree type)
8587 {
8588 int size = s390_function_arg_size (mode, type);
8589 if (size > 8)
8590 return false;
8591
8592 /* Soft-float changes the ABI: no floating-point registers are used. */
8593 if (TARGET_SOFT_FLOAT)
8594 return false;
8595
8596 /* No type info available for some library calls ... */
8597 if (!type)
8598 return mode == SFmode || mode == DFmode || mode == SDmode || mode == DDmode;
8599
8600 /* The ABI says that record types with a single member are treated
8601 just like that member would be. */
8602 while (TREE_CODE (type) == RECORD_TYPE)
8603 {
8604 tree field, single = NULL_TREE;
8605
8606 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
8607 {
8608 if (TREE_CODE (field) != FIELD_DECL)
8609 continue;
8610
8611 if (single == NULL_TREE)
8612 single = TREE_TYPE (field);
8613 else
8614 return false;
8615 }
8616
8617 if (single == NULL_TREE)
8618 return false;
8619 else
8620 type = single;
8621 }
8622
8623 return TREE_CODE (type) == REAL_TYPE;
8624 }
8625
8626 /* Return true if a function argument of type TYPE and mode MODE
8627 is to be passed in an integer register, or a pair of integer
8628 registers, if available. */
8629
8630 static bool
8631 s390_function_arg_integer (enum machine_mode mode, const_tree type)
8632 {
8633 int size = s390_function_arg_size (mode, type);
8634 if (size > 8)
8635 return false;
8636
8637 /* No type info available for some library calls ... */
8638 if (!type)
8639 return GET_MODE_CLASS (mode) == MODE_INT
8640 || (TARGET_SOFT_FLOAT && SCALAR_FLOAT_MODE_P (mode));
8641
8642 /* We accept small integral (and similar) types. */
8643 if (INTEGRAL_TYPE_P (type)
8644 || POINTER_TYPE_P (type)
8645 || TREE_CODE (type) == NULLPTR_TYPE
8646 || TREE_CODE (type) == OFFSET_TYPE
8647 || (TARGET_SOFT_FLOAT && TREE_CODE (type) == REAL_TYPE))
8648 return true;
8649
8650 /* We also accept structs of size 1, 2, 4, 8 that are not
8651 passed in floating-point registers. */
8652 if (AGGREGATE_TYPE_P (type)
8653 && exact_log2 (size) >= 0
8654 && !s390_function_arg_float (mode, type))
8655 return true;
8656
8657 return false;
8658 }
8659
8660 /* Return 1 if a function argument of type TYPE and mode MODE
8661 is to be passed by reference. The ABI specifies that only
8662 structures of size 1, 2, 4, or 8 bytes are passed by value,
8663 all other structures (and complex numbers) are passed by
8664 reference. */
8665
8666 static bool
8667 s390_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
8668 enum machine_mode mode, const_tree type,
8669 bool named ATTRIBUTE_UNUSED)
8670 {
8671 int size = s390_function_arg_size (mode, type);
8672 if (size > 8)
8673 return true;
8674
8675 if (type)
8676 {
8677 if (AGGREGATE_TYPE_P (type) && exact_log2 (size) < 0)
8678 return 1;
8679
8680 if (TREE_CODE (type) == COMPLEX_TYPE
8681 || TREE_CODE (type) == VECTOR_TYPE)
8682 return 1;
8683 }
8684
8685 return 0;
8686 }
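
/* Illustrative consequences of the rules above (assuming typical type
   sizes):

     struct { int a; }      size 4, power of 2       -> passed by value
     struct { char c[3]; }  size 3, not a power of 2 -> passed by reference
     struct { long l[2]; }  size 16 > 8 (-m64)       -> passed by reference
     _Complex double                                 -> passed by reference  */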
8687
8688 /* Update the data in CUM to advance over an argument of mode MODE and
8689 data type TYPE. (TYPE is null for libcalls where that information
8690    may not be available.)  The boolean NAMED specifies whether the
8691 argument is a named argument (as opposed to an unnamed argument
8692 matching an ellipsis). */
8693
8694 static void
8695 s390_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
8696 const_tree type, bool named ATTRIBUTE_UNUSED)
8697 {
8698 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
8699
8700 if (s390_function_arg_float (mode, type))
8701 {
8702 cum->fprs += 1;
8703 }
8704 else if (s390_function_arg_integer (mode, type))
8705 {
8706 int size = s390_function_arg_size (mode, type);
8707 cum->gprs += ((size + UNITS_PER_LONG - 1) / UNITS_PER_LONG);
8708 }
8709 else
8710 gcc_unreachable ();
8711 }
8712
8713 /* Define where to put the arguments to a function.
8714 Value is zero to push the argument on the stack,
8715 or a hard register in which to store the argument.
8716
8717 MODE is the argument's machine mode.
8718 TYPE is the data type of the argument (as a tree).
8719 This is null for libcalls where that information may
8720 not be available.
8721 CUM is a variable of type CUMULATIVE_ARGS which gives info about
8722 the preceding args and about the function being called.
8723 NAMED is nonzero if this argument is a named parameter
8724 (otherwise it is an extra parameter matching an ellipsis).
8725
8726 On S/390, we use general purpose registers 2 through 6 to
8727 pass integer, pointer, and certain structure arguments, and
8728 floating point registers 0 and 2 (0, 2, 4, and 6 on 64-bit)
8729 to pass floating point arguments. All remaining arguments
8730 are pushed to the stack. */
8731
8732 static rtx
8733 s390_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
8734 const_tree type, bool named ATTRIBUTE_UNUSED)
8735 {
8736 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
8737
8738 if (s390_function_arg_float (mode, type))
8739 {
8740 if (cum->fprs + 1 > FP_ARG_NUM_REG)
8741 return 0;
8742 else
8743 return gen_rtx_REG (mode, cum->fprs + 16);
8744 }
8745 else if (s390_function_arg_integer (mode, type))
8746 {
8747 int size = s390_function_arg_size (mode, type);
8748 int n_gprs = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
8749
8750 if (cum->gprs + n_gprs > GP_ARG_NUM_REG)
8751 return 0;
8752 else if (n_gprs == 1 || UNITS_PER_WORD == UNITS_PER_LONG)
8753 return gen_rtx_REG (mode, cum->gprs + 2);
8754 else if (n_gprs == 2)
8755 {
8756 rtvec p = rtvec_alloc (2);
8757
8758 RTVEC_ELT (p, 0)
8759 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 2),
8760 const0_rtx);
8761 RTVEC_ELT (p, 1)
8762 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 3),
8763 GEN_INT (4));
8764
8765 return gen_rtx_PARALLEL (mode, p);
8766 }
8767 }
8768
8769 /* After the real arguments, expand_call calls us once again
8770 with a void_type_node type. Whatever we return here is
8771 passed as operand 2 to the call expanders.
8772
8773 We don't need this feature ... */
8774 else if (type == void_type_node)
8775 return const0_rtx;
8776
8777 gcc_unreachable ();
8778 }
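
/* A minimal illustration of the assignment above for a hypothetical
   31-bit (-m31, hard-float) call

     void f (int a, double b, long long c);

     a (4 bytes) -> gprs == 0 -> %r2
     b (8 bytes) -> fprs == 0 -> %f0
     c (8 bytes) -> gprs == 1, needs 2 GPRs -> PARALLEL of %r3 / %r4
                    (two SImode pieces at byte offsets 0 and 4)        */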
8779
8780 /* Return true if return values of type TYPE should be returned
8781 in a memory buffer whose address is passed by the caller as
8782 hidden first argument. */
8783
8784 static bool
8785 s390_return_in_memory (const_tree type, const_tree fundecl ATTRIBUTE_UNUSED)
8786 {
8787 /* We accept small integral (and similar) types. */
8788 if (INTEGRAL_TYPE_P (type)
8789 || POINTER_TYPE_P (type)
8790 || TREE_CODE (type) == OFFSET_TYPE
8791 || TREE_CODE (type) == REAL_TYPE)
8792 return int_size_in_bytes (type) > 8;
8793
8794 /* Aggregates and similar constructs are always returned
8795 in memory. */
8796 if (AGGREGATE_TYPE_P (type)
8797 || TREE_CODE (type) == COMPLEX_TYPE
8798 || TREE_CODE (type) == VECTOR_TYPE)
8799 return true;
8800
8801 /* ??? We get called on all sorts of random stuff from
8802 aggregate_value_p. We can't abort, but it's not clear
8803 what's safe to return. Pretend it's a struct I guess. */
8804 return true;
8805 }
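
/* For illustration (assuming typical -m64 type sizes):

     long long          (integral,   8 bytes) -> returned in registers
     long double        (REAL_TYPE, 16 bytes) -> returned in memory
     struct { int a; }  (aggregate)           -> returned in memory    */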
8806
8807 /* Function arguments and return values are promoted to word size. */
8808
8809 static enum machine_mode
8810 s390_promote_function_mode (const_tree type, enum machine_mode mode,
8811 int *punsignedp,
8812 const_tree fntype ATTRIBUTE_UNUSED,
8813 int for_return ATTRIBUTE_UNUSED)
8814 {
8815 if (INTEGRAL_MODE_P (mode)
8816 && GET_MODE_SIZE (mode) < UNITS_PER_LONG)
8817 {
8818 if (type != NULL_TREE && POINTER_TYPE_P (type))
8819 *punsignedp = POINTERS_EXTEND_UNSIGNED;
8820 return Pmode;
8821 }
8822
8823 return mode;
8824 }
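
/* For example, with -m64 (UNITS_PER_LONG == 8):

     short  arg (HImode, 2 bytes) -> promoted to DImode (Pmode)
     char  *arg (pointer)         -> promoted, POINTERS_EXTEND_UNSIGNED
     long   arg (DImode, 8 bytes) -> left unchanged                      */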
8825
8826 /* Define where to return a (scalar) value of type RET_TYPE.
8827 If RET_TYPE is null, define where to return a (scalar)
8828 value of mode MODE from a libcall. */
8829
8830 static rtx
8831 s390_function_and_libcall_value (enum machine_mode mode,
8832 const_tree ret_type,
8833 const_tree fntype_or_decl,
8834 bool outgoing ATTRIBUTE_UNUSED)
8835 {
8836 /* For normal functions perform the promotion as
8837 promote_function_mode would do. */
8838 if (ret_type)
8839 {
8840 int unsignedp = TYPE_UNSIGNED (ret_type);
8841 mode = promote_function_mode (ret_type, mode, &unsignedp,
8842 fntype_or_decl, 1);
8843 }
8844
8845 gcc_assert (GET_MODE_CLASS (mode) == MODE_INT || SCALAR_FLOAT_MODE_P (mode));
8846 gcc_assert (GET_MODE_SIZE (mode) <= 8);
8847
8848 if (TARGET_HARD_FLOAT && SCALAR_FLOAT_MODE_P (mode))
8849 return gen_rtx_REG (mode, 16);
8850 else if (GET_MODE_SIZE (mode) <= UNITS_PER_LONG
8851 || UNITS_PER_LONG == UNITS_PER_WORD)
8852 return gen_rtx_REG (mode, 2);
8853 else if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_LONG)
8854 {
8855 /* This case is triggered when returning a 64 bit value with
8856 -m31 -mzarch. Although the value would fit into a single
8857 register it has to be forced into a 32 bit register pair in
8858 order to match the ABI. */
8859 rtvec p = rtvec_alloc (2);
8860
8861 RTVEC_ELT (p, 0)
8862 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 2), const0_rtx);
8863 RTVEC_ELT (p, 1)
8864 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 3), GEN_INT (4));
8865
8866 return gen_rtx_PARALLEL (mode, p);
8867 }
8868
8869 gcc_unreachable ();
8870 }
8871
8872 /* Define where to return a scalar return value of type RET_TYPE. */
8873
8874 static rtx
8875 s390_function_value (const_tree ret_type, const_tree fn_decl_or_type,
8876 bool outgoing)
8877 {
8878 return s390_function_and_libcall_value (TYPE_MODE (ret_type), ret_type,
8879 fn_decl_or_type, outgoing);
8880 }
8881
8882 /* Define where to return a scalar libcall return value of mode
8883 MODE. */
8884
8885 static rtx
8886 s390_libcall_value (enum machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
8887 {
8888 return s390_function_and_libcall_value (mode, NULL_TREE,
8889 NULL_TREE, true);
8890 }
8891
8892
8893 /* Create and return the va_list datatype.
8894
8895 On S/390, va_list is an array type equivalent to
8896
8897 typedef struct __va_list_tag
8898 {
8899 long __gpr;
8900 long __fpr;
8901 void *__overflow_arg_area;
8902 void *__reg_save_area;
8903 } va_list[1];
8904
8905 where __gpr and __fpr hold the number of general purpose
8906 or floating point arguments used up to now, respectively,
8907 __overflow_arg_area points to the stack location of the
8908 next argument passed on the stack, and __reg_save_area
8909 always points to the start of the register area in the
8910 call frame of the current function. The function prologue
8911 saves all registers used for argument passing into this
8912 area if the function uses variable arguments. */
8913
8914 static tree
8915 s390_build_builtin_va_list (void)
8916 {
8917 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
8918
8919 record = lang_hooks.types.make_type (RECORD_TYPE);
8920
8921 type_decl =
8922 build_decl (BUILTINS_LOCATION,
8923 TYPE_DECL, get_identifier ("__va_list_tag"), record);
8924
8925 f_gpr = build_decl (BUILTINS_LOCATION,
8926 FIELD_DECL, get_identifier ("__gpr"),
8927 long_integer_type_node);
8928 f_fpr = build_decl (BUILTINS_LOCATION,
8929 FIELD_DECL, get_identifier ("__fpr"),
8930 long_integer_type_node);
8931 f_ovf = build_decl (BUILTINS_LOCATION,
8932 FIELD_DECL, get_identifier ("__overflow_arg_area"),
8933 ptr_type_node);
8934 f_sav = build_decl (BUILTINS_LOCATION,
8935 FIELD_DECL, get_identifier ("__reg_save_area"),
8936 ptr_type_node);
8937
8938 va_list_gpr_counter_field = f_gpr;
8939 va_list_fpr_counter_field = f_fpr;
8940
8941 DECL_FIELD_CONTEXT (f_gpr) = record;
8942 DECL_FIELD_CONTEXT (f_fpr) = record;
8943 DECL_FIELD_CONTEXT (f_ovf) = record;
8944 DECL_FIELD_CONTEXT (f_sav) = record;
8945
8946 TYPE_STUB_DECL (record) = type_decl;
8947 TYPE_NAME (record) = type_decl;
8948 TYPE_FIELDS (record) = f_gpr;
8949 DECL_CHAIN (f_gpr) = f_fpr;
8950 DECL_CHAIN (f_fpr) = f_ovf;
8951 DECL_CHAIN (f_ovf) = f_sav;
8952
8953 layout_type (record);
8954
8955 /* The correct type is an array type of one element. */
8956 return build_array_type (record, build_index_type (size_zero_node));
8957 }
8958
8959 /* Implement va_start by filling the va_list structure VALIST.
8960 STDARG_P is always true, and ignored.
8961 NEXTARG points to the first anonymous stack argument.
8962
8963 The following global variables are used to initialize
8964 the va_list structure:
8965
8966 crtl->args.info:
8967 holds number of gprs and fprs used for named arguments.
8968 crtl->args.arg_offset_rtx:
8969 holds the offset of the first anonymous stack argument
8970 (relative to the virtual arg pointer). */
8971
8972 static void
8973 s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
8974 {
8975 HOST_WIDE_INT n_gpr, n_fpr;
8976 int off;
8977 tree f_gpr, f_fpr, f_ovf, f_sav;
8978 tree gpr, fpr, ovf, sav, t;
8979
8980 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
8981 f_fpr = DECL_CHAIN (f_gpr);
8982 f_ovf = DECL_CHAIN (f_fpr);
8983 f_sav = DECL_CHAIN (f_ovf);
8984
8985 valist = build_simple_mem_ref (valist);
8986 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
8987 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
8988 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
8989 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
8990
8991 /* Count number of gp and fp argument registers used. */
8992
8993 n_gpr = crtl->args.info.gprs;
8994 n_fpr = crtl->args.info.fprs;
8995
8996 if (cfun->va_list_gpr_size)
8997 {
8998 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
8999 build_int_cst (NULL_TREE, n_gpr));
9000 TREE_SIDE_EFFECTS (t) = 1;
9001 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9002 }
9003
9004 if (cfun->va_list_fpr_size)
9005 {
9006 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
9007 build_int_cst (NULL_TREE, n_fpr));
9008 TREE_SIDE_EFFECTS (t) = 1;
9009 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9010 }
9011
9012 /* Find the overflow area. */
9013 if (n_gpr + cfun->va_list_gpr_size > GP_ARG_NUM_REG
9014 || n_fpr + cfun->va_list_fpr_size > FP_ARG_NUM_REG)
9015 {
9016 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
9017
9018 off = INTVAL (crtl->args.arg_offset_rtx);
9019 off = off < 0 ? 0 : off;
9020 if (TARGET_DEBUG_ARG)
9021 fprintf (stderr, "va_start: n_gpr = %d, n_fpr = %d off %d\n",
9022 (int)n_gpr, (int)n_fpr, off);
9023
9024 t = fold_build_pointer_plus_hwi (t, off);
9025
9026 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
9027 TREE_SIDE_EFFECTS (t) = 1;
9028 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9029 }
9030
9031 /* Find the register save area. */
9032 if ((cfun->va_list_gpr_size && n_gpr < GP_ARG_NUM_REG)
9033 || (cfun->va_list_fpr_size && n_fpr < FP_ARG_NUM_REG))
9034 {
9035 t = make_tree (TREE_TYPE (sav), return_address_pointer_rtx);
9036 t = fold_build_pointer_plus_hwi (t, -RETURN_REGNUM * UNITS_PER_LONG);
9037
9038 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
9039 TREE_SIDE_EFFECTS (t) = 1;
9040 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9041 }
9042 }
9043
9044 /* Implement va_arg by updating the va_list structure
9045 VALIST as required to retrieve an argument of type
9046 TYPE, and returning that argument.
9047
9048 Generates code equivalent to:
9049
9050 if (integral value) {
9051 if (size <= 4 && args.gpr < 5 ||
9052 size > 4 && args.gpr < 4 )
9053 ret = args.reg_save_area[args.gpr+8]
9054 else
9055 ret = *args.overflow_arg_area++;
9056 } else if (float value) {
9057 if (args.fgpr < 2)
9058 ret = args.reg_save_area[args.fpr+64]
9059 else
9060 ret = *args.overflow_arg_area++;
9061 } else if (aggregate value) {
9062 if (args.gpr < 5)
9063 ret = *args.reg_save_area[args.gpr]
9064 else
9065 ret = **args.overflow_arg_area++;
9066 } */
9067
9068 static tree
9069 s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
9070 gimple_seq *post_p ATTRIBUTE_UNUSED)
9071 {
9072 tree f_gpr, f_fpr, f_ovf, f_sav;
9073 tree gpr, fpr, ovf, sav, reg, t, u;
9074 int indirect_p, size, n_reg, sav_ofs, sav_scale, max_reg;
9075 tree lab_false, lab_over, addr;
9076
9077 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
9078 f_fpr = DECL_CHAIN (f_gpr);
9079 f_ovf = DECL_CHAIN (f_fpr);
9080 f_sav = DECL_CHAIN (f_ovf);
9081
9082 valist = build_va_arg_indirect_ref (valist);
9083 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
9084 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
9085 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
9086
9087 /* The tree for args* cannot be shared between gpr/fpr and ovf since
9088 both appear on a lhs. */
9089 valist = unshare_expr (valist);
9090 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
9091
9092 size = int_size_in_bytes (type);
9093
9094 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
9095 {
9096 if (TARGET_DEBUG_ARG)
9097 {
9098 fprintf (stderr, "va_arg: aggregate type");
9099 debug_tree (type);
9100 }
9101
9102 /* Aggregates are passed by reference. */
9103 indirect_p = 1;
9104 reg = gpr;
9105 n_reg = 1;
9106
9107       /* Kernel stack layout on 31 bit: it is assumed here that no padding
9108          will be added by s390_frame_info because for va_args an even number
9109          of gprs always has to be saved (r15-r2 = 14 regs).  */
9110 sav_ofs = 2 * UNITS_PER_LONG;
9111 sav_scale = UNITS_PER_LONG;
9112 size = UNITS_PER_LONG;
9113 max_reg = GP_ARG_NUM_REG - n_reg;
9114 }
9115 else if (s390_function_arg_float (TYPE_MODE (type), type))
9116 {
9117 if (TARGET_DEBUG_ARG)
9118 {
9119 fprintf (stderr, "va_arg: float type");
9120 debug_tree (type);
9121 }
9122
9123 /* FP args go in FP registers, if present. */
9124 indirect_p = 0;
9125 reg = fpr;
9126 n_reg = 1;
9127 sav_ofs = 16 * UNITS_PER_LONG;
9128 sav_scale = 8;
9129 max_reg = FP_ARG_NUM_REG - n_reg;
9130 }
9131 else
9132 {
9133 if (TARGET_DEBUG_ARG)
9134 {
9135 fprintf (stderr, "va_arg: other type");
9136 debug_tree (type);
9137 }
9138
9139 /* Otherwise into GP registers. */
9140 indirect_p = 0;
9141 reg = gpr;
9142 n_reg = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
9143
9144       /* Kernel stack layout on 31 bit: it is assumed here that no padding
9145          will be added by s390_frame_info because for va_args an even number
9146          of gprs always has to be saved (r15-r2 = 14 regs).  */
9147 sav_ofs = 2 * UNITS_PER_LONG;
9148
9149 if (size < UNITS_PER_LONG)
9150 sav_ofs += UNITS_PER_LONG - size;
9151
9152 sav_scale = UNITS_PER_LONG;
9153 max_reg = GP_ARG_NUM_REG - n_reg;
9154 }
9155
9156 /* Pull the value out of the saved registers ... */
9157
9158 lab_false = create_artificial_label (UNKNOWN_LOCATION);
9159 lab_over = create_artificial_label (UNKNOWN_LOCATION);
9160 addr = create_tmp_var (ptr_type_node, "addr");
9161
9162 t = fold_convert (TREE_TYPE (reg), size_int (max_reg));
9163 t = build2 (GT_EXPR, boolean_type_node, reg, t);
9164 u = build1 (GOTO_EXPR, void_type_node, lab_false);
9165 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
9166 gimplify_and_add (t, pre_p);
9167
9168 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
9169 u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
9170 fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
9171 t = fold_build_pointer_plus (t, u);
9172
9173 gimplify_assign (addr, t, pre_p);
9174
9175 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
9176
9177 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
9178
9179
9180 /* ... Otherwise out of the overflow area. */
9181
9182 t = ovf;
9183 if (size < UNITS_PER_LONG)
9184 t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG - size);
9185
9186 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
9187
9188 gimplify_assign (addr, t, pre_p);
9189
9190 t = fold_build_pointer_plus_hwi (t, size);
9191 gimplify_assign (ovf, t, pre_p);
9192
9193 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
9194
9195
9196 /* Increment register save count. */
9197
9198 u = build2 (PREINCREMENT_EXPR, TREE_TYPE (reg), reg,
9199 fold_convert (TREE_TYPE (reg), size_int (n_reg)));
9200 gimplify_and_add (u, pre_p);
9201
9202 if (indirect_p)
9203 {
9204 t = build_pointer_type_for_mode (build_pointer_type (type),
9205 ptr_mode, true);
9206 addr = fold_convert (t, addr);
9207 addr = build_va_arg_indirect_ref (addr);
9208 }
9209 else
9210 {
9211 t = build_pointer_type_for_mode (type, ptr_mode, true);
9212 addr = fold_convert (t, addr);
9213 }
9214
9215 return build_va_arg_indirect_ref (addr);
9216 }
9217
9218
9219 /* Builtins. */
9220
9221 enum s390_builtin
9222 {
9223 S390_BUILTIN_THREAD_POINTER,
9224 S390_BUILTIN_SET_THREAD_POINTER,
9225
9226 S390_BUILTIN_max
9227 };
9228
9229 static enum insn_code const code_for_builtin_64[S390_BUILTIN_max] = {
9230 CODE_FOR_get_tp_64,
9231 CODE_FOR_set_tp_64
9232 };
9233
9234 static enum insn_code const code_for_builtin_31[S390_BUILTIN_max] = {
9235 CODE_FOR_get_tp_31,
9236 CODE_FOR_set_tp_31
9237 };
9238
9239 static void
9240 s390_init_builtins (void)
9241 {
9242 tree ftype;
9243
9244 ftype = build_function_type_list (ptr_type_node, NULL_TREE);
9245 add_builtin_function ("__builtin_thread_pointer", ftype,
9246 S390_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
9247 NULL, NULL_TREE);
9248
9249 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
9250 add_builtin_function ("__builtin_set_thread_pointer", ftype,
9251 S390_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
9252 NULL, NULL_TREE);
9253 }
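
/* For illustration, the builtins registered above are used from C like
   this (a minimal sketch):

     void *tp = __builtin_thread_pointer ();
     __builtin_set_thread_pointer (tp);  */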
9254
9255 /* Expand an expression EXP that calls a built-in function,
9256 with result going to TARGET if that's convenient
9257 (and in mode MODE if that's convenient).
9258 SUBTARGET may be used as the target for computing one of EXP's operands.
9259 IGNORE is nonzero if the value is to be ignored. */
9260
9261 static rtx
9262 s390_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
9263 enum machine_mode mode ATTRIBUTE_UNUSED,
9264 int ignore ATTRIBUTE_UNUSED)
9265 {
9266 #define MAX_ARGS 2
9267
9268 enum insn_code const *code_for_builtin =
9269 TARGET_64BIT ? code_for_builtin_64 : code_for_builtin_31;
9270
9271 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
9272 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
9273 enum insn_code icode;
9274 rtx op[MAX_ARGS], pat;
9275 int arity;
9276 bool nonvoid;
9277 tree arg;
9278 call_expr_arg_iterator iter;
9279
9280 if (fcode >= S390_BUILTIN_max)
9281 internal_error ("bad builtin fcode");
9282 icode = code_for_builtin[fcode];
9283 if (icode == 0)
9284 internal_error ("bad builtin fcode");
9285
9286 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
9287
9288 arity = 0;
9289 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
9290 {
9291 const struct insn_operand_data *insn_op;
9292
9293 if (arg == error_mark_node)
9294 return NULL_RTX;
9295 if (arity > MAX_ARGS)
9296 return NULL_RTX;
9297
9298 insn_op = &insn_data[icode].operand[arity + nonvoid];
9299
9300 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
9301
9302 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
9303 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
9304 arity++;
9305 }
9306
9307 if (nonvoid)
9308 {
9309 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9310 if (!target
9311 || GET_MODE (target) != tmode
9312 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
9313 target = gen_reg_rtx (tmode);
9314 }
9315
9316 switch (arity)
9317 {
9318 case 0:
9319 pat = GEN_FCN (icode) (target);
9320 break;
9321 case 1:
9322 if (nonvoid)
9323 pat = GEN_FCN (icode) (target, op[0]);
9324 else
9325 pat = GEN_FCN (icode) (op[0]);
9326 break;
9327 case 2:
9328 pat = GEN_FCN (icode) (target, op[0], op[1]);
9329 break;
9330 default:
9331 gcc_unreachable ();
9332 }
9333 if (!pat)
9334 return NULL_RTX;
9335 emit_insn (pat);
9336
9337 if (nonvoid)
9338 return target;
9339 else
9340 return const0_rtx;
9341 }
9342
9343
9344 /* Output assembly code for the trampoline template to
9345 stdio stream FILE.
9346
9347 On S/390, we use gpr 1 internally in the trampoline code;
9348 gpr 0 is used to hold the static chain. */
9349
9350 static void
9351 s390_asm_trampoline_template (FILE *file)
9352 {
9353 rtx op[2];
9354 op[0] = gen_rtx_REG (Pmode, 0);
9355 op[1] = gen_rtx_REG (Pmode, 1);
9356
9357 if (TARGET_64BIT)
9358 {
9359 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
9360 output_asm_insn ("lmg\t%0,%1,14(%1)", op); /* 6 byte */
9361 output_asm_insn ("br\t%1", op); /* 2 byte */
9362 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 10));
9363 }
9364 else
9365 {
9366 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
9367 output_asm_insn ("lm\t%0,%1,6(%1)", op); /* 4 byte */
9368 output_asm_insn ("br\t%1", op); /* 2 byte */
9369 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 8));
9370 }
9371 }
9372
9373 /* Emit RTL insns to initialize the variable parts of a trampoline.
9374 FNADDR is an RTX for the address of the function's pure code.
9375 CXT is an RTX for the static chain value for the function. */
9376
9377 static void
9378 s390_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
9379 {
9380 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
9381 rtx mem;
9382
9383 emit_block_move (m_tramp, assemble_trampoline_template (),
9384 GEN_INT (2 * UNITS_PER_LONG), BLOCK_OP_NORMAL);
9385
9386 mem = adjust_address (m_tramp, Pmode, 2 * UNITS_PER_LONG);
9387 emit_move_insn (mem, cxt);
9388 mem = adjust_address (m_tramp, Pmode, 3 * UNITS_PER_LONG);
9389 emit_move_insn (mem, fnaddr);
9390 }
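
/* Sketch of the resulting trampoline layout, as set up by the template
   above and the two stores in s390_trampoline_init (offsets in bytes,
   expressed in units of UNITS_PER_LONG):

     0 .. 2*UNITS_PER_LONG-1 : code (basr %r1,0; lmg/lm %r0,%r1,...;
                               br %r1; padding)
     2*UNITS_PER_LONG        : static chain value, loaded into %r0
     3*UNITS_PER_LONG        : target function address, loaded into %r1  */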
9391
9392 /* Output assembler code to FILE to increment profiler label # LABELNO
9393 for profiling a function entry. */
9394
9395 void
9396 s390_function_profiler (FILE *file, int labelno)
9397 {
9398 rtx op[7];
9399
9400 char label[128];
9401 ASM_GENERATE_INTERNAL_LABEL (label, "LP", labelno);
9402
9403 fprintf (file, "# function profiler \n");
9404
9405 op[0] = gen_rtx_REG (Pmode, RETURN_REGNUM);
9406 op[1] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
9407 op[1] = gen_rtx_MEM (Pmode, plus_constant (Pmode, op[1], UNITS_PER_LONG));
9408
9409 op[2] = gen_rtx_REG (Pmode, 1);
9410 op[3] = gen_rtx_SYMBOL_REF (Pmode, label);
9411 SYMBOL_REF_FLAGS (op[3]) = SYMBOL_FLAG_LOCAL;
9412
9413 op[4] = gen_rtx_SYMBOL_REF (Pmode, "_mcount");
9414 if (flag_pic)
9415 {
9416 op[4] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[4]), UNSPEC_PLT);
9417 op[4] = gen_rtx_CONST (Pmode, op[4]);
9418 }
9419
9420 if (TARGET_64BIT)
9421 {
9422 output_asm_insn ("stg\t%0,%1", op);
9423 output_asm_insn ("larl\t%2,%3", op);
9424 output_asm_insn ("brasl\t%0,%4", op);
9425 output_asm_insn ("lg\t%0,%1", op);
9426 }
9427 else if (!flag_pic)
9428 {
9429 op[6] = gen_label_rtx ();
9430
9431 output_asm_insn ("st\t%0,%1", op);
9432 output_asm_insn ("bras\t%2,%l6", op);
9433 output_asm_insn (".long\t%4", op);
9434 output_asm_insn (".long\t%3", op);
9435 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
9436 output_asm_insn ("l\t%0,0(%2)", op);
9437 output_asm_insn ("l\t%2,4(%2)", op);
9438 output_asm_insn ("basr\t%0,%0", op);
9439 output_asm_insn ("l\t%0,%1", op);
9440 }
9441 else
9442 {
9443 op[5] = gen_label_rtx ();
9444 op[6] = gen_label_rtx ();
9445
9446 output_asm_insn ("st\t%0,%1", op);
9447 output_asm_insn ("bras\t%2,%l6", op);
9448 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[5]));
9449 output_asm_insn (".long\t%4-%l5", op);
9450 output_asm_insn (".long\t%3-%l5", op);
9451 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
9452 output_asm_insn ("lr\t%0,%2", op);
9453 output_asm_insn ("a\t%0,0(%2)", op);
9454 output_asm_insn ("a\t%2,4(%2)", op);
9455 output_asm_insn ("basr\t%0,%0", op);
9456 output_asm_insn ("l\t%0,%1", op);
9457 }
9458 }
9459
9460 /* Encode symbol attributes (local vs. global, tls model) of a SYMBOL_REF
9461 into its SYMBOL_REF_FLAGS. */
9462
9463 static void
9464 s390_encode_section_info (tree decl, rtx rtl, int first)
9465 {
9466 default_encode_section_info (decl, rtl, first);
9467
9468 if (TREE_CODE (decl) == VAR_DECL)
9469 {
9470 /* If a variable has a forced alignment to < 2 bytes, mark it
9471         with SYMBOL_FLAG_ALIGN1 to prevent it from being used as a LARL
9472         operand.  */
9473 if (DECL_USER_ALIGN (decl) && DECL_ALIGN (decl) < 16)
9474 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_ALIGN1;
9475 if (!DECL_SIZE (decl)
9476 || !DECL_ALIGN (decl)
9477 || !host_integerp (DECL_SIZE (decl), 0)
9478 || (DECL_ALIGN (decl) <= 64
9479 && DECL_ALIGN (decl) != tree_low_cst (DECL_SIZE (decl), 0)))
9480 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
9481 }
9482
9483 /* Literal pool references don't have a decl so they are handled
9484 differently here. We rely on the information in the MEM_ALIGN
9485 entry to decide upon natural alignment. */
9486 if (MEM_P (rtl)
9487 && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
9488 && TREE_CONSTANT_POOL_ADDRESS_P (XEXP (rtl, 0))
9489 && (MEM_ALIGN (rtl) == 0
9490 || GET_MODE_BITSIZE (GET_MODE (rtl)) == 0
9491 || MEM_ALIGN (rtl) < GET_MODE_BITSIZE (GET_MODE (rtl))))
9492 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
9493 }
9494
9495 /* Output thunk to FILE that implements a C++ virtual function call (with
9496 multiple inheritance) to FUNCTION. The thunk adjusts the this pointer
9497 by DELTA, and unless VCALL_OFFSET is zero, applies an additional adjustment
9498 stored at VCALL_OFFSET in the vtable whose address is located at offset 0
9499 relative to the resulting this pointer. */
9500
9501 static void
9502 s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
9503 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
9504 tree function)
9505 {
9506 rtx op[10];
9507 int nonlocal = 0;
9508
9509 /* Make sure unwind info is emitted for the thunk if needed. */
9510 final_start_function (emit_barrier (), file, 1);
9511
9512 /* Operand 0 is the target function. */
9513 op[0] = XEXP (DECL_RTL (function), 0);
9514 if (flag_pic && !SYMBOL_REF_LOCAL_P (op[0]))
9515 {
9516 nonlocal = 1;
9517 op[0] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[0]),
9518 TARGET_64BIT ? UNSPEC_PLT : UNSPEC_GOT);
9519 op[0] = gen_rtx_CONST (Pmode, op[0]);
9520 }
9521
9522 /* Operand 1 is the 'this' pointer. */
9523 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
9524 op[1] = gen_rtx_REG (Pmode, 3);
9525 else
9526 op[1] = gen_rtx_REG (Pmode, 2);
9527
9528 /* Operand 2 is the delta. */
9529 op[2] = GEN_INT (delta);
9530
9531 /* Operand 3 is the vcall_offset. */
9532 op[3] = GEN_INT (vcall_offset);
9533
9534 /* Operand 4 is the temporary register. */
9535 op[4] = gen_rtx_REG (Pmode, 1);
9536
9537 /* Operands 5 to 8 can be used as labels. */
9538 op[5] = NULL_RTX;
9539 op[6] = NULL_RTX;
9540 op[7] = NULL_RTX;
9541 op[8] = NULL_RTX;
9542
9543 /* Operand 9 can be used for temporary register. */
9544 op[9] = NULL_RTX;
9545
9546 /* Generate code. */
9547 if (TARGET_64BIT)
9548 {
9549 /* Setup literal pool pointer if required. */
9550 if ((!DISP_IN_RANGE (delta)
9551 && !CONST_OK_FOR_K (delta)
9552 && !CONST_OK_FOR_Os (delta))
9553 || (!DISP_IN_RANGE (vcall_offset)
9554 && !CONST_OK_FOR_K (vcall_offset)
9555 && !CONST_OK_FOR_Os (vcall_offset)))
9556 {
9557 op[5] = gen_label_rtx ();
9558 output_asm_insn ("larl\t%4,%5", op);
9559 }
9560
9561 /* Add DELTA to this pointer. */
9562 if (delta)
9563 {
9564 if (CONST_OK_FOR_J (delta))
9565 output_asm_insn ("la\t%1,%2(%1)", op);
9566 else if (DISP_IN_RANGE (delta))
9567 output_asm_insn ("lay\t%1,%2(%1)", op);
9568 else if (CONST_OK_FOR_K (delta))
9569 output_asm_insn ("aghi\t%1,%2", op);
9570 else if (CONST_OK_FOR_Os (delta))
9571 output_asm_insn ("agfi\t%1,%2", op);
9572 else
9573 {
9574 op[6] = gen_label_rtx ();
9575 output_asm_insn ("agf\t%1,%6-%5(%4)", op);
9576 }
9577 }
9578
9579 /* Perform vcall adjustment. */
9580 if (vcall_offset)
9581 {
9582 if (DISP_IN_RANGE (vcall_offset))
9583 {
9584 output_asm_insn ("lg\t%4,0(%1)", op);
9585 output_asm_insn ("ag\t%1,%3(%4)", op);
9586 }
9587 else if (CONST_OK_FOR_K (vcall_offset))
9588 {
9589 output_asm_insn ("lghi\t%4,%3", op);
9590 output_asm_insn ("ag\t%4,0(%1)", op);
9591 output_asm_insn ("ag\t%1,0(%4)", op);
9592 }
9593 else if (CONST_OK_FOR_Os (vcall_offset))
9594 {
9595 output_asm_insn ("lgfi\t%4,%3", op);
9596 output_asm_insn ("ag\t%4,0(%1)", op);
9597 output_asm_insn ("ag\t%1,0(%4)", op);
9598 }
9599 else
9600 {
9601 op[7] = gen_label_rtx ();
9602 output_asm_insn ("llgf\t%4,%7-%5(%4)", op);
9603 output_asm_insn ("ag\t%4,0(%1)", op);
9604 output_asm_insn ("ag\t%1,0(%4)", op);
9605 }
9606 }
9607
9608 /* Jump to target. */
9609 output_asm_insn ("jg\t%0", op);
9610
9611 /* Output literal pool if required. */
9612 if (op[5])
9613 {
9614 output_asm_insn (".align\t4", op);
9615 targetm.asm_out.internal_label (file, "L",
9616 CODE_LABEL_NUMBER (op[5]));
9617 }
9618 if (op[6])
9619 {
9620 targetm.asm_out.internal_label (file, "L",
9621 CODE_LABEL_NUMBER (op[6]));
9622 output_asm_insn (".long\t%2", op);
9623 }
9624 if (op[7])
9625 {
9626 targetm.asm_out.internal_label (file, "L",
9627 CODE_LABEL_NUMBER (op[7]));
9628 output_asm_insn (".long\t%3", op);
9629 }
9630 }
9631 else
9632 {
9633 /* Setup base pointer if required. */
9634 if (!vcall_offset
9635 || (!DISP_IN_RANGE (delta)
9636 && !CONST_OK_FOR_K (delta)
9637 && !CONST_OK_FOR_Os (delta))
9638 || (!DISP_IN_RANGE (delta)
9639 && !CONST_OK_FOR_K (vcall_offset)
9640 && !CONST_OK_FOR_Os (vcall_offset)))
9641 {
9642 op[5] = gen_label_rtx ();
9643 output_asm_insn ("basr\t%4,0", op);
9644 targetm.asm_out.internal_label (file, "L",
9645 CODE_LABEL_NUMBER (op[5]));
9646 }
9647
9648 /* Add DELTA to this pointer. */
9649 if (delta)
9650 {
9651 if (CONST_OK_FOR_J (delta))
9652 output_asm_insn ("la\t%1,%2(%1)", op);
9653 else if (DISP_IN_RANGE (delta))
9654 output_asm_insn ("lay\t%1,%2(%1)", op);
9655 else if (CONST_OK_FOR_K (delta))
9656 output_asm_insn ("ahi\t%1,%2", op);
9657 else if (CONST_OK_FOR_Os (delta))
9658 output_asm_insn ("afi\t%1,%2", op);
9659 else
9660 {
9661 op[6] = gen_label_rtx ();
9662 output_asm_insn ("a\t%1,%6-%5(%4)", op);
9663 }
9664 }
9665
9666 /* Perform vcall adjustment. */
9667 if (vcall_offset)
9668 {
9669 if (CONST_OK_FOR_J (vcall_offset))
9670 {
9671 output_asm_insn ("l\t%4,0(%1)", op);
9672 output_asm_insn ("a\t%1,%3(%4)", op);
9673 }
9674 else if (DISP_IN_RANGE (vcall_offset))
9675 {
9676 output_asm_insn ("l\t%4,0(%1)", op);
9677 output_asm_insn ("ay\t%1,%3(%4)", op);
9678 }
9679 else if (CONST_OK_FOR_K (vcall_offset))
9680 {
9681 output_asm_insn ("lhi\t%4,%3", op);
9682 output_asm_insn ("a\t%4,0(%1)", op);
9683 output_asm_insn ("a\t%1,0(%4)", op);
9684 }
9685 else if (CONST_OK_FOR_Os (vcall_offset))
9686 {
9687 output_asm_insn ("iilf\t%4,%3", op);
9688 output_asm_insn ("a\t%4,0(%1)", op);
9689 output_asm_insn ("a\t%1,0(%4)", op);
9690 }
9691 else
9692 {
9693 op[7] = gen_label_rtx ();
9694 output_asm_insn ("l\t%4,%7-%5(%4)", op);
9695 output_asm_insn ("a\t%4,0(%1)", op);
9696 output_asm_insn ("a\t%1,0(%4)", op);
9697 }
9698
9699 /* We had to clobber the base pointer register.
9700 Re-setup the base pointer (with a different base). */
9701 op[5] = gen_label_rtx ();
9702 output_asm_insn ("basr\t%4,0", op);
9703 targetm.asm_out.internal_label (file, "L",
9704 CODE_LABEL_NUMBER (op[5]));
9705 }
9706
9707 /* Jump to target. */
9708 op[8] = gen_label_rtx ();
9709
9710 if (!flag_pic)
9711 output_asm_insn ("l\t%4,%8-%5(%4)", op);
9712 else if (!nonlocal)
9713 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9714 /* We cannot call through .plt, since .plt requires %r12 loaded. */
9715 else if (flag_pic == 1)
9716 {
9717 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9718 output_asm_insn ("l\t%4,%0(%4)", op);
9719 }
9720 else if (flag_pic == 2)
9721 {
9722 op[9] = gen_rtx_REG (Pmode, 0);
9723 output_asm_insn ("l\t%9,%8-4-%5(%4)", op);
9724 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9725 output_asm_insn ("ar\t%4,%9", op);
9726 output_asm_insn ("l\t%4,0(%4)", op);
9727 }
9728
9729 output_asm_insn ("br\t%4", op);
9730
9731 /* Output literal pool. */
9732 output_asm_insn (".align\t4", op);
9733
9734 if (nonlocal && flag_pic == 2)
9735 output_asm_insn (".long\t%0", op);
9736 if (nonlocal)
9737 {
9738 op[0] = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
9739 SYMBOL_REF_FLAGS (op[0]) = SYMBOL_FLAG_LOCAL;
9740 }
9741
9742 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[8]));
9743 if (!flag_pic)
9744 output_asm_insn (".long\t%0", op);
9745 else
9746 output_asm_insn (".long\t%0-%5", op);
9747
9748 if (op[6])
9749 {
9750 targetm.asm_out.internal_label (file, "L",
9751 CODE_LABEL_NUMBER (op[6]));
9752 output_asm_insn (".long\t%2", op);
9753 }
9754 if (op[7])
9755 {
9756 targetm.asm_out.internal_label (file, "L",
9757 CODE_LABEL_NUMBER (op[7]));
9758 output_asm_insn (".long\t%3", op);
9759 }
9760 }
9761 final_end_function ();
9762 }
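
/* For illustration only (not a literal dump of the output above): on a
   64-bit target a thunk with a small positive DELTA and no VCALL_OFFSET
   reduces to roughly

       la   %r2,<delta>(%r2)
       jg   <function>

   while a DELTA or VCALL_OFFSET that does not fit any immediate form
   falls back to the literal-pool sequences emitted above.  */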
9763
9764 static bool
9765 s390_valid_pointer_mode (enum machine_mode mode)
9766 {
9767 return (mode == SImode || (TARGET_64BIT && mode == DImode));
9768 }
9769
9770 /* Checks whether the given CALL_EXPR would use a call-saved
9771    register.  This is used to decide whether sibling call
9772 optimization could be performed on the respective function
9773 call. */
9774
9775 static bool
9776 s390_call_saved_register_used (tree call_expr)
9777 {
9778 CUMULATIVE_ARGS cum_v;
9779 cumulative_args_t cum;
9780 tree parameter;
9781 enum machine_mode mode;
9782 tree type;
9783 rtx parm_rtx;
9784 int reg, i;
9785
9786 INIT_CUMULATIVE_ARGS (cum_v, NULL, NULL, 0, 0);
9787 cum = pack_cumulative_args (&cum_v);
9788
9789 for (i = 0; i < call_expr_nargs (call_expr); i++)
9790 {
9791 parameter = CALL_EXPR_ARG (call_expr, i);
9792 gcc_assert (parameter);
9793
9794 /* For an undeclared variable passed as parameter we will get
9795 an ERROR_MARK node here. */
9796 if (TREE_CODE (parameter) == ERROR_MARK)
9797 return true;
9798
9799 type = TREE_TYPE (parameter);
9800 gcc_assert (type);
9801
9802 mode = TYPE_MODE (type);
9803 gcc_assert (mode);
9804
9805 if (pass_by_reference (&cum_v, mode, type, true))
9806 {
9807 mode = Pmode;
9808 type = build_pointer_type (type);
9809 }
9810
9811 parm_rtx = s390_function_arg (cum, mode, type, 0);
9812
9813 s390_function_arg_advance (cum, mode, type, 0);
9814
9815 if (!parm_rtx)
9816 continue;
9817
9818 if (REG_P (parm_rtx))
9819 {
9820 for (reg = 0;
9821 reg < HARD_REGNO_NREGS (REGNO (parm_rtx), GET_MODE (parm_rtx));
9822 reg++)
9823 if (!call_used_regs[reg + REGNO (parm_rtx)])
9824 return true;
9825 }
9826
9827 if (GET_CODE (parm_rtx) == PARALLEL)
9828 {
9829 int i;
9830
9831 for (i = 0; i < XVECLEN (parm_rtx, 0); i++)
9832 {
9833 rtx r = XEXP (XVECEXP (parm_rtx, 0, i), 0);
9834
9835 gcc_assert (REG_P (r));
9836
9837 for (reg = 0;
9838 reg < HARD_REGNO_NREGS (REGNO (r), GET_MODE (r));
9839 reg++)
9840 if (!call_used_regs[reg + REGNO (r)])
9841 return true;
9842 }
9843 }
9844
9845 }
9846 return false;
9847 }
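
/* Example, for illustration only: with integer arguments passed in
   %r2..%r6, a call such as

       foo (a, b, c, d, e);

   (five word-sized arguments) places E in %r6.  Since %r6 is not in
   call_used_regs, the function above returns true and the call is not
   considered for sibling-call optimization.  */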
9848
9849 /* Return true if the given call expression can be
9850 turned into a sibling call.
9851 DECL holds the declaration of the function to be called whereas
9852 EXP is the call expression itself. */
9853
9854 static bool
9855 s390_function_ok_for_sibcall (tree decl, tree exp)
9856 {
9857 /* The TPF epilogue uses register 1. */
9858 if (TARGET_TPF_PROFILING)
9859 return false;
9860
9861   /* The 31 bit PLT code uses register 12 (GOT pointer - call-saved)
9862 which would have to be restored before the sibcall. */
9863 if (!TARGET_64BIT && flag_pic && decl && !targetm.binds_local_p (decl))
9864 return false;
9865
9866 /* Register 6 on s390 is available as an argument register but unfortunately
9867 "caller saved". This makes functions needing this register for arguments
9868 not suitable for sibcalls. */
9869 return !s390_call_saved_register_used (exp);
9870 }
9871
9872 /* Return the fixed registers used for condition codes. */
9873
9874 static bool
9875 s390_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
9876 {
9877 *p1 = CC_REGNUM;
9878 *p2 = INVALID_REGNUM;
9879
9880 return true;
9881 }
9882
9883 /* This function is used by the call expanders of the machine description.
9884 It emits the call insn itself together with the necessary operations
9885 to adjust the target address and returns the emitted insn.
9886 ADDR_LOCATION is the target address rtx
9887 TLS_CALL the location of the thread-local symbol
9888 RESULT_REG the register where the result of the call should be stored
9889 RETADDR_REG the register where the return address should be stored
9890 If this parameter is NULL_RTX the call is considered
9891 to be a sibling call. */
9892
9893 rtx
9894 s390_emit_call (rtx addr_location, rtx tls_call, rtx result_reg,
9895 rtx retaddr_reg)
9896 {
9897 bool plt_call = false;
9898 rtx insn;
9899 rtx call;
9900 rtx clobber;
9901 rtvec vec;
9902
9903 /* Direct function calls need special treatment. */
9904 if (GET_CODE (addr_location) == SYMBOL_REF)
9905 {
9906 /* When calling a global routine in PIC mode, we must
9907 replace the symbol itself with the PLT stub. */
9908 if (flag_pic && !SYMBOL_REF_LOCAL_P (addr_location))
9909 {
9910 if (retaddr_reg != NULL_RTX)
9911 {
9912 addr_location = gen_rtx_UNSPEC (Pmode,
9913 gen_rtvec (1, addr_location),
9914 UNSPEC_PLT);
9915 addr_location = gen_rtx_CONST (Pmode, addr_location);
9916 plt_call = true;
9917 }
9918 else
9919 /* For -fpic code the PLT entries might use r12 which is
9920 call-saved. Therefore we cannot do a sibcall when
9921 calling directly using a symbol ref. When reaching
9922 this point we decided (in s390_function_ok_for_sibcall)
9923 to do a sibcall for a function pointer but one of the
9924 optimizers was able to get rid of the function pointer
9925 by propagating the symbol ref into the call. This
9926 optimization is illegal for S/390 so we turn the direct
9927 	     call into an indirect call again. */
9928 addr_location = force_reg (Pmode, addr_location);
9929 }
9930
9931 /* Unless we can use the bras(l) insn, force the
9932 routine address into a register. */
9933 if (!TARGET_SMALL_EXEC && !TARGET_CPU_ZARCH)
9934 {
9935 if (flag_pic)
9936 addr_location = legitimize_pic_address (addr_location, 0);
9937 else
9938 addr_location = force_reg (Pmode, addr_location);
9939 }
9940 }
9941
9942 /* If it is already an indirect call or the code above moved the
9943      SYMBOL_REF to somewhere else, make sure the address can be found in
9944 register 1. */
9945 if (retaddr_reg == NULL_RTX
9946 && GET_CODE (addr_location) != SYMBOL_REF
9947 && !plt_call)
9948 {
9949 emit_move_insn (gen_rtx_REG (Pmode, SIBCALL_REGNUM), addr_location);
9950 addr_location = gen_rtx_REG (Pmode, SIBCALL_REGNUM);
9951 }
9952
9953 addr_location = gen_rtx_MEM (QImode, addr_location);
9954 call = gen_rtx_CALL (VOIDmode, addr_location, const0_rtx);
9955
9956 if (result_reg != NULL_RTX)
9957 call = gen_rtx_SET (VOIDmode, result_reg, call);
9958
9959 if (retaddr_reg != NULL_RTX)
9960 {
9961 clobber = gen_rtx_CLOBBER (VOIDmode, retaddr_reg);
9962
9963 if (tls_call != NULL_RTX)
9964 vec = gen_rtvec (3, call, clobber,
9965 gen_rtx_USE (VOIDmode, tls_call));
9966 else
9967 vec = gen_rtvec (2, call, clobber);
9968
9969 call = gen_rtx_PARALLEL (VOIDmode, vec);
9970 }
9971
9972 insn = emit_call_insn (call);
9973
9974 /* 31-bit PLT stubs and tls calls use the GOT register implicitly. */
9975 if ((!TARGET_64BIT && plt_call) || tls_call != NULL_RTX)
9976 {
9977 /* s390_function_ok_for_sibcall should
9978 have denied sibcalls in this case. */
9979 gcc_assert (retaddr_reg != NULL_RTX);
9980 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, 12));
9981 }
9982 return insn;
9983 }
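
/* Schematically, a normal call with a return value constructed above has
   the shape

     (parallel [(set (reg result) (call (mem:QI addr) (const_int 0)))
                (clobber (reg retaddr))])

   optionally extended by a (use tls_call) for TLS calls, while a sibling
   call (RETADDR_REG == NULL_RTX) is emitted without the PARALLEL
   wrapper.  */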
9984
9985 /* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
9986
9987 static void
9988 s390_conditional_register_usage (void)
9989 {
9990 int i;
9991
9992 if (flag_pic)
9993 {
9994 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
9995 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
9996 }
9997 if (TARGET_CPU_ZARCH)
9998 {
9999 fixed_regs[BASE_REGNUM] = 0;
10000 call_used_regs[BASE_REGNUM] = 0;
10001 fixed_regs[RETURN_REGNUM] = 0;
10002 call_used_regs[RETURN_REGNUM] = 0;
10003 }
10004 if (TARGET_64BIT)
10005 {
10006 for (i = 24; i < 32; i++)
10007 call_used_regs[i] = call_really_used_regs[i] = 0;
10008 }
10009 else
10010 {
10011 for (i = 18; i < 20; i++)
10012 call_used_regs[i] = call_really_used_regs[i] = 0;
10013 }
10014
10015 if (TARGET_SOFT_FLOAT)
10016 {
10017 for (i = 16; i < 32; i++)
10018 call_used_regs[i] = fixed_regs[i] = 1;
10019 }
10020 }
10021
10022 /* Corresponding function to eh_return expander. */
10023
10024 static GTY(()) rtx s390_tpf_eh_return_symbol;
10025 void
10026 s390_emit_tpf_eh_return (rtx target)
10027 {
10028 rtx insn, reg;
10029
10030 if (!s390_tpf_eh_return_symbol)
10031 s390_tpf_eh_return_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tpf_eh_return");
10032
10033 reg = gen_rtx_REG (Pmode, 2);
10034
10035 emit_move_insn (reg, target);
10036 insn = s390_emit_call (s390_tpf_eh_return_symbol, NULL_RTX, reg,
10037 gen_rtx_REG (Pmode, RETURN_REGNUM));
10038 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
10039
10040 emit_move_insn (EH_RETURN_HANDLER_RTX, reg);
10041 }
10042
10043 /* Rework the prologue/epilogue to avoid saving/restoring
10044 registers unnecessarily. */
10045
10046 static void
10047 s390_optimize_prologue (void)
10048 {
10049 rtx insn, new_insn, next_insn;
10050
10051 /* Do a final recompute of the frame-related data. */
10052
10053 s390_update_frame_layout ();
10054
10055 /* If all special registers are in fact used, there's nothing we
10056 can do, so no point in walking the insn list. */
10057
10058 if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
10059 && cfun_frame_layout.last_save_gpr >= BASE_REGNUM
10060 && (TARGET_CPU_ZARCH
10061 || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
10062 && cfun_frame_layout.last_save_gpr >= RETURN_REGNUM)))
10063 return;
10064
10065 /* Search for prologue/epilogue insns and replace them. */
10066
10067 for (insn = get_insns (); insn; insn = next_insn)
10068 {
10069 int first, last, off;
10070 rtx set, base, offset;
10071
10072 next_insn = NEXT_INSN (insn);
10073
10074 if (GET_CODE (insn) != INSN)
10075 continue;
10076
10077 if (GET_CODE (PATTERN (insn)) == PARALLEL
10078 && store_multiple_operation (PATTERN (insn), VOIDmode))
10079 {
10080 set = XVECEXP (PATTERN (insn), 0, 0);
10081 first = REGNO (SET_SRC (set));
10082 last = first + XVECLEN (PATTERN (insn), 0) - 1;
10083 offset = const0_rtx;
10084 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
10085 off = INTVAL (offset);
10086
10087 if (GET_CODE (base) != REG || off < 0)
10088 continue;
10089 if (cfun_frame_layout.first_save_gpr != -1
10090 && (cfun_frame_layout.first_save_gpr < first
10091 || cfun_frame_layout.last_save_gpr > last))
10092 continue;
10093 if (REGNO (base) != STACK_POINTER_REGNUM
10094 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10095 continue;
10096 if (first > BASE_REGNUM || last < BASE_REGNUM)
10097 continue;
10098
10099 if (cfun_frame_layout.first_save_gpr != -1)
10100 {
10101 new_insn = save_gprs (base,
10102 off + (cfun_frame_layout.first_save_gpr
10103 - first) * UNITS_PER_LONG,
10104 cfun_frame_layout.first_save_gpr,
10105 cfun_frame_layout.last_save_gpr);
10106 new_insn = emit_insn_before (new_insn, insn);
10107 INSN_ADDRESSES_NEW (new_insn, -1);
10108 }
10109
10110 remove_insn (insn);
10111 continue;
10112 }
10113
10114 if (cfun_frame_layout.first_save_gpr == -1
10115 && GET_CODE (PATTERN (insn)) == SET
10116 && GET_CODE (SET_SRC (PATTERN (insn))) == REG
10117 && (REGNO (SET_SRC (PATTERN (insn))) == BASE_REGNUM
10118 || (!TARGET_CPU_ZARCH
10119 && REGNO (SET_SRC (PATTERN (insn))) == RETURN_REGNUM))
10120 && GET_CODE (SET_DEST (PATTERN (insn))) == MEM)
10121 {
10122 set = PATTERN (insn);
10123 first = REGNO (SET_SRC (set));
10124 offset = const0_rtx;
10125 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
10126 off = INTVAL (offset);
10127
10128 if (GET_CODE (base) != REG || off < 0)
10129 continue;
10130 if (REGNO (base) != STACK_POINTER_REGNUM
10131 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10132 continue;
10133
10134 remove_insn (insn);
10135 continue;
10136 }
10137
10138 if (GET_CODE (PATTERN (insn)) == PARALLEL
10139 && load_multiple_operation (PATTERN (insn), VOIDmode))
10140 {
10141 set = XVECEXP (PATTERN (insn), 0, 0);
10142 first = REGNO (SET_DEST (set));
10143 last = first + XVECLEN (PATTERN (insn), 0) - 1;
10144 offset = const0_rtx;
10145 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
10146 off = INTVAL (offset);
10147
10148 if (GET_CODE (base) != REG || off < 0)
10149 continue;
10150 if (cfun_frame_layout.first_restore_gpr != -1
10151 && (cfun_frame_layout.first_restore_gpr < first
10152 || cfun_frame_layout.last_restore_gpr > last))
10153 continue;
10154 if (REGNO (base) != STACK_POINTER_REGNUM
10155 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10156 continue;
10157 if (first > BASE_REGNUM || last < BASE_REGNUM)
10158 continue;
10159
10160 if (cfun_frame_layout.first_restore_gpr != -1)
10161 {
10162 new_insn = restore_gprs (base,
10163 off + (cfun_frame_layout.first_restore_gpr
10164 - first) * UNITS_PER_LONG,
10165 cfun_frame_layout.first_restore_gpr,
10166 cfun_frame_layout.last_restore_gpr);
10167 new_insn = emit_insn_before (new_insn, insn);
10168 INSN_ADDRESSES_NEW (new_insn, -1);
10169 }
10170
10171 remove_insn (insn);
10172 continue;
10173 }
10174
10175 if (cfun_frame_layout.first_restore_gpr == -1
10176 && GET_CODE (PATTERN (insn)) == SET
10177 && GET_CODE (SET_DEST (PATTERN (insn))) == REG
10178 && (REGNO (SET_DEST (PATTERN (insn))) == BASE_REGNUM
10179 || (!TARGET_CPU_ZARCH
10180 && REGNO (SET_DEST (PATTERN (insn))) == RETURN_REGNUM))
10181 && GET_CODE (SET_SRC (PATTERN (insn))) == MEM)
10182 {
10183 set = PATTERN (insn);
10184 first = REGNO (SET_DEST (set));
10185 offset = const0_rtx;
10186 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
10187 off = INTVAL (offset);
10188
10189 if (GET_CODE (base) != REG || off < 0)
10190 continue;
10191 if (REGNO (base) != STACK_POINTER_REGNUM
10192 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10193 continue;
10194
10195 remove_insn (insn);
10196 continue;
10197 }
10198 }
10199 }
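
/* For illustration: if the conservatively generated prologue stored

       stmg  %r6,%r15,48(%r15)

   but the final frame layout shows that only %r14 and %r15 need saving,
   the code above replaces the store multiple by

       stmg  %r14,%r15,112(%r15)

   with the offset adjusted by (first_save_gpr - first) * UNITS_PER_LONG
   (48 + (14 - 6) * 8 = 112 on 64-bit), or removes it entirely when no
   save is needed at all.  */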
10200
10201 /* On z10 and later the dynamic branch prediction must see the
10202    backward jump within a certain window.  If not, it falls back to
10203 the static prediction. This function rearranges the loop backward
10204 branch in a way which makes the static prediction always correct.
10205 The function returns true if it added an instruction. */
10206 static bool
10207 s390_fix_long_loop_prediction (rtx insn)
10208 {
10209 rtx set = single_set (insn);
10210 rtx code_label, label_ref, new_label;
10211 rtx uncond_jump;
10212 rtx cur_insn;
10213 rtx tmp;
10214 int distance;
10215
10216 /* This will exclude branch on count and branch on index patterns
10217 since these are correctly statically predicted. */
10218 if (!set
10219 || SET_DEST (set) != pc_rtx
10220 || GET_CODE (SET_SRC(set)) != IF_THEN_ELSE)
10221 return false;
10222
10223 label_ref = (GET_CODE (XEXP (SET_SRC (set), 1)) == LABEL_REF ?
10224 XEXP (SET_SRC (set), 1) : XEXP (SET_SRC (set), 2));
10225
10226 gcc_assert (GET_CODE (label_ref) == LABEL_REF);
10227
10228 code_label = XEXP (label_ref, 0);
10229
10230 if (INSN_ADDRESSES (INSN_UID (code_label)) == -1
10231 || INSN_ADDRESSES (INSN_UID (insn)) == -1
10232 || (INSN_ADDRESSES (INSN_UID (insn))
10233 - INSN_ADDRESSES (INSN_UID (code_label)) < PREDICT_DISTANCE))
10234 return false;
10235
10236 for (distance = 0, cur_insn = PREV_INSN (insn);
10237 distance < PREDICT_DISTANCE - 6;
10238 distance += get_attr_length (cur_insn), cur_insn = PREV_INSN (cur_insn))
10239 if (!cur_insn || JUMP_P (cur_insn) || LABEL_P (cur_insn))
10240 return false;
10241
10242 new_label = gen_label_rtx ();
10243 uncond_jump = emit_jump_insn_after (
10244 gen_rtx_SET (VOIDmode, pc_rtx,
10245 gen_rtx_LABEL_REF (VOIDmode, code_label)),
10246 insn);
10247 emit_label_after (new_label, uncond_jump);
10248
10249 tmp = XEXP (SET_SRC (set), 1);
10250 XEXP (SET_SRC (set), 1) = XEXP (SET_SRC (set), 2);
10251 XEXP (SET_SRC (set), 2) = tmp;
10252 INSN_CODE (insn) = -1;
10253
10254 XEXP (label_ref, 0) = new_label;
10255 JUMP_LABEL (insn) = new_label;
10256 JUMP_LABEL (uncond_jump) = code_label;
10257
10258 return true;
10259 }
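
/* The rearrangement above, shown schematically: a conditional backward
   branch whose target is more than PREDICT_DISTANCE away, e.g.

       jne  .Lloop_head          # long backward conditional branch

   is rewritten as

       je   .Lnew_label          # branch sense inverted, short forward
       j    .Lloop_head          # unconditional backward jump
     .Lnew_label:

   so that the static prediction (backward jump taken, forward
   conditional branch not taken) is always correct for the loop.  */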
10260
10261 /* Returns 1 if INSN reads the value of REG for purposes not related
10262 to addressing of memory, and 0 otherwise. */
10263 static int
10264 s390_non_addr_reg_read_p (rtx reg, rtx insn)
10265 {
10266 return reg_referenced_p (reg, PATTERN (insn))
10267 && !reg_used_in_mem_p (REGNO (reg), PATTERN (insn));
10268 }
10269
10270 /* Starting from INSN find_cond_jump looks downwards in the insn
10271 stream for a single jump insn which is the last user of the
10272 condition code set in INSN. */
10273 static rtx
10274 find_cond_jump (rtx insn)
10275 {
10276 for (; insn; insn = NEXT_INSN (insn))
10277 {
10278 rtx ite, cc;
10279
10280 if (LABEL_P (insn))
10281 break;
10282
10283 if (!JUMP_P (insn))
10284 {
10285 if (reg_mentioned_p (gen_rtx_REG (CCmode, CC_REGNUM), insn))
10286 break;
10287 continue;
10288 }
10289
10290 /* This will be triggered by a return. */
10291 if (GET_CODE (PATTERN (insn)) != SET)
10292 break;
10293
10294 gcc_assert (SET_DEST (PATTERN (insn)) == pc_rtx);
10295 ite = SET_SRC (PATTERN (insn));
10296
10297 if (GET_CODE (ite) != IF_THEN_ELSE)
10298 break;
10299
10300 cc = XEXP (XEXP (ite, 0), 0);
10301 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc)))
10302 break;
10303
10304 if (find_reg_note (insn, REG_DEAD, cc))
10305 return insn;
10306 break;
10307 }
10308
10309 return NULL_RTX;
10310 }
10311
10312 /* Swap the condition in COND and the operands in OP0 and OP1 so that
10313 the semantics does not change. If NULL_RTX is passed as COND the
10314 function tries to find the conditional jump starting with INSN. */
10315 static void
10316 s390_swap_cmp (rtx cond, rtx *op0, rtx *op1, rtx insn)
10317 {
10318 rtx tmp = *op0;
10319
10320 if (cond == NULL_RTX)
10321 {
10322 rtx jump = find_cond_jump (NEXT_INSN (insn));
10323 jump = jump ? single_set (jump) : NULL_RTX;
10324
10325 if (jump == NULL_RTX)
10326 return;
10327
10328 cond = XEXP (XEXP (jump, 1), 0);
10329 }
10330
10331 *op0 = *op1;
10332 *op1 = tmp;
10333 PUT_CODE (cond, swap_condition (GET_CODE (cond)));
10334 }
10335
10336 /* On z10, instructions of the compare-and-branch family have the
10337 property to access the register occurring as second operand with
10338 its bits complemented. If such a compare is grouped with a second
10339 instruction that accesses the same register non-complemented, and
10340 if that register's value is delivered via a bypass, then the
10341 pipeline recycles, thereby causing significant performance decline.
10342 This function locates such situations and exchanges the two
10343    operands of the compare.  The function returns true whenever it
10344 added an insn. */
10345 static bool
10346 s390_z10_optimize_cmp (rtx insn)
10347 {
10348 rtx prev_insn, next_insn;
10349 bool insn_added_p = false;
10350 rtx cond, *op0, *op1;
10351
10352 if (GET_CODE (PATTERN (insn)) == PARALLEL)
10353 {
10354 /* Handle compare and branch and branch on count
10355 instructions. */
10356 rtx pattern = single_set (insn);
10357
10358 if (!pattern
10359 || SET_DEST (pattern) != pc_rtx
10360 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE)
10361 return false;
10362
10363 cond = XEXP (SET_SRC (pattern), 0);
10364 op0 = &XEXP (cond, 0);
10365 op1 = &XEXP (cond, 1);
10366 }
10367 else if (GET_CODE (PATTERN (insn)) == SET)
10368 {
10369 rtx src, dest;
10370
10371 /* Handle normal compare instructions. */
10372 src = SET_SRC (PATTERN (insn));
10373 dest = SET_DEST (PATTERN (insn));
10374
10375 if (!REG_P (dest)
10376 || !CC_REGNO_P (REGNO (dest))
10377 || GET_CODE (src) != COMPARE)
10378 return false;
10379
10380 /* s390_swap_cmp will try to find the conditional
10381 jump when passing NULL_RTX as condition. */
10382 cond = NULL_RTX;
10383 op0 = &XEXP (src, 0);
10384 op1 = &XEXP (src, 1);
10385 }
10386 else
10387 return false;
10388
10389 if (!REG_P (*op0) || !REG_P (*op1))
10390 return false;
10391
10392 if (GET_MODE_CLASS (GET_MODE (*op0)) != MODE_INT)
10393 return false;
10394
10395 /* Swap the COMPARE arguments and its mask if there is a
10396 conflicting access in the previous insn. */
10397 prev_insn = prev_active_insn (insn);
10398 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
10399 && reg_referenced_p (*op1, PATTERN (prev_insn)))
10400 s390_swap_cmp (cond, op0, op1, insn);
10401
10402 /* Check if there is a conflict with the next insn. If there
10403 was no conflict with the previous insn, then swap the
10404 COMPARE arguments and its mask. If we already swapped
10405 the operands, or if swapping them would cause a conflict
10406 with the previous insn, issue a NOP after the COMPARE in
10407      order to separate the two instructions. */
10408 next_insn = next_active_insn (insn);
10409 if (next_insn != NULL_RTX && INSN_P (next_insn)
10410 && s390_non_addr_reg_read_p (*op1, next_insn))
10411 {
10412 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
10413 && s390_non_addr_reg_read_p (*op0, prev_insn))
10414 {
10415 if (REGNO (*op1) == 0)
10416 emit_insn_after (gen_nop1 (), insn);
10417 else
10418 emit_insn_after (gen_nop (), insn);
10419 insn_added_p = true;
10420 }
10421 else
10422 s390_swap_cmp (cond, op0, op1, insn);
10423 }
10424 return insn_added_p;
10425 }
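
/* Illustrative example of the hazard handled above: in a group like

       crj   %r1,%r2,...         # compare and branch, 2nd operand %r2
       lr    %r3,%r2             # reads %r2 non-complemented

   the operands (and the condition) of the compare are swapped when that
   does not create a conflict with the previous insn; otherwise a NOP is
   inserted after the compare to separate the two accesses to %r2.  */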
10426
10427 /* Perform machine-dependent processing. */
10428
10429 static void
10430 s390_reorg (void)
10431 {
10432 bool pool_overflow = false;
10433
10434 /* Make sure all splits have been performed; splits after
10435 machine_dependent_reorg might confuse insn length counts. */
10436 split_all_insns_noflow ();
10437
10438 /* Install the main literal pool and the associated base
10439 register load insns.
10440
10441 In addition, there are two problematic situations we need
10442 to correct:
10443
10444 - the literal pool might be > 4096 bytes in size, so that
10445 some of its elements cannot be directly accessed
10446
10447 - a branch target might be > 64K away from the branch, so that
10448 it is not possible to use a PC-relative instruction.
10449
10450 To fix those, we split the single literal pool into multiple
10451 pool chunks, reloading the pool base register at various
10452 points throughout the function to ensure it always points to
10453 the pool chunk the following code expects, and / or replace
10454 PC-relative branches by absolute branches.
10455
10456 However, the two problems are interdependent: splitting the
10457 literal pool can move a branch further away from its target,
10458 causing the 64K limit to overflow, and on the other hand,
10459 replacing a PC-relative branch by an absolute branch means
10460 we need to put the branch target address into the literal
10461 pool, possibly causing it to overflow.
10462
10463 So, we loop trying to fix up both problems until we manage
10464 to satisfy both conditions at the same time. Note that the
10465 loop is guaranteed to terminate as every pass of the loop
10466 strictly decreases the total number of PC-relative branches
10467 in the function. (This is not completely true as there
10468 might be branch-over-pool insns introduced by chunkify_start.
10469 Those never need to be split however.) */
10470
10471 for (;;)
10472 {
10473 struct constant_pool *pool = NULL;
10474
10475 /* Collect the literal pool. */
10476 if (!pool_overflow)
10477 {
10478 pool = s390_mainpool_start ();
10479 if (!pool)
10480 pool_overflow = true;
10481 }
10482
10483 /* If literal pool overflowed, start to chunkify it. */
10484 if (pool_overflow)
10485 pool = s390_chunkify_start ();
10486
10487 /* Split out-of-range branches. If this has created new
10488 literal pool entries, cancel current chunk list and
10489 recompute it. zSeries machines have large branch
10490 instructions, so we never need to split a branch. */
10491 if (!TARGET_CPU_ZARCH && s390_split_branches ())
10492 {
10493 if (pool_overflow)
10494 s390_chunkify_cancel (pool);
10495 else
10496 s390_mainpool_cancel (pool);
10497
10498 continue;
10499 }
10500
10501 /* If we made it up to here, both conditions are satisfied.
10502 Finish up literal pool related changes. */
10503 if (pool_overflow)
10504 s390_chunkify_finish (pool);
10505 else
10506 s390_mainpool_finish (pool);
10507
10508 /* We're done splitting branches. */
10509 cfun->machine->split_branches_pending_p = false;
10510 break;
10511 }
10512
10513 /* Generate out-of-pool execute target insns. */
10514 if (TARGET_CPU_ZARCH)
10515 {
10516 rtx insn, label, target;
10517
10518 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10519 {
10520 label = s390_execute_label (insn);
10521 if (!label)
10522 continue;
10523
10524 gcc_assert (label != const0_rtx);
10525
10526 target = emit_label (XEXP (label, 0));
10527 INSN_ADDRESSES_NEW (target, -1);
10528
10529 target = emit_insn (s390_execute_target (insn));
10530 INSN_ADDRESSES_NEW (target, -1);
10531 }
10532 }
10533
10534 /* Try to optimize prologue and epilogue further. */
10535 s390_optimize_prologue ();
10536
10537 /* Walk over the insns and do some >=z10 specific changes. */
10538 if (s390_tune == PROCESSOR_2097_Z10
10539 || s390_tune == PROCESSOR_2817_Z196)
10540 {
10541 rtx insn;
10542 bool insn_added_p = false;
10543
10544 /* The insn lengths and addresses have to be up to date for the
10545 following manipulations. */
10546 shorten_branches (get_insns ());
10547
10548 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10549 {
10550 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
10551 continue;
10552
10553 if (JUMP_P (insn))
10554 insn_added_p |= s390_fix_long_loop_prediction (insn);
10555
10556 if ((GET_CODE (PATTERN (insn)) == PARALLEL
10557 || GET_CODE (PATTERN (insn)) == SET)
10558 && s390_tune == PROCESSOR_2097_Z10)
10559 insn_added_p |= s390_z10_optimize_cmp (insn);
10560 }
10561
10562 /* Adjust branches if we added new instructions. */
10563 if (insn_added_p)
10564 shorten_branches (get_insns ());
10565 }
10566 }
10567
10568 /* Return true if INSN is a fp load insn writing register REGNO. */
10569 static inline bool
10570 s390_fpload_toreg (rtx insn, unsigned int regno)
10571 {
10572 rtx set;
10573 enum attr_type flag = s390_safe_attr_type (insn);
10574
10575 if (flag != TYPE_FLOADSF && flag != TYPE_FLOADDF)
10576 return false;
10577
10578 set = single_set (insn);
10579
10580 if (set == NULL_RTX)
10581 return false;
10582
10583 if (!REG_P (SET_DEST (set)) || !MEM_P (SET_SRC (set)))
10584 return false;
10585
10586 if (REGNO (SET_DEST (set)) != regno)
10587 return false;
10588
10589 return true;
10590 }
10591
10592 /* This value describes the distance to be avoided between an
10593    arithmetic fp instruction and an fp load writing the same register.
10594 Z10_EARLYLOAD_DISTANCE - 1 as well as Z10_EARLYLOAD_DISTANCE + 1 is
10595 fine but the exact value has to be avoided. Otherwise the FP
10596 pipeline will throw an exception causing a major penalty. */
10597 #define Z10_EARLYLOAD_DISTANCE 7
10598
10599 /* Rearrange the ready list in order to avoid the situation described
10600 for Z10_EARLYLOAD_DISTANCE. A problematic load instruction is
10601 moved to the very end of the ready list. */
10602 static void
10603 s390_z10_prevent_earlyload_conflicts (rtx *ready, int *nready_p)
10604 {
10605 unsigned int regno;
10606 int nready = *nready_p;
10607 rtx tmp;
10608 int i;
10609 rtx insn;
10610 rtx set;
10611 enum attr_type flag;
10612 int distance;
10613
10614 /* Skip DISTANCE - 1 active insns. */
10615 for (insn = last_scheduled_insn, distance = Z10_EARLYLOAD_DISTANCE - 1;
10616 distance > 0 && insn != NULL_RTX;
10617 distance--, insn = prev_active_insn (insn))
10618 if (CALL_P (insn) || JUMP_P (insn))
10619 return;
10620
10621 if (insn == NULL_RTX)
10622 return;
10623
10624 set = single_set (insn);
10625
10626 if (set == NULL_RTX || !REG_P (SET_DEST (set))
10627 || GET_MODE_CLASS (GET_MODE (SET_DEST (set))) != MODE_FLOAT)
10628 return;
10629
10630 flag = s390_safe_attr_type (insn);
10631
10632 if (flag == TYPE_FLOADSF || flag == TYPE_FLOADDF)
10633 return;
10634
10635 regno = REGNO (SET_DEST (set));
10636 i = nready - 1;
10637
10638 while (!s390_fpload_toreg (ready[i], regno) && i > 0)
10639 i--;
10640
10641 if (!i)
10642 return;
10643
10644 tmp = ready[i];
10645 memmove (&ready[1], &ready[0], sizeof (rtx) * i);
10646 ready[0] = tmp;
10647 }
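
/* For illustration: if the insn issued Z10_EARLYLOAD_DISTANCE - 1 active
   insns ago was an FP arithmetic instruction setting some FP register,
   and the ready list still contains a load into that same register, the
   load is moved to ready[0] so that it is issued as late as possible,
   avoiding the exact distance that triggers the pipeline exception.  */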
10648
10649 /* This function is called via hook TARGET_SCHED_REORDER before
10650 issuing one insn from list READY which contains *NREADYP entries.
10651 For target z10 it reorders load instructions to avoid early load
10652    conflicts in the floating point pipeline.  */
10653 static int
10654 s390_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
10655 rtx *ready, int *nreadyp, int clock ATTRIBUTE_UNUSED)
10656 {
10657 if (s390_tune == PROCESSOR_2097_Z10)
10658 if (reload_completed && *nreadyp > 1)
10659 s390_z10_prevent_earlyload_conflicts (ready, nreadyp);
10660
10661 return s390_issue_rate ();
10662 }
10663
10664 /* This function is called via hook TARGET_SCHED_VARIABLE_ISSUE after
10665 the scheduler has issued INSN. It stores the last issued insn into
10666 last_scheduled_insn in order to make it available for
10667 s390_sched_reorder. */
10668 static int
10669 s390_sched_variable_issue (FILE *file ATTRIBUTE_UNUSED,
10670 int verbose ATTRIBUTE_UNUSED,
10671 rtx insn, int more)
10672 {
10673 last_scheduled_insn = insn;
10674
10675 if (GET_CODE (PATTERN (insn)) != USE
10676 && GET_CODE (PATTERN (insn)) != CLOBBER)
10677 return more - 1;
10678 else
10679 return more;
10680 }
10681
10682 static void
10683 s390_sched_init (FILE *file ATTRIBUTE_UNUSED,
10684 int verbose ATTRIBUTE_UNUSED,
10685 int max_ready ATTRIBUTE_UNUSED)
10686 {
10687 last_scheduled_insn = NULL_RTX;
10688 }
10689
10690 /* This function checks the whole of insn X for memory references. The
10691 function always returns zero because the framework it is called
10692 from would stop recursively analyzing the insn upon a return value
10693 other than zero. The real result of this function is updating
10694 counter variable MEM_COUNT. */
10695 static int
10696 check_dpu (rtx *x, unsigned *mem_count)
10697 {
10698 if (*x != NULL_RTX && MEM_P (*x))
10699 (*mem_count)++;
10700 return 0;
10701 }
10702
10703 /* This target hook implementation for TARGET_LOOP_UNROLL_ADJUST calculates
10704    the number of times struct loop *LOOP should be unrolled when tuning for
10705    CPUs with a built-in stride prefetcher.
10706    The loop is analyzed for memory accesses by calling check_dpu for
10707    each rtx of the loop.  Depending on loop_depth and the number of
10708    memory accesses, a new unroll count <= nunroll is returned to improve the
10709 behaviour of the hardware prefetch unit. */
10710 static unsigned
10711 s390_loop_unroll_adjust (unsigned nunroll, struct loop *loop)
10712 {
10713 basic_block *bbs;
10714 rtx insn;
10715 unsigned i;
10716 unsigned mem_count = 0;
10717
10718 if (s390_tune != PROCESSOR_2097_Z10 && s390_tune != PROCESSOR_2817_Z196)
10719 return nunroll;
10720
10721 /* Count the number of memory references within the loop body. */
10722 bbs = get_loop_body (loop);
10723 for (i = 0; i < loop->num_nodes; i++)
10724 {
10725 for (insn = BB_HEAD (bbs[i]); insn != BB_END (bbs[i]); insn = NEXT_INSN (insn))
10726 if (INSN_P (insn) && INSN_CODE (insn) != -1)
10727 for_each_rtx (&insn, (rtx_function) check_dpu, &mem_count);
10728 }
10729 free (bbs);
10730
10731 /* Prevent division by zero, and we do not need to adjust nunroll in this case. */
10732 if (mem_count == 0)
10733 return nunroll;
10734
10735 switch (loop_depth(loop))
10736 {
10737 case 1:
10738 return MIN (nunroll, 28 / mem_count);
10739 case 2:
10740 return MIN (nunroll, 22 / mem_count);
10741 default:
10742 return MIN (nunroll, 16 / mem_count);
10743 }
10744 }
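
/* Worked example: a loop at nesting depth 1 whose body contains 4 memory
   references is limited above to MIN (nunroll, 28 / 4), i.e. at most 7
   copies of the body, so that the unrolled loop does not overwhelm the
   hardware stride prefetcher with too many reference streams.  */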
10745
10746 /* Initialize GCC target structure. */
10747
10748 #undef TARGET_ASM_ALIGNED_HI_OP
10749 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
10750 #undef TARGET_ASM_ALIGNED_DI_OP
10751 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
10752 #undef TARGET_ASM_INTEGER
10753 #define TARGET_ASM_INTEGER s390_assemble_integer
10754
10755 #undef TARGET_ASM_OPEN_PAREN
10756 #define TARGET_ASM_OPEN_PAREN ""
10757
10758 #undef TARGET_ASM_CLOSE_PAREN
10759 #define TARGET_ASM_CLOSE_PAREN ""
10760
10761 #undef TARGET_OPTION_OVERRIDE
10762 #define TARGET_OPTION_OVERRIDE s390_option_override
10763
10764 #undef TARGET_ENCODE_SECTION_INFO
10765 #define TARGET_ENCODE_SECTION_INFO s390_encode_section_info
10766
10767 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10768 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
10769
10770 #ifdef HAVE_AS_TLS
10771 #undef TARGET_HAVE_TLS
10772 #define TARGET_HAVE_TLS true
10773 #endif
10774 #undef TARGET_CANNOT_FORCE_CONST_MEM
10775 #define TARGET_CANNOT_FORCE_CONST_MEM s390_cannot_force_const_mem
10776
10777 #undef TARGET_DELEGITIMIZE_ADDRESS
10778 #define TARGET_DELEGITIMIZE_ADDRESS s390_delegitimize_address
10779
10780 #undef TARGET_LEGITIMIZE_ADDRESS
10781 #define TARGET_LEGITIMIZE_ADDRESS s390_legitimize_address
10782
10783 #undef TARGET_RETURN_IN_MEMORY
10784 #define TARGET_RETURN_IN_MEMORY s390_return_in_memory
10785
10786 #undef TARGET_INIT_BUILTINS
10787 #define TARGET_INIT_BUILTINS s390_init_builtins
10788 #undef TARGET_EXPAND_BUILTIN
10789 #define TARGET_EXPAND_BUILTIN s390_expand_builtin
10790
10791 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
10792 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA s390_output_addr_const_extra
10793
10794 #undef TARGET_ASM_OUTPUT_MI_THUNK
10795 #define TARGET_ASM_OUTPUT_MI_THUNK s390_output_mi_thunk
10796 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
10797 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
10798
10799 #undef TARGET_SCHED_ADJUST_PRIORITY
10800 #define TARGET_SCHED_ADJUST_PRIORITY s390_adjust_priority
10801 #undef TARGET_SCHED_ISSUE_RATE
10802 #define TARGET_SCHED_ISSUE_RATE s390_issue_rate
10803 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
10804 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD s390_first_cycle_multipass_dfa_lookahead
10805
10806 #undef TARGET_SCHED_VARIABLE_ISSUE
10807 #define TARGET_SCHED_VARIABLE_ISSUE s390_sched_variable_issue
10808 #undef TARGET_SCHED_REORDER
10809 #define TARGET_SCHED_REORDER s390_sched_reorder
10810 #undef TARGET_SCHED_INIT
10811 #define TARGET_SCHED_INIT s390_sched_init
10812
10813 #undef TARGET_CANNOT_COPY_INSN_P
10814 #define TARGET_CANNOT_COPY_INSN_P s390_cannot_copy_insn_p
10815 #undef TARGET_RTX_COSTS
10816 #define TARGET_RTX_COSTS s390_rtx_costs
10817 #undef TARGET_ADDRESS_COST
10818 #define TARGET_ADDRESS_COST s390_address_cost
10819 #undef TARGET_REGISTER_MOVE_COST
10820 #define TARGET_REGISTER_MOVE_COST s390_register_move_cost
10821 #undef TARGET_MEMORY_MOVE_COST
10822 #define TARGET_MEMORY_MOVE_COST s390_memory_move_cost
10823
10824 #undef TARGET_MACHINE_DEPENDENT_REORG
10825 #define TARGET_MACHINE_DEPENDENT_REORG s390_reorg
10826
10827 #undef TARGET_VALID_POINTER_MODE
10828 #define TARGET_VALID_POINTER_MODE s390_valid_pointer_mode
10829
10830 #undef TARGET_BUILD_BUILTIN_VA_LIST
10831 #define TARGET_BUILD_BUILTIN_VA_LIST s390_build_builtin_va_list
10832 #undef TARGET_EXPAND_BUILTIN_VA_START
10833 #define TARGET_EXPAND_BUILTIN_VA_START s390_va_start
10834 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
10835 #define TARGET_GIMPLIFY_VA_ARG_EXPR s390_gimplify_va_arg
10836
10837 #undef TARGET_PROMOTE_FUNCTION_MODE
10838 #define TARGET_PROMOTE_FUNCTION_MODE s390_promote_function_mode
10839 #undef TARGET_PASS_BY_REFERENCE
10840 #define TARGET_PASS_BY_REFERENCE s390_pass_by_reference
10841
10842 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
10843 #define TARGET_FUNCTION_OK_FOR_SIBCALL s390_function_ok_for_sibcall
10844 #undef TARGET_FUNCTION_ARG
10845 #define TARGET_FUNCTION_ARG s390_function_arg
10846 #undef TARGET_FUNCTION_ARG_ADVANCE
10847 #define TARGET_FUNCTION_ARG_ADVANCE s390_function_arg_advance
10848 #undef TARGET_FUNCTION_VALUE
10849 #define TARGET_FUNCTION_VALUE s390_function_value
10850 #undef TARGET_LIBCALL_VALUE
10851 #define TARGET_LIBCALL_VALUE s390_libcall_value
10852
10853 #undef TARGET_FIXED_CONDITION_CODE_REGS
10854 #define TARGET_FIXED_CONDITION_CODE_REGS s390_fixed_condition_code_regs
10855
10856 #undef TARGET_CC_MODES_COMPATIBLE
10857 #define TARGET_CC_MODES_COMPATIBLE s390_cc_modes_compatible
10858
10859 #undef TARGET_INVALID_WITHIN_DOLOOP
10860 #define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_null
10861
10862 #ifdef HAVE_AS_TLS
10863 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
10864 #define TARGET_ASM_OUTPUT_DWARF_DTPREL s390_output_dwarf_dtprel
10865 #endif
10866
10867 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
10868 #undef TARGET_MANGLE_TYPE
10869 #define TARGET_MANGLE_TYPE s390_mangle_type
10870 #endif
10871
10872 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10873 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
10874
10875 #undef TARGET_PREFERRED_RELOAD_CLASS
10876 #define TARGET_PREFERRED_RELOAD_CLASS s390_preferred_reload_class
10877
10878 #undef TARGET_SECONDARY_RELOAD
10879 #define TARGET_SECONDARY_RELOAD s390_secondary_reload
10880
10881 #undef TARGET_LIBGCC_CMP_RETURN_MODE
10882 #define TARGET_LIBGCC_CMP_RETURN_MODE s390_libgcc_cmp_return_mode
10883
10884 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
10885 #define TARGET_LIBGCC_SHIFT_COUNT_MODE s390_libgcc_shift_count_mode
10886
10887 #undef TARGET_LEGITIMATE_ADDRESS_P
10888 #define TARGET_LEGITIMATE_ADDRESS_P s390_legitimate_address_p
10889
10890 #undef TARGET_LEGITIMATE_CONSTANT_P
10891 #define TARGET_LEGITIMATE_CONSTANT_P s390_legitimate_constant_p
10892
10893 #undef TARGET_CAN_ELIMINATE
10894 #define TARGET_CAN_ELIMINATE s390_can_eliminate
10895
10896 #undef TARGET_CONDITIONAL_REGISTER_USAGE
10897 #define TARGET_CONDITIONAL_REGISTER_USAGE s390_conditional_register_usage
10898
10899 #undef TARGET_LOOP_UNROLL_ADJUST
10900 #define TARGET_LOOP_UNROLL_ADJUST s390_loop_unroll_adjust
10901
10902 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
10903 #define TARGET_ASM_TRAMPOLINE_TEMPLATE s390_asm_trampoline_template
10904 #undef TARGET_TRAMPOLINE_INIT
10905 #define TARGET_TRAMPOLINE_INIT s390_trampoline_init
10906
10907 #undef TARGET_UNWIND_WORD_MODE
10908 #define TARGET_UNWIND_WORD_MODE s390_unwind_word_mode
10909
10910 struct gcc_target targetm = TARGET_INITIALIZER;
10911
10912 #include "gt-s390.h"