re PR target/54746 (config/s390/s390.c:1583: possible missing break in switch ?)
[gcc.git] / gcc / config / s390 / s390.c
1 /* Subroutines used for code generation on IBM S/390 and zSeries
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006,
3 2007, 2008, 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
4 Contributed by Hartmut Penner (hpenner@de.ibm.com) and
5 Ulrich Weigand (uweigand@de.ibm.com) and
6 Andreas Krebbel (Andreas.Krebbel@de.ibm.com).
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "tm_p.h"
31 #include "regs.h"
32 #include "hard-reg-set.h"
33 #include "insn-config.h"
34 #include "conditions.h"
35 #include "output.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "except.h"
39 #include "function.h"
40 #include "recog.h"
41 #include "expr.h"
42 #include "reload.h"
43 #include "diagnostic-core.h"
44 #include "basic-block.h"
45 #include "ggc.h"
46 #include "target.h"
47 #include "target-def.h"
48 #include "debug.h"
49 #include "langhooks.h"
50 #include "optabs.h"
51 #include "gimple.h"
52 #include "df.h"
53 #include "params.h"
54 #include "cfgloop.h"
55 #include "opts.h"
56
57 /* Define the specific costs for a given cpu. */
58
59 struct processor_costs
60 {
61 /* multiplication */
62 const int m; /* cost of an M instruction. */
63 const int mghi; /* cost of an MGHI instruction. */
64 const int mh; /* cost of an MH instruction. */
65 const int mhi; /* cost of an MHI instruction. */
66 const int ml; /* cost of an ML instruction. */
67 const int mr; /* cost of an MR instruction. */
68 const int ms; /* cost of an MS instruction. */
69 const int msg; /* cost of an MSG instruction. */
70 const int msgf; /* cost of an MSGF instruction. */
71 const int msgfr; /* cost of an MSGFR instruction. */
72 const int msgr; /* cost of an MSGR instruction. */
73 const int msr; /* cost of an MSR instruction. */
74 const int mult_df; /* cost of multiplication in DFmode. */
75 const int mxbr;
76 /* square root */
77 const int sqxbr; /* cost of square root in TFmode. */
78 const int sqdbr; /* cost of square root in DFmode. */
79 const int sqebr; /* cost of square root in SFmode. */
80 /* multiply and add */
81 const int madbr; /* cost of multiply and add in DFmode. */
82 const int maebr; /* cost of multiply and add in SFmode. */
83 /* division */
84 const int dxbr;
85 const int ddbr;
86 const int debr;
87 const int dlgr;
88 const int dlr;
89 const int dr;
90 const int dsgfr;
91 const int dsgr;
92 };
93
94 const struct processor_costs *s390_cost;
95
96 static const
97 struct processor_costs z900_cost =
98 {
99 COSTS_N_INSNS (5), /* M */
100 COSTS_N_INSNS (10), /* MGHI */
101 COSTS_N_INSNS (5), /* MH */
102 COSTS_N_INSNS (4), /* MHI */
103 COSTS_N_INSNS (5), /* ML */
104 COSTS_N_INSNS (5), /* MR */
105 COSTS_N_INSNS (4), /* MS */
106 COSTS_N_INSNS (15), /* MSG */
107 COSTS_N_INSNS (7), /* MSGF */
108 COSTS_N_INSNS (7), /* MSGFR */
109 COSTS_N_INSNS (10), /* MSGR */
110 COSTS_N_INSNS (4), /* MSR */
111 COSTS_N_INSNS (7), /* multiplication in DFmode */
112 COSTS_N_INSNS (13), /* MXBR */
113 COSTS_N_INSNS (136), /* SQXBR */
114 COSTS_N_INSNS (44), /* SQDBR */
115 COSTS_N_INSNS (35), /* SQEBR */
116 COSTS_N_INSNS (18), /* MADBR */
117 COSTS_N_INSNS (13), /* MAEBR */
118 COSTS_N_INSNS (134), /* DXBR */
119 COSTS_N_INSNS (30), /* DDBR */
120 COSTS_N_INSNS (27), /* DEBR */
121 COSTS_N_INSNS (220), /* DLGR */
122 COSTS_N_INSNS (34), /* DLR */
123 COSTS_N_INSNS (34), /* DR */
124 COSTS_N_INSNS (32), /* DSGFR */
125 COSTS_N_INSNS (32), /* DSGR */
126 };
127
128 static const
129 struct processor_costs z990_cost =
130 {
131 COSTS_N_INSNS (4), /* M */
132 COSTS_N_INSNS (2), /* MGHI */
133 COSTS_N_INSNS (2), /* MH */
134 COSTS_N_INSNS (2), /* MHI */
135 COSTS_N_INSNS (4), /* ML */
136 COSTS_N_INSNS (4), /* MR */
137 COSTS_N_INSNS (5), /* MS */
138 COSTS_N_INSNS (6), /* MSG */
139 COSTS_N_INSNS (4), /* MSGF */
140 COSTS_N_INSNS (4), /* MSGFR */
141 COSTS_N_INSNS (4), /* MSGR */
142 COSTS_N_INSNS (4), /* MSR */
143 COSTS_N_INSNS (1), /* multiplication in DFmode */
144 COSTS_N_INSNS (28), /* MXBR */
145 COSTS_N_INSNS (130), /* SQXBR */
146 COSTS_N_INSNS (66), /* SQDBR */
147 COSTS_N_INSNS (38), /* SQEBR */
148 COSTS_N_INSNS (1), /* MADBR */
149 COSTS_N_INSNS (1), /* MAEBR */
150 COSTS_N_INSNS (60), /* DXBR */
151 COSTS_N_INSNS (40), /* DDBR */
152 COSTS_N_INSNS (26), /* DEBR */
153 COSTS_N_INSNS (176), /* DLGR */
154 COSTS_N_INSNS (31), /* DLR */
155 COSTS_N_INSNS (31), /* DR */
156 COSTS_N_INSNS (31), /* DSGFR */
157 COSTS_N_INSNS (31), /* DSGR */
158 };
159
160 static const
161 struct processor_costs z9_109_cost =
162 {
163 COSTS_N_INSNS (4), /* M */
164 COSTS_N_INSNS (2), /* MGHI */
165 COSTS_N_INSNS (2), /* MH */
166 COSTS_N_INSNS (2), /* MHI */
167 COSTS_N_INSNS (4), /* ML */
168 COSTS_N_INSNS (4), /* MR */
169 COSTS_N_INSNS (5), /* MS */
170 COSTS_N_INSNS (6), /* MSG */
171 COSTS_N_INSNS (4), /* MSGF */
172 COSTS_N_INSNS (4), /* MSGFR */
173 COSTS_N_INSNS (4), /* MSGR */
174 COSTS_N_INSNS (4), /* MSR */
175 COSTS_N_INSNS (1), /* multiplication in DFmode */
176 COSTS_N_INSNS (28), /* MXBR */
177 COSTS_N_INSNS (130), /* SQXBR */
178 COSTS_N_INSNS (66), /* SQDBR */
179 COSTS_N_INSNS (38), /* SQEBR */
180 COSTS_N_INSNS (1), /* MADBR */
181 COSTS_N_INSNS (1), /* MAEBR */
182 COSTS_N_INSNS (60), /* DXBR */
183 COSTS_N_INSNS (40), /* DDBR */
184 COSTS_N_INSNS (26), /* DEBR */
185 COSTS_N_INSNS (30), /* DLGR */
186 COSTS_N_INSNS (23), /* DLR */
187 COSTS_N_INSNS (23), /* DR */
188 COSTS_N_INSNS (24), /* DSGFR */
189 COSTS_N_INSNS (24), /* DSGR */
190 };
191
192 static const
193 struct processor_costs z10_cost =
194 {
195 COSTS_N_INSNS (10), /* M */
196 COSTS_N_INSNS (10), /* MGHI */
197 COSTS_N_INSNS (10), /* MH */
198 COSTS_N_INSNS (10), /* MHI */
199 COSTS_N_INSNS (10), /* ML */
200 COSTS_N_INSNS (10), /* MR */
201 COSTS_N_INSNS (10), /* MS */
202 COSTS_N_INSNS (10), /* MSG */
203 COSTS_N_INSNS (10), /* MSGF */
204 COSTS_N_INSNS (10), /* MSGFR */
205 COSTS_N_INSNS (10), /* MSGR */
206 COSTS_N_INSNS (10), /* MSR */
207 COSTS_N_INSNS (1) , /* multiplication in DFmode */
208 COSTS_N_INSNS (50), /* MXBR */
209 COSTS_N_INSNS (120), /* SQXBR */
210 COSTS_N_INSNS (52), /* SQDBR */
211 COSTS_N_INSNS (38), /* SQEBR */
212 COSTS_N_INSNS (1), /* MADBR */
213 COSTS_N_INSNS (1), /* MAEBR */
214 COSTS_N_INSNS (111), /* DXBR */
215 COSTS_N_INSNS (39), /* DDBR */
216 COSTS_N_INSNS (32), /* DEBR */
217 COSTS_N_INSNS (160), /* DLGR */
218 COSTS_N_INSNS (71), /* DLR */
219 COSTS_N_INSNS (71), /* DR */
220 COSTS_N_INSNS (71), /* DSGFR */
221 COSTS_N_INSNS (71), /* DSGR */
222 };
223
224 static const
225 struct processor_costs z196_cost =
226 {
227 COSTS_N_INSNS (7), /* M */
228 COSTS_N_INSNS (5), /* MGHI */
229 COSTS_N_INSNS (5), /* MH */
230 COSTS_N_INSNS (5), /* MHI */
231 COSTS_N_INSNS (7), /* ML */
232 COSTS_N_INSNS (7), /* MR */
233 COSTS_N_INSNS (6), /* MS */
234 COSTS_N_INSNS (8), /* MSG */
235 COSTS_N_INSNS (6), /* MSGF */
236 COSTS_N_INSNS (6), /* MSGFR */
237 COSTS_N_INSNS (8), /* MSGR */
238 COSTS_N_INSNS (6), /* MSR */
239 COSTS_N_INSNS (1) , /* multiplication in DFmode */
240 COSTS_N_INSNS (40), /* MXBR B+40 */
241 COSTS_N_INSNS (100), /* SQXBR B+100 */
242 COSTS_N_INSNS (42), /* SQDBR B+42 */
243 COSTS_N_INSNS (28), /* SQEBR B+28 */
244 COSTS_N_INSNS (1), /* MADBR B */
245 COSTS_N_INSNS (1), /* MAEBR B */
246 COSTS_N_INSNS (101), /* DXBR B+101 */
247 COSTS_N_INSNS (29), /* DDBR */
248 COSTS_N_INSNS (22), /* DEBR */
249 COSTS_N_INSNS (160), /* DLGR cracked */
250 COSTS_N_INSNS (160), /* DLR cracked */
251 COSTS_N_INSNS (160), /* DR expanded */
252 COSTS_N_INSNS (160), /* DSGFR cracked */
253 COSTS_N_INSNS (160), /* DSGR cracked */
254 };
255
256 extern int reload_completed;
257
258 /* Kept up to date using the SCHED_VARIABLE_ISSUE hook. */
259 static rtx last_scheduled_insn;
260
261 /* Structure used to hold the components of a S/390 memory
262 address. A legitimate address on S/390 is of the general
263 form
264 base + index + displacement
265 where any of the components is optional.
266
267 base and index are registers of the class ADDR_REGS,
268 displacement is an unsigned 12-bit immediate constant. */
269
270 struct s390_address
271 {
272 rtx base;
273 rtx indx;
274 rtx disp;
275 bool pointer;
276 bool literal_pool;
277 };
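/* A minimal worked example of the address form described above (for
   illustration; register numbers chosen arbitrarily):  the RX-format load

       l  %r1,8(%r2,%r3)

   accesses base %r3 + index %r2 + displacement 8, i.e. in the terms of
   struct s390_address: base = %r3, indx = %r2, disp = (const_int 8).  */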
278
279 /* The following structure is embedded in the machine
280 specific part of struct function. */
281
282 struct GTY (()) s390_frame_layout
283 {
284 /* Offset within stack frame. */
285 HOST_WIDE_INT gprs_offset;
286 HOST_WIDE_INT f0_offset;
287 HOST_WIDE_INT f4_offset;
288 HOST_WIDE_INT f8_offset;
289 HOST_WIDE_INT backchain_offset;
290
291 /* Numbers of the first and last gpr for which slots in the register
292 save area are reserved. */
293 int first_save_gpr_slot;
294 int last_save_gpr_slot;
295
296 /* Numbers of the first and last gpr to be saved and restored. */
297 int first_save_gpr;
298 int first_restore_gpr;
299 int last_save_gpr;
300 int last_restore_gpr;
301
302 /* Bits standing for floating point registers. Set if the
303 respective register has to be saved. Starting with reg 16 (f0)
304 at the rightmost bit.
305 Bit 15 - 8 7 6 5 4 3 2 1 0
306 fpr 15 - 8 7 5 3 1 6 4 2 0
307 reg 31 - 24 23 22 21 20 19 18 17 16 */
308 unsigned int fpr_bitmap;
309
310 /* Number of floating point registers f8-f15 which must be saved. */
311 int high_fprs;
312
313 /* Set if return address needs to be saved.
314 This flag is set by s390_return_addr_rtx if it could not use
315 the initial value of r14 and therefore depends on r14 being
316 saved to the stack. */
317 bool save_return_addr_p;
318
319 /* Size of stack frame. */
320 HOST_WIDE_INT frame_size;
321 };
322
323 /* Define the structure for the machine field in struct function. */
324
325 struct GTY(()) machine_function
326 {
327 struct s390_frame_layout frame_layout;
328
329 /* Literal pool base register. */
330 rtx base_reg;
331
332 /* True if we may need to perform branch splitting. */
333 bool split_branches_pending_p;
334
335 /* Some local-dynamic TLS symbol name. */
336 const char *some_ld_name;
337
338 bool has_landing_pad_p;
339 };
340
341 /* A few accessor macros for struct cfun->machine->s390_frame_layout. */
342
343 #define cfun_frame_layout (cfun->machine->frame_layout)
344 #define cfun_save_high_fprs_p (!!cfun_frame_layout.high_fprs)
345 #define cfun_gprs_save_area_size ((cfun_frame_layout.last_save_gpr_slot - \
346 cfun_frame_layout.first_save_gpr_slot + 1) * UNITS_PER_LONG)
347 #define cfun_set_fpr_bit(BITNUM) (cfun->machine->frame_layout.fpr_bitmap |= \
348 (1 << (BITNUM)))
349 #define cfun_fpr_bit_p(BITNUM) (!!(cfun->machine->frame_layout.fpr_bitmap & \
350 (1 << (BITNUM))))
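/* Illustration of the fpr_bitmap accessors, assuming the bit layout
   documented above:  cfun_set_fpr_bit (8) records that f8 (hard reg 24)
   needs a save slot, and cfun_fpr_bit_p (8) later tests exactly that
   bit; bits 0-7 cover f0-f7 in the permuted order shown in the table.  */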
351
352 /* Number of GPRs and FPRs used for argument passing. */
353 #define GP_ARG_NUM_REG 5
354 #define FP_ARG_NUM_REG (TARGET_64BIT ? 4 : 2)
355
356 /* A couple of shortcuts. */
357 #define CONST_OK_FOR_J(x) \
358 CONST_OK_FOR_CONSTRAINT_P((x), 'J', "J")
359 #define CONST_OK_FOR_K(x) \
360 CONST_OK_FOR_CONSTRAINT_P((x), 'K', "K")
361 #define CONST_OK_FOR_Os(x) \
362 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
363 #define CONST_OK_FOR_Op(x) \
364 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
365 #define CONST_OK_FOR_On(x) \
366 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")
367
368 #define REGNO_PAIR_OK(REGNO, MODE) \
369 (HARD_REGNO_NREGS ((REGNO), (MODE)) == 1 || !((REGNO) & 1))
370
371 /* The read-ahead of the dynamic branch prediction unit in
372 bytes on a z10 (or higher) CPU. */
373 #define PREDICT_DISTANCE (TARGET_Z10 ? 384 : 2048)
374
375 /* Return the alignment for LABEL. We default to the -falign-labels
376 value except for the literal pool base label. */
377 int
378 s390_label_align (rtx label)
379 {
380 rtx prev_insn = prev_active_insn (label);
381
382 if (prev_insn == NULL_RTX)
383 goto old;
384
385 prev_insn = single_set (prev_insn);
386
387 if (prev_insn == NULL_RTX)
388 goto old;
389
390 prev_insn = SET_SRC (prev_insn);
391
392 /* Don't align literal pool base labels. */
393 if (GET_CODE (prev_insn) == UNSPEC
394 && XINT (prev_insn, 1) == UNSPEC_MAIN_BASE)
395 return 0;
396
397 old:
398 return align_labels_log;
399 }
400
401 static enum machine_mode
402 s390_libgcc_cmp_return_mode (void)
403 {
404 return TARGET_64BIT ? DImode : SImode;
405 }
406
407 static enum machine_mode
408 s390_libgcc_shift_count_mode (void)
409 {
410 return TARGET_64BIT ? DImode : SImode;
411 }
412
413 static enum machine_mode
414 s390_unwind_word_mode (void)
415 {
416 return TARGET_64BIT ? DImode : SImode;
417 }
418
419 /* Return true if the back end supports mode MODE. */
420 static bool
421 s390_scalar_mode_supported_p (enum machine_mode mode)
422 {
423 /* In contrast to the default implementation, reject TImode on
424 31-bit TARGET_ZARCH for ABI compliance. */
425 if (!TARGET_64BIT && TARGET_ZARCH && mode == TImode)
426 return false;
427
428 if (DECIMAL_FLOAT_MODE_P (mode))
429 return default_decimal_float_supported_p ();
430
431 return default_scalar_mode_supported_p (mode);
432 }
433
434 /* Set the has_landing_pad_p flag in struct machine_function to VALUE. */
435
436 void
437 s390_set_has_landing_pad_p (bool value)
438 {
439 cfun->machine->has_landing_pad_p = value;
440 }
441
442 /* If two condition code modes are compatible, return a condition code
443 mode which is compatible with both. Otherwise, return
444 VOIDmode. */
445
446 static enum machine_mode
447 s390_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
448 {
449 if (m1 == m2)
450 return m1;
451
452 switch (m1)
453 {
454 case CCZmode:
455 if (m2 == CCUmode || m2 == CCTmode || m2 == CCZ1mode
456 || m2 == CCSmode || m2 == CCSRmode || m2 == CCURmode)
457 return m2;
458 return VOIDmode;
459
460 case CCSmode:
461 case CCUmode:
462 case CCTmode:
463 case CCSRmode:
464 case CCURmode:
465 case CCZ1mode:
466 if (m2 == CCZmode)
467 return m1;
468
469 return VOIDmode;
470
471 default:
472 return VOIDmode;
473 }
474 return VOIDmode;
475 }
476
477 /* Return true if SET either doesn't set the CC register, or else
478 the source and destination have matching CC modes and that
479 CC mode is at least as constrained as REQ_MODE. */
480
481 static bool
482 s390_match_ccmode_set (rtx set, enum machine_mode req_mode)
483 {
484 enum machine_mode set_mode;
485
486 gcc_assert (GET_CODE (set) == SET);
487
488 if (GET_CODE (SET_DEST (set)) != REG || !CC_REGNO_P (REGNO (SET_DEST (set))))
489 return 1;
490
491 set_mode = GET_MODE (SET_DEST (set));
492 switch (set_mode)
493 {
494 case CCSmode:
495 case CCSRmode:
496 case CCUmode:
497 case CCURmode:
498 case CCLmode:
499 case CCL1mode:
500 case CCL2mode:
501 case CCL3mode:
502 case CCT1mode:
503 case CCT2mode:
504 case CCT3mode:
505 if (req_mode != set_mode)
506 return 0;
507 break;
508
509 case CCZmode:
510 if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode
511 && req_mode != CCSRmode && req_mode != CCURmode)
512 return 0;
513 break;
514
515 case CCAPmode:
516 case CCANmode:
517 if (req_mode != CCAmode)
518 return 0;
519 break;
520
521 default:
522 gcc_unreachable ();
523 }
524
525 return (GET_MODE (SET_SRC (set)) == set_mode);
526 }
527
528 /* Return true if every SET in INSN that sets the CC register
529 has source and destination with matching CC modes and that
530 CC mode is at least as constrained as REQ_MODE.
531 If REQ_MODE is VOIDmode, always return false. */
532
533 bool
534 s390_match_ccmode (rtx insn, enum machine_mode req_mode)
535 {
536 int i;
537
538 /* s390_tm_ccmode returns VOIDmode to indicate failure. */
539 if (req_mode == VOIDmode)
540 return false;
541
542 if (GET_CODE (PATTERN (insn)) == SET)
543 return s390_match_ccmode_set (PATTERN (insn), req_mode);
544
545 if (GET_CODE (PATTERN (insn)) == PARALLEL)
546 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
547 {
548 rtx set = XVECEXP (PATTERN (insn), 0, i);
549 if (GET_CODE (set) == SET)
550 if (!s390_match_ccmode_set (set, req_mode))
551 return false;
552 }
553
554 return true;
555 }
556
557 /* If a test-under-mask instruction can be used to implement
558 (compare (and ... OP1) OP2), return the CC mode required
559 to do that. Otherwise, return VOIDmode.
560 MIXED is true if the instruction can distinguish between
561 CC1 and CC2 for mixed selected bits (TMxx); it is false
562 if the instruction cannot (TM). */
563
564 enum machine_mode
565 s390_tm_ccmode (rtx op1, rtx op2, bool mixed)
566 {
567 int bit0, bit1;
568
569 /* ??? Fixme: should work on CONST_DOUBLE as well. */
570 if (GET_CODE (op1) != CONST_INT || GET_CODE (op2) != CONST_INT)
571 return VOIDmode;
572
573 /* Selected bits all zero: CC0.
574 e.g.: int a; if ((a & (16 + 128)) == 0) */
575 if (INTVAL (op2) == 0)
576 return CCTmode;
577
578 /* Selected bits all one: CC3.
579 e.g.: int a; if ((a & (16 + 128)) == 16 + 128) */
580 if (INTVAL (op2) == INTVAL (op1))
581 return CCT3mode;
582
583 /* Exactly two bits selected, mixed zeroes and ones: CC1 or CC2. e.g.:
584 int a;
585 if ((a & (16 + 128)) == 16) -> CCT1
586 if ((a & (16 + 128)) == 128) -> CCT2 */
587 if (mixed)
588 {
589 bit1 = exact_log2 (INTVAL (op2));
590 bit0 = exact_log2 (INTVAL (op1) ^ INTVAL (op2));
591 if (bit0 != -1 && bit1 != -1)
592 return bit0 > bit1 ? CCT1mode : CCT2mode;
593 }
594
595 return VOIDmode;
596 }
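/* Worked example (following the comment above):  for
   if ((a & (16 + 128)) == 16) we get op1 = 144 and op2 = 16.  Then
   bit1 = exact_log2 (16) = 4 and bit0 = exact_log2 (144 ^ 16) = 7,
   so bit0 > bit1 and the function returns CCT1mode.  */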
597
598 /* Given a comparison code OP (EQ, NE, etc.) and the operands
599 OP0 and OP1 of a COMPARE, return the mode to be used for the
600 comparison. */
601
602 enum machine_mode
603 s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
604 {
605 switch (code)
606 {
607 case EQ:
608 case NE:
609 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
610 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
611 return CCAPmode;
612 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
613 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
614 return CCAPmode;
615 if ((GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
616 || GET_CODE (op1) == NEG)
617 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
618 return CCLmode;
619
620 if (GET_CODE (op0) == AND)
621 {
622 /* Check whether we can potentially do it via TM. */
623 enum machine_mode ccmode;
624 ccmode = s390_tm_ccmode (XEXP (op0, 1), op1, 1);
625 if (ccmode != VOIDmode)
626 {
627 /* Relax CCTmode to CCZmode to allow fall-back to AND
628 if that turns out to be beneficial. */
629 return ccmode == CCTmode ? CCZmode : ccmode;
630 }
631 }
632
633 if (register_operand (op0, HImode)
634 && GET_CODE (op1) == CONST_INT
635 && (INTVAL (op1) == -1 || INTVAL (op1) == 65535))
636 return CCT3mode;
637 if (register_operand (op0, QImode)
638 && GET_CODE (op1) == CONST_INT
639 && (INTVAL (op1) == -1 || INTVAL (op1) == 255))
640 return CCT3mode;
641
642 return CCZmode;
643
644 case LE:
645 case LT:
646 case GE:
647 case GT:
648 /* The only overflow condition of NEG and ABS happens when
649 INT_MIN is used as the operand: the mathematically positive
650 result wraps around and stays negative. Using CCAP mode
651 the resulting cc can still be used for comparisons. */
652 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
653 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
654 return CCAPmode;
655
656 /* If constants are involved in an add instruction it is possible to use
657 the resulting cc for comparisons with zero. Knowing the sign of the
658 constant, the overflow behavior becomes predictable. E.g.:
659 int a, b; if ((b = a + c) > 0)
660 with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP */
661 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
662 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
663 {
664 if (INTVAL (XEXP((op0), 1)) < 0)
665 return CCANmode;
666 else
667 return CCAPmode;
668 }
669 /* Fall through. */
670 case UNORDERED:
671 case ORDERED:
672 case UNEQ:
673 case UNLE:
674 case UNLT:
675 case UNGE:
676 case UNGT:
677 case LTGT:
678 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
679 && GET_CODE (op1) != CONST_INT)
680 return CCSRmode;
681 return CCSmode;
682
683 case LTU:
684 case GEU:
685 if (GET_CODE (op0) == PLUS
686 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
687 return CCL1mode;
688
689 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
690 && GET_CODE (op1) != CONST_INT)
691 return CCURmode;
692 return CCUmode;
693
694 case LEU:
695 case GTU:
696 if (GET_CODE (op0) == MINUS
697 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
698 return CCL2mode;
699
700 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
701 && GET_CODE (op1) != CONST_INT)
702 return CCURmode;
703 return CCUmode;
704
705 default:
706 gcc_unreachable ();
707 }
708 }
709
710 /* Replace the comparison OP0 CODE OP1 by a semantically equivalent one
711 that we can implement more efficiently. */
712
713 void
714 s390_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1)
715 {
716 /* Convert ZERO_EXTRACT back to AND to enable TM patterns. */
717 if ((*code == EQ || *code == NE)
718 && *op1 == const0_rtx
719 && GET_CODE (*op0) == ZERO_EXTRACT
720 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
721 && GET_CODE (XEXP (*op0, 2)) == CONST_INT
722 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
723 {
724 rtx inner = XEXP (*op0, 0);
725 HOST_WIDE_INT modesize = GET_MODE_BITSIZE (GET_MODE (inner));
726 HOST_WIDE_INT len = INTVAL (XEXP (*op0, 1));
727 HOST_WIDE_INT pos = INTVAL (XEXP (*op0, 2));
728
729 if (len > 0 && len < modesize
730 && pos >= 0 && pos + len <= modesize
731 && modesize <= HOST_BITS_PER_WIDE_INT)
732 {
733 unsigned HOST_WIDE_INT block;
734 block = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
735 block <<= modesize - pos - len;
736
737 *op0 = gen_rtx_AND (GET_MODE (inner), inner,
738 gen_int_mode (block, GET_MODE (inner)));
739 }
740 }
741
742 /* Narrow AND of memory against immediate to enable TM. */
743 if ((*code == EQ || *code == NE)
744 && *op1 == const0_rtx
745 && GET_CODE (*op0) == AND
746 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
747 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
748 {
749 rtx inner = XEXP (*op0, 0);
750 rtx mask = XEXP (*op0, 1);
751
752 /* Ignore paradoxical SUBREGs if all extra bits are masked out. */
753 if (GET_CODE (inner) == SUBREG
754 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (inner)))
755 && (GET_MODE_SIZE (GET_MODE (inner))
756 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
757 && ((INTVAL (mask)
758 & GET_MODE_MASK (GET_MODE (inner))
759 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
760 == 0))
761 inner = SUBREG_REG (inner);
762
763 /* Do not change volatile MEMs. */
764 if (MEM_P (inner) && !MEM_VOLATILE_P (inner))
765 {
766 int part = s390_single_part (XEXP (*op0, 1),
767 GET_MODE (inner), QImode, 0);
768 if (part >= 0)
769 {
770 mask = gen_int_mode (s390_extract_part (mask, QImode, 0), QImode);
771 inner = adjust_address_nv (inner, QImode, part);
772 *op0 = gen_rtx_AND (QImode, inner, mask);
773 }
774 }
775 }
776
777 /* Narrow comparisons against 0xffff to HImode if possible. */
778 if ((*code == EQ || *code == NE)
779 && GET_CODE (*op1) == CONST_INT
780 && INTVAL (*op1) == 0xffff
781 && SCALAR_INT_MODE_P (GET_MODE (*op0))
782 && (nonzero_bits (*op0, GET_MODE (*op0))
783 & ~(unsigned HOST_WIDE_INT) 0xffff) == 0)
784 {
785 *op0 = gen_lowpart (HImode, *op0);
786 *op1 = constm1_rtx;
787 }
788
789 /* Remove redundant UNSPEC_CCU_TO_INT conversions if possible. */
790 if (GET_CODE (*op0) == UNSPEC
791 && XINT (*op0, 1) == UNSPEC_CCU_TO_INT
792 && XVECLEN (*op0, 0) == 1
793 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCUmode
794 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
795 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
796 && *op1 == const0_rtx)
797 {
798 enum rtx_code new_code = UNKNOWN;
799 switch (*code)
800 {
801 case EQ: new_code = EQ; break;
802 case NE: new_code = NE; break;
803 case LT: new_code = GTU; break;
804 case GT: new_code = LTU; break;
805 case LE: new_code = GEU; break;
806 case GE: new_code = LEU; break;
807 default: break;
808 }
809
810 if (new_code != UNKNOWN)
811 {
812 *op0 = XVECEXP (*op0, 0, 0);
813 *code = new_code;
814 }
815 }
816
817 /* Remove redundant UNSPEC_CCZ_TO_INT conversions if possible. */
818 if (GET_CODE (*op0) == UNSPEC
819 && XINT (*op0, 1) == UNSPEC_CCZ_TO_INT
820 && XVECLEN (*op0, 0) == 1
821 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCZmode
822 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
823 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
824 && *op1 == const0_rtx)
825 {
826 enum rtx_code new_code = UNKNOWN;
827 switch (*code)
828 {
829 case EQ: new_code = EQ; break;
830 case NE: new_code = NE; break;
831 default: break;
832 }
833
834 if (new_code != UNKNOWN)
835 {
836 *op0 = XVECEXP (*op0, 0, 0);
837 *code = new_code;
838 }
839 }
840
841 /* Simplify cascaded EQ, NE with const0_rtx. */
842 if ((*code == NE || *code == EQ)
843 && (GET_CODE (*op0) == EQ || GET_CODE (*op0) == NE)
844 && GET_MODE (*op0) == SImode
845 && GET_MODE (XEXP (*op0, 0)) == CCZ1mode
846 && REG_P (XEXP (*op0, 0))
847 && XEXP (*op0, 1) == const0_rtx
848 && *op1 == const0_rtx)
849 {
850 if ((*code == EQ && GET_CODE (*op0) == NE)
851 || (*code == NE && GET_CODE (*op0) == EQ))
852 *code = EQ;
853 else
854 *code = NE;
855 *op0 = XEXP (*op0, 0);
856 }
857
858 /* Prefer register over memory as first operand. */
859 if (MEM_P (*op0) && REG_P (*op1))
860 {
861 rtx tem = *op0; *op0 = *op1; *op1 = tem;
862 *code = swap_condition (*code);
863 }
864 }
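/* Example of the first transformation above:  a comparison
   (zero_extract:SI x (const_int 1) (const_int 7)) == 0 has
   modesize = 32, len = 1, pos = 7, so block = 1 << (32 - 7 - 1)
   = 0x01000000 and the comparison is rewritten as
   (and:SI x (const_int 0x01000000)) == 0, which the TM patterns
   can pick up.  */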
865
866 /* Emit a compare instruction suitable to implement the comparison
867 OP0 CODE OP1. Return the correct condition RTL to be placed in
868 the IF_THEN_ELSE of the conditional branch testing the result. */
869
870 rtx
871 s390_emit_compare (enum rtx_code code, rtx op0, rtx op1)
872 {
873 enum machine_mode mode = s390_select_ccmode (code, op0, op1);
874 rtx cc;
875
876 /* Do not output a redundant compare instruction if a compare_and_swap
877 pattern already computed the result and the machine modes are compatible. */
878 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
879 {
880 gcc_assert (s390_cc_modes_compatible (GET_MODE (op0), mode)
881 == GET_MODE (op0));
882 cc = op0;
883 }
884 else
885 {
886 cc = gen_rtx_REG (mode, CC_REGNUM);
887 emit_insn (gen_rtx_SET (VOIDmode, cc, gen_rtx_COMPARE (mode, op0, op1)));
888 }
889
890 return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
891 }
892
893 /* Emit a SImode compare and swap instruction setting MEM to NEW_RTX if OLD
894 matches CMP.
895 Return the correct condition RTL to be placed in the IF_THEN_ELSE of the
896 conditional branch testing the result. */
897
898 static rtx
899 s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem,
900 rtx cmp, rtx new_rtx)
901 {
902 emit_insn (gen_atomic_compare_and_swapsi_internal (old, mem, cmp, new_rtx));
903 return s390_emit_compare (code, gen_rtx_REG (CCZ1mode, CC_REGNUM),
904 const0_rtx);
905 }
906
907 /* Emit a jump instruction to TARGET. If COND is NULL_RTX, emit an
908 unconditional jump, else a conditional jump under condition COND. */
909
910 void
911 s390_emit_jump (rtx target, rtx cond)
912 {
913 rtx insn;
914
915 target = gen_rtx_LABEL_REF (VOIDmode, target);
916 if (cond)
917 target = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, target, pc_rtx);
918
919 insn = gen_rtx_SET (VOIDmode, pc_rtx, target);
920 emit_jump_insn (insn);
921 }
922
923 /* Return branch condition mask to implement a branch
924 specified by CODE. Return -1 for invalid comparisons. */
925
926 int
927 s390_branch_condition_mask (rtx code)
928 {
929 const int CC0 = 1 << 3;
930 const int CC1 = 1 << 2;
931 const int CC2 = 1 << 1;
932 const int CC3 = 1 << 0;
933
934 gcc_assert (GET_CODE (XEXP (code, 0)) == REG);
935 gcc_assert (REGNO (XEXP (code, 0)) == CC_REGNUM);
936 gcc_assert (XEXP (code, 1) == const0_rtx);
937
938 switch (GET_MODE (XEXP (code, 0)))
939 {
940 case CCZmode:
941 case CCZ1mode:
942 switch (GET_CODE (code))
943 {
944 case EQ: return CC0;
945 case NE: return CC1 | CC2 | CC3;
946 default: return -1;
947 }
948 break;
949
950 case CCT1mode:
951 switch (GET_CODE (code))
952 {
953 case EQ: return CC1;
954 case NE: return CC0 | CC2 | CC3;
955 default: return -1;
956 }
957 break;
958
959 case CCT2mode:
960 switch (GET_CODE (code))
961 {
962 case EQ: return CC2;
963 case NE: return CC0 | CC1 | CC3;
964 default: return -1;
965 }
966 break;
967
968 case CCT3mode:
969 switch (GET_CODE (code))
970 {
971 case EQ: return CC3;
972 case NE: return CC0 | CC1 | CC2;
973 default: return -1;
974 }
975 break;
976
977 case CCLmode:
978 switch (GET_CODE (code))
979 {
980 case EQ: return CC0 | CC2;
981 case NE: return CC1 | CC3;
982 default: return -1;
983 }
984 break;
985
986 case CCL1mode:
987 switch (GET_CODE (code))
988 {
989 case LTU: return CC2 | CC3; /* carry */
990 case GEU: return CC0 | CC1; /* no carry */
991 default: return -1;
992 }
993 break;
994
995 case CCL2mode:
996 switch (GET_CODE (code))
997 {
998 case GTU: return CC0 | CC1; /* borrow */
999 case LEU: return CC2 | CC3; /* no borrow */
1000 default: return -1;
1001 }
1002 break;
1003
1004 case CCL3mode:
1005 switch (GET_CODE (code))
1006 {
1007 case EQ: return CC0 | CC2;
1008 case NE: return CC1 | CC3;
1009 case LTU: return CC1;
1010 case GTU: return CC3;
1011 case LEU: return CC1 | CC2;
1012 case GEU: return CC2 | CC3;
1013 default: return -1;
1014 }
1015
1016 case CCUmode:
1017 switch (GET_CODE (code))
1018 {
1019 case EQ: return CC0;
1020 case NE: return CC1 | CC2 | CC3;
1021 case LTU: return CC1;
1022 case GTU: return CC2;
1023 case LEU: return CC0 | CC1;
1024 case GEU: return CC0 | CC2;
1025 default: return -1;
1026 }
1027 break;
1028
1029 case CCURmode:
1030 switch (GET_CODE (code))
1031 {
1032 case EQ: return CC0;
1033 case NE: return CC2 | CC1 | CC3;
1034 case LTU: return CC2;
1035 case GTU: return CC1;
1036 case LEU: return CC0 | CC2;
1037 case GEU: return CC0 | CC1;
1038 default: return -1;
1039 }
1040 break;
1041
1042 case CCAPmode:
1043 switch (GET_CODE (code))
1044 {
1045 case EQ: return CC0;
1046 case NE: return CC1 | CC2 | CC3;
1047 case LT: return CC1 | CC3;
1048 case GT: return CC2;
1049 case LE: return CC0 | CC1 | CC3;
1050 case GE: return CC0 | CC2;
1051 default: return -1;
1052 }
1053 break;
1054
1055 case CCANmode:
1056 switch (GET_CODE (code))
1057 {
1058 case EQ: return CC0;
1059 case NE: return CC1 | CC2 | CC3;
1060 case LT: return CC1;
1061 case GT: return CC2 | CC3;
1062 case LE: return CC0 | CC1;
1063 case GE: return CC0 | CC2 | CC3;
1064 default: return -1;
1065 }
1066 break;
1067
1068 case CCSmode:
1069 switch (GET_CODE (code))
1070 {
1071 case EQ: return CC0;
1072 case NE: return CC1 | CC2 | CC3;
1073 case LT: return CC1;
1074 case GT: return CC2;
1075 case LE: return CC0 | CC1;
1076 case GE: return CC0 | CC2;
1077 case UNORDERED: return CC3;
1078 case ORDERED: return CC0 | CC1 | CC2;
1079 case UNEQ: return CC0 | CC3;
1080 case UNLT: return CC1 | CC3;
1081 case UNGT: return CC2 | CC3;
1082 case UNLE: return CC0 | CC1 | CC3;
1083 case UNGE: return CC0 | CC2 | CC3;
1084 case LTGT: return CC1 | CC2;
1085 default: return -1;
1086 }
1087 break;
1088
1089 case CCSRmode:
1090 switch (GET_CODE (code))
1091 {
1092 case EQ: return CC0;
1093 case NE: return CC2 | CC1 | CC3;
1094 case LT: return CC2;
1095 case GT: return CC1;
1096 case LE: return CC0 | CC2;
1097 case GE: return CC0 | CC1;
1098 case UNORDERED: return CC3;
1099 case ORDERED: return CC0 | CC2 | CC1;
1100 case UNEQ: return CC0 | CC3;
1101 case UNLT: return CC2 | CC3;
1102 case UNGT: return CC1 | CC3;
1103 case UNLE: return CC0 | CC2 | CC3;
1104 case UNGE: return CC0 | CC1 | CC3;
1105 case LTGT: return CC2 | CC1;
1106 default: return -1;
1107 }
1108 break;
1109
1110 default:
1111 return -1;
1112 }
1113 }
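/* Example:  with the encoding above (CC0 = 8, CC1 = 4, CC2 = 2, CC3 = 1),
   an EQ test in CCZmode yields the mask 8 (branch if cc == 0) and NE
   yields 4 | 2 | 1 = 7 (branch if cc != 0).  */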
1114
1115
1116 /* Return branch condition mask to implement a compare and branch
1117 specified by CODE. Return -1 for invalid comparisons. */
1118
1119 int
1120 s390_compare_and_branch_condition_mask (rtx code)
1121 {
1122 const int CC0 = 1 << 3;
1123 const int CC1 = 1 << 2;
1124 const int CC2 = 1 << 1;
1125
1126 switch (GET_CODE (code))
1127 {
1128 case EQ:
1129 return CC0;
1130 case NE:
1131 return CC1 | CC2;
1132 case LT:
1133 case LTU:
1134 return CC1;
1135 case GT:
1136 case GTU:
1137 return CC2;
1138 case LE:
1139 case LEU:
1140 return CC0 | CC1;
1141 case GE:
1142 case GEU:
1143 return CC0 | CC2;
1144 default:
1145 gcc_unreachable ();
1146 }
1147 return -1;
1148 }
1149
1150 /* If INV is false, return assembler mnemonic string to implement
1151 a branch specified by CODE. If INV is true, return mnemonic
1152 for the corresponding inverted branch. */
1153
1154 static const char *
1155 s390_branch_condition_mnemonic (rtx code, int inv)
1156 {
1157 int mask;
1158
1159 static const char *const mnemonic[16] =
1160 {
1161 NULL, "o", "h", "nle",
1162 "l", "nhe", "lh", "ne",
1163 "e", "nlh", "he", "nl",
1164 "le", "nh", "no", NULL
1165 };
1166
1167 if (GET_CODE (XEXP (code, 0)) == REG
1168 && REGNO (XEXP (code, 0)) == CC_REGNUM
1169 && XEXP (code, 1) == const0_rtx)
1170 mask = s390_branch_condition_mask (code);
1171 else
1172 mask = s390_compare_and_branch_condition_mask (code);
1173
1174 gcc_assert (mask >= 0);
1175
1176 if (inv)
1177 mask ^= 15;
1178
1179 gcc_assert (mask >= 1 && mask <= 14);
1180
1181 return mnemonic[mask];
1182 }
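/* Example:  a CCZmode EQ comparison has mask 8, so mnemonic[8] = "e"
   ("branch on equal") is returned; with INV set the mask becomes
   8 ^ 15 = 7 and mnemonic[7] = "ne" is used instead.  */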
1183
1184 /* Return the part of OP which has a value different from DEF.
1185 The size of the part is determined by MODE.
1186 Use this function only if you already know that OP really
1187 contains such a part. */
1188
1189 unsigned HOST_WIDE_INT
1190 s390_extract_part (rtx op, enum machine_mode mode, int def)
1191 {
1192 unsigned HOST_WIDE_INT value = 0;
1193 int max_parts = HOST_BITS_PER_WIDE_INT / GET_MODE_BITSIZE (mode);
1194 int part_bits = GET_MODE_BITSIZE (mode);
1195 unsigned HOST_WIDE_INT part_mask
1196 = ((unsigned HOST_WIDE_INT)1 << part_bits) - 1;
1197 int i;
1198
1199 for (i = 0; i < max_parts; i++)
1200 {
1201 if (i == 0)
1202 value = (unsigned HOST_WIDE_INT) INTVAL (op);
1203 else
1204 value >>= part_bits;
1205
1206 if ((value & part_mask) != (def & part_mask))
1207 return value & part_mask;
1208 }
1209
1210 gcc_unreachable ();
1211 }
1212
1213 /* If OP is an integer constant of mode MODE with exactly one
1214 part of mode PART_MODE unequal to DEF, return the number of that
1215 part. Otherwise, return -1. */
1216
1217 int
1218 s390_single_part (rtx op,
1219 enum machine_mode mode,
1220 enum machine_mode part_mode,
1221 int def)
1222 {
1223 unsigned HOST_WIDE_INT value = 0;
1224 int n_parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (part_mode);
1225 unsigned HOST_WIDE_INT part_mask
1226 = ((unsigned HOST_WIDE_INT)1 << GET_MODE_BITSIZE (part_mode)) - 1;
1227 int i, part = -1;
1228
1229 if (GET_CODE (op) != CONST_INT)
1230 return -1;
1231
1232 for (i = 0; i < n_parts; i++)
1233 {
1234 if (i == 0)
1235 value = (unsigned HOST_WIDE_INT) INTVAL (op);
1236 else
1237 value >>= GET_MODE_BITSIZE (part_mode);
1238
1239 if ((value & part_mask) != (def & part_mask))
1240 {
1241 if (part != -1)
1242 return -1;
1243 else
1244 part = i;
1245 }
1246 }
1247 return part == -1 ? -1 : n_parts - 1 - part;
1248 }
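/* Worked example:  s390_single_part ((const_int 0x0000ff00), SImode,
   QImode, 0) finds exactly one QImode part different from 0.  Counting
   from the least significant part that is part 1, so the function
   returns n_parts - 1 - part = 4 - 1 - 1 = 2, i.e. the part index as
   seen from the most significant end (the big-endian byte offset).  */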
1249
1250 /* Return true if IN contains a contiguous bitfield in the lower SIZE
1251 bits and no other bits are set in IN. POS and LENGTH can be used
1252 to obtain the start position and the length of the bitfield.
1253
1254 POS gives the position of the first bit of the bitfield counting
1255 from the lowest order bit starting with zero. In order to use this
1256 value for S/390 instructions this has to be converted to "bits big
1257 endian" style. */
1258
1259 bool
1260 s390_contiguous_bitmask_p (unsigned HOST_WIDE_INT in, int size,
1261 int *pos, int *length)
1262 {
1263 int tmp_pos = 0;
1264 int tmp_length = 0;
1265 int i;
1266 unsigned HOST_WIDE_INT mask = 1ULL;
1267 bool contiguous = false;
1268
1269 for (i = 0; i < size; mask <<= 1, i++)
1270 {
1271 if (contiguous)
1272 {
1273 if (mask & in)
1274 tmp_length++;
1275 else
1276 break;
1277 }
1278 else
1279 {
1280 if (mask & in)
1281 {
1282 contiguous = true;
1283 tmp_length++;
1284 }
1285 else
1286 tmp_pos++;
1287 }
1288 }
1289
1290 if (!tmp_length)
1291 return false;
1292
1293 /* Calculate a mask for all bits beyond the contiguous bits. */
1294 mask = (-1LL & ~(((1ULL << (tmp_length + tmp_pos - 1)) << 1) - 1));
1295
1296 if (mask & in)
1297 return false;
1298
1299 if (tmp_length + tmp_pos - 1 > size)
1300 return false;
1301
1302 if (length)
1303 *length = tmp_length;
1304
1305 if (pos)
1306 *pos = tmp_pos;
1307
1308 return true;
1309 }
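/* Examples:  s390_contiguous_bitmask_p (0x00f0, 16, &pos, &len) returns
   true with pos = 4 and len = 4, while 0x0090 makes it return false
   because the set bits are not contiguous.  */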
1310
1311 /* Check whether we can (and want to) split a double-word
1312 move in mode MODE from SRC to DST into two single-word
1313 moves, moving the subword FIRST_SUBWORD first. */
1314
1315 bool
1316 s390_split_ok_p (rtx dst, rtx src, enum machine_mode mode, int first_subword)
1317 {
1318 /* Floating point registers cannot be split. */
1319 if (FP_REG_P (src) || FP_REG_P (dst))
1320 return false;
1321
1322 /* We don't need to split if operands are directly accessible. */
1323 if (s_operand (src, mode) || s_operand (dst, mode))
1324 return false;
1325
1326 /* Non-offsettable memory references cannot be split. */
1327 if ((GET_CODE (src) == MEM && !offsettable_memref_p (src))
1328 || (GET_CODE (dst) == MEM && !offsettable_memref_p (dst)))
1329 return false;
1330
1331 /* Moving the first subword must not clobber a register
1332 needed to move the second subword. */
1333 if (register_operand (dst, mode))
1334 {
1335 rtx subreg = operand_subword (dst, first_subword, 0, mode);
1336 if (reg_overlap_mentioned_p (subreg, src))
1337 return false;
1338 }
1339
1340 return true;
1341 }
1342
1343 /* Return true if it can be proven that [MEM1, MEM1 + SIZE]
1344 and [MEM2, MEM2 + SIZE] do overlap and false
1345 otherwise. */
1346
1347 bool
1348 s390_overlap_p (rtx mem1, rtx mem2, HOST_WIDE_INT size)
1349 {
1350 rtx addr1, addr2, addr_delta;
1351 HOST_WIDE_INT delta;
1352
1353 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
1354 return true;
1355
1356 if (size == 0)
1357 return false;
1358
1359 addr1 = XEXP (mem1, 0);
1360 addr2 = XEXP (mem2, 0);
1361
1362 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
1363
1364 /* This overlapping check is used by peepholes merging memory block operations.
1365 Overlapping operations would otherwise be recognized by the S/390 hardware
1366 and would fall back to a slower implementation. Allowing overlapping
1367 operations would lead to slow code but not to wrong code. Therefore we are
1368 somewhat optimistic if we cannot prove that the memory blocks are
1369 overlapping.
1370 That's why we return false here although this may accept operations on
1371 overlapping memory areas. */
1372 if (!addr_delta || GET_CODE (addr_delta) != CONST_INT)
1373 return false;
1374
1375 delta = INTVAL (addr_delta);
1376
1377 if (delta == 0
1378 || (delta > 0 && delta < size)
1379 || (delta < 0 && -delta < size))
1380 return true;
1381
1382 return false;
1383 }
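/* Example:  for mem1 at (plus r2 (const_int 0)) and mem2 at
   (plus r2 (const_int 8)) with SIZE = 16 the delta is 8, so overlap is
   proven and the function returns true.  If the two addresses use
   different base registers the delta cannot be computed and false is
   returned, i.e. no overlap is proven (see the comment above about
   being optimistic here).  */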
1384
1385 /* Check whether the address of memory reference MEM2 equals exactly
1386 the address of memory reference MEM1 plus DELTA. Return true if
1387 we can prove this to be the case, false otherwise. */
1388
1389 bool
1390 s390_offset_p (rtx mem1, rtx mem2, rtx delta)
1391 {
1392 rtx addr1, addr2, addr_delta;
1393
1394 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
1395 return false;
1396
1397 addr1 = XEXP (mem1, 0);
1398 addr2 = XEXP (mem2, 0);
1399
1400 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
1401 if (!addr_delta || !rtx_equal_p (addr_delta, delta))
1402 return false;
1403
1404 return true;
1405 }
1406
1407 /* Expand logical operator CODE in mode MODE with operands OPERANDS. */
1408
1409 void
1410 s390_expand_logical_operator (enum rtx_code code, enum machine_mode mode,
1411 rtx *operands)
1412 {
1413 enum machine_mode wmode = mode;
1414 rtx dst = operands[0];
1415 rtx src1 = operands[1];
1416 rtx src2 = operands[2];
1417 rtx op, clob, tem;
1418
1419 /* If we cannot handle the operation directly, use a temp register. */
1420 if (!s390_logical_operator_ok_p (operands))
1421 dst = gen_reg_rtx (mode);
1422
1423 /* QImode and HImode patterns make sense only if we have a destination
1424 in memory. Otherwise perform the operation in SImode. */
1425 if ((mode == QImode || mode == HImode) && GET_CODE (dst) != MEM)
1426 wmode = SImode;
1427
1428 /* Widen operands if required. */
1429 if (mode != wmode)
1430 {
1431 if (GET_CODE (dst) == SUBREG
1432 && (tem = simplify_subreg (wmode, dst, mode, 0)) != 0)
1433 dst = tem;
1434 else if (REG_P (dst))
1435 dst = gen_rtx_SUBREG (wmode, dst, 0);
1436 else
1437 dst = gen_reg_rtx (wmode);
1438
1439 if (GET_CODE (src1) == SUBREG
1440 && (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)
1441 src1 = tem;
1442 else if (GET_MODE (src1) != VOIDmode)
1443 src1 = gen_rtx_SUBREG (wmode, force_reg (mode, src1), 0);
1444
1445 if (GET_CODE (src2) == SUBREG
1446 && (tem = simplify_subreg (wmode, src2, mode, 0)) != 0)
1447 src2 = tem;
1448 else if (GET_MODE (src2) != VOIDmode)
1449 src2 = gen_rtx_SUBREG (wmode, force_reg (mode, src2), 0);
1450 }
1451
1452 /* Emit the instruction. */
1453 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, wmode, src1, src2));
1454 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
1455 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
1456
1457 /* Fix up the destination if needed. */
1458 if (dst != operands[0])
1459 emit_move_insn (operands[0], gen_lowpart (mode, dst));
1460 }
1461
1462 /* Check whether OPERANDS are OK for a logical operation (AND, IOR, XOR). */
1463
1464 bool
1465 s390_logical_operator_ok_p (rtx *operands)
1466 {
1467 /* If the destination operand is in memory, it needs to coincide
1468 with one of the source operands. After reload, it has to be
1469 the first source operand. */
1470 if (GET_CODE (operands[0]) == MEM)
1471 return rtx_equal_p (operands[0], operands[1])
1472 || (!reload_completed && rtx_equal_p (operands[0], operands[2]));
1473
1474 return true;
1475 }
1476
1477 /* Narrow logical operation CODE of memory operand MEMOP with immediate
1478 operand IMMOP to switch from SS to SI type instructions. */
1479
1480 void
1481 s390_narrow_logical_operator (enum rtx_code code, rtx *memop, rtx *immop)
1482 {
1483 int def = code == AND ? -1 : 0;
1484 HOST_WIDE_INT mask;
1485 int part;
1486
1487 gcc_assert (GET_CODE (*memop) == MEM);
1488 gcc_assert (!MEM_VOLATILE_P (*memop));
1489
1490 mask = s390_extract_part (*immop, QImode, def);
1491 part = s390_single_part (*immop, GET_MODE (*memop), QImode, def);
1492 gcc_assert (part >= 0);
1493
1494 *memop = adjust_address (*memop, QImode, part);
1495 *immop = gen_int_mode (mask, QImode);
1496 }
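/* Worked example (assuming a big-endian HImode memory operand):  an IOR
   with (const_int 0x0040) has a single non-zero QImode part; def is 0,
   the extracted mask is 0x40 and the part index is 1, so the operation
   is narrowed to a QImode IOR of the byte at offset 1 with 0x40.  */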
1497
1498
1499 /* How to allocate a 'struct machine_function'. */
1500
1501 static struct machine_function *
1502 s390_init_machine_status (void)
1503 {
1504 return ggc_alloc_cleared_machine_function ();
1505 }
1506
1507 static void
1508 s390_option_override (void)
1509 {
1510 /* Set up function hooks. */
1511 init_machine_status = s390_init_machine_status;
1512
1513 /* Architecture mode defaults according to ABI. */
1514 if (!(target_flags_explicit & MASK_ZARCH))
1515 {
1516 if (TARGET_64BIT)
1517 target_flags |= MASK_ZARCH;
1518 else
1519 target_flags &= ~MASK_ZARCH;
1520 }
1521
1522 /* Set the -march default in case it hasn't been specified on the
1523 command line. */
1524 if (s390_arch == PROCESSOR_max)
1525 {
1526 s390_arch_string = TARGET_ZARCH ? "z900" : "g5";
1527 s390_arch = TARGET_ZARCH ? PROCESSOR_2064_Z900 : PROCESSOR_9672_G5;
1528 s390_arch_flags = processor_flags_table[(int)s390_arch];
1529 }
1530
1531 /* Determine processor to tune for. */
1532 if (s390_tune == PROCESSOR_max)
1533 {
1534 s390_tune = s390_arch;
1535 s390_tune_flags = s390_arch_flags;
1536 }
1537
1538 /* Sanity checks. */
1539 if (TARGET_ZARCH && !TARGET_CPU_ZARCH)
1540 error ("z/Architecture mode not supported on %s", s390_arch_string);
1541 if (TARGET_64BIT && !TARGET_ZARCH)
1542 error ("64-bit ABI not supported in ESA/390 mode");
1543
1544 /* Use hardware DFP if available and not explicitly disabled by
1545 the user. E.g. with -m31 -march=z10 -mzarch */
1546 if (!(target_flags_explicit & MASK_HARD_DFP) && TARGET_DFP)
1547 target_flags |= MASK_HARD_DFP;
1548
1549 if (TARGET_HARD_DFP && !TARGET_DFP)
1550 {
1551 if (target_flags_explicit & MASK_HARD_DFP)
1552 {
1553 if (!TARGET_CPU_DFP)
1554 error ("hardware decimal floating point instructions"
1555 " not available on %s", s390_arch_string);
1556 if (!TARGET_ZARCH)
1557 error ("hardware decimal floating point instructions"
1558 " not available in ESA/390 mode");
1559 }
1560 else
1561 target_flags &= ~MASK_HARD_DFP;
1562 }
1563
1564 if ((target_flags_explicit & MASK_SOFT_FLOAT) && TARGET_SOFT_FLOAT)
1565 {
1566 if ((target_flags_explicit & MASK_HARD_DFP) && TARGET_HARD_DFP)
1567 error ("-mhard-dfp can%'t be used in conjunction with -msoft-float");
1568
1569 target_flags &= ~MASK_HARD_DFP;
1570 }
1571
1572 /* Set processor cost function. */
1573 switch (s390_tune)
1574 {
1575 case PROCESSOR_2084_Z990:
1576 s390_cost = &z990_cost;
1577 break;
1578 case PROCESSOR_2094_Z9_109:
1579 s390_cost = &z9_109_cost;
1580 break;
1581 case PROCESSOR_2097_Z10:
1582 s390_cost = &z10_cost;
1583 break;
1584 case PROCESSOR_2817_Z196:
1585 s390_cost = &z196_cost;
1586 break;
1587 default:
1588 s390_cost = &z900_cost;
1589 }
1590
1591 if (TARGET_BACKCHAIN && TARGET_PACKED_STACK && TARGET_HARD_FLOAT)
1592 error ("-mbackchain -mpacked-stack -mhard-float are not supported "
1593 "in combination");
1594
1595 if (s390_stack_size)
1596 {
1597 if (s390_stack_guard >= s390_stack_size)
1598 error ("stack size must be greater than the stack guard value");
1599 else if (s390_stack_size > 1 << 16)
1600 error ("stack size must not be greater than 64k");
1601 }
1602 else if (s390_stack_guard)
1603 error ("-mstack-guard implies use of -mstack-size");
1604
1605 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
1606 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
1607 target_flags |= MASK_LONG_DOUBLE_128;
1608 #endif
1609
1610 if (s390_tune == PROCESSOR_2097_Z10
1611 || s390_tune == PROCESSOR_2817_Z196)
1612 {
1613 maybe_set_param_value (PARAM_MAX_UNROLLED_INSNS, 100,
1614 global_options.x_param_values,
1615 global_options_set.x_param_values);
1616 maybe_set_param_value (PARAM_MAX_UNROLL_TIMES, 32,
1617 global_options.x_param_values,
1618 global_options_set.x_param_values);
1619 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 2000,
1620 global_options.x_param_values,
1621 global_options_set.x_param_values);
1622 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEEL_TIMES, 64,
1623 global_options.x_param_values,
1624 global_options_set.x_param_values);
1625 }
1626
1627 maybe_set_param_value (PARAM_MAX_PENDING_LIST_LENGTH, 256,
1628 global_options.x_param_values,
1629 global_options_set.x_param_values);
1630 /* Values for loop prefetching. */
1631 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, 256,
1632 global_options.x_param_values,
1633 global_options_set.x_param_values);
1634 maybe_set_param_value (PARAM_L1_CACHE_SIZE, 128,
1635 global_options.x_param_values,
1636 global_options_set.x_param_values);
1637 /* s390 has more than 2 cache levels and the caches are much larger.
1638 Since we are always running virtualized, assume that we only get a
1639 small part of the caches above L1. */
1640 maybe_set_param_value (PARAM_L2_CACHE_SIZE, 1500,
1641 global_options.x_param_values,
1642 global_options_set.x_param_values);
1643 maybe_set_param_value (PARAM_PREFETCH_MIN_INSN_TO_MEM_RATIO, 2,
1644 global_options.x_param_values,
1645 global_options_set.x_param_values);
1646 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6,
1647 global_options.x_param_values,
1648 global_options_set.x_param_values);
1649
1650 /* This cannot reside in s390_option_optimization_table since HAVE_prefetch
1651 requires the arch flags to be evaluated already. Since prefetching
1652 is beneficial on s390, we enable it if available. */
1653 if (flag_prefetch_loop_arrays < 0 && HAVE_prefetch && optimize >= 3)
1654 flag_prefetch_loop_arrays = 1;
1655
1656 /* Use the alternative scheduling-pressure algorithm by default. */
1657 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM, 2,
1658 global_options.x_param_values,
1659 global_options_set.x_param_values);
1660
1661 if (TARGET_TPF)
1662 {
1663 /* Don't emit DWARF3/4 unless specifically selected. The TPF
1664 debuggers do not yet support DWARF 3/4. */
1665 if (!global_options_set.x_dwarf_strict)
1666 dwarf_strict = 1;
1667 if (!global_options_set.x_dwarf_version)
1668 dwarf_version = 2;
1669 }
1670 }
1671
1672 /* Map for smallest class containing reg regno. */
1673
1674 const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
1675 { GENERAL_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1676 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1677 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1678 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1679 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1680 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1681 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1682 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1683 ADDR_REGS, CC_REGS, ADDR_REGS, ADDR_REGS,
1684 ACCESS_REGS, ACCESS_REGS
1685 };
1686
1687 /* Return attribute type of insn. */
1688
1689 static enum attr_type
1690 s390_safe_attr_type (rtx insn)
1691 {
1692 if (recog_memoized (insn) >= 0)
1693 return get_attr_type (insn);
1694 else
1695 return TYPE_NONE;
1696 }
1697
1698 /* Return true if DISP is a valid short displacement. */
1699
1700 static bool
1701 s390_short_displacement (rtx disp)
1702 {
1703 /* No displacement is OK. */
1704 if (!disp)
1705 return true;
1706
1707 /* Without the long displacement facility we don't need to
1708 distinguish between long and short displacements. */
1709 if (!TARGET_LONG_DISPLACEMENT)
1710 return true;
1711
1712 /* Integer displacement in range. */
1713 if (GET_CODE (disp) == CONST_INT)
1714 return INTVAL (disp) >= 0 && INTVAL (disp) < 4096;
1715
1716 /* GOT offset is not OK, the GOT can be large. */
1717 if (GET_CODE (disp) == CONST
1718 && GET_CODE (XEXP (disp, 0)) == UNSPEC
1719 && (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
1720 || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
1721 return false;
1722
1723 /* All other symbolic constants are literal pool references,
1724 which are OK as the literal pool must be small. */
1725 if (GET_CODE (disp) == CONST)
1726 return true;
1727
1728 return false;
1729 }
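/* Example:  a displacement of 4095 still fits the short (unsigned
   12-bit) form accepted above, while 4096 requires an instruction from
   the long displacement facility, which takes a signed 20-bit
   displacement.  */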
1730
1731 /* Decompose a RTL expression ADDR for a memory address into
1732 its components, returned in OUT.
1733
1734 Returns false if ADDR is not a valid memory address, true
1735 otherwise. If OUT is NULL, don't return the components,
1736 but check for validity only.
1737
1738 Note: Only addresses in canonical form are recognized.
1739 LEGITIMIZE_ADDRESS should convert non-canonical forms to the
1740 canonical form so that they will be recognized. */
1741
1742 static int
1743 s390_decompose_address (rtx addr, struct s390_address *out)
1744 {
1745 HOST_WIDE_INT offset = 0;
1746 rtx base = NULL_RTX;
1747 rtx indx = NULL_RTX;
1748 rtx disp = NULL_RTX;
1749 rtx orig_disp;
1750 bool pointer = false;
1751 bool base_ptr = false;
1752 bool indx_ptr = false;
1753 bool literal_pool = false;
1754
1755 /* We may need to substitute the literal pool base register into the address
1756 below. However, at this point we do not know which register is going to
1757 be used as base, so we substitute the arg pointer register. This is going
1758 to be treated as holding a pointer below -- it shouldn't be used for any
1759 other purpose. */
1760 rtx fake_pool_base = gen_rtx_REG (Pmode, ARG_POINTER_REGNUM);
1761
1762 /* Decompose address into base + index + displacement. */
1763
1764 if (GET_CODE (addr) == REG || GET_CODE (addr) == UNSPEC)
1765 base = addr;
1766
1767 else if (GET_CODE (addr) == PLUS)
1768 {
1769 rtx op0 = XEXP (addr, 0);
1770 rtx op1 = XEXP (addr, 1);
1771 enum rtx_code code0 = GET_CODE (op0);
1772 enum rtx_code code1 = GET_CODE (op1);
1773
1774 if (code0 == REG || code0 == UNSPEC)
1775 {
1776 if (code1 == REG || code1 == UNSPEC)
1777 {
1778 indx = op0; /* index + base */
1779 base = op1;
1780 }
1781
1782 else
1783 {
1784 base = op0; /* base + displacement */
1785 disp = op1;
1786 }
1787 }
1788
1789 else if (code0 == PLUS)
1790 {
1791 indx = XEXP (op0, 0); /* index + base + disp */
1792 base = XEXP (op0, 1);
1793 disp = op1;
1794 }
1795
1796 else
1797 {
1798 return false;
1799 }
1800 }
1801
1802 else
1803 disp = addr; /* displacement */
1804
1805 /* Extract integer part of displacement. */
1806 orig_disp = disp;
1807 if (disp)
1808 {
1809 if (GET_CODE (disp) == CONST_INT)
1810 {
1811 offset = INTVAL (disp);
1812 disp = NULL_RTX;
1813 }
1814 else if (GET_CODE (disp) == CONST
1815 && GET_CODE (XEXP (disp, 0)) == PLUS
1816 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
1817 {
1818 offset = INTVAL (XEXP (XEXP (disp, 0), 1));
1819 disp = XEXP (XEXP (disp, 0), 0);
1820 }
1821 }
1822
1823 /* Strip off CONST here to avoid special case tests later. */
1824 if (disp && GET_CODE (disp) == CONST)
1825 disp = XEXP (disp, 0);
1826
1827 /* We can convert literal pool addresses to
1828 displacements by basing them off the base register. */
1829 if (disp && GET_CODE (disp) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (disp))
1830 {
1831 /* Either base or index must be free to hold the base register. */
1832 if (!base)
1833 base = fake_pool_base, literal_pool = true;
1834 else if (!indx)
1835 indx = fake_pool_base, literal_pool = true;
1836 else
1837 return false;
1838
1839 /* Mark up the displacement. */
1840 disp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, disp),
1841 UNSPEC_LTREL_OFFSET);
1842 }
1843
1844 /* Validate base register. */
1845 if (base)
1846 {
1847 if (GET_CODE (base) == UNSPEC)
1848 switch (XINT (base, 1))
1849 {
1850 case UNSPEC_LTREF:
1851 if (!disp)
1852 disp = gen_rtx_UNSPEC (Pmode,
1853 gen_rtvec (1, XVECEXP (base, 0, 0)),
1854 UNSPEC_LTREL_OFFSET);
1855 else
1856 return false;
1857
1858 base = XVECEXP (base, 0, 1);
1859 break;
1860
1861 case UNSPEC_LTREL_BASE:
1862 if (XVECLEN (base, 0) == 1)
1863 base = fake_pool_base, literal_pool = true;
1864 else
1865 base = XVECEXP (base, 0, 1);
1866 break;
1867
1868 default:
1869 return false;
1870 }
1871
1872 if (!REG_P (base)
1873 || (GET_MODE (base) != SImode
1874 && GET_MODE (base) != Pmode))
1875 return false;
1876
1877 if (REGNO (base) == STACK_POINTER_REGNUM
1878 || REGNO (base) == FRAME_POINTER_REGNUM
1879 || ((reload_completed || reload_in_progress)
1880 && frame_pointer_needed
1881 && REGNO (base) == HARD_FRAME_POINTER_REGNUM)
1882 || REGNO (base) == ARG_POINTER_REGNUM
1883 || (flag_pic
1884 && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
1885 pointer = base_ptr = true;
1886
1887 if ((reload_completed || reload_in_progress)
1888 && base == cfun->machine->base_reg)
1889 pointer = base_ptr = literal_pool = true;
1890 }
1891
1892 /* Validate index register. */
1893 if (indx)
1894 {
1895 if (GET_CODE (indx) == UNSPEC)
1896 switch (XINT (indx, 1))
1897 {
1898 case UNSPEC_LTREF:
1899 if (!disp)
1900 disp = gen_rtx_UNSPEC (Pmode,
1901 gen_rtvec (1, XVECEXP (indx, 0, 0)),
1902 UNSPEC_LTREL_OFFSET);
1903 else
1904 return false;
1905
1906 indx = XVECEXP (indx, 0, 1);
1907 break;
1908
1909 case UNSPEC_LTREL_BASE:
1910 if (XVECLEN (indx, 0) == 1)
1911 indx = fake_pool_base, literal_pool = true;
1912 else
1913 indx = XVECEXP (indx, 0, 1);
1914 break;
1915
1916 default:
1917 return false;
1918 }
1919
1920 if (!REG_P (indx)
1921 || (GET_MODE (indx) != SImode
1922 && GET_MODE (indx) != Pmode))
1923 return false;
1924
1925 if (REGNO (indx) == STACK_POINTER_REGNUM
1926 || REGNO (indx) == FRAME_POINTER_REGNUM
1927 || ((reload_completed || reload_in_progress)
1928 && frame_pointer_needed
1929 && REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
1930 || REGNO (indx) == ARG_POINTER_REGNUM
1931 || (flag_pic
1932 && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
1933 pointer = indx_ptr = true;
1934
1935 if ((reload_completed || reload_in_progress)
1936 && indx == cfun->machine->base_reg)
1937 pointer = indx_ptr = literal_pool = true;
1938 }
1939
1940 /* Prefer to use pointer as base, not index. */
1941 if (base && indx && !base_ptr
1942 && (indx_ptr || (!REG_POINTER (base) && REG_POINTER (indx))))
1943 {
1944 rtx tmp = base;
1945 base = indx;
1946 indx = tmp;
1947 }
1948
1949 /* Validate displacement. */
1950 if (!disp)
1951 {
1952 /* If virtual registers are involved, the displacement will change later
1953 anyway as the virtual registers get eliminated. This could make a
1954 valid displacement invalid, but it is more likely to make an invalid
1955 displacement valid, because we sometimes access the register save area
1956 via negative offsets to one of those registers.
1957 Thus we don't check the displacement for validity here. If after
1958 elimination the displacement turns out to be invalid after all,
1959 this is fixed up by reload in any case. */
1960 if (base != arg_pointer_rtx
1961 && indx != arg_pointer_rtx
1962 && base != return_address_pointer_rtx
1963 && indx != return_address_pointer_rtx
1964 && base != frame_pointer_rtx
1965 && indx != frame_pointer_rtx
1966 && base != virtual_stack_vars_rtx
1967 && indx != virtual_stack_vars_rtx)
1968 if (!DISP_IN_RANGE (offset))
1969 return false;
1970 }
1971 else
1972 {
1973 /* All the special cases are pointers. */
1974 pointer = true;
1975
1976 /* In the small-PIC case, the linker converts @GOT
1977 and @GOTNTPOFF offsets to possible displacements. */
1978 if (GET_CODE (disp) == UNSPEC
1979 && (XINT (disp, 1) == UNSPEC_GOT
1980 || XINT (disp, 1) == UNSPEC_GOTNTPOFF)
1981 && flag_pic == 1)
1982 {
1983 ;
1984 }
1985
1986 /* Accept pool label offsets. */
1987 else if (GET_CODE (disp) == UNSPEC
1988 && XINT (disp, 1) == UNSPEC_POOL_OFFSET)
1989 ;
1990
1991 /* Accept literal pool references. */
1992 else if (GET_CODE (disp) == UNSPEC
1993 && XINT (disp, 1) == UNSPEC_LTREL_OFFSET)
1994 {
1995 /* In case CSE pulled a non-literal-pool reference out of
1996 the pool we have to reject the address. This is
1997 especially important when loading the GOT pointer on
1998 non-zarch CPUs. In this case the literal pool contains an
1999 lt-relative offset to the _GLOBAL_OFFSET_TABLE_ label which
2000 will most likely exceed the displacement range. */
2001 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
2002 || !CONSTANT_POOL_ADDRESS_P (XVECEXP (disp, 0, 0)))
2003 return false;
2004
2005 orig_disp = gen_rtx_CONST (Pmode, disp);
2006 if (offset)
2007 {
2008 /* If we have an offset, make sure it does not
2009 exceed the size of the constant pool entry. */
2010 rtx sym = XVECEXP (disp, 0, 0);
2011 if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))
2012 return false;
2013
2014 orig_disp = plus_constant (Pmode, orig_disp, offset);
2015 }
2016 }
2017
2018 else
2019 return false;
2020 }
2021
2022 if (!base && !indx)
2023 pointer = true;
2024
2025 if (out)
2026 {
2027 out->base = base;
2028 out->indx = indx;
2029 out->disp = orig_disp;
2030 out->pointer = pointer;
2031 out->literal_pool = literal_pool;
2032 }
2033
2034 return true;
2035 }
2036
2037 /* Decompose an RTL expression OP for a shift count into its components,
2038 and return the base register in BASE and the offset in OFFSET.
2039
2040 Return true if OP is a valid shift count, false if not. */
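/* For illustration (hypothetical operands, not taken from the sources
   above): (const_int 12) decomposes into base = NULL and offset = 12,
   (plus (reg 2) (const_int 7)) into base = (reg 2) and offset = 7, and
   a (subreg:SI (reg:DI 2)) wrapper around the register is stripped
   before the register check.  */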
2041
2042 bool
2043 s390_decompose_shift_count (rtx op, rtx *base, HOST_WIDE_INT *offset)
2044 {
2045 HOST_WIDE_INT off = 0;
2046
2047 /* We can have an integer constant, an address register,
2048 or a sum of the two. */
2049 if (GET_CODE (op) == CONST_INT)
2050 {
2051 off = INTVAL (op);
2052 op = NULL_RTX;
2053 }
2054 if (op && GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT)
2055 {
2056 off = INTVAL (XEXP (op, 1));
2057 op = XEXP (op, 0);
2058 }
2059 while (op && GET_CODE (op) == SUBREG)
2060 op = SUBREG_REG (op);
2061
2062 if (op && GET_CODE (op) != REG)
2063 return false;
2064
2065 if (offset)
2066 *offset = off;
2067 if (base)
2068 *base = op;
2069
2070 return true;
2071 }
2072
2073
2074 /* Return true if CODE is a valid address without index. */
2075
2076 bool
2077 s390_legitimate_address_without_index_p (rtx op)
2078 {
2079 struct s390_address addr;
2080
2081 if (!s390_decompose_address (XEXP (op, 0), &addr))
2082 return false;
2083 if (addr.indx)
2084 return false;
2085
2086 return true;
2087 }
2088
2089
2090 /* Return true if ADDR is of kind symbol_ref or symbol_ref + const_int
2091 and return these parts in SYMREF and ADDEND. You can pass NULL in
2092 SYMREF and/or ADDEND if you are not interested in these values.
2093 Literal pool references are *not* considered symbol references. */
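/* For illustration (hypothetical RTL): (const (plus (symbol_ref "foo")
   (const_int 8))) yields *SYMREF = (symbol_ref "foo") and *ADDEND = 8,
   while a plain (symbol_ref "foo") yields an addend of 0.  */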
2094
2095 static bool
2096 s390_symref_operand_p (rtx addr, rtx *symref, HOST_WIDE_INT *addend)
2097 {
2098 HOST_WIDE_INT tmpaddend = 0;
2099
2100 if (GET_CODE (addr) == CONST)
2101 addr = XEXP (addr, 0);
2102
2103 if (GET_CODE (addr) == PLUS)
2104 {
2105 if (GET_CODE (XEXP (addr, 0)) == SYMBOL_REF
2106 && !CONSTANT_POOL_ADDRESS_P (XEXP (addr, 0))
2107 && CONST_INT_P (XEXP (addr, 1)))
2108 {
2109 tmpaddend = INTVAL (XEXP (addr, 1));
2110 addr = XEXP (addr, 0);
2111 }
2112 else
2113 return false;
2114 }
2115 else
2116 if (GET_CODE (addr) != SYMBOL_REF || CONSTANT_POOL_ADDRESS_P (addr))
2117 return false;
2118
2119 if (symref)
2120 *symref = addr;
2121 if (addend)
2122 *addend = tmpaddend;
2123
2124 return true;
2125 }
2126
2127 /* Return TRUE if ADDR is an operand valid for a load/store relative
2128 instruction. Be aware that the alignment of the operand needs to
2129 be checked separately. */
2130 static bool
2131 s390_loadrelative_operand_p (rtx addr)
2132 {
2133 if (GET_CODE (addr) == CONST)
2134 addr = XEXP (addr, 0);
2135
2136 /* Enable load relative for symbol@GOTENT. */
2137 if (GET_CODE (addr) == UNSPEC
2138 && XINT (addr, 1) == UNSPEC_GOTENT)
2139 return true;
2140
2141 return s390_symref_operand_p (addr, NULL, NULL);
2142 }
2143
2144 /* Return true if the address in OP is valid for constraint letter C
2145 if wrapped in a MEM rtx. Set LIT_POOL_OK to true if literal
2146 pool MEMs should be accepted. Only the Q, R, S, T constraint
2147 letters are allowed for C. */
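/* As a rough guide, inferred from the case labels below: 'Q' and 'R'
   require the displacement to fit the short 12-bit unsigned field
   ('Q' without, 'R' with an index register), while 'S' and 'T' cover
   displacements that need the long 20-bit signed form ('S' without,
   'T' with an index register).  */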
2148
2149 static int
2150 s390_check_qrst_address (char c, rtx op, bool lit_pool_ok)
2151 {
2152 struct s390_address addr;
2153 bool decomposed = false;
2154
2155 /* This check makes sure that no symbolic addresses (except literal
2156 pool references) are accepted by the R or T constraints. */
2157 if (s390_loadrelative_operand_p (op))
2158 return 0;
2159
2160 /* Ensure literal pool references are only accepted if LIT_POOL_OK. */
2161 if (!lit_pool_ok)
2162 {
2163 if (!s390_decompose_address (op, &addr))
2164 return 0;
2165 if (addr.literal_pool)
2166 return 0;
2167 decomposed = true;
2168 }
2169
2170 switch (c)
2171 {
2172 case 'Q': /* no index short displacement */
2173 if (!decomposed && !s390_decompose_address (op, &addr))
2174 return 0;
2175 if (addr.indx)
2176 return 0;
2177 if (!s390_short_displacement (addr.disp))
2178 return 0;
2179 break;
2180
2181 case 'R': /* with index short displacement */
2182 if (TARGET_LONG_DISPLACEMENT)
2183 {
2184 if (!decomposed && !s390_decompose_address (op, &addr))
2185 return 0;
2186 if (!s390_short_displacement (addr.disp))
2187 return 0;
2188 }
2189 /* Any invalid address here will be fixed up by reload,
2190 so accept it for the most generic constraint. */
2191 break;
2192
2193 case 'S': /* no index long displacement */
2194 if (!TARGET_LONG_DISPLACEMENT)
2195 return 0;
2196 if (!decomposed && !s390_decompose_address (op, &addr))
2197 return 0;
2198 if (addr.indx)
2199 return 0;
2200 if (s390_short_displacement (addr.disp))
2201 return 0;
2202 break;
2203
2204 case 'T': /* with index long displacement */
2205 if (!TARGET_LONG_DISPLACEMENT)
2206 return 0;
2207 /* Any invalid address here will be fixed up by reload,
2208 so accept it for the most generic constraint. */
2209 if ((decomposed || s390_decompose_address (op, &addr))
2210 && s390_short_displacement (addr.disp))
2211 return 0;
2212 break;
2213 default:
2214 return 0;
2215 }
2216 return 1;
2217 }
2218
2219
2220 /* Evaluates constraint strings described by the regular expression
2221 ([A|B|Z](Q|R|S|T))|U|W|Y and returns 1 if OP is a valid operand for
2222 the constraint given in STR, and 0 otherwise. */
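/* For illustration (hypothetical constraint strings): "AQ" accepts an
   offsettable MEM whose address has no index register and a short
   displacement, "BT" accepts a non-literal-pool MEM with index and
   long displacement, and "ZQ" applies the 'Q' check directly to an
   address operand instead of a MEM.  */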
2223
2224 int
2225 s390_mem_constraint (const char *str, rtx op)
2226 {
2227 char c = str[0];
2228
2229 switch (c)
2230 {
2231 case 'A':
2232 /* Check for offsettable variants of memory constraints. */
2233 if (!MEM_P (op) || MEM_VOLATILE_P (op))
2234 return 0;
2235 if ((reload_completed || reload_in_progress)
2236 ? !offsettable_memref_p (op) : !offsettable_nonstrict_memref_p (op))
2237 return 0;
2238 return s390_check_qrst_address (str[1], XEXP (op, 0), true);
2239 case 'B':
2240 /* Check for non-literal-pool variants of memory constraints. */
2241 if (!MEM_P (op))
2242 return 0;
2243 return s390_check_qrst_address (str[1], XEXP (op, 0), false);
2244 case 'Q':
2245 case 'R':
2246 case 'S':
2247 case 'T':
2248 if (GET_CODE (op) != MEM)
2249 return 0;
2250 return s390_check_qrst_address (c, XEXP (op, 0), true);
2251 case 'U':
2252 return (s390_check_qrst_address ('Q', op, true)
2253 || s390_check_qrst_address ('R', op, true));
2254 case 'W':
2255 return (s390_check_qrst_address ('S', op, true)
2256 || s390_check_qrst_address ('T', op, true));
2257 case 'Y':
2258 /* Simply check for the basic form of a shift count. Reload will
2259 take care of making sure we have a proper base register. */
2260 if (!s390_decompose_shift_count (op, NULL, NULL))
2261 return 0;
2262 break;
2263 case 'Z':
2264 return s390_check_qrst_address (str[1], op, true);
2265 default:
2266 return 0;
2267 }
2268 return 1;
2269 }
2270
2271
2272 /* Evaluates constraint strings starting with letter O. Input
2273 parameter C is the second letter following the "O" in the constraint
2274 string. Returns 1 if VALUE meets the respective constraint and 0
2275 otherwise. */
2276
2277 int
2278 s390_O_constraint_str (const char c, HOST_WIDE_INT value)
2279 {
2280 if (!TARGET_EXTIMM)
2281 return 0;
2282
2283 switch (c)
2284 {
2285 case 's':
2286 return trunc_int_for_mode (value, SImode) == value;
2287
2288 case 'p':
2289 return value == 0
2290 || s390_single_part (GEN_INT (value), DImode, SImode, 0) == 1;
2291
2292 case 'n':
2293 return s390_single_part (GEN_INT (value - 1), DImode, SImode, -1) == 1;
2294
2295 default:
2296 gcc_unreachable ();
2297 }
2298 }
2299
2300
2301 /* Evaluates constraint strings starting with letter N. Parameter STR
2302 contains the letters following letter "N" in the constraint string.
2303 Returns true if VALUE matches the constraint. */
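/* For illustration (a hypothetical constraint): "NxHD0" accepts a
   DImode constant in which exactly one 16-bit (HImode) chunk is
   nonzero and all other chunks are zero; a leading digit instead of
   'x' additionally pins down which chunk that must be, and a trailing
   'F' instead of '0' requires the remaining chunks to be all ones.  */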
2304
2305 int
2306 s390_N_constraint_str (const char *str, HOST_WIDE_INT value)
2307 {
2308 enum machine_mode mode, part_mode;
2309 int def;
2310 int part, part_goal;
2311
2312
2313 if (str[0] == 'x')
2314 part_goal = -1;
2315 else
2316 part_goal = str[0] - '0';
2317
2318 switch (str[1])
2319 {
2320 case 'Q':
2321 part_mode = QImode;
2322 break;
2323 case 'H':
2324 part_mode = HImode;
2325 break;
2326 case 'S':
2327 part_mode = SImode;
2328 break;
2329 default:
2330 return 0;
2331 }
2332
2333 switch (str[2])
2334 {
2335 case 'H':
2336 mode = HImode;
2337 break;
2338 case 'S':
2339 mode = SImode;
2340 break;
2341 case 'D':
2342 mode = DImode;
2343 break;
2344 default:
2345 return 0;
2346 }
2347
2348 switch (str[3])
2349 {
2350 case '0':
2351 def = 0;
2352 break;
2353 case 'F':
2354 def = -1;
2355 break;
2356 default:
2357 return 0;
2358 }
2359
2360 if (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (part_mode))
2361 return 0;
2362
2363 part = s390_single_part (GEN_INT (value), mode, part_mode, def);
2364 if (part < 0)
2365 return 0;
2366 if (part_goal != -1 && part_goal != part)
2367 return 0;
2368
2369 return 1;
2370 }
2371
2372
2373 /* Returns true if the input parameter VALUE is a float zero. */
2374
2375 int
2376 s390_float_const_zero_p (rtx value)
2377 {
2378 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
2379 && value == CONST0_RTX (GET_MODE (value)));
2380 }
2381
2382 /* Implement TARGET_REGISTER_MOVE_COST. */
2383
2384 static int
2385 s390_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2386 reg_class_t from, reg_class_t to)
2387 {
2388 /* On s390, copy between fprs and gprs is expensive. */
2389 if ((reg_classes_intersect_p (from, GENERAL_REGS)
2390 && reg_classes_intersect_p (to, FP_REGS))
2391 || (reg_classes_intersect_p (from, FP_REGS)
2392 && reg_classes_intersect_p (to, GENERAL_REGS)))
2393 return 10;
2394
2395 return 1;
2396 }
2397
2398 /* Implement TARGET_MEMORY_MOVE_COST. */
2399
2400 static int
2401 s390_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2402 reg_class_t rclass ATTRIBUTE_UNUSED,
2403 bool in ATTRIBUTE_UNUSED)
2404 {
2405 return 1;
2406 }
2407
2408 /* Compute a (partial) cost for rtx X. Return true if the complete
2409 cost has been computed, and false if subexpressions should be
2410 scanned. In either case, *TOTAL contains the cost result.
2411 CODE contains GET_CODE (x), OUTER_CODE contains the code
2412 of the superexpression of x. */
2413
2414 static bool
2415 s390_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
2416 int *total, bool speed ATTRIBUTE_UNUSED)
2417 {
2418 switch (code)
2419 {
2420 case CONST:
2421 case CONST_INT:
2422 case LABEL_REF:
2423 case SYMBOL_REF:
2424 case CONST_DOUBLE:
2425 case MEM:
2426 *total = 0;
2427 return true;
2428
2429 case ASHIFT:
2430 case ASHIFTRT:
2431 case LSHIFTRT:
2432 case ROTATE:
2433 case ROTATERT:
2434 case AND:
2435 case IOR:
2436 case XOR:
2437 case NEG:
2438 case NOT:
2439 *total = COSTS_N_INSNS (1);
2440 return false;
2441
2442 case PLUS:
2443 case MINUS:
2444 *total = COSTS_N_INSNS (1);
2445 return false;
2446
2447 case MULT:
2448 switch (GET_MODE (x))
2449 {
2450 case SImode:
2451 {
2452 rtx left = XEXP (x, 0);
2453 rtx right = XEXP (x, 1);
2454 if (GET_CODE (right) == CONST_INT
2455 && CONST_OK_FOR_K (INTVAL (right)))
2456 *total = s390_cost->mhi;
2457 else if (GET_CODE (left) == SIGN_EXTEND)
2458 *total = s390_cost->mh;
2459 else
2460 *total = s390_cost->ms; /* msr, ms, msy */
2461 break;
2462 }
2463 case DImode:
2464 {
2465 rtx left = XEXP (x, 0);
2466 rtx right = XEXP (x, 1);
2467 if (TARGET_ZARCH)
2468 {
2469 if (GET_CODE (right) == CONST_INT
2470 && CONST_OK_FOR_K (INTVAL (right)))
2471 *total = s390_cost->mghi;
2472 else if (GET_CODE (left) == SIGN_EXTEND)
2473 *total = s390_cost->msgf;
2474 else
2475 *total = s390_cost->msg; /* msgr, msg */
2476 }
2477 else /* TARGET_31BIT */
2478 {
2479 if (GET_CODE (left) == SIGN_EXTEND
2480 && GET_CODE (right) == SIGN_EXTEND)
2481 /* mulsidi case: mr, m */
2482 *total = s390_cost->m;
2483 else if (GET_CODE (left) == ZERO_EXTEND
2484 && GET_CODE (right) == ZERO_EXTEND
2485 && TARGET_CPU_ZARCH)
2486 /* umulsidi case: ml, mlr */
2487 *total = s390_cost->ml;
2488 else
2489 /* Complex calculation is required. */
2490 *total = COSTS_N_INSNS (40);
2491 }
2492 break;
2493 }
2494 case SFmode:
2495 case DFmode:
2496 *total = s390_cost->mult_df;
2497 break;
2498 case TFmode:
2499 *total = s390_cost->mxbr;
2500 break;
2501 default:
2502 return false;
2503 }
2504 return false;
2505
2506 case FMA:
2507 switch (GET_MODE (x))
2508 {
2509 case DFmode:
2510 *total = s390_cost->madbr;
2511 break;
2512 case SFmode:
2513 *total = s390_cost->maebr;
2514 break;
2515 default:
2516 return false;
2517 }
2518 /* A NEG in the third argument is free: FMSUB. */
2519 if (GET_CODE (XEXP (x, 2)) == NEG)
2520 {
2521 *total += (rtx_cost (XEXP (x, 0), FMA, 0, speed)
2522 + rtx_cost (XEXP (x, 1), FMA, 1, speed)
2523 + rtx_cost (XEXP (XEXP (x, 2), 0), FMA, 2, speed));
2524 return true;
2525 }
2526 return false;
2527
2528 case UDIV:
2529 case UMOD:
2530 if (GET_MODE (x) == TImode) /* 128 bit division */
2531 *total = s390_cost->dlgr;
2532 else if (GET_MODE (x) == DImode)
2533 {
2534 rtx right = XEXP (x, 1);
2535 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
2536 *total = s390_cost->dlr;
2537 else /* 64 by 64 bit division */
2538 *total = s390_cost->dlgr;
2539 }
2540 else if (GET_MODE (x) == SImode) /* 32 bit division */
2541 *total = s390_cost->dlr;
2542 return false;
2543
2544 case DIV:
2545 case MOD:
2546 if (GET_MODE (x) == DImode)
2547 {
2548 rtx right = XEXP (x, 1);
2549 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
2550 if (TARGET_ZARCH)
2551 *total = s390_cost->dsgfr;
2552 else
2553 *total = s390_cost->dr;
2554 else /* 64 by 64 bit division */
2555 *total = s390_cost->dsgr;
2556 }
2557 else if (GET_MODE (x) == SImode) /* 32 bit division */
2558 *total = s390_cost->dlr;
2559 else if (GET_MODE (x) == SFmode)
2560 {
2561 *total = s390_cost->debr;
2562 }
2563 else if (GET_MODE (x) == DFmode)
2564 {
2565 *total = s390_cost->ddbr;
2566 }
2567 else if (GET_MODE (x) == TFmode)
2568 {
2569 *total = s390_cost->dxbr;
2570 }
2571 return false;
2572
2573 case SQRT:
2574 if (GET_MODE (x) == SFmode)
2575 *total = s390_cost->sqebr;
2576 else if (GET_MODE (x) == DFmode)
2577 *total = s390_cost->sqdbr;
2578 else /* TFmode */
2579 *total = s390_cost->sqxbr;
2580 return false;
2581
2582 case SIGN_EXTEND:
2583 case ZERO_EXTEND:
2584 if (outer_code == MULT || outer_code == DIV || outer_code == MOD
2585 || outer_code == PLUS || outer_code == MINUS
2586 || outer_code == COMPARE)
2587 *total = 0;
2588 return false;
2589
2590 case COMPARE:
2591 *total = COSTS_N_INSNS (1);
2592 if (GET_CODE (XEXP (x, 0)) == AND
2593 && GET_CODE (XEXP (x, 1)) == CONST_INT
2594 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
2595 {
2596 rtx op0 = XEXP (XEXP (x, 0), 0);
2597 rtx op1 = XEXP (XEXP (x, 0), 1);
2598 rtx op2 = XEXP (x, 1);
2599
2600 if (memory_operand (op0, GET_MODE (op0))
2601 && s390_tm_ccmode (op1, op2, 0) != VOIDmode)
2602 return true;
2603 if (register_operand (op0, GET_MODE (op0))
2604 && s390_tm_ccmode (op1, op2, 1) != VOIDmode)
2605 return true;
2606 }
2607 return false;
2608
2609 default:
2610 return false;
2611 }
2612 }
2613
2614 /* Return the cost of an address rtx ADDR. */
2615
2616 static int
2617 s390_address_cost (rtx addr, enum machine_mode mode ATTRIBUTE_UNUSED,
2618 addr_space_t as ATTRIBUTE_UNUSED,
2619 bool speed ATTRIBUTE_UNUSED)
2620 {
2621 struct s390_address ad;
2622 if (!s390_decompose_address (addr, &ad))
2623 return 1000;
2624
2625 return ad.indx? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (1);
2626 }
2627
2628 /* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
2629 otherwise return 0. */
2630
2631 int
2632 tls_symbolic_operand (rtx op)
2633 {
2634 if (GET_CODE (op) != SYMBOL_REF)
2635 return 0;
2636 return SYMBOL_REF_TLS_MODEL (op);
2637 }
2638 \f
2639 /* Split DImode access register reference REG (on 64-bit) into its constituent
2640 low and high parts, and store them into LO and HI. Note that gen_lowpart/
2641 gen_highpart cannot be used as they assume all registers are word-sized,
2642 while our access registers have only half that size. */
2643
2644 void
2645 s390_split_access_reg (rtx reg, rtx *lo, rtx *hi)
2646 {
2647 gcc_assert (TARGET_64BIT);
2648 gcc_assert (ACCESS_REG_P (reg));
2649 gcc_assert (GET_MODE (reg) == DImode);
2650 gcc_assert (!(REGNO (reg) & 1));
2651
2652 *lo = gen_rtx_REG (SImode, REGNO (reg) + 1);
2653 *hi = gen_rtx_REG (SImode, REGNO (reg));
2654 }
2655
2656 /* Return true if OP contains a symbol reference */
2657
2658 bool
2659 symbolic_reference_mentioned_p (rtx op)
2660 {
2661 const char *fmt;
2662 int i;
2663
2664 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
2665 return 1;
2666
2667 fmt = GET_RTX_FORMAT (GET_CODE (op));
2668 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2669 {
2670 if (fmt[i] == 'E')
2671 {
2672 int j;
2673
2674 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2675 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2676 return 1;
2677 }
2678
2679 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
2680 return 1;
2681 }
2682
2683 return 0;
2684 }
2685
2686 /* Return true if OP contains a reference to a thread-local symbol. */
2687
2688 bool
2689 tls_symbolic_reference_mentioned_p (rtx op)
2690 {
2691 const char *fmt;
2692 int i;
2693
2694 if (GET_CODE (op) == SYMBOL_REF)
2695 return tls_symbolic_operand (op);
2696
2697 fmt = GET_RTX_FORMAT (GET_CODE (op));
2698 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2699 {
2700 if (fmt[i] == 'E')
2701 {
2702 int j;
2703
2704 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2705 if (tls_symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2706 return true;
2707 }
2708
2709 else if (fmt[i] == 'e' && tls_symbolic_reference_mentioned_p (XEXP (op, i)))
2710 return true;
2711 }
2712
2713 return false;
2714 }
2715
2716
2717 /* Return true if OP is a legitimate general operand when
2718 generating PIC code. It is given that flag_pic is on
2719 and that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2720
2721 int
2722 legitimate_pic_operand_p (rtx op)
2723 {
2724 /* Accept all non-symbolic constants. */
2725 if (!SYMBOLIC_CONST (op))
2726 return 1;
2727
2728 /* Reject everything else; must be handled
2729 via emit_symbolic_move. */
2730 return 0;
2731 }
2732
2733 /* Returns true if the constant value OP is a legitimate general operand.
2734 It is given that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2735
2736 static bool
2737 s390_legitimate_constant_p (enum machine_mode mode, rtx op)
2738 {
2739 /* Accept all non-symbolic constants. */
2740 if (!SYMBOLIC_CONST (op))
2741 return 1;
2742
2743 /* Accept immediate LARL operands. */
2744 if (TARGET_CPU_ZARCH && larl_operand (op, mode))
2745 return 1;
2746
2747 /* Thread-local symbols are never legal constants. This is
2748 so that emit_call knows that computing such addresses
2749 might require a function call. */
2750 if (TLS_SYMBOLIC_CONST (op))
2751 return 0;
2752
2753 /* In the PIC case, symbolic constants must *not* be
2754 forced into the literal pool. We accept them here,
2755 so that they will be handled by emit_symbolic_move. */
2756 if (flag_pic)
2757 return 1;
2758
2759 /* All remaining non-PIC symbolic constants are
2760 forced into the literal pool. */
2761 return 0;
2762 }
2763
2764 /* Determine if it's legal to put X into the constant pool. This
2765 is not possible if X contains the address of a symbol that is
2766 not constant (TLS) or not known at final link time (PIC). */
2767
2768 static bool
2769 s390_cannot_force_const_mem (enum machine_mode mode, rtx x)
2770 {
2771 switch (GET_CODE (x))
2772 {
2773 case CONST_INT:
2774 case CONST_DOUBLE:
2775 /* Accept all non-symbolic constants. */
2776 return false;
2777
2778 case LABEL_REF:
2779 /* Labels are OK iff we are non-PIC. */
2780 return flag_pic != 0;
2781
2782 case SYMBOL_REF:
2783 /* 'Naked' TLS symbol references are never OK,
2784 non-TLS symbols are OK iff we are non-PIC. */
2785 if (tls_symbolic_operand (x))
2786 return true;
2787 else
2788 return flag_pic != 0;
2789
2790 case CONST:
2791 return s390_cannot_force_const_mem (mode, XEXP (x, 0));
2792 case PLUS:
2793 case MINUS:
2794 return s390_cannot_force_const_mem (mode, XEXP (x, 0))
2795 || s390_cannot_force_const_mem (mode, XEXP (x, 1));
2796
2797 case UNSPEC:
2798 switch (XINT (x, 1))
2799 {
2800 /* Only lt-relative or GOT-relative UNSPECs are OK. */
2801 case UNSPEC_LTREL_OFFSET:
2802 case UNSPEC_GOT:
2803 case UNSPEC_GOTOFF:
2804 case UNSPEC_PLTOFF:
2805 case UNSPEC_TLSGD:
2806 case UNSPEC_TLSLDM:
2807 case UNSPEC_NTPOFF:
2808 case UNSPEC_DTPOFF:
2809 case UNSPEC_GOTNTPOFF:
2810 case UNSPEC_INDNTPOFF:
2811 return false;
2812
2813 /* If the literal pool shares the code section, execute template
2814 placeholders can be put into the pool as well. */
2815 case UNSPEC_INSN:
2816 return TARGET_CPU_ZARCH;
2817
2818 default:
2819 return true;
2820 }
2821 break;
2822
2823 default:
2824 gcc_unreachable ();
2825 }
2826 }
2827
2828 /* Returns true if the constant value OP is a legitimate general
2829 operand during and after reload. The difference to
2830 legitimate_constant_p is that this function will not accept
2831 a constant that would need to be forced to the literal pool
2832 before it can be used as operand.
2833 This function accepts all constants which can be loaded directly
2834 into a GPR. */
2835
2836 bool
2837 legitimate_reload_constant_p (rtx op)
2838 {
2839 /* Accept la(y) operands. */
2840 if (GET_CODE (op) == CONST_INT
2841 && DISP_IN_RANGE (INTVAL (op)))
2842 return true;
2843
2844 /* Accept l(g)hi/l(g)fi operands. */
2845 if (GET_CODE (op) == CONST_INT
2846 && (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_Os (INTVAL (op))))
2847 return true;
2848
2849 /* Accept lliXX operands. */
2850 if (TARGET_ZARCH
2851 && GET_CODE (op) == CONST_INT
2852 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2853 && s390_single_part (op, word_mode, HImode, 0) >= 0)
2854 return true;
2855
2856 if (TARGET_EXTIMM
2857 && GET_CODE (op) == CONST_INT
2858 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2859 && s390_single_part (op, word_mode, SImode, 0) >= 0)
2860 return true;
2861
2862 /* Accept larl operands. */
2863 if (TARGET_CPU_ZARCH
2864 && larl_operand (op, VOIDmode))
2865 return true;
2866
2867 /* Accept floating-point zero operands that fit into a single GPR. */
2868 if (GET_CODE (op) == CONST_DOUBLE
2869 && s390_float_const_zero_p (op)
2870 && GET_MODE_SIZE (GET_MODE (op)) <= UNITS_PER_WORD)
2871 return true;
2872
2873 /* Accept double-word operands that can be split. */
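/* For illustration (a hypothetical value): with word_mode == SImode,
   a DImode constant such as 0x0000000500000007 splits into the SImode
   words 5 and 7, each of which can be loaded with lhi, so the
   constant as a whole is accepted.  */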
2874 if (GET_CODE (op) == CONST_INT
2875 && trunc_int_for_mode (INTVAL (op), word_mode) != INTVAL (op))
2876 {
2877 enum machine_mode dword_mode = word_mode == SImode ? DImode : TImode;
2878 rtx hi = operand_subword (op, 0, 0, dword_mode);
2879 rtx lo = operand_subword (op, 1, 0, dword_mode);
2880 return legitimate_reload_constant_p (hi)
2881 && legitimate_reload_constant_p (lo);
2882 }
2883
2884 /* Everything else cannot be handled without reload. */
2885 return false;
2886 }
2887
2888 /* Returns true if the constant value OP is a legitimate fp operand
2889 during and after reload.
2890 This function accepts all constants which can be loaded directly
2891 into an FPR. */
2892
2893 static bool
2894 legitimate_reload_fp_constant_p (rtx op)
2895 {
2896 /* Accept floating-point zero operands if the load zero instruction
2897 can be used. */
2898 if (TARGET_Z196
2899 && GET_CODE (op) == CONST_DOUBLE
2900 && s390_float_const_zero_p (op))
2901 return true;
2902
2903 return false;
2904 }
2905
2906 /* Given an rtx OP being reloaded into a reg required to be in class RCLASS,
2907 return the class of reg to actually use. */
2908
2909 static reg_class_t
2910 s390_preferred_reload_class (rtx op, reg_class_t rclass)
2911 {
2912 switch (GET_CODE (op))
2913 {
2914 /* Constants we cannot reload into general registers
2915 must be forced into the literal pool. */
2916 case CONST_DOUBLE:
2917 case CONST_INT:
2918 if (reg_class_subset_p (GENERAL_REGS, rclass)
2919 && legitimate_reload_constant_p (op))
2920 return GENERAL_REGS;
2921 else if (reg_class_subset_p (ADDR_REGS, rclass)
2922 && legitimate_reload_constant_p (op))
2923 return ADDR_REGS;
2924 else if (reg_class_subset_p (FP_REGS, rclass)
2925 && legitimate_reload_fp_constant_p (op))
2926 return FP_REGS;
2927 return NO_REGS;
2928
2929 /* If a symbolic constant or a PLUS is reloaded,
2930 it is most likely being used as an address, so
2931 prefer ADDR_REGS. If 'class' is not a superset
2932 of ADDR_REGS, e.g. FP_REGS, reject this reload. */
2933 case LABEL_REF:
2934 case SYMBOL_REF:
2935 case CONST:
2936 if (!legitimate_reload_constant_p (op))
2937 return NO_REGS;
2938 /* fallthrough */
2939 case PLUS:
2940 /* load address will be used. */
2941 if (reg_class_subset_p (ADDR_REGS, rclass))
2942 return ADDR_REGS;
2943 else
2944 return NO_REGS;
2945
2946 default:
2947 break;
2948 }
2949
2950 return rclass;
2951 }
2952
2953 /* Return true if ADDR is SYMBOL_REF + addend with addend being a
2954 multiple of ALIGNMENT and the SYMBOL_REF being naturally
2955 aligned. */
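/* For illustration (a hypothetical symbol): with ALIGNMENT == 4,
   "sym" + 8 is accepted when "sym" itself is naturally aligned, while
   "sym" + 6 is rejected because the addend is not a multiple of 4.  */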
2956
2957 bool
2958 s390_check_symref_alignment (rtx addr, HOST_WIDE_INT alignment)
2959 {
2960 HOST_WIDE_INT addend;
2961 rtx symref;
2962
2963 /* Accept symbol@GOTENT with pointer size alignment. */
2964 if (GET_CODE (addr) == CONST
2965 && GET_CODE (XEXP (addr, 0)) == UNSPEC
2966 && XINT (XEXP (addr, 0), 1) == UNSPEC_GOTENT
2967 && alignment <= UNITS_PER_LONG)
2968 return true;
2969
2970 if (!s390_symref_operand_p (addr, &symref, &addend))
2971 return false;
2972
2973 return (!SYMBOL_REF_NOT_NATURALLY_ALIGNED_P (symref)
2974 && !(addend & (alignment - 1)));
2975 }
2976
2977 /* ADDR is moved into REG using larl. If ADDR isn't a valid larl
2978 operand, SCRATCH is used to load the even part of the address;
2979 one is then added to form the final value. */
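/* For illustration (hypothetical registers and symbol), for "sym" + 5
   this emits roughly
       larl  %r1,sym+4
       la    %r2,1(%r1)
   so that the odd address is formed without clobbering the condition
   code.  */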
2980
2981 void
2982 s390_reload_larl_operand (rtx reg, rtx addr, rtx scratch)
2983 {
2984 HOST_WIDE_INT addend;
2985 rtx symref;
2986
2987 if (!s390_symref_operand_p (addr, &symref, &addend))
2988 gcc_unreachable ();
2989
2990 if (!(addend & 1))
2991 /* Easy case. The addend is even so larl will do fine. */
2992 emit_move_insn (reg, addr);
2993 else
2994 {
2995 /* We can leave the scratch register untouched if the target
2996 register is a valid base register. */
2997 if (REGNO (reg) < FIRST_PSEUDO_REGISTER
2998 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS)
2999 scratch = reg;
3000
3001 gcc_assert (REGNO (scratch) < FIRST_PSEUDO_REGISTER);
3002 gcc_assert (REGNO_REG_CLASS (REGNO (scratch)) == ADDR_REGS);
3003
3004 if (addend != 1)
3005 emit_move_insn (scratch,
3006 gen_rtx_CONST (Pmode,
3007 gen_rtx_PLUS (Pmode, symref,
3008 GEN_INT (addend - 1))));
3009 else
3010 emit_move_insn (scratch, symref);
3011
3012 /* Increment the address using la in order to avoid clobbering cc. */
3013 emit_move_insn (reg, gen_rtx_PLUS (Pmode, scratch, const1_rtx));
3014 }
3015 }
3016
3017 /* Generate what is necessary to move between REG and MEM using
3018 SCRATCH. The direction is given by TOMEM. */
3019
3020 void
3021 s390_reload_symref_address (rtx reg, rtx mem, rtx scratch, bool tomem)
3022 {
3023 /* Reload might have pulled a constant out of the literal pool.
3024 Force it back in. */
3025 if (CONST_INT_P (mem) || GET_CODE (mem) == CONST_DOUBLE
3026 || GET_CODE (mem) == CONST)
3027 mem = force_const_mem (GET_MODE (reg), mem);
3028
3029 gcc_assert (MEM_P (mem));
3030
3031 /* For a load from memory we can leave the scratch register
3032 untouched if the target register is a valid base register. */
3033 if (!tomem
3034 && REGNO (reg) < FIRST_PSEUDO_REGISTER
3035 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS
3036 && GET_MODE (reg) == GET_MODE (scratch))
3037 scratch = reg;
3038
3039 /* Load address into scratch register. Since we can't have a
3040 secondary reload for a secondary reload we have to cover the case
3041 where larl would need a secondary reload here as well. */
3042 s390_reload_larl_operand (scratch, XEXP (mem, 0), scratch);
3043
3044 /* Now we can use a standard load/store to do the move. */
3045 if (tomem)
3046 emit_move_insn (replace_equiv_address (mem, scratch), reg);
3047 else
3048 emit_move_insn (reg, replace_equiv_address (mem, scratch));
3049 }
3050
3051 /* Inform reload about cases where moving X with a mode MODE to a register in
3052 RCLASS requires an extra scratch or immediate register. Return the class
3053 needed for the immediate register. */
3054
3055 static reg_class_t
3056 s390_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
3057 enum machine_mode mode, secondary_reload_info *sri)
3058 {
3059 enum reg_class rclass = (enum reg_class) rclass_i;
3060
3061 /* Intermediate register needed. */
3062 if (reg_classes_intersect_p (CC_REGS, rclass))
3063 return GENERAL_REGS;
3064
3065 if (TARGET_Z10)
3066 {
3067 HOST_WIDE_INT offset;
3068 rtx symref;
3069
3070 /* On z10 several optimizer steps may generate larl operands with
3071 an odd addend. */
3072 if (in_p
3073 && s390_symref_operand_p (x, &symref, &offset)
3074 && mode == Pmode
3075 && !SYMBOL_REF_ALIGN1_P (symref)
3076 && (offset & 1) == 1)
3077 sri->icode = ((mode == DImode) ? CODE_FOR_reloaddi_larl_odd_addend_z10
3078 : CODE_FOR_reloadsi_larl_odd_addend_z10);
3079
3080 /* On z10 we need a scratch register when moving QI, TI or floating
3081 point mode values from or to a memory location with a SYMBOL_REF
3082 or if the symref addend of a SI or DI move is not aligned to the
3083 width of the access. */
3084 if (MEM_P (x)
3085 && s390_symref_operand_p (XEXP (x, 0), NULL, NULL)
3086 && (mode == QImode || mode == TImode || FLOAT_MODE_P (mode)
3087 || (!TARGET_ZARCH && mode == DImode)
3088 || ((mode == HImode || mode == SImode || mode == DImode)
3089 && (!s390_check_symref_alignment (XEXP (x, 0),
3090 GET_MODE_SIZE (mode))))))
3091 {
3092 #define __SECONDARY_RELOAD_CASE(M,m) \
3093 case M##mode: \
3094 if (TARGET_64BIT) \
3095 sri->icode = in_p ? CODE_FOR_reload##m##di_toreg_z10 : \
3096 CODE_FOR_reload##m##di_tomem_z10; \
3097 else \
3098 sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 : \
3099 CODE_FOR_reload##m##si_tomem_z10; \
3100 break;
3101
3102 switch (GET_MODE (x))
3103 {
3104 __SECONDARY_RELOAD_CASE (QI, qi);
3105 __SECONDARY_RELOAD_CASE (HI, hi);
3106 __SECONDARY_RELOAD_CASE (SI, si);
3107 __SECONDARY_RELOAD_CASE (DI, di);
3108 __SECONDARY_RELOAD_CASE (TI, ti);
3109 __SECONDARY_RELOAD_CASE (SF, sf);
3110 __SECONDARY_RELOAD_CASE (DF, df);
3111 __SECONDARY_RELOAD_CASE (TF, tf);
3112 __SECONDARY_RELOAD_CASE (SD, sd);
3113 __SECONDARY_RELOAD_CASE (DD, dd);
3114 __SECONDARY_RELOAD_CASE (TD, td);
3115
3116 default:
3117 gcc_unreachable ();
3118 }
3119 #undef __SECONDARY_RELOAD_CASE
3120 }
3121 }
3122
3123 /* We need a scratch register when loading a PLUS expression which
3124 is not a legitimate operand of the LOAD ADDRESS instruction. */
3125 if (in_p && s390_plus_operand (x, mode))
3126 sri->icode = (TARGET_64BIT ?
3127 CODE_FOR_reloaddi_plus : CODE_FOR_reloadsi_plus);
3128
3129 /* When performing a multiword move from or to memory we have to make sure the
3130 second chunk in memory is addressable without causing a displacement
3131 overflow. If that would be the case we calculate the address in
3132 a scratch register. */
3133 if (MEM_P (x)
3134 && GET_CODE (XEXP (x, 0)) == PLUS
3135 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3136 && !DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1))
3137 + GET_MODE_SIZE (mode) - 1))
3138 {
3139 /* For GENERAL_REGS a displacement overflow is no problem if occurring
3140 in an s_operand address since we may fall back to lm/stm. So we only
3141 have to care about overflows in the b+i+d case. */
3142 if ((reg_classes_intersect_p (GENERAL_REGS, rclass)
3143 && s390_class_max_nregs (GENERAL_REGS, mode) > 1
3144 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
3145 /* For FP_REGS no lm/stm is available so this check is triggered
3146 for displacement overflows in b+i+d and b+d like addresses. */
3147 || (reg_classes_intersect_p (FP_REGS, rclass)
3148 && s390_class_max_nregs (FP_REGS, mode) > 1))
3149 {
3150 if (in_p)
3151 sri->icode = (TARGET_64BIT ?
3152 CODE_FOR_reloaddi_nonoffmem_in :
3153 CODE_FOR_reloadsi_nonoffmem_in);
3154 else
3155 sri->icode = (TARGET_64BIT ?
3156 CODE_FOR_reloaddi_nonoffmem_out :
3157 CODE_FOR_reloadsi_nonoffmem_out);
3158 }
3159 }
3160
3161 /* A scratch address register is needed when a symbolic constant is
3162 copied to r0 when compiling with -fPIC. In other cases the target
3163 register might be used as temporary (see legitimize_pic_address). */
3164 if (in_p && SYMBOLIC_CONST (x) && flag_pic == 2 && rclass != ADDR_REGS)
3165 sri->icode = (TARGET_64BIT ?
3166 CODE_FOR_reloaddi_PIC_addr :
3167 CODE_FOR_reloadsi_PIC_addr);
3168
3169 /* Either scratch or no register needed. */
3170 return NO_REGS;
3171 }
3172
3173 /* Generate code to load SRC, which is PLUS that is not a
3174 legitimate operand for the LA instruction, into TARGET.
3175 SCRATCH may be used as scratch register. */
3176
3177 void
3178 s390_expand_plus_operand (rtx target, rtx src,
3179 rtx scratch)
3180 {
3181 rtx sum1, sum2;
3182 struct s390_address ad;
3183
3184 /* src must be a PLUS; get its two operands. */
3185 gcc_assert (GET_CODE (src) == PLUS);
3186 gcc_assert (GET_MODE (src) == Pmode);
3187
3188 /* Check if any of the two operands is already scheduled
3189 for replacement by reload. This can happen e.g. when
3190 float registers occur in an address. */
3191 sum1 = find_replacement (&XEXP (src, 0));
3192 sum2 = find_replacement (&XEXP (src, 1));
3193 src = gen_rtx_PLUS (Pmode, sum1, sum2);
3194
3195 /* If the address is already strictly valid, there's nothing to do. */
3196 if (!s390_decompose_address (src, &ad)
3197 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
3198 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
3199 {
3200 /* Otherwise, one of the operands cannot be an address register;
3201 we reload its value into the scratch register. */
3202 if (true_regnum (sum1) < 1 || true_regnum (sum1) > 15)
3203 {
3204 emit_move_insn (scratch, sum1);
3205 sum1 = scratch;
3206 }
3207 if (true_regnum (sum2) < 1 || true_regnum (sum2) > 15)
3208 {
3209 emit_move_insn (scratch, sum2);
3210 sum2 = scratch;
3211 }
3212
3213 /* According to the way these invalid addresses are generated
3214 in reload.c, it should never happen (at least on s390) that
3215 *neither* of the PLUS components, after find_replacements
3216 was applied, is an address register. */
3217 if (sum1 == scratch && sum2 == scratch)
3218 {
3219 debug_rtx (src);
3220 gcc_unreachable ();
3221 }
3222
3223 src = gen_rtx_PLUS (Pmode, sum1, sum2);
3224 }
3225
3226 /* Emit the LOAD ADDRESS pattern. Note that reload of PLUS
3227 is only ever performed on addresses, so we can mark the
3228 sum as legitimate for LA in any case. */
3229 s390_load_address (target, src);
3230 }
3231
3232
3233 /* Return true if ADDR is a valid memory address.
3234 STRICT specifies whether strict register checking applies. */
3235
3236 static bool
3237 s390_legitimate_address_p (enum machine_mode mode, rtx addr, bool strict)
3238 {
3239 struct s390_address ad;
3240
3241 if (TARGET_Z10
3242 && larl_operand (addr, VOIDmode)
3243 && (mode == VOIDmode
3244 || s390_check_symref_alignment (addr, GET_MODE_SIZE (mode))))
3245 return true;
3246
3247 if (!s390_decompose_address (addr, &ad))
3248 return false;
3249
3250 if (strict)
3251 {
3252 if (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
3253 return false;
3254
3255 if (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx)))
3256 return false;
3257 }
3258 else
3259 {
3260 if (ad.base
3261 && !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
3262 || REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
3263 return false;
3264
3265 if (ad.indx
3266 && !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
3267 || REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
3268 return false;
3269 }
3270 return true;
3271 }
3272
3273 /* Return true if OP is a valid operand for the LA instruction.
3274 In 31-bit, we need to prove that the result is used as an
3275 address, as LA performs only a 31-bit addition. */
3276
3277 bool
3278 legitimate_la_operand_p (rtx op)
3279 {
3280 struct s390_address addr;
3281 if (!s390_decompose_address (op, &addr))
3282 return false;
3283
3284 return (TARGET_64BIT || addr.pointer);
3285 }
3286
3287 /* Return true if it is valid *and* preferable to use LA to
3288 compute the sum of OP1 and OP2. */
3289
3290 bool
3291 preferred_la_operand_p (rtx op1, rtx op2)
3292 {
3293 struct s390_address addr;
3294
3295 if (op2 != const0_rtx)
3296 op1 = gen_rtx_PLUS (Pmode, op1, op2);
3297
3298 if (!s390_decompose_address (op1, &addr))
3299 return false;
3300 if (addr.base && !REGNO_OK_FOR_BASE_P (REGNO (addr.base)))
3301 return false;
3302 if (addr.indx && !REGNO_OK_FOR_INDEX_P (REGNO (addr.indx)))
3303 return false;
3304
3305 /* Avoid LA instructions with index register on z196; it is
3306 preferable to use regular add instructions when possible. */
3307 if (addr.indx && s390_tune == PROCESSOR_2817_Z196)
3308 return false;
3309
3310 if (!TARGET_64BIT && !addr.pointer)
3311 return false;
3312
3313 if (addr.pointer)
3314 return true;
3315
3316 if ((addr.base && REG_P (addr.base) && REG_POINTER (addr.base))
3317 || (addr.indx && REG_P (addr.indx) && REG_POINTER (addr.indx)))
3318 return true;
3319
3320 return false;
3321 }
3322
3323 /* Emit a forced load-address operation to load SRC into DST.
3324 This will use the LOAD ADDRESS instruction even in situations
3325 where legitimate_la_operand_p (SRC) returns false. */
3326
3327 void
3328 s390_load_address (rtx dst, rtx src)
3329 {
3330 if (TARGET_64BIT)
3331 emit_move_insn (dst, src);
3332 else
3333 emit_insn (gen_force_la_31 (dst, src));
3334 }
3335
3336 /* Return a legitimate reference for ORIG (an address) using the
3337 register REG. If REG is 0, a new pseudo is generated.
3338
3339 There are two types of references that must be handled:
3340
3341 1. Global data references must load the address from the GOT, via
3342 the PIC reg. An insn is emitted to do this load, and the reg is
3343 returned.
3344
3345 2. Static data references, constant pool addresses, and code labels
3346 compute the address as an offset from the GOT, whose base is in
3347 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
3348 differentiate them from global data objects. The returned
3349 address is the PIC reg + an unspec constant.
3350
3351 TARGET_LEGITIMIZE_ADDRESS_P rejects symbolic references unless the PIC
3352 reg also appears in the address. */
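/* For illustration only (hypothetical register numbers, assuming %r12
   holds the GOT pointer): with -fpic a global symbol is typically
   reached through something like
       l     %r1,sym@GOT(%r12)
   while with -fPIC on z/Architecture the GOT entry is located
   PC-relatively,
       larl  %r1,sym@GOTENT
       lg    %r1,0(%r1)
   These are sketches of the intent, not literal output of this
   function.  */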
3353
3354 rtx
3355 legitimize_pic_address (rtx orig, rtx reg)
3356 {
3357 rtx addr = orig;
3358 rtx new_rtx = orig;
3359 rtx base;
3360
3361 gcc_assert (!TLS_SYMBOLIC_CONST (addr));
3362
3363 if (GET_CODE (addr) == LABEL_REF
3364 || (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (addr)))
3365 {
3366 /* This is a local symbol. */
3367 if (TARGET_CPU_ZARCH && larl_operand (addr, VOIDmode))
3368 {
3369 /* Access local symbols PC-relative via LARL.
3370 This is the same as in the non-PIC case, so it is
3371 handled automatically ... */
3372 }
3373 else
3374 {
3375 /* Access local symbols relative to the GOT. */
3376
3377 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3378
3379 if (reload_in_progress || reload_completed)
3380 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3381
3382 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
3383 addr = gen_rtx_CONST (Pmode, addr);
3384 addr = force_const_mem (Pmode, addr);
3385 emit_move_insn (temp, addr);
3386
3387 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3388 if (reg != 0)
3389 {
3390 s390_load_address (reg, new_rtx);
3391 new_rtx = reg;
3392 }
3393 }
3394 }
3395 else if (GET_CODE (addr) == SYMBOL_REF)
3396 {
3397 if (reg == 0)
3398 reg = gen_reg_rtx (Pmode);
3399
3400 if (flag_pic == 1)
3401 {
3402 /* Assume GOT offset < 4k. This is handled the same way
3403 in both 31- and 64-bit code (@GOT). */
3404
3405 if (reload_in_progress || reload_completed)
3406 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3407
3408 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3409 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3410 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
3411 new_rtx = gen_const_mem (Pmode, new_rtx);
3412 emit_move_insn (reg, new_rtx);
3413 new_rtx = reg;
3414 }
3415 else if (TARGET_CPU_ZARCH)
3416 {
3417 /* If the GOT offset might be >= 4k, we determine the position
3418 of the GOT entry via a PC-relative LARL (@GOTENT). */
3419
3420 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3421
3422 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
3423 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
3424
3425 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
3426 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3427
3428 if (!TARGET_Z10)
3429 {
3430 emit_move_insn (temp, new_rtx);
3431 new_rtx = gen_const_mem (Pmode, temp);
3432 }
3433 else
3434 new_rtx = gen_const_mem (GET_MODE (reg), new_rtx);
3435 emit_move_insn (reg, new_rtx);
3436 new_rtx = reg;
3437 }
3438 else
3439 {
3440 /* If the GOT offset might be >= 4k, we have to load it
3441 from the literal pool (@GOT). */
3442
3443 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3444
3445 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
3446 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
3447
3448 if (reload_in_progress || reload_completed)
3449 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3450
3451 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3452 addr = gen_rtx_CONST (Pmode, addr);
3453 addr = force_const_mem (Pmode, addr);
3454 emit_move_insn (temp, addr);
3455
3456 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3457 new_rtx = gen_const_mem (Pmode, new_rtx);
3458 emit_move_insn (reg, new_rtx);
3459 new_rtx = reg;
3460 }
3461 }
3462 else
3463 {
3464 if (GET_CODE (addr) == CONST)
3465 {
3466 addr = XEXP (addr, 0);
3467 if (GET_CODE (addr) == UNSPEC)
3468 {
3469 gcc_assert (XVECLEN (addr, 0) == 1);
3470 switch (XINT (addr, 1))
3471 {
3472 /* If someone moved a GOT-relative UNSPEC
3473 out of the literal pool, force them back in. */
3474 case UNSPEC_GOTOFF:
3475 case UNSPEC_PLTOFF:
3476 new_rtx = force_const_mem (Pmode, orig);
3477 break;
3478
3479 /* @GOT is OK as is if small. */
3480 case UNSPEC_GOT:
3481 if (flag_pic == 2)
3482 new_rtx = force_const_mem (Pmode, orig);
3483 break;
3484
3485 /* @GOTENT is OK as is. */
3486 case UNSPEC_GOTENT:
3487 break;
3488
3489 /* @PLT is OK as is on 64-bit, must be converted to
3490 GOT-relative @PLTOFF on 31-bit. */
3491 case UNSPEC_PLT:
3492 if (!TARGET_CPU_ZARCH)
3493 {
3494 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3495
3496 if (reload_in_progress || reload_completed)
3497 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3498
3499 addr = XVECEXP (addr, 0, 0);
3500 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr),
3501 UNSPEC_PLTOFF);
3502 addr = gen_rtx_CONST (Pmode, addr);
3503 addr = force_const_mem (Pmode, addr);
3504 emit_move_insn (temp, addr);
3505
3506 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3507 if (reg != 0)
3508 {
3509 s390_load_address (reg, new_rtx);
3510 new_rtx = reg;
3511 }
3512 }
3513 break;
3514
3515 /* Everything else cannot happen. */
3516 default:
3517 gcc_unreachable ();
3518 }
3519 }
3520 else
3521 gcc_assert (GET_CODE (addr) == PLUS);
3522 }
3523 if (GET_CODE (addr) == PLUS)
3524 {
3525 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
3526
3527 gcc_assert (!TLS_SYMBOLIC_CONST (op0));
3528 gcc_assert (!TLS_SYMBOLIC_CONST (op1));
3529
3530 /* Check first to see if this is a constant offset
3531 from a local symbol reference. */
3532 if ((GET_CODE (op0) == LABEL_REF
3533 || (GET_CODE (op0) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (op0)))
3534 && GET_CODE (op1) == CONST_INT)
3535 {
3536 if (TARGET_CPU_ZARCH
3537 && larl_operand (op0, VOIDmode)
3538 && INTVAL (op1) < (HOST_WIDE_INT)1 << 31
3539 && INTVAL (op1) >= -((HOST_WIDE_INT)1 << 31))
3540 {
3541 if (INTVAL (op1) & 1)
3542 {
3543 /* LARL can't handle odd offsets, so emit a
3544 pair of LARL and LA. */
3545 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3546
3547 if (!DISP_IN_RANGE (INTVAL (op1)))
3548 {
3549 HOST_WIDE_INT even = INTVAL (op1) - 1;
3550 op0 = gen_rtx_PLUS (Pmode, op0, GEN_INT (even));
3551 op0 = gen_rtx_CONST (Pmode, op0);
3552 op1 = const1_rtx;
3553 }
3554
3555 emit_move_insn (temp, op0);
3556 new_rtx = gen_rtx_PLUS (Pmode, temp, op1);
3557
3558 if (reg != 0)
3559 {
3560 s390_load_address (reg, new_rtx);
3561 new_rtx = reg;
3562 }
3563 }
3564 else
3565 {
3566 /* If the offset is even, we can just use LARL.
3567 This will happen automatically. */
3568 }
3569 }
3570 else
3571 {
3572 /* Access local symbols relative to the GOT. */
3573
3574 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3575
3576 if (reload_in_progress || reload_completed)
3577 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3578
3579 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
3580 UNSPEC_GOTOFF);
3581 addr = gen_rtx_PLUS (Pmode, addr, op1);
3582 addr = gen_rtx_CONST (Pmode, addr);
3583 addr = force_const_mem (Pmode, addr);
3584 emit_move_insn (temp, addr);
3585
3586 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3587 if (reg != 0)
3588 {
3589 s390_load_address (reg, new_rtx);
3590 new_rtx = reg;
3591 }
3592 }
3593 }
3594
3595 /* Now, check whether it is a GOT relative symbol plus offset
3596 that was pulled out of the literal pool. Force it back in. */
3597
3598 else if (GET_CODE (op0) == UNSPEC
3599 && GET_CODE (op1) == CONST_INT
3600 && XINT (op0, 1) == UNSPEC_GOTOFF)
3601 {
3602 gcc_assert (XVECLEN (op0, 0) == 1);
3603
3604 new_rtx = force_const_mem (Pmode, orig);
3605 }
3606
3607 /* Otherwise, compute the sum. */
3608 else
3609 {
3610 base = legitimize_pic_address (XEXP (addr, 0), reg);
3611 new_rtx = legitimize_pic_address (XEXP (addr, 1),
3612 base == reg ? NULL_RTX : reg);
3613 if (GET_CODE (new_rtx) == CONST_INT)
3614 new_rtx = plus_constant (Pmode, base, INTVAL (new_rtx));
3615 else
3616 {
3617 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
3618 {
3619 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
3620 new_rtx = XEXP (new_rtx, 1);
3621 }
3622 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
3623 }
3624
3625 if (GET_CODE (new_rtx) == CONST)
3626 new_rtx = XEXP (new_rtx, 0);
3627 new_rtx = force_operand (new_rtx, 0);
3628 }
3629 }
3630 }
3631 return new_rtx;
3632 }
3633
3634 /* Load the thread pointer into a register. */
3635
3636 rtx
3637 s390_get_thread_pointer (void)
3638 {
3639 rtx tp = gen_reg_rtx (Pmode);
3640
3641 emit_move_insn (tp, gen_rtx_REG (Pmode, TP_REGNUM));
3642 mark_reg_pointer (tp, BITS_PER_WORD);
3643
3644 return tp;
3645 }
3646
3647 /* Emit a tls call insn. The call target is the SYMBOL_REF stored
3648 in s390_tls_symbol which always refers to __tls_get_offset.
3649 The returned offset is written to RESULT_REG and a USE rtx is
3650 generated for TLS_CALL. */
3651
3652 static GTY(()) rtx s390_tls_symbol;
3653
3654 static void
3655 s390_emit_tls_call_insn (rtx result_reg, rtx tls_call)
3656 {
3657 rtx insn;
3658
3659 if (!flag_pic)
3660 emit_insn (s390_load_got ());
3661
3662 if (!s390_tls_symbol)
3663 s390_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_offset");
3664
3665 insn = s390_emit_call (s390_tls_symbol, tls_call, result_reg,
3666 gen_rtx_REG (Pmode, RETURN_REGNUM));
3667
3668 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), result_reg);
3669 RTL_CONST_CALL_P (insn) = 1;
3670 }
3671
3672 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3673 this (thread-local) address. REG may be used as temporary. */
3674
3675 static rtx
3676 legitimize_tls_address (rtx addr, rtx reg)
3677 {
3678 rtx new_rtx, tls_call, temp, base, r2, insn;
3679
3680 if (GET_CODE (addr) == SYMBOL_REF)
3681 switch (tls_symbolic_operand (addr))
3682 {
3683 case TLS_MODEL_GLOBAL_DYNAMIC:
3684 start_sequence ();
3685 r2 = gen_rtx_REG (Pmode, 2);
3686 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_TLSGD);
3687 new_rtx = gen_rtx_CONST (Pmode, tls_call);
3688 new_rtx = force_const_mem (Pmode, new_rtx);
3689 emit_move_insn (r2, new_rtx);
3690 s390_emit_tls_call_insn (r2, tls_call);
3691 insn = get_insns ();
3692 end_sequence ();
3693
3694 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3695 temp = gen_reg_rtx (Pmode);
3696 emit_libcall_block (insn, temp, r2, new_rtx);
3697
3698 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3699 if (reg != 0)
3700 {
3701 s390_load_address (reg, new_rtx);
3702 new_rtx = reg;
3703 }
3704 break;
3705
3706 case TLS_MODEL_LOCAL_DYNAMIC:
3707 start_sequence ();
3708 r2 = gen_rtx_REG (Pmode, 2);
3709 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM);
3710 new_rtx = gen_rtx_CONST (Pmode, tls_call);
3711 new_rtx = force_const_mem (Pmode, new_rtx);
3712 emit_move_insn (r2, new_rtx);
3713 s390_emit_tls_call_insn (r2, tls_call);
3714 insn = get_insns ();
3715 end_sequence ();
3716
3717 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM_NTPOFF);
3718 temp = gen_reg_rtx (Pmode);
3719 emit_libcall_block (insn, temp, r2, new_rtx);
3720
3721 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3722 base = gen_reg_rtx (Pmode);
3723 s390_load_address (base, new_rtx);
3724
3725 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_DTPOFF);
3726 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3727 new_rtx = force_const_mem (Pmode, new_rtx);
3728 temp = gen_reg_rtx (Pmode);
3729 emit_move_insn (temp, new_rtx);
3730
3731 new_rtx = gen_rtx_PLUS (Pmode, base, temp);
3732 if (reg != 0)
3733 {
3734 s390_load_address (reg, new_rtx);
3735 new_rtx = reg;
3736 }
3737 break;
3738
3739 case TLS_MODEL_INITIAL_EXEC:
3740 if (flag_pic == 1)
3741 {
3742 /* Assume GOT offset < 4k. This is handled the same way
3743 in both 31- and 64-bit code. */
3744
3745 if (reload_in_progress || reload_completed)
3746 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3747
3748 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3749 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3750 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
3751 new_rtx = gen_const_mem (Pmode, new_rtx);
3752 temp = gen_reg_rtx (Pmode);
3753 emit_move_insn (temp, new_rtx);
3754 }
3755 else if (TARGET_CPU_ZARCH)
3756 {
3757 /* If the GOT offset might be >= 4k, we determine the position
3758 of the GOT entry via a PC-relative LARL. */
3759
3760 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3761 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3762 temp = gen_reg_rtx (Pmode);
3763 emit_move_insn (temp, new_rtx);
3764
3765 new_rtx = gen_const_mem (Pmode, temp);
3766 temp = gen_reg_rtx (Pmode);
3767 emit_move_insn (temp, new_rtx);
3768 }
3769 else if (flag_pic)
3770 {
3771 /* If the GOT offset might be >= 4k, we have to load it
3772 from the literal pool. */
3773
3774 if (reload_in_progress || reload_completed)
3775 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3776
3777 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3778 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3779 new_rtx = force_const_mem (Pmode, new_rtx);
3780 temp = gen_reg_rtx (Pmode);
3781 emit_move_insn (temp, new_rtx);
3782
3783 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3784 new_rtx = gen_const_mem (Pmode, new_rtx);
3785
3786 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
3787 temp = gen_reg_rtx (Pmode);
3788 emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
3789 }
3790 else
3791 {
3792 /* In position-dependent code, load the absolute address of
3793 the GOT entry from the literal pool. */
3794
3795 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3796 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3797 new_rtx = force_const_mem (Pmode, new_rtx);
3798 temp = gen_reg_rtx (Pmode);
3799 emit_move_insn (temp, new_rtx);
3800
3801 new_rtx = temp;
3802 new_rtx = gen_const_mem (Pmode, new_rtx);
3803 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
3804 temp = gen_reg_rtx (Pmode);
3805 emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
3806 }
3807
3808 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3809 if (reg != 0)
3810 {
3811 s390_load_address (reg, new_rtx);
3812 new_rtx = reg;
3813 }
3814 break;
3815
3816 case TLS_MODEL_LOCAL_EXEC:
3817 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3818 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3819 new_rtx = force_const_mem (Pmode, new_rtx);
3820 temp = gen_reg_rtx (Pmode);
3821 emit_move_insn (temp, new_rtx);
3822
3823 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3824 if (reg != 0)
3825 {
3826 s390_load_address (reg, new_rtx);
3827 new_rtx = reg;
3828 }
3829 break;
3830
3831 default:
3832 gcc_unreachable ();
3833 }
3834
3835 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == UNSPEC)
3836 {
3837 switch (XINT (XEXP (addr, 0), 1))
3838 {
3839 case UNSPEC_INDNTPOFF:
3840 gcc_assert (TARGET_CPU_ZARCH);
3841 new_rtx = addr;
3842 break;
3843
3844 default:
3845 gcc_unreachable ();
3846 }
3847 }
3848
3849 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
3850 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
3851 {
3852 new_rtx = XEXP (XEXP (addr, 0), 0);
3853 if (GET_CODE (new_rtx) != SYMBOL_REF)
3854 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3855
3856 new_rtx = legitimize_tls_address (new_rtx, reg);
3857 new_rtx = plus_constant (Pmode, new_rtx,
3858 INTVAL (XEXP (XEXP (addr, 0), 1)));
3859 new_rtx = force_operand (new_rtx, 0);
3860 }
3861
3862 else
3863 gcc_unreachable (); /* for now ... */
3864
3865 return new_rtx;
3866 }
3867
3868 /* Emit insns making the address in operands[1] valid for a standard
3869 move to operands[0]. operands[1] is replaced by an address which
3870 should be used instead of the former RTX to emit the move
3871 pattern. */
3872
3873 void
3874 emit_symbolic_move (rtx *operands)
3875 {
3876 rtx temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
3877
3878 if (GET_CODE (operands[0]) == MEM)
3879 operands[1] = force_reg (Pmode, operands[1]);
3880 else if (TLS_SYMBOLIC_CONST (operands[1]))
3881 operands[1] = legitimize_tls_address (operands[1], temp);
3882 else if (flag_pic)
3883 operands[1] = legitimize_pic_address (operands[1], temp);
3884 }
3885
3886 /* Try machine-dependent ways of modifying an illegitimate address X
3887 to be legitimate. If we find one, return the new, valid address.
3888
3889 OLDX is the address as it was before break_out_memory_refs was called.
3890 In some cases it is useful to look at this to decide what needs to be done.
3891
3892 MODE is the mode of the operand pointed to by X.
3893
3894 When -fpic is used, special handling is needed for symbolic references.
3895 See comments by legitimize_pic_address for details. */
3896
3897 static rtx
3898 s390_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
3899 enum machine_mode mode ATTRIBUTE_UNUSED)
3900 {
3901 rtx constant_term = const0_rtx;
3902
3903 if (TLS_SYMBOLIC_CONST (x))
3904 {
3905 x = legitimize_tls_address (x, 0);
3906
3907 if (s390_legitimate_address_p (mode, x, FALSE))
3908 return x;
3909 }
3910 else if (GET_CODE (x) == PLUS
3911 && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
3912 || TLS_SYMBOLIC_CONST (XEXP (x, 1))))
3913 {
3914 return x;
3915 }
3916 else if (flag_pic)
3917 {
3918 if (SYMBOLIC_CONST (x)
3919 || (GET_CODE (x) == PLUS
3920 && (SYMBOLIC_CONST (XEXP (x, 0))
3921 || SYMBOLIC_CONST (XEXP (x, 1)))))
3922 x = legitimize_pic_address (x, 0);
3923
3924 if (s390_legitimate_address_p (mode, x, FALSE))
3925 return x;
3926 }
3927
3928 x = eliminate_constant_term (x, &constant_term);
3929
3930 /* Optimize loading of large displacements by splitting them
3931 into the multiple of 4K and the rest; this allows the
3932 former to be CSE'd if possible.
3933
3934 Don't do this if the displacement is added to a register
3935 pointing into the stack frame, as the offsets will
3936 change later anyway. */
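/* For example, a displacement of 0x12345 is split below into
   upper = 0x12000 (loaded into a register and thus CSE-able) and
   lower = 0x345, which fits into a 12-bit displacement field.  */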
3937
3938 if (GET_CODE (constant_term) == CONST_INT
3939 && !TARGET_LONG_DISPLACEMENT
3940 && !DISP_IN_RANGE (INTVAL (constant_term))
3941 && !(REG_P (x) && REGNO_PTR_FRAME_P (REGNO (x))))
3942 {
3943 HOST_WIDE_INT lower = INTVAL (constant_term) & 0xfff;
3944 HOST_WIDE_INT upper = INTVAL (constant_term) ^ lower;
3945
3946 rtx temp = gen_reg_rtx (Pmode);
3947 rtx val = force_operand (GEN_INT (upper), temp);
3948 if (val != temp)
3949 emit_move_insn (temp, val);
3950
3951 x = gen_rtx_PLUS (Pmode, x, temp);
3952 constant_term = GEN_INT (lower);
3953 }
3954
3955 if (GET_CODE (x) == PLUS)
3956 {
3957 if (GET_CODE (XEXP (x, 0)) == REG)
3958 {
3959 rtx temp = gen_reg_rtx (Pmode);
3960 rtx val = force_operand (XEXP (x, 1), temp);
3961 if (val != temp)
3962 emit_move_insn (temp, val);
3963
3964 x = gen_rtx_PLUS (Pmode, XEXP (x, 0), temp);
3965 }
3966
3967 else if (GET_CODE (XEXP (x, 1)) == REG)
3968 {
3969 rtx temp = gen_reg_rtx (Pmode);
3970 rtx val = force_operand (XEXP (x, 0), temp);
3971 if (val != temp)
3972 emit_move_insn (temp, val);
3973
3974 x = gen_rtx_PLUS (Pmode, temp, XEXP (x, 1));
3975 }
3976 }
3977
3978 if (constant_term != const0_rtx)
3979 x = gen_rtx_PLUS (Pmode, x, constant_term);
3980
3981 return x;
3982 }
3983
3984 /* Try a machine-dependent way of reloading an illegitimate address AD
3985 operand. If we find one, push the reload and return the new address.
3986
3987 MODE is the mode of the enclosing MEM. OPNUM is the operand number
3988 and TYPE is the reload type of the current reload. */
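/* For an out-of-range constant displacement this mirrors the split in
   s390_legitimize_address above: the multiple-of-4K part is reloaded
   into a register of BASE_REG_CLASS, while the low 12 bits are kept
   as the displacement.  */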
3989
3990 rtx
3991 legitimize_reload_address (rtx ad, enum machine_mode mode ATTRIBUTE_UNUSED,
3992 int opnum, int type)
3993 {
3994 if (!optimize || TARGET_LONG_DISPLACEMENT)
3995 return NULL_RTX;
3996
3997 if (GET_CODE (ad) == PLUS)
3998 {
3999 rtx tem = simplify_binary_operation (PLUS, Pmode,
4000 XEXP (ad, 0), XEXP (ad, 1));
4001 if (tem)
4002 ad = tem;
4003 }
4004
4005 if (GET_CODE (ad) == PLUS
4006 && GET_CODE (XEXP (ad, 0)) == REG
4007 && GET_CODE (XEXP (ad, 1)) == CONST_INT
4008 && !DISP_IN_RANGE (INTVAL (XEXP (ad, 1))))
4009 {
4010 HOST_WIDE_INT lower = INTVAL (XEXP (ad, 1)) & 0xfff;
4011 HOST_WIDE_INT upper = INTVAL (XEXP (ad, 1)) ^ lower;
4012 rtx cst, tem, new_rtx;
4013
4014 cst = GEN_INT (upper);
4015 if (!legitimate_reload_constant_p (cst))
4016 cst = force_const_mem (Pmode, cst);
4017
4018 tem = gen_rtx_PLUS (Pmode, XEXP (ad, 0), cst);
4019 new_rtx = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));
4020
4021 push_reload (XEXP (tem, 1), 0, &XEXP (tem, 1), 0,
4022 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
4023 opnum, (enum reload_type) type);
4024 return new_rtx;
4025 }
4026
4027 return NULL_RTX;
4028 }
4029
4030 /* Emit code to move LEN bytes from SRC to DST. */
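/* The expansion uses one of three strategies: a single MVC for a
   constant length of at most 256 bytes, MVCLE where available, or a
   loop of 256-byte MVCs (with data prefetching on z10 and later).  */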
4031
4032 bool
4033 s390_expand_movmem (rtx dst, rtx src, rtx len)
4034 {
4035 /* When tuning for z10 or higher we rely on the Glibc functions to
4036 do the right thing. Inline code is only generated for constant
4037 lengths below 64k. */
4038 if (s390_tune >= PROCESSOR_2097_Z10
4039 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
4040 return false;
4041
4042 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
4043 {
4044 if (INTVAL (len) > 0)
4045 emit_insn (gen_movmem_short (dst, src, GEN_INT (INTVAL (len) - 1)));
4046 }
4047
4048 else if (TARGET_MVCLE)
4049 {
4050 emit_insn (gen_movmem_long (dst, src, convert_to_mode (Pmode, len, 1)));
4051 }
4052
4053 else
4054 {
4055 rtx dst_addr, src_addr, count, blocks, temp;
4056 rtx loop_start_label = gen_label_rtx ();
4057 rtx loop_end_label = gen_label_rtx ();
4058 rtx end_label = gen_label_rtx ();
4059 enum machine_mode mode;
4060
4061 mode = GET_MODE (len);
4062 if (mode == VOIDmode)
4063 mode = Pmode;
4064
4065 dst_addr = gen_reg_rtx (Pmode);
4066 src_addr = gen_reg_rtx (Pmode);
4067 count = gen_reg_rtx (mode);
4068 blocks = gen_reg_rtx (mode);
4069
4070 convert_move (count, len, 1);
4071 emit_cmp_and_jump_insns (count, const0_rtx,
4072 EQ, NULL_RTX, mode, 1, end_label);
4073
4074 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
4075 emit_move_insn (src_addr, force_operand (XEXP (src, 0), NULL_RTX));
4076 dst = change_address (dst, VOIDmode, dst_addr);
4077 src = change_address (src, VOIDmode, src_addr);
4078
4079 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4080 OPTAB_DIRECT);
4081 if (temp != count)
4082 emit_move_insn (count, temp);
4083
4084 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4085 OPTAB_DIRECT);
4086 if (temp != blocks)
4087 emit_move_insn (blocks, temp);
4088
4089 emit_cmp_and_jump_insns (blocks, const0_rtx,
4090 EQ, NULL_RTX, mode, 1, loop_end_label);
4091
4092 emit_label (loop_start_label);
4093
4094 if (TARGET_Z10
4095 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 768))
4096 {
4097 rtx prefetch;
4098
4099 /* Issue a read prefetch for the +3 cache line. */
4100 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, src_addr, GEN_INT (768)),
4101 const0_rtx, const0_rtx);
4102 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4103 emit_insn (prefetch);
4104
4105 /* Issue a write prefetch for the +3 cache line. */
4106 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (768)),
4107 const1_rtx, const0_rtx);
4108 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4109 emit_insn (prefetch);
4110 }
4111
4112 emit_insn (gen_movmem_short (dst, src, GEN_INT (255)));
4113 s390_load_address (dst_addr,
4114 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
4115 s390_load_address (src_addr,
4116 gen_rtx_PLUS (Pmode, src_addr, GEN_INT (256)));
4117
4118 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4119 OPTAB_DIRECT);
4120 if (temp != blocks)
4121 emit_move_insn (blocks, temp);
4122
4123 emit_cmp_and_jump_insns (blocks, const0_rtx,
4124 EQ, NULL_RTX, mode, 1, loop_end_label);
4125
4126 emit_jump (loop_start_label);
4127 emit_label (loop_end_label);
4128
4129 emit_insn (gen_movmem_short (dst, src,
4130 convert_to_mode (Pmode, count, 1)));
4131 emit_label (end_label);
4132 }
4133 return true;
4134 }
4135
4136 /* Emit code to set LEN bytes at DST to VAL.
4137 Make use of clrmem if VAL is zero. */
4138
4139 void
4140 s390_expand_setmem (rtx dst, rtx len, rtx val)
4141 {
4142 if (GET_CODE (len) == CONST_INT && INTVAL (len) == 0)
4143 return;
4144
4145 gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);
4146
4147 if (GET_CODE (len) == CONST_INT && INTVAL (len) > 0 && INTVAL (len) <= 257)
4148 {
4149 if (val == const0_rtx && INTVAL (len) <= 256)
4150 emit_insn (gen_clrmem_short (dst, GEN_INT (INTVAL (len) - 1)));
4151 else
4152 {
4153 /* Initialize memory by storing the first byte. */
4154 emit_move_insn (adjust_address (dst, QImode, 0), val);
4155
4156 if (INTVAL (len) > 1)
4157 {
4158 /* Initiate 1 byte overlap move.
4159 The first byte of DST is propagated through DSTP1.
4160 Prepare a movmem for: DST+1 = DST (length = LEN - 1).
4161 DST is set to size 1 so the rest of the memory location
4162 does not count as source operand. */
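/* For example, to set 5 bytes to 0xaa: the byte 0xaa is stored at
   DST[0] above, and the overlapping MVC below copies 4 bytes from
   DST to DST+1.  Since MVC moves one byte at a time from left to
   right, each byte copied is the 0xaa just written, so DST[1..4]
   all end up as 0xaa as well.  */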
4163 rtx dstp1 = adjust_address (dst, VOIDmode, 1);
4164 set_mem_size (dst, 1);
4165
4166 emit_insn (gen_movmem_short (dstp1, dst,
4167 GEN_INT (INTVAL (len) - 2)));
4168 }
4169 }
4170 }
4171
4172 else if (TARGET_MVCLE)
4173 {
4174 val = force_not_mem (convert_modes (Pmode, QImode, val, 1));
4175 emit_insn (gen_setmem_long (dst, convert_to_mode (Pmode, len, 1), val));
4176 }
4177
4178 else
4179 {
4180 rtx dst_addr, count, blocks, temp, dstp1 = NULL_RTX;
4181 rtx loop_start_label = gen_label_rtx ();
4182 rtx loop_end_label = gen_label_rtx ();
4183 rtx end_label = gen_label_rtx ();
4184 enum machine_mode mode;
4185
4186 mode = GET_MODE (len);
4187 if (mode == VOIDmode)
4188 mode = Pmode;
4189
4190 dst_addr = gen_reg_rtx (Pmode);
4191 count = gen_reg_rtx (mode);
4192 blocks = gen_reg_rtx (mode);
4193
4194 convert_move (count, len, 1);
4195 emit_cmp_and_jump_insns (count, const0_rtx,
4196 EQ, NULL_RTX, mode, 1, end_label);
4197
4198 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
4199 dst = change_address (dst, VOIDmode, dst_addr);
4200
4201 if (val == const0_rtx)
4202 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4203 OPTAB_DIRECT);
4204 else
4205 {
4206 dstp1 = adjust_address (dst, VOIDmode, 1);
4207 set_mem_size (dst, 1);
4208
4209 /* Initialize memory by storing the first byte. */
4210 emit_move_insn (adjust_address (dst, QImode, 0), val);
4211
4212 /* If count is 1 we are done. */
4213 emit_cmp_and_jump_insns (count, const1_rtx,
4214 EQ, NULL_RTX, mode, 1, end_label);
4215
4216 temp = expand_binop (mode, add_optab, count, GEN_INT (-2), count, 1,
4217 OPTAB_DIRECT);
4218 }
4219 if (temp != count)
4220 emit_move_insn (count, temp);
4221
4222 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4223 OPTAB_DIRECT);
4224 if (temp != blocks)
4225 emit_move_insn (blocks, temp);
4226
4227 emit_cmp_and_jump_insns (blocks, const0_rtx,
4228 EQ, NULL_RTX, mode, 1, loop_end_label);
4229
4230 emit_label (loop_start_label);
4231
4232 if (TARGET_Z10
4233 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 1024))
4234 {
4235 /* Issue a write prefetch for the +4 cache line. */
4236 rtx prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr,
4237 GEN_INT (1024)),
4238 const1_rtx, const0_rtx);
4239 emit_insn (prefetch);
4240 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4241 }
4242
4243 if (val == const0_rtx)
4244 emit_insn (gen_clrmem_short (dst, GEN_INT (255)));
4245 else
4246 emit_insn (gen_movmem_short (dstp1, dst, GEN_INT (255)));
4247 s390_load_address (dst_addr,
4248 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
4249
4250 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4251 OPTAB_DIRECT);
4252 if (temp != blocks)
4253 emit_move_insn (blocks, temp);
4254
4255 emit_cmp_and_jump_insns (blocks, const0_rtx,
4256 EQ, NULL_RTX, mode, 1, loop_end_label);
4257
4258 emit_jump (loop_start_label);
4259 emit_label (loop_end_label);
4260
4261 if (val == const0_rtx)
4262 emit_insn (gen_clrmem_short (dst, convert_to_mode (Pmode, count, 1)));
4263 else
4264 emit_insn (gen_movmem_short (dstp1, dst, convert_to_mode (Pmode, count, 1)));
4265 emit_label (end_label);
4266 }
4267 }
4268
4269 /* Emit code to compare LEN bytes at OP0 with those at OP1,
4270 and return the result in TARGET. */
4271
4272 bool
4273 s390_expand_cmpmem (rtx target, rtx op0, rtx op1, rtx len)
4274 {
4275 rtx ccreg = gen_rtx_REG (CCUmode, CC_REGNUM);
4276 rtx tmp;
4277
4278 /* When tuning for z10 or higher we rely on the Glibc functions to
4279 do the right thing. Inline code is only generated for constant
4280 lengths below 64k. */
4281 if (s390_tune >= PROCESSOR_2097_Z10
4282 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
4283 return false;
4284
4285 /* As the result of CMPINT is inverted compared to what we need,
4286 we have to swap the operands. */
4287 tmp = op0; op0 = op1; op1 = tmp;
4288
4289 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
4290 {
4291 if (INTVAL (len) > 0)
4292 {
4293 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (INTVAL (len) - 1)));
4294 emit_insn (gen_cmpint (target, ccreg));
4295 }
4296 else
4297 emit_move_insn (target, const0_rtx);
4298 }
4299 else if (TARGET_MVCLE)
4300 {
4301 emit_insn (gen_cmpmem_long (op0, op1, convert_to_mode (Pmode, len, 1)));
4302 emit_insn (gen_cmpint (target, ccreg));
4303 }
4304 else
4305 {
4306 rtx addr0, addr1, count, blocks, temp;
4307 rtx loop_start_label = gen_label_rtx ();
4308 rtx loop_end_label = gen_label_rtx ();
4309 rtx end_label = gen_label_rtx ();
4310 enum machine_mode mode;
4311
4312 mode = GET_MODE (len);
4313 if (mode == VOIDmode)
4314 mode = Pmode;
4315
4316 addr0 = gen_reg_rtx (Pmode);
4317 addr1 = gen_reg_rtx (Pmode);
4318 count = gen_reg_rtx (mode);
4319 blocks = gen_reg_rtx (mode);
4320
4321 convert_move (count, len, 1);
4322 emit_cmp_and_jump_insns (count, const0_rtx,
4323 EQ, NULL_RTX, mode, 1, end_label);
4324
4325 emit_move_insn (addr0, force_operand (XEXP (op0, 0), NULL_RTX));
4326 emit_move_insn (addr1, force_operand (XEXP (op1, 0), NULL_RTX));
4327 op0 = change_address (op0, VOIDmode, addr0);
4328 op1 = change_address (op1, VOIDmode, addr1);
4329
4330 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4331 OPTAB_DIRECT);
4332 if (temp != count)
4333 emit_move_insn (count, temp);
4334
4335 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4336 OPTAB_DIRECT);
4337 if (temp != blocks)
4338 emit_move_insn (blocks, temp);
4339
4340 emit_cmp_and_jump_insns (blocks, const0_rtx,
4341 EQ, NULL_RTX, mode, 1, loop_end_label);
4342
4343 emit_label (loop_start_label);
4344
4345 if (TARGET_Z10
4346 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 512))
4347 {
4348 rtx prefetch;
4349
4350 /* Issue a read prefetch for the +2 cache line of operand 1. */
4351 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr0, GEN_INT (512)),
4352 const0_rtx, const0_rtx);
4353 emit_insn (prefetch);
4354 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4355
4356 /* Issue a read prefetch for the +2 cache line of operand 2. */
4357 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr1, GEN_INT (512)),
4358 const0_rtx, const0_rtx);
4359 emit_insn (prefetch);
4360 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4361 }
4362
4363 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (255)));
4364 temp = gen_rtx_NE (VOIDmode, ccreg, const0_rtx);
4365 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
4366 gen_rtx_LABEL_REF (VOIDmode, end_label), pc_rtx);
4367 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
4368 emit_jump_insn (temp);
4369
4370 s390_load_address (addr0,
4371 gen_rtx_PLUS (Pmode, addr0, GEN_INT (256)));
4372 s390_load_address (addr1,
4373 gen_rtx_PLUS (Pmode, addr1, GEN_INT (256)));
4374
4375 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4376 OPTAB_DIRECT);
4377 if (temp != blocks)
4378 emit_move_insn (blocks, temp);
4379
4380 emit_cmp_and_jump_insns (blocks, const0_rtx,
4381 EQ, NULL_RTX, mode, 1, loop_end_label);
4382
4383 emit_jump (loop_start_label);
4384 emit_label (loop_end_label);
4385
4386 emit_insn (gen_cmpmem_short (op0, op1,
4387 convert_to_mode (Pmode, count, 1)));
4388 emit_label (end_label);
4389
4390 emit_insn (gen_cmpint (target, ccreg));
4391 }
4392 return true;
4393 }
4394
4395
4396 /* Expand conditional increment or decrement using alc/slb instructions.
4397 Should generate code setting DST to either SRC or SRC + INCREMENT,
4398 depending on the result of the comparison CMP_OP0 CMP_CODE CMP_OP1.
4399 Returns true if successful, false otherwise.
4400
4401 That makes it possible to implement some if-constructs without jumps e.g.:
4402 (borrow = CC0 | CC1 and carry = CC2 | CC3)
4403 unsigned int a, b, c;
4404 if (a < b) c++; -> CCU b > a -> CC2; c += carry;
4405 if (a < b) c--; -> CCL3 a - b -> borrow; c -= borrow;
4406 if (a <= b) c++; -> CCL3 b - a -> borrow; c += carry;
4407 if (a <= b) c--; -> CCU a <= b -> borrow; c -= borrow;
4408
4409 Checks for EQ and NE with a nonzero value need an additional xor e.g.:
4410 if (a == b) c++; -> CCL3 a ^= b; 0 - a -> borrow; c += carry;
4411 if (a == b) c--; -> CCU a ^= b; a <= 0 -> CC0 | CC1; c -= borrow;
4412 if (a != b) c++; -> CCU a ^= b; a > 0 -> CC2; c += carry;
4413 if (a != b) c--; -> CCL3 a ^= b; 0 - a -> borrow; c -= borrow; */
4414
4415 bool
4416 s390_expand_addcc (enum rtx_code cmp_code, rtx cmp_op0, rtx cmp_op1,
4417 rtx dst, rtx src, rtx increment)
4418 {
4419 enum machine_mode cmp_mode;
4420 enum machine_mode cc_mode;
4421 rtx op_res;
4422 rtx insn;
4423 rtvec p;
4424 int ret;
4425
4426 if ((GET_MODE (cmp_op0) == SImode || GET_MODE (cmp_op0) == VOIDmode)
4427 && (GET_MODE (cmp_op1) == SImode || GET_MODE (cmp_op1) == VOIDmode))
4428 cmp_mode = SImode;
4429 else if ((GET_MODE (cmp_op0) == DImode || GET_MODE (cmp_op0) == VOIDmode)
4430 && (GET_MODE (cmp_op1) == DImode || GET_MODE (cmp_op1) == VOIDmode))
4431 cmp_mode = DImode;
4432 else
4433 return false;
4434
4435 /* Try ADD LOGICAL WITH CARRY. */
4436 if (increment == const1_rtx)
4437 {
4438 /* Determine CC mode to use. */
4439 if (cmp_code == EQ || cmp_code == NE)
4440 {
4441 if (cmp_op1 != const0_rtx)
4442 {
4443 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
4444 NULL_RTX, 0, OPTAB_WIDEN);
4445 cmp_op1 = const0_rtx;
4446 }
4447
4448 cmp_code = cmp_code == EQ ? LEU : GTU;
4449 }
4450
4451 if (cmp_code == LTU || cmp_code == LEU)
4452 {
4453 rtx tem = cmp_op0;
4454 cmp_op0 = cmp_op1;
4455 cmp_op1 = tem;
4456 cmp_code = swap_condition (cmp_code);
4457 }
4458
4459 switch (cmp_code)
4460 {
4461 case GTU:
4462 cc_mode = CCUmode;
4463 break;
4464
4465 case GEU:
4466 cc_mode = CCL3mode;
4467 break;
4468
4469 default:
4470 return false;
4471 }
4472
4473 /* Emit comparison instruction pattern. */
4474 if (!register_operand (cmp_op0, cmp_mode))
4475 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4476
4477 insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
4478 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4479 /* We use insn_invalid_p here to add clobbers if required. */
4480 ret = insn_invalid_p (emit_insn (insn), false);
4481 gcc_assert (!ret);
4482
4483 /* Emit ALC instruction pattern. */
4484 op_res = gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4485 gen_rtx_REG (cc_mode, CC_REGNUM),
4486 const0_rtx);
4487
4488 if (src != const0_rtx)
4489 {
4490 if (!register_operand (src, GET_MODE (dst)))
4491 src = force_reg (GET_MODE (dst), src);
4492
4493 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, src);
4494 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, const0_rtx);
4495 }
4496
4497 p = rtvec_alloc (2);
4498 RTVEC_ELT (p, 0) =
4499 gen_rtx_SET (VOIDmode, dst, op_res);
4500 RTVEC_ELT (p, 1) =
4501 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4502 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
4503
4504 return true;
4505 }
4506
4507 /* Try SUBTRACT LOGICAL WITH BORROW. */
4508 if (increment == constm1_rtx)
4509 {
4510 /* Determine CC mode to use. */
4511 if (cmp_code == EQ || cmp_code == NE)
4512 {
4513 if (cmp_op1 != const0_rtx)
4514 {
4515 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
4516 NULL_RTX, 0, OPTAB_WIDEN);
4517 cmp_op1 = const0_rtx;
4518 }
4519
4520 cmp_code = cmp_code == EQ ? LEU : GTU;
4521 }
4522
4523 if (cmp_code == GTU || cmp_code == GEU)
4524 {
4525 rtx tem = cmp_op0;
4526 cmp_op0 = cmp_op1;
4527 cmp_op1 = tem;
4528 cmp_code = swap_condition (cmp_code);
4529 }
4530
4531 switch (cmp_code)
4532 {
4533 case LEU:
4534 cc_mode = CCUmode;
4535 break;
4536
4537 case LTU:
4538 cc_mode = CCL3mode;
4539 break;
4540
4541 default:
4542 return false;
4543 }
4544
4545 /* Emit comparison instruction pattern. */
4546 if (!register_operand (cmp_op0, cmp_mode))
4547 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4548
4549 insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
4550 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4551 /* We use insn_invalid_p here to add clobbers if required. */
4552 ret = insn_invalid_p (emit_insn (insn), false);
4553 gcc_assert (!ret);
4554
4555 /* Emit SLB instruction pattern. */
4556 if (!register_operand (src, GET_MODE (dst)))
4557 src = force_reg (GET_MODE (dst), src);
4558
4559 op_res = gen_rtx_MINUS (GET_MODE (dst),
4560 gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
4561 gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4562 gen_rtx_REG (cc_mode, CC_REGNUM),
4563 const0_rtx));
4564 p = rtvec_alloc (2);
4565 RTVEC_ELT (p, 0) =
4566 gen_rtx_SET (VOIDmode, dst, op_res);
4567 RTVEC_ELT (p, 1) =
4568 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4569 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
4570
4571 return true;
4572 }
4573
4574 return false;
4575 }
4576
4577 /* Expand code for the insv template. Return true if successful. */
4578
4579 bool
4580 s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
4581 {
4582 int bitsize = INTVAL (op1);
4583 int bitpos = INTVAL (op2);
4584 enum machine_mode mode = GET_MODE (dest);
4585 enum machine_mode smode;
4586 int smode_bsize, mode_bsize;
4587 rtx op, clobber;
4588
4589 /* Generate INSERT IMMEDIATE (IILL et al). */
4590 /* (set (ze (reg)) (const_int)). */
4591 if (TARGET_ZARCH
4592 && register_operand (dest, word_mode)
4593 && (bitpos % 16) == 0
4594 && (bitsize % 16) == 0
4595 && const_int_operand (src, VOIDmode))
4596 {
4597 HOST_WIDE_INT val = INTVAL (src);
4598 int regpos = bitpos + bitsize;
4599
4600 while (regpos > bitpos)
4601 {
4602 enum machine_mode putmode;
4603 int putsize;
4604
4605 if (TARGET_EXTIMM && (regpos % 32 == 0) && (regpos >= bitpos + 32))
4606 putmode = SImode;
4607 else
4608 putmode = HImode;
4609
4610 putsize = GET_MODE_BITSIZE (putmode);
4611 regpos -= putsize;
4612 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
4613 GEN_INT (putsize),
4614 GEN_INT (regpos)),
4615 gen_int_mode (val, putmode));
4616 val >>= putsize;
4617 }
4618 gcc_assert (regpos == bitpos);
4619 return true;
4620 }
4621
4622 smode = smallest_mode_for_size (bitsize, MODE_INT);
4623 smode_bsize = GET_MODE_BITSIZE (smode);
4624 mode_bsize = GET_MODE_BITSIZE (mode);
4625
4626 /* Generate STORE CHARACTERS UNDER MASK (STCM et al). */
4627 if (bitpos == 0
4628 && (bitsize % BITS_PER_UNIT) == 0
4629 && MEM_P (dest)
4630 && (register_operand (src, word_mode)
4631 || const_int_operand (src, VOIDmode)))
4632 {
4633 /* Emit standard pattern if possible. */
4634 if (smode_bsize == bitsize)
4635 {
4636 emit_move_insn (adjust_address (dest, smode, 0),
4637 gen_lowpart (smode, src));
4638 return true;
4639 }
4640
4641 /* (set (ze (mem)) (const_int)). */
4642 else if (const_int_operand (src, VOIDmode))
4643 {
4644 int size = bitsize / BITS_PER_UNIT;
4645 rtx src_mem = adjust_address (force_const_mem (word_mode, src),
4646 BLKmode,
4647 UNITS_PER_WORD - size);
4648
4649 dest = adjust_address (dest, BLKmode, 0);
4650 set_mem_size (dest, size);
4651 s390_expand_movmem (dest, src_mem, GEN_INT (size));
4652 return true;
4653 }
4654
4655 /* (set (ze (mem)) (reg)). */
4656 else if (register_operand (src, word_mode))
4657 {
4658 if (bitsize <= 32)
4659 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, op1,
4660 const0_rtx), src);
4661 else
4662 {
4663 /* Emit st,stcmh sequence. */
4664 int stcmh_width = bitsize - 32;
4665 int size = stcmh_width / BITS_PER_UNIT;
4666
4667 emit_move_insn (adjust_address (dest, SImode, size),
4668 gen_lowpart (SImode, src));
4669 set_mem_size (dest, size);
4670 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
4671 GEN_INT (stcmh_width),
4672 const0_rtx),
4673 gen_rtx_LSHIFTRT (word_mode, src, GEN_INT (32)));
4674 }
4675 return true;
4676 }
4677 }
4678
4679 /* Generate INSERT CHARACTERS UNDER MASK (IC, ICM et al). */
4680 if ((bitpos % BITS_PER_UNIT) == 0
4681 && (bitsize % BITS_PER_UNIT) == 0
4682 && (bitpos & 32) == ((bitpos + bitsize - 1) & 32)
4683 && MEM_P (src)
4684 && (mode == DImode || mode == SImode)
4685 && register_operand (dest, mode))
4686 {
4687 /* Emit a strict_low_part pattern if possible. */
4688 if (smode_bsize == bitsize && bitpos == mode_bsize - smode_bsize)
4689 {
4690 op = gen_rtx_STRICT_LOW_PART (VOIDmode, gen_lowpart (smode, dest));
4691 op = gen_rtx_SET (VOIDmode, op, gen_lowpart (smode, src));
4692 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4693 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
4694 return true;
4695 }
4696
4697 /* ??? There are more powerful versions of ICM that are not
4698 completely represented in the md file. */
4699 }
4700
4701 /* For z10, generate ROTATE THEN INSERT SELECTED BITS (RISBG et al). */
4702 if (TARGET_Z10 && (mode == DImode || mode == SImode))
4703 {
4704 enum machine_mode mode_s = GET_MODE (src);
4705
4706 if (mode_s == VOIDmode)
4707 {
4708 /* Assume const_int etc. is already in the proper mode. */
4709 src = force_reg (mode, src);
4710 }
4711 else if (mode_s != mode)
4712 {
4713 gcc_assert (GET_MODE_BITSIZE (mode_s) >= bitsize);
4714 src = force_reg (mode_s, src);
4715 src = gen_lowpart (mode, src);
4716 }
4717
4718 op = gen_rtx_ZERO_EXTRACT (mode, dest, op1, op2);
4719 op = gen_rtx_SET (VOIDmode, op, src);
4720 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4721 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
4722
4723 return true;
4724 }
4725
4726 return false;
4727 }
4728
4729 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic which returns a
4730 register that holds VAL of mode MODE shifted by COUNT bits. */
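/* For example, with MODE == QImode, VAL == 0x1ab and COUNT == 24, the
   value is first masked to 0xab and then shifted, yielding a register
   containing 0xab000000.  */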
4731
4732 static inline rtx
4733 s390_expand_mask_and_shift (rtx val, enum machine_mode mode, rtx count)
4734 {
4735 val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
4736 NULL_RTX, 1, OPTAB_DIRECT);
4737 return expand_simple_binop (SImode, ASHIFT, val, count,
4738 NULL_RTX, 1, OPTAB_DIRECT);
4739 }
4740
4741 /* Structure to hold the initial parameters for a compare_and_swap operation
4742 in HImode and QImode. */
4743
4744 struct alignment_context
4745 {
4746 rtx memsi; /* SI aligned memory location. */
4747 rtx shift; /* Bit offset with regard to lsb. */
4748 rtx modemask; /* Mask of the HQImode shifted by SHIFT bits. */
4749 rtx modemaski; /* ~modemask */
4750 bool aligned; /* True if memory is aligned, false otherwise. */
4751 };
4752
4753 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize
4754 structure AC for transparent simplification, if the memory alignment is known
4755 to be at least 32bit. MEM is the memory location for the actual operation
4756 and MODE its mode. */
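/* As an illustration: for a HImode MEM on this big-endian target, a
   halfword at byte offset 0 within its containing word occupies the
   two high-order bytes, so AC->shift ends up as 16 bits; at byte
   offset 2 it occupies the two low-order bytes and AC->shift is 0.  */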
4757
4758 static void
4759 init_alignment_context (struct alignment_context *ac, rtx mem,
4760 enum machine_mode mode)
4761 {
4762 ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
4763 ac->aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));
4764
4765 if (ac->aligned)
4766 ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned. */
4767 else
4768 {
4769 /* Alignment is unknown. */
4770 rtx byteoffset, addr, align;
4771
4772 /* Force the address into a register. */
4773 addr = force_reg (Pmode, XEXP (mem, 0));
4774
4775 /* Align it to SImode. */
4776 align = expand_simple_binop (Pmode, AND, addr,
4777 GEN_INT (-GET_MODE_SIZE (SImode)),
4778 NULL_RTX, 1, OPTAB_DIRECT);
4779 /* Generate MEM. */
4780 ac->memsi = gen_rtx_MEM (SImode, align);
4781 MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
4782 set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
4783 set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));
4784
4785 /* Calculate shiftcount. */
4786 byteoffset = expand_simple_binop (Pmode, AND, addr,
4787 GEN_INT (GET_MODE_SIZE (SImode) - 1),
4788 NULL_RTX, 1, OPTAB_DIRECT);
4789 /* As we already have some offset, evaluate the remaining distance. */
4790 ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
4791 NULL_RTX, 1, OPTAB_DIRECT);
4792 }
4793
4794 /* Shift is the byte count, but we need the bitcount. */
4795 ac->shift = expand_simple_binop (SImode, ASHIFT, ac->shift, GEN_INT (3),
4796 NULL_RTX, 1, OPTAB_DIRECT);
4797
4798 /* Calculate masks. */
4799 ac->modemask = expand_simple_binop (SImode, ASHIFT,
4800 GEN_INT (GET_MODE_MASK (mode)),
4801 ac->shift, NULL_RTX, 1, OPTAB_DIRECT);
4802 ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask,
4803 NULL_RTX, 1);
4804 }
4805
4806 /* A subroutine of s390_expand_cs_hqi. Insert INS into VAL. If possible,
4807 use a single insv insn into SEQ2. Otherwise, put prep insns in SEQ1 and
4808 perform the merge in SEQ2. */
4809
4810 static rtx
4811 s390_two_part_insv (struct alignment_context *ac, rtx *seq1, rtx *seq2,
4812 enum machine_mode mode, rtx val, rtx ins)
4813 {
4814 rtx tmp;
4815
4816 if (ac->aligned)
4817 {
4818 start_sequence ();
4819 tmp = copy_to_mode_reg (SImode, val);
4820 if (s390_expand_insv (tmp, GEN_INT (GET_MODE_BITSIZE (mode)),
4821 const0_rtx, ins))
4822 {
4823 *seq1 = NULL;
4824 *seq2 = get_insns ();
4825 end_sequence ();
4826 return tmp;
4827 }
4828 end_sequence ();
4829 }
4830
4831 /* Failed to use insv. Generate a two part shift and mask. */
4832 start_sequence ();
4833 tmp = s390_expand_mask_and_shift (ins, mode, ac->shift);
4834 *seq1 = get_insns ();
4835 end_sequence ();
4836
4837 start_sequence ();
4838 tmp = expand_simple_binop (SImode, IOR, tmp, val, NULL_RTX, 1, OPTAB_DIRECT);
4839 *seq2 = get_insns ();
4840 end_sequence ();
4841
4842 return tmp;
4843 }
4844
4845 /* Expand an atomic compare and swap operation for HImode and QImode. MEM is
4846 the memory location, CMP the old value to compare MEM with and NEW_RTX the
4847 value to set if CMP == MEM. */
4848
4849 void
4850 s390_expand_cs_hqi (enum machine_mode mode, rtx btarget, rtx vtarget, rtx mem,
4851 rtx cmp, rtx new_rtx, bool is_weak)
4852 {
4853 struct alignment_context ac;
4854 rtx cmpv, newv, val, cc, seq0, seq1, seq2, seq3;
4855 rtx res = gen_reg_rtx (SImode);
4856 rtx csloop = NULL, csend = NULL;
4857
4858 gcc_assert (MEM_P (mem));
4859
4860 init_alignment_context (&ac, mem, mode);
4861
4862 /* Load full word. Subsequent loads are performed by CS. */
4863 val = expand_simple_binop (SImode, AND, ac.memsi, ac.modemaski,
4864 NULL_RTX, 1, OPTAB_DIRECT);
4865
4866 /* Prepare insertions of cmp and new_rtx into the loaded value. When
4867 possible, we try to use insv to make this happen efficiently. If
4868 that fails we'll generate code both inside and outside the loop. */
4869 cmpv = s390_two_part_insv (&ac, &seq0, &seq2, mode, val, cmp);
4870 newv = s390_two_part_insv (&ac, &seq1, &seq3, mode, val, new_rtx);
4871
4872 if (seq0)
4873 emit_insn (seq0);
4874 if (seq1)
4875 emit_insn (seq1);
4876
4877 /* Start CS loop. */
4878 if (!is_weak)
4879 {
4880 /* Begin assuming success. */
4881 emit_move_insn (btarget, const1_rtx);
4882
4883 csloop = gen_label_rtx ();
4884 csend = gen_label_rtx ();
4885 emit_label (csloop);
4886 }
4887
4888 /* val = "<mem>00..0<mem>"
4889 * cmp = "00..0<cmp>00..0"
4890 * new = "00..0<new>00..0"
4891 */
4892
4893 emit_insn (seq2);
4894 emit_insn (seq3);
4895
4896 cc = s390_emit_compare_and_swap (EQ, res, ac.memsi, cmpv, newv);
4897 if (is_weak)
4898 emit_insn (gen_cstorecc4 (btarget, cc, XEXP (cc, 0), XEXP (cc, 1)));
4899 else
4900 {
4901 rtx tmp;
4902
4903 /* Jump to end if we're done (likely?). */
4904 s390_emit_jump (csend, cc);
4905
4906 /* Check for changes outside mode, and loop internally if so.
4907 Arrange the moves so that the compare is adjacent to the
4908 branch so that we can generate CRJ. */
4909 tmp = copy_to_reg (val);
4910 force_expand_binop (SImode, and_optab, res, ac.modemaski, val,
4911 1, OPTAB_DIRECT);
4912 cc = s390_emit_compare (NE, val, tmp);
4913 s390_emit_jump (csloop, cc);
4914
4915 /* Failed. */
4916 emit_move_insn (btarget, const0_rtx);
4917 emit_label (csend);
4918 }
4919
4920 /* Return the correct part of the bitfield. */
4921 convert_move (vtarget, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
4922 NULL_RTX, 1, OPTAB_DIRECT), 1);
4923 }
4924
4925 /* Expand an atomic operation CODE of mode MODE. MEM is the memory location
4926 and VAL the value to play with. If AFTER is true then store the value
4927 MEM holds after the operation, if AFTER is false then store the value MEM
4928 holds before the operation. If TARGET is zero then discard that value, else
4929 store it to TARGET. */
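/* For example, an atomic OR of a QImode value is performed by loading
   the containing aligned SImode word, OR-ing the masked and shifted
   value into a copy of it, and retrying with COMPARE AND SWAP until
   the word can be replaced without interference.  */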
4930
4931 void
4932 s390_expand_atomic (enum machine_mode mode, enum rtx_code code,
4933 rtx target, rtx mem, rtx val, bool after)
4934 {
4935 struct alignment_context ac;
4936 rtx cmp;
4937 rtx new_rtx = gen_reg_rtx (SImode);
4938 rtx orig = gen_reg_rtx (SImode);
4939 rtx csloop = gen_label_rtx ();
4940
4941 gcc_assert (!target || register_operand (target, VOIDmode));
4942 gcc_assert (MEM_P (mem));
4943
4944 init_alignment_context (&ac, mem, mode);
4945
4946 /* Shift val to the correct bit positions.
4947 Preserve "icm", but prevent "ex icm". */
4948 if (!(ac.aligned && code == SET && MEM_P (val)))
4949 val = s390_expand_mask_and_shift (val, mode, ac.shift);
4950
4951 /* Further preparation insns. */
4952 if (code == PLUS || code == MINUS)
4953 emit_move_insn (orig, val);
4954 else if (code == MULT || code == AND) /* val = "11..1<val>11..1" */
4955 val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
4956 NULL_RTX, 1, OPTAB_DIRECT);
4957
4958 /* Load full word. Subsequent loads are performed by CS. */
4959 cmp = force_reg (SImode, ac.memsi);
4960
4961 /* Start CS loop. */
4962 emit_label (csloop);
4963 emit_move_insn (new_rtx, cmp);
4964
4965 /* Patch new with val at correct position. */
4966 switch (code)
4967 {
4968 case PLUS:
4969 case MINUS:
4970 val = expand_simple_binop (SImode, code, new_rtx, orig,
4971 NULL_RTX, 1, OPTAB_DIRECT);
4972 val = expand_simple_binop (SImode, AND, val, ac.modemask,
4973 NULL_RTX, 1, OPTAB_DIRECT);
4974 /* FALLTHRU */
4975 case SET:
4976 if (ac.aligned && MEM_P (val))
4977 store_bit_field (new_rtx, GET_MODE_BITSIZE (mode), 0,
4978 0, 0, SImode, val);
4979 else
4980 {
4981 new_rtx = expand_simple_binop (SImode, AND, new_rtx, ac.modemaski,
4982 NULL_RTX, 1, OPTAB_DIRECT);
4983 new_rtx = expand_simple_binop (SImode, IOR, new_rtx, val,
4984 NULL_RTX, 1, OPTAB_DIRECT);
4985 }
4986 break;
4987 case AND:
4988 case IOR:
4989 case XOR:
4990 new_rtx = expand_simple_binop (SImode, code, new_rtx, val,
4991 NULL_RTX, 1, OPTAB_DIRECT);
4992 break;
4993 case MULT: /* NAND */
4994 new_rtx = expand_simple_binop (SImode, AND, new_rtx, val,
4995 NULL_RTX, 1, OPTAB_DIRECT);
4996 new_rtx = expand_simple_binop (SImode, XOR, new_rtx, ac.modemask,
4997 NULL_RTX, 1, OPTAB_DIRECT);
4998 break;
4999 default:
5000 gcc_unreachable ();
5001 }
5002
5003 s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, cmp,
5004 ac.memsi, cmp, new_rtx));
5005
5006 /* Return the correct part of the bitfield. */
5007 if (target)
5008 convert_move (target, expand_simple_binop (SImode, LSHIFTRT,
5009 after ? new_rtx : cmp, ac.shift,
5010 NULL_RTX, 1, OPTAB_DIRECT), 1);
5011 }
5012
5013 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
5014 We need to emit DTP-relative relocations. */
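/* For example, for SIZE == 8 and a SYMBOL_REF to "foo" this emits
   ".quad foo@DTPOFF".  */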
5015
5016 static void s390_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
5017
5018 static void
5019 s390_output_dwarf_dtprel (FILE *file, int size, rtx x)
5020 {
5021 switch (size)
5022 {
5023 case 4:
5024 fputs ("\t.long\t", file);
5025 break;
5026 case 8:
5027 fputs ("\t.quad\t", file);
5028 break;
5029 default:
5030 gcc_unreachable ();
5031 }
5032 output_addr_const (file, x);
5033 fputs ("@DTPOFF", file);
5034 }
5035
5036 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
5037 /* Implement TARGET_MANGLE_TYPE. */
5038
5039 static const char *
5040 s390_mangle_type (const_tree type)
5041 {
5042 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
5043 && TARGET_LONG_DOUBLE_128)
5044 return "g";
5045
5046 /* For all other types, use normal C++ mangling. */
5047 return NULL;
5048 }
5049 #endif
5050
5051 /* In the name of slightly smaller debug output, and to cater to
5052 general assembler lossage, recognize various UNSPEC sequences
5053 and turn them back into a direct symbol reference. */
5054
5055 static rtx
5056 s390_delegitimize_address (rtx orig_x)
5057 {
5058 rtx x, y;
5059
5060 orig_x = delegitimize_mem_from_attrs (orig_x);
5061 x = orig_x;
5062
5063 /* Extract the symbol ref from:
5064 (plus:SI (reg:SI 12 %r12)
5065 (const:SI (unspec:SI [(symbol_ref/f:SI ("*.LC0"))]
5066 UNSPEC_GOTOFF/PLTOFF)))
5067 and
5068 (plus:SI (reg:SI 12 %r12)
5069 (const:SI (plus:SI (unspec:SI [(symbol_ref:SI ("L"))]
5070 UNSPEC_GOTOFF/PLTOFF)
5071 (const_int 4 [0x4])))) */
5072 if (GET_CODE (x) == PLUS
5073 && REG_P (XEXP (x, 0))
5074 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
5075 && GET_CODE (XEXP (x, 1)) == CONST)
5076 {
5077 HOST_WIDE_INT offset = 0;
5078
5079 /* The const operand. */
5080 y = XEXP (XEXP (x, 1), 0);
5081
5082 if (GET_CODE (y) == PLUS
5083 && GET_CODE (XEXP (y, 1)) == CONST_INT)
5084 {
5085 offset = INTVAL (XEXP (y, 1));
5086 y = XEXP (y, 0);
5087 }
5088
5089 if (GET_CODE (y) == UNSPEC
5090 && (XINT (y, 1) == UNSPEC_GOTOFF
5091 || XINT (y, 1) == UNSPEC_PLTOFF))
5092 return plus_constant (Pmode, XVECEXP (y, 0, 0), offset);
5093 }
5094
5095 if (GET_CODE (x) != MEM)
5096 return orig_x;
5097
5098 x = XEXP (x, 0);
5099 if (GET_CODE (x) == PLUS
5100 && GET_CODE (XEXP (x, 1)) == CONST
5101 && GET_CODE (XEXP (x, 0)) == REG
5102 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
5103 {
5104 y = XEXP (XEXP (x, 1), 0);
5105 if (GET_CODE (y) == UNSPEC
5106 && XINT (y, 1) == UNSPEC_GOT)
5107 y = XVECEXP (y, 0, 0);
5108 else
5109 return orig_x;
5110 }
5111 else if (GET_CODE (x) == CONST)
5112 {
5113 /* Extract the symbol ref from:
5114 (mem:QI (const:DI (unspec:DI [(symbol_ref:DI ("foo"))]
5115 UNSPEC_PLT/GOTENT))) */
5116
5117 y = XEXP (x, 0);
5118 if (GET_CODE (y) == UNSPEC
5119 && (XINT (y, 1) == UNSPEC_GOTENT
5120 || XINT (y, 1) == UNSPEC_PLT))
5121 y = XVECEXP (y, 0, 0);
5122 else
5123 return orig_x;
5124 }
5125 else
5126 return orig_x;
5127
5128 if (GET_MODE (orig_x) != Pmode)
5129 {
5130 if (GET_MODE (orig_x) == BLKmode)
5131 return orig_x;
5132 y = lowpart_subreg (GET_MODE (orig_x), y, Pmode);
5133 if (y == NULL_RTX)
5134 return orig_x;
5135 }
5136 return y;
5137 }
5138
5139 /* Output operand OP to stdio stream FILE.
5140 OP is an address (register + offset) which is not used to address data;
5141 instead the rightmost bits are interpreted as the value. */
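/* For example, (plus (reg 1) (const_int 3)) is printed as "3(%r1)",
   and a plain (const_int 5) as "5".  */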
5142
5143 static void
5144 print_shift_count_operand (FILE *file, rtx op)
5145 {
5146 HOST_WIDE_INT offset;
5147 rtx base;
5148
5149 /* Extract base register and offset. */
5150 if (!s390_decompose_shift_count (op, &base, &offset))
5151 gcc_unreachable ();
5152
5153 /* Sanity check. */
5154 if (base)
5155 {
5156 gcc_assert (GET_CODE (base) == REG);
5157 gcc_assert (REGNO (base) < FIRST_PSEUDO_REGISTER);
5158 gcc_assert (REGNO_REG_CLASS (REGNO (base)) == ADDR_REGS);
5159 }
5160
5161 /* Offsets are restricted to twelve bits. */
5162 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset & ((1 << 12) - 1));
5163 if (base)
5164 fprintf (file, "(%s)", reg_names[REGNO (base)]);
5165 }
5166
5167 /* See 'get_some_local_dynamic_name'. */
5168
5169 static int
5170 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
5171 {
5172 rtx x = *px;
5173
5174 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
5175 {
5176 x = get_pool_constant (x);
5177 return for_each_rtx (&x, get_some_local_dynamic_name_1, 0);
5178 }
5179
5180 if (GET_CODE (x) == SYMBOL_REF
5181 && tls_symbolic_operand (x) == TLS_MODEL_LOCAL_DYNAMIC)
5182 {
5183 cfun->machine->some_ld_name = XSTR (x, 0);
5184 return 1;
5185 }
5186
5187 return 0;
5188 }
5189
5190 /* Locate some local-dynamic symbol still in use by this function
5191 so that we can print its name in local-dynamic base patterns. */
5192
5193 static const char *
5194 get_some_local_dynamic_name (void)
5195 {
5196 rtx insn;
5197
5198 if (cfun->machine->some_ld_name)
5199 return cfun->machine->some_ld_name;
5200
5201 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
5202 if (INSN_P (insn)
5203 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
5204 return cfun->machine->some_ld_name;
5205
5206 gcc_unreachable ();
5207 }
5208
5209 /* Output machine-dependent UNSPECs occurring in address constant X
5210 in assembler syntax to stdio stream FILE. Returns true if the
5211 constant X could be recognized, false otherwise. */
5212
5213 static bool
5214 s390_output_addr_const_extra (FILE *file, rtx x)
5215 {
5216 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
5217 switch (XINT (x, 1))
5218 {
5219 case UNSPEC_GOTENT:
5220 output_addr_const (file, XVECEXP (x, 0, 0));
5221 fprintf (file, "@GOTENT");
5222 return true;
5223 case UNSPEC_GOT:
5224 output_addr_const (file, XVECEXP (x, 0, 0));
5225 fprintf (file, "@GOT");
5226 return true;
5227 case UNSPEC_GOTOFF:
5228 output_addr_const (file, XVECEXP (x, 0, 0));
5229 fprintf (file, "@GOTOFF");
5230 return true;
5231 case UNSPEC_PLT:
5232 output_addr_const (file, XVECEXP (x, 0, 0));
5233 fprintf (file, "@PLT");
5234 return true;
5235 case UNSPEC_PLTOFF:
5236 output_addr_const (file, XVECEXP (x, 0, 0));
5237 fprintf (file, "@PLTOFF");
5238 return true;
5239 case UNSPEC_TLSGD:
5240 output_addr_const (file, XVECEXP (x, 0, 0));
5241 fprintf (file, "@TLSGD");
5242 return true;
5243 case UNSPEC_TLSLDM:
5244 assemble_name (file, get_some_local_dynamic_name ());
5245 fprintf (file, "@TLSLDM");
5246 return true;
5247 case UNSPEC_DTPOFF:
5248 output_addr_const (file, XVECEXP (x, 0, 0));
5249 fprintf (file, "@DTPOFF");
5250 return true;
5251 case UNSPEC_NTPOFF:
5252 output_addr_const (file, XVECEXP (x, 0, 0));
5253 fprintf (file, "@NTPOFF");
5254 return true;
5255 case UNSPEC_GOTNTPOFF:
5256 output_addr_const (file, XVECEXP (x, 0, 0));
5257 fprintf (file, "@GOTNTPOFF");
5258 return true;
5259 case UNSPEC_INDNTPOFF:
5260 output_addr_const (file, XVECEXP (x, 0, 0));
5261 fprintf (file, "@INDNTPOFF");
5262 return true;
5263 }
5264
5265 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 2)
5266 switch (XINT (x, 1))
5267 {
5268 case UNSPEC_POOL_OFFSET:
5269 x = gen_rtx_MINUS (GET_MODE (x), XVECEXP (x, 0, 0), XVECEXP (x, 0, 1));
5270 output_addr_const (file, x);
5271 return true;
5272 }
5273 return false;
5274 }
5275
5276 /* Output address operand ADDR in assembler syntax to
5277 stdio stream FILE. */
5278
5279 void
5280 print_operand_address (FILE *file, rtx addr)
5281 {
5282 struct s390_address ad;
5283
5284 if (s390_loadrelative_operand_p (addr))
5285 {
5286 if (!TARGET_Z10)
5287 {
5288 output_operand_lossage ("symbolic memory references are "
5289 "only supported on z10 or later");
5290 return;
5291 }
5292 output_addr_const (file, addr);
5293 return;
5294 }
5295
5296 if (!s390_decompose_address (addr, &ad)
5297 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5298 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
5299 output_operand_lossage ("cannot decompose address");
5300
5301 if (ad.disp)
5302 output_addr_const (file, ad.disp);
5303 else
5304 fprintf (file, "0");
5305
5306 if (ad.base && ad.indx)
5307 fprintf (file, "(%s,%s)", reg_names[REGNO (ad.indx)],
5308 reg_names[REGNO (ad.base)]);
5309 else if (ad.base)
5310 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
5311 }
5312
5313 /* Output operand X in assembler syntax to stdio stream FILE.
5314 CODE specifies the format flag. The following format flags
5315 are recognized:
5316
5317 'C': print opcode suffix for branch condition.
5318 'D': print opcode suffix for inverse branch condition.
5319 'E': print opcode suffix for branch on index instruction.
5320 'J': print tls_load/tls_gdcall/tls_ldcall suffix
5321 'G': print the size of the operand in bytes.
5322 'O': print only the displacement of a memory reference.
5323 'R': print only the base register of a memory reference.
5324 'S': print S-type memory reference (base+displacement).
5325 'N': print the second word of a DImode operand.
5326 'M': print the second word of a TImode operand.
5327 'Y': print shift count operand.
5328
5329 'b': print integer X as if it's an unsigned byte.
5330 'c': print integer X as if it's a signed byte.
5331 'x': print integer X as if it's an unsigned halfword.
5332 'h': print integer X as if it's a signed halfword.
5333 'i': print the first nonzero HImode part of X.
5334 'j': print the first HImode part unequal to -1 of X.
5335 'k': print the first nonzero SImode part of X.
5336 'm': print the first SImode part unequal to -1 of X.
5337 'o': print integer X as if it's an unsigned 32bit word. */
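/* For example, with X == (const_int -1), 'b' prints 255, 'x' prints
   65535, 'h' prints -1, and 'o' prints 4294967295.  */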
5338
5339 void
5340 print_operand (FILE *file, rtx x, int code)
5341 {
5342 switch (code)
5343 {
5344 case 'C':
5345 fprintf (file, s390_branch_condition_mnemonic (x, FALSE));
5346 return;
5347
5348 case 'D':
5349 fprintf (file, s390_branch_condition_mnemonic (x, TRUE));
5350 return;
5351
5352 case 'E':
5353 if (GET_CODE (x) == LE)
5354 fprintf (file, "l");
5355 else if (GET_CODE (x) == GT)
5356 fprintf (file, "h");
5357 else
5358 output_operand_lossage ("invalid comparison operator "
5359 "for 'E' output modifier");
5360 return;
5361
5362 case 'J':
5363 if (GET_CODE (x) == SYMBOL_REF)
5364 {
5365 fprintf (file, "%s", ":tls_load:");
5366 output_addr_const (file, x);
5367 }
5368 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
5369 {
5370 fprintf (file, "%s", ":tls_gdcall:");
5371 output_addr_const (file, XVECEXP (x, 0, 0));
5372 }
5373 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM)
5374 {
5375 fprintf (file, "%s", ":tls_ldcall:");
5376 assemble_name (file, get_some_local_dynamic_name ());
5377 }
5378 else
5379 output_operand_lossage ("invalid reference for 'J' output modifier");
5380 return;
5381
5382 case 'G':
5383 fprintf (file, "%u", GET_MODE_SIZE (GET_MODE (x)));
5384 return;
5385
5386 case 'O':
5387 {
5388 struct s390_address ad;
5389 int ret;
5390
5391 if (!MEM_P (x))
5392 {
5393 output_operand_lossage ("memory reference expected for "
5394 "'O' output modifier");
5395 return;
5396 }
5397
5398 ret = s390_decompose_address (XEXP (x, 0), &ad);
5399
5400 if (!ret
5401 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5402 || ad.indx)
5403 {
5404 output_operand_lossage ("invalid address for 'O' output modifier");
5405 return;
5406 }
5407
5408 if (ad.disp)
5409 output_addr_const (file, ad.disp);
5410 else
5411 fprintf (file, "0");
5412 }
5413 return;
5414
5415 case 'R':
5416 {
5417 struct s390_address ad;
5418 int ret;
5419
5420 if (!MEM_P (x))
5421 {
5422 output_operand_lossage ("memory reference expected for "
5423 "'R' output modifier");
5424 return;
5425 }
5426
5427 ret = s390_decompose_address (XEXP (x, 0), &ad);
5428
5429 if (!ret
5430 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5431 || ad.indx)
5432 {
5433 output_operand_lossage ("invalid address for 'R' output modifier");
5434 return;
5435 }
5436
5437 if (ad.base)
5438 fprintf (file, "%s", reg_names[REGNO (ad.base)]);
5439 else
5440 fprintf (file, "0");
5441 }
5442 return;
5443
5444 case 'S':
5445 {
5446 struct s390_address ad;
5447 int ret;
5448
5449 if (!MEM_P (x))
5450 {
5451 output_operand_lossage ("memory reference expected for "
5452 "'S' output modifier");
5453 return;
5454 }
5455 ret = s390_decompose_address (XEXP (x, 0), &ad);
5456
5457 if (!ret
5458 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5459 || ad.indx)
5460 {
5461 output_operand_lossage ("invalid address for 'S' output modifier");
5462 return;
5463 }
5464
5465 if (ad.disp)
5466 output_addr_const (file, ad.disp);
5467 else
5468 fprintf (file, "0");
5469
5470 if (ad.base)
5471 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
5472 }
5473 return;
5474
5475 case 'N':
5476 if (GET_CODE (x) == REG)
5477 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
5478 else if (GET_CODE (x) == MEM)
5479 x = change_address (x, VOIDmode,
5480 plus_constant (Pmode, XEXP (x, 0), 4));
5481 else
5482 output_operand_lossage ("register or memory expression expected "
5483 "for 'N' output modifier");
5484 break;
5485
5486 case 'M':
5487 if (GET_CODE (x) == REG)
5488 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
5489 else if (GET_CODE (x) == MEM)
5490 x = change_address (x, VOIDmode,
5491 plus_constant (Pmode, XEXP (x, 0), 8));
5492 else
5493 output_operand_lossage ("register or memory expression expected "
5494 "for 'M' output modifier");
5495 break;
5496
5497 case 'Y':
5498 print_shift_count_operand (file, x);
5499 return;
5500 }
5501
5502 switch (GET_CODE (x))
5503 {
5504 case REG:
5505 fprintf (file, "%s", reg_names[REGNO (x)]);
5506 break;
5507
5508 case MEM:
5509 output_address (XEXP (x, 0));
5510 break;
5511
5512 case CONST:
5513 case CODE_LABEL:
5514 case LABEL_REF:
5515 case SYMBOL_REF:
5516 output_addr_const (file, x);
5517 break;
5518
5519 case CONST_INT:
5520 if (code == 'b')
5521 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xff);
5522 else if (code == 'c')
5523 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ((INTVAL (x) & 0xff) ^ 0x80) - 0x80);
5524 else if (code == 'x')
5525 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xffff);
5526 else if (code == 'h')
5527 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
5528 else if (code == 'i')
5529 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5530 s390_extract_part (x, HImode, 0));
5531 else if (code == 'j')
5532 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5533 s390_extract_part (x, HImode, -1));
5534 else if (code == 'k')
5535 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5536 s390_extract_part (x, SImode, 0));
5537 else if (code == 'm')
5538 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5539 s390_extract_part (x, SImode, -1));
5540 else if (code == 'o')
5541 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xffffffff);
5542 else
5543 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
5544 break;
5545
5546 case CONST_DOUBLE:
5547 gcc_assert (GET_MODE (x) == VOIDmode);
5548 if (code == 'b')
5549 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xff);
5550 else if (code == 'x')
5551 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xffff);
5552 else if (code == 'h')
5553 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5554 ((CONST_DOUBLE_LOW (x) & 0xffff) ^ 0x8000) - 0x8000);
5555 else
5556 {
5557 if (code == 0)
5558 output_operand_lossage ("invalid constant - try using "
5559 "an output modifier");
5560 else
5561 output_operand_lossage ("invalid constant for output modifier '%c'",
5562 code);
5563 }
5564 break;
5565
5566 default:
5567 if (code == 0)
5568 output_operand_lossage ("invalid expression - try using "
5569 "an output modifier");
5570 else
5571 output_operand_lossage ("invalid expression for output "
5572 "modifier '%c'", code);
5573 break;
5574 }
5575 }
5576
5577 /* Target hook for assembling integer objects. We need to define it
5578 here to work around a bug in some versions of GAS, which couldn't
5579 handle values smaller than INT_MIN when printed in decimal. */
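/* For example, (const_int -2147483649) is emitted as
   ".quad 0xffffffff7fffffff" rather than in decimal.  */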
5580
5581 static bool
5582 s390_assemble_integer (rtx x, unsigned int size, int aligned_p)
5583 {
5584 if (size == 8 && aligned_p
5585 && GET_CODE (x) == CONST_INT && INTVAL (x) < INT_MIN)
5586 {
5587 fprintf (asm_out_file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n",
5588 INTVAL (x));
5589 return true;
5590 }
5591 return default_assemble_integer (x, size, aligned_p);
5592 }
5593
5594 /* Returns true if register REGNO is used for forming
5595 a memory address in expression X. */
5596
5597 static bool
5598 reg_used_in_mem_p (int regno, rtx x)
5599 {
5600 enum rtx_code code = GET_CODE (x);
5601 int i, j;
5602 const char *fmt;
5603
5604 if (code == MEM)
5605 {
5606 if (refers_to_regno_p (regno, regno+1,
5607 XEXP (x, 0), 0))
5608 return true;
5609 }
5610 else if (code == SET
5611 && GET_CODE (SET_DEST (x)) == PC)
5612 {
5613 if (refers_to_regno_p (regno, regno+1,
5614 SET_SRC (x), 0))
5615 return true;
5616 }
5617
5618 fmt = GET_RTX_FORMAT (code);
5619 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5620 {
5621 if (fmt[i] == 'e'
5622 && reg_used_in_mem_p (regno, XEXP (x, i)))
5623 return true;
5624
5625 else if (fmt[i] == 'E')
5626 for (j = 0; j < XVECLEN (x, i); j++)
5627 if (reg_used_in_mem_p (regno, XVECEXP (x, i, j)))
5628 return true;
5629 }
5630 return false;
5631 }
5632
5633 /* Returns true if expression DEP_RTX sets an address register
5634 used by instruction INSN to address memory. */
5635
5636 static bool
5637 addr_generation_dependency_p (rtx dep_rtx, rtx insn)
5638 {
5639 rtx target, pat;
5640
5641 if (GET_CODE (dep_rtx) == INSN)
5642 dep_rtx = PATTERN (dep_rtx);
5643
5644 if (GET_CODE (dep_rtx) == SET)
5645 {
5646 target = SET_DEST (dep_rtx);
5647 if (GET_CODE (target) == STRICT_LOW_PART)
5648 target = XEXP (target, 0);
5649 while (GET_CODE (target) == SUBREG)
5650 target = SUBREG_REG (target);
5651
5652 if (GET_CODE (target) == REG)
5653 {
5654 int regno = REGNO (target);
5655
5656 if (s390_safe_attr_type (insn) == TYPE_LA)
5657 {
5658 pat = PATTERN (insn);
5659 if (GET_CODE (pat) == PARALLEL)
5660 {
5661 gcc_assert (XVECLEN (pat, 0) == 2);
5662 pat = XVECEXP (pat, 0, 0);
5663 }
5664 gcc_assert (GET_CODE (pat) == SET);
5665 return refers_to_regno_p (regno, regno+1, SET_SRC (pat), 0);
5666 }
5667 else if (get_attr_atype (insn) == ATYPE_AGEN)
5668 return reg_used_in_mem_p (regno, PATTERN (insn));
5669 }
5670 }
5671 return false;
5672 }
5673
5674 /* Return 1 if dep_insn sets a register used by insn in the agen unit. */
5675
5676 int
5677 s390_agen_dep_p (rtx dep_insn, rtx insn)
5678 {
5679 rtx dep_rtx = PATTERN (dep_insn);
5680 int i;
5681
5682 if (GET_CODE (dep_rtx) == SET
5683 && addr_generation_dependency_p (dep_rtx, insn))
5684 return 1;
5685 else if (GET_CODE (dep_rtx) == PARALLEL)
5686 {
5687 for (i = 0; i < XVECLEN (dep_rtx, 0); i++)
5688 {
5689 if (addr_generation_dependency_p (XVECEXP (dep_rtx, 0, i), insn))
5690 return 1;
5691 }
5692 }
5693 return 0;
5694 }
5695
5696
5697 /* A C statement (sans semicolon) to update the integer scheduling priority
5698 INSN_PRIORITY (INSN). Increase the priority to execute the INSN earlier,
5699 reduce the priority to execute INSN later. Do not define this macro if
5700 you do not need to adjust the scheduling priorities of insns.
5701
5702 A STD instruction should be scheduled earlier,
5703 in order to use the bypass. */
5704 static int
5705 s390_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
5706 {
5707 if (! INSN_P (insn))
5708 return priority;
5709
5710 if (s390_tune != PROCESSOR_2084_Z990
5711 && s390_tune != PROCESSOR_2094_Z9_109
5712 && s390_tune != PROCESSOR_2097_Z10
5713 && s390_tune != PROCESSOR_2817_Z196)
5714 return priority;
5715
5716 switch (s390_safe_attr_type (insn))
5717 {
5718 case TYPE_FSTOREDF:
5719 case TYPE_FSTORESF:
5720 priority = priority << 3;
5721 break;
5722 case TYPE_STORE:
5723 case TYPE_STM:
5724 priority = priority << 1;
5725 break;
5726 default:
5727 break;
5728 }
5729 return priority;
5730 }
5731
5732
5733 /* The number of instructions that can be issued per cycle. */
5734
5735 static int
5736 s390_issue_rate (void)
5737 {
5738 switch (s390_tune)
5739 {
5740 case PROCESSOR_2084_Z990:
5741 case PROCESSOR_2094_Z9_109:
5742 case PROCESSOR_2817_Z196:
5743 return 3;
5744 case PROCESSOR_2097_Z10:
5745 return 2;
5746 default:
5747 return 1;
5748 }
5749 }
5750
5751 static int
5752 s390_first_cycle_multipass_dfa_lookahead (void)
5753 {
5754 return 4;
5755 }
5756
5757 /* Annotate every literal pool reference in X by an UNSPEC_LTREF expression.
5758 Fix up MEMs as required. */
5759
5760 static void
5761 annotate_constant_pool_refs (rtx *x)
5762 {
5763 int i, j;
5764 const char *fmt;
5765
5766 gcc_assert (GET_CODE (*x) != SYMBOL_REF
5767 || !CONSTANT_POOL_ADDRESS_P (*x));
5768
5769 /* Literal pool references can only occur inside a MEM ... */
5770 if (GET_CODE (*x) == MEM)
5771 {
5772 rtx memref = XEXP (*x, 0);
5773
5774 if (GET_CODE (memref) == SYMBOL_REF
5775 && CONSTANT_POOL_ADDRESS_P (memref))
5776 {
5777 rtx base = cfun->machine->base_reg;
5778 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, memref, base),
5779 UNSPEC_LTREF);
5780
5781 *x = replace_equiv_address (*x, addr);
5782 return;
5783 }
5784
5785 if (GET_CODE (memref) == CONST
5786 && GET_CODE (XEXP (memref, 0)) == PLUS
5787 && GET_CODE (XEXP (XEXP (memref, 0), 1)) == CONST_INT
5788 && GET_CODE (XEXP (XEXP (memref, 0), 0)) == SYMBOL_REF
5789 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (memref, 0), 0)))
5790 {
5791 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (memref, 0), 1));
5792 rtx sym = XEXP (XEXP (memref, 0), 0);
5793 rtx base = cfun->machine->base_reg;
5794 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5795 UNSPEC_LTREF);
5796
5797 *x = replace_equiv_address (*x, plus_constant (Pmode, addr, off));
5798 return;
5799 }
5800 }
5801
5802 /* ... or a load-address type pattern. */
5803 if (GET_CODE (*x) == SET)
5804 {
5805 rtx addrref = SET_SRC (*x);
5806
5807 if (GET_CODE (addrref) == SYMBOL_REF
5808 && CONSTANT_POOL_ADDRESS_P (addrref))
5809 {
5810 rtx base = cfun->machine->base_reg;
5811 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addrref, base),
5812 UNSPEC_LTREF);
5813
5814 SET_SRC (*x) = addr;
5815 return;
5816 }
5817
5818 if (GET_CODE (addrref) == CONST
5819 && GET_CODE (XEXP (addrref, 0)) == PLUS
5820 && GET_CODE (XEXP (XEXP (addrref, 0), 1)) == CONST_INT
5821 && GET_CODE (XEXP (XEXP (addrref, 0), 0)) == SYMBOL_REF
5822 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (addrref, 0), 0)))
5823 {
5824 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (addrref, 0), 1));
5825 rtx sym = XEXP (XEXP (addrref, 0), 0);
5826 rtx base = cfun->machine->base_reg;
5827 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5828 UNSPEC_LTREF);
5829
5830 SET_SRC (*x) = plus_constant (Pmode, addr, off);
5831 return;
5832 }
5833 }
5834
5835 /* Annotate LTREL_BASE as well. */
5836 if (GET_CODE (*x) == UNSPEC
5837 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
5838 {
5839 rtx base = cfun->machine->base_reg;
5840 *x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XVECEXP (*x, 0, 0), base),
5841 UNSPEC_LTREL_BASE);
5842 return;
5843 }
5844
5845 fmt = GET_RTX_FORMAT (GET_CODE (*x));
5846 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
5847 {
5848 if (fmt[i] == 'e')
5849 {
5850 annotate_constant_pool_refs (&XEXP (*x, i));
5851 }
5852 else if (fmt[i] == 'E')
5853 {
5854 for (j = 0; j < XVECLEN (*x, i); j++)
5855 annotate_constant_pool_refs (&XVECEXP (*x, i, j));
5856 }
5857 }
5858 }
5859
5860 /* Split all branches that exceed the maximum distance.
5861 Returns true if this created a new literal pool entry. */
5862
5863 static int
5864 s390_split_branches (void)
5865 {
5866 rtx temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
5867 int new_literal = 0, ret;
5868 rtx insn, pat, tmp, target;
5869 rtx *label;
5870
5871 /* We need correct insn addresses. */
5872
5873 shorten_branches (get_insns ());
5874
5875 /* Find all branches that exceed 64KB, and split them. */
5876
5877 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5878 {
5879 if (GET_CODE (insn) != JUMP_INSN)
5880 continue;
5881
5882 pat = PATTERN (insn);
5883 if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 2)
5884 pat = XVECEXP (pat, 0, 0);
5885 if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
5886 continue;
5887
5888 if (GET_CODE (SET_SRC (pat)) == LABEL_REF)
5889 {
5890 label = &SET_SRC (pat);
5891 }
5892 else if (GET_CODE (SET_SRC (pat)) == IF_THEN_ELSE)
5893 {
5894 if (GET_CODE (XEXP (SET_SRC (pat), 1)) == LABEL_REF)
5895 label = &XEXP (SET_SRC (pat), 1);
5896 else if (GET_CODE (XEXP (SET_SRC (pat), 2)) == LABEL_REF)
5897 label = &XEXP (SET_SRC (pat), 2);
5898 else
5899 continue;
5900 }
5901 else
5902 continue;
5903
5904 if (get_attr_length (insn) <= 4)
5905 continue;
5906
5907 /* We are going to use the return register as a scratch register;
5908 make sure it will be saved/restored by the prologue/epilogue. */
5909 cfun_frame_layout.save_return_addr_p = 1;
5910
5911 if (!flag_pic)
5912 {
5913 new_literal = 1;
5914 tmp = force_const_mem (Pmode, *label);
5915 tmp = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, tmp), insn);
5916 INSN_ADDRESSES_NEW (tmp, -1);
5917 annotate_constant_pool_refs (&PATTERN (tmp));
5918
5919 target = temp_reg;
5920 }
5921 else
5922 {
5923 new_literal = 1;
5924 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, *label),
5925 UNSPEC_LTREL_OFFSET);
5926 target = gen_rtx_CONST (Pmode, target);
5927 target = force_const_mem (Pmode, target);
5928 tmp = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, target), insn);
5929 INSN_ADDRESSES_NEW (tmp, -1);
5930 annotate_constant_pool_refs (&PATTERN (tmp));
5931
5932 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XEXP (target, 0),
5933 cfun->machine->base_reg),
5934 UNSPEC_LTREL_BASE);
5935 target = gen_rtx_PLUS (Pmode, temp_reg, target);
5936 }
5937
5938 ret = validate_change (insn, label, target, 0);
5939 gcc_assert (ret);
5940 }
5941
5942 return new_literal;
5943 }
5944
5945
5946 /* Find an annotated literal pool symbol referenced in RTX X,
5947 and store it at REF. Will abort if X contains references to
5948 more than one such pool symbol; multiple references to the same
5949 symbol are allowed, however.
5950
5951 The rtx pointed to by REF must be initialized to NULL_RTX
5952 by the caller before calling this routine. */
5953
5954 static void
5955 find_constant_pool_ref (rtx x, rtx *ref)
5956 {
5957 int i, j;
5958 const char *fmt;
5959
5960 /* Ignore LTREL_BASE references. */
5961 if (GET_CODE (x) == UNSPEC
5962 && XINT (x, 1) == UNSPEC_LTREL_BASE)
5963 return;
5964 /* Likewise POOL_ENTRY insns. */
5965 if (GET_CODE (x) == UNSPEC_VOLATILE
5966 && XINT (x, 1) == UNSPECV_POOL_ENTRY)
5967 return;
5968
5969 gcc_assert (GET_CODE (x) != SYMBOL_REF
5970 || !CONSTANT_POOL_ADDRESS_P (x));
5971
5972 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_LTREF)
5973 {
5974 rtx sym = XVECEXP (x, 0, 0);
5975 gcc_assert (GET_CODE (sym) == SYMBOL_REF
5976 && CONSTANT_POOL_ADDRESS_P (sym));
5977
5978 if (*ref == NULL_RTX)
5979 *ref = sym;
5980 else
5981 gcc_assert (*ref == sym);
5982
5983 return;
5984 }
5985
5986 fmt = GET_RTX_FORMAT (GET_CODE (x));
5987 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5988 {
5989 if (fmt[i] == 'e')
5990 {
5991 find_constant_pool_ref (XEXP (x, i), ref);
5992 }
5993 else if (fmt[i] == 'E')
5994 {
5995 for (j = 0; j < XVECLEN (x, i); j++)
5996 find_constant_pool_ref (XVECEXP (x, i, j), ref);
5997 }
5998 }
5999 }
6000
6001 /* Replace every reference to the annotated literal pool
6002 symbol REF in X by its base plus OFFSET. */
6003
6004 static void
6005 replace_constant_pool_ref (rtx *x, rtx ref, rtx offset)
6006 {
6007 int i, j;
6008 const char *fmt;
6009
6010 gcc_assert (*x != ref);
6011
6012 if (GET_CODE (*x) == UNSPEC
6013 && XINT (*x, 1) == UNSPEC_LTREF
6014 && XVECEXP (*x, 0, 0) == ref)
6015 {
6016 *x = gen_rtx_PLUS (Pmode, XVECEXP (*x, 0, 1), offset);
6017 return;
6018 }
6019
6020 if (GET_CODE (*x) == PLUS
6021 && GET_CODE (XEXP (*x, 1)) == CONST_INT
6022 && GET_CODE (XEXP (*x, 0)) == UNSPEC
6023 && XINT (XEXP (*x, 0), 1) == UNSPEC_LTREF
6024 && XVECEXP (XEXP (*x, 0), 0, 0) == ref)
6025 {
6026 rtx addr = gen_rtx_PLUS (Pmode, XVECEXP (XEXP (*x, 0), 0, 1), offset);
6027 *x = plus_constant (Pmode, addr, INTVAL (XEXP (*x, 1)));
6028 return;
6029 }
6030
6031 fmt = GET_RTX_FORMAT (GET_CODE (*x));
6032 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
6033 {
6034 if (fmt[i] == 'e')
6035 {
6036 replace_constant_pool_ref (&XEXP (*x, i), ref, offset);
6037 }
6038 else if (fmt[i] == 'E')
6039 {
6040 for (j = 0; j < XVECLEN (*x, i); j++)
6041 replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, offset);
6042 }
6043 }
6044 }
6045
6046 /* Check whether X contains an UNSPEC_LTREL_BASE.
6047 Return its constant pool symbol if found, NULL_RTX otherwise. */
6048
6049 static rtx
6050 find_ltrel_base (rtx x)
6051 {
6052 int i, j;
6053 const char *fmt;
6054
6055 if (GET_CODE (x) == UNSPEC
6056 && XINT (x, 1) == UNSPEC_LTREL_BASE)
6057 return XVECEXP (x, 0, 0);
6058
6059 fmt = GET_RTX_FORMAT (GET_CODE (x));
6060 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6061 {
6062 if (fmt[i] == 'e')
6063 {
6064 rtx fnd = find_ltrel_base (XEXP (x, i));
6065 if (fnd)
6066 return fnd;
6067 }
6068 else if (fmt[i] == 'E')
6069 {
6070 for (j = 0; j < XVECLEN (x, i); j++)
6071 {
6072 rtx fnd = find_ltrel_base (XVECEXP (x, i, j));
6073 if (fnd)
6074 return fnd;
6075 }
6076 }
6077 }
6078
6079 return NULL_RTX;
6080 }
6081
6082 /* Replace any occurrence of UNSPEC_LTREL_BASE in X with its base. */
6083
6084 static void
6085 replace_ltrel_base (rtx *x)
6086 {
6087 int i, j;
6088 const char *fmt;
6089
6090 if (GET_CODE (*x) == UNSPEC
6091 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
6092 {
6093 *x = XVECEXP (*x, 0, 1);
6094 return;
6095 }
6096
6097 fmt = GET_RTX_FORMAT (GET_CODE (*x));
6098 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
6099 {
6100 if (fmt[i] == 'e')
6101 {
6102 replace_ltrel_base (&XEXP (*x, i));
6103 }
6104 else if (fmt[i] == 'E')
6105 {
6106 for (j = 0; j < XVECLEN (*x, i); j++)
6107 replace_ltrel_base (&XVECEXP (*x, i, j));
6108 }
6109 }
6110 }
6111
6112
6113 /* We keep a list of constants which we have to add to internal
6114 constant tables in the middle of large functions. */
6115
6116 #define NR_C_MODES 11
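/* Keep this list sorted by decreasing alignment requirement: s390_dump_pool
   emits the pool entries in this order so that each constant ends up
   naturally aligned. */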
6117 enum machine_mode constant_modes[NR_C_MODES] =
6118 {
6119 TFmode, TImode, TDmode,
6120 DFmode, DImode, DDmode,
6121 SFmode, SImode, SDmode,
6122 HImode,
6123 QImode
6124 };
6125
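/* One constant pool entry. VALUE holds the constant rtx (or, for execute
   targets, the insn providing the target template); LABEL is the label
   emitted in front of the entry within the pool. */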
6126 struct constant
6127 {
6128 struct constant *next;
6129 rtx value;
6130 rtx label;
6131 };
6132
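/* A single pool chunk. FIRST_INSN is the first insn covered by the chunk,
   POOL_INSN the placeholder insn marking where the pool will be dumped,
   INSNS a bitmap of the UIDs of all covered insns, LABEL the base label of
   the chunk, and SIZE its accumulated size in bytes. EMIT_POOL_AFTER, if
   set, is the insn after which the main literal pool has to be emitted in
   the non-chunkified case. */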
6133 struct constant_pool
6134 {
6135 struct constant_pool *next;
6136 rtx first_insn;
6137 rtx pool_insn;
6138 bitmap insns;
6139 rtx emit_pool_after;
6140
6141 struct constant *constants[NR_C_MODES];
6142 struct constant *execute;
6143 rtx label;
6144 int size;
6145 };
6146
6147 /* Allocate new constant_pool structure. */
6148
6149 static struct constant_pool *
6150 s390_alloc_pool (void)
6151 {
6152 struct constant_pool *pool;
6153 int i;
6154
6155 pool = (struct constant_pool *) xmalloc (sizeof *pool);
6156 pool->next = NULL;
6157 for (i = 0; i < NR_C_MODES; i++)
6158 pool->constants[i] = NULL;
6159
6160 pool->execute = NULL;
6161 pool->label = gen_label_rtx ();
6162 pool->first_insn = NULL_RTX;
6163 pool->pool_insn = NULL_RTX;
6164 pool->insns = BITMAP_ALLOC (NULL);
6165 pool->size = 0;
6166 pool->emit_pool_after = NULL_RTX;
6167
6168 return pool;
6169 }
6170
6171 /* Create new constant pool covering instructions starting at INSN
6172 and chain it to the end of POOL_LIST. */
6173
6174 static struct constant_pool *
6175 s390_start_pool (struct constant_pool **pool_list, rtx insn)
6176 {
6177 struct constant_pool *pool, **prev;
6178
6179 pool = s390_alloc_pool ();
6180 pool->first_insn = insn;
6181
6182 for (prev = pool_list; *prev; prev = &(*prev)->next)
6183 ;
6184 *prev = pool;
6185
6186 return pool;
6187 }
6188
6189 /* End range of instructions covered by POOL at INSN and emit
6190 placeholder insn representing the pool. */
6191
6192 static void
6193 s390_end_pool (struct constant_pool *pool, rtx insn)
6194 {
6195 rtx pool_size = GEN_INT (pool->size + 8 /* alignment slop */);
6196
6197 if (!insn)
6198 insn = get_last_insn ();
6199
6200 pool->pool_insn = emit_insn_after (gen_pool (pool_size), insn);
6201 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6202 }
6203
6204 /* Add INSN to the list of insns covered by POOL. */
6205
6206 static void
6207 s390_add_pool_insn (struct constant_pool *pool, rtx insn)
6208 {
6209 bitmap_set_bit (pool->insns, INSN_UID (insn));
6210 }
6211
6212 /* Return pool out of POOL_LIST that covers INSN. */
6213
6214 static struct constant_pool *
6215 s390_find_pool (struct constant_pool *pool_list, rtx insn)
6216 {
6217 struct constant_pool *pool;
6218
6219 for (pool = pool_list; pool; pool = pool->next)
6220 if (bitmap_bit_p (pool->insns, INSN_UID (insn)))
6221 break;
6222
6223 return pool;
6224 }
6225
6226 /* Add constant VAL of mode MODE to the constant pool POOL. */
6227
6228 static void
6229 s390_add_constant (struct constant_pool *pool, rtx val, enum machine_mode mode)
6230 {
6231 struct constant *c;
6232 int i;
6233
6234 for (i = 0; i < NR_C_MODES; i++)
6235 if (constant_modes[i] == mode)
6236 break;
6237 gcc_assert (i != NR_C_MODES);
6238
6239 for (c = pool->constants[i]; c != NULL; c = c->next)
6240 if (rtx_equal_p (val, c->value))
6241 break;
6242
6243 if (c == NULL)
6244 {
6245 c = (struct constant *) xmalloc (sizeof *c);
6246 c->value = val;
6247 c->label = gen_label_rtx ();
6248 c->next = pool->constants[i];
6249 pool->constants[i] = c;
6250 pool->size += GET_MODE_SIZE (mode);
6251 }
6252 }
6253
6254 /* Return an rtx that represents the offset of X from the start of
6255 pool POOL. */
6256
6257 static rtx
6258 s390_pool_offset (struct constant_pool *pool, rtx x)
6259 {
6260 rtx label;
6261
6262 label = gen_rtx_LABEL_REF (GET_MODE (x), pool->label);
6263 x = gen_rtx_UNSPEC (GET_MODE (x), gen_rtvec (2, x, label),
6264 UNSPEC_POOL_OFFSET);
6265 return gen_rtx_CONST (GET_MODE (x), x);
6266 }
6267
6268 /* Find constant VAL of mode MODE in the constant pool POOL.
6269 Return an RTX describing the distance from the start of
6270 the pool to the location of the constant. */
6271
6272 static rtx
6273 s390_find_constant (struct constant_pool *pool, rtx val,
6274 enum machine_mode mode)
6275 {
6276 struct constant *c;
6277 int i;
6278
6279 for (i = 0; i < NR_C_MODES; i++)
6280 if (constant_modes[i] == mode)
6281 break;
6282 gcc_assert (i != NR_C_MODES);
6283
6284 for (c = pool->constants[i]; c != NULL; c = c->next)
6285 if (rtx_equal_p (val, c->value))
6286 break;
6287
6288 gcc_assert (c);
6289
6290 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
6291 }
6292
6293 /* Check whether INSN is an execute. Return the label_ref to its
6294 execute target template if so, NULL_RTX otherwise. */
6295
6296 static rtx
6297 s390_execute_label (rtx insn)
6298 {
6299 if (GET_CODE (insn) == INSN
6300 && GET_CODE (PATTERN (insn)) == PARALLEL
6301 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == UNSPEC
6302 && XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_EXECUTE)
6303 return XVECEXP (XVECEXP (PATTERN (insn), 0, 0), 0, 2);
6304
6305 return NULL_RTX;
6306 }
6307
6308 /* Add execute target for INSN to the constant pool POOL. */
6309
6310 static void
6311 s390_add_execute (struct constant_pool *pool, rtx insn)
6312 {
6313 struct constant *c;
6314
6315 for (c = pool->execute; c != NULL; c = c->next)
6316 if (INSN_UID (insn) == INSN_UID (c->value))
6317 break;
6318
6319 if (c == NULL)
6320 {
6321 c = (struct constant *) xmalloc (sizeof *c);
6322 c->value = insn;
6323 c->label = gen_label_rtx ();
6324 c->next = pool->execute;
6325 pool->execute = c;
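/* Reserve 6 bytes, the maximum length of a single insn, for the
   execute target template. */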
6326 pool->size += 6;
6327 }
6328 }
6329
6330 /* Find execute target for INSN in the constant pool POOL.
6331 Return an RTX describing the distance from the start of
6332 the pool to the location of the execute target. */
6333
6334 static rtx
6335 s390_find_execute (struct constant_pool *pool, rtx insn)
6336 {
6337 struct constant *c;
6338
6339 for (c = pool->execute; c != NULL; c = c->next)
6340 if (INSN_UID (insn) == INSN_UID (c->value))
6341 break;
6342
6343 gcc_assert (c);
6344
6345 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
6346 }
6347
6348 /* For an execute INSN, extract the execute target template. */
6349
6350 static rtx
6351 s390_execute_target (rtx insn)
6352 {
6353 rtx pattern = PATTERN (insn);
6354 gcc_assert (s390_execute_label (insn));
6355
6356 if (XVECLEN (pattern, 0) == 2)
6357 {
6358 pattern = copy_rtx (XVECEXP (pattern, 0, 1));
6359 }
6360 else
6361 {
6362 rtvec vec = rtvec_alloc (XVECLEN (pattern, 0) - 1);
6363 int i;
6364
6365 for (i = 0; i < XVECLEN (pattern, 0) - 1; i++)
6366 RTVEC_ELT (vec, i) = copy_rtx (XVECEXP (pattern, 0, i + 1));
6367
6368 pattern = gen_rtx_PARALLEL (VOIDmode, vec);
6369 }
6370
6371 return pattern;
6372 }
6373
6374 /* Indicate that INSN cannot be duplicated. This is the case for
6375 execute insns that carry a unique label. */
6376
6377 static bool
6378 s390_cannot_copy_insn_p (rtx insn)
6379 {
6380 rtx label = s390_execute_label (insn);
6381 return label && label != const0_rtx;
6382 }
6383
6384 /* Dump out the constants in POOL. If REMOTE_LABEL is true,
6385 do not emit the pool base label. */
6386
6387 static void
6388 s390_dump_pool (struct constant_pool *pool, bool remote_label)
6389 {
6390 struct constant *c;
6391 rtx insn = pool->pool_insn;
6392 int i;
6393
6394 /* Switch to rodata section. */
6395 if (TARGET_CPU_ZARCH)
6396 {
6397 insn = emit_insn_after (gen_pool_section_start (), insn);
6398 INSN_ADDRESSES_NEW (insn, -1);
6399 }
6400
6401 /* Ensure minimum pool alignment. */
6402 if (TARGET_CPU_ZARCH)
6403 insn = emit_insn_after (gen_pool_align (GEN_INT (8)), insn);
6404 else
6405 insn = emit_insn_after (gen_pool_align (GEN_INT (4)), insn);
6406 INSN_ADDRESSES_NEW (insn, -1);
6407
6408 /* Emit pool base label. */
6409 if (!remote_label)
6410 {
6411 insn = emit_label_after (pool->label, insn);
6412 INSN_ADDRESSES_NEW (insn, -1);
6413 }
6414
6415 /* Dump constants in descending alignment requirement order,
6416 ensuring proper alignment for every constant. */
6417 for (i = 0; i < NR_C_MODES; i++)
6418 for (c = pool->constants[i]; c; c = c->next)
6419 {
6420 /* Convert UNSPEC_LTREL_OFFSET unspecs to pool-relative references. */
6421 rtx value = copy_rtx (c->value);
6422 if (GET_CODE (value) == CONST
6423 && GET_CODE (XEXP (value, 0)) == UNSPEC
6424 && XINT (XEXP (value, 0), 1) == UNSPEC_LTREL_OFFSET
6425 && XVECLEN (XEXP (value, 0), 0) == 1)
6426 value = s390_pool_offset (pool, XVECEXP (XEXP (value, 0), 0, 0));
6427
6428 insn = emit_label_after (c->label, insn);
6429 INSN_ADDRESSES_NEW (insn, -1);
6430
6431 value = gen_rtx_UNSPEC_VOLATILE (constant_modes[i],
6432 gen_rtvec (1, value),
6433 UNSPECV_POOL_ENTRY);
6434 insn = emit_insn_after (value, insn);
6435 INSN_ADDRESSES_NEW (insn, -1);
6436 }
6437
6438 /* Ensure minimum alignment for instructions. */
6439 insn = emit_insn_after (gen_pool_align (GEN_INT (2)), insn);
6440 INSN_ADDRESSES_NEW (insn, -1);
6441
6442 /* Output in-pool execute template insns. */
6443 for (c = pool->execute; c; c = c->next)
6444 {
6445 insn = emit_label_after (c->label, insn);
6446 INSN_ADDRESSES_NEW (insn, -1);
6447
6448 insn = emit_insn_after (s390_execute_target (c->value), insn);
6449 INSN_ADDRESSES_NEW (insn, -1);
6450 }
6451
6452 /* Switch back to previous section. */
6453 if (TARGET_CPU_ZARCH)
6454 {
6455 insn = emit_insn_after (gen_pool_section_end (), insn);
6456 INSN_ADDRESSES_NEW (insn, -1);
6457 }
6458
6459 insn = emit_barrier_after (insn);
6460 INSN_ADDRESSES_NEW (insn, -1);
6461
6462 /* Remove placeholder insn. */
6463 remove_insn (pool->pool_insn);
6464 }
6465
6466 /* Free all memory used by POOL. */
6467
6468 static void
6469 s390_free_pool (struct constant_pool *pool)
6470 {
6471 struct constant *c, *next;
6472 int i;
6473
6474 for (i = 0; i < NR_C_MODES; i++)
6475 for (c = pool->constants[i]; c; c = next)
6476 {
6477 next = c->next;
6478 free (c);
6479 }
6480
6481 for (c = pool->execute; c; c = next)
6482 {
6483 next = c->next;
6484 free (c);
6485 }
6486
6487 BITMAP_FREE (pool->insns);
6488 free (pool);
6489 }
6490
6491
6492 /* Collect main literal pool. Return NULL on overflow. */
6493
6494 static struct constant_pool *
6495 s390_mainpool_start (void)
6496 {
6497 struct constant_pool *pool;
6498 rtx insn;
6499
6500 pool = s390_alloc_pool ();
6501
6502 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6503 {
6504 if (GET_CODE (insn) == INSN
6505 && GET_CODE (PATTERN (insn)) == SET
6506 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC_VOLATILE
6507 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPECV_MAIN_POOL)
6508 {
6509 gcc_assert (!pool->pool_insn);
6510 pool->pool_insn = insn;
6511 }
6512
6513 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
6514 {
6515 s390_add_execute (pool, insn);
6516 }
6517 else if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6518 {
6519 rtx pool_ref = NULL_RTX;
6520 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6521 if (pool_ref)
6522 {
6523 rtx constant = get_pool_constant (pool_ref);
6524 enum machine_mode mode = get_pool_mode (pool_ref);
6525 s390_add_constant (pool, constant, mode);
6526 }
6527 }
6528
6529 /* If hot/cold partitioning is enabled, we have to make sure that
6530 the literal pool is emitted in the same section where the
6531 initialization of the literal pool base pointer takes place.
6532 emit_pool_after is only used in the non-overflow case on
6533 non-Z CPUs, where we can emit the literal pool at the end of the
6534 function body within the text section. */
6535 if (NOTE_P (insn)
6536 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS
6537 && !pool->emit_pool_after)
6538 pool->emit_pool_after = PREV_INSN (insn);
6539 }
6540
6541 gcc_assert (pool->pool_insn || pool->size == 0);
6542
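/* A pool of 4096 bytes or more cannot be addressed with a single base
   register and 12-bit displacements, so it has to be chunkified. */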
6543 if (pool->size >= 4096)
6544 {
6545 /* We're going to chunkify the pool, so remove the main
6546 pool placeholder insn. */
6547 remove_insn (pool->pool_insn);
6548
6549 s390_free_pool (pool);
6550 pool = NULL;
6551 }
6552
6553 /* If the function ends with the section where the literal pool
6554 should be emitted, set the marker to its end. */
6555 if (pool && !pool->emit_pool_after)
6556 pool->emit_pool_after = get_last_insn ();
6557
6558 return pool;
6559 }
6560
6561 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6562 Modify the current function to output the pool constants as well as
6563 the pool register setup instruction. */
6564
6565 static void
6566 s390_mainpool_finish (struct constant_pool *pool)
6567 {
6568 rtx base_reg = cfun->machine->base_reg;
6569 rtx insn;
6570
6571 /* If the pool is empty, we're done. */
6572 if (pool->size == 0)
6573 {
6574 /* We don't actually need a base register after all. */
6575 cfun->machine->base_reg = NULL_RTX;
6576
6577 if (pool->pool_insn)
6578 remove_insn (pool->pool_insn);
6579 s390_free_pool (pool);
6580 return;
6581 }
6582
6583 /* We need correct insn addresses. */
6584 shorten_branches (get_insns ());
6585
6586 /* On zSeries, we use a LARL to load the pool register. The pool is
6587 located in the .rodata section, so we emit it after the function. */
6588 if (TARGET_CPU_ZARCH)
6589 {
6590 insn = gen_main_base_64 (base_reg, pool->label);
6591 insn = emit_insn_after (insn, pool->pool_insn);
6592 INSN_ADDRESSES_NEW (insn, -1);
6593 remove_insn (pool->pool_insn);
6594
6595 insn = get_last_insn ();
6596 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6597 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6598
6599 s390_dump_pool (pool, 0);
6600 }
6601
6602 /* On S/390, if the total size of the function's code plus literal pool
6603 does not exceed 4096 bytes, we use BASR to set up a function base
6604 pointer, and emit the literal pool at the end of the function. */
6605 else if (INSN_ADDRESSES (INSN_UID (pool->emit_pool_after))
6606 + pool->size + 8 /* alignment slop */ < 4096)
6607 {
6608 insn = gen_main_base_31_small (base_reg, pool->label);
6609 insn = emit_insn_after (insn, pool->pool_insn);
6610 INSN_ADDRESSES_NEW (insn, -1);
6611 remove_insn (pool->pool_insn);
6612
6613 insn = emit_label_after (pool->label, insn);
6614 INSN_ADDRESSES_NEW (insn, -1);
6615
6616 /* emit_pool_after will be set by s390_mainpool_start to the
6617 last insn of the section where the literal pool should be
6618 emitted. */
6619 insn = pool->emit_pool_after;
6620
6621 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6622 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6623
6624 s390_dump_pool (pool, 1);
6625 }
6626
6627 /* Otherwise, we emit an inline literal pool and use BASR to branch
6628 over it, setting up the pool register at the same time. */
6629 else
6630 {
6631 rtx pool_end = gen_label_rtx ();
6632
6633 insn = gen_main_base_31_large (base_reg, pool->label, pool_end);
6634 insn = emit_jump_insn_after (insn, pool->pool_insn);
6635 JUMP_LABEL (insn) = pool_end;
6636 INSN_ADDRESSES_NEW (insn, -1);
6637 remove_insn (pool->pool_insn);
6638
6639 insn = emit_label_after (pool->label, insn);
6640 INSN_ADDRESSES_NEW (insn, -1);
6641
6642 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6643 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6644
6645 insn = emit_label_after (pool_end, pool->pool_insn);
6646 INSN_ADDRESSES_NEW (insn, -1);
6647
6648 s390_dump_pool (pool, 1);
6649 }
6650
6651
6652 /* Replace all literal pool references. */
6653
6654 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6655 {
6656 if (INSN_P (insn))
6657 replace_ltrel_base (&PATTERN (insn));
6658
6659 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6660 {
6661 rtx addr, pool_ref = NULL_RTX;
6662 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6663 if (pool_ref)
6664 {
6665 if (s390_execute_label (insn))
6666 addr = s390_find_execute (pool, insn);
6667 else
6668 addr = s390_find_constant (pool, get_pool_constant (pool_ref),
6669 get_pool_mode (pool_ref));
6670
6671 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
6672 INSN_CODE (insn) = -1;
6673 }
6674 }
6675 }
6676
6677
6678 /* Free the pool. */
6679 s390_free_pool (pool);
6680 }
6681
6682 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6683 We have decided we cannot use this pool, so revert all changes
6684 to the current function that were done by s390_mainpool_start. */
6685 static void
6686 s390_mainpool_cancel (struct constant_pool *pool)
6687 {
6688 /* We didn't actually change the instruction stream, so simply
6689 free the pool memory. */
6690 s390_free_pool (pool);
6691 }
6692
6693
6694 /* Chunkify the literal pool. */
6695
6696 #define S390_POOL_CHUNK_MIN 0xc00
6697 #define S390_POOL_CHUNK_MAX 0xe00
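/* Pool chunks are kept well below 4096 bytes so that every entry stays
   addressable via a 12-bit displacement from the chunk's base label. On
   31-bit targets, chunks between CHUNK_MIN and CHUNK_MAX are preferably
   ended at an existing BARRIER; only past CHUNK_MAX (or at a section
   switch) is a branch around the pool forced. */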
6698
6699 static struct constant_pool *
6700 s390_chunkify_start (void)
6701 {
6702 struct constant_pool *curr_pool = NULL, *pool_list = NULL;
6703 int extra_size = 0;
6704 bitmap far_labels;
6705 rtx pending_ltrel = NULL_RTX;
6706 rtx insn;
6707
6708 rtx (*gen_reload_base) (rtx, rtx) =
6709 TARGET_CPU_ZARCH? gen_reload_base_64 : gen_reload_base_31;
6710
6711
6712 /* We need correct insn addresses. */
6713
6714 shorten_branches (get_insns ());
6715
6716 /* Scan all insns and move literals to pool chunks. */
6717
6718 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6719 {
6720 bool section_switch_p = false;
6721
6722 /* Check for pending LTREL_BASE. */
6723 if (INSN_P (insn))
6724 {
6725 rtx ltrel_base = find_ltrel_base (PATTERN (insn));
6726 if (ltrel_base)
6727 {
6728 gcc_assert (ltrel_base == pending_ltrel);
6729 pending_ltrel = NULL_RTX;
6730 }
6731 }
6732
6733 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
6734 {
6735 if (!curr_pool)
6736 curr_pool = s390_start_pool (&pool_list, insn);
6737
6738 s390_add_execute (curr_pool, insn);
6739 s390_add_pool_insn (curr_pool, insn);
6740 }
6741 else if (GET_CODE (insn) == INSN || CALL_P (insn))
6742 {
6743 rtx pool_ref = NULL_RTX;
6744 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6745 if (pool_ref)
6746 {
6747 rtx constant = get_pool_constant (pool_ref);
6748 enum machine_mode mode = get_pool_mode (pool_ref);
6749
6750 if (!curr_pool)
6751 curr_pool = s390_start_pool (&pool_list, insn);
6752
6753 s390_add_constant (curr_pool, constant, mode);
6754 s390_add_pool_insn (curr_pool, insn);
6755
6756 /* Don't split the pool chunk between a LTREL_OFFSET load
6757 and the corresponding LTREL_BASE. */
6758 if (GET_CODE (constant) == CONST
6759 && GET_CODE (XEXP (constant, 0)) == UNSPEC
6760 && XINT (XEXP (constant, 0), 1) == UNSPEC_LTREL_OFFSET)
6761 {
6762 gcc_assert (!pending_ltrel);
6763 pending_ltrel = pool_ref;
6764 }
6765 }
6766 }
6767
6768 if (GET_CODE (insn) == JUMP_INSN || GET_CODE (insn) == CODE_LABEL)
6769 {
6770 if (curr_pool)
6771 s390_add_pool_insn (curr_pool, insn);
6772 /* An LTREL_BASE must follow within the same basic block. */
6773 gcc_assert (!pending_ltrel);
6774 }
6775
6776 if (NOTE_P (insn))
6777 switch (NOTE_KIND (insn))
6778 {
6779 case NOTE_INSN_SWITCH_TEXT_SECTIONS:
6780 section_switch_p = true;
6781 break;
6782 case NOTE_INSN_VAR_LOCATION:
6783 case NOTE_INSN_CALL_ARG_LOCATION:
6784 continue;
6785 default:
6786 break;
6787 }
6788
6789 if (!curr_pool
6790 || INSN_ADDRESSES_SIZE () <= (size_t) INSN_UID (insn)
6791 || INSN_ADDRESSES (INSN_UID (insn)) == -1)
6792 continue;
6793
6794 if (TARGET_CPU_ZARCH)
6795 {
6796 if (curr_pool->size < S390_POOL_CHUNK_MAX)
6797 continue;
6798
6799 s390_end_pool (curr_pool, NULL_RTX);
6800 curr_pool = NULL;
6801 }
6802 else
6803 {
6804 int chunk_size = INSN_ADDRESSES (INSN_UID (insn))
6805 - INSN_ADDRESSES (INSN_UID (curr_pool->first_insn))
6806 + extra_size;
6807
6808 /* We will later have to insert base register reload insns.
6809 Those will have an effect on code size, which we need to
6810 consider here. This calculation makes rather pessimistic
6811 worst-case assumptions. */
6812 if (GET_CODE (insn) == CODE_LABEL)
6813 extra_size += 6;
6814
6815 if (chunk_size < S390_POOL_CHUNK_MIN
6816 && curr_pool->size < S390_POOL_CHUNK_MIN
6817 && !section_switch_p)
6818 continue;
6819
6820 /* Pool chunks can only be inserted after BARRIERs ... */
6821 if (GET_CODE (insn) == BARRIER)
6822 {
6823 s390_end_pool (curr_pool, insn);
6824 curr_pool = NULL;
6825 extra_size = 0;
6826 }
6827
6828 /* ... so if we don't find one in time, create one. */
6829 else if (chunk_size > S390_POOL_CHUNK_MAX
6830 || curr_pool->size > S390_POOL_CHUNK_MAX
6831 || section_switch_p)
6832 {
6833 rtx label, jump, barrier, next, prev;
6834
6835 if (!section_switch_p)
6836 {
6837 /* We can insert the barrier only after a 'real' insn. */
6838 if (GET_CODE (insn) != INSN && GET_CODE (insn) != CALL_INSN)
6839 continue;
6840 if (get_attr_length (insn) == 0)
6841 continue;
6842 /* Don't separate LTREL_BASE from the corresponding
6843 LTREL_OFFSET load. */
6844 if (pending_ltrel)
6845 continue;
6846 next = insn;
6847 do
6848 {
6849 insn = next;
6850 next = NEXT_INSN (insn);
6851 }
6852 while (next
6853 && NOTE_P (next)
6854 && (NOTE_KIND (next) == NOTE_INSN_VAR_LOCATION
6855 || NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION));
6856 }
6857 else
6858 {
6859 gcc_assert (!pending_ltrel);
6860
6861 /* The old pool has to end before the section switch
6862 note in order to make it part of the current
6863 section. */
6864 insn = PREV_INSN (insn);
6865 }
6866
6867 label = gen_label_rtx ();
6868 prev = insn;
6869 if (prev && NOTE_P (prev))
6870 prev = prev_nonnote_insn (prev);
6871 if (prev)
6872 jump = emit_jump_insn_after_setloc (gen_jump (label), insn,
6873 INSN_LOCATION (prev));
6874 else
6875 jump = emit_jump_insn_after_noloc (gen_jump (label), insn);
6876 barrier = emit_barrier_after (jump);
6877 insn = emit_label_after (label, barrier);
6878 JUMP_LABEL (jump) = label;
6879 LABEL_NUSES (label) = 1;
6880
6881 INSN_ADDRESSES_NEW (jump, -1);
6882 INSN_ADDRESSES_NEW (barrier, -1);
6883 INSN_ADDRESSES_NEW (insn, -1);
6884
6885 s390_end_pool (curr_pool, barrier);
6886 curr_pool = NULL;
6887 extra_size = 0;
6888 }
6889 }
6890 }
6891
6892 if (curr_pool)
6893 s390_end_pool (curr_pool, NULL_RTX);
6894 gcc_assert (!pending_ltrel);
6895
6896 /* Find all labels that are branched into
6897 from an insn belonging to a different chunk. */
6898
6899 far_labels = BITMAP_ALLOC (NULL);
6900
6901 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6902 {
6903 /* Labels marked with LABEL_PRESERVE_P can be the target
6904 of non-local jumps, so we have to mark them.
6905 The same holds for named labels.
6906
6907 Don't do that, however, if it is the label before
6908 a jump table. */
6909
6910 if (GET_CODE (insn) == CODE_LABEL
6911 && (LABEL_PRESERVE_P (insn) || LABEL_NAME (insn)))
6912 {
6913 rtx vec_insn = next_real_insn (insn);
6914 rtx vec_pat = vec_insn && GET_CODE (vec_insn) == JUMP_INSN ?
6915 PATTERN (vec_insn) : NULL_RTX;
6916 if (!vec_pat
6917 || !(GET_CODE (vec_pat) == ADDR_VEC
6918 || GET_CODE (vec_pat) == ADDR_DIFF_VEC))
6919 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (insn));
6920 }
6921
6922 /* If we have a direct jump (conditional or unconditional)
6923 or a casesi jump, check all potential targets. */
6924 else if (GET_CODE (insn) == JUMP_INSN)
6925 {
6926 rtx pat = PATTERN (insn);
6927 if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 2)
6928 pat = XVECEXP (pat, 0, 0);
6929
6930 if (GET_CODE (pat) == SET)
6931 {
6932 rtx label = JUMP_LABEL (insn);
6933 if (label)
6934 {
6935 if (s390_find_pool (pool_list, label)
6936 != s390_find_pool (pool_list, insn))
6937 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
6938 }
6939 }
6940 else if (GET_CODE (pat) == PARALLEL
6941 && XVECLEN (pat, 0) == 2
6942 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
6943 && GET_CODE (XVECEXP (pat, 0, 1)) == USE
6944 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == LABEL_REF)
6945 {
6946 /* Find the jump table used by this casesi jump. */
6947 rtx vec_label = XEXP (XEXP (XVECEXP (pat, 0, 1), 0), 0);
6948 rtx vec_insn = next_real_insn (vec_label);
6949 rtx vec_pat = vec_insn && GET_CODE (vec_insn) == JUMP_INSN ?
6950 PATTERN (vec_insn) : NULL_RTX;
6951 if (vec_pat
6952 && (GET_CODE (vec_pat) == ADDR_VEC
6953 || GET_CODE (vec_pat) == ADDR_DIFF_VEC))
6954 {
6955 int i, diff_p = GET_CODE (vec_pat) == ADDR_DIFF_VEC;
6956
6957 for (i = 0; i < XVECLEN (vec_pat, diff_p); i++)
6958 {
6959 rtx label = XEXP (XVECEXP (vec_pat, diff_p, i), 0);
6960
6961 if (s390_find_pool (pool_list, label)
6962 != s390_find_pool (pool_list, insn))
6963 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
6964 }
6965 }
6966 }
6967 }
6968 }
6969
6970 /* Insert base register reload insns before every pool. */
6971
6972 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
6973 {
6974 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
6975 curr_pool->label);
6976 rtx insn = curr_pool->first_insn;
6977 INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
6978 }
6979
6980 /* Insert base register reload insns at every far label. */
6981
6982 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6983 if (GET_CODE (insn) == CODE_LABEL
6984 && bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
6985 {
6986 struct constant_pool *pool = s390_find_pool (pool_list, insn);
6987 if (pool)
6988 {
6989 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
6990 pool->label);
6991 INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
6992 }
6993 }
6994
6995
6996 BITMAP_FREE (far_labels);
6997
6998
6999 /* Recompute insn addresses. */
7000
7001 init_insn_lengths ();
7002 shorten_branches (get_insns ());
7003
7004 return pool_list;
7005 }
7006
7007 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
7008 After we have decided to use this list, finish implementing
7009 all changes to the current function as required. */
7010
7011 static void
7012 s390_chunkify_finish (struct constant_pool *pool_list)
7013 {
7014 struct constant_pool *curr_pool = NULL;
7015 rtx insn;
7016
7017
7018 /* Replace all literal pool references. */
7019
7020 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7021 {
7022 if (INSN_P (insn))
7023 replace_ltrel_base (&PATTERN (insn));
7024
7025 curr_pool = s390_find_pool (pool_list, insn);
7026 if (!curr_pool)
7027 continue;
7028
7029 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
7030 {
7031 rtx addr, pool_ref = NULL_RTX;
7032 find_constant_pool_ref (PATTERN (insn), &pool_ref);
7033 if (pool_ref)
7034 {
7035 if (s390_execute_label (insn))
7036 addr = s390_find_execute (curr_pool, insn);
7037 else
7038 addr = s390_find_constant (curr_pool,
7039 get_pool_constant (pool_ref),
7040 get_pool_mode (pool_ref));
7041
7042 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
7043 INSN_CODE (insn) = -1;
7044 }
7045 }
7046 }
7047
7048 /* Dump out all literal pools. */
7049
7050 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
7051 s390_dump_pool (curr_pool, 0);
7052
7053 /* Free pool list. */
7054
7055 while (pool_list)
7056 {
7057 struct constant_pool *next = pool_list->next;
7058 s390_free_pool (pool_list);
7059 pool_list = next;
7060 }
7061 }
7062
7063 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
7064 We have decided we cannot use this list, so revert all changes
7065 to the current function that were done by s390_chunkify_start. */
7066
7067 static void
7068 s390_chunkify_cancel (struct constant_pool *pool_list)
7069 {
7070 struct constant_pool *curr_pool = NULL;
7071 rtx insn;
7072
7073 /* Remove all pool placeholder insns. */
7074
7075 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
7076 {
7077 /* Did we insert an extra barrier? Remove it. */
7078 rtx barrier = PREV_INSN (curr_pool->pool_insn);
7079 rtx jump = barrier? PREV_INSN (barrier) : NULL_RTX;
7080 rtx label = NEXT_INSN (curr_pool->pool_insn);
7081
7082 if (jump && GET_CODE (jump) == JUMP_INSN
7083 && barrier && GET_CODE (barrier) == BARRIER
7084 && label && GET_CODE (label) == CODE_LABEL
7085 && GET_CODE (PATTERN (jump)) == SET
7086 && SET_DEST (PATTERN (jump)) == pc_rtx
7087 && GET_CODE (SET_SRC (PATTERN (jump))) == LABEL_REF
7088 && XEXP (SET_SRC (PATTERN (jump)), 0) == label)
7089 {
7090 remove_insn (jump);
7091 remove_insn (barrier);
7092 remove_insn (label);
7093 }
7094
7095 remove_insn (curr_pool->pool_insn);
7096 }
7097
7098 /* Remove all base register reload insns. */
7099
7100 for (insn = get_insns (); insn; )
7101 {
7102 rtx next_insn = NEXT_INSN (insn);
7103
7104 if (GET_CODE (insn) == INSN
7105 && GET_CODE (PATTERN (insn)) == SET
7106 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
7107 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_RELOAD_BASE)
7108 remove_insn (insn);
7109
7110 insn = next_insn;
7111 }
7112
7113 /* Free pool list. */
7114
7115 while (pool_list)
7116 {
7117 struct constant_pool *next = pool_list->next;
7118 s390_free_pool (pool_list);
7119 pool_list = next;
7120 }
7121 }
7122
7123 /* Output the constant pool entry EXP in mode MODE with alignment ALIGN. */
7124
7125 void
7126 s390_output_pool_entry (rtx exp, enum machine_mode mode, unsigned int align)
7127 {
7128 REAL_VALUE_TYPE r;
7129
7130 switch (GET_MODE_CLASS (mode))
7131 {
7132 case MODE_FLOAT:
7133 case MODE_DECIMAL_FLOAT:
7134 gcc_assert (GET_CODE (exp) == CONST_DOUBLE);
7135
7136 REAL_VALUE_FROM_CONST_DOUBLE (r, exp);
7137 assemble_real (r, mode, align);
7138 break;
7139
7140 case MODE_INT:
7141 assemble_integer (exp, GET_MODE_SIZE (mode), align, 1);
7142 mark_symbol_refs_as_used (exp);
7143 break;
7144
7145 default:
7146 gcc_unreachable ();
7147 }
7148 }
7149
7150
7151 /* Return an RTL expression representing the value of the return address
7152 for the frame COUNT steps up from the current frame. FRAME is the
7153 frame pointer of that frame. */
7154
7155 rtx
7156 s390_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
7157 {
7158 int offset;
7159 rtx addr;
7160
7161 /* Without backchain, we fail for all but the current frame. */
7162
7163 if (!TARGET_BACKCHAIN && count > 0)
7164 return NULL_RTX;
7165
7166 /* For the current frame, we need to make sure the initial
7167 value of RETURN_REGNUM is actually saved. */
7168
7169 if (count == 0)
7170 {
7171 /* On non-z architectures branch splitting could overwrite r14. */
7172 if (TARGET_CPU_ZARCH)
7173 return get_hard_reg_initial_val (Pmode, RETURN_REGNUM);
7174 else
7175 {
7176 cfun_frame_layout.save_return_addr_p = true;
7177 return gen_rtx_MEM (Pmode, return_address_pointer_rtx);
7178 }
7179 }
7180
7181 if (TARGET_PACKED_STACK)
7182 offset = -2 * UNITS_PER_LONG;
7183 else
7184 offset = RETURN_REGNUM * UNITS_PER_LONG;
7185
7186 addr = plus_constant (Pmode, frame, offset);
7187 addr = memory_address (Pmode, addr);
7188 return gen_rtx_MEM (Pmode, addr);
7189 }
7190
7191 /* Return an RTL expression representing the back chain stored in
7192 the current stack frame. */
7193
7194 rtx
7195 s390_back_chain_rtx (void)
7196 {
7197 rtx chain;
7198
7199 gcc_assert (TARGET_BACKCHAIN);
7200
7201 if (TARGET_PACKED_STACK)
7202 chain = plus_constant (Pmode, stack_pointer_rtx,
7203 STACK_POINTER_OFFSET - UNITS_PER_LONG);
7204 else
7205 chain = stack_pointer_rtx;
7206
7207 chain = gen_rtx_MEM (Pmode, chain);
7208 return chain;
7209 }
7210
7211 /* Find the first call-clobbered register unused in the current function.
7212 This could be used as base register in a leaf function
7213 or for holding the return address before the epilogue. */
7214
7215 static int
7216 find_unused_clobbered_reg (void)
7217 {
7218 int i;
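/* gprs 0 through 5 are call-clobbered. */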
7219 for (i = 0; i < 6; i++)
7220 if (!df_regs_ever_live_p (i))
7221 return i;
7222 return 0;
7223 }
7224
7225
7226 /* Helper function for s390_regs_ever_clobbered. Sets the fields in DATA for all
7227 clobbered hard regs in SETREG. */
7228
7229 static void
7230 s390_reg_clobbered_rtx (rtx setreg, const_rtx set_insn ATTRIBUTE_UNUSED, void *data)
7231 {
7232 int *regs_ever_clobbered = (int *)data;
7233 unsigned int i, regno;
7234 enum machine_mode mode = GET_MODE (setreg);
7235
7236 if (GET_CODE (setreg) == SUBREG)
7237 {
7238 rtx inner = SUBREG_REG (setreg);
7239 if (!GENERAL_REG_P (inner))
7240 return;
7241 regno = subreg_regno (setreg);
7242 }
7243 else if (GENERAL_REG_P (setreg))
7244 regno = REGNO (setreg);
7245 else
7246 return;
7247
7248 for (i = regno;
7249 i < regno + HARD_REGNO_NREGS (regno, mode);
7250 i++)
7251 regs_ever_clobbered[i] = 1;
7252 }
7253
7254 /* Walks through all basic blocks of the current function looking
7255 for clobbered hard regs using s390_reg_clobbered_rtx. The fields
7256 of the passed integer array REGS_EVER_CLOBBERED are set to one for
7257 each of those regs. */
7258
7259 static void
7260 s390_regs_ever_clobbered (int *regs_ever_clobbered)
7261 {
7262 basic_block cur_bb;
7263 rtx cur_insn;
7264 unsigned int i;
7265
7266 memset (regs_ever_clobbered, 0, 16 * sizeof (int));
7267
7268 /* For non-leaf functions we have to consider all call clobbered regs to be
7269 clobbered. */
7270 if (!crtl->is_leaf)
7271 {
7272 for (i = 0; i < 16; i++)
7273 regs_ever_clobbered[i] = call_really_used_regs[i];
7274 }
7275
7276 /* Make the "magic" eh_return registers live if necessary. For regs_ever_live
7277 this work is done by liveness analysis (mark_regs_live_at_end).
7278 Special care is needed for functions containing landing pads. Landing pads
7279 may use the eh registers, but the code which sets these registers is not
7280 contained in that function. Hence s390_regs_ever_clobbered is not able to
7281 deal with this automatically. */
7282 if (crtl->calls_eh_return || cfun->machine->has_landing_pad_p)
7283 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM ; i++)
7284 if (crtl->calls_eh_return
7285 || (cfun->machine->has_landing_pad_p
7286 && df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
7287 regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;
7288
7289 /* For nonlocal gotos all call-saved registers have to be saved.
7290 This flag is also set for the unwinding code in libgcc.
7291 See expand_builtin_unwind_init. For regs_ever_live this is done by
7292 reload. */
7293 if (cfun->has_nonlocal_label)
7294 for (i = 0; i < 16; i++)
7295 if (!call_really_used_regs[i])
7296 regs_ever_clobbered[i] = 1;
7297
7298 FOR_EACH_BB (cur_bb)
7299 {
7300 FOR_BB_INSNS (cur_bb, cur_insn)
7301 {
7302 if (INSN_P (cur_insn))
7303 note_stores (PATTERN (cur_insn),
7304 s390_reg_clobbered_rtx,
7305 regs_ever_clobbered);
7306 }
7307 }
7308 }
7309
7310 /* Determine the frame area which actually has to be accessed
7311 in the function epilogue. The values are stored at the
7312 given pointers AREA_BOTTOM (address of the lowest used stack
7313 address) and AREA_TOP (address of the first item which does
7314 not belong to the stack frame). */
7315
7316 static void
7317 s390_frame_area (int *area_bottom, int *area_top)
7318 {
7319 int b, t;
7320 int i;
7321
7322 b = INT_MAX;
7323 t = INT_MIN;
7324
7325 if (cfun_frame_layout.first_restore_gpr != -1)
7326 {
7327 b = (cfun_frame_layout.gprs_offset
7328 + cfun_frame_layout.first_restore_gpr * UNITS_PER_LONG);
7329 t = b + (cfun_frame_layout.last_restore_gpr
7330 - cfun_frame_layout.first_restore_gpr + 1) * UNITS_PER_LONG;
7331 }
7332
7333 if (TARGET_64BIT && cfun_save_high_fprs_p)
7334 {
7335 b = MIN (b, cfun_frame_layout.f8_offset);
7336 t = MAX (t, (cfun_frame_layout.f8_offset
7337 + cfun_frame_layout.high_fprs * 8));
7338 }
7339
7340 if (!TARGET_64BIT)
7341 for (i = 2; i < 4; i++)
7342 if (cfun_fpr_bit_p (i))
7343 {
7344 b = MIN (b, cfun_frame_layout.f4_offset + (i - 2) * 8);
7345 t = MAX (t, cfun_frame_layout.f4_offset + (i - 1) * 8);
7346 }
7347
7348 *area_bottom = b;
7349 *area_top = t;
7350 }
7351
7352 /* Fill cfun->machine with info about register usage of current function.
7353 Return in CLOBBERED_REGS which GPRs are currently considered set. */
7354
7355 static void
7356 s390_register_info (int clobbered_regs[])
7357 {
7358 int i, j;
7359
7360 /* fprs 8-15 are call-saved under the 64-bit ABI. */
7361 cfun_frame_layout.fpr_bitmap = 0;
7362 cfun_frame_layout.high_fprs = 0;
7363 if (TARGET_64BIT)
7364 for (i = 24; i < 32; i++)
7365 if (df_regs_ever_live_p (i) && !global_regs[i])
7366 {
7367 cfun_set_fpr_bit (i - 16);
7368 cfun_frame_layout.high_fprs++;
7369 }
7370
7371 /* Find first and last gpr to be saved. We trust regs_ever_live
7372 data, except that we don't save and restore global registers.
7373
7374 Also, all registers with special meaning to the compiler need
7375 to be handled specially. */
7376
7377 s390_regs_ever_clobbered (clobbered_regs);
7378
7379 for (i = 0; i < 16; i++)
7380 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i] && !fixed_regs[i];
7381
7382 if (frame_pointer_needed)
7383 clobbered_regs[HARD_FRAME_POINTER_REGNUM] = 1;
7384
7385 if (flag_pic)
7386 clobbered_regs[PIC_OFFSET_TABLE_REGNUM]
7387 |= df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM);
7388
7389 clobbered_regs[BASE_REGNUM]
7390 |= (cfun->machine->base_reg
7391 && REGNO (cfun->machine->base_reg) == BASE_REGNUM);
7392
7393 clobbered_regs[RETURN_REGNUM]
7394 |= (!crtl->is_leaf
7395 || TARGET_TPF_PROFILING
7396 || cfun->machine->split_branches_pending_p
7397 || cfun_frame_layout.save_return_addr_p
7398 || crtl->calls_eh_return
7399 || cfun->stdarg);
7400
7401 clobbered_regs[STACK_POINTER_REGNUM]
7402 |= (!crtl->is_leaf
7403 || TARGET_TPF_PROFILING
7404 || cfun_save_high_fprs_p
7405 || get_frame_size () > 0
7406 || cfun->calls_alloca
7407 || cfun->stdarg);
7408
7409 for (i = 6; i < 16; i++)
7410 if (df_regs_ever_live_p (i) || clobbered_regs[i])
7411 break;
7412 for (j = 15; j > i; j--)
7413 if (df_regs_ever_live_p (j) || clobbered_regs[j])
7414 break;
7415
7416 if (i == 16)
7417 {
7418 /* Nothing to save/restore. */
7419 cfun_frame_layout.first_save_gpr_slot = -1;
7420 cfun_frame_layout.last_save_gpr_slot = -1;
7421 cfun_frame_layout.first_save_gpr = -1;
7422 cfun_frame_layout.first_restore_gpr = -1;
7423 cfun_frame_layout.last_save_gpr = -1;
7424 cfun_frame_layout.last_restore_gpr = -1;
7425 }
7426 else
7427 {
7428 /* Save slots for gprs from i to j. */
7429 cfun_frame_layout.first_save_gpr_slot = i;
7430 cfun_frame_layout.last_save_gpr_slot = j;
7431
7432 for (i = cfun_frame_layout.first_save_gpr_slot;
7433 i < cfun_frame_layout.last_save_gpr_slot + 1;
7434 i++)
7435 if (clobbered_regs[i])
7436 break;
7437
7438 for (j = cfun_frame_layout.last_save_gpr_slot; j > i; j--)
7439 if (clobbered_regs[j])
7440 break;
7441
7442 if (i == cfun_frame_layout.last_save_gpr_slot + 1)
7443 {
7444 /* Nothing to save/restore. */
7445 cfun_frame_layout.first_save_gpr = -1;
7446 cfun_frame_layout.first_restore_gpr = -1;
7447 cfun_frame_layout.last_save_gpr = -1;
7448 cfun_frame_layout.last_restore_gpr = -1;
7449 }
7450 else
7451 {
7452 /* Save / Restore from gpr i to j. */
7453 cfun_frame_layout.first_save_gpr = i;
7454 cfun_frame_layout.first_restore_gpr = i;
7455 cfun_frame_layout.last_save_gpr = j;
7456 cfun_frame_layout.last_restore_gpr = j;
7457 }
7458 }
7459
7460 if (cfun->stdarg)
7461 {
7462 /* Varargs functions need to save gprs 2 to 6. */
7463 if (cfun->va_list_gpr_size
7464 && crtl->args.info.gprs < GP_ARG_NUM_REG)
7465 {
7466 int min_gpr = crtl->args.info.gprs;
7467 int max_gpr = min_gpr + cfun->va_list_gpr_size;
7468 if (max_gpr > GP_ARG_NUM_REG)
7469 max_gpr = GP_ARG_NUM_REG;
7470
7471 if (cfun_frame_layout.first_save_gpr == -1
7472 || cfun_frame_layout.first_save_gpr > 2 + min_gpr)
7473 {
7474 cfun_frame_layout.first_save_gpr = 2 + min_gpr;
7475 cfun_frame_layout.first_save_gpr_slot = 2 + min_gpr;
7476 }
7477
7478 if (cfun_frame_layout.last_save_gpr == -1
7479 || cfun_frame_layout.last_save_gpr < 2 + max_gpr - 1)
7480 {
7481 cfun_frame_layout.last_save_gpr = 2 + max_gpr - 1;
7482 cfun_frame_layout.last_save_gpr_slot = 2 + max_gpr - 1;
7483 }
7484 }
7485
7486 /* Mark f0, f2 for 31 bit and f0-f4 for 64 bit to be saved. */
7487 if (TARGET_HARD_FLOAT && cfun->va_list_fpr_size
7488 && crtl->args.info.fprs < FP_ARG_NUM_REG)
7489 {
7490 int min_fpr = crtl->args.info.fprs;
7491 int max_fpr = min_fpr + cfun->va_list_fpr_size;
7492 if (max_fpr > FP_ARG_NUM_REG)
7493 max_fpr = FP_ARG_NUM_REG;
7494
7495 /* ??? This is currently required to ensure proper location
7496 of the fpr save slots within the va_list save area. */
7497 if (TARGET_PACKED_STACK)
7498 min_fpr = 0;
7499
7500 for (i = min_fpr; i < max_fpr; i++)
7501 cfun_set_fpr_bit (i);
7502 }
7503 }
7504
7505 if (!TARGET_64BIT)
7506 for (i = 2; i < 4; i++)
7507 if (df_regs_ever_live_p (i + 16) && !global_regs[i + 16])
7508 cfun_set_fpr_bit (i);
7509 }
7510
7511 /* Fill cfun->machine with info about frame of current function. */
7512
7513 static void
7514 s390_frame_info (void)
7515 {
7516 int i;
7517
7518 cfun_frame_layout.frame_size = get_frame_size ();
7519 if (!TARGET_64BIT && cfun_frame_layout.frame_size > 0x7fff0000)
7520 fatal_error ("total size of local variables exceeds architecture limit");
7521
7522 if (!TARGET_PACKED_STACK)
7523 {
7524 cfun_frame_layout.backchain_offset = 0;
7525 cfun_frame_layout.f0_offset = 16 * UNITS_PER_LONG;
7526 cfun_frame_layout.f4_offset = cfun_frame_layout.f0_offset + 2 * 8;
7527 cfun_frame_layout.f8_offset = -cfun_frame_layout.high_fprs * 8;
7528 cfun_frame_layout.gprs_offset = (cfun_frame_layout.first_save_gpr_slot
7529 * UNITS_PER_LONG);
7530 }
7531 else if (TARGET_BACKCHAIN) /* kernel stack layout */
7532 {
7533 cfun_frame_layout.backchain_offset = (STACK_POINTER_OFFSET
7534 - UNITS_PER_LONG);
7535 cfun_frame_layout.gprs_offset
7536 = (cfun_frame_layout.backchain_offset
7537 - (STACK_POINTER_REGNUM - cfun_frame_layout.first_save_gpr_slot + 1)
7538 * UNITS_PER_LONG);
7539
7540 if (TARGET_64BIT)
7541 {
7542 cfun_frame_layout.f4_offset
7543 = (cfun_frame_layout.gprs_offset
7544 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7545
7546 cfun_frame_layout.f0_offset
7547 = (cfun_frame_layout.f4_offset
7548 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7549 }
7550 else
7551 {
7552 /* On 31 bit we have to take care of the alignment of the
7553 floating point regs to provide the fastest access. */
7554 cfun_frame_layout.f0_offset
7555 = ((cfun_frame_layout.gprs_offset
7556 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1))
7557 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7558
7559 cfun_frame_layout.f4_offset
7560 = (cfun_frame_layout.f0_offset
7561 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7562 }
7563 }
7564 else /* no backchain */
7565 {
7566 cfun_frame_layout.f4_offset
7567 = (STACK_POINTER_OFFSET
7568 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7569
7570 cfun_frame_layout.f0_offset
7571 = (cfun_frame_layout.f4_offset
7572 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7573
7574 cfun_frame_layout.gprs_offset
7575 = cfun_frame_layout.f0_offset - cfun_gprs_save_area_size;
7576 }
7577
7578 if (crtl->is_leaf
7579 && !TARGET_TPF_PROFILING
7580 && cfun_frame_layout.frame_size == 0
7581 && !cfun_save_high_fprs_p
7582 && !cfun->calls_alloca
7583 && !cfun->stdarg)
7584 return;
7585
7586 if (!TARGET_PACKED_STACK)
7587 cfun_frame_layout.frame_size += (STACK_POINTER_OFFSET
7588 + crtl->outgoing_args_size
7589 + cfun_frame_layout.high_fprs * 8);
7590 else
7591 {
7592 if (TARGET_BACKCHAIN)
7593 cfun_frame_layout.frame_size += UNITS_PER_LONG;
7594
7595 /* No alignment trouble here because f8-f15 are only saved under
7596 64 bit. */
7597 cfun_frame_layout.f8_offset = (MIN (MIN (cfun_frame_layout.f0_offset,
7598 cfun_frame_layout.f4_offset),
7599 cfun_frame_layout.gprs_offset)
7600 - cfun_frame_layout.high_fprs * 8);
7601
7602 cfun_frame_layout.frame_size += cfun_frame_layout.high_fprs * 8;
7603
7604 for (i = 0; i < 8; i++)
7605 if (cfun_fpr_bit_p (i))
7606 cfun_frame_layout.frame_size += 8;
7607
7608 cfun_frame_layout.frame_size += cfun_gprs_save_area_size;
7609
7610 /* If under 31 bit an odd number of gprs has to be saved, we have to adjust
7611 the frame size to sustain 8-byte alignment of stack frames. */
7612 cfun_frame_layout.frame_size = ((cfun_frame_layout.frame_size +
7613 STACK_BOUNDARY / BITS_PER_UNIT - 1)
7614 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1));
7615
7616 cfun_frame_layout.frame_size += crtl->outgoing_args_size;
7617 }
7618 }
7619
7620 /* Generate frame layout. Fills in register and frame data for the current
7621 function in cfun->machine. This routine can be called multiple times;
7622 it will re-do the complete frame layout every time. */
7623
7624 static void
7625 s390_init_frame_layout (void)
7626 {
7627 HOST_WIDE_INT frame_size;
7628 int base_used;
7629 int clobbered_regs[16];
7630
7631 /* On S/390 machines, we may need to perform branch splitting, which
7632 will require both the base and the return address register. We have no
7633 choice but to assume we're going to need them until right at the
7634 end of the machine-dependent reorg phase. */
7635 if (!TARGET_CPU_ZARCH)
7636 cfun->machine->split_branches_pending_p = true;
7637
7638 do
7639 {
7640 frame_size = cfun_frame_layout.frame_size;
7641
7642 /* Try to predict whether we'll need the base register. */
7643 base_used = cfun->machine->split_branches_pending_p
7644 || crtl->uses_const_pool
7645 || (!DISP_IN_RANGE (frame_size)
7646 && !CONST_OK_FOR_K (frame_size));
7647
7648 /* Decide which register to use as literal pool base. In small
7649 leaf functions, try to use an unused call-clobbered register
7650 as base register to avoid save/restore overhead. */
7651 if (!base_used)
7652 cfun->machine->base_reg = NULL_RTX;
7653 else if (crtl->is_leaf && !df_regs_ever_live_p (5))
7654 cfun->machine->base_reg = gen_rtx_REG (Pmode, 5);
7655 else
7656 cfun->machine->base_reg = gen_rtx_REG (Pmode, BASE_REGNUM);
7657
7658 s390_register_info (clobbered_regs);
7659 s390_frame_info ();
7660 }
7661 while (frame_size != cfun_frame_layout.frame_size);
7662 }
7663
7664 /* Update frame layout. Recompute actual register save data based on
7665 current info and update regs_ever_live for the special registers.
7666 May be called multiple times, but may never cause *more* registers
7667 to be saved than s390_init_frame_layout allocated room for. */
7668
7669 static void
7670 s390_update_frame_layout (void)
7671 {
7672 int clobbered_regs[16];
7673
7674 s390_register_info (clobbered_regs);
7675
7676 df_set_regs_ever_live (BASE_REGNUM,
7677 clobbered_regs[BASE_REGNUM] ? true : false);
7678 df_set_regs_ever_live (RETURN_REGNUM,
7679 clobbered_regs[RETURN_REGNUM] ? true : false);
7680 df_set_regs_ever_live (STACK_POINTER_REGNUM,
7681 clobbered_regs[STACK_POINTER_REGNUM] ? true : false);
7682
7683 if (cfun->machine->base_reg)
7684 df_set_regs_ever_live (REGNO (cfun->machine->base_reg), true);
7685 }
7686
7687 /* Return true if it is legal to put a value with MODE into REGNO. */
7688
7689 bool
7690 s390_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
7691 {
7692 switch (REGNO_REG_CLASS (regno))
7693 {
7694 case FP_REGS:
7695 if (REGNO_PAIR_OK (regno, mode))
7696 {
7697 if (mode == SImode || mode == DImode)
7698 return true;
7699
7700 if (FLOAT_MODE_P (mode) && GET_MODE_CLASS (mode) != MODE_VECTOR_FLOAT)
7701 return true;
7702 }
7703 break;
7704 case ADDR_REGS:
7705 if (FRAME_REGNO_P (regno) && mode == Pmode)
7706 return true;
7707
7708 /* fallthrough */
7709 case GENERAL_REGS:
7710 if (REGNO_PAIR_OK (regno, mode))
7711 {
7712 if (TARGET_ZARCH
7713 || (mode != TFmode && mode != TCmode && mode != TDmode))
7714 return true;
7715 }
7716 break;
7717 case CC_REGS:
7718 if (GET_MODE_CLASS (mode) == MODE_CC)
7719 return true;
7720 break;
7721 case ACCESS_REGS:
7722 if (REGNO_PAIR_OK (regno, mode))
7723 {
7724 if (mode == SImode || mode == Pmode)
7725 return true;
7726 }
7727 break;
7728 default:
7729 return false;
7730 }
7731
7732 return false;
7733 }
7734
7735 /* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */
7736
7737 bool
7738 s390_hard_regno_rename_ok (unsigned int old_reg, unsigned int new_reg)
7739 {
7740 /* Once we've decided upon a register to use as base register, it must
7741 no longer be used for any other purpose. */
7742 if (cfun->machine->base_reg)
7743 if (REGNO (cfun->machine->base_reg) == old_reg
7744 || REGNO (cfun->machine->base_reg) == new_reg)
7745 return false;
7746
7747 return true;
7748 }
7749
7750 /* Maximum number of registers to represent a value of mode MODE
7751 in a register of class RCLASS. */
7752
7753 int
7754 s390_class_max_nregs (enum reg_class rclass, enum machine_mode mode)
7755 {
7756 switch (rclass)
7757 {
7758 case FP_REGS:
7759 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
7760 return 2 * ((GET_MODE_SIZE (mode) / 2 + 8 - 1) / 8);
7761 else
7762 return (GET_MODE_SIZE (mode) + 8 - 1) / 8;
7763 case ACCESS_REGS:
7764 return (GET_MODE_SIZE (mode) + 4 - 1) / 4;
7765 default:
7766 break;
7767 }
7768 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
7769 }
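/* Worked examples, following directly from the formulas above: a TFmode
   value (16 bytes) in FP_REGS needs (16 + 8 - 1) / 8 = 2 registers, a
   complex DFmode value (16 bytes) needs 2 * ((16 / 2 + 8 - 1) / 8) = 2,
   and an SImode value in ACCESS_REGS needs (4 + 4 - 1) / 4 = 1.  */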
7770
7771 /* Return true if register FROM can be eliminated via register TO. */
7772
7773 static bool
7774 s390_can_eliminate (const int from, const int to)
7775 {
7776 /* On zSeries machines, we have not marked the base register as fixed.
7777 Instead, we have an elimination rule BASE_REGNUM -> BASE_REGNUM.
7778 If a function requires the base register, we say here that this
7779 elimination cannot be performed. This will cause reload to free
7780 up the base register (as if it were fixed). On the other hand,
7781 if the current function does *not* require the base register, we
7782 say here the elimination succeeds, which in turn allows reload
7783 to allocate the base register for any other purpose. */
7784 if (from == BASE_REGNUM && to == BASE_REGNUM)
7785 {
7786 if (TARGET_CPU_ZARCH)
7787 {
7788 s390_init_frame_layout ();
7789 return cfun->machine->base_reg == NULL_RTX;
7790 }
7791
7792 return false;
7793 }
7794
7795 /* Everything else must point into the stack frame. */
7796 gcc_assert (to == STACK_POINTER_REGNUM
7797 || to == HARD_FRAME_POINTER_REGNUM);
7798
7799 gcc_assert (from == FRAME_POINTER_REGNUM
7800 || from == ARG_POINTER_REGNUM
7801 || from == RETURN_ADDRESS_POINTER_REGNUM);
7802
7803 /* Make sure we actually saved the return address. */
7804 if (from == RETURN_ADDRESS_POINTER_REGNUM)
7805 if (!crtl->calls_eh_return
7806 && !cfun->stdarg
7807 && !cfun_frame_layout.save_return_addr_p)
7808 return false;
7809
7810 return true;
7811 }
7812
7813 /* Return the offset between registers FROM and TO initially after the prologue. */
7814
7815 HOST_WIDE_INT
7816 s390_initial_elimination_offset (int from, int to)
7817 {
7818 HOST_WIDE_INT offset;
7819 int index;
7820
7821 /* ??? Why are we called for non-eliminable pairs? */
7822 if (!s390_can_eliminate (from, to))
7823 return 0;
7824
7825 switch (from)
7826 {
7827 case FRAME_POINTER_REGNUM:
7828 offset = (get_frame_size()
7829 + STACK_POINTER_OFFSET
7830 + crtl->outgoing_args_size);
7831 break;
7832
7833 case ARG_POINTER_REGNUM:
7834 s390_init_frame_layout ();
7835 offset = cfun_frame_layout.frame_size + STACK_POINTER_OFFSET;
7836 break;
7837
7838 case RETURN_ADDRESS_POINTER_REGNUM:
7839 s390_init_frame_layout ();
7840 index = RETURN_REGNUM - cfun_frame_layout.first_save_gpr_slot;
7841 gcc_assert (index >= 0);
7842 offset = cfun_frame_layout.frame_size + cfun_frame_layout.gprs_offset;
7843 offset += index * UNITS_PER_LONG;
7844 break;
7845
7846 case BASE_REGNUM:
7847 offset = 0;
7848 break;
7849
7850 default:
7851 gcc_unreachable ();
7852 }
7853
7854 return offset;
7855 }
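/* A worked example with hypothetical numbers: with -m64 STACK_POINTER_OFFSET
   is 160, so for a function whose frame_size is 320 the elimination
   ARG_POINTER_REGNUM -> STACK_POINTER_REGNUM yields an offset of 480.  */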
7856
7857 /* Emit insn to save fpr REGNUM at offset OFFSET relative
7858 to register BASE. Return generated insn. */
7859
7860 static rtx
7861 save_fpr (rtx base, int offset, int regnum)
7862 {
7863 rtx addr;
7864 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
7865
7866 if (regnum >= 16 && regnum <= (16 + FP_ARG_NUM_REG))
7867 set_mem_alias_set (addr, get_varargs_alias_set ());
7868 else
7869 set_mem_alias_set (addr, get_frame_alias_set ());
7870
7871 return emit_move_insn (addr, gen_rtx_REG (DFmode, regnum));
7872 }
7873
7874 /* Emit insn to restore fpr REGNUM from offset OFFSET relative
7875 to register BASE. Return generated insn. */
7876
7877 static rtx
7878 restore_fpr (rtx base, int offset, int regnum)
7879 {
7880 rtx addr;
7881 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
7882 set_mem_alias_set (addr, get_frame_alias_set ());
7883
7884 return emit_move_insn (gen_rtx_REG (DFmode, regnum), addr);
7885 }
7886
7887 /* Return true if REGNO is a global register, but not one
7888 of the special ones that need to be saved/restored anyway. */
7889
7890 static inline bool
7891 global_not_special_regno_p (int regno)
7892 {
7893 return (global_regs[regno]
7894 /* These registers are special and need to be
7895 restored in any case. */
7896 && !(regno == STACK_POINTER_REGNUM
7897 || regno == RETURN_REGNUM
7898 || regno == BASE_REGNUM
7899 || (flag_pic && regno == (int)PIC_OFFSET_TABLE_REGNUM)));
7900 }
7901
7902 /* Generate insn to save registers FIRST to LAST into
7903 the register save area located at offset OFFSET
7904 relative to register BASE. */
7905
7906 static rtx
7907 save_gprs (rtx base, int offset, int first, int last)
7908 {
7909 rtx addr, insn, note;
7910 int i;
7911
7912 addr = plus_constant (Pmode, base, offset);
7913 addr = gen_rtx_MEM (Pmode, addr);
7914
7915 set_mem_alias_set (addr, get_frame_alias_set ());
7916
7917 /* Special-case single register. */
7918 if (first == last)
7919 {
7920 if (TARGET_64BIT)
7921 insn = gen_movdi (addr, gen_rtx_REG (Pmode, first));
7922 else
7923 insn = gen_movsi (addr, gen_rtx_REG (Pmode, first));
7924
7925 if (!global_not_special_regno_p (first))
7926 RTX_FRAME_RELATED_P (insn) = 1;
7927 return insn;
7928 }
7929
7930
7931 insn = gen_store_multiple (addr,
7932 gen_rtx_REG (Pmode, first),
7933 GEN_INT (last - first + 1));
7934
7935 if (first <= 6 && cfun->stdarg)
7936 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
7937 {
7938 rtx mem = XEXP (XVECEXP (PATTERN (insn), 0, i), 0);
7939
7940 if (first + i <= 6)
7941 set_mem_alias_set (mem, get_varargs_alias_set ());
7942 }
7943
7944 /* We need to set the FRAME_RELATED flag on all SETs
7945 inside the store-multiple pattern.
7946
7947 However, we must not emit DWARF records for registers 2..5
7948 if they are stored for use by variable arguments ...
7949
7950 ??? Unfortunately, it is not enough to simply not set the
7951 FRAME_RELATED flags for those SETs, because the first SET
7952 of the PARALLEL is always treated as if it had the flag
7953 set, even if it does not. Therefore we emit a new pattern
7954 without those registers as REG_FRAME_RELATED_EXPR note. */
7955
7956 if (first >= 6 && !global_not_special_regno_p (first))
7957 {
7958 rtx pat = PATTERN (insn);
7959
7960 for (i = 0; i < XVECLEN (pat, 0); i++)
7961 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
7962 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (pat,
7963 0, i)))))
7964 RTX_FRAME_RELATED_P (XVECEXP (pat, 0, i)) = 1;
7965
7966 RTX_FRAME_RELATED_P (insn) = 1;
7967 }
7968 else if (last >= 6)
7969 {
7970 int start;
7971
7972 for (start = first >= 6 ? first : 6; start <= last; start++)
7973 if (!global_not_special_regno_p (start))
7974 break;
7975
7976 if (start > last)
7977 return insn;
7978
7979 addr = plus_constant (Pmode, base,
7980 offset + (start - first) * UNITS_PER_LONG);
7981 note = gen_store_multiple (gen_rtx_MEM (Pmode, addr),
7982 gen_rtx_REG (Pmode, start),
7983 GEN_INT (last - start + 1));
7984 note = PATTERN (note);
7985
7986 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
7987
7988 for (i = 0; i < XVECLEN (note, 0); i++)
7989 if (GET_CODE (XVECEXP (note, 0, i)) == SET
7990 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (note,
7991 0, i)))))
7992 RTX_FRAME_RELATED_P (XVECEXP (note, 0, i)) = 1;
7993
7994 RTX_FRAME_RELATED_P (insn) = 1;
7995 }
7996
7997 return insn;
7998 }
7999
8000 /* Generate insn to restore registers FIRST to LAST from
8001 the register save area located at offset OFFSET
8002 relative to register BASE. */
8003
8004 static rtx
8005 restore_gprs (rtx base, int offset, int first, int last)
8006 {
8007 rtx addr, insn;
8008
8009 addr = plus_constant (Pmode, base, offset);
8010 addr = gen_rtx_MEM (Pmode, addr);
8011 set_mem_alias_set (addr, get_frame_alias_set ());
8012
8013 /* Special-case single register. */
8014 if (first == last)
8015 {
8016 if (TARGET_64BIT)
8017 insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
8018 else
8019 insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);
8020
8021 return insn;
8022 }
8023
8024 insn = gen_load_multiple (gen_rtx_REG (Pmode, first),
8025 addr,
8026 GEN_INT (last - first + 1));
8027 return insn;
8028 }
8029
8030 /* Return insn sequence to load the GOT register. */
8031
8032 static GTY(()) rtx got_symbol;
8033 rtx
8034 s390_load_got (void)
8035 {
8036 rtx insns;
8037
8038 /* We cannot use pic_offset_table_rtx here since we use this
8039 function also for non-pic if __tls_get_offset is called and in
8040 that case PIC_OFFSET_TABLE_REGNUM as well as pic_offset_table_rtx
8041 aren't usable. */
8042 rtx got_rtx = gen_rtx_REG (Pmode, 12);
8043
8044 if (!got_symbol)
8045 {
8046 got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
8047 SYMBOL_REF_FLAGS (got_symbol) = SYMBOL_FLAG_LOCAL;
8048 }
8049
8050 start_sequence ();
8051
8052 if (TARGET_CPU_ZARCH)
8053 {
8054 emit_move_insn (got_rtx, got_symbol);
8055 }
8056 else
8057 {
8058 rtx offset;
8059
8060 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got_symbol),
8061 UNSPEC_LTREL_OFFSET);
8062 offset = gen_rtx_CONST (Pmode, offset);
8063 offset = force_const_mem (Pmode, offset);
8064
8065 emit_move_insn (got_rtx, offset);
8066
8067 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (offset, 0)),
8068 UNSPEC_LTREL_BASE);
8069 offset = gen_rtx_PLUS (Pmode, got_rtx, offset);
8070
8071 emit_move_insn (got_rtx, offset);
8072 }
8073
8074 insns = get_insns ();
8075 end_sequence ();
8076 return insns;
8077 }
8078
8079 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
8080 and the change to the stack pointer. */
8081
8082 static void
8083 s390_emit_stack_tie (void)
8084 {
8085 rtx mem = gen_frame_mem (BLKmode,
8086 gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
8087
8088 emit_insn (gen_stack_tie (mem));
8089 }
8090
8091 /* Expand the prologue into a bunch of separate insns. */
8092
8093 void
8094 s390_emit_prologue (void)
8095 {
8096 rtx insn, addr;
8097 rtx temp_reg;
8098 int i;
8099 int offset;
8100 int next_fpr = 0;
8101
8102 /* Complete frame layout. */
8103
8104 s390_update_frame_layout ();
8105
8106 /* Annotate all constant pool references to let the scheduler know
8107 they implicitly use the base register. */
8108
8109 push_topmost_sequence ();
8110
8111 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8112 if (INSN_P (insn))
8113 {
8114 annotate_constant_pool_refs (&PATTERN (insn));
8115 df_insn_rescan (insn);
8116 }
8117
8118 pop_topmost_sequence ();
8119
8120 /* Choose the best register to use as a temporary within the prologue.
8121 See below for why TPF must use register 1. */
8122
8123 if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
8124 && !crtl->is_leaf
8125 && !TARGET_TPF_PROFILING)
8126 temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
8127 else
8128 temp_reg = gen_rtx_REG (Pmode, 1);
8129
8130 /* Save call saved gprs. */
8131 if (cfun_frame_layout.first_save_gpr != -1)
8132 {
8133 insn = save_gprs (stack_pointer_rtx,
8134 cfun_frame_layout.gprs_offset +
8135 UNITS_PER_LONG * (cfun_frame_layout.first_save_gpr
8136 - cfun_frame_layout.first_save_gpr_slot),
8137 cfun_frame_layout.first_save_gpr,
8138 cfun_frame_layout.last_save_gpr);
8139 emit_insn (insn);
8140 }
8141
8142 /* Dummy insn to mark literal pool slot. */
8143
8144 if (cfun->machine->base_reg)
8145 emit_insn (gen_main_pool (cfun->machine->base_reg));
8146
8147 offset = cfun_frame_layout.f0_offset;
8148
8149 /* Save f0 and f2. */
8150 for (i = 0; i < 2; i++)
8151 {
8152 if (cfun_fpr_bit_p (i))
8153 {
8154 save_fpr (stack_pointer_rtx, offset, i + 16);
8155 offset += 8;
8156 }
8157 else if (!TARGET_PACKED_STACK)
8158 offset += 8;
8159 }
8160
8161 /* Save f4 and f6. */
8162 offset = cfun_frame_layout.f4_offset;
8163 for (i = 2; i < 4; i++)
8164 {
8165 if (cfun_fpr_bit_p (i))
8166 {
8167 insn = save_fpr (stack_pointer_rtx, offset, i + 16);
8168 offset += 8;
8169
8170 /* If f4 and f6 are call-clobbered, they are saved due to stdarg and
8171 are therefore not frame related. */
8172 if (!call_really_used_regs[i + 16])
8173 RTX_FRAME_RELATED_P (insn) = 1;
8174 }
8175 else if (!TARGET_PACKED_STACK)
8176 offset += 8;
8177 }
8178
8179 if (TARGET_PACKED_STACK
8180 && cfun_save_high_fprs_p
8181 && cfun_frame_layout.f8_offset + cfun_frame_layout.high_fprs * 8 > 0)
8182 {
8183 offset = (cfun_frame_layout.f8_offset
8184 + (cfun_frame_layout.high_fprs - 1) * 8);
8185
8186 for (i = 15; i > 7 && offset >= 0; i--)
8187 if (cfun_fpr_bit_p (i))
8188 {
8189 insn = save_fpr (stack_pointer_rtx, offset, i + 16);
8190
8191 RTX_FRAME_RELATED_P (insn) = 1;
8192 offset -= 8;
8193 }
8194 if (offset >= cfun_frame_layout.f8_offset)
8195 next_fpr = i + 16;
8196 }
8197
8198 if (!TARGET_PACKED_STACK)
8199 next_fpr = cfun_save_high_fprs_p ? 31 : 0;
8200
8201 if (flag_stack_usage_info)
8202 current_function_static_stack_size = cfun_frame_layout.frame_size;
8203
8204 /* Decrement stack pointer. */
8205
8206 if (cfun_frame_layout.frame_size > 0)
8207 {
8208 rtx frame_off = GEN_INT (-cfun_frame_layout.frame_size);
8209 rtx real_frame_off;
8210
8211 if (s390_stack_size)
8212 {
8213 HOST_WIDE_INT stack_guard;
8214
8215 if (s390_stack_guard)
8216 stack_guard = s390_stack_guard;
8217 else
8218 {
8219 /* If no value for the stack guard is provided, the smallest power of 2
8220 larger than the current frame size is chosen. */
8221 stack_guard = 1;
8222 while (stack_guard < cfun_frame_layout.frame_size)
8223 stack_guard <<= 1;
8224 }
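          /* For instance, a (hypothetical) frame size of 1200 bytes yields
             stack_guard == 2048, the smallest power of 2 above 1200.  */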
8225
8226 if (cfun_frame_layout.frame_size >= s390_stack_size)
8227 {
8228 warning (0, "frame size of function %qs is %wd"
8229 " bytes exceeding user provided stack limit of "
8230 "%d bytes. "
8231 "An unconditional trap is added.",
8232 current_function_name(), cfun_frame_layout.frame_size,
8233 s390_stack_size);
8234 emit_insn (gen_trap ());
8235 }
8236 else
8237 {
8238 /* stack_guard has to be smaller than s390_stack_size.
8239 Otherwise we would emit an AND with zero which would
8240 not match the test under mask pattern. */
8241 if (stack_guard >= s390_stack_size)
8242 {
8243 warning (0, "frame size of function %qs is %wd"
8244 " bytes which is more than half the stack size. "
8245 "The dynamic check would not be reliable. "
8246 "No check emitted for this function.",
8247 current_function_name(),
8248 cfun_frame_layout.frame_size);
8249 }
8250 else
8251 {
8252 HOST_WIDE_INT stack_check_mask = ((s390_stack_size - 1)
8253 & ~(stack_guard - 1));
8254
8255 rtx t = gen_rtx_AND (Pmode, stack_pointer_rtx,
8256 GEN_INT (stack_check_mask));
8257 if (TARGET_64BIT)
8258 emit_insn (gen_ctrapdi4 (gen_rtx_EQ (VOIDmode,
8259 t, const0_rtx),
8260 t, const0_rtx, const0_rtx));
8261 else
8262 emit_insn (gen_ctrapsi4 (gen_rtx_EQ (VOIDmode,
8263 t, const0_rtx),
8264 t, const0_rtx, const0_rtx));
8265 }
8266 }
8267 }
8268
8269 if (s390_warn_framesize > 0
8270 && cfun_frame_layout.frame_size >= s390_warn_framesize)
8271 warning (0, "frame size of %qs is %wd bytes",
8272 current_function_name (), cfun_frame_layout.frame_size);
8273
8274 if (s390_warn_dynamicstack_p && cfun->calls_alloca)
8275 warning (0, "%qs uses dynamic stack allocation", current_function_name ());
8276
8277 /* Save incoming stack pointer into temp reg. */
8278 if (TARGET_BACKCHAIN || next_fpr)
8279 insn = emit_insn (gen_move_insn (temp_reg, stack_pointer_rtx));
8280
8281 /* Subtract frame size from stack pointer. */
8282
8283 if (DISP_IN_RANGE (INTVAL (frame_off)))
8284 {
8285 insn = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8286 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8287 frame_off));
8288 insn = emit_insn (insn);
8289 }
8290 else
8291 {
8292 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
8293 frame_off = force_const_mem (Pmode, frame_off);
8294
8295 insn = emit_insn (gen_add2_insn (stack_pointer_rtx, frame_off));
8296 annotate_constant_pool_refs (&PATTERN (insn));
8297 }
8298
8299 RTX_FRAME_RELATED_P (insn) = 1;
8300 real_frame_off = GEN_INT (-cfun_frame_layout.frame_size);
8301 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
8302 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8303 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8304 real_frame_off)));
8305
8306 /* Set backchain. */
8307
8308 if (TARGET_BACKCHAIN)
8309 {
8310 if (cfun_frame_layout.backchain_offset)
8311 addr = gen_rtx_MEM (Pmode,
8312 plus_constant (Pmode, stack_pointer_rtx,
8313 cfun_frame_layout.backchain_offset));
8314 else
8315 addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
8316 set_mem_alias_set (addr, get_frame_alias_set ());
8317 insn = emit_insn (gen_move_insn (addr, temp_reg));
8318 }
8319
8320 /* If we support non-call exceptions (e.g. for Java),
8321 we need to make sure the backchain pointer is set up
8322 before any possibly trapping memory access. */
8323 if (TARGET_BACKCHAIN && cfun->can_throw_non_call_exceptions)
8324 {
8325 addr = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode));
8326 emit_clobber (addr);
8327 }
8328 }
8329
8330 /* Save fprs 8 - 15 (64 bit ABI). */
8331
8332 if (cfun_save_high_fprs_p && next_fpr)
8333 {
8334 /* If the stack might be accessed through a different register
8335 we have to make sure that the stack pointer decrement is not
8336 moved below the use of the stack slots. */
8337 s390_emit_stack_tie ();
8338
8339 insn = emit_insn (gen_add2_insn (temp_reg,
8340 GEN_INT (cfun_frame_layout.f8_offset)));
8341
8342 offset = 0;
8343
8344 for (i = 24; i <= next_fpr; i++)
8345 if (cfun_fpr_bit_p (i - 16))
8346 {
8347 rtx addr = plus_constant (Pmode, stack_pointer_rtx,
8348 cfun_frame_layout.frame_size
8349 + cfun_frame_layout.f8_offset
8350 + offset);
8351
8352 insn = save_fpr (temp_reg, offset, i);
8353 offset += 8;
8354 RTX_FRAME_RELATED_P (insn) = 1;
8355 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
8356 gen_rtx_SET (VOIDmode,
8357 gen_rtx_MEM (DFmode, addr),
8358 gen_rtx_REG (DFmode, i)));
8359 }
8360 }
8361
8362 /* Set frame pointer, if needed. */
8363
8364 if (frame_pointer_needed)
8365 {
8366 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
8367 RTX_FRAME_RELATED_P (insn) = 1;
8368 }
8369
8370 /* Set up got pointer, if needed. */
8371
8372 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
8373 {
8374 rtx insns = s390_load_got ();
8375
8376 for (insn = insns; insn; insn = NEXT_INSN (insn))
8377 annotate_constant_pool_refs (&PATTERN (insn));
8378
8379 emit_insn (insns);
8380 }
8381
8382 if (TARGET_TPF_PROFILING)
8383 {
8384 /* Generate a BAS instruction to serve as a function
8385 entry intercept to facilitate the use of tracing
8386 algorithms located at the branch target. */
8387 emit_insn (gen_prologue_tpf ());
8388
8389 /* Emit a blockage here so that all code
8390 lies between the profiling mechanisms. */
8391 emit_insn (gen_blockage ());
8392 }
8393 }
8394
8395 /* Expand the epilogue into a bunch of separate insns. */
8396
8397 void
8398 s390_emit_epilogue (bool sibcall)
8399 {
8400 rtx frame_pointer, return_reg, cfa_restores = NULL_RTX;
8401 int area_bottom, area_top, offset = 0;
8402 int next_offset;
8403 rtvec p;
8404 int i;
8405
8406 if (TARGET_TPF_PROFILING)
8407 {
8408
8409 /* Generate a BAS instruction to serve as a function
8410 entry intercept to facilitate the use of tracing
8411 algorithms located at the branch target. */
8412
8413 /* Emit a blockage here so that all code
8414 lies between the profiling mechanisms. */
8415 emit_insn (gen_blockage ());
8416
8417 emit_insn (gen_epilogue_tpf ());
8418 }
8419
8420 /* Check whether to use frame or stack pointer for restore. */
8421
8422 frame_pointer = (frame_pointer_needed
8423 ? hard_frame_pointer_rtx : stack_pointer_rtx);
8424
8425 s390_frame_area (&area_bottom, &area_top);
8426
8427 /* Check whether we can access the register save area.
8428 If not, increment the frame pointer as required. */
8429
8430 if (area_top <= area_bottom)
8431 {
8432 /* Nothing to restore. */
8433 }
8434 else if (DISP_IN_RANGE (cfun_frame_layout.frame_size + area_bottom)
8435 && DISP_IN_RANGE (cfun_frame_layout.frame_size + area_top - 1))
8436 {
8437 /* Area is in range. */
8438 offset = cfun_frame_layout.frame_size;
8439 }
8440 else
8441 {
8442 rtx insn, frame_off, cfa;
8443
8444 offset = area_bottom < 0 ? -area_bottom : 0;
8445 frame_off = GEN_INT (cfun_frame_layout.frame_size - offset);
8446
8447 cfa = gen_rtx_SET (VOIDmode, frame_pointer,
8448 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
8449 if (DISP_IN_RANGE (INTVAL (frame_off)))
8450 {
8451 insn = gen_rtx_SET (VOIDmode, frame_pointer,
8452 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
8453 insn = emit_insn (insn);
8454 }
8455 else
8456 {
8457 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
8458 frame_off = force_const_mem (Pmode, frame_off);
8459
8460 insn = emit_insn (gen_add2_insn (frame_pointer, frame_off));
8461 annotate_constant_pool_refs (&PATTERN (insn));
8462 }
8463 add_reg_note (insn, REG_CFA_ADJUST_CFA, cfa);
8464 RTX_FRAME_RELATED_P (insn) = 1;
8465 }
8466
8467 /* Restore call saved fprs. */
8468
8469 if (TARGET_64BIT)
8470 {
8471 if (cfun_save_high_fprs_p)
8472 {
8473 next_offset = cfun_frame_layout.f8_offset;
8474 for (i = 24; i < 32; i++)
8475 {
8476 if (cfun_fpr_bit_p (i - 16))
8477 {
8478 restore_fpr (frame_pointer,
8479 offset + next_offset, i);
8480 cfa_restores
8481 = alloc_reg_note (REG_CFA_RESTORE,
8482 gen_rtx_REG (DFmode, i), cfa_restores);
8483 next_offset += 8;
8484 }
8485 }
8486 }
8487
8488 }
8489 else
8490 {
8491 next_offset = cfun_frame_layout.f4_offset;
8492 for (i = 18; i < 20; i++)
8493 {
8494 if (cfun_fpr_bit_p (i - 16))
8495 {
8496 restore_fpr (frame_pointer,
8497 offset + next_offset, i);
8498 cfa_restores
8499 = alloc_reg_note (REG_CFA_RESTORE,
8500 gen_rtx_REG (DFmode, i), cfa_restores);
8501 next_offset += 8;
8502 }
8503 else if (!TARGET_PACKED_STACK)
8504 next_offset += 8;
8505 }
8506
8507 }
8508
8509 /* Return register. */
8510
8511 return_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
8512
8513 /* Restore call saved gprs. */
8514
8515 if (cfun_frame_layout.first_restore_gpr != -1)
8516 {
8517 rtx insn, addr;
8518 int i;
8519
8520 /* Check for global registers and save them
8521 to the stack location from which they will be restored. */
8522
8523 for (i = cfun_frame_layout.first_restore_gpr;
8524 i <= cfun_frame_layout.last_restore_gpr;
8525 i++)
8526 {
8527 if (global_not_special_regno_p (i))
8528 {
8529 addr = plus_constant (Pmode, frame_pointer,
8530 offset + cfun_frame_layout.gprs_offset
8531 + (i - cfun_frame_layout.first_save_gpr_slot)
8532 * UNITS_PER_LONG);
8533 addr = gen_rtx_MEM (Pmode, addr);
8534 set_mem_alias_set (addr, get_frame_alias_set ());
8535 emit_move_insn (addr, gen_rtx_REG (Pmode, i));
8536 }
8537 else
8538 cfa_restores
8539 = alloc_reg_note (REG_CFA_RESTORE,
8540 gen_rtx_REG (Pmode, i), cfa_restores);
8541 }
8542
8543 if (! sibcall)
8544 {
8545 /* Fetch the return address from the stack before the load multiple;
8546 this helps scheduling. */
8547
8548 if (cfun_frame_layout.save_return_addr_p
8549 || (cfun_frame_layout.first_restore_gpr < BASE_REGNUM
8550 && cfun_frame_layout.last_restore_gpr > RETURN_REGNUM))
8551 {
8552 int return_regnum = find_unused_clobbered_reg();
8553 if (!return_regnum)
8554 return_regnum = 4;
8555 return_reg = gen_rtx_REG (Pmode, return_regnum);
8556
8557 addr = plus_constant (Pmode, frame_pointer,
8558 offset + cfun_frame_layout.gprs_offset
8559 + (RETURN_REGNUM
8560 - cfun_frame_layout.first_save_gpr_slot)
8561 * UNITS_PER_LONG);
8562 addr = gen_rtx_MEM (Pmode, addr);
8563 set_mem_alias_set (addr, get_frame_alias_set ());
8564 emit_move_insn (return_reg, addr);
8565 }
8566 }
8567
8568 insn = restore_gprs (frame_pointer,
8569 offset + cfun_frame_layout.gprs_offset
8570 + (cfun_frame_layout.first_restore_gpr
8571 - cfun_frame_layout.first_save_gpr_slot)
8572 * UNITS_PER_LONG,
8573 cfun_frame_layout.first_restore_gpr,
8574 cfun_frame_layout.last_restore_gpr);
8575 insn = emit_insn (insn);
8576 REG_NOTES (insn) = cfa_restores;
8577 add_reg_note (insn, REG_CFA_DEF_CFA,
8578 plus_constant (Pmode, stack_pointer_rtx,
8579 STACK_POINTER_OFFSET));
8580 RTX_FRAME_RELATED_P (insn) = 1;
8581 }
8582
8583 if (! sibcall)
8584 {
8585
8586 /* Return to caller. */
8587
8588 p = rtvec_alloc (2);
8589
8590 RTVEC_ELT (p, 0) = ret_rtx;
8591 RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode, return_reg);
8592 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
8593 }
8594 }
8595
8596
8597 /* Return the size in bytes of a function argument of
8598 type TYPE and/or mode MODE. At least one of TYPE or
8599 MODE must be specified. */
8600
8601 static int
8602 s390_function_arg_size (enum machine_mode mode, const_tree type)
8603 {
8604 if (type)
8605 return int_size_in_bytes (type);
8606
8607 /* No type info available for some library calls ... */
8608 if (mode != BLKmode)
8609 return GET_MODE_SIZE (mode);
8610
8611 /* If we have neither type nor mode, abort. */
8612 gcc_unreachable ();
8613 }
8614
8615 /* Return true if a function argument of type TYPE and mode MODE
8616 is to be passed in a floating-point register, if available. */
8617
8618 static bool
8619 s390_function_arg_float (enum machine_mode mode, const_tree type)
8620 {
8621 int size = s390_function_arg_size (mode, type);
8622 if (size > 8)
8623 return false;
8624
8625 /* Soft-float changes the ABI: no floating-point registers are used. */
8626 if (TARGET_SOFT_FLOAT)
8627 return false;
8628
8629 /* No type info available for some library calls ... */
8630 if (!type)
8631 return mode == SFmode || mode == DFmode || mode == SDmode || mode == DDmode;
8632
8633 /* The ABI says that record types with a single member are treated
8634 just like that member would be. */
8635 while (TREE_CODE (type) == RECORD_TYPE)
8636 {
8637 tree field, single = NULL_TREE;
8638
8639 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
8640 {
8641 if (TREE_CODE (field) != FIELD_DECL)
8642 continue;
8643
8644 if (single == NULL_TREE)
8645 single = TREE_TYPE (field);
8646 else
8647 return false;
8648 }
8649
8650 if (single == NULL_TREE)
8651 return false;
8652 else
8653 type = single;
8654 }
8655
8656 return TREE_CODE (type) == REAL_TYPE;
8657 }
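/* A sketch of the single-member record rule above, using hypothetical types:

     struct one_float  { float f; };        passed like a plain float (FPR)
     struct two_floats { float f, g; };     two members, so not a float arg

   The loop keeps unwrapping single-member records until it reaches the
   underlying member type.  */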
8658
8659 /* Return true if a function argument of type TYPE and mode MODE
8660 is to be passed in an integer register, or a pair of integer
8661 registers, if available. */
8662
8663 static bool
8664 s390_function_arg_integer (enum machine_mode mode, const_tree type)
8665 {
8666 int size = s390_function_arg_size (mode, type);
8667 if (size > 8)
8668 return false;
8669
8670 /* No type info available for some library calls ... */
8671 if (!type)
8672 return GET_MODE_CLASS (mode) == MODE_INT
8673 || (TARGET_SOFT_FLOAT && SCALAR_FLOAT_MODE_P (mode));
8674
8675 /* We accept small integral (and similar) types. */
8676 if (INTEGRAL_TYPE_P (type)
8677 || POINTER_TYPE_P (type)
8678 || TREE_CODE (type) == NULLPTR_TYPE
8679 || TREE_CODE (type) == OFFSET_TYPE
8680 || (TARGET_SOFT_FLOAT && TREE_CODE (type) == REAL_TYPE))
8681 return true;
8682
8683 /* We also accept structs of size 1, 2, 4, 8 that are not
8684 passed in floating-point registers. */
8685 if (AGGREGATE_TYPE_P (type)
8686 && exact_log2 (size) >= 0
8687 && !s390_function_arg_float (mode, type))
8688 return true;
8689
8690 return false;
8691 }
8692
8693 /* Return 1 if a function argument of type TYPE and mode MODE
8694 is to be passed by reference. The ABI specifies that only
8695 structures of size 1, 2, 4, or 8 bytes are passed by value,
8696 all other structures (and complex numbers) are passed by
8697 reference. */
8698
8699 static bool
8700 s390_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
8701 enum machine_mode mode, const_tree type,
8702 bool named ATTRIBUTE_UNUSED)
8703 {
8704 int size = s390_function_arg_size (mode, type);
8705 if (size > 8)
8706 return true;
8707
8708 if (type)
8709 {
8710 if (AGGREGATE_TYPE_P (type) && exact_log2 (size) < 0)
8711 return 1;
8712
8713 if (TREE_CODE (type) == COMPLEX_TYPE
8714 || TREE_CODE (type) == VECTOR_TYPE)
8715 return 1;
8716 }
8717
8718 return 0;
8719 }
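/* Hypothetical examples of the rule above:

     struct s4  { char c[4]; };     4 bytes, power of 2  -> passed by value
     struct s3  { char c[3]; };     3 bytes              -> passed by reference
     struct s16 { char c[16]; };    16 bytes > 8         -> passed by reference
     _Complex double                complex              -> passed by reference  */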
8720
8721 /* Update the data in CUM to advance over an argument of mode MODE and
8722 data type TYPE. (TYPE is null for libcalls where that information
8723 may not be available.). The boolean NAMED specifies whether the
8724 argument is a named argument (as opposed to an unnamed argument
8725 matching an ellipsis). */
8726
8727 static void
8728 s390_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
8729 const_tree type, bool named ATTRIBUTE_UNUSED)
8730 {
8731 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
8732
8733 if (s390_function_arg_float (mode, type))
8734 {
8735 cum->fprs += 1;
8736 }
8737 else if (s390_function_arg_integer (mode, type))
8738 {
8739 int size = s390_function_arg_size (mode, type);
8740 cum->gprs += ((size + UNITS_PER_LONG - 1) / UNITS_PER_LONG);
8741 }
8742 else
8743 gcc_unreachable ();
8744 }
8745
8746 /* Define where to put the arguments to a function.
8747 Value is zero to push the argument on the stack,
8748 or a hard register in which to store the argument.
8749
8750 MODE is the argument's machine mode.
8751 TYPE is the data type of the argument (as a tree).
8752 This is null for libcalls where that information may
8753 not be available.
8754 CUM is a variable of type CUMULATIVE_ARGS which gives info about
8755 the preceding args and about the function being called.
8756 NAMED is nonzero if this argument is a named parameter
8757 (otherwise it is an extra parameter matching an ellipsis).
8758
8759 On S/390, we use general purpose registers 2 through 6 to
8760 pass integer, pointer, and certain structure arguments, and
8761 floating point registers 0 and 2 (0, 2, 4, and 6 on 64-bit)
8762 to pass floating point arguments. All remaining arguments
8763 are pushed to the stack. */
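/* As an illustration (hypothetical prototype), with -m64 and

     struct s3 { char c[3]; };
     void f (int a, double b, long c, struct s3 d);

   the arguments are assigned as follows: a -> %r2, b -> %f0, c -> %r3,
   and d is passed by reference with its address in %r4.  */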
8764
8765 static rtx
8766 s390_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
8767 const_tree type, bool named ATTRIBUTE_UNUSED)
8768 {
8769 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
8770
8771 if (s390_function_arg_float (mode, type))
8772 {
8773 if (cum->fprs + 1 > FP_ARG_NUM_REG)
8774 return 0;
8775 else
8776 return gen_rtx_REG (mode, cum->fprs + 16);
8777 }
8778 else if (s390_function_arg_integer (mode, type))
8779 {
8780 int size = s390_function_arg_size (mode, type);
8781 int n_gprs = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
8782
8783 if (cum->gprs + n_gprs > GP_ARG_NUM_REG)
8784 return 0;
8785 else if (n_gprs == 1 || UNITS_PER_WORD == UNITS_PER_LONG)
8786 return gen_rtx_REG (mode, cum->gprs + 2);
8787 else if (n_gprs == 2)
8788 {
8789 rtvec p = rtvec_alloc (2);
8790
8791 RTVEC_ELT (p, 0)
8792 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 2),
8793 const0_rtx);
8794 RTVEC_ELT (p, 1)
8795 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 3),
8796 GEN_INT (4));
8797
8798 return gen_rtx_PARALLEL (mode, p);
8799 }
8800 }
8801
8802 /* After the real arguments, expand_call calls us once again
8803 with a void_type_node type. Whatever we return here is
8804 passed as operand 2 to the call expanders.
8805
8806 We don't need this feature ... */
8807 else if (type == void_type_node)
8808 return const0_rtx;
8809
8810 gcc_unreachable ();
8811 }
8812
8813 /* Return true if return values of type TYPE should be returned
8814 in a memory buffer whose address is passed by the caller as
8815 hidden first argument. */
8816
8817 static bool
8818 s390_return_in_memory (const_tree type, const_tree fundecl ATTRIBUTE_UNUSED)
8819 {
8820 /* We accept small integral (and similar) types. */
8821 if (INTEGRAL_TYPE_P (type)
8822 || POINTER_TYPE_P (type)
8823 || TREE_CODE (type) == OFFSET_TYPE
8824 || TREE_CODE (type) == REAL_TYPE)
8825 return int_size_in_bytes (type) > 8;
8826
8827 /* Aggregates and similar constructs are always returned
8828 in memory. */
8829 if (AGGREGATE_TYPE_P (type)
8830 || TREE_CODE (type) == COMPLEX_TYPE
8831 || TREE_CODE (type) == VECTOR_TYPE)
8832 return true;
8833
8834 /* ??? We get called on all sorts of random stuff from
8835 aggregate_value_p. We can't abort, but it's not clear
8836 what's safe to return. Pretend it's a struct I guess. */
8837 return true;
8838 }
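/* Some consequences of the rules above, for hypothetical declarations:

     long f1 (void);                 fits in 8 bytes, returned in a register
     __int128 f2 (void);             16 bytes (-m64), returned in memory
     struct { int i; } f3 (void);    aggregate, always returned in memory
     _Complex float f4 (void);       complex, always returned in memory  */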
8839
8840 /* Function arguments and return values are promoted to word size. */
8841
8842 static enum machine_mode
8843 s390_promote_function_mode (const_tree type, enum machine_mode mode,
8844 int *punsignedp,
8845 const_tree fntype ATTRIBUTE_UNUSED,
8846 int for_return ATTRIBUTE_UNUSED)
8847 {
8848 if (INTEGRAL_MODE_P (mode)
8849 && GET_MODE_SIZE (mode) < UNITS_PER_LONG)
8850 {
8851 if (type != NULL_TREE && POINTER_TYPE_P (type))
8852 *punsignedp = POINTERS_EXTEND_UNSIGNED;
8853 return Pmode;
8854 }
8855
8856 return mode;
8857 }
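/* For example, a 'short' or 'char' argument or return value is widened to
   Pmode here (DImode with -m64, SImode with -m31); for pointer types the
   extension is additionally marked POINTERS_EXTEND_UNSIGNED.  */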
8858
8859 /* Define where to return a (scalar) value of type RET_TYPE.
8860 If RET_TYPE is null, define where to return a (scalar)
8861 value of mode MODE from a libcall. */
8862
8863 static rtx
8864 s390_function_and_libcall_value (enum machine_mode mode,
8865 const_tree ret_type,
8866 const_tree fntype_or_decl,
8867 bool outgoing ATTRIBUTE_UNUSED)
8868 {
8869 /* For normal functions perform the promotion as
8870 promote_function_mode would do. */
8871 if (ret_type)
8872 {
8873 int unsignedp = TYPE_UNSIGNED (ret_type);
8874 mode = promote_function_mode (ret_type, mode, &unsignedp,
8875 fntype_or_decl, 1);
8876 }
8877
8878 gcc_assert (GET_MODE_CLASS (mode) == MODE_INT || SCALAR_FLOAT_MODE_P (mode));
8879 gcc_assert (GET_MODE_SIZE (mode) <= 8);
8880
8881 if (TARGET_HARD_FLOAT && SCALAR_FLOAT_MODE_P (mode))
8882 return gen_rtx_REG (mode, 16);
8883 else if (GET_MODE_SIZE (mode) <= UNITS_PER_LONG
8884 || UNITS_PER_LONG == UNITS_PER_WORD)
8885 return gen_rtx_REG (mode, 2);
8886 else if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_LONG)
8887 {
8888 /* This case is triggered when returning a 64 bit value with
8889 -m31 -mzarch. Although the value would fit into a single
8890 register, it has to be forced into a 32 bit register pair in
8891 order to match the ABI. */
8892 rtvec p = rtvec_alloc (2);
8893
8894 RTVEC_ELT (p, 0)
8895 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 2), const0_rtx);
8896 RTVEC_ELT (p, 1)
8897 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 3), GEN_INT (4));
8898
8899 return gen_rtx_PARALLEL (mode, p);
8900 }
8901
8902 gcc_unreachable ();
8903 }
8904
8905 /* Define where to return a scalar return value of type RET_TYPE. */
8906
8907 static rtx
8908 s390_function_value (const_tree ret_type, const_tree fn_decl_or_type,
8909 bool outgoing)
8910 {
8911 return s390_function_and_libcall_value (TYPE_MODE (ret_type), ret_type,
8912 fn_decl_or_type, outgoing);
8913 }
8914
8915 /* Define where to return a scalar libcall return value of mode
8916 MODE. */
8917
8918 static rtx
8919 s390_libcall_value (enum machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
8920 {
8921 return s390_function_and_libcall_value (mode, NULL_TREE,
8922 NULL_TREE, true);
8923 }
8924
8925
8926 /* Create and return the va_list datatype.
8927
8928 On S/390, va_list is an array type equivalent to
8929
8930 typedef struct __va_list_tag
8931 {
8932 long __gpr;
8933 long __fpr;
8934 void *__overflow_arg_area;
8935 void *__reg_save_area;
8936 } va_list[1];
8937
8938 where __gpr and __fpr hold the number of general purpose
8939 or floating point arguments used up to now, respectively,
8940 __overflow_arg_area points to the stack location of the
8941 next argument passed on the stack, and __reg_save_area
8942 always points to the start of the register area in the
8943 call frame of the current function. The function prologue
8944 saves all registers used for argument passing into this
8945 area if the function uses variable arguments. */
8946
8947 static tree
8948 s390_build_builtin_va_list (void)
8949 {
8950 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
8951
8952 record = lang_hooks.types.make_type (RECORD_TYPE);
8953
8954 type_decl =
8955 build_decl (BUILTINS_LOCATION,
8956 TYPE_DECL, get_identifier ("__va_list_tag"), record);
8957
8958 f_gpr = build_decl (BUILTINS_LOCATION,
8959 FIELD_DECL, get_identifier ("__gpr"),
8960 long_integer_type_node);
8961 f_fpr = build_decl (BUILTINS_LOCATION,
8962 FIELD_DECL, get_identifier ("__fpr"),
8963 long_integer_type_node);
8964 f_ovf = build_decl (BUILTINS_LOCATION,
8965 FIELD_DECL, get_identifier ("__overflow_arg_area"),
8966 ptr_type_node);
8967 f_sav = build_decl (BUILTINS_LOCATION,
8968 FIELD_DECL, get_identifier ("__reg_save_area"),
8969 ptr_type_node);
8970
8971 va_list_gpr_counter_field = f_gpr;
8972 va_list_fpr_counter_field = f_fpr;
8973
8974 DECL_FIELD_CONTEXT (f_gpr) = record;
8975 DECL_FIELD_CONTEXT (f_fpr) = record;
8976 DECL_FIELD_CONTEXT (f_ovf) = record;
8977 DECL_FIELD_CONTEXT (f_sav) = record;
8978
8979 TYPE_STUB_DECL (record) = type_decl;
8980 TYPE_NAME (record) = type_decl;
8981 TYPE_FIELDS (record) = f_gpr;
8982 DECL_CHAIN (f_gpr) = f_fpr;
8983 DECL_CHAIN (f_fpr) = f_ovf;
8984 DECL_CHAIN (f_ovf) = f_sav;
8985
8986 layout_type (record);
8987
8988 /* The correct type is an array type of one element. */
8989 return build_array_type (record, build_index_type (size_zero_node));
8990 }
8991
8992 /* Implement va_start by filling the va_list structure VALIST.
8993 STDARG_P is always true, and ignored.
8994 NEXTARG points to the first anonymous stack argument.
8995
8996 The following global variables are used to initialize
8997 the va_list structure:
8998
8999 crtl->args.info:
9000 holds number of gprs and fprs used for named arguments.
9001 crtl->args.arg_offset_rtx:
9002 holds the offset of the first anonymous stack argument
9003 (relative to the virtual arg pointer). */
9004
9005 static void
9006 s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
9007 {
9008 HOST_WIDE_INT n_gpr, n_fpr;
9009 int off;
9010 tree f_gpr, f_fpr, f_ovf, f_sav;
9011 tree gpr, fpr, ovf, sav, t;
9012
9013 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
9014 f_fpr = DECL_CHAIN (f_gpr);
9015 f_ovf = DECL_CHAIN (f_fpr);
9016 f_sav = DECL_CHAIN (f_ovf);
9017
9018 valist = build_simple_mem_ref (valist);
9019 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
9020 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
9021 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
9022 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
9023
9024 /* Count number of gp and fp argument registers used. */
9025
9026 n_gpr = crtl->args.info.gprs;
9027 n_fpr = crtl->args.info.fprs;
9028
9029 if (cfun->va_list_gpr_size)
9030 {
9031 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
9032 build_int_cst (NULL_TREE, n_gpr));
9033 TREE_SIDE_EFFECTS (t) = 1;
9034 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9035 }
9036
9037 if (cfun->va_list_fpr_size)
9038 {
9039 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
9040 build_int_cst (NULL_TREE, n_fpr));
9041 TREE_SIDE_EFFECTS (t) = 1;
9042 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9043 }
9044
9045 /* Find the overflow area. */
9046 if (n_gpr + cfun->va_list_gpr_size > GP_ARG_NUM_REG
9047 || n_fpr + cfun->va_list_fpr_size > FP_ARG_NUM_REG)
9048 {
9049 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
9050
9051 off = INTVAL (crtl->args.arg_offset_rtx);
9052 off = off < 0 ? 0 : off;
9053 if (TARGET_DEBUG_ARG)
9054 fprintf (stderr, "va_start: n_gpr = %d, n_fpr = %d off %d\n",
9055 (int)n_gpr, (int)n_fpr, off);
9056
9057 t = fold_build_pointer_plus_hwi (t, off);
9058
9059 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
9060 TREE_SIDE_EFFECTS (t) = 1;
9061 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9062 }
9063
9064 /* Find the register save area. */
9065 if ((cfun->va_list_gpr_size && n_gpr < GP_ARG_NUM_REG)
9066 || (cfun->va_list_fpr_size && n_fpr < FP_ARG_NUM_REG))
9067 {
9068 t = make_tree (TREE_TYPE (sav), return_address_pointer_rtx);
9069 t = fold_build_pointer_plus_hwi (t, -RETURN_REGNUM * UNITS_PER_LONG);
9070
9071 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
9072 TREE_SIDE_EFFECTS (t) = 1;
9073 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9074 }
9075 }
9076
9077 /* Implement va_arg by updating the va_list structure
9078 VALIST as required to retrieve an argument of type
9079 TYPE, and returning that argument.
9080
9081 Generates code equivalent to:
9082
9083 if (integral value) {
9084 if (size <= 4 && args.gpr < 5 ||
9085 size > 4 && args.gpr < 4 )
9086 ret = args.reg_save_area[args.gpr+8]
9087 else
9088 ret = *args.overflow_arg_area++;
9089 } else if (float value) {
9090 if (args.fgpr < 2)
9091 ret = args.reg_save_area[args.fpr+64]
9092 else
9093 ret = *args.overflow_arg_area++;
9094 } else if (aggregate value) {
9095 if (args.gpr < 5)
9096 ret = *args.reg_save_area[args.gpr]
9097 else
9098 ret = **args.overflow_arg_area++;
9099 } */
9100
9101 static tree
9102 s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
9103 gimple_seq *post_p ATTRIBUTE_UNUSED)
9104 {
9105 tree f_gpr, f_fpr, f_ovf, f_sav;
9106 tree gpr, fpr, ovf, sav, reg, t, u;
9107 int indirect_p, size, n_reg, sav_ofs, sav_scale, max_reg;
9108 tree lab_false, lab_over, addr;
9109
9110 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
9111 f_fpr = DECL_CHAIN (f_gpr);
9112 f_ovf = DECL_CHAIN (f_fpr);
9113 f_sav = DECL_CHAIN (f_ovf);
9114
9115 valist = build_va_arg_indirect_ref (valist);
9116 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
9117 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
9118 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
9119
9120 /* The tree for args* cannot be shared between gpr/fpr and ovf since
9121 both appear on a lhs. */
9122 valist = unshare_expr (valist);
9123 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
9124
9125 size = int_size_in_bytes (type);
9126
9127 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
9128 {
9129 if (TARGET_DEBUG_ARG)
9130 {
9131 fprintf (stderr, "va_arg: aggregate type");
9132 debug_tree (type);
9133 }
9134
9135 /* Aggregates are passed by reference. */
9136 indirect_p = 1;
9137 reg = gpr;
9138 n_reg = 1;
9139
9140 /* Kernel stack layout on 31 bit: it is assumed here that no padding
9141 will be added by s390_frame_info because for va_args an even number
9142 of GPRs always has to be saved (r15-r2 = 14 regs). */
9143 sav_ofs = 2 * UNITS_PER_LONG;
9144 sav_scale = UNITS_PER_LONG;
9145 size = UNITS_PER_LONG;
9146 max_reg = GP_ARG_NUM_REG - n_reg;
9147 }
9148 else if (s390_function_arg_float (TYPE_MODE (type), type))
9149 {
9150 if (TARGET_DEBUG_ARG)
9151 {
9152 fprintf (stderr, "va_arg: float type");
9153 debug_tree (type);
9154 }
9155
9156 /* FP args go in FP registers, if present. */
9157 indirect_p = 0;
9158 reg = fpr;
9159 n_reg = 1;
9160 sav_ofs = 16 * UNITS_PER_LONG;
9161 sav_scale = 8;
9162 max_reg = FP_ARG_NUM_REG - n_reg;
9163 }
9164 else
9165 {
9166 if (TARGET_DEBUG_ARG)
9167 {
9168 fprintf (stderr, "va_arg: other type");
9169 debug_tree (type);
9170 }
9171
9172 /* Otherwise into GP registers. */
9173 indirect_p = 0;
9174 reg = gpr;
9175 n_reg = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
9176
9177 /* Kernel stack layout on 31 bit: it is assumed here that no padding
9178 will be added by s390_frame_info because for va_args an even number
9179 of GPRs always has to be saved (r15-r2 = 14 regs). */
9180 sav_ofs = 2 * UNITS_PER_LONG;
9181
9182 if (size < UNITS_PER_LONG)
9183 sav_ofs += UNITS_PER_LONG - size;
9184
9185 sav_scale = UNITS_PER_LONG;
9186 max_reg = GP_ARG_NUM_REG - n_reg;
9187 }
9188
9189 /* Pull the value out of the saved registers ... */
9190
9191 lab_false = create_artificial_label (UNKNOWN_LOCATION);
9192 lab_over = create_artificial_label (UNKNOWN_LOCATION);
9193 addr = create_tmp_var (ptr_type_node, "addr");
9194
9195 t = fold_convert (TREE_TYPE (reg), size_int (max_reg));
9196 t = build2 (GT_EXPR, boolean_type_node, reg, t);
9197 u = build1 (GOTO_EXPR, void_type_node, lab_false);
9198 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
9199 gimplify_and_add (t, pre_p);
9200
9201 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
9202 u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
9203 fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
9204 t = fold_build_pointer_plus (t, u);
9205
9206 gimplify_assign (addr, t, pre_p);
9207
9208 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
9209
9210 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
9211
9212
9213 /* ... Otherwise out of the overflow area. */
9214
9215 t = ovf;
9216 if (size < UNITS_PER_LONG)
9217 t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG - size);
9218
9219 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
9220
9221 gimplify_assign (addr, t, pre_p);
9222
9223 t = fold_build_pointer_plus_hwi (t, size);
9224 gimplify_assign (ovf, t, pre_p);
9225
9226 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
9227
9228
9229 /* Increment register save count. */
9230
9231 u = build2 (PREINCREMENT_EXPR, TREE_TYPE (reg), reg,
9232 fold_convert (TREE_TYPE (reg), size_int (n_reg)));
9233 gimplify_and_add (u, pre_p);
9234
9235 if (indirect_p)
9236 {
9237 t = build_pointer_type_for_mode (build_pointer_type (type),
9238 ptr_mode, true);
9239 addr = fold_convert (t, addr);
9240 addr = build_va_arg_indirect_ref (addr);
9241 }
9242 else
9243 {
9244 t = build_pointer_type_for_mode (type, ptr_mode, true);
9245 addr = fold_convert (t, addr);
9246 }
9247
9248 return build_va_arg_indirect_ref (addr);
9249 }
9250
9251
9252 /* Builtins. */
9253
9254 enum s390_builtin
9255 {
9256 S390_BUILTIN_THREAD_POINTER,
9257 S390_BUILTIN_SET_THREAD_POINTER,
9258
9259 S390_BUILTIN_max
9260 };
9261
9262 static enum insn_code const code_for_builtin_64[S390_BUILTIN_max] = {
9263 CODE_FOR_get_tp_64,
9264 CODE_FOR_set_tp_64
9265 };
9266
9267 static enum insn_code const code_for_builtin_31[S390_BUILTIN_max] = {
9268 CODE_FOR_get_tp_31,
9269 CODE_FOR_set_tp_31
9270 };
9271
9272 static void
9273 s390_init_builtins (void)
9274 {
9275 tree ftype;
9276
9277 ftype = build_function_type_list (ptr_type_node, NULL_TREE);
9278 add_builtin_function ("__builtin_thread_pointer", ftype,
9279 S390_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
9280 NULL, NULL_TREE);
9281
9282 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
9283 add_builtin_function ("__builtin_set_thread_pointer", ftype,
9284 S390_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
9285 NULL, NULL_TREE);
9286 }
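/* From C the two builtins defined above are used as in this minimal sketch:

     void *tp = __builtin_thread_pointer ();
     __builtin_set_thread_pointer (tp);

   They expand through the get_tp/set_tp patterns selected by
   s390_expand_builtin below.  */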
9287
9288 /* Expand an expression EXP that calls a built-in function,
9289 with result going to TARGET if that's convenient
9290 (and in mode MODE if that's convenient).
9291 SUBTARGET may be used as the target for computing one of EXP's operands.
9292 IGNORE is nonzero if the value is to be ignored. */
9293
9294 static rtx
9295 s390_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
9296 enum machine_mode mode ATTRIBUTE_UNUSED,
9297 int ignore ATTRIBUTE_UNUSED)
9298 {
9299 #define MAX_ARGS 2
9300
9301 enum insn_code const *code_for_builtin =
9302 TARGET_64BIT ? code_for_builtin_64 : code_for_builtin_31;
9303
9304 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
9305 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
9306 enum insn_code icode;
9307 rtx op[MAX_ARGS], pat;
9308 int arity;
9309 bool nonvoid;
9310 tree arg;
9311 call_expr_arg_iterator iter;
9312
9313 if (fcode >= S390_BUILTIN_max)
9314 internal_error ("bad builtin fcode");
9315 icode = code_for_builtin[fcode];
9316 if (icode == 0)
9317 internal_error ("bad builtin fcode");
9318
9319 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
9320
9321 arity = 0;
9322 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
9323 {
9324 const struct insn_operand_data *insn_op;
9325
9326 if (arg == error_mark_node)
9327 return NULL_RTX;
9328 if (arity > MAX_ARGS)
9329 return NULL_RTX;
9330
9331 insn_op = &insn_data[icode].operand[arity + nonvoid];
9332
9333 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
9334
9335 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
9336 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
9337 arity++;
9338 }
9339
9340 if (nonvoid)
9341 {
9342 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9343 if (!target
9344 || GET_MODE (target) != tmode
9345 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
9346 target = gen_reg_rtx (tmode);
9347 }
9348
9349 switch (arity)
9350 {
9351 case 0:
9352 pat = GEN_FCN (icode) (target);
9353 break;
9354 case 1:
9355 if (nonvoid)
9356 pat = GEN_FCN (icode) (target, op[0]);
9357 else
9358 pat = GEN_FCN (icode) (op[0]);
9359 break;
9360 case 2:
9361 pat = GEN_FCN (icode) (target, op[0], op[1]);
9362 break;
9363 default:
9364 gcc_unreachable ();
9365 }
9366 if (!pat)
9367 return NULL_RTX;
9368 emit_insn (pat);
9369
9370 if (nonvoid)
9371 return target;
9372 else
9373 return const0_rtx;
9374 }
9375
9376
9377 /* Output assembly code for the trampoline template to
9378 stdio stream FILE.
9379
9380 On S/390, we use gpr 1 internally in the trampoline code;
9381 gpr 0 is used to hold the static chain. */
9382
9383 static void
9384 s390_asm_trampoline_template (FILE *file)
9385 {
9386 rtx op[2];
9387 op[0] = gen_rtx_REG (Pmode, 0);
9388 op[1] = gen_rtx_REG (Pmode, 1);
9389
9390 if (TARGET_64BIT)
9391 {
9392 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
9393 output_asm_insn ("lmg\t%0,%1,14(%1)", op); /* 6 byte */
9394 output_asm_insn ("br\t%1", op); /* 2 byte */
9395 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 10));
9396 }
9397 else
9398 {
9399 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
9400 output_asm_insn ("lm\t%0,%1,6(%1)", op); /* 4 byte */
9401 output_asm_insn ("br\t%1", op); /* 2 byte */
9402 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 8));
9403 }
9404 }
9405
9406 /* Emit RTL insns to initialize the variable parts of a trampoline.
9407 FNADDR is an RTX for the address of the function's pure code.
9408 CXT is an RTX for the static chain value for the function. */
9409
9410 static void
9411 s390_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
9412 {
9413 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
9414 rtx mem;
9415
9416 emit_block_move (m_tramp, assemble_trampoline_template (),
9417 GEN_INT (2 * UNITS_PER_LONG), BLOCK_OP_NORMAL);
9418
9419 mem = adjust_address (m_tramp, Pmode, 2 * UNITS_PER_LONG);
9420 emit_move_insn (mem, cxt);
9421 mem = adjust_address (m_tramp, Pmode, 3 * UNITS_PER_LONG);
9422 emit_move_insn (mem, fnaddr);
9423 }
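/* With -m64 the initialized trampoline is laid out as follows (derived from
   the template and the offsets above):

     0   basr  %r1,0                %r1 = address of the lmg
     2   lmg   %r0,%r1,14(%r1)      loads the two doublewords at offset 16
     8   br    %r1
     10  (padding up to 16)
     16  static chain value CXT     ends up in %r0
     24  function address FNADDR    ends up in %r1, the branch target  */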
9424
9425 /* Output assembler code to FILE to increment profiler label # LABELNO
9426 for profiling a function entry. */
9427
9428 void
9429 s390_function_profiler (FILE *file, int labelno)
9430 {
9431 rtx op[7];
9432
9433 char label[128];
9434 ASM_GENERATE_INTERNAL_LABEL (label, "LP", labelno);
9435
9436 fprintf (file, "# function profiler \n");
9437
9438 op[0] = gen_rtx_REG (Pmode, RETURN_REGNUM);
9439 op[1] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
9440 op[1] = gen_rtx_MEM (Pmode, plus_constant (Pmode, op[1], UNITS_PER_LONG));
9441
9442 op[2] = gen_rtx_REG (Pmode, 1);
9443 op[3] = gen_rtx_SYMBOL_REF (Pmode, label);
9444 SYMBOL_REF_FLAGS (op[3]) = SYMBOL_FLAG_LOCAL;
9445
9446 op[4] = gen_rtx_SYMBOL_REF (Pmode, "_mcount");
9447 if (flag_pic)
9448 {
9449 op[4] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[4]), UNSPEC_PLT);
9450 op[4] = gen_rtx_CONST (Pmode, op[4]);
9451 }
9452
9453 if (TARGET_64BIT)
9454 {
9455 output_asm_insn ("stg\t%0,%1", op);
9456 output_asm_insn ("larl\t%2,%3", op);
9457 output_asm_insn ("brasl\t%0,%4", op);
9458 output_asm_insn ("lg\t%0,%1", op);
9459 }
9460 else if (!flag_pic)
9461 {
9462 op[6] = gen_label_rtx ();
9463
9464 output_asm_insn ("st\t%0,%1", op);
9465 output_asm_insn ("bras\t%2,%l6", op);
9466 output_asm_insn (".long\t%4", op);
9467 output_asm_insn (".long\t%3", op);
9468 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
9469 output_asm_insn ("l\t%0,0(%2)", op);
9470 output_asm_insn ("l\t%2,4(%2)", op);
9471 output_asm_insn ("basr\t%0,%0", op);
9472 output_asm_insn ("l\t%0,%1", op);
9473 }
9474 else
9475 {
9476 op[5] = gen_label_rtx ();
9477 op[6] = gen_label_rtx ();
9478
9479 output_asm_insn ("st\t%0,%1", op);
9480 output_asm_insn ("bras\t%2,%l6", op);
9481 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[5]));
9482 output_asm_insn (".long\t%4-%l5", op);
9483 output_asm_insn (".long\t%3-%l5", op);
9484 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
9485 output_asm_insn ("lr\t%0,%2", op);
9486 output_asm_insn ("a\t%0,0(%2)", op);
9487 output_asm_insn ("a\t%2,4(%2)", op);
9488 output_asm_insn ("basr\t%0,%0", op);
9489 output_asm_insn ("l\t%0,%1", op);
9490 }
9491 }
9492
9493 /* Encode symbol attributes (local vs. global, tls model) of a SYMBOL_REF
9494 into its SYMBOL_REF_FLAGS. */
9495
9496 static void
9497 s390_encode_section_info (tree decl, rtx rtl, int first)
9498 {
9499 default_encode_section_info (decl, rtl, first);
9500
9501 if (TREE_CODE (decl) == VAR_DECL)
9502 {
9503 /* If a variable has a forced alignment to < 2 bytes, mark it
9504 with SYMBOL_FLAG_ALIGN1 to prevent it from being used as a LARL
9505 operand. */
9506 if (DECL_USER_ALIGN (decl) && DECL_ALIGN (decl) < 16)
9507 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_ALIGN1;
9508 if (!DECL_SIZE (decl)
9509 || !DECL_ALIGN (decl)
9510 || !host_integerp (DECL_SIZE (decl), 0)
9511 || (DECL_ALIGN (decl) <= 64
9512 && DECL_ALIGN (decl) != tree_low_cst (DECL_SIZE (decl), 0)))
9513 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
9514 }
9515
9516 /* Literal pool references don't have a decl so they are handled
9517 differently here. We rely on the information in the MEM_ALIGN
9518 entry to decide upon natural alignment. */
9519 if (MEM_P (rtl)
9520 && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
9521 && TREE_CONSTANT_POOL_ADDRESS_P (XEXP (rtl, 0))
9522 && (MEM_ALIGN (rtl) == 0
9523 || GET_MODE_BITSIZE (GET_MODE (rtl)) == 0
9524 || MEM_ALIGN (rtl) < GET_MODE_BITSIZE (GET_MODE (rtl))))
9525 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
9526 }
9527
9528 /* Output thunk to FILE that implements a C++ virtual function call (with
9529 multiple inheritance) to FUNCTION. The thunk adjusts the this pointer
9530 by DELTA, and unless VCALL_OFFSET is zero, applies an additional adjustment
9531 stored at VCALL_OFFSET in the vtable whose address is located at offset 0
9532 relative to the resulting this pointer. */
9533
9534 static void
9535 s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
9536 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
9537 tree function)
9538 {
9539 rtx op[10];
9540 int nonlocal = 0;
9541
9542 /* Make sure unwind info is emitted for the thunk if needed. */
9543 final_start_function (emit_barrier (), file, 1);
9544
9545 /* Operand 0 is the target function. */
9546 op[0] = XEXP (DECL_RTL (function), 0);
9547 if (flag_pic && !SYMBOL_REF_LOCAL_P (op[0]))
9548 {
9549 nonlocal = 1;
9550 op[0] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[0]),
9551 TARGET_64BIT ? UNSPEC_PLT : UNSPEC_GOT);
9552 op[0] = gen_rtx_CONST (Pmode, op[0]);
9553 }
9554
9555 /* Operand 1 is the 'this' pointer. */
9556 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
9557 op[1] = gen_rtx_REG (Pmode, 3);
9558 else
9559 op[1] = gen_rtx_REG (Pmode, 2);
9560
9561 /* Operand 2 is the delta. */
9562 op[2] = GEN_INT (delta);
9563
9564 /* Operand 3 is the vcall_offset. */
9565 op[3] = GEN_INT (vcall_offset);
9566
9567 /* Operand 4 is the temporary register. */
9568 op[4] = gen_rtx_REG (Pmode, 1);
9569
9570 /* Operands 5 to 8 can be used as labels. */
9571 op[5] = NULL_RTX;
9572 op[6] = NULL_RTX;
9573 op[7] = NULL_RTX;
9574 op[8] = NULL_RTX;
9575
9576 /* Operand 9 can be used for temporary register. */
9577 op[9] = NULL_RTX;
9578
9579 /* Generate code. */
9580 if (TARGET_64BIT)
9581 {
9582 /* Setup literal pool pointer if required. */
9583 if ((!DISP_IN_RANGE (delta)
9584 && !CONST_OK_FOR_K (delta)
9585 && !CONST_OK_FOR_Os (delta))
9586 || (!DISP_IN_RANGE (vcall_offset)
9587 && !CONST_OK_FOR_K (vcall_offset)
9588 && !CONST_OK_FOR_Os (vcall_offset)))
9589 {
9590 op[5] = gen_label_rtx ();
9591 output_asm_insn ("larl\t%4,%5", op);
9592 }
9593
9594 /* Add DELTA to this pointer. */
9595 if (delta)
9596 {
9597 if (CONST_OK_FOR_J (delta))
9598 output_asm_insn ("la\t%1,%2(%1)", op);
9599 else if (DISP_IN_RANGE (delta))
9600 output_asm_insn ("lay\t%1,%2(%1)", op);
9601 else if (CONST_OK_FOR_K (delta))
9602 output_asm_insn ("aghi\t%1,%2", op);
9603 else if (CONST_OK_FOR_Os (delta))
9604 output_asm_insn ("agfi\t%1,%2", op);
9605 else
9606 {
9607 op[6] = gen_label_rtx ();
9608 output_asm_insn ("agf\t%1,%6-%5(%4)", op);
9609 }
9610 }
9611
9612 /* Perform vcall adjustment. */
9613 if (vcall_offset)
9614 {
9615 if (DISP_IN_RANGE (vcall_offset))
9616 {
9617 output_asm_insn ("lg\t%4,0(%1)", op);
9618 output_asm_insn ("ag\t%1,%3(%4)", op);
9619 }
9620 else if (CONST_OK_FOR_K (vcall_offset))
9621 {
9622 output_asm_insn ("lghi\t%4,%3", op);
9623 output_asm_insn ("ag\t%4,0(%1)", op);
9624 output_asm_insn ("ag\t%1,0(%4)", op);
9625 }
9626 else if (CONST_OK_FOR_Os (vcall_offset))
9627 {
9628 output_asm_insn ("lgfi\t%4,%3", op);
9629 output_asm_insn ("ag\t%4,0(%1)", op);
9630 output_asm_insn ("ag\t%1,0(%4)", op);
9631 }
9632 else
9633 {
9634 op[7] = gen_label_rtx ();
9635 output_asm_insn ("llgf\t%4,%7-%5(%4)", op);
9636 output_asm_insn ("ag\t%4,0(%1)", op);
9637 output_asm_insn ("ag\t%1,0(%4)", op);
9638 }
9639 }
9640
9641 /* Jump to target. */
9642 output_asm_insn ("jg\t%0", op);
9643
9644 /* Output literal pool if required. */
9645 if (op[5])
9646 {
9647 output_asm_insn (".align\t4", op);
9648 targetm.asm_out.internal_label (file, "L",
9649 CODE_LABEL_NUMBER (op[5]));
9650 }
9651 if (op[6])
9652 {
9653 targetm.asm_out.internal_label (file, "L",
9654 CODE_LABEL_NUMBER (op[6]));
9655 output_asm_insn (".long\t%2", op);
9656 }
9657 if (op[7])
9658 {
9659 targetm.asm_out.internal_label (file, "L",
9660 CODE_LABEL_NUMBER (op[7]));
9661 output_asm_insn (".long\t%3", op);
9662 }
9663 }
9664 else
9665 {
9666 /* Setup base pointer if required. */
9667 if (!vcall_offset
9668 || (!DISP_IN_RANGE (delta)
9669 && !CONST_OK_FOR_K (delta)
9670 && !CONST_OK_FOR_Os (delta))
9671 || (!DISP_IN_RANGE (vcall_offset)
9672 && !CONST_OK_FOR_K (vcall_offset)
9673 && !CONST_OK_FOR_Os (vcall_offset)))
9674 {
9675 op[5] = gen_label_rtx ();
9676 output_asm_insn ("basr\t%4,0", op);
9677 targetm.asm_out.internal_label (file, "L",
9678 CODE_LABEL_NUMBER (op[5]));
9679 }
9680
9681 /* Add DELTA to this pointer. */
9682 if (delta)
9683 {
9684 if (CONST_OK_FOR_J (delta))
9685 output_asm_insn ("la\t%1,%2(%1)", op);
9686 else if (DISP_IN_RANGE (delta))
9687 output_asm_insn ("lay\t%1,%2(%1)", op);
9688 else if (CONST_OK_FOR_K (delta))
9689 output_asm_insn ("ahi\t%1,%2", op);
9690 else if (CONST_OK_FOR_Os (delta))
9691 output_asm_insn ("afi\t%1,%2", op);
9692 else
9693 {
9694 op[6] = gen_label_rtx ();
9695 output_asm_insn ("a\t%1,%6-%5(%4)", op);
9696 }
9697 }
9698
9699 /* Perform vcall adjustment. */
9700 if (vcall_offset)
9701 {
9702 if (CONST_OK_FOR_J (vcall_offset))
9703 {
9704 output_asm_insn ("l\t%4,0(%1)", op);
9705 output_asm_insn ("a\t%1,%3(%4)", op);
9706 }
9707 else if (DISP_IN_RANGE (vcall_offset))
9708 {
9709 output_asm_insn ("l\t%4,0(%1)", op);
9710 output_asm_insn ("ay\t%1,%3(%4)", op);
9711 }
9712 else if (CONST_OK_FOR_K (vcall_offset))
9713 {
9714 output_asm_insn ("lhi\t%4,%3", op);
9715 output_asm_insn ("a\t%4,0(%1)", op);
9716 output_asm_insn ("a\t%1,0(%4)", op);
9717 }
9718 else if (CONST_OK_FOR_Os (vcall_offset))
9719 {
9720 output_asm_insn ("iilf\t%4,%3", op);
9721 output_asm_insn ("a\t%4,0(%1)", op);
9722 output_asm_insn ("a\t%1,0(%4)", op);
9723 }
9724 else
9725 {
9726 op[7] = gen_label_rtx ();
9727 output_asm_insn ("l\t%4,%7-%5(%4)", op);
9728 output_asm_insn ("a\t%4,0(%1)", op);
9729 output_asm_insn ("a\t%1,0(%4)", op);
9730 }
9731
9732 /* We had to clobber the base pointer register.
9733 Re-setup the base pointer (with a different base). */
9734 op[5] = gen_label_rtx ();
9735 output_asm_insn ("basr\t%4,0", op);
9736 targetm.asm_out.internal_label (file, "L",
9737 CODE_LABEL_NUMBER (op[5]));
9738 }
9739
9740 /* Jump to target. */
9741 op[8] = gen_label_rtx ();
9742
9743 if (!flag_pic)
9744 output_asm_insn ("l\t%4,%8-%5(%4)", op);
9745 else if (!nonlocal)
9746 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9747 /* We cannot call through .plt, since .plt requires %r12 loaded. */
9748 else if (flag_pic == 1)
9749 {
9750 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9751 output_asm_insn ("l\t%4,%0(%4)", op);
9752 }
9753 else if (flag_pic == 2)
9754 {
9755 op[9] = gen_rtx_REG (Pmode, 0);
9756 output_asm_insn ("l\t%9,%8-4-%5(%4)", op);
9757 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9758 output_asm_insn ("ar\t%4,%9", op);
9759 output_asm_insn ("l\t%4,0(%4)", op);
9760 }
9761
9762 output_asm_insn ("br\t%4", op);
9763
9764 /* Output literal pool. */
9765 output_asm_insn (".align\t4", op);
9766
9767 if (nonlocal && flag_pic == 2)
9768 output_asm_insn (".long\t%0", op);
9769 if (nonlocal)
9770 {
9771 op[0] = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
9772 SYMBOL_REF_FLAGS (op[0]) = SYMBOL_FLAG_LOCAL;
9773 }
9774
9775 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[8]));
9776 if (!flag_pic)
9777 output_asm_insn (".long\t%0", op);
9778 else
9779 output_asm_insn (".long\t%0-%5", op);
9780
9781 if (op[6])
9782 {
9783 targetm.asm_out.internal_label (file, "L",
9784 CODE_LABEL_NUMBER (op[6]));
9785 output_asm_insn (".long\t%2", op);
9786 }
9787 if (op[7])
9788 {
9789 targetm.asm_out.internal_label (file, "L",
9790 CODE_LABEL_NUMBER (op[7]));
9791 output_asm_insn (".long\t%3", op);
9792 }
9793 }
9794 final_end_function ();
9795 }
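/* Illustrative sketch only: with TARGET_64BIT, a DELTA accepted by
   CONST_OK_FOR_J and a zero VCALL_OFFSET, the code above boils down to
   roughly

       la   %r2,DELTA(%r2)      # adjust the 'this' pointer
       jg   FUNCTION            # tail-jump to the target

   (%r3 instead of %r2 when the return value is passed in memory); the
   literal-pool labels %5..%7 only come into play when DELTA or
   VCALL_OFFSET exceeds the immediate ranges tested above.  */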
9796
9797 static bool
9798 s390_valid_pointer_mode (enum machine_mode mode)
9799 {
9800 return (mode == SImode || (TARGET_64BIT && mode == DImode));
9801 }
9802
9803 /* Checks whether the given CALL_EXPR would use a call
9804 saved register. This is used to decide whether sibling call
9805 optimization could be performed on the respective function
9806 call. */
9807
9808 static bool
9809 s390_call_saved_register_used (tree call_expr)
9810 {
9811 CUMULATIVE_ARGS cum_v;
9812 cumulative_args_t cum;
9813 tree parameter;
9814 enum machine_mode mode;
9815 tree type;
9816 rtx parm_rtx;
9817 int reg, i;
9818
9819 INIT_CUMULATIVE_ARGS (cum_v, NULL, NULL, 0, 0);
9820 cum = pack_cumulative_args (&cum_v);
9821
9822 for (i = 0; i < call_expr_nargs (call_expr); i++)
9823 {
9824 parameter = CALL_EXPR_ARG (call_expr, i);
9825 gcc_assert (parameter);
9826
9827 /* For an undeclared variable passed as parameter we will get
9828 an ERROR_MARK node here. */
9829 if (TREE_CODE (parameter) == ERROR_MARK)
9830 return true;
9831
9832 type = TREE_TYPE (parameter);
9833 gcc_assert (type);
9834
9835 mode = TYPE_MODE (type);
9836 gcc_assert (mode);
9837
9838 if (pass_by_reference (&cum_v, mode, type, true))
9839 {
9840 mode = Pmode;
9841 type = build_pointer_type (type);
9842 }
9843
9844 parm_rtx = s390_function_arg (cum, mode, type, 0);
9845
9846 s390_function_arg_advance (cum, mode, type, 0);
9847
9848 if (!parm_rtx)
9849 continue;
9850
9851 if (REG_P (parm_rtx))
9852 {
9853 for (reg = 0;
9854 reg < HARD_REGNO_NREGS (REGNO (parm_rtx), GET_MODE (parm_rtx));
9855 reg++)
9856 if (!call_used_regs[reg + REGNO (parm_rtx)])
9857 return true;
9858 }
9859
9860 if (GET_CODE (parm_rtx) == PARALLEL)
9861 {
9862 int i;
9863
9864 for (i = 0; i < XVECLEN (parm_rtx, 0); i++)
9865 {
9866 rtx r = XEXP (XVECEXP (parm_rtx, 0, i), 0);
9867
9868 gcc_assert (REG_P (r));
9869
9870 for (reg = 0;
9871 reg < HARD_REGNO_NREGS (REGNO (r), GET_MODE (r));
9872 reg++)
9873 if (!call_used_regs[reg + REGNO (r)])
9874 return true;
9875 }
9876 }
9877
9878 }
9879 return false;
9880 }
9881
9882 /* Return true if the given call expression can be
9883 turned into a sibling call.
9884 DECL holds the declaration of the function to be called whereas
9885 EXP is the call expression itself. */
9886
9887 static bool
9888 s390_function_ok_for_sibcall (tree decl, tree exp)
9889 {
9890 /* The TPF epilogue uses register 1. */
9891 if (TARGET_TPF_PROFILING)
9892 return false;
9893
9894 /* The 31 bit PLT code uses register 12 (GOT pointer - call saved)
9895 which would have to be restored before the sibcall. */
9896 if (!TARGET_64BIT && flag_pic && decl && !targetm.binds_local_p (decl))
9897 return false;
9898
9899 /* Register 6 on s390 is available as an argument register but unfortunately
9900 "caller saved". This makes functions needing this register for arguments
9901 not suitable for sibcalls. */
9902 return !s390_call_saved_register_used (exp);
9903 }
9904
9905 /* Return the fixed registers used for condition codes. */
9906
9907 static bool
9908 s390_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
9909 {
9910 *p1 = CC_REGNUM;
9911 *p2 = INVALID_REGNUM;
9912
9913 return true;
9914 }
9915
9916 /* This function is used by the call expanders of the machine description.
9917 It emits the call insn itself together with the necessary operations
9918 to adjust the target address and returns the emitted insn.
9919 ADDR_LOCATION is the target address rtx
9920 TLS_CALL the location of the thread-local symbol
9921 RESULT_REG the register where the result of the call should be stored
9922 RETADDR_REG the register where the return address should be stored
9923 If this parameter is NULL_RTX the call is considered
9924 to be a sibling call. */
9925
9926 rtx
9927 s390_emit_call (rtx addr_location, rtx tls_call, rtx result_reg,
9928 rtx retaddr_reg)
9929 {
9930 bool plt_call = false;
9931 rtx insn;
9932 rtx call;
9933 rtx clobber;
9934 rtvec vec;
9935
9936 /* Direct function calls need special treatment. */
9937 if (GET_CODE (addr_location) == SYMBOL_REF)
9938 {
9939 /* When calling a global routine in PIC mode, we must
9940 replace the symbol itself with the PLT stub. */
9941 if (flag_pic && !SYMBOL_REF_LOCAL_P (addr_location))
9942 {
9943 if (retaddr_reg != NULL_RTX)
9944 {
9945 addr_location = gen_rtx_UNSPEC (Pmode,
9946 gen_rtvec (1, addr_location),
9947 UNSPEC_PLT);
9948 addr_location = gen_rtx_CONST (Pmode, addr_location);
9949 plt_call = true;
9950 }
9951 else
9952 /* For -fpic code the PLT entries might use r12 which is
9953 call-saved. Therefore we cannot do a sibcall when
9954 calling directly using a symbol ref. When reaching
9955 this point we decided (in s390_function_ok_for_sibcall)
9956 to do a sibcall for a function pointer but one of the
9957 optimizers was able to get rid of the function pointer
9958 by propagating the symbol ref into the call. This
9959 optimization is illegal for S/390 so we turn the direct
9960 call into an indirect call again. */
9961 addr_location = force_reg (Pmode, addr_location);
9962 }
9963
9964 /* Unless we can use the bras(l) insn, force the
9965 routine address into a register. */
9966 if (!TARGET_SMALL_EXEC && !TARGET_CPU_ZARCH)
9967 {
9968 if (flag_pic)
9969 addr_location = legitimize_pic_address (addr_location, 0);
9970 else
9971 addr_location = force_reg (Pmode, addr_location);
9972 }
9973 }
9974
9975 /* If it is already an indirect call or the code above moved the
9976 SYMBOL_REF to somewhere else make sure the address can be found in
9977 register 1. */
9978 if (retaddr_reg == NULL_RTX
9979 && GET_CODE (addr_location) != SYMBOL_REF
9980 && !plt_call)
9981 {
9982 emit_move_insn (gen_rtx_REG (Pmode, SIBCALL_REGNUM), addr_location);
9983 addr_location = gen_rtx_REG (Pmode, SIBCALL_REGNUM);
9984 }
9985
9986 addr_location = gen_rtx_MEM (QImode, addr_location);
9987 call = gen_rtx_CALL (VOIDmode, addr_location, const0_rtx);
9988
9989 if (result_reg != NULL_RTX)
9990 call = gen_rtx_SET (VOIDmode, result_reg, call);
9991
9992 if (retaddr_reg != NULL_RTX)
9993 {
9994 clobber = gen_rtx_CLOBBER (VOIDmode, retaddr_reg);
9995
9996 if (tls_call != NULL_RTX)
9997 vec = gen_rtvec (3, call, clobber,
9998 gen_rtx_USE (VOIDmode, tls_call));
9999 else
10000 vec = gen_rtvec (2, call, clobber);
10001
10002 call = gen_rtx_PARALLEL (VOIDmode, vec);
10003 }
10004
10005 insn = emit_call_insn (call);
10006
10007 /* 31-bit PLT stubs and tls calls use the GOT register implicitly. */
10008 if ((!TARGET_64BIT && plt_call) || tls_call != NULL_RTX)
10009 {
10010 /* s390_function_ok_for_sibcall should
10011 have denied sibcalls in this case. */
10012 gcc_assert (retaddr_reg != NULL_RTX);
10013 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, 12));
10014 }
10015 return insn;
10016 }
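/* For reference, the insn emitted above has roughly this RTL shape for a
   normal (non-sibling) call with a result and a return-address register:

       (parallel [(set RESULT_REG
                       (call (mem:QI ADDR_LOCATION) (const_int 0)))
                  (clobber RETADDR_REG)])

   with an additional (use TLS_CALL) element for TLS calls; sibling calls
   (RETADDR_REG == NULL_RTX) omit the clobber and the enclosing parallel.  */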
10017
10018 /* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
10019
10020 static void
10021 s390_conditional_register_usage (void)
10022 {
10023 int i;
10024
10025 if (flag_pic)
10026 {
10027 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
10028 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
10029 }
10030 if (TARGET_CPU_ZARCH)
10031 {
10032 fixed_regs[BASE_REGNUM] = 0;
10033 call_used_regs[BASE_REGNUM] = 0;
10034 fixed_regs[RETURN_REGNUM] = 0;
10035 call_used_regs[RETURN_REGNUM] = 0;
10036 }
10037 if (TARGET_64BIT)
10038 {
10039 for (i = 24; i < 32; i++)
10040 call_used_regs[i] = call_really_used_regs[i] = 0;
10041 }
10042 else
10043 {
10044 for (i = 18; i < 20; i++)
10045 call_used_regs[i] = call_really_used_regs[i] = 0;
10046 }
10047
10048 if (TARGET_SOFT_FLOAT)
10049 {
10050 for (i = 16; i < 32; i++)
10051 call_used_regs[i] = fixed_regs[i] = 1;
10052 }
10053 }
10054
10055 /* Corresponding function to eh_return expander. */
10056
10057 static GTY(()) rtx s390_tpf_eh_return_symbol;
10058 void
10059 s390_emit_tpf_eh_return (rtx target)
10060 {
10061 rtx insn, reg;
10062
10063 if (!s390_tpf_eh_return_symbol)
10064 s390_tpf_eh_return_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tpf_eh_return");
10065
10066 reg = gen_rtx_REG (Pmode, 2);
10067
10068 emit_move_insn (reg, target);
10069 insn = s390_emit_call (s390_tpf_eh_return_symbol, NULL_RTX, reg,
10070 gen_rtx_REG (Pmode, RETURN_REGNUM));
10071 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
10072
10073 emit_move_insn (EH_RETURN_HANDLER_RTX, reg);
10074 }
10075
10076 /* Rework the prologue/epilogue to avoid saving/restoring
10077 registers unnecessarily. */
10078
10079 static void
10080 s390_optimize_prologue (void)
10081 {
10082 rtx insn, new_insn, next_insn;
10083
10084 /* Do a final recompute of the frame-related data. */
10085
10086 s390_update_frame_layout ();
10087
10088 /* If all special registers are in fact used, there's nothing we
10089 can do, so no point in walking the insn list. */
10090
10091 if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
10092 && cfun_frame_layout.last_save_gpr >= BASE_REGNUM
10093 && (TARGET_CPU_ZARCH
10094 || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
10095 && cfun_frame_layout.last_save_gpr >= RETURN_REGNUM)))
10096 return;
10097
10098 /* Search for prologue/epilogue insns and replace them. */
10099
10100 for (insn = get_insns (); insn; insn = next_insn)
10101 {
10102 int first, last, off;
10103 rtx set, base, offset;
10104
10105 next_insn = NEXT_INSN (insn);
10106
10107 if (GET_CODE (insn) != INSN)
10108 continue;
10109
10110 if (GET_CODE (PATTERN (insn)) == PARALLEL
10111 && store_multiple_operation (PATTERN (insn), VOIDmode))
10112 {
10113 set = XVECEXP (PATTERN (insn), 0, 0);
10114 first = REGNO (SET_SRC (set));
10115 last = first + XVECLEN (PATTERN (insn), 0) - 1;
10116 offset = const0_rtx;
10117 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
10118 off = INTVAL (offset);
10119
10120 if (GET_CODE (base) != REG || off < 0)
10121 continue;
10122 if (cfun_frame_layout.first_save_gpr != -1
10123 && (cfun_frame_layout.first_save_gpr < first
10124 || cfun_frame_layout.last_save_gpr > last))
10125 continue;
10126 if (REGNO (base) != STACK_POINTER_REGNUM
10127 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10128 continue;
10129 if (first > BASE_REGNUM || last < BASE_REGNUM)
10130 continue;
10131
10132 if (cfun_frame_layout.first_save_gpr != -1)
10133 {
10134 new_insn = save_gprs (base,
10135 off + (cfun_frame_layout.first_save_gpr
10136 - first) * UNITS_PER_LONG,
10137 cfun_frame_layout.first_save_gpr,
10138 cfun_frame_layout.last_save_gpr);
10139 new_insn = emit_insn_before (new_insn, insn);
10140 INSN_ADDRESSES_NEW (new_insn, -1);
10141 }
10142
10143 remove_insn (insn);
10144 continue;
10145 }
10146
10147 if (cfun_frame_layout.first_save_gpr == -1
10148 && GET_CODE (PATTERN (insn)) == SET
10149 && GET_CODE (SET_SRC (PATTERN (insn))) == REG
10150 && (REGNO (SET_SRC (PATTERN (insn))) == BASE_REGNUM
10151 || (!TARGET_CPU_ZARCH
10152 && REGNO (SET_SRC (PATTERN (insn))) == RETURN_REGNUM))
10153 && GET_CODE (SET_DEST (PATTERN (insn))) == MEM)
10154 {
10155 set = PATTERN (insn);
10156 first = REGNO (SET_SRC (set));
10157 offset = const0_rtx;
10158 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
10159 off = INTVAL (offset);
10160
10161 if (GET_CODE (base) != REG || off < 0)
10162 continue;
10163 if (REGNO (base) != STACK_POINTER_REGNUM
10164 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10165 continue;
10166
10167 remove_insn (insn);
10168 continue;
10169 }
10170
10171 if (GET_CODE (PATTERN (insn)) == PARALLEL
10172 && load_multiple_operation (PATTERN (insn), VOIDmode))
10173 {
10174 set = XVECEXP (PATTERN (insn), 0, 0);
10175 first = REGNO (SET_DEST (set));
10176 last = first + XVECLEN (PATTERN (insn), 0) - 1;
10177 offset = const0_rtx;
10178 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
10179 off = INTVAL (offset);
10180
10181 if (GET_CODE (base) != REG || off < 0)
10182 continue;
10183 if (cfun_frame_layout.first_restore_gpr != -1
10184 && (cfun_frame_layout.first_restore_gpr < first
10185 || cfun_frame_layout.last_restore_gpr > last))
10186 continue;
10187 if (REGNO (base) != STACK_POINTER_REGNUM
10188 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10189 continue;
10190 if (first > BASE_REGNUM || last < BASE_REGNUM)
10191 continue;
10192
10193 if (cfun_frame_layout.first_restore_gpr != -1)
10194 {
10195 new_insn = restore_gprs (base,
10196 off + (cfun_frame_layout.first_restore_gpr
10197 - first) * UNITS_PER_LONG,
10198 cfun_frame_layout.first_restore_gpr,
10199 cfun_frame_layout.last_restore_gpr);
10200 new_insn = emit_insn_before (new_insn, insn);
10201 INSN_ADDRESSES_NEW (new_insn, -1);
10202 }
10203
10204 remove_insn (insn);
10205 continue;
10206 }
10207
10208 if (cfun_frame_layout.first_restore_gpr == -1
10209 && GET_CODE (PATTERN (insn)) == SET
10210 && GET_CODE (SET_DEST (PATTERN (insn))) == REG
10211 && (REGNO (SET_DEST (PATTERN (insn))) == BASE_REGNUM
10212 || (!TARGET_CPU_ZARCH
10213 && REGNO (SET_DEST (PATTERN (insn))) == RETURN_REGNUM))
10214 && GET_CODE (SET_SRC (PATTERN (insn))) == MEM)
10215 {
10216 set = PATTERN (insn);
10217 first = REGNO (SET_DEST (set));
10218 offset = const0_rtx;
10219 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
10220 off = INTVAL (offset);
10221
10222 if (GET_CODE (base) != REG || off < 0)
10223 continue;
10224 if (REGNO (base) != STACK_POINTER_REGNUM
10225 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10226 continue;
10227
10228 remove_insn (insn);
10229 continue;
10230 }
10231 }
10232 }
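/* Worked example of the rewrite above (hypothetical numbers, assuming
   64-bit mode where UNITS_PER_LONG is 8): a generic prologue insn

       stmg  %r6,%r15,48(%r15)        # FIRST == 6, OFF == 48

   is replaced, when the frame layout records that only %r14-%r15 really
   need saving, by a store multiple starting at offset
   48 + (14 - 6) * 8 == 112, i.e. stmg %r14,%r15,112(%r15).  */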
10233
10234 /* On z10 and later the dynamic branch prediction must see the
10235 backward jump within a certain window. If not, it falls back to
10236 the static prediction. This function rearranges the loop backward
10237 branch in a way which makes the static prediction always correct.
10238 The function returns true if it added an instruction. */
10239 static bool
10240 s390_fix_long_loop_prediction (rtx insn)
10241 {
10242 rtx set = single_set (insn);
10243 rtx code_label, label_ref, new_label;
10244 rtx uncond_jump;
10245 rtx cur_insn;
10246 rtx tmp;
10247 int distance;
10248
10249 /* This will exclude branch on count and branch on index patterns
10250 since these are correctly statically predicted. */
10251 if (!set
10252 || SET_DEST (set) != pc_rtx
10253 || GET_CODE (SET_SRC(set)) != IF_THEN_ELSE)
10254 return false;
10255
10256 label_ref = (GET_CODE (XEXP (SET_SRC (set), 1)) == LABEL_REF ?
10257 XEXP (SET_SRC (set), 1) : XEXP (SET_SRC (set), 2));
10258
10259 gcc_assert (GET_CODE (label_ref) == LABEL_REF);
10260
10261 code_label = XEXP (label_ref, 0);
10262
10263 if (INSN_ADDRESSES (INSN_UID (code_label)) == -1
10264 || INSN_ADDRESSES (INSN_UID (insn)) == -1
10265 || (INSN_ADDRESSES (INSN_UID (insn))
10266 - INSN_ADDRESSES (INSN_UID (code_label)) < PREDICT_DISTANCE))
10267 return false;
10268
10269 for (distance = 0, cur_insn = PREV_INSN (insn);
10270 distance < PREDICT_DISTANCE - 6;
10271 distance += get_attr_length (cur_insn), cur_insn = PREV_INSN (cur_insn))
10272 if (!cur_insn || JUMP_P (cur_insn) || LABEL_P (cur_insn))
10273 return false;
10274
10275 new_label = gen_label_rtx ();
10276 uncond_jump = emit_jump_insn_after (
10277 gen_rtx_SET (VOIDmode, pc_rtx,
10278 gen_rtx_LABEL_REF (VOIDmode, code_label)),
10279 insn);
10280 emit_label_after (new_label, uncond_jump);
10281
10282 tmp = XEXP (SET_SRC (set), 1);
10283 XEXP (SET_SRC (set), 1) = XEXP (SET_SRC (set), 2);
10284 XEXP (SET_SRC (set), 2) = tmp;
10285 INSN_CODE (insn) = -1;
10286
10287 XEXP (label_ref, 0) = new_label;
10288 JUMP_LABEL (insn) = new_label;
10289 JUMP_LABEL (uncond_jump) = code_label;
10290
10291 return true;
10292 }
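/* In effect, a loop-closing branch that is too far away, e.g.

       Lhead:  ...
               jne   Lhead            # backward conditional branch

   is rewritten (illustration only) into

               je    Lnew             # inverted condition, short forward jump
               j     Lhead            # unconditional backward jump
       Lnew:

   so the backward jump becomes unconditional (trivially predicted) and
   the remaining conditional branch is forward, which static prediction
   treats as not taken; that is correct for every iteration except the
   last one.  */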
10293
10294 /* Returns 1 if INSN reads the value of REG for purposes not related
10295 to addressing of memory, and 0 otherwise. */
10296 static int
10297 s390_non_addr_reg_read_p (rtx reg, rtx insn)
10298 {
10299 return reg_referenced_p (reg, PATTERN (insn))
10300 && !reg_used_in_mem_p (REGNO (reg), PATTERN (insn));
10301 }
10302
10303 /* Starting from INSN find_cond_jump looks downwards in the insn
10304 stream for a single jump insn which is the last user of the
10305 condition code set in INSN. */
10306 static rtx
10307 find_cond_jump (rtx insn)
10308 {
10309 for (; insn; insn = NEXT_INSN (insn))
10310 {
10311 rtx ite, cc;
10312
10313 if (LABEL_P (insn))
10314 break;
10315
10316 if (!JUMP_P (insn))
10317 {
10318 if (reg_mentioned_p (gen_rtx_REG (CCmode, CC_REGNUM), insn))
10319 break;
10320 continue;
10321 }
10322
10323 /* This will be triggered by a return. */
10324 if (GET_CODE (PATTERN (insn)) != SET)
10325 break;
10326
10327 gcc_assert (SET_DEST (PATTERN (insn)) == pc_rtx);
10328 ite = SET_SRC (PATTERN (insn));
10329
10330 if (GET_CODE (ite) != IF_THEN_ELSE)
10331 break;
10332
10333 cc = XEXP (XEXP (ite, 0), 0);
10334 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc)))
10335 break;
10336
10337 if (find_reg_note (insn, REG_DEAD, cc))
10338 return insn;
10339 break;
10340 }
10341
10342 return NULL_RTX;
10343 }
10344
10345 /* Swap the condition in COND and the operands in OP0 and OP1 so that
10346 the semantics does not change. If NULL_RTX is passed as COND the
10347 function tries to find the conditional jump starting with INSN. */
10348 static void
10349 s390_swap_cmp (rtx cond, rtx *op0, rtx *op1, rtx insn)
10350 {
10351 rtx tmp = *op0;
10352
10353 if (cond == NULL_RTX)
10354 {
10355 rtx jump = find_cond_jump (NEXT_INSN (insn));
10356 jump = jump ? single_set (jump) : NULL_RTX;
10357
10358 if (jump == NULL_RTX)
10359 return;
10360
10361 cond = XEXP (XEXP (jump, 1), 0);
10362 }
10363
10364 *op0 = *op1;
10365 *op1 = tmp;
10366 PUT_CODE (cond, swap_condition (GET_CODE (cond)));
10367 }
10368
10369 /* On z10, instructions of the compare-and-branch family have the
10370 property of accessing the register occurring as the second operand with
10371 its bits complemented. If such a compare is grouped with a second
10372 instruction that accesses the same register non-complemented, and
10373 if that register's value is delivered via a bypass, then the
10374 pipeline recycles, thereby causing significant performance decline.
10375 This function locates such situations and exchanges the two
10376 operands of the compare. The function returns true whenever it
10377 added an insn. */
10378 static bool
10379 s390_z10_optimize_cmp (rtx insn)
10380 {
10381 rtx prev_insn, next_insn;
10382 bool insn_added_p = false;
10383 rtx cond, *op0, *op1;
10384
10385 if (GET_CODE (PATTERN (insn)) == PARALLEL)
10386 {
10387 /* Handle compare and branch and branch on count
10388 instructions. */
10389 rtx pattern = single_set (insn);
10390
10391 if (!pattern
10392 || SET_DEST (pattern) != pc_rtx
10393 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE)
10394 return false;
10395
10396 cond = XEXP (SET_SRC (pattern), 0);
10397 op0 = &XEXP (cond, 0);
10398 op1 = &XEXP (cond, 1);
10399 }
10400 else if (GET_CODE (PATTERN (insn)) == SET)
10401 {
10402 rtx src, dest;
10403
10404 /* Handle normal compare instructions. */
10405 src = SET_SRC (PATTERN (insn));
10406 dest = SET_DEST (PATTERN (insn));
10407
10408 if (!REG_P (dest)
10409 || !CC_REGNO_P (REGNO (dest))
10410 || GET_CODE (src) != COMPARE)
10411 return false;
10412
10413 /* s390_swap_cmp will try to find the conditional
10414 jump when passing NULL_RTX as condition. */
10415 cond = NULL_RTX;
10416 op0 = &XEXP (src, 0);
10417 op1 = &XEXP (src, 1);
10418 }
10419 else
10420 return false;
10421
10422 if (!REG_P (*op0) || !REG_P (*op1))
10423 return false;
10424
10425 if (GET_MODE_CLASS (GET_MODE (*op0)) != MODE_INT)
10426 return false;
10427
10428 /* Swap the COMPARE arguments and its mask if there is a
10429 conflicting access in the previous insn. */
10430 prev_insn = prev_active_insn (insn);
10431 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
10432 && reg_referenced_p (*op1, PATTERN (prev_insn)))
10433 s390_swap_cmp (cond, op0, op1, insn);
10434
10435 /* Check if there is a conflict with the next insn. If there
10436 was no conflict with the previous insn, then swap the
10437 COMPARE arguments and its mask. If we already swapped
10438 the operands, or if swapping them would cause a conflict
10439 with the previous insn, issue a NOP after the COMPARE in
10440 order to separate the two instructions. */
10441 next_insn = next_active_insn (insn);
10442 if (next_insn != NULL_RTX && INSN_P (next_insn)
10443 && s390_non_addr_reg_read_p (*op1, next_insn))
10444 {
10445 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
10446 && s390_non_addr_reg_read_p (*op0, prev_insn))
10447 {
10448 if (REGNO (*op1) == 0)
10449 emit_insn_after (gen_nop1 (), insn);
10450 else
10451 emit_insn_after (gen_nop (), insn);
10452 insn_added_p = true;
10453 }
10454 else
10455 s390_swap_cmp (cond, op0, op1, insn);
10456 }
10457 return insn_added_p;
10458 }
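/* Sketch of the hazard handled above (hypothetical registers): for a
   compare-and-branch such as "crj %r2,%r3,..." the second operand %r3 is
   the one accessed with its bits complemented, so if a neighbouring insn
   also reads %r3 the operands are swapped to "crj %r3,%r2,..." (with the
   condition adjusted via s390_swap_cmp); if swapping would merely move
   the conflict to the previous insn, a NOP is emitted after the compare
   instead.  */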
10459
10460 /* Perform machine-dependent processing. */
10461
10462 static void
10463 s390_reorg (void)
10464 {
10465 bool pool_overflow = false;
10466
10467 /* Make sure all splits have been performed; splits after
10468 machine_dependent_reorg might confuse insn length counts. */
10469 split_all_insns_noflow ();
10470
10471 /* Install the main literal pool and the associated base
10472 register load insns.
10473
10474 In addition, there are two problematic situations we need
10475 to correct:
10476
10477 - the literal pool might be > 4096 bytes in size, so that
10478 some of its elements cannot be directly accessed
10479
10480 - a branch target might be > 64K away from the branch, so that
10481 it is not possible to use a PC-relative instruction.
10482
10483 To fix those, we split the single literal pool into multiple
10484 pool chunks, reloading the pool base register at various
10485 points throughout the function to ensure it always points to
10486 the pool chunk the following code expects, and / or replace
10487 PC-relative branches by absolute branches.
10488
10489 However, the two problems are interdependent: splitting the
10490 literal pool can move a branch further away from its target,
10491 causing the 64K limit to overflow, and on the other hand,
10492 replacing a PC-relative branch by an absolute branch means
10493 we need to put the branch target address into the literal
10494 pool, possibly causing it to overflow.
10495
10496 So, we loop trying to fix up both problems until we manage
10497 to satisfy both conditions at the same time. Note that the
10498 loop is guaranteed to terminate as every pass of the loop
10499 strictly decreases the total number of PC-relative branches
10500 in the function. (This is not completely true as there
10501 might be branch-over-pool insns introduced by chunkify_start.
10502 Those never need to be split however.) */
10503
10504 for (;;)
10505 {
10506 struct constant_pool *pool = NULL;
10507
10508 /* Collect the literal pool. */
10509 if (!pool_overflow)
10510 {
10511 pool = s390_mainpool_start ();
10512 if (!pool)
10513 pool_overflow = true;
10514 }
10515
10516 /* If literal pool overflowed, start to chunkify it. */
10517 if (pool_overflow)
10518 pool = s390_chunkify_start ();
10519
10520 /* Split out-of-range branches. If this has created new
10521 literal pool entries, cancel current chunk list and
10522 recompute it. zSeries machines have large branch
10523 instructions, so we never need to split a branch. */
10524 if (!TARGET_CPU_ZARCH && s390_split_branches ())
10525 {
10526 if (pool_overflow)
10527 s390_chunkify_cancel (pool);
10528 else
10529 s390_mainpool_cancel (pool);
10530
10531 continue;
10532 }
10533
10534 /* If we made it up to here, both conditions are satisfied.
10535 Finish up literal pool related changes. */
10536 if (pool_overflow)
10537 s390_chunkify_finish (pool);
10538 else
10539 s390_mainpool_finish (pool);
10540
10541 /* We're done splitting branches. */
10542 cfun->machine->split_branches_pending_p = false;
10543 break;
10544 }
10545
10546 /* Generate out-of-pool execute target insns. */
10547 if (TARGET_CPU_ZARCH)
10548 {
10549 rtx insn, label, target;
10550
10551 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10552 {
10553 label = s390_execute_label (insn);
10554 if (!label)
10555 continue;
10556
10557 gcc_assert (label != const0_rtx);
10558
10559 target = emit_label (XEXP (label, 0));
10560 INSN_ADDRESSES_NEW (target, -1);
10561
10562 target = emit_insn (s390_execute_target (insn));
10563 INSN_ADDRESSES_NEW (target, -1);
10564 }
10565 }
10566
10567 /* Try to optimize prologue and epilogue further. */
10568 s390_optimize_prologue ();
10569
10570 /* Walk over the insns and do some >=z10 specific changes. */
10571 if (s390_tune == PROCESSOR_2097_Z10
10572 || s390_tune == PROCESSOR_2817_Z196)
10573 {
10574 rtx insn;
10575 bool insn_added_p = false;
10576
10577 /* The insn lengths and addresses have to be up to date for the
10578 following manipulations. */
10579 shorten_branches (get_insns ());
10580
10581 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10582 {
10583 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
10584 continue;
10585
10586 if (JUMP_P (insn))
10587 insn_added_p |= s390_fix_long_loop_prediction (insn);
10588
10589 if ((GET_CODE (PATTERN (insn)) == PARALLEL
10590 || GET_CODE (PATTERN (insn)) == SET)
10591 && s390_tune == PROCESSOR_2097_Z10)
10592 insn_added_p |= s390_z10_optimize_cmp (insn);
10593 }
10594
10595 /* Adjust branches if we added new instructions. */
10596 if (insn_added_p)
10597 shorten_branches (get_insns ());
10598 }
10599 }
10600
10601 /* Return true if INSN is a fp load insn writing register REGNO. */
10602 static inline bool
10603 s390_fpload_toreg (rtx insn, unsigned int regno)
10604 {
10605 rtx set;
10606 enum attr_type flag = s390_safe_attr_type (insn);
10607
10608 if (flag != TYPE_FLOADSF && flag != TYPE_FLOADDF)
10609 return false;
10610
10611 set = single_set (insn);
10612
10613 if (set == NULL_RTX)
10614 return false;
10615
10616 if (!REG_P (SET_DEST (set)) || !MEM_P (SET_SRC (set)))
10617 return false;
10618
10619 if (REGNO (SET_DEST (set)) != regno)
10620 return false;
10621
10622 return true;
10623 }
10624
10625 /* This value describes the distance to be avoided between an
10626 arithmetic fp instruction and an fp load writing the same register.
10627 Z10_EARLYLOAD_DISTANCE - 1 as well as Z10_EARLYLOAD_DISTANCE + 1 is
10628 fine but the exact value has to be avoided. Otherwise the FP
10629 pipeline will throw an exception causing a major penalty. */
10630 #define Z10_EARLYLOAD_DISTANCE 7
10631
10632 /* Rearrange the ready list in order to avoid the situation described
10633 for Z10_EARLYLOAD_DISTANCE. A problematic load instruction is
10634 moved to the very end of the ready list. */
10635 static void
10636 s390_z10_prevent_earlyload_conflicts (rtx *ready, int *nready_p)
10637 {
10638 unsigned int regno;
10639 int nready = *nready_p;
10640 rtx tmp;
10641 int i;
10642 rtx insn;
10643 rtx set;
10644 enum attr_type flag;
10645 int distance;
10646
10647 /* Skip DISTANCE - 1 active insns. */
10648 for (insn = last_scheduled_insn, distance = Z10_EARLYLOAD_DISTANCE - 1;
10649 distance > 0 && insn != NULL_RTX;
10650 distance--, insn = prev_active_insn (insn))
10651 if (CALL_P (insn) || JUMP_P (insn))
10652 return;
10653
10654 if (insn == NULL_RTX)
10655 return;
10656
10657 set = single_set (insn);
10658
10659 if (set == NULL_RTX || !REG_P (SET_DEST (set))
10660 || GET_MODE_CLASS (GET_MODE (SET_DEST (set))) != MODE_FLOAT)
10661 return;
10662
10663 flag = s390_safe_attr_type (insn);
10664
10665 if (flag == TYPE_FLOADSF || flag == TYPE_FLOADDF)
10666 return;
10667
10668 regno = REGNO (SET_DEST (set));
10669 i = nready - 1;
10670
10671 while (!s390_fpload_toreg (ready[i], regno) && i > 0)
10672 i--;
10673
10674 if (!i)
10675 return;
10676
10677 tmp = ready[i];
10678 memmove (&ready[1], &ready[0], sizeof (rtx) * i);
10679 ready[0] = tmp;
10680 }
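/* Note on the shuffle above: the scheduler generally issues insns from
   the end of the READY array towards index 0, so placing the offending
   fp load at ready[0] defers it as long as possible; this is what
   "moved to the very end of the ready list" refers to.  */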
10681
10682 /* This function is called via hook TARGET_SCHED_REORDER before
10683 issuing one insn from list READY which contains *NREADYP entries.
10684 For target z10 it reorders load instructions to avoid early load
10685 conflicts in the floating point pipeline. */
10686 static int
10687 s390_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
10688 rtx *ready, int *nreadyp, int clock ATTRIBUTE_UNUSED)
10689 {
10690 if (s390_tune == PROCESSOR_2097_Z10)
10691 if (reload_completed && *nreadyp > 1)
10692 s390_z10_prevent_earlyload_conflicts (ready, nreadyp);
10693
10694 return s390_issue_rate ();
10695 }
10696
10697 /* This function is called via hook TARGET_SCHED_VARIABLE_ISSUE after
10698 the scheduler has issued INSN. It stores the last issued insn into
10699 last_scheduled_insn in order to make it available for
10700 s390_sched_reorder. */
10701 static int
10702 s390_sched_variable_issue (FILE *file ATTRIBUTE_UNUSED,
10703 int verbose ATTRIBUTE_UNUSED,
10704 rtx insn, int more)
10705 {
10706 last_scheduled_insn = insn;
10707
10708 if (GET_CODE (PATTERN (insn)) != USE
10709 && GET_CODE (PATTERN (insn)) != CLOBBER)
10710 return more - 1;
10711 else
10712 return more;
10713 }
10714
10715 static void
10716 s390_sched_init (FILE *file ATTRIBUTE_UNUSED,
10717 int verbose ATTRIBUTE_UNUSED,
10718 int max_ready ATTRIBUTE_UNUSED)
10719 {
10720 last_scheduled_insn = NULL_RTX;
10721 }
10722
10723 /* This function checks the whole of insn X for memory references. The
10724 function always returns zero because the framework it is called
10725 from would stop recursively analyzing the insn upon a return value
10726 other than zero. The real result of this function is updating
10727 counter variable MEM_COUNT. */
10728 static int
10729 check_dpu (rtx *x, unsigned *mem_count)
10730 {
10731 if (*x != NULL_RTX && MEM_P (*x))
10732 (*mem_count)++;
10733 return 0;
10734 }
10735
10736 /* This target hook implementation for TARGET_LOOP_UNROLL_ADJUST calculates
10737 a new number of times struct loop *loop should be unrolled if tuned for cpus with
10738 a built-in stride prefetcher.
10739 The loop is analyzed for memory accesses by calling check_dpu for
10740 each rtx of the loop. Depending on the loop_depth and the amount of
10741 memory accesses a new number <=nunroll is returned to improve the
10742 behaviour of the hardware prefetch unit. */
10743 static unsigned
10744 s390_loop_unroll_adjust (unsigned nunroll, struct loop *loop)
10745 {
10746 basic_block *bbs;
10747 rtx insn;
10748 unsigned i;
10749 unsigned mem_count = 0;
10750
10751 if (s390_tune != PROCESSOR_2097_Z10 && s390_tune != PROCESSOR_2817_Z196)
10752 return nunroll;
10753
10754 /* Count the number of memory references within the loop body. */
10755 bbs = get_loop_body (loop);
10756 for (i = 0; i < loop->num_nodes; i++)
10757 {
10758 for (insn = BB_HEAD (bbs[i]); insn != BB_END (bbs[i]); insn = NEXT_INSN (insn))
10759 if (INSN_P (insn) && INSN_CODE (insn) != -1)
10760 for_each_rtx (&insn, (rtx_function) check_dpu, &mem_count);
10761 }
10762 free (bbs);
10763
10764 /* Prevent division by zero, and we do not need to adjust nunroll in this case. */
10765 if (mem_count == 0)
10766 return nunroll;
10767
10768 switch (loop_depth(loop))
10769 {
10770 case 1:
10771 return MIN (nunroll, 28 / mem_count);
10772 case 2:
10773 return MIN (nunroll, 22 / mem_count);
10774 default:
10775 return MIN (nunroll, 16 / mem_count);
10776 }
10777 }
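/* Worked example (hypothetical numbers): for an innermost loop
   (loop_depth == 1) with 4 memory references the hook above returns
   MIN (nunroll, 28 / 4) == MIN (nunroll, 7), so a requested unroll
   factor of 8 would be reduced to 7 for the benefit of the stride
   prefetcher.  */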
10778
10779 /* Initialize GCC target structure. */
10780
10781 #undef TARGET_ASM_ALIGNED_HI_OP
10782 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
10783 #undef TARGET_ASM_ALIGNED_DI_OP
10784 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
10785 #undef TARGET_ASM_INTEGER
10786 #define TARGET_ASM_INTEGER s390_assemble_integer
10787
10788 #undef TARGET_ASM_OPEN_PAREN
10789 #define TARGET_ASM_OPEN_PAREN ""
10790
10791 #undef TARGET_ASM_CLOSE_PAREN
10792 #define TARGET_ASM_CLOSE_PAREN ""
10793
10794 #undef TARGET_OPTION_OVERRIDE
10795 #define TARGET_OPTION_OVERRIDE s390_option_override
10796
10797 #undef TARGET_ENCODE_SECTION_INFO
10798 #define TARGET_ENCODE_SECTION_INFO s390_encode_section_info
10799
10800 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10801 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
10802
10803 #ifdef HAVE_AS_TLS
10804 #undef TARGET_HAVE_TLS
10805 #define TARGET_HAVE_TLS true
10806 #endif
10807 #undef TARGET_CANNOT_FORCE_CONST_MEM
10808 #define TARGET_CANNOT_FORCE_CONST_MEM s390_cannot_force_const_mem
10809
10810 #undef TARGET_DELEGITIMIZE_ADDRESS
10811 #define TARGET_DELEGITIMIZE_ADDRESS s390_delegitimize_address
10812
10813 #undef TARGET_LEGITIMIZE_ADDRESS
10814 #define TARGET_LEGITIMIZE_ADDRESS s390_legitimize_address
10815
10816 #undef TARGET_RETURN_IN_MEMORY
10817 #define TARGET_RETURN_IN_MEMORY s390_return_in_memory
10818
10819 #undef TARGET_INIT_BUILTINS
10820 #define TARGET_INIT_BUILTINS s390_init_builtins
10821 #undef TARGET_EXPAND_BUILTIN
10822 #define TARGET_EXPAND_BUILTIN s390_expand_builtin
10823
10824 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
10825 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA s390_output_addr_const_extra
10826
10827 #undef TARGET_ASM_OUTPUT_MI_THUNK
10828 #define TARGET_ASM_OUTPUT_MI_THUNK s390_output_mi_thunk
10829 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
10830 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
10831
10832 #undef TARGET_SCHED_ADJUST_PRIORITY
10833 #define TARGET_SCHED_ADJUST_PRIORITY s390_adjust_priority
10834 #undef TARGET_SCHED_ISSUE_RATE
10835 #define TARGET_SCHED_ISSUE_RATE s390_issue_rate
10836 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
10837 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD s390_first_cycle_multipass_dfa_lookahead
10838
10839 #undef TARGET_SCHED_VARIABLE_ISSUE
10840 #define TARGET_SCHED_VARIABLE_ISSUE s390_sched_variable_issue
10841 #undef TARGET_SCHED_REORDER
10842 #define TARGET_SCHED_REORDER s390_sched_reorder
10843 #undef TARGET_SCHED_INIT
10844 #define TARGET_SCHED_INIT s390_sched_init
10845
10846 #undef TARGET_CANNOT_COPY_INSN_P
10847 #define TARGET_CANNOT_COPY_INSN_P s390_cannot_copy_insn_p
10848 #undef TARGET_RTX_COSTS
10849 #define TARGET_RTX_COSTS s390_rtx_costs
10850 #undef TARGET_ADDRESS_COST
10851 #define TARGET_ADDRESS_COST s390_address_cost
10852 #undef TARGET_REGISTER_MOVE_COST
10853 #define TARGET_REGISTER_MOVE_COST s390_register_move_cost
10854 #undef TARGET_MEMORY_MOVE_COST
10855 #define TARGET_MEMORY_MOVE_COST s390_memory_move_cost
10856
10857 #undef TARGET_MACHINE_DEPENDENT_REORG
10858 #define TARGET_MACHINE_DEPENDENT_REORG s390_reorg
10859
10860 #undef TARGET_VALID_POINTER_MODE
10861 #define TARGET_VALID_POINTER_MODE s390_valid_pointer_mode
10862
10863 #undef TARGET_BUILD_BUILTIN_VA_LIST
10864 #define TARGET_BUILD_BUILTIN_VA_LIST s390_build_builtin_va_list
10865 #undef TARGET_EXPAND_BUILTIN_VA_START
10866 #define TARGET_EXPAND_BUILTIN_VA_START s390_va_start
10867 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
10868 #define TARGET_GIMPLIFY_VA_ARG_EXPR s390_gimplify_va_arg
10869
10870 #undef TARGET_PROMOTE_FUNCTION_MODE
10871 #define TARGET_PROMOTE_FUNCTION_MODE s390_promote_function_mode
10872 #undef TARGET_PASS_BY_REFERENCE
10873 #define TARGET_PASS_BY_REFERENCE s390_pass_by_reference
10874
10875 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
10876 #define TARGET_FUNCTION_OK_FOR_SIBCALL s390_function_ok_for_sibcall
10877 #undef TARGET_FUNCTION_ARG
10878 #define TARGET_FUNCTION_ARG s390_function_arg
10879 #undef TARGET_FUNCTION_ARG_ADVANCE
10880 #define TARGET_FUNCTION_ARG_ADVANCE s390_function_arg_advance
10881 #undef TARGET_FUNCTION_VALUE
10882 #define TARGET_FUNCTION_VALUE s390_function_value
10883 #undef TARGET_LIBCALL_VALUE
10884 #define TARGET_LIBCALL_VALUE s390_libcall_value
10885
10886 #undef TARGET_FIXED_CONDITION_CODE_REGS
10887 #define TARGET_FIXED_CONDITION_CODE_REGS s390_fixed_condition_code_regs
10888
10889 #undef TARGET_CC_MODES_COMPATIBLE
10890 #define TARGET_CC_MODES_COMPATIBLE s390_cc_modes_compatible
10891
10892 #undef TARGET_INVALID_WITHIN_DOLOOP
10893 #define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_null
10894
10895 #ifdef HAVE_AS_TLS
10896 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
10897 #define TARGET_ASM_OUTPUT_DWARF_DTPREL s390_output_dwarf_dtprel
10898 #endif
10899
10900 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
10901 #undef TARGET_MANGLE_TYPE
10902 #define TARGET_MANGLE_TYPE s390_mangle_type
10903 #endif
10904
10905 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10906 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
10907
10908 #undef TARGET_PREFERRED_RELOAD_CLASS
10909 #define TARGET_PREFERRED_RELOAD_CLASS s390_preferred_reload_class
10910
10911 #undef TARGET_SECONDARY_RELOAD
10912 #define TARGET_SECONDARY_RELOAD s390_secondary_reload
10913
10914 #undef TARGET_LIBGCC_CMP_RETURN_MODE
10915 #define TARGET_LIBGCC_CMP_RETURN_MODE s390_libgcc_cmp_return_mode
10916
10917 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
10918 #define TARGET_LIBGCC_SHIFT_COUNT_MODE s390_libgcc_shift_count_mode
10919
10920 #undef TARGET_LEGITIMATE_ADDRESS_P
10921 #define TARGET_LEGITIMATE_ADDRESS_P s390_legitimate_address_p
10922
10923 #undef TARGET_LEGITIMATE_CONSTANT_P
10924 #define TARGET_LEGITIMATE_CONSTANT_P s390_legitimate_constant_p
10925
10926 #undef TARGET_CAN_ELIMINATE
10927 #define TARGET_CAN_ELIMINATE s390_can_eliminate
10928
10929 #undef TARGET_CONDITIONAL_REGISTER_USAGE
10930 #define TARGET_CONDITIONAL_REGISTER_USAGE s390_conditional_register_usage
10931
10932 #undef TARGET_LOOP_UNROLL_ADJUST
10933 #define TARGET_LOOP_UNROLL_ADJUST s390_loop_unroll_adjust
10934
10935 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
10936 #define TARGET_ASM_TRAMPOLINE_TEMPLATE s390_asm_trampoline_template
10937 #undef TARGET_TRAMPOLINE_INIT
10938 #define TARGET_TRAMPOLINE_INIT s390_trampoline_init
10939
10940 #undef TARGET_UNWIND_WORD_MODE
10941 #define TARGET_UNWIND_WORD_MODE s390_unwind_word_mode
10942
10943 struct gcc_target targetm = TARGET_INITIALIZER;
10944
10945 #include "gt-s390.h"