tm.texi.in (TARGET_RTX_COSTS): Add an opno parameter.
[gcc.git] gcc/config/s390/s390.c
1 /* Subroutines used for code generation on IBM S/390 and zSeries
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006,
3 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
4 Contributed by Hartmut Penner (hpenner@de.ibm.com) and
5 Ulrich Weigand (uweigand@de.ibm.com) and
6 Andreas Krebbel (Andreas.Krebbel@de.ibm.com).
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "tm_p.h"
31 #include "regs.h"
32 #include "hard-reg-set.h"
33 #include "insn-config.h"
34 #include "conditions.h"
35 #include "output.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "except.h"
39 #include "function.h"
40 #include "recog.h"
41 #include "expr.h"
42 #include "reload.h"
43 #include "diagnostic-core.h"
44 #include "basic-block.h"
45 #include "integrate.h"
46 #include "ggc.h"
47 #include "target.h"
48 #include "target-def.h"
49 #include "debug.h"
50 #include "langhooks.h"
51 #include "optabs.h"
52 #include "gimple.h"
53 #include "df.h"
54 #include "params.h"
55 #include "cfgloop.h"
56 #include "opts.h"
57
58 /* Define the specific costs for a given cpu. */
59
60 struct processor_costs
61 {
62 /* multiplication */
63 const int m; /* cost of an M instruction. */
64 const int mghi; /* cost of an MGHI instruction. */
65 const int mh; /* cost of an MH instruction. */
66 const int mhi; /* cost of an MHI instruction. */
67 const int ml; /* cost of an ML instruction. */
68 const int mr; /* cost of an MR instruction. */
69 const int ms; /* cost of an MS instruction. */
70 const int msg; /* cost of an MSG instruction. */
71 const int msgf; /* cost of an MSGF instruction. */
72 const int msgfr; /* cost of an MSGFR instruction. */
73 const int msgr; /* cost of an MSGR instruction. */
74 const int msr; /* cost of an MSR instruction. */
75 const int mult_df; /* cost of multiplication in DFmode. */
76 const int mxbr;
77 /* square root */
78 const int sqxbr; /* cost of square root in TFmode. */
79 const int sqdbr; /* cost of square root in DFmode. */
80 const int sqebr; /* cost of square root in SFmode. */
81 /* multiply and add */
82 const int madbr; /* cost of multiply and add in DFmode. */
83 const int maebr; /* cost of multiply and add in SFmode. */
84 /* division */
85 const int dxbr;
86 const int ddbr;
87 const int debr;
88 const int dlgr;
89 const int dlr;
90 const int dr;
91 const int dsgfr;
92 const int dsgr;
93 };
94
95 const struct processor_costs *s390_cost;
96
97 static const
98 struct processor_costs z900_cost =
99 {
100 COSTS_N_INSNS (5), /* M */
101 COSTS_N_INSNS (10), /* MGHI */
102 COSTS_N_INSNS (5), /* MH */
103 COSTS_N_INSNS (4), /* MHI */
104 COSTS_N_INSNS (5), /* ML */
105 COSTS_N_INSNS (5), /* MR */
106 COSTS_N_INSNS (4), /* MS */
107 COSTS_N_INSNS (15), /* MSG */
108 COSTS_N_INSNS (7), /* MSGF */
109 COSTS_N_INSNS (7), /* MSGFR */
110 COSTS_N_INSNS (10), /* MSGR */
111 COSTS_N_INSNS (4), /* MSR */
112 COSTS_N_INSNS (7), /* multiplication in DFmode */
113 COSTS_N_INSNS (13), /* MXBR */
114 COSTS_N_INSNS (136), /* SQXBR */
115 COSTS_N_INSNS (44), /* SQDBR */
116 COSTS_N_INSNS (35), /* SQEBR */
117 COSTS_N_INSNS (18), /* MADBR */
118 COSTS_N_INSNS (13), /* MAEBR */
119 COSTS_N_INSNS (134), /* DXBR */
120 COSTS_N_INSNS (30), /* DDBR */
121 COSTS_N_INSNS (27), /* DEBR */
122 COSTS_N_INSNS (220), /* DLGR */
123 COSTS_N_INSNS (34), /* DLR */
124 COSTS_N_INSNS (34), /* DR */
125 COSTS_N_INSNS (32), /* DSGFR */
126 COSTS_N_INSNS (32), /* DSGR */
127 };
128
129 static const
130 struct processor_costs z990_cost =
131 {
132 COSTS_N_INSNS (4), /* M */
133 COSTS_N_INSNS (2), /* MGHI */
134 COSTS_N_INSNS (2), /* MH */
135 COSTS_N_INSNS (2), /* MHI */
136 COSTS_N_INSNS (4), /* ML */
137 COSTS_N_INSNS (4), /* MR */
138 COSTS_N_INSNS (5), /* MS */
139 COSTS_N_INSNS (6), /* MSG */
140 COSTS_N_INSNS (4), /* MSGF */
141 COSTS_N_INSNS (4), /* MSGFR */
142 COSTS_N_INSNS (4), /* MSGR */
143 COSTS_N_INSNS (4), /* MSR */
144 COSTS_N_INSNS (1), /* multiplication in DFmode */
145 COSTS_N_INSNS (28), /* MXBR */
146 COSTS_N_INSNS (130), /* SQXBR */
147 COSTS_N_INSNS (66), /* SQDBR */
148 COSTS_N_INSNS (38), /* SQEBR */
149 COSTS_N_INSNS (1), /* MADBR */
150 COSTS_N_INSNS (1), /* MAEBR */
151 COSTS_N_INSNS (60), /* DXBR */
152 COSTS_N_INSNS (40), /* DDBR */
153 COSTS_N_INSNS (26), /* DEBR */
154 COSTS_N_INSNS (176), /* DLGR */
155 COSTS_N_INSNS (31), /* DLR */
156 COSTS_N_INSNS (31), /* DR */
157 COSTS_N_INSNS (31), /* DSGFR */
158 COSTS_N_INSNS (31), /* DSGR */
159 };
160
161 static const
162 struct processor_costs z9_109_cost =
163 {
164 COSTS_N_INSNS (4), /* M */
165 COSTS_N_INSNS (2), /* MGHI */
166 COSTS_N_INSNS (2), /* MH */
167 COSTS_N_INSNS (2), /* MHI */
168 COSTS_N_INSNS (4), /* ML */
169 COSTS_N_INSNS (4), /* MR */
170 COSTS_N_INSNS (5), /* MS */
171 COSTS_N_INSNS (6), /* MSG */
172 COSTS_N_INSNS (4), /* MSGF */
173 COSTS_N_INSNS (4), /* MSGFR */
174 COSTS_N_INSNS (4), /* MSGR */
175 COSTS_N_INSNS (4), /* MSR */
176 COSTS_N_INSNS (1), /* multiplication in DFmode */
177 COSTS_N_INSNS (28), /* MXBR */
178 COSTS_N_INSNS (130), /* SQXBR */
179 COSTS_N_INSNS (66), /* SQDBR */
180 COSTS_N_INSNS (38), /* SQEBR */
181 COSTS_N_INSNS (1), /* MADBR */
182 COSTS_N_INSNS (1), /* MAEBR */
183 COSTS_N_INSNS (60), /* DXBR */
184 COSTS_N_INSNS (40), /* DDBR */
185 COSTS_N_INSNS (26), /* DEBR */
186 COSTS_N_INSNS (30), /* DLGR */
187 COSTS_N_INSNS (23), /* DLR */
188 COSTS_N_INSNS (23), /* DR */
189 COSTS_N_INSNS (24), /* DSGFR */
190 COSTS_N_INSNS (24), /* DSGR */
191 };
192
193 static const
194 struct processor_costs z10_cost =
195 {
196 COSTS_N_INSNS (10), /* M */
197 COSTS_N_INSNS (10), /* MGHI */
198 COSTS_N_INSNS (10), /* MH */
199 COSTS_N_INSNS (10), /* MHI */
200 COSTS_N_INSNS (10), /* ML */
201 COSTS_N_INSNS (10), /* MR */
202 COSTS_N_INSNS (10), /* MS */
203 COSTS_N_INSNS (10), /* MSG */
204 COSTS_N_INSNS (10), /* MSGF */
205 COSTS_N_INSNS (10), /* MSGFR */
206 COSTS_N_INSNS (10), /* MSGR */
207 COSTS_N_INSNS (10), /* MSR */
208 COSTS_N_INSNS (1) , /* multiplication in DFmode */
209 COSTS_N_INSNS (50), /* MXBR */
210 COSTS_N_INSNS (120), /* SQXBR */
211 COSTS_N_INSNS (52), /* SQDBR */
212 COSTS_N_INSNS (38), /* SQEBR */
213 COSTS_N_INSNS (1), /* MADBR */
214 COSTS_N_INSNS (1), /* MAEBR */
215 COSTS_N_INSNS (111), /* DXBR */
216 COSTS_N_INSNS (39), /* DDBR */
217 COSTS_N_INSNS (32), /* DEBR */
218 COSTS_N_INSNS (160), /* DLGR */
219 COSTS_N_INSNS (71), /* DLR */
220 COSTS_N_INSNS (71), /* DR */
221 COSTS_N_INSNS (71), /* DSGFR */
222 COSTS_N_INSNS (71), /* DSGR */
223 };
224
225 static const
226 struct processor_costs z196_cost =
227 {
228 COSTS_N_INSNS (7), /* M */
229 COSTS_N_INSNS (5), /* MGHI */
230 COSTS_N_INSNS (5), /* MH */
231 COSTS_N_INSNS (5), /* MHI */
232 COSTS_N_INSNS (7), /* ML */
233 COSTS_N_INSNS (7), /* MR */
234 COSTS_N_INSNS (6), /* MS */
235 COSTS_N_INSNS (8), /* MSG */
236 COSTS_N_INSNS (6), /* MSGF */
237 COSTS_N_INSNS (6), /* MSGFR */
238 COSTS_N_INSNS (8), /* MSGR */
239 COSTS_N_INSNS (6), /* MSR */
240 COSTS_N_INSNS (1) , /* multiplication in DFmode */
241 COSTS_N_INSNS (40), /* MXBR B+40 */
242 COSTS_N_INSNS (100), /* SQXBR B+100 */
243 COSTS_N_INSNS (42), /* SQDBR B+42 */
244 COSTS_N_INSNS (28), /* SQEBR B+28 */
245 COSTS_N_INSNS (1), /* MADBR B */
246 COSTS_N_INSNS (1), /* MAEBR B */
247 COSTS_N_INSNS (101), /* DXBR B+101 */
248 COSTS_N_INSNS (29), /* DDBR */
249 COSTS_N_INSNS (22), /* DEBR */
250 COSTS_N_INSNS (160), /* DLGR cracked */
251 COSTS_N_INSNS (160), /* DLR cracked */
252 COSTS_N_INSNS (160), /* DR expanded */
253 COSTS_N_INSNS (160), /* DSGFR cracked */
254 COSTS_N_INSNS (160), /* DSGR cracked */
255 };
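/* Note (illustrative): COSTS_N_INSNS expresses a cost in units of a single
   fast instruction, so the tables above encode relative instruction costs
   rather than exact cycle counts.  The table matching the -mtune setting is
   selected in s390_option_override below and made available via s390_cost.  */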
256
257 extern int reload_completed;
258
259 /* Kept up to date using the SCHED_VARIABLE_ISSUE hook. */
260 static rtx last_scheduled_insn;
261
262 /* Structure used to hold the components of an S/390 memory
263 address. A legitimate address on S/390 is of the general
264 form
265 base + index + displacement
266 where any of the components is optional.
267
268 base and index are registers of the class ADDR_REGS,
269 displacement is an unsigned 12-bit immediate constant. */
270
271 struct s390_address
272 {
273 rtx base;
274 rtx indx;
275 rtx disp;
276 bool pointer;
277 bool literal_pool;
278 };
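/* Illustrative example: an address RTX of the form
   (plus (plus (reg %r2) (reg %r3)) (const_int 4000)) is decomposed by
   s390_decompose_address below into indx = %r2, base = %r3 and
   disp = (const_int 4000), i.e. the assembler operand 4000(%r2,%r3)
   (the pointer heuristic may still swap base and index).  */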
279
280 /* The following structure is embedded in the machine
281 specific part of struct function. */
282
283 struct GTY (()) s390_frame_layout
284 {
285 /* Offset within stack frame. */
286 HOST_WIDE_INT gprs_offset;
287 HOST_WIDE_INT f0_offset;
288 HOST_WIDE_INT f4_offset;
289 HOST_WIDE_INT f8_offset;
290 HOST_WIDE_INT backchain_offset;
291
292 /* Numbers of the first and last gpr for which slots in the register
293 save area are reserved. */
294 int first_save_gpr_slot;
295 int last_save_gpr_slot;
296
297 /* Number of first and last gpr to be saved, restored. */
298 int first_save_gpr;
299 int first_restore_gpr;
300 int last_save_gpr;
301 int last_restore_gpr;
302
303 /* Bits standing for floating point registers. Set if the
304 respective register has to be saved. Starting with reg 16 (f0)
305 at the rightmost bit.
306 Bit 15 - 8 7 6 5 4 3 2 1 0
307 fpr 15 - 8 7 5 3 1 6 4 2 0
308 reg 31 - 24 23 22 21 20 19 18 17 16 */
309 unsigned int fpr_bitmap;
310
311 /* Number of floating point registers f8-f15 which must be saved. */
312 int high_fprs;
313
314 /* Set if return address needs to be saved.
315 This flag is set by s390_return_addr_rtx if it could not use
316 the initial value of r14 and therefore depends on r14 saved
317 to the stack. */
318 bool save_return_addr_p;
319
320 /* Size of stack frame. */
321 HOST_WIDE_INT frame_size;
322 };
323
324 /* Define the structure for the machine field in struct function. */
325
326 struct GTY(()) machine_function
327 {
328 struct s390_frame_layout frame_layout;
329
330 /* Literal pool base register. */
331 rtx base_reg;
332
333 /* True if we may need to perform branch splitting. */
334 bool split_branches_pending_p;
335
336 /* Some local-dynamic TLS symbol name. */
337 const char *some_ld_name;
338
339 bool has_landing_pad_p;
340 };
341
342 /* A few accessor macros for struct cfun->machine->s390_frame_layout. */
343
344 #define cfun_frame_layout (cfun->machine->frame_layout)
345 #define cfun_save_high_fprs_p (!!cfun_frame_layout.high_fprs)
346 #define cfun_gprs_save_area_size ((cfun_frame_layout.last_save_gpr_slot - \
347 cfun_frame_layout.first_save_gpr_slot + 1) * UNITS_PER_LONG)
348 #define cfun_set_fpr_bit(BITNUM) (cfun->machine->frame_layout.fpr_bitmap |= \
349 (1 << (BITNUM)))
350 #define cfun_fpr_bit_p(BITNUM) (!!(cfun->machine->frame_layout.fpr_bitmap & \
351 (1 << (BITNUM))))
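/* For illustration: according to the table above, bit 2 of fpr_bitmap stands
   for f4 (hard reg 18), so cfun_set_fpr_bit (2) records that f4 has to be
   saved and cfun_fpr_bit_p (2) tests for exactly that.  */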
352
353 /* Number of GPRs and FPRs used for argument passing. */
354 #define GP_ARG_NUM_REG 5
355 #define FP_ARG_NUM_REG (TARGET_64BIT? 4 : 2)
356
357 /* A couple of shortcuts. */
358 #define CONST_OK_FOR_J(x) \
359 CONST_OK_FOR_CONSTRAINT_P((x), 'J', "J")
360 #define CONST_OK_FOR_K(x) \
361 CONST_OK_FOR_CONSTRAINT_P((x), 'K', "K")
362 #define CONST_OK_FOR_Os(x) \
363 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
364 #define CONST_OK_FOR_Op(x) \
365 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
366 #define CONST_OK_FOR_On(x) \
367 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")
368
369 #define REGNO_PAIR_OK(REGNO, MODE) \
370 (HARD_REGNO_NREGS ((REGNO), (MODE)) == 1 || !((REGNO) & 1))
371
372 /* That's the read ahead of the dynamic branch prediction unit in
373 bytes on a z10 (or higher) CPU. */
374 #define PREDICT_DISTANCE (TARGET_Z10 ? 384 : 2048)
375
376 /* Return the alignment for LABEL. We default to the -falign-labels
377 value except for the literal pool base label. */
378 int
379 s390_label_align (rtx label)
380 {
381 rtx prev_insn = prev_active_insn (label);
382
383 if (prev_insn == NULL_RTX)
384 goto old;
385
386 prev_insn = single_set (prev_insn);
387
388 if (prev_insn == NULL_RTX)
389 goto old;
390
391 prev_insn = SET_SRC (prev_insn);
392
393 /* Don't align literal pool base labels. */
394 if (GET_CODE (prev_insn) == UNSPEC
395 && XINT (prev_insn, 1) == UNSPEC_MAIN_BASE)
396 return 0;
397
398 old:
399 return align_labels_log;
400 }
401
402 static enum machine_mode
403 s390_libgcc_cmp_return_mode (void)
404 {
405 return TARGET_64BIT ? DImode : SImode;
406 }
407
408 static enum machine_mode
409 s390_libgcc_shift_count_mode (void)
410 {
411 return TARGET_64BIT ? DImode : SImode;
412 }
413
414 static enum machine_mode
415 s390_unwind_word_mode (void)
416 {
417 return TARGET_64BIT ? DImode : SImode;
418 }
419
420 /* Return true if the back end supports mode MODE. */
421 static bool
422 s390_scalar_mode_supported_p (enum machine_mode mode)
423 {
424 /* In contrast to the default implementation, reject TImode constants
425 on 31-bit TARGET_ZARCH for ABI compliance. */
426 if (!TARGET_64BIT && TARGET_ZARCH && mode == TImode)
427 return false;
428
429 if (DECIMAL_FLOAT_MODE_P (mode))
430 return default_decimal_float_supported_p ();
431
432 return default_scalar_mode_supported_p (mode);
433 }
434
435 /* Set the has_landing_pad_p flag in struct machine_function to VALUE. */
436
437 void
438 s390_set_has_landing_pad_p (bool value)
439 {
440 cfun->machine->has_landing_pad_p = value;
441 }
442
443 /* If two condition code modes are compatible, return a condition code
444 mode which is compatible with both. Otherwise, return
445 VOIDmode. */
446
447 static enum machine_mode
448 s390_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
449 {
450 if (m1 == m2)
451 return m1;
452
453 switch (m1)
454 {
455 case CCZmode:
456 if (m2 == CCUmode || m2 == CCTmode || m2 == CCZ1mode
457 || m2 == CCSmode || m2 == CCSRmode || m2 == CCURmode)
458 return m2;
459 return VOIDmode;
460
461 case CCSmode:
462 case CCUmode:
463 case CCTmode:
464 case CCSRmode:
465 case CCURmode:
466 case CCZ1mode:
467 if (m2 == CCZmode)
468 return m1;
469
470 return VOIDmode;
471
472 default:
473 return VOIDmode;
474 }
475 return VOIDmode;
476 }
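/* For illustration: CCZmode only carries the "result is zero / non-zero"
   information, so it is compatible with the richer modes handled above;
   e.g. combining CCZmode with CCUmode yields CCUmode.  */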
477
478 /* Return true if SET either doesn't set the CC register, or else
479 the source and destination have matching CC modes and that
480 CC mode is at least as constrained as REQ_MODE. */
481
482 static bool
483 s390_match_ccmode_set (rtx set, enum machine_mode req_mode)
484 {
485 enum machine_mode set_mode;
486
487 gcc_assert (GET_CODE (set) == SET);
488
489 if (GET_CODE (SET_DEST (set)) != REG || !CC_REGNO_P (REGNO (SET_DEST (set))))
490 return 1;
491
492 set_mode = GET_MODE (SET_DEST (set));
493 switch (set_mode)
494 {
495 case CCSmode:
496 case CCSRmode:
497 case CCUmode:
498 case CCURmode:
499 case CCLmode:
500 case CCL1mode:
501 case CCL2mode:
502 case CCL3mode:
503 case CCT1mode:
504 case CCT2mode:
505 case CCT3mode:
506 if (req_mode != set_mode)
507 return 0;
508 break;
509
510 case CCZmode:
511 if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode
512 && req_mode != CCSRmode && req_mode != CCURmode)
513 return 0;
514 break;
515
516 case CCAPmode:
517 case CCANmode:
518 if (req_mode != CCAmode)
519 return 0;
520 break;
521
522 default:
523 gcc_unreachable ();
524 }
525
526 return (GET_MODE (SET_SRC (set)) == set_mode);
527 }
528
529 /* Return true if every SET in INSN that sets the CC register
530 has source and destination with matching CC modes and that
531 CC mode is at least as constrained as REQ_MODE.
532 If REQ_MODE is VOIDmode, always return false. */
533
534 bool
535 s390_match_ccmode (rtx insn, enum machine_mode req_mode)
536 {
537 int i;
538
539 /* s390_tm_ccmode returns VOIDmode to indicate failure. */
540 if (req_mode == VOIDmode)
541 return false;
542
543 if (GET_CODE (PATTERN (insn)) == SET)
544 return s390_match_ccmode_set (PATTERN (insn), req_mode);
545
546 if (GET_CODE (PATTERN (insn)) == PARALLEL)
547 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
548 {
549 rtx set = XVECEXP (PATTERN (insn), 0, i);
550 if (GET_CODE (set) == SET)
551 if (!s390_match_ccmode_set (set, req_mode))
552 return false;
553 }
554
555 return true;
556 }
557
558 /* If a test-under-mask instruction can be used to implement
559 (compare (and ... OP1) OP2), return the CC mode required
560 to do that. Otherwise, return VOIDmode.
561 MIXED is true if the instruction can distinguish between
562 CC1 and CC2 for mixed selected bits (TMxx); it is false
563 if the instruction cannot (TM). */
564
565 enum machine_mode
566 s390_tm_ccmode (rtx op1, rtx op2, bool mixed)
567 {
568 int bit0, bit1;
569
570 /* ??? Fixme: should work on CONST_DOUBLE as well. */
571 if (GET_CODE (op1) != CONST_INT || GET_CODE (op2) != CONST_INT)
572 return VOIDmode;
573
574 /* Selected bits all zero: CC0.
575 e.g.: int a; if ((a & (16 + 128)) == 0) */
576 if (INTVAL (op2) == 0)
577 return CCTmode;
578
579 /* Selected bits all one: CC3.
580 e.g.: int a; if ((a & (16 + 128)) == 16 + 128) */
581 if (INTVAL (op2) == INTVAL (op1))
582 return CCT3mode;
583
584 /* Exactly two bits selected, mixed zeroes and ones: CC1 or CC2. e.g.:
585 int a;
586 if ((a & (16 + 128)) == 16) -> CCT1
587 if ((a & (16 + 128)) == 128) -> CCT2 */
588 if (mixed)
589 {
590 bit1 = exact_log2 (INTVAL (op2));
591 bit0 = exact_log2 (INTVAL (op1) ^ INTVAL (op2));
592 if (bit0 != -1 && bit1 != -1)
593 return bit0 > bit1 ? CCT1mode : CCT2mode;
594 }
595
596 return VOIDmode;
597 }
598
599 /* Given a comparison code OP (EQ, NE, etc.) and the operands
600 OP0 and OP1 of a COMPARE, return the mode to be used for the
601 comparison. */
602
603 enum machine_mode
604 s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
605 {
606 switch (code)
607 {
608 case EQ:
609 case NE:
610 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
611 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
612 return CCAPmode;
613 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
614 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
615 return CCAPmode;
616 if ((GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
617 || GET_CODE (op1) == NEG)
618 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
619 return CCLmode;
620
621 if (GET_CODE (op0) == AND)
622 {
623 /* Check whether we can potentially do it via TM. */
624 enum machine_mode ccmode;
625 ccmode = s390_tm_ccmode (XEXP (op0, 1), op1, 1);
626 if (ccmode != VOIDmode)
627 {
628 /* Relax CCTmode to CCZmode to allow fall-back to AND
629 if that turns out to be beneficial. */
630 return ccmode == CCTmode ? CCZmode : ccmode;
631 }
632 }
633
634 if (register_operand (op0, HImode)
635 && GET_CODE (op1) == CONST_INT
636 && (INTVAL (op1) == -1 || INTVAL (op1) == 65535))
637 return CCT3mode;
638 if (register_operand (op0, QImode)
639 && GET_CODE (op1) == CONST_INT
640 && (INTVAL (op1) == -1 || INTVAL (op1) == 255))
641 return CCT3mode;
642
643 return CCZmode;
644
645 case LE:
646 case LT:
647 case GE:
648 case GT:
649 /* The only overflow condition of NEG and ABS happens when the
650 most negative value (-INT_MAX - 1) is used as operand; the result
651 then stays negative, i.e. we have an overflow from a positive value
652 to a negative. Using CCAP mode the resulting cc can be used for comparisons. */
653 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
654 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
655 return CCAPmode;
656
657 /* If constants are involved in an add instruction it is possible to use
658 the resulting cc for comparisons with zero. Knowing the sign of the
659 constant the overflow behavior gets predictable. e.g.:
660 int a, b; if ((b = a + c) > 0)
661 with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP */
662 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
663 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
664 {
665 if (INTVAL (XEXP((op0), 1)) < 0)
666 return CCANmode;
667 else
668 return CCAPmode;
669 }
670 /* Fall through. */
671 case UNORDERED:
672 case ORDERED:
673 case UNEQ:
674 case UNLE:
675 case UNLT:
676 case UNGE:
677 case UNGT:
678 case LTGT:
679 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
680 && GET_CODE (op1) != CONST_INT)
681 return CCSRmode;
682 return CCSmode;
683
684 case LTU:
685 case GEU:
686 if (GET_CODE (op0) == PLUS
687 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
688 return CCL1mode;
689
690 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
691 && GET_CODE (op1) != CONST_INT)
692 return CCURmode;
693 return CCUmode;
694
695 case LEU:
696 case GTU:
697 if (GET_CODE (op0) == MINUS
698 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
699 return CCL2mode;
700
701 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
702 && GET_CODE (op1) != CONST_INT)
703 return CCURmode;
704 return CCUmode;
705
706 default:
707 gcc_unreachable ();
708 }
709 }
710
711 /* Replace the comparison OP0 CODE OP1 by a semantically equivalent one
712 that we can implement more efficiently. */
713
714 void
715 s390_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1)
716 {
717 /* Convert ZERO_EXTRACT back to AND to enable TM patterns. */
718 if ((*code == EQ || *code == NE)
719 && *op1 == const0_rtx
720 && GET_CODE (*op0) == ZERO_EXTRACT
721 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
722 && GET_CODE (XEXP (*op0, 2)) == CONST_INT
723 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
724 {
725 rtx inner = XEXP (*op0, 0);
726 HOST_WIDE_INT modesize = GET_MODE_BITSIZE (GET_MODE (inner));
727 HOST_WIDE_INT len = INTVAL (XEXP (*op0, 1));
728 HOST_WIDE_INT pos = INTVAL (XEXP (*op0, 2));
729
730 if (len > 0 && len < modesize
731 && pos >= 0 && pos + len <= modesize
732 && modesize <= HOST_BITS_PER_WIDE_INT)
733 {
734 unsigned HOST_WIDE_INT block;
735 block = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
736 block <<= modesize - pos - len;
737
738 *op0 = gen_rtx_AND (GET_MODE (inner), inner,
739 gen_int_mode (block, GET_MODE (inner)));
740 }
741 }
742
743 /* Narrow AND of memory against immediate to enable TM. */
744 if ((*code == EQ || *code == NE)
745 && *op1 == const0_rtx
746 && GET_CODE (*op0) == AND
747 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
748 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
749 {
750 rtx inner = XEXP (*op0, 0);
751 rtx mask = XEXP (*op0, 1);
752
753 /* Ignore paradoxical SUBREGs if all extra bits are masked out. */
754 if (GET_CODE (inner) == SUBREG
755 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (inner)))
756 && (GET_MODE_SIZE (GET_MODE (inner))
757 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
758 && ((INTVAL (mask)
759 & GET_MODE_MASK (GET_MODE (inner))
760 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
761 == 0))
762 inner = SUBREG_REG (inner);
763
764 /* Do not change volatile MEMs. */
765 if (MEM_P (inner) && !MEM_VOLATILE_P (inner))
766 {
767 int part = s390_single_part (XEXP (*op0, 1),
768 GET_MODE (inner), QImode, 0);
769 if (part >= 0)
770 {
771 mask = gen_int_mode (s390_extract_part (mask, QImode, 0), QImode);
772 inner = adjust_address_nv (inner, QImode, part);
773 *op0 = gen_rtx_AND (QImode, inner, mask);
774 }
775 }
776 }
777
778 /* Narrow comparisons against 0xffff to HImode if possible. */
779 if ((*code == EQ || *code == NE)
780 && GET_CODE (*op1) == CONST_INT
781 && INTVAL (*op1) == 0xffff
782 && SCALAR_INT_MODE_P (GET_MODE (*op0))
783 && (nonzero_bits (*op0, GET_MODE (*op0))
784 & ~(unsigned HOST_WIDE_INT) 0xffff) == 0)
785 {
786 *op0 = gen_lowpart (HImode, *op0);
787 *op1 = constm1_rtx;
788 }
789
790 /* Remove redundant UNSPEC_CCU_TO_INT conversions if possible. */
791 if (GET_CODE (*op0) == UNSPEC
792 && XINT (*op0, 1) == UNSPEC_CCU_TO_INT
793 && XVECLEN (*op0, 0) == 1
794 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCUmode
795 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
796 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
797 && *op1 == const0_rtx)
798 {
799 enum rtx_code new_code = UNKNOWN;
800 switch (*code)
801 {
802 case EQ: new_code = EQ; break;
803 case NE: new_code = NE; break;
804 case LT: new_code = GTU; break;
805 case GT: new_code = LTU; break;
806 case LE: new_code = GEU; break;
807 case GE: new_code = LEU; break;
808 default: break;
809 }
810
811 if (new_code != UNKNOWN)
812 {
813 *op0 = XVECEXP (*op0, 0, 0);
814 *code = new_code;
815 }
816 }
817
818 /* Remove redundant UNSPEC_CCZ_TO_INT conversions if possible. */
819 if (GET_CODE (*op0) == UNSPEC
820 && XINT (*op0, 1) == UNSPEC_CCZ_TO_INT
821 && XVECLEN (*op0, 0) == 1
822 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCZmode
823 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
824 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
825 && *op1 == const0_rtx)
826 {
827 enum rtx_code new_code = UNKNOWN;
828 switch (*code)
829 {
830 case EQ: new_code = EQ; break;
831 case NE: new_code = NE; break;
832 default: break;
833 }
834
835 if (new_code != UNKNOWN)
836 {
837 *op0 = XVECEXP (*op0, 0, 0);
838 *code = new_code;
839 }
840 }
841
842 /* Simplify cascaded EQ, NE with const0_rtx. */
843 if ((*code == NE || *code == EQ)
844 && (GET_CODE (*op0) == EQ || GET_CODE (*op0) == NE)
845 && GET_MODE (*op0) == SImode
846 && GET_MODE (XEXP (*op0, 0)) == CCZ1mode
847 && REG_P (XEXP (*op0, 0))
848 && XEXP (*op0, 1) == const0_rtx
849 && *op1 == const0_rtx)
850 {
851 if ((*code == EQ && GET_CODE (*op0) == NE)
852 || (*code == NE && GET_CODE (*op0) == EQ))
853 *code = EQ;
854 else
855 *code = NE;
856 *op0 = XEXP (*op0, 0);
857 }
858
859 /* Prefer register over memory as first operand. */
860 if (MEM_P (*op0) && REG_P (*op1))
861 {
862 rtx tem = *op0; *op0 = *op1; *op1 = tem;
863 *code = swap_condition (*code);
864 }
865 }
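/* Illustrative example for the 0xffff case above: if x is an SImode value
   whose upper 16 bits are known to be zero, the test "x == 0xffff" is
   rewritten as an HImode comparison of the low part against -1, which
   s390_select_ccmode can then map to the CCT3 ("all selected bits set")
   patterns.  */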
866
867 /* Emit a compare instruction suitable to implement the comparison
868 OP0 CODE OP1. Return the correct condition RTL to be placed in
869 the IF_THEN_ELSE of the conditional branch testing the result. */
870
871 rtx
872 s390_emit_compare (enum rtx_code code, rtx op0, rtx op1)
873 {
874 enum machine_mode mode = s390_select_ccmode (code, op0, op1);
875 rtx cc;
876
877 /* Do not output a redundant compare instruction if a compare_and_swap
878 pattern already computed the result and the machine modes are compatible. */
879 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
880 {
881 gcc_assert (s390_cc_modes_compatible (GET_MODE (op0), mode)
882 == GET_MODE (op0));
883 cc = op0;
884 }
885 else
886 {
887 cc = gen_rtx_REG (mode, CC_REGNUM);
888 emit_insn (gen_rtx_SET (VOIDmode, cc, gen_rtx_COMPARE (mode, op0, op1)));
889 }
890
891 return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
892 }
893
894 /* Emit a SImode compare and swap instruction setting MEM to NEW_RTX if OLD
895 matches CMP.
896 Return the correct condition RTL to be placed in the IF_THEN_ELSE of the
897 conditional branch testing the result. */
898
899 static rtx
900 s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem, rtx cmp, rtx new_rtx)
901 {
902 emit_insn (gen_sync_compare_and_swapsi (old, mem, cmp, new_rtx));
903 return s390_emit_compare (code, gen_rtx_REG (CCZ1mode, CC_REGNUM), const0_rtx);
904 }
905
906 /* Emit a jump instruction to TARGET. If COND is NULL_RTX, emit an
907 unconditional jump, else a conditional jump under condition COND. */
908
909 void
910 s390_emit_jump (rtx target, rtx cond)
911 {
912 rtx insn;
913
914 target = gen_rtx_LABEL_REF (VOIDmode, target);
915 if (cond)
916 target = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, target, pc_rtx);
917
918 insn = gen_rtx_SET (VOIDmode, pc_rtx, target);
919 emit_jump_insn (insn);
920 }
921
922 /* Return branch condition mask to implement a branch
923 specified by CODE. Return -1 for invalid comparisons. */
924
925 int
926 s390_branch_condition_mask (rtx code)
927 {
928 const int CC0 = 1 << 3;
929 const int CC1 = 1 << 2;
930 const int CC2 = 1 << 1;
931 const int CC3 = 1 << 0;
932
933 gcc_assert (GET_CODE (XEXP (code, 0)) == REG);
934 gcc_assert (REGNO (XEXP (code, 0)) == CC_REGNUM);
935 gcc_assert (XEXP (code, 1) == const0_rtx);
936
937 switch (GET_MODE (XEXP (code, 0)))
938 {
939 case CCZmode:
940 case CCZ1mode:
941 switch (GET_CODE (code))
942 {
943 case EQ: return CC0;
944 case NE: return CC1 | CC2 | CC3;
945 default: return -1;
946 }
947 break;
948
949 case CCT1mode:
950 switch (GET_CODE (code))
951 {
952 case EQ: return CC1;
953 case NE: return CC0 | CC2 | CC3;
954 default: return -1;
955 }
956 break;
957
958 case CCT2mode:
959 switch (GET_CODE (code))
960 {
961 case EQ: return CC2;
962 case NE: return CC0 | CC1 | CC3;
963 default: return -1;
964 }
965 break;
966
967 case CCT3mode:
968 switch (GET_CODE (code))
969 {
970 case EQ: return CC3;
971 case NE: return CC0 | CC1 | CC2;
972 default: return -1;
973 }
974 break;
975
976 case CCLmode:
977 switch (GET_CODE (code))
978 {
979 case EQ: return CC0 | CC2;
980 case NE: return CC1 | CC3;
981 default: return -1;
982 }
983 break;
984
985 case CCL1mode:
986 switch (GET_CODE (code))
987 {
988 case LTU: return CC2 | CC3; /* carry */
989 case GEU: return CC0 | CC1; /* no carry */
990 default: return -1;
991 }
992 break;
993
994 case CCL2mode:
995 switch (GET_CODE (code))
996 {
997 case GTU: return CC0 | CC1; /* borrow */
998 case LEU: return CC2 | CC3; /* no borrow */
999 default: return -1;
1000 }
1001 break;
1002
1003 case CCL3mode:
1004 switch (GET_CODE (code))
1005 {
1006 case EQ: return CC0 | CC2;
1007 case NE: return CC1 | CC3;
1008 case LTU: return CC1;
1009 case GTU: return CC3;
1010 case LEU: return CC1 | CC2;
1011 case GEU: return CC2 | CC3;
1012 default: return -1;
1013 }
1014
1015 case CCUmode:
1016 switch (GET_CODE (code))
1017 {
1018 case EQ: return CC0;
1019 case NE: return CC1 | CC2 | CC3;
1020 case LTU: return CC1;
1021 case GTU: return CC2;
1022 case LEU: return CC0 | CC1;
1023 case GEU: return CC0 | CC2;
1024 default: return -1;
1025 }
1026 break;
1027
1028 case CCURmode:
1029 switch (GET_CODE (code))
1030 {
1031 case EQ: return CC0;
1032 case NE: return CC2 | CC1 | CC3;
1033 case LTU: return CC2;
1034 case GTU: return CC1;
1035 case LEU: return CC0 | CC2;
1036 case GEU: return CC0 | CC1;
1037 default: return -1;
1038 }
1039 break;
1040
1041 case CCAPmode:
1042 switch (GET_CODE (code))
1043 {
1044 case EQ: return CC0;
1045 case NE: return CC1 | CC2 | CC3;
1046 case LT: return CC1 | CC3;
1047 case GT: return CC2;
1048 case LE: return CC0 | CC1 | CC3;
1049 case GE: return CC0 | CC2;
1050 default: return -1;
1051 }
1052 break;
1053
1054 case CCANmode:
1055 switch (GET_CODE (code))
1056 {
1057 case EQ: return CC0;
1058 case NE: return CC1 | CC2 | CC3;
1059 case LT: return CC1;
1060 case GT: return CC2 | CC3;
1061 case LE: return CC0 | CC1;
1062 case GE: return CC0 | CC2 | CC3;
1063 default: return -1;
1064 }
1065 break;
1066
1067 case CCSmode:
1068 switch (GET_CODE (code))
1069 {
1070 case EQ: return CC0;
1071 case NE: return CC1 | CC2 | CC3;
1072 case LT: return CC1;
1073 case GT: return CC2;
1074 case LE: return CC0 | CC1;
1075 case GE: return CC0 | CC2;
1076 case UNORDERED: return CC3;
1077 case ORDERED: return CC0 | CC1 | CC2;
1078 case UNEQ: return CC0 | CC3;
1079 case UNLT: return CC1 | CC3;
1080 case UNGT: return CC2 | CC3;
1081 case UNLE: return CC0 | CC1 | CC3;
1082 case UNGE: return CC0 | CC2 | CC3;
1083 case LTGT: return CC1 | CC2;
1084 default: return -1;
1085 }
1086 break;
1087
1088 case CCSRmode:
1089 switch (GET_CODE (code))
1090 {
1091 case EQ: return CC0;
1092 case NE: return CC2 | CC1 | CC3;
1093 case LT: return CC2;
1094 case GT: return CC1;
1095 case LE: return CC0 | CC2;
1096 case GE: return CC0 | CC1;
1097 case UNORDERED: return CC3;
1098 case ORDERED: return CC0 | CC2 | CC1;
1099 case UNEQ: return CC0 | CC3;
1100 case UNLT: return CC2 | CC3;
1101 case UNGT: return CC1 | CC3;
1102 case UNLE: return CC0 | CC2 | CC3;
1103 case UNGE: return CC0 | CC1 | CC3;
1104 case LTGT: return CC2 | CC1;
1105 default: return -1;
1106 }
1107 break;
1108
1109 default:
1110 return -1;
1111 }
1112 }
1113
1114
1115 /* Return branch condition mask to implement a compare and branch
1116 specified by CODE. Return -1 for invalid comparisons. */
1117
1118 int
1119 s390_compare_and_branch_condition_mask (rtx code)
1120 {
1121 const int CC0 = 1 << 3;
1122 const int CC1 = 1 << 2;
1123 const int CC2 = 1 << 1;
1124
1125 switch (GET_CODE (code))
1126 {
1127 case EQ:
1128 return CC0;
1129 case NE:
1130 return CC1 | CC2;
1131 case LT:
1132 case LTU:
1133 return CC1;
1134 case GT:
1135 case GTU:
1136 return CC2;
1137 case LE:
1138 case LEU:
1139 return CC0 | CC1;
1140 case GE:
1141 case GEU:
1142 return CC0 | CC2;
1143 default:
1144 gcc_unreachable ();
1145 }
1146 return -1;
1147 }
1148
1149 /* If INV is false, return assembler mnemonic string to implement
1150 a branch specified by CODE. If INV is true, return mnemonic
1151 for the corresponding inverted branch. */
1152
1153 static const char *
1154 s390_branch_condition_mnemonic (rtx code, int inv)
1155 {
1156 int mask;
1157
1158 static const char *const mnemonic[16] =
1159 {
1160 NULL, "o", "h", "nle",
1161 "l", "nhe", "lh", "ne",
1162 "e", "nlh", "he", "nl",
1163 "le", "nh", "no", NULL
1164 };
1165
1166 if (GET_CODE (XEXP (code, 0)) == REG
1167 && REGNO (XEXP (code, 0)) == CC_REGNUM
1168 && XEXP (code, 1) == const0_rtx)
1169 mask = s390_branch_condition_mask (code);
1170 else
1171 mask = s390_compare_and_branch_condition_mask (code);
1172
1173 gcc_assert (mask >= 0);
1174
1175 if (inv)
1176 mask ^= 15;
1177
1178 gcc_assert (mask >= 1 && mask <= 14);
1179
1180 return mnemonic[mask];
1181 }
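/* Example: a CCZmode EQ test yields mask CC0 == 8, and mnemonic[8] is "e";
   the inverted branch uses mask 8 ^ 15 == 7, i.e. "ne".  */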
1182
1183 /* Return the part of op which has a value different from def.
1184 The size of the part is determined by mode.
1185 Use this function only if you already know that op really
1186 contains such a part. */
1187
1188 unsigned HOST_WIDE_INT
1189 s390_extract_part (rtx op, enum machine_mode mode, int def)
1190 {
1191 unsigned HOST_WIDE_INT value = 0;
1192 int max_parts = HOST_BITS_PER_WIDE_INT / GET_MODE_BITSIZE (mode);
1193 int part_bits = GET_MODE_BITSIZE (mode);
1194 unsigned HOST_WIDE_INT part_mask
1195 = ((unsigned HOST_WIDE_INT)1 << part_bits) - 1;
1196 int i;
1197
1198 for (i = 0; i < max_parts; i++)
1199 {
1200 if (i == 0)
1201 value = (unsigned HOST_WIDE_INT) INTVAL (op);
1202 else
1203 value >>= part_bits;
1204
1205 if ((value & part_mask) != (def & part_mask))
1206 return value & part_mask;
1207 }
1208
1209 gcc_unreachable ();
1210 }
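/* Example: s390_extract_part (GEN_INT (0x12340000), HImode, 0) scans the
   HImode parts from the least significant end and returns 0x1234, the only
   part that differs from the default value 0.  */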
1211
1212 /* If OP is an integer constant of mode MODE with exactly one
1213 part of mode PART_MODE unequal to DEF, return the number of that
1214 part. Otherwise, return -1. */
1215
1216 int
1217 s390_single_part (rtx op,
1218 enum machine_mode mode,
1219 enum machine_mode part_mode,
1220 int def)
1221 {
1222 unsigned HOST_WIDE_INT value = 0;
1223 int n_parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (part_mode);
1224 unsigned HOST_WIDE_INT part_mask
1225 = ((unsigned HOST_WIDE_INT)1 << GET_MODE_BITSIZE (part_mode)) - 1;
1226 int i, part = -1;
1227
1228 if (GET_CODE (op) != CONST_INT)
1229 return -1;
1230
1231 for (i = 0; i < n_parts; i++)
1232 {
1233 if (i == 0)
1234 value = (unsigned HOST_WIDE_INT) INTVAL (op);
1235 else
1236 value >>= GET_MODE_BITSIZE (part_mode);
1237
1238 if ((value & part_mask) != (def & part_mask))
1239 {
1240 if (part != -1)
1241 return -1;
1242 else
1243 part = i;
1244 }
1245 }
1246 return part == -1 ? -1 : n_parts - 1 - part;
1247 }
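/* Example: s390_single_part (GEN_INT (0x0000ff00), SImode, QImode, 0)
   returns 2, because exactly one QImode part differs from 0 and it is the
   third byte counted from the most significant end of the SImode value.  */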
1248
1249 /* Return true if IN contains a contiguous bitfield in the lower SIZE
1250 bits and no other bits are set in IN. POS and LENGTH can be used
1251 to obtain the start position and the length of the bitfield.
1252
1253 POS gives the position of the first bit of the bitfield counting
1254 from the lowest order bit starting with zero. In order to use this
1255 value for S/390 instructions this has to be converted to "bits big
1256 endian" style. */
1257
1258 bool
1259 s390_contiguous_bitmask_p (unsigned HOST_WIDE_INT in, int size,
1260 int *pos, int *length)
1261 {
1262 int tmp_pos = 0;
1263 int tmp_length = 0;
1264 int i;
1265 unsigned HOST_WIDE_INT mask = 1ULL;
1266 bool contiguous = false;
1267
1268 for (i = 0; i < size; mask <<= 1, i++)
1269 {
1270 if (contiguous)
1271 {
1272 if (mask & in)
1273 tmp_length++;
1274 else
1275 break;
1276 }
1277 else
1278 {
1279 if (mask & in)
1280 {
1281 contiguous = true;
1282 tmp_length++;
1283 }
1284 else
1285 tmp_pos++;
1286 }
1287 }
1288
1289 if (!tmp_length)
1290 return false;
1291
1292 /* Calculate a mask for all bits beyond the contiguous bits. */
1293 mask = (-1LL & ~(((1ULL << (tmp_length + tmp_pos - 1)) << 1) - 1));
1294
1295 if (mask & in)
1296 return false;
1297
1298 if (tmp_length + tmp_pos - 1 > size)
1299 return false;
1300
1301 if (length)
1302 *length = tmp_length;
1303
1304 if (pos)
1305 *pos = tmp_pos;
1306
1307 return true;
1308 }
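/* Example: for in = 0x0000ff00 and size = 32 this returns true with
   *pos = 8 and *length = 8; for in = 0x00ff00ff it returns false because
   the set bits are not contiguous.  */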
1309
1310 /* Check whether we can (and want to) split a double-word
1311 move in mode MODE from SRC to DST into two single-word
1312 moves, moving the subword FIRST_SUBWORD first. */
1313
1314 bool
1315 s390_split_ok_p (rtx dst, rtx src, enum machine_mode mode, int first_subword)
1316 {
1317 /* Floating point registers cannot be split. */
1318 if (FP_REG_P (src) || FP_REG_P (dst))
1319 return false;
1320
1321 /* We don't need to split if operands are directly accessible. */
1322 if (s_operand (src, mode) || s_operand (dst, mode))
1323 return false;
1324
1325 /* Non-offsettable memory references cannot be split. */
1326 if ((GET_CODE (src) == MEM && !offsettable_memref_p (src))
1327 || (GET_CODE (dst) == MEM && !offsettable_memref_p (dst)))
1328 return false;
1329
1330 /* Moving the first subword must not clobber a register
1331 needed to move the second subword. */
1332 if (register_operand (dst, mode))
1333 {
1334 rtx subreg = operand_subword (dst, first_subword, 0, mode);
1335 if (reg_overlap_mentioned_p (subreg, src))
1336 return false;
1337 }
1338
1339 return true;
1340 }
1341
1342 /* Return true if it can be proven that [MEM1, MEM1 + SIZE]
1343 and [MEM2, MEM2 + SIZE] do overlap and false
1344 otherwise. */
1345
1346 bool
1347 s390_overlap_p (rtx mem1, rtx mem2, HOST_WIDE_INT size)
1348 {
1349 rtx addr1, addr2, addr_delta;
1350 HOST_WIDE_INT delta;
1351
1352 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
1353 return true;
1354
1355 if (size == 0)
1356 return false;
1357
1358 addr1 = XEXP (mem1, 0);
1359 addr2 = XEXP (mem2, 0);
1360
1361 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
1362
1363 /* This overlapping check is used by peepholes merging memory block operations.
1364 Overlapping operations would otherwise be recognized by the S/390 hardware
1365 and would fall back to a slower implementation. Allowing overlapping
1366 operations would lead to slow code but not to wrong code. Therefore we are
1367 somewhat optimistic if we cannot prove that the memory blocks are
1368 overlapping.
1369 That's why we return false here although this may accept operations on
1370 overlapping memory areas. */
1371 if (!addr_delta || GET_CODE (addr_delta) != CONST_INT)
1372 return false;
1373
1374 delta = INTVAL (addr_delta);
1375
1376 if (delta == 0
1377 || (delta > 0 && delta < size)
1378 || (delta < 0 && -delta < size))
1379 return true;
1380
1381 return false;
1382 }
1383
1384 /* Check whether the address of memory reference MEM2 equals exactly
1385 the address of memory reference MEM1 plus DELTA. Return true if
1386 we can prove this to be the case, false otherwise. */
1387
1388 bool
1389 s390_offset_p (rtx mem1, rtx mem2, rtx delta)
1390 {
1391 rtx addr1, addr2, addr_delta;
1392
1393 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
1394 return false;
1395
1396 addr1 = XEXP (mem1, 0);
1397 addr2 = XEXP (mem2, 0);
1398
1399 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
1400 if (!addr_delta || !rtx_equal_p (addr_delta, delta))
1401 return false;
1402
1403 return true;
1404 }
1405
1406 /* Expand logical operator CODE in mode MODE with operands OPERANDS. */
1407
1408 void
1409 s390_expand_logical_operator (enum rtx_code code, enum machine_mode mode,
1410 rtx *operands)
1411 {
1412 enum machine_mode wmode = mode;
1413 rtx dst = operands[0];
1414 rtx src1 = operands[1];
1415 rtx src2 = operands[2];
1416 rtx op, clob, tem;
1417
1418 /* If we cannot handle the operation directly, use a temp register. */
1419 if (!s390_logical_operator_ok_p (operands))
1420 dst = gen_reg_rtx (mode);
1421
1422 /* QImode and HImode patterns make sense only if we have a destination
1423 in memory. Otherwise perform the operation in SImode. */
1424 if ((mode == QImode || mode == HImode) && GET_CODE (dst) != MEM)
1425 wmode = SImode;
1426
1427 /* Widen operands if required. */
1428 if (mode != wmode)
1429 {
1430 if (GET_CODE (dst) == SUBREG
1431 && (tem = simplify_subreg (wmode, dst, mode, 0)) != 0)
1432 dst = tem;
1433 else if (REG_P (dst))
1434 dst = gen_rtx_SUBREG (wmode, dst, 0);
1435 else
1436 dst = gen_reg_rtx (wmode);
1437
1438 if (GET_CODE (src1) == SUBREG
1439 && (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)
1440 src1 = tem;
1441 else if (GET_MODE (src1) != VOIDmode)
1442 src1 = gen_rtx_SUBREG (wmode, force_reg (mode, src1), 0);
1443
1444 if (GET_CODE (src2) == SUBREG
1445 && (tem = simplify_subreg (wmode, src2, mode, 0)) != 0)
1446 src2 = tem;
1447 else if (GET_MODE (src2) != VOIDmode)
1448 src2 = gen_rtx_SUBREG (wmode, force_reg (mode, src2), 0);
1449 }
1450
1451 /* Emit the instruction. */
1452 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, wmode, src1, src2));
1453 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
1454 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
1455
1456 /* Fix up the destination if needed. */
1457 if (dst != operands[0])
1458 emit_move_insn (operands[0], gen_lowpart (mode, dst));
1459 }
1460
1461 /* Check whether OPERANDS are OK for a logical operation (AND, IOR, XOR). */
1462
1463 bool
1464 s390_logical_operator_ok_p (rtx *operands)
1465 {
1466 /* If the destination operand is in memory, it needs to coincide
1467 with one of the source operands. After reload, it has to be
1468 the first source operand. */
1469 if (GET_CODE (operands[0]) == MEM)
1470 return rtx_equal_p (operands[0], operands[1])
1471 || (!reload_completed && rtx_equal_p (operands[0], operands[2]));
1472
1473 return true;
1474 }
1475
1476 /* Narrow logical operation CODE of memory operand MEMOP with immediate
1477 operand IMMOP to switch from SS to SI type instructions. */
1478
1479 void
1480 s390_narrow_logical_operator (enum rtx_code code, rtx *memop, rtx *immop)
1481 {
1482 int def = code == AND ? -1 : 0;
1483 HOST_WIDE_INT mask;
1484 int part;
1485
1486 gcc_assert (GET_CODE (*memop) == MEM);
1487 gcc_assert (!MEM_VOLATILE_P (*memop));
1488
1489 mask = s390_extract_part (*immop, QImode, def);
1490 part = s390_single_part (*immop, GET_MODE (*memop), QImode, def);
1491 gcc_assert (part >= 0);
1492
1493 *memop = adjust_address (*memop, QImode, part);
1494 *immop = gen_int_mode (mask, QImode);
1495 }
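/* Example: an SImode "mem &= 0xffffff00" has exactly one QImode part that
   differs from the AND default -1, namely the least significant byte, so it
   is rewritten above as a QImode AND of that single byte with 0x00 (an NI
   instruction) instead of a full-width operation.  */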
1496
1497
1498 /* How to allocate a 'struct machine_function'. */
1499
1500 static struct machine_function *
1501 s390_init_machine_status (void)
1502 {
1503 return ggc_alloc_cleared_machine_function ();
1504 }
1505
1506 static void
1507 s390_option_override (void)
1508 {
1509 /* Set up function hooks. */
1510 init_machine_status = s390_init_machine_status;
1511
1512 /* Architecture mode defaults according to ABI. */
1513 if (!(target_flags_explicit & MASK_ZARCH))
1514 {
1515 if (TARGET_64BIT)
1516 target_flags |= MASK_ZARCH;
1517 else
1518 target_flags &= ~MASK_ZARCH;
1519 }
1520
1521 /* Set the march default in case it hasn't been specified on
1522 cmdline. */
1523 if (s390_arch == PROCESSOR_max)
1524 {
1525 s390_arch_string = TARGET_ZARCH? "z900" : "g5";
1526 s390_arch = TARGET_ZARCH ? PROCESSOR_2064_Z900 : PROCESSOR_9672_G5;
1527 s390_arch_flags = processor_flags_table[(int)s390_arch];
1528 }
1529
1530 /* Determine processor to tune for. */
1531 if (s390_tune == PROCESSOR_max)
1532 {
1533 s390_tune = s390_arch;
1534 s390_tune_flags = s390_arch_flags;
1535 }
1536
1537 /* Sanity checks. */
1538 if (TARGET_ZARCH && !TARGET_CPU_ZARCH)
1539 error ("z/Architecture mode not supported on %s", s390_arch_string);
1540 if (TARGET_64BIT && !TARGET_ZARCH)
1541 error ("64-bit ABI not supported in ESA/390 mode");
1542
1543 if (TARGET_HARD_DFP && !TARGET_DFP)
1544 {
1545 if (target_flags_explicit & MASK_HARD_DFP)
1546 {
1547 if (!TARGET_CPU_DFP)
1548 error ("hardware decimal floating point instructions"
1549 " not available on %s", s390_arch_string);
1550 if (!TARGET_ZARCH)
1551 error ("hardware decimal floating point instructions"
1552 " not available in ESA/390 mode");
1553 }
1554 else
1555 target_flags &= ~MASK_HARD_DFP;
1556 }
1557
1558 if ((target_flags_explicit & MASK_SOFT_FLOAT) && TARGET_SOFT_FLOAT)
1559 {
1560 if ((target_flags_explicit & MASK_HARD_DFP) && TARGET_HARD_DFP)
1561 error ("-mhard-dfp can%'t be used in conjunction with -msoft-float");
1562
1563 target_flags &= ~MASK_HARD_DFP;
1564 }
1565
1566 /* Set processor cost function. */
1567 switch (s390_tune)
1568 {
1569 case PROCESSOR_2084_Z990:
1570 s390_cost = &z990_cost;
1571 break;
1572 case PROCESSOR_2094_Z9_109:
1573 s390_cost = &z9_109_cost;
1574 break;
1575 case PROCESSOR_2097_Z10:
1576 s390_cost = &z10_cost;
break;
1577 case PROCESSOR_2817_Z196:
1578 s390_cost = &z196_cost;
1579 break;
1580 default:
1581 s390_cost = &z900_cost;
1582 }
1583
1584 if (TARGET_BACKCHAIN && TARGET_PACKED_STACK && TARGET_HARD_FLOAT)
1585 error ("-mbackchain -mpacked-stack -mhard-float are not supported "
1586 "in combination");
1587
1588 if (s390_stack_size)
1589 {
1590 if (s390_stack_guard >= s390_stack_size)
1591 error ("stack size must be greater than the stack guard value");
1592 else if (s390_stack_size > 1 << 16)
1593 error ("stack size must not be greater than 64k");
1594 }
1595 else if (s390_stack_guard)
1596 error ("-mstack-guard implies use of -mstack-size");
1597
1598 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
1599 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
1600 target_flags |= MASK_LONG_DOUBLE_128;
1601 #endif
1602
1603 if (s390_tune == PROCESSOR_2097_Z10
1604 || s390_tune == PROCESSOR_2817_Z196)
1605 {
1606 maybe_set_param_value (PARAM_MAX_UNROLLED_INSNS, 100,
1607 global_options.x_param_values,
1608 global_options_set.x_param_values);
1609 maybe_set_param_value (PARAM_MAX_UNROLL_TIMES, 32,
1610 global_options.x_param_values,
1611 global_options_set.x_param_values);
1612 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 2000,
1613 global_options.x_param_values,
1614 global_options_set.x_param_values);
1615 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEEL_TIMES, 64,
1616 global_options.x_param_values,
1617 global_options_set.x_param_values);
1618 }
1619
1620 maybe_set_param_value (PARAM_MAX_PENDING_LIST_LENGTH, 256,
1621 global_options.x_param_values,
1622 global_options_set.x_param_values);
1623 /* values for loop prefetching */
1624 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, 256,
1625 global_options.x_param_values,
1626 global_options_set.x_param_values);
1627 maybe_set_param_value (PARAM_L1_CACHE_SIZE, 128,
1628 global_options.x_param_values,
1629 global_options_set.x_param_values);
1630 /* s390 has more than 2 levels and the size is much larger. Since
1631 we are always running virtualized, assume that we only get a small
1632 part of the caches above L1. */
1633 maybe_set_param_value (PARAM_L2_CACHE_SIZE, 1500,
1634 global_options.x_param_values,
1635 global_options_set.x_param_values);
1636 maybe_set_param_value (PARAM_PREFETCH_MIN_INSN_TO_MEM_RATIO, 2,
1637 global_options.x_param_values,
1638 global_options_set.x_param_values);
1639 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6,
1640 global_options.x_param_values,
1641 global_options_set.x_param_values);
1642
1643 /* This cannot reside in s390_option_optimization_table since HAVE_prefetch
1644 requires the arch flags to be evaluated already. Since prefetching
1645 is beneficial on s390, we enable it if available. */
1646 if (flag_prefetch_loop_arrays < 0 && HAVE_prefetch && optimize >= 3)
1647 flag_prefetch_loop_arrays = 1;
1648 }
1649
1650 /* Map for smallest class containing reg regno. */
1651
1652 const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
1653 { GENERAL_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1654 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1655 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1656 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1657 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1658 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1659 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1660 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1661 ADDR_REGS, CC_REGS, ADDR_REGS, ADDR_REGS,
1662 ACCESS_REGS, ACCESS_REGS
1663 };
1664
1665 /* Return attribute type of insn. */
1666
1667 static enum attr_type
1668 s390_safe_attr_type (rtx insn)
1669 {
1670 if (recog_memoized (insn) >= 0)
1671 return get_attr_type (insn);
1672 else
1673 return TYPE_NONE;
1674 }
1675
1676 /* Return true if DISP is a valid short displacement. */
1677
1678 static bool
1679 s390_short_displacement (rtx disp)
1680 {
1681 /* No displacement is OK. */
1682 if (!disp)
1683 return true;
1684
1685 /* Without the long displacement facility we don't need to
1686 distinguish between long and short displacement. */
1687 if (!TARGET_LONG_DISPLACEMENT)
1688 return true;
1689
1690 /* Integer displacement in range. */
1691 if (GET_CODE (disp) == CONST_INT)
1692 return INTVAL (disp) >= 0 && INTVAL (disp) < 4096;
1693
1694 /* GOT offset is not OK, the GOT can be large. */
1695 if (GET_CODE (disp) == CONST
1696 && GET_CODE (XEXP (disp, 0)) == UNSPEC
1697 && (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
1698 || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
1699 return false;
1700
1701 /* All other symbolic constants are literal pool references,
1702 which are OK as the literal pool must be small. */
1703 if (GET_CODE (disp) == CONST)
1704 return true;
1705
1706 return false;
1707 }
1708
1709 /* Decompose a RTL expression ADDR for a memory address into
1710 its components, returned in OUT.
1711
1712 Returns false if ADDR is not a valid memory address, true
1713 otherwise. If OUT is NULL, don't return the components,
1714 but check for validity only.
1715
1716 Note: Only addresses in canonical form are recognized.
1717 LEGITIMIZE_ADDRESS should convert non-canonical forms to the
1718 canonical form so that they will be recognized. */
1719
1720 static int
1721 s390_decompose_address (rtx addr, struct s390_address *out)
1722 {
1723 HOST_WIDE_INT offset = 0;
1724 rtx base = NULL_RTX;
1725 rtx indx = NULL_RTX;
1726 rtx disp = NULL_RTX;
1727 rtx orig_disp;
1728 bool pointer = false;
1729 bool base_ptr = false;
1730 bool indx_ptr = false;
1731 bool literal_pool = false;
1732
1733 /* We may need to substitute the literal pool base register into the address
1734 below. However, at this point we do not know which register is going to
1735 be used as base, so we substitute the arg pointer register. This is going
1736 to be treated as holding a pointer below -- it shouldn't be used for any
1737 other purpose. */
1738 rtx fake_pool_base = gen_rtx_REG (Pmode, ARG_POINTER_REGNUM);
1739
1740 /* Decompose address into base + index + displacement. */
1741
1742 if (GET_CODE (addr) == REG || GET_CODE (addr) == UNSPEC)
1743 base = addr;
1744
1745 else if (GET_CODE (addr) == PLUS)
1746 {
1747 rtx op0 = XEXP (addr, 0);
1748 rtx op1 = XEXP (addr, 1);
1749 enum rtx_code code0 = GET_CODE (op0);
1750 enum rtx_code code1 = GET_CODE (op1);
1751
1752 if (code0 == REG || code0 == UNSPEC)
1753 {
1754 if (code1 == REG || code1 == UNSPEC)
1755 {
1756 indx = op0; /* index + base */
1757 base = op1;
1758 }
1759
1760 else
1761 {
1762 base = op0; /* base + displacement */
1763 disp = op1;
1764 }
1765 }
1766
1767 else if (code0 == PLUS)
1768 {
1769 indx = XEXP (op0, 0); /* index + base + disp */
1770 base = XEXP (op0, 1);
1771 disp = op1;
1772 }
1773
1774 else
1775 {
1776 return false;
1777 }
1778 }
1779
1780 else
1781 disp = addr; /* displacement */
1782
1783 /* Extract integer part of displacement. */
1784 orig_disp = disp;
1785 if (disp)
1786 {
1787 if (GET_CODE (disp) == CONST_INT)
1788 {
1789 offset = INTVAL (disp);
1790 disp = NULL_RTX;
1791 }
1792 else if (GET_CODE (disp) == CONST
1793 && GET_CODE (XEXP (disp, 0)) == PLUS
1794 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
1795 {
1796 offset = INTVAL (XEXP (XEXP (disp, 0), 1));
1797 disp = XEXP (XEXP (disp, 0), 0);
1798 }
1799 }
1800
1801 /* Strip off CONST here to avoid special case tests later. */
1802 if (disp && GET_CODE (disp) == CONST)
1803 disp = XEXP (disp, 0);
1804
1805 /* We can convert literal pool addresses to
1806 displacements by basing them off the base register. */
1807 if (disp && GET_CODE (disp) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (disp))
1808 {
1809 /* Either base or index must be free to hold the base register. */
1810 if (!base)
1811 base = fake_pool_base, literal_pool = true;
1812 else if (!indx)
1813 indx = fake_pool_base, literal_pool = true;
1814 else
1815 return false;
1816
1817 /* Mark up the displacement. */
1818 disp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, disp),
1819 UNSPEC_LTREL_OFFSET);
1820 }
1821
1822 /* Validate base register. */
1823 if (base)
1824 {
1825 if (GET_CODE (base) == UNSPEC)
1826 switch (XINT (base, 1))
1827 {
1828 case UNSPEC_LTREF:
1829 if (!disp)
1830 disp = gen_rtx_UNSPEC (Pmode,
1831 gen_rtvec (1, XVECEXP (base, 0, 0)),
1832 UNSPEC_LTREL_OFFSET);
1833 else
1834 return false;
1835
1836 base = XVECEXP (base, 0, 1);
1837 break;
1838
1839 case UNSPEC_LTREL_BASE:
1840 if (XVECLEN (base, 0) == 1)
1841 base = fake_pool_base, literal_pool = true;
1842 else
1843 base = XVECEXP (base, 0, 1);
1844 break;
1845
1846 default:
1847 return false;
1848 }
1849
1850 if (!REG_P (base)
1851 || (GET_MODE (base) != SImode
1852 && GET_MODE (base) != Pmode))
1853 return false;
1854
1855 if (REGNO (base) == STACK_POINTER_REGNUM
1856 || REGNO (base) == FRAME_POINTER_REGNUM
1857 || ((reload_completed || reload_in_progress)
1858 && frame_pointer_needed
1859 && REGNO (base) == HARD_FRAME_POINTER_REGNUM)
1860 || REGNO (base) == ARG_POINTER_REGNUM
1861 || (flag_pic
1862 && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
1863 pointer = base_ptr = true;
1864
1865 if ((reload_completed || reload_in_progress)
1866 && base == cfun->machine->base_reg)
1867 pointer = base_ptr = literal_pool = true;
1868 }
1869
1870 /* Validate index register. */
1871 if (indx)
1872 {
1873 if (GET_CODE (indx) == UNSPEC)
1874 switch (XINT (indx, 1))
1875 {
1876 case UNSPEC_LTREF:
1877 if (!disp)
1878 disp = gen_rtx_UNSPEC (Pmode,
1879 gen_rtvec (1, XVECEXP (indx, 0, 0)),
1880 UNSPEC_LTREL_OFFSET);
1881 else
1882 return false;
1883
1884 indx = XVECEXP (indx, 0, 1);
1885 break;
1886
1887 case UNSPEC_LTREL_BASE:
1888 if (XVECLEN (indx, 0) == 1)
1889 indx = fake_pool_base, literal_pool = true;
1890 else
1891 indx = XVECEXP (indx, 0, 1);
1892 break;
1893
1894 default:
1895 return false;
1896 }
1897
1898 if (!REG_P (indx)
1899 || (GET_MODE (indx) != SImode
1900 && GET_MODE (indx) != Pmode))
1901 return false;
1902
1903 if (REGNO (indx) == STACK_POINTER_REGNUM
1904 || REGNO (indx) == FRAME_POINTER_REGNUM
1905 || ((reload_completed || reload_in_progress)
1906 && frame_pointer_needed
1907 && REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
1908 || REGNO (indx) == ARG_POINTER_REGNUM
1909 || (flag_pic
1910 && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
1911 pointer = indx_ptr = true;
1912
1913 if ((reload_completed || reload_in_progress)
1914 && indx == cfun->machine->base_reg)
1915 pointer = indx_ptr = literal_pool = true;
1916 }
1917
1918 /* Prefer to use pointer as base, not index. */
1919 if (base && indx && !base_ptr
1920 && (indx_ptr || (!REG_POINTER (base) && REG_POINTER (indx))))
1921 {
1922 rtx tmp = base;
1923 base = indx;
1924 indx = tmp;
1925 }
1926
1927 /* Validate displacement. */
1928 if (!disp)
1929 {
1930 /* If virtual registers are involved, the displacement will change later
1931 anyway as the virtual registers get eliminated. This could make a
1932 valid displacement invalid, but it is more likely to make an invalid
1933 displacement valid, because we sometimes access the register save area
1934 via negative offsets to one of those registers.
1935 Thus we don't check the displacement for validity here. If after
1936 elimination the displacement turns out to be invalid after all,
1937 this is fixed up by reload in any case. */
1938 if (base != arg_pointer_rtx
1939 && indx != arg_pointer_rtx
1940 && base != return_address_pointer_rtx
1941 && indx != return_address_pointer_rtx
1942 && base != frame_pointer_rtx
1943 && indx != frame_pointer_rtx
1944 && base != virtual_stack_vars_rtx
1945 && indx != virtual_stack_vars_rtx)
1946 if (!DISP_IN_RANGE (offset))
1947 return false;
1948 }
1949 else
1950 {
1951 /* All the special cases are pointers. */
1952 pointer = true;
1953
1954 /* In the small-PIC case, the linker converts @GOT
1955 and @GOTNTPOFF offsets to possible displacements. */
1956 if (GET_CODE (disp) == UNSPEC
1957 && (XINT (disp, 1) == UNSPEC_GOT
1958 || XINT (disp, 1) == UNSPEC_GOTNTPOFF)
1959 && flag_pic == 1)
1960 {
1961 ;
1962 }
1963
1964 /* Accept pool label offsets. */
1965 else if (GET_CODE (disp) == UNSPEC
1966 && XINT (disp, 1) == UNSPEC_POOL_OFFSET)
1967 ;
1968
1969 /* Accept literal pool references. */
1970 else if (GET_CODE (disp) == UNSPEC
1971 && XINT (disp, 1) == UNSPEC_LTREL_OFFSET)
1972 {
1973 /* In case CSE pulled a non-literal-pool reference out of
1974 the pool, we have to reject the address. This is
1975 especially important when loading the GOT pointer on
1976 non-zarch CPUs. In this case the literal pool contains an
1977 lt-relative offset to the _GLOBAL_OFFSET_TABLE_ label which
1978 will most likely exceed the displacement range. */
1979 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
1980 || !CONSTANT_POOL_ADDRESS_P (XVECEXP (disp, 0, 0)))
1981 return false;
1982
1983 orig_disp = gen_rtx_CONST (Pmode, disp);
1984 if (offset)
1985 {
1986 /* If we have an offset, make sure it does not
1987 exceed the size of the constant pool entry. */
1988 rtx sym = XVECEXP (disp, 0, 0);
1989 if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))
1990 return false;
1991
1992 orig_disp = plus_constant (orig_disp, offset);
1993 }
1994 }
1995
1996 else
1997 return false;
1998 }
1999
2000 if (!base && !indx)
2001 pointer = true;
2002
2003 if (out)
2004 {
2005 out->base = base;
2006 out->indx = indx;
2007 out->disp = orig_disp;
2008 out->pointer = pointer;
2009 out->literal_pool = literal_pool;
2010 }
2011
2012 return true;
2013 }
2014
2015 /* Decompose an RTL expression OP for a shift count into its components,
2016 and return the base register in BASE and the offset in OFFSET.
2017
2018 Return true if OP is a valid shift count, false if not. */
2019
2020 bool
2021 s390_decompose_shift_count (rtx op, rtx *base, HOST_WIDE_INT *offset)
2022 {
2023 HOST_WIDE_INT off = 0;
2024
2025 /* We can have an integer constant, an address register,
2026 or a sum of the two. */
2027 if (GET_CODE (op) == CONST_INT)
2028 {
2029 off = INTVAL (op);
2030 op = NULL_RTX;
2031 }
2032 if (op && GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT)
2033 {
2034 off = INTVAL (XEXP (op, 1));
2035 op = XEXP (op, 0);
2036 }
2037 while (op && GET_CODE (op) == SUBREG)
2038 op = SUBREG_REG (op);
2039
2040 if (op && GET_CODE (op) != REG)
2041 return false;
2042
2043 if (offset)
2044 *offset = off;
2045 if (base)
2046 *base = op;
2047
2048 return true;
2049 }
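/* For illustration (example RTL only, not taken from the sources): an
   operand such as (plus (reg 2) (const_int 7)) decomposes into
   BASE = (reg 2) and OFFSET = 7, while a plain (const_int 63) yields
   BASE = NULL_RTX and OFFSET = 63.  */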
2050
2051
2052 /* Return true if memory operand OP has a valid address without an index register. */
2053
2054 bool
2055 s390_legitimate_address_without_index_p (rtx op)
2056 {
2057 struct s390_address addr;
2058
2059 if (!s390_decompose_address (XEXP (op, 0), &addr))
2060 return false;
2061 if (addr.indx)
2062 return false;
2063
2064 return true;
2065 }
2066
2067
2068 /* Return true if ADDR is of kind symbol_ref or symbol_ref + const_int
2069 and return these parts in SYMREF and ADDEND. You can pass NULL in
2070 SYMREF and/or ADDEND if you are not interested in these values.
2071 Literal pool references are *not* considered symbol references. */
2072
2073 static bool
2074 s390_symref_operand_p (rtx addr, rtx *symref, HOST_WIDE_INT *addend)
2075 {
2076 HOST_WIDE_INT tmpaddend = 0;
2077
2078 if (GET_CODE (addr) == CONST)
2079 addr = XEXP (addr, 0);
2080
2081 if (GET_CODE (addr) == PLUS)
2082 {
2083 if (GET_CODE (XEXP (addr, 0)) == SYMBOL_REF
2084 && !CONSTANT_POOL_ADDRESS_P (XEXP (addr, 0))
2085 && CONST_INT_P (XEXP (addr, 1)))
2086 {
2087 tmpaddend = INTVAL (XEXP (addr, 1));
2088 addr = XEXP (addr, 0);
2089 }
2090 else
2091 return false;
2092 }
2093 else
2094 if (GET_CODE (addr) != SYMBOL_REF || CONSTANT_POOL_ADDRESS_P (addr))
2095 return false;
2096
2097 if (symref)
2098 *symref = addr;
2099 if (addend)
2100 *addend = tmpaddend;
2101
2102 return true;
2103 }
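/* Illustrative example with a hypothetical symbol: for
   ADDR = (const (plus (symbol_ref "foo") (const_int 16))) the function
   returns true with *SYMREF = (symbol_ref "foo") and *ADDEND = 16,
   provided "foo" is not a constant pool address.  */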
2104
2105
2106 /* Return true if the address in OP is valid for constraint letter C
2107 if wrapped in a MEM rtx. Set LIT_POOL_OK to true if literal
2108 pool MEMs should be accepted. Only the Q, R, S, T constraint
2109 letters are allowed for C. */
2110
2111 static int
2112 s390_check_qrst_address (char c, rtx op, bool lit_pool_ok)
2113 {
2114 struct s390_address addr;
2115 bool decomposed = false;
2116
2117 /* This check makes sure that no symbolic addresses (except literal
2118 pool references) are accepted by the R or T constraints. */
2119 if (s390_symref_operand_p (op, NULL, NULL))
2120 return 0;
2121
2122 /* Ensure literal pool references are only accepted if LIT_POOL_OK. */
2123 if (!lit_pool_ok)
2124 {
2125 if (!s390_decompose_address (op, &addr))
2126 return 0;
2127 if (addr.literal_pool)
2128 return 0;
2129 decomposed = true;
2130 }
2131
2132 switch (c)
2133 {
2134 case 'Q': /* no index short displacement */
2135 if (!decomposed && !s390_decompose_address (op, &addr))
2136 return 0;
2137 if (addr.indx)
2138 return 0;
2139 if (!s390_short_displacement (addr.disp))
2140 return 0;
2141 break;
2142
2143 case 'R': /* with index short displacement */
2144 if (TARGET_LONG_DISPLACEMENT)
2145 {
2146 if (!decomposed && !s390_decompose_address (op, &addr))
2147 return 0;
2148 if (!s390_short_displacement (addr.disp))
2149 return 0;
2150 }
2151 /* Any invalid address here will be fixed up by reload,
2152 so accept it for the most generic constraint. */
2153 break;
2154
2155 case 'S': /* no index long displacement */
2156 if (!TARGET_LONG_DISPLACEMENT)
2157 return 0;
2158 if (!decomposed && !s390_decompose_address (op, &addr))
2159 return 0;
2160 if (addr.indx)
2161 return 0;
2162 if (s390_short_displacement (addr.disp))
2163 return 0;
2164 break;
2165
2166 case 'T': /* with index long displacement */
2167 if (!TARGET_LONG_DISPLACEMENT)
2168 return 0;
2169 /* Any invalid address here will be fixed up by reload,
2170 so accept it for the most generic constraint. */
2171 if ((decomposed || s390_decompose_address (op, &addr))
2172 && s390_short_displacement (addr.disp))
2173 return 0;
2174 break;
2175 default:
2176 return 0;
2177 }
2178 return 1;
2179 }
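/* Example (assuming the usual 12-bit unsigned short and 20-bit signed long
   displacement ranges): (plus (reg 2) (const_int 100)) satisfies 'Q',
   whereas (plus (reg 2) (const_int 40000)) does not fit a short
   displacement and is therefore only accepted by 'S' or 'T' on targets
   with long displacement support.  */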
2180
2181
2182 /* Evaluates constraint strings described by the regular expression
2183 ([A|B|Z](Q|R|S|T))|U|W|Y and returns 1 if OP is a valid operand for
2184 the constraint given in STR, and 0 otherwise. */
2185
2186 int
2187 s390_mem_constraint (const char *str, rtx op)
2188 {
2189 char c = str[0];
2190
2191 switch (c)
2192 {
2193 case 'A':
2194 /* Check for offsettable variants of memory constraints. */
2195 if (!MEM_P (op) || MEM_VOLATILE_P (op))
2196 return 0;
2197 if ((reload_completed || reload_in_progress)
2198 ? !offsettable_memref_p (op) : !offsettable_nonstrict_memref_p (op))
2199 return 0;
2200 return s390_check_qrst_address (str[1], XEXP (op, 0), true);
2201 case 'B':
2202 /* Check for non-literal-pool variants of memory constraints. */
2203 if (!MEM_P (op))
2204 return 0;
2205 return s390_check_qrst_address (str[1], XEXP (op, 0), false);
2206 case 'Q':
2207 case 'R':
2208 case 'S':
2209 case 'T':
2210 if (GET_CODE (op) != MEM)
2211 return 0;
2212 return s390_check_qrst_address (c, XEXP (op, 0), true);
2213 case 'U':
2214 return (s390_check_qrst_address ('Q', op, true)
2215 || s390_check_qrst_address ('R', op, true));
2216 case 'W':
2217 return (s390_check_qrst_address ('S', op, true)
2218 || s390_check_qrst_address ('T', op, true));
2219 case 'Y':
2220 /* Simply check for the basic form of a shift count. Reload will
2221 take care of making sure we have a proper base register. */
2222 if (!s390_decompose_shift_count (op, NULL, NULL))
2223 return 0;
2224 break;
2225 case 'Z':
2226 return s390_check_qrst_address (str[1], op, true);
2227 default:
2228 return 0;
2229 }
2230 return 1;
2231 }
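/* Note the asymmetry above: for 'Q'..'T' the operand is a MEM whose
   address is extracted before checking, while 'U', 'W' and the two-letter
   'Z' forms are handed the address itself without a MEM wrapper.  */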
2232
2233
2234 /* Evaluates constraint strings starting with letter O. Input
2235 parameter C is the letter following the "O" in the constraint
2236 string. Returns 1 if VALUE meets the respective constraint and 0
2237 otherwise. */
2238
2239 int
2240 s390_O_constraint_str (const char c, HOST_WIDE_INT value)
2241 {
2242 if (!TARGET_EXTIMM)
2243 return 0;
2244
2245 switch (c)
2246 {
2247 case 's':
2248 return trunc_int_for_mode (value, SImode) == value;
2249
2250 case 'p':
2251 return value == 0
2252 || s390_single_part (GEN_INT (value), DImode, SImode, 0) == 1;
2253
2254 case 'n':
2255 return s390_single_part (GEN_INT (value - 1), DImode, SImode, -1) == 1;
2256
2257 default:
2258 gcc_unreachable ();
2259 }
2260 }
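/* For instance, the 's' case above accepts exactly those values that
   survive truncation to SImode, i.e. constants representable as a signed
   32-bit immediate of the extended-immediate facility.  */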
2261
2262
2263 /* Evaluates constraint strings starting with letter N. Parameter STR
2264 contains the letters following letter "N" in the constraint string.
2265 Returns true if VALUE matches the constraint. */
2266
2267 int
2268 s390_N_constraint_str (const char *str, HOST_WIDE_INT value)
2269 {
2270 enum machine_mode mode, part_mode;
2271 int def;
2272 int part, part_goal;
2273
2274
2275 if (str[0] == 'x')
2276 part_goal = -1;
2277 else
2278 part_goal = str[0] - '0';
2279
2280 switch (str[1])
2281 {
2282 case 'Q':
2283 part_mode = QImode;
2284 break;
2285 case 'H':
2286 part_mode = HImode;
2287 break;
2288 case 'S':
2289 part_mode = SImode;
2290 break;
2291 default:
2292 return 0;
2293 }
2294
2295 switch (str[2])
2296 {
2297 case 'H':
2298 mode = HImode;
2299 break;
2300 case 'S':
2301 mode = SImode;
2302 break;
2303 case 'D':
2304 mode = DImode;
2305 break;
2306 default:
2307 return 0;
2308 }
2309
2310 switch (str[3])
2311 {
2312 case '0':
2313 def = 0;
2314 break;
2315 case 'F':
2316 def = -1;
2317 break;
2318 default:
2319 return 0;
2320 }
2321
2322 if (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (part_mode))
2323 return 0;
2324
2325 part = s390_single_part (GEN_INT (value), mode, part_mode, def);
2326 if (part < 0)
2327 return 0;
2328 if (part_goal != -1 && part_goal != part)
2329 return 0;
2330
2331 return 1;
2332 }
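/* Decoding example (hypothetical constraint string): for "N" followed by
   "3HD0" the code above requests part number 3, part mode HImode, mode
   DImode and a default part value of 0, i.e. a DImode constant in which
   exactly one 16-bit part is nonzero, namely the part numbered 3 by
   s390_single_part.  */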
2333
2334
2335 /* Returns true if the input parameter VALUE is a float zero. */
2336
2337 int
2338 s390_float_const_zero_p (rtx value)
2339 {
2340 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
2341 && value == CONST0_RTX (GET_MODE (value)));
2342 }
2343
2344 /* Implement TARGET_REGISTER_MOVE_COST. */
2345
2346 static int
2347 s390_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2348 reg_class_t from, reg_class_t to)
2349 {
2350 /* On s390, copy between fprs and gprs is expensive. */
2351 if ((reg_classes_intersect_p (from, GENERAL_REGS)
2352 && reg_classes_intersect_p (to, FP_REGS))
2353 || (reg_classes_intersect_p (from, FP_REGS)
2354 && reg_classes_intersect_p (to, GENERAL_REGS)))
2355 return 10;
2356
2357 return 1;
2358 }
2359
2360 /* Implement TARGET_MEMORY_MOVE_COST. */
2361
2362 static int
2363 s390_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2364 reg_class_t rclass ATTRIBUTE_UNUSED,
2365 bool in ATTRIBUTE_UNUSED)
2366 {
2367 return 1;
2368 }
2369
2370 /* Compute a (partial) cost for rtx X. Return true if the complete
2371 cost has been computed, and false if subexpressions should be
2372 scanned. In either case, *TOTAL contains the cost result.
2373 CODE contains GET_CODE (x), OUTER_CODE contains the code
2374 of the superexpression of x. */
2375
2376 static bool
2377 s390_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
2378 int *total, bool speed ATTRIBUTE_UNUSED)
2379 {
2380 switch (code)
2381 {
2382 case CONST:
2383 case CONST_INT:
2384 case LABEL_REF:
2385 case SYMBOL_REF:
2386 case CONST_DOUBLE:
2387 case MEM:
2388 *total = 0;
2389 return true;
2390
2391 case ASHIFT:
2392 case ASHIFTRT:
2393 case LSHIFTRT:
2394 case ROTATE:
2395 case ROTATERT:
2396 case AND:
2397 case IOR:
2398 case XOR:
2399 case NEG:
2400 case NOT:
2401 *total = COSTS_N_INSNS (1);
2402 return false;
2403
2404 case PLUS:
2405 case MINUS:
2406 *total = COSTS_N_INSNS (1);
2407 return false;
2408
2409 case MULT:
2410 switch (GET_MODE (x))
2411 {
2412 case SImode:
2413 {
2414 rtx left = XEXP (x, 0);
2415 rtx right = XEXP (x, 1);
2416 if (GET_CODE (right) == CONST_INT
2417 && CONST_OK_FOR_K (INTVAL (right)))
2418 *total = s390_cost->mhi;
2419 else if (GET_CODE (left) == SIGN_EXTEND)
2420 *total = s390_cost->mh;
2421 else
2422 *total = s390_cost->ms; /* msr, ms, msy */
2423 break;
2424 }
2425 case DImode:
2426 {
2427 rtx left = XEXP (x, 0);
2428 rtx right = XEXP (x, 1);
2429 if (TARGET_ZARCH)
2430 {
2431 if (GET_CODE (right) == CONST_INT
2432 && CONST_OK_FOR_K (INTVAL (right)))
2433 *total = s390_cost->mghi;
2434 else if (GET_CODE (left) == SIGN_EXTEND)
2435 *total = s390_cost->msgf;
2436 else
2437 *total = s390_cost->msg; /* msgr, msg */
2438 }
2439 else /* TARGET_31BIT */
2440 {
2441 if (GET_CODE (left) == SIGN_EXTEND
2442 && GET_CODE (right) == SIGN_EXTEND)
2443 /* mulsidi case: mr, m */
2444 *total = s390_cost->m;
2445 else if (GET_CODE (left) == ZERO_EXTEND
2446 && GET_CODE (right) == ZERO_EXTEND
2447 && TARGET_CPU_ZARCH)
2448 /* umulsidi case: ml, mlr */
2449 *total = s390_cost->ml;
2450 else
2451 /* Complex calculation is required. */
2452 *total = COSTS_N_INSNS (40);
2453 }
2454 break;
2455 }
2456 case SFmode:
2457 case DFmode:
2458 *total = s390_cost->mult_df;
2459 break;
2460 case TFmode:
2461 *total = s390_cost->mxbr;
2462 break;
2463 default:
2464 return false;
2465 }
2466 return false;
2467
2468 case FMA:
2469 switch (GET_MODE (x))
2470 {
2471 case DFmode:
2472 *total = s390_cost->madbr;
2473 break;
2474 case SFmode:
2475 *total = s390_cost->maebr;
2476 break;
2477 default:
2478 return false;
2479 }
2480 /* Negating the third argument is free: FMSUB. */
2481 if (GET_CODE (XEXP (x, 2)) == NEG)
2482 {
2483 *total += (rtx_cost (XEXP (x, 0), FMA, 0, speed)
2484 + rtx_cost (XEXP (x, 1), FMA, 1, speed)
2485 + rtx_cost (XEXP (XEXP (x, 2), 0), FMA, 2, speed));
2486 return true;
2487 }
2488 return false;
2489
2490 case UDIV:
2491 case UMOD:
2492 if (GET_MODE (x) == TImode) /* 128 bit division */
2493 *total = s390_cost->dlgr;
2494 else if (GET_MODE (x) == DImode)
2495 {
2496 rtx right = XEXP (x, 1);
2497 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
2498 *total = s390_cost->dlr;
2499 else /* 64 by 64 bit division */
2500 *total = s390_cost->dlgr;
2501 }
2502 else if (GET_MODE (x) == SImode) /* 32 bit division */
2503 *total = s390_cost->dlr;
2504 return false;
2505
2506 case DIV:
2507 case MOD:
2508 if (GET_MODE (x) == DImode)
2509 {
2510 rtx right = XEXP (x, 1);
2511 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
2512 if (TARGET_ZARCH)
2513 *total = s390_cost->dsgfr;
2514 else
2515 *total = s390_cost->dr;
2516 else /* 64 by 64 bit division */
2517 *total = s390_cost->dsgr;
2518 }
2519 else if (GET_MODE (x) == SImode) /* 32 bit division */
2520 *total = s390_cost->dlr;
2521 else if (GET_MODE (x) == SFmode)
2522 {
2523 *total = s390_cost->debr;
2524 }
2525 else if (GET_MODE (x) == DFmode)
2526 {
2527 *total = s390_cost->ddbr;
2528 }
2529 else if (GET_MODE (x) == TFmode)
2530 {
2531 *total = s390_cost->dxbr;
2532 }
2533 return false;
2534
2535 case SQRT:
2536 if (GET_MODE (x) == SFmode)
2537 *total = s390_cost->sqebr;
2538 else if (GET_MODE (x) == DFmode)
2539 *total = s390_cost->sqdbr;
2540 else /* TFmode */
2541 *total = s390_cost->sqxbr;
2542 return false;
2543
2544 case SIGN_EXTEND:
2545 case ZERO_EXTEND:
2546 if (outer_code == MULT || outer_code == DIV || outer_code == MOD
2547 || outer_code == PLUS || outer_code == MINUS
2548 || outer_code == COMPARE)
2549 *total = 0;
2550 return false;
2551
2552 case COMPARE:
2553 *total = COSTS_N_INSNS (1);
2554 if (GET_CODE (XEXP (x, 0)) == AND
2555 && GET_CODE (XEXP (x, 1)) == CONST_INT
2556 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
2557 {
2558 rtx op0 = XEXP (XEXP (x, 0), 0);
2559 rtx op1 = XEXP (XEXP (x, 0), 1);
2560 rtx op2 = XEXP (x, 1);
2561
2562 if (memory_operand (op0, GET_MODE (op0))
2563 && s390_tm_ccmode (op1, op2, 0) != VOIDmode)
2564 return true;
2565 if (register_operand (op0, GET_MODE (op0))
2566 && s390_tm_ccmode (op1, op2, 1) != VOIDmode)
2567 return true;
2568 }
2569 return false;
2570
2571 default:
2572 return false;
2573 }
2574 }
2575
2576 /* Return the cost of an address rtx ADDR. */
2577
2578 static int
2579 s390_address_cost (rtx addr, bool speed ATTRIBUTE_UNUSED)
2580 {
2581 struct s390_address ad;
2582 if (!s390_decompose_address (addr, &ad))
2583 return 1000;
2584
2585 return ad.indx? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (1);
2586 }
2587
2588 /* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
2589 otherwise return 0. */
2590
2591 int
2592 tls_symbolic_operand (rtx op)
2593 {
2594 if (GET_CODE (op) != SYMBOL_REF)
2595 return 0;
2596 return SYMBOL_REF_TLS_MODEL (op);
2597 }
2598 \f
2599 /* Split DImode access register reference REG (on 64-bit) into its constituent
2600 low and high parts, and store them into LO and HI. Note that gen_lowpart/
2601 gen_highpart cannot be used as they assume all registers are word-sized,
2602 while our access registers have only half that size. */
2603
2604 void
2605 s390_split_access_reg (rtx reg, rtx *lo, rtx *hi)
2606 {
2607 gcc_assert (TARGET_64BIT);
2608 gcc_assert (ACCESS_REG_P (reg));
2609 gcc_assert (GET_MODE (reg) == DImode);
2610 gcc_assert (!(REGNO (reg) & 1));
2611
2612 *lo = gen_rtx_REG (SImode, REGNO (reg) + 1);
2613 *hi = gen_rtx_REG (SImode, REGNO (reg));
2614 }
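/* Example: for an access register pair starting at hard register N this
   yields *HI = (reg:SI N) and *LO = (reg:SI N+1), i.e. the high word lives
   in the even-numbered register.  */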
2615
2616 /* Return true if OP contains a symbol reference. */
2617
2618 bool
2619 symbolic_reference_mentioned_p (rtx op)
2620 {
2621 const char *fmt;
2622 int i;
2623
2624 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
2625 return 1;
2626
2627 fmt = GET_RTX_FORMAT (GET_CODE (op));
2628 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2629 {
2630 if (fmt[i] == 'E')
2631 {
2632 int j;
2633
2634 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2635 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2636 return 1;
2637 }
2638
2639 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
2640 return 1;
2641 }
2642
2643 return 0;
2644 }
2645
2646 /* Return true if OP contains a reference to a thread-local symbol. */
2647
2648 bool
2649 tls_symbolic_reference_mentioned_p (rtx op)
2650 {
2651 const char *fmt;
2652 int i;
2653
2654 if (GET_CODE (op) == SYMBOL_REF)
2655 return tls_symbolic_operand (op);
2656
2657 fmt = GET_RTX_FORMAT (GET_CODE (op));
2658 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2659 {
2660 if (fmt[i] == 'E')
2661 {
2662 int j;
2663
2664 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2665 if (tls_symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2666 return true;
2667 }
2668
2669 else if (fmt[i] == 'e' && tls_symbolic_reference_mentioned_p (XEXP (op, i)))
2670 return true;
2671 }
2672
2673 return false;
2674 }
2675
2676
2677 /* Return true if OP is a legitimate general operand when
2678 generating PIC code. It is given that flag_pic is on
2679 and that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2680
2681 int
2682 legitimate_pic_operand_p (rtx op)
2683 {
2684 /* Accept all non-symbolic constants. */
2685 if (!SYMBOLIC_CONST (op))
2686 return 1;
2687
2688 /* Reject everything else; must be handled
2689 via emit_symbolic_move. */
2690 return 0;
2691 }
2692
2693 /* Returns true if the constant value OP is a legitimate general operand.
2694 It is given that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2695
2696 static bool
2697 s390_legitimate_constant_p (enum machine_mode mode, rtx op)
2698 {
2699 /* Accept all non-symbolic constants. */
2700 if (!SYMBOLIC_CONST (op))
2701 return 1;
2702
2703 /* Accept immediate LARL operands. */
2704 if (TARGET_CPU_ZARCH && larl_operand (op, mode))
2705 return 1;
2706
2707 /* Thread-local symbols are never legal constants. This is
2708 so that emit_call knows that computing such addresses
2709 might require a function call. */
2710 if (TLS_SYMBOLIC_CONST (op))
2711 return 0;
2712
2713 /* In the PIC case, symbolic constants must *not* be
2714 forced into the literal pool. We accept them here,
2715 so that they will be handled by emit_symbolic_move. */
2716 if (flag_pic)
2717 return 1;
2718
2719 /* All remaining non-PIC symbolic constants are
2720 forced into the literal pool. */
2721 return 0;
2722 }
2723
2724 /* Determine if it's legal to put X into the constant pool. This
2725 is not possible if X contains the address of a symbol that is
2726 not constant (TLS) or not known at final link time (PIC). */
2727
2728 static bool
2729 s390_cannot_force_const_mem (enum machine_mode mode, rtx x)
2730 {
2731 switch (GET_CODE (x))
2732 {
2733 case CONST_INT:
2734 case CONST_DOUBLE:
2735 /* Accept all non-symbolic constants. */
2736 return false;
2737
2738 case LABEL_REF:
2739 /* Labels are OK iff we are non-PIC. */
2740 return flag_pic != 0;
2741
2742 case SYMBOL_REF:
2743 /* 'Naked' TLS symbol references are never OK,
2744 non-TLS symbols are OK iff we are non-PIC. */
2745 if (tls_symbolic_operand (x))
2746 return true;
2747 else
2748 return flag_pic != 0;
2749
2750 case CONST:
2751 return s390_cannot_force_const_mem (mode, XEXP (x, 0));
2752 case PLUS:
2753 case MINUS:
2754 return s390_cannot_force_const_mem (mode, XEXP (x, 0))
2755 || s390_cannot_force_const_mem (mode, XEXP (x, 1));
2756
2757 case UNSPEC:
2758 switch (XINT (x, 1))
2759 {
2760 /* Only lt-relative or GOT-relative UNSPECs are OK. */
2761 case UNSPEC_LTREL_OFFSET:
2762 case UNSPEC_GOT:
2763 case UNSPEC_GOTOFF:
2764 case UNSPEC_PLTOFF:
2765 case UNSPEC_TLSGD:
2766 case UNSPEC_TLSLDM:
2767 case UNSPEC_NTPOFF:
2768 case UNSPEC_DTPOFF:
2769 case UNSPEC_GOTNTPOFF:
2770 case UNSPEC_INDNTPOFF:
2771 return false;
2772
2773 /* If the literal pool shares the code section, execute
2774 template placeholders may be put into the pool as well. */
2775 case UNSPEC_INSN:
2776 return TARGET_CPU_ZARCH;
2777
2778 default:
2779 return true;
2780 }
2781 break;
2782
2783 default:
2784 gcc_unreachable ();
2785 }
2786 }
2787
2788 /* Returns true if the constant value OP is a legitimate general
2789 operand during and after reload. The difference from
2790 legitimate_constant_p is that this function will not accept
2791 a constant that would need to be forced to the literal pool
2792 before it can be used as an operand.
2793 This function accepts all constants which can be loaded directly
2794 into a GPR. */
2795
2796 bool
2797 legitimate_reload_constant_p (rtx op)
2798 {
2799 /* Accept la(y) operands. */
2800 if (GET_CODE (op) == CONST_INT
2801 && DISP_IN_RANGE (INTVAL (op)))
2802 return true;
2803
2804 /* Accept l(g)hi/l(g)fi operands. */
2805 if (GET_CODE (op) == CONST_INT
2806 && (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_Os (INTVAL (op))))
2807 return true;
2808
2809 /* Accept lliXX operands. */
2810 if (TARGET_ZARCH
2811 && GET_CODE (op) == CONST_INT
2812 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2813 && s390_single_part (op, word_mode, HImode, 0) >= 0)
2814 return true;
2815
2816 if (TARGET_EXTIMM
2817 && GET_CODE (op) == CONST_INT
2818 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2819 && s390_single_part (op, word_mode, SImode, 0) >= 0)
2820 return true;
2821
2822 /* Accept larl operands. */
2823 if (TARGET_CPU_ZARCH
2824 && larl_operand (op, VOIDmode))
2825 return true;
2826
2827 /* Accept floating-point zero operands that fit into a single GPR. */
2828 if (GET_CODE (op) == CONST_DOUBLE
2829 && s390_float_const_zero_p (op)
2830 && GET_MODE_SIZE (GET_MODE (op)) <= UNITS_PER_WORD)
2831 return true;
2832
2833 /* Accept double-word operands that can be split. */
2834 if (GET_CODE (op) == CONST_INT
2835 && trunc_int_for_mode (INTVAL (op), word_mode) != INTVAL (op))
2836 {
2837 enum machine_mode dword_mode = word_mode == SImode ? DImode : TImode;
2838 rtx hi = operand_subword (op, 0, 0, dword_mode);
2839 rtx lo = operand_subword (op, 1, 0, dword_mode);
2840 return legitimate_reload_constant_p (hi)
2841 && legitimate_reload_constant_p (lo);
2842 }
2843
2844 /* Everything else cannot be handled without reload. */
2845 return false;
2846 }
2847
2848 /* Returns true if the constant value OP is a legitimate fp operand
2849 during and after reload.
2850 This function accepts all constants which can be loaded directly
2851 into an FPR. */
2852
2853 static bool
2854 legitimate_reload_fp_constant_p (rtx op)
2855 {
2856 /* Accept floating-point zero operands if the load zero instruction
2857 can be used. */
2858 if (TARGET_Z196
2859 && GET_CODE (op) == CONST_DOUBLE
2860 && s390_float_const_zero_p (op))
2861 return true;
2862
2863 return false;
2864 }
2865
2866 /* Given an rtx OP being reloaded into a reg required to be in class RCLASS,
2867 return the class of reg to actually use. */
2868
2869 static reg_class_t
2870 s390_preferred_reload_class (rtx op, reg_class_t rclass)
2871 {
2872 switch (GET_CODE (op))
2873 {
2874 /* Constants we cannot reload into general registers
2875 must be forced into the literal pool. */
2876 case CONST_DOUBLE:
2877 case CONST_INT:
2878 if (reg_class_subset_p (GENERAL_REGS, rclass)
2879 && legitimate_reload_constant_p (op))
2880 return GENERAL_REGS;
2881 else if (reg_class_subset_p (ADDR_REGS, rclass)
2882 && legitimate_reload_constant_p (op))
2883 return ADDR_REGS;
2884 else if (reg_class_subset_p (FP_REGS, rclass)
2885 && legitimate_reload_fp_constant_p (op))
2886 return FP_REGS;
2887 return NO_REGS;
2888
2889 /* If a symbolic constant or a PLUS is reloaded,
2890 it is most likely being used as an address, so
2891 prefer ADDR_REGS. If RCLASS is not a superset
2892 of ADDR_REGS, e.g. FP_REGS, reject this reload. */
2893 case LABEL_REF:
2894 case SYMBOL_REF:
2895 case CONST:
2896 if (!legitimate_reload_constant_p (op))
2897 return NO_REGS;
2898 /* fallthrough */
2899 case PLUS:
2900 /* load address will be used. */
2901 if (reg_class_subset_p (ADDR_REGS, rclass))
2902 return ADDR_REGS;
2903 else
2904 return NO_REGS;
2905
2906 default:
2907 break;
2908 }
2909
2910 return rclass;
2911 }
2912
2913 /* Return true if ADDR is SYMBOL_REF + addend with addend being a
2914 multiple of ALIGNMENT and the SYMBOL_REF being naturally
2915 aligned. */
2916
2917 bool
2918 s390_check_symref_alignment (rtx addr, HOST_WIDE_INT alignment)
2919 {
2920 HOST_WIDE_INT addend;
2921 rtx symref;
2922
2923 if (!s390_symref_operand_p (addr, &symref, &addend))
2924 return false;
2925
2926 return (!SYMBOL_REF_NOT_NATURALLY_ALIGNED_P (symref)
2927 && !(addend & (alignment - 1)));
2928 }
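/* E.g. a naturally aligned SYMBOL_REF with an addend of 6 fails the check
   for ALIGNMENT 4 (6 & 3 != 0) but passes it for ALIGNMENT 2.  */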
2929
2930 /* ADDR is moved into REG using larl. If ADDR isn't a valid larl
2931 operand, SCRATCH is used to load the even part of the address and
2932 the final value is formed by adding one. */
2933
2934 void
2935 s390_reload_larl_operand (rtx reg, rtx addr, rtx scratch)
2936 {
2937 HOST_WIDE_INT addend;
2938 rtx symref;
2939
2940 if (!s390_symref_operand_p (addr, &symref, &addend))
2941 gcc_unreachable ();
2942
2943 if (!(addend & 1))
2944 /* Easy case. The addend is even so larl will do fine. */
2945 emit_move_insn (reg, addr);
2946 else
2947 {
2948 /* We can leave the scratch register untouched if the target
2949 register is a valid base register. */
2950 if (REGNO (reg) < FIRST_PSEUDO_REGISTER
2951 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS)
2952 scratch = reg;
2953
2954 gcc_assert (REGNO (scratch) < FIRST_PSEUDO_REGISTER);
2955 gcc_assert (REGNO_REG_CLASS (REGNO (scratch)) == ADDR_REGS);
2956
2957 if (addend != 1)
2958 emit_move_insn (scratch,
2959 gen_rtx_CONST (Pmode,
2960 gen_rtx_PLUS (Pmode, symref,
2961 GEN_INT (addend - 1))));
2962 else
2963 emit_move_insn (scratch, symref);
2964
2965 /* Increment the address using la in order to avoid clobbering cc. */
2966 emit_move_insn (reg, gen_rtx_PLUS (Pmode, scratch, const1_rtx));
2967 }
2968 }
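/* Sketch of the odd-addend path above for ADDR = symbol + 5: the even
   address symbol + 4 is loaded into SCRATCH (a larl-capable operand) and
   the final value is formed with an la-style addition of 1, which does not
   clobber the condition code.  */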
2969
2970 /* Generate what is necessary to move between REG and MEM using
2971 SCRATCH. The direction is given by TOMEM. */
2972
2973 void
2974 s390_reload_symref_address (rtx reg, rtx mem, rtx scratch, bool tomem)
2975 {
2976 /* Reload might have pulled a constant out of the literal pool.
2977 Force it back in. */
2978 if (CONST_INT_P (mem) || GET_CODE (mem) == CONST_DOUBLE
2979 || GET_CODE (mem) == CONST)
2980 mem = force_const_mem (GET_MODE (reg), mem);
2981
2982 gcc_assert (MEM_P (mem));
2983
2984 /* For a load from memory we can leave the scratch register
2985 untouched if the target register is a valid base register. */
2986 if (!tomem
2987 && REGNO (reg) < FIRST_PSEUDO_REGISTER
2988 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS
2989 && GET_MODE (reg) == GET_MODE (scratch))
2990 scratch = reg;
2991
2992 /* Load address into scratch register. Since we can't have a
2993 secondary reload for a secondary reload we have to cover the case
2994 where larl would need a secondary reload here as well. */
2995 s390_reload_larl_operand (scratch, XEXP (mem, 0), scratch);
2996
2997 /* Now we can use a standard load/store to do the move. */
2998 if (tomem)
2999 emit_move_insn (replace_equiv_address (mem, scratch), reg);
3000 else
3001 emit_move_insn (reg, replace_equiv_address (mem, scratch));
3002 }
3003
3004 /* Inform reload about cases where moving X with a mode MODE to a register in
3005 RCLASS requires an extra scratch or immediate register. Return the class
3006 needed for the immediate register. */
3007
3008 static reg_class_t
3009 s390_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
3010 enum machine_mode mode, secondary_reload_info *sri)
3011 {
3012 enum reg_class rclass = (enum reg_class) rclass_i;
3013
3014 /* Intermediate register needed. */
3015 if (reg_classes_intersect_p (CC_REGS, rclass))
3016 return GENERAL_REGS;
3017
3018 if (TARGET_Z10)
3019 {
3020 HOST_WIDE_INT offset;
3021 rtx symref;
3022
3023 /* On z10 several optimizer steps may generate larl operands with
3024 an odd addend. */
3025 if (in_p
3026 && s390_symref_operand_p (x, &symref, &offset)
3027 && mode == Pmode
3028 && !SYMBOL_REF_ALIGN1_P (symref)
3029 && (offset & 1) == 1)
3030 sri->icode = ((mode == DImode) ? CODE_FOR_reloaddi_larl_odd_addend_z10
3031 : CODE_FOR_reloadsi_larl_odd_addend_z10);
3032
3033 /* On z10 we need a scratch register when moving QI, TI or floating
3034 point mode values from or to a memory location with a SYMBOL_REF
3035 or if the symref addend of a HI, SI or DI move is not aligned to the
3036 width of the access. */
3037 if (MEM_P (x)
3038 && s390_symref_operand_p (XEXP (x, 0), NULL, NULL)
3039 && (mode == QImode || mode == TImode || FLOAT_MODE_P (mode)
3040 || (!TARGET_ZARCH && mode == DImode)
3041 || ((mode == HImode || mode == SImode || mode == DImode)
3042 && (!s390_check_symref_alignment (XEXP (x, 0),
3043 GET_MODE_SIZE (mode))))))
3044 {
3045 #define __SECONDARY_RELOAD_CASE(M,m) \
3046 case M##mode: \
3047 if (TARGET_64BIT) \
3048 sri->icode = in_p ? CODE_FOR_reload##m##di_toreg_z10 : \
3049 CODE_FOR_reload##m##di_tomem_z10; \
3050 else \
3051 sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 : \
3052 CODE_FOR_reload##m##si_tomem_z10; \
3053 break;
3054
3055 switch (GET_MODE (x))
3056 {
3057 __SECONDARY_RELOAD_CASE (QI, qi);
3058 __SECONDARY_RELOAD_CASE (HI, hi);
3059 __SECONDARY_RELOAD_CASE (SI, si);
3060 __SECONDARY_RELOAD_CASE (DI, di);
3061 __SECONDARY_RELOAD_CASE (TI, ti);
3062 __SECONDARY_RELOAD_CASE (SF, sf);
3063 __SECONDARY_RELOAD_CASE (DF, df);
3064 __SECONDARY_RELOAD_CASE (TF, tf);
3065 __SECONDARY_RELOAD_CASE (SD, sd);
3066 __SECONDARY_RELOAD_CASE (DD, dd);
3067 __SECONDARY_RELOAD_CASE (TD, td);
3068
3069 default:
3070 gcc_unreachable ();
3071 }
3072 #undef __SECONDARY_RELOAD_CASE
3073 }
3074 }
3075
3076 /* We need a scratch register when loading a PLUS expression which
3077 is not a legitimate operand of the LOAD ADDRESS instruction. */
3078 if (in_p && s390_plus_operand (x, mode))
3079 sri->icode = (TARGET_64BIT ?
3080 CODE_FOR_reloaddi_plus : CODE_FOR_reloadsi_plus);
3081
3082 /* When performing a multiword move from or to memory, we have to make sure
3083 the second chunk in memory is addressable without causing a displacement
3084 overflow. If that is not the case, we calculate the address in
3085 a scratch register. */
3086 if (MEM_P (x)
3087 && GET_CODE (XEXP (x, 0)) == PLUS
3088 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3089 && !DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1))
3090 + GET_MODE_SIZE (mode) - 1))
3091 {
3092 /* For GENERAL_REGS a displacement overflow is no problem if occurring
3093 in an s_operand address since we may fall back to lm/stm. So we only
3094 have to care about overflows in the b+i+d case. */
3095 if ((reg_classes_intersect_p (GENERAL_REGS, rclass)
3096 && s390_class_max_nregs (GENERAL_REGS, mode) > 1
3097 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
3098 /* For FP_REGS no lm/stm is available so this check is triggered
3099 for displacement overflows in b+i+d and b+d like addresses. */
3100 || (reg_classes_intersect_p (FP_REGS, rclass)
3101 && s390_class_max_nregs (FP_REGS, mode) > 1))
3102 {
3103 if (in_p)
3104 sri->icode = (TARGET_64BIT ?
3105 CODE_FOR_reloaddi_nonoffmem_in :
3106 CODE_FOR_reloadsi_nonoffmem_in);
3107 else
3108 sri->icode = (TARGET_64BIT ?
3109 CODE_FOR_reloaddi_nonoffmem_out :
3110 CODE_FOR_reloadsi_nonoffmem_out);
3111 }
3112 }
3113
3114 /* A scratch address register is needed when a symbolic constant is
3115 copied to r0 when compiling with -fPIC. In other cases the target
3116 register might be used as a temporary (see legitimize_pic_address). */
3117 if (in_p && SYMBOLIC_CONST (x) && flag_pic == 2 && rclass != ADDR_REGS)
3118 sri->icode = (TARGET_64BIT ?
3119 CODE_FOR_reloaddi_PIC_addr :
3120 CODE_FOR_reloadsi_PIC_addr);
3121
3122 /* Either scratch or no register needed. */
3123 return NO_REGS;
3124 }
3125
3126 /* Generate code to load SRC, which is PLUS that is not a
3127 legitimate operand for the LA instruction, into TARGET.
3128 SCRATCH may be used as scratch register. */
3129
3130 void
3131 s390_expand_plus_operand (rtx target, rtx src,
3132 rtx scratch)
3133 {
3134 rtx sum1, sum2;
3135 struct s390_address ad;
3136
3137 /* src must be a PLUS; get its two operands. */
3138 gcc_assert (GET_CODE (src) == PLUS);
3139 gcc_assert (GET_MODE (src) == Pmode);
3140
3141 /* Check if either of the two operands is already scheduled
3142 for replacement by reload. This can happen e.g. when
3143 float registers occur in an address. */
3144 sum1 = find_replacement (&XEXP (src, 0));
3145 sum2 = find_replacement (&XEXP (src, 1));
3146 src = gen_rtx_PLUS (Pmode, sum1, sum2);
3147
3148 /* If the address is already strictly valid, there's nothing to do. */
3149 if (!s390_decompose_address (src, &ad)
3150 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
3151 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
3152 {
3153 /* Otherwise, one of the operands cannot be an address register;
3154 we reload its value into the scratch register. */
3155 if (true_regnum (sum1) < 1 || true_regnum (sum1) > 15)
3156 {
3157 emit_move_insn (scratch, sum1);
3158 sum1 = scratch;
3159 }
3160 if (true_regnum (sum2) < 1 || true_regnum (sum2) > 15)
3161 {
3162 emit_move_insn (scratch, sum2);
3163 sum2 = scratch;
3164 }
3165
3166 /* According to the way these invalid addresses are generated
3167 in reload.c, it should never happen (at least on s390) that
3168 *neither* of the PLUS components, after find_replacements
3169 was applied, is an address register. */
3170 if (sum1 == scratch && sum2 == scratch)
3171 {
3172 debug_rtx (src);
3173 gcc_unreachable ();
3174 }
3175
3176 src = gen_rtx_PLUS (Pmode, sum1, sum2);
3177 }
3178
3179 /* Emit the LOAD ADDRESS pattern. Note that reload of PLUS
3180 is only ever performed on addresses, so we can mark the
3181 sum as legitimate for LA in any case. */
3182 s390_load_address (target, src);
3183 }
3184
3185
3186 /* Return true if ADDR is a valid memory address.
3187 STRICT specifies whether strict register checking applies. */
3188
3189 static bool
3190 s390_legitimate_address_p (enum machine_mode mode, rtx addr, bool strict)
3191 {
3192 struct s390_address ad;
3193
3194 if (TARGET_Z10
3195 && larl_operand (addr, VOIDmode)
3196 && (mode == VOIDmode
3197 || s390_check_symref_alignment (addr, GET_MODE_SIZE (mode))))
3198 return true;
3199
3200 if (!s390_decompose_address (addr, &ad))
3201 return false;
3202
3203 if (strict)
3204 {
3205 if (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
3206 return false;
3207
3208 if (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx)))
3209 return false;
3210 }
3211 else
3212 {
3213 if (ad.base
3214 && !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
3215 || REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
3216 return false;
3217
3218 if (ad.indx
3219 && !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
3220 || REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
3221 return false;
3222 }
3223 return true;
3224 }
3225
3226 /* Return true if OP is a valid operand for the LA instruction.
3227 In 31-bit, we need to prove that the result is used as an
3228 address, as LA performs only a 31-bit addition. */
3229
3230 bool
3231 legitimate_la_operand_p (rtx op)
3232 {
3233 struct s390_address addr;
3234 if (!s390_decompose_address (op, &addr))
3235 return false;
3236
3237 return (TARGET_64BIT || addr.pointer);
3238 }
3239
3240 /* Return true if it is valid *and* preferable to use LA to
3241 compute the sum of OP1 and OP2. */
3242
3243 bool
3244 preferred_la_operand_p (rtx op1, rtx op2)
3245 {
3246 struct s390_address addr;
3247
3248 if (op2 != const0_rtx)
3249 op1 = gen_rtx_PLUS (Pmode, op1, op2);
3250
3251 if (!s390_decompose_address (op1, &addr))
3252 return false;
3253 if (addr.base && !REGNO_OK_FOR_BASE_P (REGNO (addr.base)))
3254 return false;
3255 if (addr.indx && !REGNO_OK_FOR_INDEX_P (REGNO (addr.indx)))
3256 return false;
3257
3258 /* Avoid LA instructions with index register on z196; it is
3259 preferable to use regular add instructions when possible. */
3260 if (addr.indx && s390_tune == PROCESSOR_2817_Z196)
3261 return false;
3262
3263 if (!TARGET_64BIT && !addr.pointer)
3264 return false;
3265
3266 if (addr.pointer)
3267 return true;
3268
3269 if ((addr.base && REG_P (addr.base) && REG_POINTER (addr.base))
3270 || (addr.indx && REG_P (addr.indx) && REG_POINTER (addr.indx)))
3271 return true;
3272
3273 return false;
3274 }
3275
3276 /* Emit a forced load-address operation to load SRC into DST.
3277 This will use the LOAD ADDRESS instruction even in situations
3278 where legitimate_la_operand_p (SRC) returns false. */
3279
3280 void
3281 s390_load_address (rtx dst, rtx src)
3282 {
3283 if (TARGET_64BIT)
3284 emit_move_insn (dst, src);
3285 else
3286 emit_insn (gen_force_la_31 (dst, src));
3287 }
3288
3289 /* Return a legitimate reference for ORIG (an address) using the
3290 register REG. If REG is 0, a new pseudo is generated.
3291
3292 There are two types of references that must be handled:
3293
3294 1. Global data references must load the address from the GOT, via
3295 the PIC reg. An insn is emitted to do this load, and the reg is
3296 returned.
3297
3298 2. Static data references, constant pool addresses, and code labels
3299 compute the address as an offset from the GOT, whose base is in
3300 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
3301 differentiate them from global data objects. The returned
3302 address is the PIC reg + an unspec constant.
3303
3304 TARGET_LEGITIMIZE_ADDRESS_P rejects symbolic references unless the PIC
3305 reg also appears in the address. */
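/* Illustration with a hypothetical global symbol: under flag_pic == 1 a
   reference to (symbol_ref "bar") becomes a load from
   (mem (plus pic_reg (const (unspec [(symbol_ref "bar")] UNSPEC_GOT)))),
   i.e. the address is fetched from the symbol's GOT slot, as implemented
   in the SYMBOL_REF case below.  */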
3306
3307 rtx
3308 legitimize_pic_address (rtx orig, rtx reg)
3309 {
3310 rtx addr = orig;
3311 rtx new_rtx = orig;
3312 rtx base;
3313
3314 gcc_assert (!TLS_SYMBOLIC_CONST (addr));
3315
3316 if (GET_CODE (addr) == LABEL_REF
3317 || (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (addr)))
3318 {
3319 /* This is a local symbol. */
3320 if (TARGET_CPU_ZARCH && larl_operand (addr, VOIDmode))
3321 {
3322 /* Access local symbols PC-relative via LARL.
3323 This is the same as in the non-PIC case, so it is
3324 handled automatically ... */
3325 }
3326 else
3327 {
3328 /* Access local symbols relative to the GOT. */
3329
3330 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3331
3332 if (reload_in_progress || reload_completed)
3333 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3334
3335 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
3336 addr = gen_rtx_CONST (Pmode, addr);
3337 addr = force_const_mem (Pmode, addr);
3338 emit_move_insn (temp, addr);
3339
3340 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3341 if (reg != 0)
3342 {
3343 s390_load_address (reg, new_rtx);
3344 new_rtx = reg;
3345 }
3346 }
3347 }
3348 else if (GET_CODE (addr) == SYMBOL_REF)
3349 {
3350 if (reg == 0)
3351 reg = gen_reg_rtx (Pmode);
3352
3353 if (flag_pic == 1)
3354 {
3355 /* Assume GOT offset < 4k. This is handled the same way
3356 in both 31- and 64-bit code (@GOT). */
3357
3358 if (reload_in_progress || reload_completed)
3359 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3360
3361 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3362 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3363 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
3364 new_rtx = gen_const_mem (Pmode, new_rtx);
3365 emit_move_insn (reg, new_rtx);
3366 new_rtx = reg;
3367 }
3368 else if (TARGET_CPU_ZARCH)
3369 {
3370 /* If the GOT offset might be >= 4k, we determine the position
3371 of the GOT entry via a PC-relative LARL (@GOTENT). */
3372
3373 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3374
3375 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
3376 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
3377
3378 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
3379 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3380 emit_move_insn (temp, new_rtx);
3381
3382 new_rtx = gen_const_mem (Pmode, temp);
3383 emit_move_insn (reg, new_rtx);
3384 new_rtx = reg;
3385 }
3386 else
3387 {
3388 /* If the GOT offset might be >= 4k, we have to load it
3389 from the literal pool (@GOT). */
3390
3391 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3392
3393 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
3394 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
3395
3396 if (reload_in_progress || reload_completed)
3397 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3398
3399 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3400 addr = gen_rtx_CONST (Pmode, addr);
3401 addr = force_const_mem (Pmode, addr);
3402 emit_move_insn (temp, addr);
3403
3404 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3405 new_rtx = gen_const_mem (Pmode, new_rtx);
3406 emit_move_insn (reg, new_rtx);
3407 new_rtx = reg;
3408 }
3409 }
3410 else
3411 {
3412 if (GET_CODE (addr) == CONST)
3413 {
3414 addr = XEXP (addr, 0);
3415 if (GET_CODE (addr) == UNSPEC)
3416 {
3417 gcc_assert (XVECLEN (addr, 0) == 1);
3418 switch (XINT (addr, 1))
3419 {
3420 /* If someone moved a GOT-relative UNSPEC
3421 out of the literal pool, force it back in. */
3422 case UNSPEC_GOTOFF:
3423 case UNSPEC_PLTOFF:
3424 new_rtx = force_const_mem (Pmode, orig);
3425 break;
3426
3427 /* @GOT is OK as is if small. */
3428 case UNSPEC_GOT:
3429 if (flag_pic == 2)
3430 new_rtx = force_const_mem (Pmode, orig);
3431 break;
3432
3433 /* @GOTENT is OK as is. */
3434 case UNSPEC_GOTENT:
3435 break;
3436
3437 /* @PLT is OK as is on 64-bit, must be converted to
3438 GOT-relative @PLTOFF on 31-bit. */
3439 case UNSPEC_PLT:
3440 if (!TARGET_CPU_ZARCH)
3441 {
3442 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3443
3444 if (reload_in_progress || reload_completed)
3445 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3446
3447 addr = XVECEXP (addr, 0, 0);
3448 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr),
3449 UNSPEC_PLTOFF);
3450 addr = gen_rtx_CONST (Pmode, addr);
3451 addr = force_const_mem (Pmode, addr);
3452 emit_move_insn (temp, addr);
3453
3454 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3455 if (reg != 0)
3456 {
3457 s390_load_address (reg, new_rtx);
3458 new_rtx = reg;
3459 }
3460 }
3461 break;
3462
3463 /* Everything else cannot happen. */
3464 default:
3465 gcc_unreachable ();
3466 }
3467 }
3468 else
3469 gcc_assert (GET_CODE (addr) == PLUS);
3470 }
3471 if (GET_CODE (addr) == PLUS)
3472 {
3473 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
3474
3475 gcc_assert (!TLS_SYMBOLIC_CONST (op0));
3476 gcc_assert (!TLS_SYMBOLIC_CONST (op1));
3477
3478 /* Check first to see if this is a constant offset
3479 from a local symbol reference. */
3480 if ((GET_CODE (op0) == LABEL_REF
3481 || (GET_CODE (op0) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (op0)))
3482 && GET_CODE (op1) == CONST_INT)
3483 {
3484 if (TARGET_CPU_ZARCH
3485 && larl_operand (op0, VOIDmode)
3486 && INTVAL (op1) < (HOST_WIDE_INT)1 << 31
3487 && INTVAL (op1) >= -((HOST_WIDE_INT)1 << 31))
3488 {
3489 if (INTVAL (op1) & 1)
3490 {
3491 /* LARL can't handle odd offsets, so emit a
3492 pair of LARL and LA. */
3493 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3494
3495 if (!DISP_IN_RANGE (INTVAL (op1)))
3496 {
3497 HOST_WIDE_INT even = INTVAL (op1) - 1;
3498 op0 = gen_rtx_PLUS (Pmode, op0, GEN_INT (even));
3499 op0 = gen_rtx_CONST (Pmode, op0);
3500 op1 = const1_rtx;
3501 }
3502
3503 emit_move_insn (temp, op0);
3504 new_rtx = gen_rtx_PLUS (Pmode, temp, op1);
3505
3506 if (reg != 0)
3507 {
3508 s390_load_address (reg, new_rtx);
3509 new_rtx = reg;
3510 }
3511 }
3512 else
3513 {
3514 /* If the offset is even, we can just use LARL.
3515 This will happen automatically. */
3516 }
3517 }
3518 else
3519 {
3520 /* Access local symbols relative to the GOT. */
3521
3522 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3523
3524 if (reload_in_progress || reload_completed)
3525 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3526
3527 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
3528 UNSPEC_GOTOFF);
3529 addr = gen_rtx_PLUS (Pmode, addr, op1);
3530 addr = gen_rtx_CONST (Pmode, addr);
3531 addr = force_const_mem (Pmode, addr);
3532 emit_move_insn (temp, addr);
3533
3534 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3535 if (reg != 0)
3536 {
3537 s390_load_address (reg, new_rtx);
3538 new_rtx = reg;
3539 }
3540 }
3541 }
3542
3543 /* Now, check whether it is a GOT relative symbol plus offset
3544 that was pulled out of the literal pool. Force it back in. */
3545
3546 else if (GET_CODE (op0) == UNSPEC
3547 && GET_CODE (op1) == CONST_INT
3548 && XINT (op0, 1) == UNSPEC_GOTOFF)
3549 {
3550 gcc_assert (XVECLEN (op0, 0) == 1);
3551
3552 new_rtx = force_const_mem (Pmode, orig);
3553 }
3554
3555 /* Otherwise, compute the sum. */
3556 else
3557 {
3558 base = legitimize_pic_address (XEXP (addr, 0), reg);
3559 new_rtx = legitimize_pic_address (XEXP (addr, 1),
3560 base == reg ? NULL_RTX : reg);
3561 if (GET_CODE (new_rtx) == CONST_INT)
3562 new_rtx = plus_constant (base, INTVAL (new_rtx));
3563 else
3564 {
3565 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
3566 {
3567 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
3568 new_rtx = XEXP (new_rtx, 1);
3569 }
3570 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
3571 }
3572
3573 if (GET_CODE (new_rtx) == CONST)
3574 new_rtx = XEXP (new_rtx, 0);
3575 new_rtx = force_operand (new_rtx, 0);
3576 }
3577 }
3578 }
3579 return new_rtx;
3580 }
3581
3582 /* Load the thread pointer into a register. */
3583
3584 rtx
3585 s390_get_thread_pointer (void)
3586 {
3587 rtx tp = gen_reg_rtx (Pmode);
3588
3589 emit_move_insn (tp, gen_rtx_REG (Pmode, TP_REGNUM));
3590 mark_reg_pointer (tp, BITS_PER_WORD);
3591
3592 return tp;
3593 }
3594
3595 /* Emit a TLS call insn. The call target is the SYMBOL_REF stored
3596 in s390_tls_symbol, which always refers to __tls_get_offset.
3597 The returned offset is written to RESULT_REG and a USE rtx is
3598 generated for TLS_CALL. */
3599
3600 static GTY(()) rtx s390_tls_symbol;
3601
3602 static void
3603 s390_emit_tls_call_insn (rtx result_reg, rtx tls_call)
3604 {
3605 rtx insn;
3606
3607 gcc_assert (flag_pic);
3608
3609 if (!s390_tls_symbol)
3610 s390_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_offset");
3611
3612 insn = s390_emit_call (s390_tls_symbol, tls_call, result_reg,
3613 gen_rtx_REG (Pmode, RETURN_REGNUM));
3614
3615 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), result_reg);
3616 RTL_CONST_CALL_P (insn) = 1;
3617 }
3618
3619 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3620 this (thread-local) address. REG may be used as temporary. */
3621
3622 static rtx
3623 legitimize_tls_address (rtx addr, rtx reg)
3624 {
3625 rtx new_rtx, tls_call, temp, base, r2, insn;
3626
3627 if (GET_CODE (addr) == SYMBOL_REF)
3628 switch (tls_symbolic_operand (addr))
3629 {
3630 case TLS_MODEL_GLOBAL_DYNAMIC:
3631 start_sequence ();
3632 r2 = gen_rtx_REG (Pmode, 2);
3633 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_TLSGD);
3634 new_rtx = gen_rtx_CONST (Pmode, tls_call);
3635 new_rtx = force_const_mem (Pmode, new_rtx);
3636 emit_move_insn (r2, new_rtx);
3637 s390_emit_tls_call_insn (r2, tls_call);
3638 insn = get_insns ();
3639 end_sequence ();
3640
3641 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3642 temp = gen_reg_rtx (Pmode);
3643 emit_libcall_block (insn, temp, r2, new_rtx);
3644
3645 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3646 if (reg != 0)
3647 {
3648 s390_load_address (reg, new_rtx);
3649 new_rtx = reg;
3650 }
3651 break;
3652
3653 case TLS_MODEL_LOCAL_DYNAMIC:
3654 start_sequence ();
3655 r2 = gen_rtx_REG (Pmode, 2);
3656 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM);
3657 new_rtx = gen_rtx_CONST (Pmode, tls_call);
3658 new_rtx = force_const_mem (Pmode, new_rtx);
3659 emit_move_insn (r2, new_rtx);
3660 s390_emit_tls_call_insn (r2, tls_call);
3661 insn = get_insns ();
3662 end_sequence ();
3663
3664 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM_NTPOFF);
3665 temp = gen_reg_rtx (Pmode);
3666 emit_libcall_block (insn, temp, r2, new_rtx);
3667
3668 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3669 base = gen_reg_rtx (Pmode);
3670 s390_load_address (base, new_rtx);
3671
3672 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_DTPOFF);
3673 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3674 new_rtx = force_const_mem (Pmode, new_rtx);
3675 temp = gen_reg_rtx (Pmode);
3676 emit_move_insn (temp, new_rtx);
3677
3678 new_rtx = gen_rtx_PLUS (Pmode, base, temp);
3679 if (reg != 0)
3680 {
3681 s390_load_address (reg, new_rtx);
3682 new_rtx = reg;
3683 }
3684 break;
3685
3686 case TLS_MODEL_INITIAL_EXEC:
3687 if (flag_pic == 1)
3688 {
3689 /* Assume GOT offset < 4k. This is handled the same way
3690 in both 31- and 64-bit code. */
3691
3692 if (reload_in_progress || reload_completed)
3693 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3694
3695 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3696 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3697 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
3698 new_rtx = gen_const_mem (Pmode, new_rtx);
3699 temp = gen_reg_rtx (Pmode);
3700 emit_move_insn (temp, new_rtx);
3701 }
3702 else if (TARGET_CPU_ZARCH)
3703 {
3704 /* If the GOT offset might be >= 4k, we determine the position
3705 of the GOT entry via a PC-relative LARL. */
3706
3707 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3708 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3709 temp = gen_reg_rtx (Pmode);
3710 emit_move_insn (temp, new_rtx);
3711
3712 new_rtx = gen_const_mem (Pmode, temp);
3713 temp = gen_reg_rtx (Pmode);
3714 emit_move_insn (temp, new_rtx);
3715 }
3716 else if (flag_pic)
3717 {
3718 /* If the GOT offset might be >= 4k, we have to load it
3719 from the literal pool. */
3720
3721 if (reload_in_progress || reload_completed)
3722 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3723
3724 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3725 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3726 new_rtx = force_const_mem (Pmode, new_rtx);
3727 temp = gen_reg_rtx (Pmode);
3728 emit_move_insn (temp, new_rtx);
3729
3730 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3731 new_rtx = gen_const_mem (Pmode, new_rtx);
3732
3733 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
3734 temp = gen_reg_rtx (Pmode);
3735 emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
3736 }
3737 else
3738 {
3739 /* In position-dependent code, load the absolute address of
3740 the GOT entry from the literal pool. */
3741
3742 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3743 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3744 new_rtx = force_const_mem (Pmode, new_rtx);
3745 temp = gen_reg_rtx (Pmode);
3746 emit_move_insn (temp, new_rtx);
3747
3748 new_rtx = temp;
3749 new_rtx = gen_const_mem (Pmode, new_rtx);
3750 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
3751 temp = gen_reg_rtx (Pmode);
3752 emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
3753 }
3754
3755 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3756 if (reg != 0)
3757 {
3758 s390_load_address (reg, new_rtx);
3759 new_rtx = reg;
3760 }
3761 break;
3762
3763 case TLS_MODEL_LOCAL_EXEC:
3764 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3765 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3766 new_rtx = force_const_mem (Pmode, new_rtx);
3767 temp = gen_reg_rtx (Pmode);
3768 emit_move_insn (temp, new_rtx);
3769
3770 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3771 if (reg != 0)
3772 {
3773 s390_load_address (reg, new_rtx);
3774 new_rtx = reg;
3775 }
3776 break;
3777
3778 default:
3779 gcc_unreachable ();
3780 }
3781
3782 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == UNSPEC)
3783 {
3784 switch (XINT (XEXP (addr, 0), 1))
3785 {
3786 case UNSPEC_INDNTPOFF:
3787 gcc_assert (TARGET_CPU_ZARCH);
3788 new_rtx = addr;
3789 break;
3790
3791 default:
3792 gcc_unreachable ();
3793 }
3794 }
3795
3796 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
3797 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
3798 {
3799 new_rtx = XEXP (XEXP (addr, 0), 0);
3800 if (GET_CODE (new_rtx) != SYMBOL_REF)
3801 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3802
3803 new_rtx = legitimize_tls_address (new_rtx, reg);
3804 new_rtx = plus_constant (new_rtx, INTVAL (XEXP (XEXP (addr, 0), 1)));
3805 new_rtx = force_operand (new_rtx, 0);
3806 }
3807
3808 else
3809 gcc_unreachable (); /* for now ... */
3810
3811 return new_rtx;
3812 }
3813
3814 /* Emit insns making the address in operands[1] valid for a standard
3815 move to operands[0]. operands[1] is replaced by an address which
3816 should be used instead of the former RTX to emit the move
3817 pattern. */
3818
3819 void
3820 emit_symbolic_move (rtx *operands)
3821 {
3822 rtx temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
3823
3824 if (GET_CODE (operands[0]) == MEM)
3825 operands[1] = force_reg (Pmode, operands[1]);
3826 else if (TLS_SYMBOLIC_CONST (operands[1]))
3827 operands[1] = legitimize_tls_address (operands[1], temp);
3828 else if (flag_pic)
3829 operands[1] = legitimize_pic_address (operands[1], temp);
3830 }
3831
3832 /* Try machine-dependent ways of modifying an illegitimate address X
3833 to be legitimate. If we find one, return the new, valid address.
3834
3835 OLDX is the address as it was before break_out_memory_refs was called.
3836 In some cases it is useful to look at this to decide what needs to be done.
3837
3838 MODE is the mode of the operand pointed to by X.
3839
3840 When -fpic is used, special handling is needed for symbolic references.
3841 See comments by legitimize_pic_address for details. */
3842
3843 static rtx
3844 s390_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
3845 enum machine_mode mode ATTRIBUTE_UNUSED)
3846 {
3847 rtx constant_term = const0_rtx;
3848
3849 if (TLS_SYMBOLIC_CONST (x))
3850 {
3851 x = legitimize_tls_address (x, 0);
3852
3853 if (s390_legitimate_address_p (mode, x, FALSE))
3854 return x;
3855 }
3856 else if (GET_CODE (x) == PLUS
3857 && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
3858 || TLS_SYMBOLIC_CONST (XEXP (x, 1))))
3859 {
3860 return x;
3861 }
3862 else if (flag_pic)
3863 {
3864 if (SYMBOLIC_CONST (x)
3865 || (GET_CODE (x) == PLUS
3866 && (SYMBOLIC_CONST (XEXP (x, 0))
3867 || SYMBOLIC_CONST (XEXP (x, 1)))))
3868 x = legitimize_pic_address (x, 0);
3869
3870 if (s390_legitimate_address_p (mode, x, FALSE))
3871 return x;
3872 }
3873
3874 x = eliminate_constant_term (x, &constant_term);
3875
3876 /* Optimize loading of large displacements by splitting them
3877 into the multiple of 4K and the rest; this allows the
3878 former to be CSE'd if possible.
3879
3880 Don't do this if the displacement is added to a register
3881 pointing into the stack frame, as the offsets will
3882 change later anyway. */
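/* For example, a constant term of 0x12345 is split into
   upper = 0x12000, which is loaded into a register and can be CSE'd,
   and lower = 0x345, which fits into the displacement field.  */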
3883
3884 if (GET_CODE (constant_term) == CONST_INT
3885 && !TARGET_LONG_DISPLACEMENT
3886 && !DISP_IN_RANGE (INTVAL (constant_term))
3887 && !(REG_P (x) && REGNO_PTR_FRAME_P (REGNO (x))))
3888 {
3889 HOST_WIDE_INT lower = INTVAL (constant_term) & 0xfff;
3890 HOST_WIDE_INT upper = INTVAL (constant_term) ^ lower;
3891
3892 rtx temp = gen_reg_rtx (Pmode);
3893 rtx val = force_operand (GEN_INT (upper), temp);
3894 if (val != temp)
3895 emit_move_insn (temp, val);
3896
3897 x = gen_rtx_PLUS (Pmode, x, temp);
3898 constant_term = GEN_INT (lower);
3899 }
3900
3901 if (GET_CODE (x) == PLUS)
3902 {
3903 if (GET_CODE (XEXP (x, 0)) == REG)
3904 {
3905 rtx temp = gen_reg_rtx (Pmode);
3906 rtx val = force_operand (XEXP (x, 1), temp);
3907 if (val != temp)
3908 emit_move_insn (temp, val);
3909
3910 x = gen_rtx_PLUS (Pmode, XEXP (x, 0), temp);
3911 }
3912
3913 else if (GET_CODE (XEXP (x, 1)) == REG)
3914 {
3915 rtx temp = gen_reg_rtx (Pmode);
3916 rtx val = force_operand (XEXP (x, 0), temp);
3917 if (val != temp)
3918 emit_move_insn (temp, val);
3919
3920 x = gen_rtx_PLUS (Pmode, temp, XEXP (x, 1));
3921 }
3922 }
3923
3924 if (constant_term != const0_rtx)
3925 x = gen_rtx_PLUS (Pmode, x, constant_term);
3926
3927 return x;
3928 }
3929
3930 /* Try a machine-dependent way of reloading an illegitimate address AD
3931 operand. If we find one, push the reload and return the new address.
3932
3933 MODE is the mode of the enclosing MEM. OPNUM is the operand number
3934 and TYPE is the reload type of the current reload. */
3935
3936 rtx
3937 legitimize_reload_address (rtx ad, enum machine_mode mode ATTRIBUTE_UNUSED,
3938 int opnum, int type)
3939 {
3940 if (!optimize || TARGET_LONG_DISPLACEMENT)
3941 return NULL_RTX;
3942
3943 if (GET_CODE (ad) == PLUS)
3944 {
3945 rtx tem = simplify_binary_operation (PLUS, Pmode,
3946 XEXP (ad, 0), XEXP (ad, 1));
3947 if (tem)
3948 ad = tem;
3949 }
3950
3951 if (GET_CODE (ad) == PLUS
3952 && GET_CODE (XEXP (ad, 0)) == REG
3953 && GET_CODE (XEXP (ad, 1)) == CONST_INT
3954 && !DISP_IN_RANGE (INTVAL (XEXP (ad, 1))))
3955 {
3956 HOST_WIDE_INT lower = INTVAL (XEXP (ad, 1)) & 0xfff;
3957 HOST_WIDE_INT upper = INTVAL (XEXP (ad, 1)) ^ lower;
3958 rtx cst, tem, new_rtx;
3959
3960 cst = GEN_INT (upper);
3961 if (!legitimate_reload_constant_p (cst))
3962 cst = force_const_mem (Pmode, cst);
3963
3964 tem = gen_rtx_PLUS (Pmode, XEXP (ad, 0), cst);
3965 new_rtx = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));
3966
3967 push_reload (XEXP (tem, 1), 0, &XEXP (tem, 1), 0,
3968 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
3969 opnum, (enum reload_type) type);
3970 return new_rtx;
3971 }
3972
3973 return NULL_RTX;
3974 }
3975
3976 /* Emit code to move LEN bytes from SRC to DST. */
3977
3978 void
3979 s390_expand_movmem (rtx dst, rtx src, rtx len)
3980 {
3981 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
3982 {
3983 if (INTVAL (len) > 0)
3984 emit_insn (gen_movmem_short (dst, src, GEN_INT (INTVAL (len) - 1)));
3985 }
3986
3987 else if (TARGET_MVCLE)
3988 {
3989 emit_insn (gen_movmem_long (dst, src, convert_to_mode (Pmode, len, 1)));
3990 }
3991
3992 else
3993 {
3994 rtx dst_addr, src_addr, count, blocks, temp;
3995 rtx loop_start_label = gen_label_rtx ();
3996 rtx loop_end_label = gen_label_rtx ();
3997 rtx end_label = gen_label_rtx ();
3998 enum machine_mode mode;
3999
4000 mode = GET_MODE (len);
4001 if (mode == VOIDmode)
4002 mode = Pmode;
4003
4004 dst_addr = gen_reg_rtx (Pmode);
4005 src_addr = gen_reg_rtx (Pmode);
4006 count = gen_reg_rtx (mode);
4007 blocks = gen_reg_rtx (mode);
4008
4009 convert_move (count, len, 1);
4010 emit_cmp_and_jump_insns (count, const0_rtx,
4011 EQ, NULL_RTX, mode, 1, end_label);
4012
4013 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
4014 emit_move_insn (src_addr, force_operand (XEXP (src, 0), NULL_RTX));
4015 dst = change_address (dst, VOIDmode, dst_addr);
4016 src = change_address (src, VOIDmode, src_addr);
4017
4018 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4019 OPTAB_DIRECT);
4020 if (temp != count)
4021 emit_move_insn (count, temp);
4022
4023 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4024 OPTAB_DIRECT);
4025 if (temp != blocks)
4026 emit_move_insn (blocks, temp);
4027
4028 emit_cmp_and_jump_insns (blocks, const0_rtx,
4029 EQ, NULL_RTX, mode, 1, loop_end_label);
4030
4031 emit_label (loop_start_label);
4032
4033 if (TARGET_Z10
4034 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 768))
4035 {
4036 rtx prefetch;
4037
4038 /* Issue a read prefetch for the +3 cache line. */
4039 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, src_addr, GEN_INT (768)),
4040 const0_rtx, const0_rtx);
4041 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4042 emit_insn (prefetch);
4043
4044 /* Issue a write prefetch for the +3 cache line. */
4045 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (768)),
4046 const1_rtx, const0_rtx);
4047 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4048 emit_insn (prefetch);
4049 }
4050
4051 emit_insn (gen_movmem_short (dst, src, GEN_INT (255)));
4052 s390_load_address (dst_addr,
4053 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
4054 s390_load_address (src_addr,
4055 gen_rtx_PLUS (Pmode, src_addr, GEN_INT (256)));
4056
4057 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4058 OPTAB_DIRECT);
4059 if (temp != blocks)
4060 emit_move_insn (blocks, temp);
4061
4062 emit_cmp_and_jump_insns (blocks, const0_rtx,
4063 EQ, NULL_RTX, mode, 1, loop_end_label);
4064
4065 emit_jump (loop_start_label);
4066 emit_label (loop_end_label);
4067
4068 emit_insn (gen_movmem_short (dst, src,
4069 convert_to_mode (Pmode, count, 1)));
4070 emit_label (end_label);
4071 }
4072 }
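/* A rough C model of the generic loop emitted above, for illustration
   only (prefetch insns omitted; the helper mvc is hypothetical and
   stands for a single MVC moving n bytes):

     void
     movmem_model (char *dst, const char *src, unsigned long len)
     {
       unsigned long count, blocks;

       if (len == 0)
         return;
       count = len - 1;
       for (blocks = count >> 8; blocks != 0; blocks--)
         {
           mvc (dst, src, 256);
           dst += 256;
           src += 256;
         }
       mvc (dst, src, (count & 0xff) + 1);
     }

   Each loop iteration moves one full 256-byte block; the final MVC
   handles the remaining (count mod 256) + 1 bytes.  */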
4073
4074 /* Emit code to set LEN bytes at DST to VAL.
4075 Make use of clrmem if VAL is zero. */
4076
4077 void
4078 s390_expand_setmem (rtx dst, rtx len, rtx val)
4079 {
4080 if (GET_CODE (len) == CONST_INT && INTVAL (len) == 0)
4081 return;
4082
4083 gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);
4084
4085 if (GET_CODE (len) == CONST_INT && INTVAL (len) > 0 && INTVAL (len) <= 257)
4086 {
4087 if (val == const0_rtx && INTVAL (len) <= 256)
4088 emit_insn (gen_clrmem_short (dst, GEN_INT (INTVAL (len) - 1)));
4089 else
4090 {
4091 /* Initialize memory by storing the first byte. */
4092 emit_move_insn (adjust_address (dst, QImode, 0), val);
4093
4094 if (INTVAL (len) > 1)
4095 {
4096 /* Initiate 1 byte overlap move.
4097 The first byte of DST is propagated through DSTP1.
4098 Prepare a movmem for: DST+1 = DST (length = LEN - 1).
4099 DST is set to size 1 so the rest of the memory location
4100 does not count as source operand. */
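/* Illustrative example: to set five bytes to 0xAA, the code first
   stores DST[0] = 0xAA and then emits an MVC of LEN - 1 = 4 bytes
   from DST to DST + 1.  MVC copies strictly left to right, one byte
   at a time, so every byte it reads has already been set:

     after the store:   AA ?? ?? ?? ??
     after the MVC:     AA AA AA AA AA                              */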
4101 rtx dstp1 = adjust_address (dst, VOIDmode, 1);
4102 set_mem_size (dst, 1);
4103
4104 emit_insn (gen_movmem_short (dstp1, dst,
4105 GEN_INT (INTVAL (len) - 2)));
4106 }
4107 }
4108 }
4109
4110 else if (TARGET_MVCLE)
4111 {
4112 val = force_not_mem (convert_modes (Pmode, QImode, val, 1));
4113 emit_insn (gen_setmem_long (dst, convert_to_mode (Pmode, len, 1), val));
4114 }
4115
4116 else
4117 {
4118 rtx dst_addr, count, blocks, temp, dstp1 = NULL_RTX;
4119 rtx loop_start_label = gen_label_rtx ();
4120 rtx loop_end_label = gen_label_rtx ();
4121 rtx end_label = gen_label_rtx ();
4122 enum machine_mode mode;
4123
4124 mode = GET_MODE (len);
4125 if (mode == VOIDmode)
4126 mode = Pmode;
4127
4128 dst_addr = gen_reg_rtx (Pmode);
4129 count = gen_reg_rtx (mode);
4130 blocks = gen_reg_rtx (mode);
4131
4132 convert_move (count, len, 1);
4133 emit_cmp_and_jump_insns (count, const0_rtx,
4134 EQ, NULL_RTX, mode, 1, end_label);
4135
4136 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
4137 dst = change_address (dst, VOIDmode, dst_addr);
4138
4139 if (val == const0_rtx)
4140 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4141 OPTAB_DIRECT);
4142 else
4143 {
4144 dstp1 = adjust_address (dst, VOIDmode, 1);
4145 set_mem_size (dst, 1);
4146
4147 /* Initialize memory by storing the first byte. */
4148 emit_move_insn (adjust_address (dst, QImode, 0), val);
4149
4150 /* If count is 1 we are done. */
4151 emit_cmp_and_jump_insns (count, const1_rtx,
4152 EQ, NULL_RTX, mode, 1, end_label);
4153
4154 temp = expand_binop (mode, add_optab, count, GEN_INT (-2), count, 1,
4155 OPTAB_DIRECT);
4156 }
4157 if (temp != count)
4158 emit_move_insn (count, temp);
4159
4160 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4161 OPTAB_DIRECT);
4162 if (temp != blocks)
4163 emit_move_insn (blocks, temp);
4164
4165 emit_cmp_and_jump_insns (blocks, const0_rtx,
4166 EQ, NULL_RTX, mode, 1, loop_end_label);
4167
4168 emit_label (loop_start_label);
4169
4170 if (TARGET_Z10
4171 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 1024))
4172 {
4173 /* Issue a write prefetch for the +4 cache line. */
4174 rtx prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr,
4175 GEN_INT (1024)),
4176 const1_rtx, const0_rtx);
4177 emit_insn (prefetch);
4178 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4179 }
4180
4181 if (val == const0_rtx)
4182 emit_insn (gen_clrmem_short (dst, GEN_INT (255)));
4183 else
4184 emit_insn (gen_movmem_short (dstp1, dst, GEN_INT (255)));
4185 s390_load_address (dst_addr,
4186 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
4187
4188 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4189 OPTAB_DIRECT);
4190 if (temp != blocks)
4191 emit_move_insn (blocks, temp);
4192
4193 emit_cmp_and_jump_insns (blocks, const0_rtx,
4194 EQ, NULL_RTX, mode, 1, loop_end_label);
4195
4196 emit_jump (loop_start_label);
4197 emit_label (loop_end_label);
4198
4199 if (val == const0_rtx)
4200 emit_insn (gen_clrmem_short (dst, convert_to_mode (Pmode, count, 1)));
4201 else
4202 emit_insn (gen_movmem_short (dstp1, dst, convert_to_mode (Pmode, count, 1)));
4203 emit_label (end_label);
4204 }
4205 }
4206
4207 /* Emit code to compare LEN bytes at OP0 with those at OP1,
4208 and return the result in TARGET. */
4209
4210 void
4211 s390_expand_cmpmem (rtx target, rtx op0, rtx op1, rtx len)
4212 {
4213 rtx ccreg = gen_rtx_REG (CCUmode, CC_REGNUM);
4214 rtx tmp;
4215
4216 /* As the result of CMPINT is inverted compared to what we need,
4217 we have to swap the operands. */
4218 tmp = op0; op0 = op1; op1 = tmp;
4219
4220 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
4221 {
4222 if (INTVAL (len) > 0)
4223 {
4224 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (INTVAL (len) - 1)));
4225 emit_insn (gen_cmpint (target, ccreg));
4226 }
4227 else
4228 emit_move_insn (target, const0_rtx);
4229 }
4230 else if (TARGET_MVCLE)
4231 {
4232 emit_insn (gen_cmpmem_long (op0, op1, convert_to_mode (Pmode, len, 1)));
4233 emit_insn (gen_cmpint (target, ccreg));
4234 }
4235 else
4236 {
4237 rtx addr0, addr1, count, blocks, temp;
4238 rtx loop_start_label = gen_label_rtx ();
4239 rtx loop_end_label = gen_label_rtx ();
4240 rtx end_label = gen_label_rtx ();
4241 enum machine_mode mode;
4242
4243 mode = GET_MODE (len);
4244 if (mode == VOIDmode)
4245 mode = Pmode;
4246
4247 addr0 = gen_reg_rtx (Pmode);
4248 addr1 = gen_reg_rtx (Pmode);
4249 count = gen_reg_rtx (mode);
4250 blocks = gen_reg_rtx (mode);
4251
4252 convert_move (count, len, 1);
4253 emit_cmp_and_jump_insns (count, const0_rtx,
4254 EQ, NULL_RTX, mode, 1, end_label);
4255
4256 emit_move_insn (addr0, force_operand (XEXP (op0, 0), NULL_RTX));
4257 emit_move_insn (addr1, force_operand (XEXP (op1, 0), NULL_RTX));
4258 op0 = change_address (op0, VOIDmode, addr0);
4259 op1 = change_address (op1, VOIDmode, addr1);
4260
4261 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4262 OPTAB_DIRECT);
4263 if (temp != count)
4264 emit_move_insn (count, temp);
4265
4266 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4267 OPTAB_DIRECT);
4268 if (temp != blocks)
4269 emit_move_insn (blocks, temp);
4270
4271 emit_cmp_and_jump_insns (blocks, const0_rtx,
4272 EQ, NULL_RTX, mode, 1, loop_end_label);
4273
4274 emit_label (loop_start_label);
4275
4276 if (TARGET_Z10
4277 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 512))
4278 {
4279 rtx prefetch;
4280
4281 /* Issue a read prefetch for the +2 cache line of operand 1. */
4282 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr0, GEN_INT (512)),
4283 const0_rtx, const0_rtx);
4284 emit_insn (prefetch);
4285 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4286
4287 /* Issue a read prefetch for the +2 cache line of operand 2. */
4288 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr1, GEN_INT (512)),
4289 const0_rtx, const0_rtx);
4290 emit_insn (prefetch);
4291 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4292 }
4293
4294 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (255)));
4295 temp = gen_rtx_NE (VOIDmode, ccreg, const0_rtx);
4296 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
4297 gen_rtx_LABEL_REF (VOIDmode, end_label), pc_rtx);
4298 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
4299 emit_jump_insn (temp);
4300
4301 s390_load_address (addr0,
4302 gen_rtx_PLUS (Pmode, addr0, GEN_INT (256)));
4303 s390_load_address (addr1,
4304 gen_rtx_PLUS (Pmode, addr1, GEN_INT (256)));
4305
4306 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4307 OPTAB_DIRECT);
4308 if (temp != blocks)
4309 emit_move_insn (blocks, temp);
4310
4311 emit_cmp_and_jump_insns (blocks, const0_rtx,
4312 EQ, NULL_RTX, mode, 1, loop_end_label);
4313
4314 emit_jump (loop_start_label);
4315 emit_label (loop_end_label);
4316
4317 emit_insn (gen_cmpmem_short (op0, op1,
4318 convert_to_mode (Pmode, count, 1)));
4319 emit_label (end_label);
4320
4321 emit_insn (gen_cmpint (target, ccreg));
4322 }
4323 }
4324
4325
4326 /* Expand conditional increment or decrement using alc/slb instructions.
4327 Should generate code setting DST to either SRC or SRC + INCREMENT,
4328 depending on the result of the comparison CMP_OP0 CMP_CODE CMP_OP1.
4329 Returns true if successful, false otherwise.
4330
4331 That makes it possible to implement some if-constructs without jumps e.g.:
4332 (borrow = CC0 | CC1 and carry = CC2 | CC3)
4333 unsigned int a, b, c;
4334 if (a < b) c++; -> CCU b > a -> CC2; c += carry;
4335 if (a < b) c--; -> CCL3 a - b -> borrow; c -= borrow;
4336 if (a <= b) c++; -> CCL3 b - a -> borrow; c += carry;
4337 if (a <= b) c--; -> CCU a <= b -> borrow; c -= borrow;
4338
4339 Checks for EQ and NE with a nonzero value need an additional xor e.g.:
4340 if (a == b) c++; -> CCL3 a ^= b; 0 - a -> borrow; c += carry;
4341 if (a == b) c--; -> CCU a ^= b; a <= 0 -> CC0 | CC1; c -= borrow;
4342 if (a != b) c++; -> CCU a ^= b; a > 0 -> CC2; c += carry;
4343 if (a != b) c--; -> CCL3 a ^= b; 0 - a -> borrow; c -= borrow; */
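/* For illustration (not part of the expansion itself), the source-level
   pattern this targets is a conditional increment such as

     unsigned int a, b, c;
     ...
     if (a < b)
       c++;

   which, with the mappings above, becomes a compare that leaves the
   carry in the CC followed by an add-logical-with-carry that folds the
   carry into the increment of c, with no branch at all.  */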
4344
4345 bool
4346 s390_expand_addcc (enum rtx_code cmp_code, rtx cmp_op0, rtx cmp_op1,
4347 rtx dst, rtx src, rtx increment)
4348 {
4349 enum machine_mode cmp_mode;
4350 enum machine_mode cc_mode;
4351 rtx op_res;
4352 rtx insn;
4353 rtvec p;
4354 int ret;
4355
4356 if ((GET_MODE (cmp_op0) == SImode || GET_MODE (cmp_op0) == VOIDmode)
4357 && (GET_MODE (cmp_op1) == SImode || GET_MODE (cmp_op1) == VOIDmode))
4358 cmp_mode = SImode;
4359 else if ((GET_MODE (cmp_op0) == DImode || GET_MODE (cmp_op0) == VOIDmode)
4360 && (GET_MODE (cmp_op1) == DImode || GET_MODE (cmp_op1) == VOIDmode))
4361 cmp_mode = DImode;
4362 else
4363 return false;
4364
4365 /* Try ADD LOGICAL WITH CARRY. */
4366 if (increment == const1_rtx)
4367 {
4368 /* Determine CC mode to use. */
4369 if (cmp_code == EQ || cmp_code == NE)
4370 {
4371 if (cmp_op1 != const0_rtx)
4372 {
4373 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
4374 NULL_RTX, 0, OPTAB_WIDEN);
4375 cmp_op1 = const0_rtx;
4376 }
4377
4378 cmp_code = cmp_code == EQ ? LEU : GTU;
4379 }
4380
4381 if (cmp_code == LTU || cmp_code == LEU)
4382 {
4383 rtx tem = cmp_op0;
4384 cmp_op0 = cmp_op1;
4385 cmp_op1 = tem;
4386 cmp_code = swap_condition (cmp_code);
4387 }
4388
4389 switch (cmp_code)
4390 {
4391 case GTU:
4392 cc_mode = CCUmode;
4393 break;
4394
4395 case GEU:
4396 cc_mode = CCL3mode;
4397 break;
4398
4399 default:
4400 return false;
4401 }
4402
4403 /* Emit comparison instruction pattern. */
4404 if (!register_operand (cmp_op0, cmp_mode))
4405 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4406
4407 insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
4408 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4409 /* We use insn_invalid_p here to add clobbers if required. */
4410 ret = insn_invalid_p (emit_insn (insn));
4411 gcc_assert (!ret);
4412
4413 /* Emit ALC instruction pattern. */
4414 op_res = gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4415 gen_rtx_REG (cc_mode, CC_REGNUM),
4416 const0_rtx);
4417
4418 if (src != const0_rtx)
4419 {
4420 if (!register_operand (src, GET_MODE (dst)))
4421 src = force_reg (GET_MODE (dst), src);
4422
4423 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, src);
4424 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, const0_rtx);
4425 }
4426
4427 p = rtvec_alloc (2);
4428 RTVEC_ELT (p, 0) =
4429 gen_rtx_SET (VOIDmode, dst, op_res);
4430 RTVEC_ELT (p, 1) =
4431 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4432 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
4433
4434 return true;
4435 }
4436
4437 /* Try SUBTRACT LOGICAL WITH BORROW. */
4438 if (increment == constm1_rtx)
4439 {
4440 /* Determine CC mode to use. */
4441 if (cmp_code == EQ || cmp_code == NE)
4442 {
4443 if (cmp_op1 != const0_rtx)
4444 {
4445 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
4446 NULL_RTX, 0, OPTAB_WIDEN);
4447 cmp_op1 = const0_rtx;
4448 }
4449
4450 cmp_code = cmp_code == EQ ? LEU : GTU;
4451 }
4452
4453 if (cmp_code == GTU || cmp_code == GEU)
4454 {
4455 rtx tem = cmp_op0;
4456 cmp_op0 = cmp_op1;
4457 cmp_op1 = tem;
4458 cmp_code = swap_condition (cmp_code);
4459 }
4460
4461 switch (cmp_code)
4462 {
4463 case LEU:
4464 cc_mode = CCUmode;
4465 break;
4466
4467 case LTU:
4468 cc_mode = CCL3mode;
4469 break;
4470
4471 default:
4472 return false;
4473 }
4474
4475 /* Emit comparison instruction pattern. */
4476 if (!register_operand (cmp_op0, cmp_mode))
4477 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4478
4479 insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
4480 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4481 /* We use insn_invalid_p here to add clobbers if required. */
4482 ret = insn_invalid_p (emit_insn (insn));
4483 gcc_assert (!ret);
4484
4485 /* Emit SLB instruction pattern. */
4486 if (!register_operand (src, GET_MODE (dst)))
4487 src = force_reg (GET_MODE (dst), src);
4488
4489 op_res = gen_rtx_MINUS (GET_MODE (dst),
4490 gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
4491 gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4492 gen_rtx_REG (cc_mode, CC_REGNUM),
4493 const0_rtx));
4494 p = rtvec_alloc (2);
4495 RTVEC_ELT (p, 0) =
4496 gen_rtx_SET (VOIDmode, dst, op_res);
4497 RTVEC_ELT (p, 1) =
4498 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4499 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
4500
4501 return true;
4502 }
4503
4504 return false;
4505 }
4506
4507 /* Expand code for the insv template. Return true if successful. */
4508
4509 bool
4510 s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
4511 {
4512 int bitsize = INTVAL (op1);
4513 int bitpos = INTVAL (op2);
4514
4515 /* On z10 we can use the risbg instruction to implement insv. */
4516 if (TARGET_Z10
4517 && ((GET_MODE (dest) == DImode && GET_MODE (src) == DImode)
4518 || (GET_MODE (dest) == SImode && GET_MODE (src) == SImode)))
4519 {
4520 rtx op;
4521 rtx clobber;
4522
4523 op = gen_rtx_SET (GET_MODE (src),
4524 gen_rtx_ZERO_EXTRACT (GET_MODE (dest), dest, op1, op2),
4525 src);
4526 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4527 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
4528
4529 return true;
4530 }
4531
4532 /* We need byte alignment. */
4533 if (bitsize % BITS_PER_UNIT)
4534 return false;
4535
4536 if (bitpos == 0
4537 && memory_operand (dest, VOIDmode)
4538 && (register_operand (src, word_mode)
4539 || const_int_operand (src, VOIDmode)))
4540 {
4541 /* Emit standard pattern if possible. */
4542 enum machine_mode mode = smallest_mode_for_size (bitsize, MODE_INT);
4543 if (GET_MODE_BITSIZE (mode) == bitsize)
4544 emit_move_insn (adjust_address (dest, mode, 0), gen_lowpart (mode, src));
4545
4546 /* (set (ze (mem)) (const_int)). */
4547 else if (const_int_operand (src, VOIDmode))
4548 {
4549 int size = bitsize / BITS_PER_UNIT;
4550 rtx src_mem = adjust_address (force_const_mem (word_mode, src), BLKmode,
4551 GET_MODE_SIZE (word_mode) - size);
4552
4553 dest = adjust_address (dest, BLKmode, 0);
4554 set_mem_size (dest, size);
4555 s390_expand_movmem (dest, src_mem, GEN_INT (size));
4556 }
4557
4558 /* (set (ze (mem)) (reg)). */
4559 else if (register_operand (src, word_mode))
4560 {
4561 if (bitsize <= GET_MODE_BITSIZE (SImode))
4562 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, op1,
4563 const0_rtx), src);
4564 else
4565 {
4566 /* Emit st,stcmh sequence. */
4567 int stcmh_width = bitsize - GET_MODE_BITSIZE (SImode);
4568 int size = stcmh_width / BITS_PER_UNIT;
4569
4570 emit_move_insn (adjust_address (dest, SImode, size),
4571 gen_lowpart (SImode, src));
4572 set_mem_size (dest, size);
4573 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, GEN_INT
4574 (stcmh_width), const0_rtx),
4575 gen_rtx_LSHIFTRT (word_mode, src, GEN_INT
4576 (GET_MODE_BITSIZE (SImode))));
4577 }
4578 }
4579 else
4580 return false;
4581
4582 return true;
4583 }
4584
4585 /* (set (ze (reg)) (const_int)). */
4586 if (TARGET_ZARCH
4587 && register_operand (dest, word_mode)
4588 && (bitpos % 16) == 0
4589 && (bitsize % 16) == 0
4590 && const_int_operand (src, VOIDmode))
4591 {
4592 HOST_WIDE_INT val = INTVAL (src);
4593 int regpos = bitpos + bitsize;
4594
4595 while (regpos > bitpos)
4596 {
4597 enum machine_mode putmode;
4598 int putsize;
4599
4600 if (TARGET_EXTIMM && (regpos % 32 == 0) && (regpos >= bitpos + 32))
4601 putmode = SImode;
4602 else
4603 putmode = HImode;
4604
4605 putsize = GET_MODE_BITSIZE (putmode);
4606 regpos -= putsize;
4607 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
4608 GEN_INT (putsize),
4609 GEN_INT (regpos)),
4610 gen_int_mode (val, putmode));
4611 val >>= putsize;
4612 }
4613 gcc_assert (regpos == bitpos);
4614 return true;
4615 }
4616
4617 return false;
4618 }
4619
4620 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic which returns a
4621 register that holds VAL of mode MODE shifted by COUNT bits. */
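/* For example (illustrative): for MODE == QImode, VAL == 0x5a and a
   shift count of 8 this yields (0x5a & 0xff) << 8 == 0x5a00, i.e. the
   byte placed at its position within the containing SImode word.  */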
4622
4623 static inline rtx
4624 s390_expand_mask_and_shift (rtx val, enum machine_mode mode, rtx count)
4625 {
4626 val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
4627 NULL_RTX, 1, OPTAB_DIRECT);
4628 return expand_simple_binop (SImode, ASHIFT, val, count,
4629 NULL_RTX, 1, OPTAB_DIRECT);
4630 }
4631
4632 /* Structure to hold the initial parameters for a compare_and_swap operation
4633 in HImode and QImode. */
4634
4635 struct alignment_context
4636 {
4637 rtx memsi; /* SI aligned memory location. */
4638 rtx shift; /* Bit offset with regard to lsb. */
4639 rtx modemask; /* Mask of the HQImode shifted by SHIFT bits. */
4640 rtx modemaski; /* ~modemask */
4641 bool aligned; /* True if memory is aligned, false otherwise. */
4642 };
4643
4644 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize
4645 the structure AC, simplifying the operation transparently when the memory alignment
4646 is known to be at least 32 bits. MEM is the memory location for the actual operation
4647 and MODE its mode. */
4648
4649 static void
4650 init_alignment_context (struct alignment_context *ac, rtx mem,
4651 enum machine_mode mode)
4652 {
4653 ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
4654 ac->aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));
4655
4656 if (ac->aligned)
4657 ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned. */
4658 else
4659 {
4660 /* Alignment is unknown. */
4661 rtx byteoffset, addr, align;
4662
4663 /* Force the address into a register. */
4664 addr = force_reg (Pmode, XEXP (mem, 0));
4665
4666 /* Align it to SImode. */
4667 align = expand_simple_binop (Pmode, AND, addr,
4668 GEN_INT (-GET_MODE_SIZE (SImode)),
4669 NULL_RTX, 1, OPTAB_DIRECT);
4670 /* Generate MEM. */
4671 ac->memsi = gen_rtx_MEM (SImode, align);
4672 MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
4673 set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
4674 set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));
4675
4676 /* Calculate shiftcount. */
4677 byteoffset = expand_simple_binop (Pmode, AND, addr,
4678 GEN_INT (GET_MODE_SIZE (SImode) - 1),
4679 NULL_RTX, 1, OPTAB_DIRECT);
4680 /* As we already have some offset, evaluate the remaining distance. */
4681 ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
4682 NULL_RTX, 1, OPTAB_DIRECT);
4683
4684 }
4685 /* Shift is the byte count, but we need the bitcount. */
4686 ac->shift = expand_simple_binop (SImode, MULT, ac->shift, GEN_INT (BITS_PER_UNIT),
4687 NULL_RTX, 1, OPTAB_DIRECT);
4688 /* Calculate masks. */
4689 ac->modemask = expand_simple_binop (SImode, ASHIFT,
4690 GEN_INT (GET_MODE_MASK (mode)), ac->shift,
4691 NULL_RTX, 1, OPTAB_DIRECT);
4692 ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask, NULL_RTX, 1);
4693 }
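/* Worked example (illustrative): for a QImode access whose address has
   byte offset 2 within its containing word, the code above computes

     shift     = (4 - 1 - 2) * 8  =  8
     modemask  = 0xff << 8        =  0x0000ff00
     modemaski = ~modemask        =  0xffff00ff

   i.e. on this big-endian target the byte occupies bits 8..15 of the
   SImode word loaded from the aligned address.  */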
4694
4695 /* Expand an atomic compare and swap operation for HImode and QImode. MEM is
4696 the memory location, CMP the old value to compare MEM with and NEW_RTX the value
4697 to set if CMP == MEM.
4698 CMP is never in memory for compare_and_swap_cc because
4699 expand_bool_compare_and_swap puts it into a register for later compare. */
4700
4701 void
4702 s390_expand_cs_hqi (enum machine_mode mode, rtx target, rtx mem, rtx cmp, rtx new_rtx)
4703 {
4704 struct alignment_context ac;
4705 rtx cmpv, newv, val, resv, cc;
4706 rtx res = gen_reg_rtx (SImode);
4707 rtx csloop = gen_label_rtx ();
4708 rtx csend = gen_label_rtx ();
4709
4710 gcc_assert (register_operand (target, VOIDmode));
4711 gcc_assert (MEM_P (mem));
4712
4713 init_alignment_context (&ac, mem, mode);
4714
4715 /* Shift the values to the correct bit positions. */
4716 if (!(ac.aligned && MEM_P (cmp)))
4717 cmp = s390_expand_mask_and_shift (cmp, mode, ac.shift);
4718 if (!(ac.aligned && MEM_P (new_rtx)))
4719 new_rtx = s390_expand_mask_and_shift (new_rtx, mode, ac.shift);
4720
4721 /* Load full word. Subsequent loads are performed by CS. */
4722 val = expand_simple_binop (SImode, AND, ac.memsi, ac.modemaski,
4723 NULL_RTX, 1, OPTAB_DIRECT);
4724
4725 /* Start CS loop. */
4726 emit_label (csloop);
4727 /* val = "<mem>00..0<mem>"
4728 * cmp = "00..0<cmp>00..0"
4729 * new = "00..0<new>00..0"
4730 */
4731
4732 /* Patch cmp and new with val at correct position. */
4733 if (ac.aligned && MEM_P (cmp))
4734 {
4735 cmpv = force_reg (SImode, val);
4736 store_bit_field (cmpv, GET_MODE_BITSIZE (mode), 0,
4737 0, 0, SImode, cmp);
4738 }
4739 else
4740 cmpv = force_reg (SImode, expand_simple_binop (SImode, IOR, cmp, val,
4741 NULL_RTX, 1, OPTAB_DIRECT));
4742 if (ac.aligned && MEM_P (new_rtx))
4743 {
4744 newv = force_reg (SImode, val);
4745 store_bit_field (newv, GET_MODE_BITSIZE (mode), 0,
4746 0, 0, SImode, new_rtx);
4747 }
4748 else
4749 newv = force_reg (SImode, expand_simple_binop (SImode, IOR, new_rtx, val,
4750 NULL_RTX, 1, OPTAB_DIRECT));
4751
4752 /* Jump to end if we're done (likely?). */
4753 s390_emit_jump (csend, s390_emit_compare_and_swap (EQ, res, ac.memsi,
4754 cmpv, newv));
4755
4756 /* Check for changes outside mode. */
4757 resv = expand_simple_binop (SImode, AND, res, ac.modemaski,
4758 NULL_RTX, 1, OPTAB_DIRECT);
4759 cc = s390_emit_compare (NE, resv, val);
4760 emit_move_insn (val, resv);
4761 /* Loop internal if so. */
4762 s390_emit_jump (csloop, cc);
4763
4764 emit_label (csend);
4765
4766 /* Return the correct part of the bitfield. */
4767 convert_move (target, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
4768 NULL_RTX, 1, OPTAB_DIRECT), 1);
4769 }
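/* A rough C model of the compare-and-swap loop above, for illustration
   only.  It assumes a big-endian 32-bit word and uses the GCC builtin
   __sync_val_compare_and_swap in place of the CS instruction; the
   function name is made up:

     unsigned char
     cs_byte_model (unsigned char *p, unsigned char cmp, unsigned char new_val)
     {
       unsigned int *wp = (unsigned int *) ((unsigned long) p & ~3UL);
       int shift = (3 - ((unsigned long) p & 3)) * 8;
       unsigned int mask = 0xffU << shift;
       unsigned int val = *wp & ~mask;
       unsigned int old, cmpv, newv;

       for (;;)
         {
           cmpv = val | ((unsigned int) cmp << shift);
           newv = val | ((unsigned int) new_val << shift);
           old = __sync_val_compare_and_swap (wp, cmpv, newv);
           if (old == cmpv)
             break;
           if ((old & ~mask) == val)
             break;
           val = old & ~mask;
         }
       return (unsigned char) (old >> shift);
     }

   The first break is the successful swap; the second break means the
   surrounding bytes were unchanged, so the target byte itself differed
   from CMP and the operation legitimately failed.  */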
4770
4771 /* Expand an atomic operation CODE of mode MODE. MEM is the memory location
4772 and VAL the value to play with. If AFTER is true then store the value
4773 MEM holds after the operation, if AFTER is false then store the value MEM
4774 holds before the operation. If TARGET is zero then discard that value, else
4775 store it to TARGET. */
4776
4777 void
4778 s390_expand_atomic (enum machine_mode mode, enum rtx_code code,
4779 rtx target, rtx mem, rtx val, bool after)
4780 {
4781 struct alignment_context ac;
4782 rtx cmp;
4783 rtx new_rtx = gen_reg_rtx (SImode);
4784 rtx orig = gen_reg_rtx (SImode);
4785 rtx csloop = gen_label_rtx ();
4786
4787 gcc_assert (!target || register_operand (target, VOIDmode));
4788 gcc_assert (MEM_P (mem));
4789
4790 init_alignment_context (&ac, mem, mode);
4791
4792 /* Shift val to the correct bit positions.
4793 Preserve "icm", but prevent "ex icm". */
4794 if (!(ac.aligned && code == SET && MEM_P (val)))
4795 val = s390_expand_mask_and_shift (val, mode, ac.shift);
4796
4797 /* Further preparation insns. */
4798 if (code == PLUS || code == MINUS)
4799 emit_move_insn (orig, val);
4800 else if (code == MULT || code == AND) /* val = "11..1<val>11..1" */
4801 val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
4802 NULL_RTX, 1, OPTAB_DIRECT);
4803
4804 /* Load full word. Subsequent loads are performed by CS. */
4805 cmp = force_reg (SImode, ac.memsi);
4806
4807 /* Start CS loop. */
4808 emit_label (csloop);
4809 emit_move_insn (new_rtx, cmp);
4810
4811 /* Patch new with val at correct position. */
4812 switch (code)
4813 {
4814 case PLUS:
4815 case MINUS:
4816 val = expand_simple_binop (SImode, code, new_rtx, orig,
4817 NULL_RTX, 1, OPTAB_DIRECT);
4818 val = expand_simple_binop (SImode, AND, val, ac.modemask,
4819 NULL_RTX, 1, OPTAB_DIRECT);
4820 /* FALLTHRU */
4821 case SET:
4822 if (ac.aligned && MEM_P (val))
4823 store_bit_field (new_rtx, GET_MODE_BITSIZE (mode), 0,
4824 0, 0, SImode, val);
4825 else
4826 {
4827 new_rtx = expand_simple_binop (SImode, AND, new_rtx, ac.modemaski,
4828 NULL_RTX, 1, OPTAB_DIRECT);
4829 new_rtx = expand_simple_binop (SImode, IOR, new_rtx, val,
4830 NULL_RTX, 1, OPTAB_DIRECT);
4831 }
4832 break;
4833 case AND:
4834 case IOR:
4835 case XOR:
4836 new_rtx = expand_simple_binop (SImode, code, new_rtx, val,
4837 NULL_RTX, 1, OPTAB_DIRECT);
4838 break;
4839 case MULT: /* NAND */
4840 new_rtx = expand_simple_binop (SImode, AND, new_rtx, val,
4841 NULL_RTX, 1, OPTAB_DIRECT);
4842 new_rtx = expand_simple_binop (SImode, XOR, new_rtx, ac.modemask,
4843 NULL_RTX, 1, OPTAB_DIRECT);
4844 break;
4845 default:
4846 gcc_unreachable ();
4847 }
4848
4849 s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, cmp,
4850 ac.memsi, cmp, new_rtx));
4851
4852 /* Return the correct part of the bitfield. */
4853 if (target)
4854 convert_move (target, expand_simple_binop (SImode, LSHIFTRT,
4855 after ? new_rtx : cmp, ac.shift,
4856 NULL_RTX, 1, OPTAB_DIRECT), 1);
4857 }
4858
4859 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
4860 We need to emit DTP-relative relocations. */
4861
4862 static void s390_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
4863
4864 static void
4865 s390_output_dwarf_dtprel (FILE *file, int size, rtx x)
4866 {
4867 switch (size)
4868 {
4869 case 4:
4870 fputs ("\t.long\t", file);
4871 break;
4872 case 8:
4873 fputs ("\t.quad\t", file);
4874 break;
4875 default:
4876 gcc_unreachable ();
4877 }
4878 output_addr_const (file, x);
4879 fputs ("@DTPOFF", file);
4880 }
4881
4882 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
4883 /* Implement TARGET_MANGLE_TYPE. */
4884
4885 static const char *
4886 s390_mangle_type (const_tree type)
4887 {
4888 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
4889 && TARGET_LONG_DOUBLE_128)
4890 return "g";
4891
4892 /* For all other types, use normal C++ mangling. */
4893 return NULL;
4894 }
4895 #endif
4896
4897 /* In the name of slightly smaller debug output, and to cater to
4898 general assembler lossage, recognize various UNSPEC sequences
4899 and turn them back into a direct symbol reference. */
4900
4901 static rtx
4902 s390_delegitimize_address (rtx orig_x)
4903 {
4904 rtx x, y;
4905
4906 orig_x = delegitimize_mem_from_attrs (orig_x);
4907 x = orig_x;
4908
4909 /* Extract the symbol ref from:
4910 (plus:SI (reg:SI 12 %r12)
4911 (const:SI (unspec:SI [(symbol_ref/f:SI ("*.LC0"))]
4912 UNSPEC_GOTOFF/PLTOFF)))
4913 and
4914 (plus:SI (reg:SI 12 %r12)
4915 (const:SI (plus:SI (unspec:SI [(symbol_ref:SI ("L"))]
4916 UNSPEC_GOTOFF/PLTOFF)
4917 (const_int 4 [0x4])))) */
4918 if (GET_CODE (x) == PLUS
4919 && REG_P (XEXP (x, 0))
4920 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
4921 && GET_CODE (XEXP (x, 1)) == CONST)
4922 {
4923 HOST_WIDE_INT offset = 0;
4924
4925 /* The const operand. */
4926 y = XEXP (XEXP (x, 1), 0);
4927
4928 if (GET_CODE (y) == PLUS
4929 && GET_CODE (XEXP (y, 1)) == CONST_INT)
4930 {
4931 offset = INTVAL (XEXP (y, 1));
4932 y = XEXP (y, 0);
4933 }
4934
4935 if (GET_CODE (y) == UNSPEC
4936 && (XINT (y, 1) == UNSPEC_GOTOFF
4937 || XINT (y, 1) == UNSPEC_PLTOFF))
4938 return plus_constant (XVECEXP (y, 0, 0), offset);
4939 }
4940
4941 if (GET_CODE (x) != MEM)
4942 return orig_x;
4943
4944 x = XEXP (x, 0);
4945 if (GET_CODE (x) == PLUS
4946 && GET_CODE (XEXP (x, 1)) == CONST
4947 && GET_CODE (XEXP (x, 0)) == REG
4948 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
4949 {
4950 y = XEXP (XEXP (x, 1), 0);
4951 if (GET_CODE (y) == UNSPEC
4952 && XINT (y, 1) == UNSPEC_GOT)
4953 y = XVECEXP (y, 0, 0);
4954 else
4955 return orig_x;
4956 }
4957 else if (GET_CODE (x) == CONST)
4958 {
4959 /* Extract the symbol ref from:
4960 (mem:QI (const:DI (unspec:DI [(symbol_ref:DI ("foo"))]
4961 UNSPEC_PLT/GOTENT))) */
4962
4963 y = XEXP (x, 0);
4964 if (GET_CODE (y) == UNSPEC
4965 && (XINT (y, 1) == UNSPEC_GOTENT
4966 || XINT (y, 1) == UNSPEC_PLT))
4967 y = XVECEXP (y, 0, 0);
4968 else
4969 return orig_x;
4970 }
4971 else
4972 return orig_x;
4973
4974 if (GET_MODE (orig_x) != Pmode)
4975 {
4976 if (GET_MODE (orig_x) == BLKmode)
4977 return orig_x;
4978 y = lowpart_subreg (GET_MODE (orig_x), y, Pmode);
4979 if (y == NULL_RTX)
4980 return orig_x;
4981 }
4982 return y;
4983 }
4984
4985 /* Output operand OP to stdio stream FILE.
4986 OP is an address (register + offset) which is not used to address data;
4987 instead the rightmost bits are interpreted as the value. */
4988
4989 static void
4990 print_shift_count_operand (FILE *file, rtx op)
4991 {
4992 HOST_WIDE_INT offset;
4993 rtx base;
4994
4995 /* Extract base register and offset. */
4996 if (!s390_decompose_shift_count (op, &base, &offset))
4997 gcc_unreachable ();
4998
4999 /* Sanity check. */
5000 if (base)
5001 {
5002 gcc_assert (GET_CODE (base) == REG);
5003 gcc_assert (REGNO (base) < FIRST_PSEUDO_REGISTER);
5004 gcc_assert (REGNO_REG_CLASS (REGNO (base)) == ADDR_REGS);
5005 }
5006
5007 /* Offsets are restricted to twelve bits. */
5008 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset & ((1 << 12) - 1));
5009 if (base)
5010 fprintf (file, "(%s)", reg_names[REGNO (base)]);
5011 }
5012
5013 /* See 'get_some_local_dynamic_name'. */
5014
5015 static int
5016 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
5017 {
5018 rtx x = *px;
5019
5020 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
5021 {
5022 x = get_pool_constant (x);
5023 return for_each_rtx (&x, get_some_local_dynamic_name_1, 0);
5024 }
5025
5026 if (GET_CODE (x) == SYMBOL_REF
5027 && tls_symbolic_operand (x) == TLS_MODEL_LOCAL_DYNAMIC)
5028 {
5029 cfun->machine->some_ld_name = XSTR (x, 0);
5030 return 1;
5031 }
5032
5033 return 0;
5034 }
5035
5036 /* Locate some local-dynamic symbol still in use by this function
5037 so that we can print its name in local-dynamic base patterns. */
5038
5039 static const char *
5040 get_some_local_dynamic_name (void)
5041 {
5042 rtx insn;
5043
5044 if (cfun->machine->some_ld_name)
5045 return cfun->machine->some_ld_name;
5046
5047 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
5048 if (INSN_P (insn)
5049 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
5050 return cfun->machine->some_ld_name;
5051
5052 gcc_unreachable ();
5053 }
5054
5055 /* Output machine-dependent UNSPECs occurring in address constant X
5056 in assembler syntax to stdio stream FILE. Returns true if the
5057 constant X could be recognized, false otherwise. */
5058
5059 static bool
5060 s390_output_addr_const_extra (FILE *file, rtx x)
5061 {
5062 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
5063 switch (XINT (x, 1))
5064 {
5065 case UNSPEC_GOTENT:
5066 output_addr_const (file, XVECEXP (x, 0, 0));
5067 fprintf (file, "@GOTENT");
5068 return true;
5069 case UNSPEC_GOT:
5070 output_addr_const (file, XVECEXP (x, 0, 0));
5071 fprintf (file, "@GOT");
5072 return true;
5073 case UNSPEC_GOTOFF:
5074 output_addr_const (file, XVECEXP (x, 0, 0));
5075 fprintf (file, "@GOTOFF");
5076 return true;
5077 case UNSPEC_PLT:
5078 output_addr_const (file, XVECEXP (x, 0, 0));
5079 fprintf (file, "@PLT");
5080 return true;
5081 case UNSPEC_PLTOFF:
5082 output_addr_const (file, XVECEXP (x, 0, 0));
5083 fprintf (file, "@PLTOFF");
5084 return true;
5085 case UNSPEC_TLSGD:
5086 output_addr_const (file, XVECEXP (x, 0, 0));
5087 fprintf (file, "@TLSGD");
5088 return true;
5089 case UNSPEC_TLSLDM:
5090 assemble_name (file, get_some_local_dynamic_name ());
5091 fprintf (file, "@TLSLDM");
5092 return true;
5093 case UNSPEC_DTPOFF:
5094 output_addr_const (file, XVECEXP (x, 0, 0));
5095 fprintf (file, "@DTPOFF");
5096 return true;
5097 case UNSPEC_NTPOFF:
5098 output_addr_const (file, XVECEXP (x, 0, 0));
5099 fprintf (file, "@NTPOFF");
5100 return true;
5101 case UNSPEC_GOTNTPOFF:
5102 output_addr_const (file, XVECEXP (x, 0, 0));
5103 fprintf (file, "@GOTNTPOFF");
5104 return true;
5105 case UNSPEC_INDNTPOFF:
5106 output_addr_const (file, XVECEXP (x, 0, 0));
5107 fprintf (file, "@INDNTPOFF");
5108 return true;
5109 }
5110
5111 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 2)
5112 switch (XINT (x, 1))
5113 {
5114 case UNSPEC_POOL_OFFSET:
5115 x = gen_rtx_MINUS (GET_MODE (x), XVECEXP (x, 0, 0), XVECEXP (x, 0, 1));
5116 output_addr_const (file, x);
5117 return true;
5118 }
5119 return false;
5120 }
5121
5122 /* Output address operand ADDR in assembler syntax to
5123 stdio stream FILE. */
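/* For instance (illustrative): an address of the form
   (plus:DI (reg:DI 2 %r2) (const_int 8)) is printed as "8(%r2)", and
   one that also has an index register as "8(%r3,%r2)".  */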
5124
5125 void
5126 print_operand_address (FILE *file, rtx addr)
5127 {
5128 struct s390_address ad;
5129
5130 if (s390_symref_operand_p (addr, NULL, NULL))
5131 {
5132 if (!TARGET_Z10)
5133 {
5134 output_operand_lossage ("symbolic memory references are "
5135 "only supported on z10 or later");
5136 return;
5137 }
5138 output_addr_const (file, addr);
5139 return;
5140 }
5141
5142 if (!s390_decompose_address (addr, &ad)
5143 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5144 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
5145 output_operand_lossage ("cannot decompose address");
5146
5147 if (ad.disp)
5148 output_addr_const (file, ad.disp);
5149 else
5150 fprintf (file, "0");
5151
5152 if (ad.base && ad.indx)
5153 fprintf (file, "(%s,%s)", reg_names[REGNO (ad.indx)],
5154 reg_names[REGNO (ad.base)]);
5155 else if (ad.base)
5156 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
5157 }
5158
5159 /* Output operand X in assembler syntax to stdio stream FILE.
5160 CODE specified the format flag. The following format flags
5161 are recognized:
5162
5163 'C': print opcode suffix for branch condition.
5164 'D': print opcode suffix for inverse branch condition.
5165 'E': print opcode suffix for branch on index instruction.
5166 'J': print tls_load/tls_gdcall/tls_ldcall suffix.
5167 'G': print the size of the operand in bytes.
5168 'O': print only the displacement of a memory reference.
5169 'R': print only the base register of a memory reference.
5170 'S': print S-type memory reference (base+displacement).
5171 'N': print the second word of a DImode operand.
5172 'M': print the second word of a TImode operand.
5173 'Y': print shift count operand.
5174
5175 'b': print integer X as if it's an unsigned byte.
5176 'c': print integer X as if it's a signed byte.
5177 'x': print integer X as if it's an unsigned halfword.
5178 'h': print integer X as if it's a signed halfword.
5179 'i': print the first nonzero HImode part of X.
5180 'j': print the first HImode part unequal to -1 of X.
5181 'k': print the first nonzero SImode part of X.
5182 'm': print the first SImode part unequal to -1 of X.
5183 'o': print integer X as if it's an unsigned 32bit word. */
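/* A few examples of the integer modifiers (illustrative): for
   X == (const_int -1)

     'b' prints 255       'c' prints -1
     'x' prints 65535     'h' prints -1

   and for X == (const_int 0x12348765), 'x' prints 34661 while 'h'
   prints -30875 (the low halfword, zero- resp. sign-extended).  */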
5184
5185 void
5186 print_operand (FILE *file, rtx x, int code)
5187 {
5188 switch (code)
5189 {
5190 case 'C':
5191 fprintf (file, s390_branch_condition_mnemonic (x, FALSE));
5192 return;
5193
5194 case 'D':
5195 fprintf (file, s390_branch_condition_mnemonic (x, TRUE));
5196 return;
5197
5198 case 'E':
5199 if (GET_CODE (x) == LE)
5200 fprintf (file, "l");
5201 else if (GET_CODE (x) == GT)
5202 fprintf (file, "h");
5203 else
5204 output_operand_lossage ("invalid comparison operator "
5205 "for 'E' output modifier");
5206 return;
5207
5208 case 'J':
5209 if (GET_CODE (x) == SYMBOL_REF)
5210 {
5211 fprintf (file, "%s", ":tls_load:");
5212 output_addr_const (file, x);
5213 }
5214 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
5215 {
5216 fprintf (file, "%s", ":tls_gdcall:");
5217 output_addr_const (file, XVECEXP (x, 0, 0));
5218 }
5219 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM)
5220 {
5221 fprintf (file, "%s", ":tls_ldcall:");
5222 assemble_name (file, get_some_local_dynamic_name ());
5223 }
5224 else
5225 output_operand_lossage ("invalid reference for 'J' output modifier");
5226 return;
5227
5228 case 'G':
5229 fprintf (file, "%u", GET_MODE_SIZE (GET_MODE (x)));
5230 return;
5231
5232 case 'O':
5233 {
5234 struct s390_address ad;
5235 int ret;
5236
5237 if (!MEM_P (x))
5238 {
5239 output_operand_lossage ("memory reference expected for "
5240 "'O' output modifier");
5241 return;
5242 }
5243
5244 ret = s390_decompose_address (XEXP (x, 0), &ad);
5245
5246 if (!ret
5247 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5248 || ad.indx)
5249 {
5250 output_operand_lossage ("invalid address for 'O' output modifier");
5251 return;
5252 }
5253
5254 if (ad.disp)
5255 output_addr_const (file, ad.disp);
5256 else
5257 fprintf (file, "0");
5258 }
5259 return;
5260
5261 case 'R':
5262 {
5263 struct s390_address ad;
5264 int ret;
5265
5266 if (!MEM_P (x))
5267 {
5268 output_operand_lossage ("memory reference expected for "
5269 "'R' output modifier");
5270 return;
5271 }
5272
5273 ret = s390_decompose_address (XEXP (x, 0), &ad);
5274
5275 if (!ret
5276 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5277 || ad.indx)
5278 {
5279 output_operand_lossage ("invalid address for 'R' output modifier");
5280 return;
5281 }
5282
5283 if (ad.base)
5284 fprintf (file, "%s", reg_names[REGNO (ad.base)]);
5285 else
5286 fprintf (file, "0");
5287 }
5288 return;
5289
5290 case 'S':
5291 {
5292 struct s390_address ad;
5293 int ret;
5294
5295 if (!MEM_P (x))
5296 {
5297 output_operand_lossage ("memory reference expected for "
5298 "'S' output modifier");
5299 return;
5300 }
5301 ret = s390_decompose_address (XEXP (x, 0), &ad);
5302
5303 if (!ret
5304 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5305 || ad.indx)
5306 {
5307 output_operand_lossage ("invalid address for 'S' output modifier");
5308 return;
5309 }
5310
5311 if (ad.disp)
5312 output_addr_const (file, ad.disp);
5313 else
5314 fprintf (file, "0");
5315
5316 if (ad.base)
5317 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
5318 }
5319 return;
5320
5321 case 'N':
5322 if (GET_CODE (x) == REG)
5323 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
5324 else if (GET_CODE (x) == MEM)
5325 x = change_address (x, VOIDmode, plus_constant (XEXP (x, 0), 4));
5326 else
5327 output_operand_lossage ("register or memory expression expected "
5328 "for 'N' output modifier");
5329 break;
5330
5331 case 'M':
5332 if (GET_CODE (x) == REG)
5333 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
5334 else if (GET_CODE (x) == MEM)
5335 x = change_address (x, VOIDmode, plus_constant (XEXP (x, 0), 8));
5336 else
5337 output_operand_lossage ("register or memory expression expected "
5338 "for 'M' output modifier");
5339 break;
5340
5341 case 'Y':
5342 print_shift_count_operand (file, x);
5343 return;
5344 }
5345
5346 switch (GET_CODE (x))
5347 {
5348 case REG:
5349 fprintf (file, "%s", reg_names[REGNO (x)]);
5350 break;
5351
5352 case MEM:
5353 output_address (XEXP (x, 0));
5354 break;
5355
5356 case CONST:
5357 case CODE_LABEL:
5358 case LABEL_REF:
5359 case SYMBOL_REF:
5360 output_addr_const (file, x);
5361 break;
5362
5363 case CONST_INT:
5364 if (code == 'b')
5365 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xff);
5366 else if (code == 'c')
5367 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ((INTVAL (x) & 0xff) ^ 0x80) - 0x80);
5368 else if (code == 'x')
5369 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xffff);
5370 else if (code == 'h')
5371 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
5372 else if (code == 'i')
5373 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5374 s390_extract_part (x, HImode, 0));
5375 else if (code == 'j')
5376 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5377 s390_extract_part (x, HImode, -1));
5378 else if (code == 'k')
5379 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5380 s390_extract_part (x, SImode, 0));
5381 else if (code == 'm')
5382 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5383 s390_extract_part (x, SImode, -1));
5384 else if (code == 'o')
5385 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xffffffff);
5386 else
5387 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
5388 break;
5389
5390 case CONST_DOUBLE:
5391 gcc_assert (GET_MODE (x) == VOIDmode);
5392 if (code == 'b')
5393 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xff);
5394 else if (code == 'x')
5395 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xffff);
5396 else if (code == 'h')
5397 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5398 ((CONST_DOUBLE_LOW (x) & 0xffff) ^ 0x8000) - 0x8000);
5399 else
5400 {
5401 if (code == 0)
5402 output_operand_lossage ("invalid constant - try using "
5403 "an output modifier");
5404 else
5405 output_operand_lossage ("invalid constant for output modifier '%c'",
5406 code);
5407 }
5408 break;
5409
5410 default:
5411 if (code == 0)
5412 output_operand_lossage ("invalid expression - try using "
5413 "an output modifier");
5414 else
5415 output_operand_lossage ("invalid expression for output "
5416 "modifier '%c'", code);
5417 break;
5418 }
5419 }
5420
5421 /* Target hook for assembling integer objects. We need to define it
5422 here to work around a bug in some versions of GAS, which couldn't
5423 handle values smaller than INT_MIN when printed in decimal. */
5424
5425 static bool
5426 s390_assemble_integer (rtx x, unsigned int size, int aligned_p)
5427 {
5428 if (size == 8 && aligned_p
5429 && GET_CODE (x) == CONST_INT && INTVAL (x) < INT_MIN)
5430 {
5431 fprintf (asm_out_file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n",
5432 INTVAL (x));
5433 return true;
5434 }
5435 return default_assemble_integer (x, size, aligned_p);
5436 }
5437
5438 /* Returns true if register REGNO is used for forming
5439 a memory address in expression X. */
5440
5441 static bool
5442 reg_used_in_mem_p (int regno, rtx x)
5443 {
5444 enum rtx_code code = GET_CODE (x);
5445 int i, j;
5446 const char *fmt;
5447
5448 if (code == MEM)
5449 {
5450 if (refers_to_regno_p (regno, regno+1,
5451 XEXP (x, 0), 0))
5452 return true;
5453 }
5454 else if (code == SET
5455 && GET_CODE (SET_DEST (x)) == PC)
5456 {
5457 if (refers_to_regno_p (regno, regno+1,
5458 SET_SRC (x), 0))
5459 return true;
5460 }
5461
5462 fmt = GET_RTX_FORMAT (code);
5463 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5464 {
5465 if (fmt[i] == 'e'
5466 && reg_used_in_mem_p (regno, XEXP (x, i)))
5467 return true;
5468
5469 else if (fmt[i] == 'E')
5470 for (j = 0; j < XVECLEN (x, i); j++)
5471 if (reg_used_in_mem_p (regno, XVECEXP (x, i, j)))
5472 return true;
5473 }
5474 return false;
5475 }
5476
5477 /* Returns true if expression DEP_RTX sets an address register
5478 used by instruction INSN to address memory. */
5479
5480 static bool
5481 addr_generation_dependency_p (rtx dep_rtx, rtx insn)
5482 {
5483 rtx target, pat;
5484
5485 if (GET_CODE (dep_rtx) == INSN)
5486 dep_rtx = PATTERN (dep_rtx);
5487
5488 if (GET_CODE (dep_rtx) == SET)
5489 {
5490 target = SET_DEST (dep_rtx);
5491 if (GET_CODE (target) == STRICT_LOW_PART)
5492 target = XEXP (target, 0);
5493 while (GET_CODE (target) == SUBREG)
5494 target = SUBREG_REG (target);
5495
5496 if (GET_CODE (target) == REG)
5497 {
5498 int regno = REGNO (target);
5499
5500 if (s390_safe_attr_type (insn) == TYPE_LA)
5501 {
5502 pat = PATTERN (insn);
5503 if (GET_CODE (pat) == PARALLEL)
5504 {
5505 gcc_assert (XVECLEN (pat, 0) == 2);
5506 pat = XVECEXP (pat, 0, 0);
5507 }
5508 gcc_assert (GET_CODE (pat) == SET);
5509 return refers_to_regno_p (regno, regno+1, SET_SRC (pat), 0);
5510 }
5511 else if (get_attr_atype (insn) == ATYPE_AGEN)
5512 return reg_used_in_mem_p (regno, PATTERN (insn));
5513 }
5514 }
5515 return false;
5516 }
5517
5518 /* Return 1 if dep_insn sets a register used by insn in the agen unit. */
5519
5520 int
5521 s390_agen_dep_p (rtx dep_insn, rtx insn)
5522 {
5523 rtx dep_rtx = PATTERN (dep_insn);
5524 int i;
5525
5526 if (GET_CODE (dep_rtx) == SET
5527 && addr_generation_dependency_p (dep_rtx, insn))
5528 return 1;
5529 else if (GET_CODE (dep_rtx) == PARALLEL)
5530 {
5531 for (i = 0; i < XVECLEN (dep_rtx, 0); i++)
5532 {
5533 if (addr_generation_dependency_p (XVECEXP (dep_rtx, 0, i), insn))
5534 return 1;
5535 }
5536 }
5537 return 0;
5538 }
5539
5540
5541 /* A C statement (sans semicolon) to update the integer scheduling priority
5542 INSN_PRIORITY (INSN). Increase the priority to execute the INSN earlier;
5543 reduce the priority to execute INSN later. Do not define this macro if
5544 you do not need to adjust the scheduling priorities of insns.
5545
5546 A STD instruction should be scheduled earlier,
5547 in order to use the bypass. */
5548 static int
5549 s390_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
5550 {
5551 if (! INSN_P (insn))
5552 return priority;
5553
5554 if (s390_tune != PROCESSOR_2084_Z990
5555 && s390_tune != PROCESSOR_2094_Z9_109
5556 && s390_tune != PROCESSOR_2097_Z10
5557 && s390_tune != PROCESSOR_2817_Z196)
5558 return priority;
5559
5560 switch (s390_safe_attr_type (insn))
5561 {
5562 case TYPE_FSTOREDF:
5563 case TYPE_FSTORESF:
5564 priority = priority << 3;
5565 break;
5566 case TYPE_STORE:
5567 case TYPE_STM:
5568 priority = priority << 1;
5569 break;
5570 default:
5571 break;
5572 }
5573 return priority;
5574 }
5575
5576
5577 /* The number of instructions that can be issued per cycle. */
5578
5579 static int
5580 s390_issue_rate (void)
5581 {
5582 switch (s390_tune)
5583 {
5584 case PROCESSOR_2084_Z990:
5585 case PROCESSOR_2094_Z9_109:
5586 case PROCESSOR_2817_Z196:
5587 return 3;
5588 case PROCESSOR_2097_Z10:
5589 return 2;
5590 default:
5591 return 1;
5592 }
5593 }
5594
5595 static int
5596 s390_first_cycle_multipass_dfa_lookahead (void)
5597 {
5598 return 4;
5599 }
5600
5601 /* Annotate every literal pool reference in X by an UNSPEC_LTREF expression.
5602 Fix up MEMs as required. */
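/* Sketch of the rewrite (the register number is only illustrative):

     (mem:SI (symbol_ref:SI ("*.LC5")))

   becomes

     (mem:SI (unspec:SI [(symbol_ref:SI ("*.LC5"))
                         (reg:SI 13)] UNSPEC_LTREF))

   so the dependency on the literal pool base register is explicit.  */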
5603
5604 static void
5605 annotate_constant_pool_refs (rtx *x)
5606 {
5607 int i, j;
5608 const char *fmt;
5609
5610 gcc_assert (GET_CODE (*x) != SYMBOL_REF
5611 || !CONSTANT_POOL_ADDRESS_P (*x));
5612
5613 /* Literal pool references can only occur inside a MEM ... */
5614 if (GET_CODE (*x) == MEM)
5615 {
5616 rtx memref = XEXP (*x, 0);
5617
5618 if (GET_CODE (memref) == SYMBOL_REF
5619 && CONSTANT_POOL_ADDRESS_P (memref))
5620 {
5621 rtx base = cfun->machine->base_reg;
5622 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, memref, base),
5623 UNSPEC_LTREF);
5624
5625 *x = replace_equiv_address (*x, addr);
5626 return;
5627 }
5628
5629 if (GET_CODE (memref) == CONST
5630 && GET_CODE (XEXP (memref, 0)) == PLUS
5631 && GET_CODE (XEXP (XEXP (memref, 0), 1)) == CONST_INT
5632 && GET_CODE (XEXP (XEXP (memref, 0), 0)) == SYMBOL_REF
5633 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (memref, 0), 0)))
5634 {
5635 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (memref, 0), 1));
5636 rtx sym = XEXP (XEXP (memref, 0), 0);
5637 rtx base = cfun->machine->base_reg;
5638 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5639 UNSPEC_LTREF);
5640
5641 *x = replace_equiv_address (*x, plus_constant (addr, off));
5642 return;
5643 }
5644 }
5645
5646 /* ... or a load-address type pattern. */
5647 if (GET_CODE (*x) == SET)
5648 {
5649 rtx addrref = SET_SRC (*x);
5650
5651 if (GET_CODE (addrref) == SYMBOL_REF
5652 && CONSTANT_POOL_ADDRESS_P (addrref))
5653 {
5654 rtx base = cfun->machine->base_reg;
5655 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addrref, base),
5656 UNSPEC_LTREF);
5657
5658 SET_SRC (*x) = addr;
5659 return;
5660 }
5661
5662 if (GET_CODE (addrref) == CONST
5663 && GET_CODE (XEXP (addrref, 0)) == PLUS
5664 && GET_CODE (XEXP (XEXP (addrref, 0), 1)) == CONST_INT
5665 && GET_CODE (XEXP (XEXP (addrref, 0), 0)) == SYMBOL_REF
5666 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (addrref, 0), 0)))
5667 {
5668 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (addrref, 0), 1));
5669 rtx sym = XEXP (XEXP (addrref, 0), 0);
5670 rtx base = cfun->machine->base_reg;
5671 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5672 UNSPEC_LTREF);
5673
5674 SET_SRC (*x) = plus_constant (addr, off);
5675 return;
5676 }
5677 }
5678
5679 /* Annotate LTREL_BASE as well. */
5680 if (GET_CODE (*x) == UNSPEC
5681 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
5682 {
5683 rtx base = cfun->machine->base_reg;
5684 *x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XVECEXP (*x, 0, 0), base),
5685 UNSPEC_LTREL_BASE);
5686 return;
5687 }
5688
5689 fmt = GET_RTX_FORMAT (GET_CODE (*x));
5690 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
5691 {
5692 if (fmt[i] == 'e')
5693 {
5694 annotate_constant_pool_refs (&XEXP (*x, i));
5695 }
5696 else if (fmt[i] == 'E')
5697 {
5698 for (j = 0; j < XVECLEN (*x, i); j++)
5699 annotate_constant_pool_refs (&XVECEXP (*x, i, j));
5700 }
5701 }
5702 }
5703
5704 /* Split all branches that exceed the maximum distance.
5705 Returns true if this created a new literal pool entry. */
5706
5707 static int
5708 s390_split_branches (void)
5709 {
5710 rtx temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
5711 int new_literal = 0, ret;
5712 rtx insn, pat, tmp, target;
5713 rtx *label;
5714
5715 /* We need correct insn addresses. */
5716
5717 shorten_branches (get_insns ());
5718
5719 /* Find all branches that exceed 64KB, and split them. */
5720
5721 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5722 {
5723 if (GET_CODE (insn) != JUMP_INSN)
5724 continue;
5725
5726 pat = PATTERN (insn);
5727 if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 2)
5728 pat = XVECEXP (pat, 0, 0);
5729 if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
5730 continue;
5731
5732 if (GET_CODE (SET_SRC (pat)) == LABEL_REF)
5733 {
5734 label = &SET_SRC (pat);
5735 }
5736 else if (GET_CODE (SET_SRC (pat)) == IF_THEN_ELSE)
5737 {
5738 if (GET_CODE (XEXP (SET_SRC (pat), 1)) == LABEL_REF)
5739 label = &XEXP (SET_SRC (pat), 1);
5740 else if (GET_CODE (XEXP (SET_SRC (pat), 2)) == LABEL_REF)
5741 label = &XEXP (SET_SRC (pat), 2);
5742 else
5743 continue;
5744 }
5745 else
5746 continue;
5747
5748 if (get_attr_length (insn) <= 4)
5749 continue;
5750
5751 /* We are going to use the return register as a scratch register;
5752 make sure it will be saved/restored by the prologue/epilogue. */
5753 cfun_frame_layout.save_return_addr_p = 1;
5754
5755 if (!flag_pic)
5756 {
5757 new_literal = 1;
5758 tmp = force_const_mem (Pmode, *label);
5759 tmp = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, tmp), insn);
5760 INSN_ADDRESSES_NEW (tmp, -1);
5761 annotate_constant_pool_refs (&PATTERN (tmp));
5762
5763 target = temp_reg;
5764 }
5765 else
5766 {
5767 new_literal = 1;
5768 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, *label),
5769 UNSPEC_LTREL_OFFSET);
5770 target = gen_rtx_CONST (Pmode, target);
5771 target = force_const_mem (Pmode, target);
5772 tmp = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, target), insn);
5773 INSN_ADDRESSES_NEW (tmp, -1);
5774 annotate_constant_pool_refs (&PATTERN (tmp));
5775
5776 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XEXP (target, 0),
5777 cfun->machine->base_reg),
5778 UNSPEC_LTREL_BASE);
5779 target = gen_rtx_PLUS (Pmode, temp_reg, target);
5780 }
5781
5782 ret = validate_change (insn, label, target, 0);
5783 gcc_assert (ret);
5784 }
5785
5786 return new_literal;
5787 }
5788
5789
5790 /* Find an annotated literal pool symbol referenced in RTX X,
5791 and store it at REF. Will abort if X contains references to
5792 more than one such pool symbol; multiple references to the same
5793 symbol are allowed, however.
5794
5795 The rtx pointed to by REF must be initialized to NULL_RTX
5796 by the caller before calling this routine. */
5797
5798 static void
5799 find_constant_pool_ref (rtx x, rtx *ref)
5800 {
5801 int i, j;
5802 const char *fmt;
5803
5804 /* Ignore LTREL_BASE references. */
5805 if (GET_CODE (x) == UNSPEC
5806 && XINT (x, 1) == UNSPEC_LTREL_BASE)
5807 return;
5808 /* Likewise POOL_ENTRY insns. */
5809 if (GET_CODE (x) == UNSPEC_VOLATILE
5810 && XINT (x, 1) == UNSPECV_POOL_ENTRY)
5811 return;
5812
5813 gcc_assert (GET_CODE (x) != SYMBOL_REF
5814 || !CONSTANT_POOL_ADDRESS_P (x));
5815
5816 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_LTREF)
5817 {
5818 rtx sym = XVECEXP (x, 0, 0);
5819 gcc_assert (GET_CODE (sym) == SYMBOL_REF
5820 && CONSTANT_POOL_ADDRESS_P (sym));
5821
5822 if (*ref == NULL_RTX)
5823 *ref = sym;
5824 else
5825 gcc_assert (*ref == sym);
5826
5827 return;
5828 }
5829
5830 fmt = GET_RTX_FORMAT (GET_CODE (x));
5831 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5832 {
5833 if (fmt[i] == 'e')
5834 {
5835 find_constant_pool_ref (XEXP (x, i), ref);
5836 }
5837 else if (fmt[i] == 'E')
5838 {
5839 for (j = 0; j < XVECLEN (x, i); j++)
5840 find_constant_pool_ref (XVECEXP (x, i, j), ref);
5841 }
5842 }
5843 }
5844
5845 /* Replace every reference to the annotated literal pool
5846 symbol REF in X by its base plus OFFSET. */
5847
5848 static void
5849 replace_constant_pool_ref (rtx *x, rtx ref, rtx offset)
5850 {
5851 int i, j;
5852 const char *fmt;
5853
5854 gcc_assert (*x != ref);
5855
5856 if (GET_CODE (*x) == UNSPEC
5857 && XINT (*x, 1) == UNSPEC_LTREF
5858 && XVECEXP (*x, 0, 0) == ref)
5859 {
5860 *x = gen_rtx_PLUS (Pmode, XVECEXP (*x, 0, 1), offset);
5861 return;
5862 }
5863
5864 if (GET_CODE (*x) == PLUS
5865 && GET_CODE (XEXP (*x, 1)) == CONST_INT
5866 && GET_CODE (XEXP (*x, 0)) == UNSPEC
5867 && XINT (XEXP (*x, 0), 1) == UNSPEC_LTREF
5868 && XVECEXP (XEXP (*x, 0), 0, 0) == ref)
5869 {
5870 rtx addr = gen_rtx_PLUS (Pmode, XVECEXP (XEXP (*x, 0), 0, 1), offset);
5871 *x = plus_constant (addr, INTVAL (XEXP (*x, 1)));
5872 return;
5873 }
5874
5875 fmt = GET_RTX_FORMAT (GET_CODE (*x));
5876 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
5877 {
5878 if (fmt[i] == 'e')
5879 {
5880 replace_constant_pool_ref (&XEXP (*x, i), ref, offset);
5881 }
5882 else if (fmt[i] == 'E')
5883 {
5884 for (j = 0; j < XVECLEN (*x, i); j++)
5885 replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, offset);
5886 }
5887 }
5888 }
5889
5890 /* Check whether X contains an UNSPEC_LTREL_BASE.
5891 Return its constant pool symbol if found, NULL_RTX otherwise. */
5892
5893 static rtx
5894 find_ltrel_base (rtx x)
5895 {
5896 int i, j;
5897 const char *fmt;
5898
5899 if (GET_CODE (x) == UNSPEC
5900 && XINT (x, 1) == UNSPEC_LTREL_BASE)
5901 return XVECEXP (x, 0, 0);
5902
5903 fmt = GET_RTX_FORMAT (GET_CODE (x));
5904 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5905 {
5906 if (fmt[i] == 'e')
5907 {
5908 rtx fnd = find_ltrel_base (XEXP (x, i));
5909 if (fnd)
5910 return fnd;
5911 }
5912 else if (fmt[i] == 'E')
5913 {
5914 for (j = 0; j < XVECLEN (x, i); j++)
5915 {
5916 rtx fnd = find_ltrel_base (XVECEXP (x, i, j));
5917 if (fnd)
5918 return fnd;
5919 }
5920 }
5921 }
5922
5923 return NULL_RTX;
5924 }
5925
5926 /* Replace any occurrence of UNSPEC_LTREL_BASE in X with its base. */
5927
5928 static void
5929 replace_ltrel_base (rtx *x)
5930 {
5931 int i, j;
5932 const char *fmt;
5933
5934 if (GET_CODE (*x) == UNSPEC
5935 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
5936 {
5937 *x = XVECEXP (*x, 0, 1);
5938 return;
5939 }
5940
5941 fmt = GET_RTX_FORMAT (GET_CODE (*x));
5942 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
5943 {
5944 if (fmt[i] == 'e')
5945 {
5946 replace_ltrel_base (&XEXP (*x, i));
5947 }
5948 else if (fmt[i] == 'E')
5949 {
5950 for (j = 0; j < XVECLEN (*x, i); j++)
5951 replace_ltrel_base (&XVECEXP (*x, i, j));
5952 }
5953 }
5954 }
5955
5956
5957 /* We keep a list of constants which we have to add to internal
5958 constant tables in the middle of large functions. */
5959
5960 #define NR_C_MODES 11
5961 enum machine_mode constant_modes[NR_C_MODES] =
5962 {
5963 TFmode, TImode, TDmode,
5964 DFmode, DImode, DDmode,
5965 SFmode, SImode, SDmode,
5966 HImode,
5967 QImode
5968 };
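/* Note: the modes above are ordered by decreasing size and alignment
   requirement (16-byte TFmode/TImode/TDmode down to 1-byte QImode), so
   that s390_dump_pool, which walks this array in order, emits the most
   strictly aligned constants first and keeps every pool entry naturally
   aligned.  */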
5969
5970 struct constant
5971 {
5972 struct constant *next;
5973 rtx value;
5974 rtx label;
5975 };
5976
5977 struct constant_pool
5978 {
5979 struct constant_pool *next;
5980 rtx first_insn;
5981 rtx pool_insn;
5982 bitmap insns;
5983 rtx emit_pool_after;
5984
5985 struct constant *constants[NR_C_MODES];
5986 struct constant *execute;
5987 rtx label;
5988 int size;
5989 };
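/* Illustrative sketch of how these structures are used: in the main-pool
   case a single constant_pool collects all literals of the function,
   while in the chunkified case (s390_chunkify_start below) a linked list
   of pools is built, each one covering the range of insns recorded in its
   INSNS bitmap and holding per-mode lists of (value, label) pairs plus
   any execute target templates.  */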
5990
5991 /* Allocate new constant_pool structure. */
5992
5993 static struct constant_pool *
5994 s390_alloc_pool (void)
5995 {
5996 struct constant_pool *pool;
5997 int i;
5998
5999 pool = (struct constant_pool *) xmalloc (sizeof *pool);
6000 pool->next = NULL;
6001 for (i = 0; i < NR_C_MODES; i++)
6002 pool->constants[i] = NULL;
6003
6004 pool->execute = NULL;
6005 pool->label = gen_label_rtx ();
6006 pool->first_insn = NULL_RTX;
6007 pool->pool_insn = NULL_RTX;
6008 pool->insns = BITMAP_ALLOC (NULL);
6009 pool->size = 0;
6010 pool->emit_pool_after = NULL_RTX;
6011
6012 return pool;
6013 }
6014
6015 /* Create new constant pool covering instructions starting at INSN
6016 and chain it to the end of POOL_LIST. */
6017
6018 static struct constant_pool *
6019 s390_start_pool (struct constant_pool **pool_list, rtx insn)
6020 {
6021 struct constant_pool *pool, **prev;
6022
6023 pool = s390_alloc_pool ();
6024 pool->first_insn = insn;
6025
6026 for (prev = pool_list; *prev; prev = &(*prev)->next)
6027 ;
6028 *prev = pool;
6029
6030 return pool;
6031 }
6032
6033 /* End range of instructions covered by POOL at INSN and emit
6034 placeholder insn representing the pool. */
6035
6036 static void
6037 s390_end_pool (struct constant_pool *pool, rtx insn)
6038 {
6039 rtx pool_size = GEN_INT (pool->size + 8 /* alignment slop */);
6040
6041 if (!insn)
6042 insn = get_last_insn ();
6043
6044 pool->pool_insn = emit_insn_after (gen_pool (pool_size), insn);
6045 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6046 }
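/* The 8 bytes of "alignment slop" above presumably cover the padding that
   s390_dump_pool may emit to align the pool (see the gen_pool_align calls
   there), so the size recorded in the placeholder insn stays a safe upper
   bound.  */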
6047
6048 /* Add INSN to the list of insns covered by POOL. */
6049
6050 static void
6051 s390_add_pool_insn (struct constant_pool *pool, rtx insn)
6052 {
6053 bitmap_set_bit (pool->insns, INSN_UID (insn));
6054 }
6055
6056 /* Return pool out of POOL_LIST that covers INSN. */
6057
6058 static struct constant_pool *
6059 s390_find_pool (struct constant_pool *pool_list, rtx insn)
6060 {
6061 struct constant_pool *pool;
6062
6063 for (pool = pool_list; pool; pool = pool->next)
6064 if (bitmap_bit_p (pool->insns, INSN_UID (insn)))
6065 break;
6066
6067 return pool;
6068 }
6069
6070 /* Add constant VAL of mode MODE to the constant pool POOL. */
6071
6072 static void
6073 s390_add_constant (struct constant_pool *pool, rtx val, enum machine_mode mode)
6074 {
6075 struct constant *c;
6076 int i;
6077
6078 for (i = 0; i < NR_C_MODES; i++)
6079 if (constant_modes[i] == mode)
6080 break;
6081 gcc_assert (i != NR_C_MODES);
6082
6083 for (c = pool->constants[i]; c != NULL; c = c->next)
6084 if (rtx_equal_p (val, c->value))
6085 break;
6086
6087 if (c == NULL)
6088 {
6089 c = (struct constant *) xmalloc (sizeof *c);
6090 c->value = val;
6091 c->label = gen_label_rtx ();
6092 c->next = pool->constants[i];
6093 pool->constants[i] = c;
6094 pool->size += GET_MODE_SIZE (mode);
6095 }
6096 }
6097
6098 /* Return an rtx that represents the offset of X from the start of
6099 pool POOL. */
6100
6101 static rtx
6102 s390_pool_offset (struct constant_pool *pool, rtx x)
6103 {
6104 rtx label;
6105
6106 label = gen_rtx_LABEL_REF (GET_MODE (x), pool->label);
6107 x = gen_rtx_UNSPEC (GET_MODE (x), gen_rtvec (2, x, label),
6108 UNSPEC_POOL_OFFSET);
6109 return gen_rtx_CONST (GET_MODE (x), x);
6110 }
6111
6112 /* Find constant VAL of mode MODE in the constant pool POOL.
6113 Return an RTX describing the distance from the start of
6114 the pool to the location of the new constant. */
6115
6116 static rtx
6117 s390_find_constant (struct constant_pool *pool, rtx val,
6118 enum machine_mode mode)
6119 {
6120 struct constant *c;
6121 int i;
6122
6123 for (i = 0; i < NR_C_MODES; i++)
6124 if (constant_modes[i] == mode)
6125 break;
6126 gcc_assert (i != NR_C_MODES);
6127
6128 for (c = pool->constants[i]; c != NULL; c = c->next)
6129 if (rtx_equal_p (val, c->value))
6130 break;
6131
6132 gcc_assert (c);
6133
6134 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
6135 }
6136
6137 /* Check whether INSN is an execute. Return the label_ref to its
6138 execute target template if so, NULL_RTX otherwise. */
6139
6140 static rtx
6141 s390_execute_label (rtx insn)
6142 {
6143 if (GET_CODE (insn) == INSN
6144 && GET_CODE (PATTERN (insn)) == PARALLEL
6145 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == UNSPEC
6146 && XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_EXECUTE)
6147 return XVECEXP (XVECEXP (PATTERN (insn), 0, 0), 0, 2);
6148
6149 return NULL_RTX;
6150 }
6151
6152 /* Add execute target for INSN to the constant pool POOL. */
6153
6154 static void
6155 s390_add_execute (struct constant_pool *pool, rtx insn)
6156 {
6157 struct constant *c;
6158
6159 for (c = pool->execute; c != NULL; c = c->next)
6160 if (INSN_UID (insn) == INSN_UID (c->value))
6161 break;
6162
6163 if (c == NULL)
6164 {
6165 c = (struct constant *) xmalloc (sizeof *c);
6166 c->value = insn;
6167 c->label = gen_label_rtx ();
6168 c->next = pool->execute;
6169 pool->execute = c;
6170 pool->size += 6;
6171 }
6172 }
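/* Each execute target template is accounted for with 6 bytes, the maximum
   length of a single S/390 instruction (instructions are 2, 4 or 6 bytes
   long), which keeps the size estimate conservative whatever the actual
   template turns out to be.  */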
6173
6174 /* Find execute target for INSN in the constant pool POOL.
6175 Return an RTX describing the distance from the start of
6176 the pool to the location of the execute target. */
6177
6178 static rtx
6179 s390_find_execute (struct constant_pool *pool, rtx insn)
6180 {
6181 struct constant *c;
6182
6183 for (c = pool->execute; c != NULL; c = c->next)
6184 if (INSN_UID (insn) == INSN_UID (c->value))
6185 break;
6186
6187 gcc_assert (c);
6188
6189 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
6190 }
6191
6192 /* For an execute INSN, extract the execute target template. */
6193
6194 static rtx
6195 s390_execute_target (rtx insn)
6196 {
6197 rtx pattern = PATTERN (insn);
6198 gcc_assert (s390_execute_label (insn));
6199
6200 if (XVECLEN (pattern, 0) == 2)
6201 {
6202 pattern = copy_rtx (XVECEXP (pattern, 0, 1));
6203 }
6204 else
6205 {
6206 rtvec vec = rtvec_alloc (XVECLEN (pattern, 0) - 1);
6207 int i;
6208
6209 for (i = 0; i < XVECLEN (pattern, 0) - 1; i++)
6210 RTVEC_ELT (vec, i) = copy_rtx (XVECEXP (pattern, 0, i + 1));
6211
6212 pattern = gen_rtx_PARALLEL (VOIDmode, vec);
6213 }
6214
6215 return pattern;
6216 }
6217
6218 /* Indicate that INSN cannot be duplicated. This is the case for
6219 execute insns that carry a unique label. */
6220
6221 static bool
6222 s390_cannot_copy_insn_p (rtx insn)
6223 {
6224 rtx label = s390_execute_label (insn);
6225 return label && label != const0_rtx;
6226 }
6227
6228 /* Dump out the constants in POOL. If REMOTE_LABEL is true,
6229 do not emit the pool base label. */
6230
6231 static void
6232 s390_dump_pool (struct constant_pool *pool, bool remote_label)
6233 {
6234 struct constant *c;
6235 rtx insn = pool->pool_insn;
6236 int i;
6237
6238 /* Switch to rodata section. */
6239 if (TARGET_CPU_ZARCH)
6240 {
6241 insn = emit_insn_after (gen_pool_section_start (), insn);
6242 INSN_ADDRESSES_NEW (insn, -1);
6243 }
6244
6245 /* Ensure minimum pool alignment. */
6246 if (TARGET_CPU_ZARCH)
6247 insn = emit_insn_after (gen_pool_align (GEN_INT (8)), insn);
6248 else
6249 insn = emit_insn_after (gen_pool_align (GEN_INT (4)), insn);
6250 INSN_ADDRESSES_NEW (insn, -1);
6251
6252 /* Emit pool base label. */
6253 if (!remote_label)
6254 {
6255 insn = emit_label_after (pool->label, insn);
6256 INSN_ADDRESSES_NEW (insn, -1);
6257 }
6258
6259 /* Dump constants in descending alignment requirement order,
6260 ensuring proper alignment for every constant. */
6261 for (i = 0; i < NR_C_MODES; i++)
6262 for (c = pool->constants[i]; c; c = c->next)
6263 {
6264 /* Convert UNSPEC_LTREL_OFFSET unspecs to pool-relative references. */
6265 rtx value = copy_rtx (c->value);
6266 if (GET_CODE (value) == CONST
6267 && GET_CODE (XEXP (value, 0)) == UNSPEC
6268 && XINT (XEXP (value, 0), 1) == UNSPEC_LTREL_OFFSET
6269 && XVECLEN (XEXP (value, 0), 0) == 1)
6270 value = s390_pool_offset (pool, XVECEXP (XEXP (value, 0), 0, 0));
6271
6272 insn = emit_label_after (c->label, insn);
6273 INSN_ADDRESSES_NEW (insn, -1);
6274
6275 value = gen_rtx_UNSPEC_VOLATILE (constant_modes[i],
6276 gen_rtvec (1, value),
6277 UNSPECV_POOL_ENTRY);
6278 insn = emit_insn_after (value, insn);
6279 INSN_ADDRESSES_NEW (insn, -1);
6280 }
6281
6282 /* Ensure minimum alignment for instructions. */
6283 insn = emit_insn_after (gen_pool_align (GEN_INT (2)), insn);
6284 INSN_ADDRESSES_NEW (insn, -1);
6285
6286 /* Output in-pool execute template insns. */
6287 for (c = pool->execute; c; c = c->next)
6288 {
6289 insn = emit_label_after (c->label, insn);
6290 INSN_ADDRESSES_NEW (insn, -1);
6291
6292 insn = emit_insn_after (s390_execute_target (c->value), insn);
6293 INSN_ADDRESSES_NEW (insn, -1);
6294 }
6295
6296 /* Switch back to previous section. */
6297 if (TARGET_CPU_ZARCH)
6298 {
6299 insn = emit_insn_after (gen_pool_section_end (), insn);
6300 INSN_ADDRESSES_NEW (insn, -1);
6301 }
6302
6303 insn = emit_barrier_after (insn);
6304 INSN_ADDRESSES_NEW (insn, -1);
6305
6306 /* Remove placeholder insn. */
6307 remove_insn (pool->pool_insn);
6308 }
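/* A minimal sketch of what s390_dump_pool emits, in order (label names
   are illustrative only):

       <.rodata section start>          (zarch only)
       <align 8 on zarch, 4 otherwise>
   .LPOOL:                              (pool base label, unless remote)
   .LC0:   <constant>                   (most strictly aligned modes first)
   .LC1:   <constant>
        ...
       <align 2>
   .LEX0:  <execute target template>
        ...
       <.rodata section end>            (zarch only)
       <barrier>                                                          */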
6309
6310 /* Free all memory used by POOL. */
6311
6312 static void
6313 s390_free_pool (struct constant_pool *pool)
6314 {
6315 struct constant *c, *next;
6316 int i;
6317
6318 for (i = 0; i < NR_C_MODES; i++)
6319 for (c = pool->constants[i]; c; c = next)
6320 {
6321 next = c->next;
6322 free (c);
6323 }
6324
6325 for (c = pool->execute; c; c = next)
6326 {
6327 next = c->next;
6328 free (c);
6329 }
6330
6331 BITMAP_FREE (pool->insns);
6332 free (pool);
6333 }
6334
6335
6336 /* Collect main literal pool. Return NULL on overflow. */
6337
6338 static struct constant_pool *
6339 s390_mainpool_start (void)
6340 {
6341 struct constant_pool *pool;
6342 rtx insn;
6343
6344 pool = s390_alloc_pool ();
6345
6346 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6347 {
6348 if (GET_CODE (insn) == INSN
6349 && GET_CODE (PATTERN (insn)) == SET
6350 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC_VOLATILE
6351 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPECV_MAIN_POOL)
6352 {
6353 gcc_assert (!pool->pool_insn);
6354 pool->pool_insn = insn;
6355 }
6356
6357 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
6358 {
6359 s390_add_execute (pool, insn);
6360 }
6361 else if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6362 {
6363 rtx pool_ref = NULL_RTX;
6364 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6365 if (pool_ref)
6366 {
6367 rtx constant = get_pool_constant (pool_ref);
6368 enum machine_mode mode = get_pool_mode (pool_ref);
6369 s390_add_constant (pool, constant, mode);
6370 }
6371 }
6372
6373	      /* If hot/cold partitioning is enabled, we have to make sure that
6374		 the literal pool is emitted in the same section where the
6375		 initialization of the literal pool base pointer takes place.
6376	 emit_pool_after is only used in the non-overflow case on
6377	 non-zSeries CPUs, where we can emit the literal pool at the end
6378	 of the function body within the text section.  */
6379 if (NOTE_P (insn)
6380 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS
6381 && !pool->emit_pool_after)
6382 pool->emit_pool_after = PREV_INSN (insn);
6383 }
6384
6385 gcc_assert (pool->pool_insn || pool->size == 0);
6386
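  /* A pool of 4096 bytes or more can no longer be addressed in full with
     the 12-bit unsigned displacement of base + displacement addressing,
     so such functions fall back to the chunkified pools instead.  */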
6387 if (pool->size >= 4096)
6388 {
6389 /* We're going to chunkify the pool, so remove the main
6390 pool placeholder insn. */
6391 remove_insn (pool->pool_insn);
6392
6393 s390_free_pool (pool);
6394 pool = NULL;
6395 }
6396
6397	  /* If the function ends with the section where the literal pool
6398	     should be emitted, set the marker to its end.  */
6399 if (pool && !pool->emit_pool_after)
6400 pool->emit_pool_after = get_last_insn ();
6401
6402 return pool;
6403 }
6404
6405 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6406 Modify the current function to output the pool constants as well as
6407 the pool register setup instruction. */
6408
6409 static void
6410 s390_mainpool_finish (struct constant_pool *pool)
6411 {
6412 rtx base_reg = cfun->machine->base_reg;
6413 rtx insn;
6414
6415 /* If the pool is empty, we're done. */
6416 if (pool->size == 0)
6417 {
6418 /* We don't actually need a base register after all. */
6419 cfun->machine->base_reg = NULL_RTX;
6420
6421 if (pool->pool_insn)
6422 remove_insn (pool->pool_insn);
6423 s390_free_pool (pool);
6424 return;
6425 }
6426
6427 /* We need correct insn addresses. */
6428 shorten_branches (get_insns ());
6429
6430 /* On zSeries, we use a LARL to load the pool register. The pool is
6431 located in the .rodata section, so we emit it after the function. */
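  /* Roughly, the code generated in this case looks like (register and
     label names are illustrative only):

         larl  %r13,.LPOOL     # main_base_64: load the pool address
         ...                   # pool references rewritten as base + offset

     with the pool itself dumped after the last insn of the function.  */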
6432 if (TARGET_CPU_ZARCH)
6433 {
6434 insn = gen_main_base_64 (base_reg, pool->label);
6435 insn = emit_insn_after (insn, pool->pool_insn);
6436 INSN_ADDRESSES_NEW (insn, -1);
6437 remove_insn (pool->pool_insn);
6438
6439 insn = get_last_insn ();
6440 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6441 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6442
6443 s390_dump_pool (pool, 0);
6444 }
6445
6446 /* On S/390, if the total size of the function's code plus literal pool
6447 does not exceed 4096 bytes, we use BASR to set up a function base
6448 pointer, and emit the literal pool at the end of the function. */
6449 else if (INSN_ADDRESSES (INSN_UID (pool->emit_pool_after))
6450 + pool->size + 8 /* alignment slop */ < 4096)
6451 {
6452 insn = gen_main_base_31_small (base_reg, pool->label);
6453 insn = emit_insn_after (insn, pool->pool_insn);
6454 INSN_ADDRESSES_NEW (insn, -1);
6455 remove_insn (pool->pool_insn);
6456
6457 insn = emit_label_after (pool->label, insn);
6458 INSN_ADDRESSES_NEW (insn, -1);
6459
6460 /* emit_pool_after will be set by s390_mainpool_start to the
6461 last insn of the section where the literal pool should be
6462 emitted. */
6463 insn = pool->emit_pool_after;
6464
6465 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6466 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6467
6468 s390_dump_pool (pool, 1);
6469 }
6470
6471 /* Otherwise, we emit an inline literal pool and use BASR to branch
6472 over it, setting up the pool register at the same time. */
6473 else
6474 {
6475 rtx pool_end = gen_label_rtx ();
6476
6477 insn = gen_main_base_31_large (base_reg, pool->label, pool_end);
6478 insn = emit_insn_after (insn, pool->pool_insn);
6479 INSN_ADDRESSES_NEW (insn, -1);
6480 remove_insn (pool->pool_insn);
6481
6482 insn = emit_label_after (pool->label, insn);
6483 INSN_ADDRESSES_NEW (insn, -1);
6484
6485 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6486 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6487
6488 insn = emit_label_after (pool_end, pool->pool_insn);
6489 INSN_ADDRESSES_NEW (insn, -1);
6490
6491 s390_dump_pool (pool, 1);
6492 }
6493
6494
6495 /* Replace all literal pool references. */
6496
6497 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6498 {
6499 if (INSN_P (insn))
6500 replace_ltrel_base (&PATTERN (insn));
6501
6502 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6503 {
6504 rtx addr, pool_ref = NULL_RTX;
6505 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6506 if (pool_ref)
6507 {
6508 if (s390_execute_label (insn))
6509 addr = s390_find_execute (pool, insn);
6510 else
6511 addr = s390_find_constant (pool, get_pool_constant (pool_ref),
6512 get_pool_mode (pool_ref));
6513
6514 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
6515 INSN_CODE (insn) = -1;
6516 }
6517 }
6518 }
6519
6520
6521 /* Free the pool. */
6522 s390_free_pool (pool);
6523 }
6524
6525 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6526 We have decided we cannot use this pool, so revert all changes
6527 to the current function that were done by s390_mainpool_start. */
6528 static void
6529 s390_mainpool_cancel (struct constant_pool *pool)
6530 {
6531 /* We didn't actually change the instruction stream, so simply
6532 free the pool memory. */
6533 s390_free_pool (pool);
6534 }
6535
6536
6537 /* Chunkify the literal pool. */
6538
6539 #define S390_POOL_CHUNK_MIN 0xc00
6540 #define S390_POOL_CHUNK_MAX 0xe00
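/* Both limits lie well below the 4 KB reach of a 12-bit displacement;
   the remaining headroom presumably absorbs alignment padding, execute
   templates and the base register reload insns added later.  */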
6541
6542 static struct constant_pool *
6543 s390_chunkify_start (void)
6544 {
6545 struct constant_pool *curr_pool = NULL, *pool_list = NULL;
6546 int extra_size = 0;
6547 bitmap far_labels;
6548 rtx pending_ltrel = NULL_RTX;
6549 rtx insn;
6550
6551 rtx (*gen_reload_base) (rtx, rtx) =
6552	    TARGET_CPU_ZARCH ? gen_reload_base_64 : gen_reload_base_31;
6553
6554
6555 /* We need correct insn addresses. */
6556
6557 shorten_branches (get_insns ());
6558
6559 /* Scan all insns and move literals to pool chunks. */
6560
6561 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6562 {
6563 bool section_switch_p = false;
6564
6565 /* Check for pending LTREL_BASE. */
6566 if (INSN_P (insn))
6567 {
6568 rtx ltrel_base = find_ltrel_base (PATTERN (insn));
6569 if (ltrel_base)
6570 {
6571 gcc_assert (ltrel_base == pending_ltrel);
6572 pending_ltrel = NULL_RTX;
6573 }
6574 }
6575
6576 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
6577 {
6578 if (!curr_pool)
6579 curr_pool = s390_start_pool (&pool_list, insn);
6580
6581 s390_add_execute (curr_pool, insn);
6582 s390_add_pool_insn (curr_pool, insn);
6583 }
6584 else if (GET_CODE (insn) == INSN || CALL_P (insn))
6585 {
6586 rtx pool_ref = NULL_RTX;
6587 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6588 if (pool_ref)
6589 {
6590 rtx constant = get_pool_constant (pool_ref);
6591 enum machine_mode mode = get_pool_mode (pool_ref);
6592
6593 if (!curr_pool)
6594 curr_pool = s390_start_pool (&pool_list, insn);
6595
6596 s390_add_constant (curr_pool, constant, mode);
6597 s390_add_pool_insn (curr_pool, insn);
6598
6599 /* Don't split the pool chunk between a LTREL_OFFSET load
6600 and the corresponding LTREL_BASE. */
6601 if (GET_CODE (constant) == CONST
6602 && GET_CODE (XEXP (constant, 0)) == UNSPEC
6603 && XINT (XEXP (constant, 0), 1) == UNSPEC_LTREL_OFFSET)
6604 {
6605 gcc_assert (!pending_ltrel);
6606 pending_ltrel = pool_ref;
6607 }
6608 }
6609 /* Make sure we do not split between a call and its
6610 corresponding CALL_ARG_LOCATION note. */
6611 if (CALL_P (insn))
6612 {
6613 rtx next = NEXT_INSN (insn);
6614 if (next && NOTE_P (next)
6615 && NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION)
6616 continue;
6617 }
6618 }
6619
6620 if (GET_CODE (insn) == JUMP_INSN || GET_CODE (insn) == CODE_LABEL)
6621 {
6622 if (curr_pool)
6623 s390_add_pool_insn (curr_pool, insn);
6624 /* An LTREL_BASE must follow within the same basic block. */
6625 gcc_assert (!pending_ltrel);
6626 }
6627
6628 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
6629 section_switch_p = true;
6630
6631 if (!curr_pool
6632 || INSN_ADDRESSES_SIZE () <= (size_t) INSN_UID (insn)
6633 || INSN_ADDRESSES (INSN_UID (insn)) == -1)
6634 continue;
6635
6636 if (TARGET_CPU_ZARCH)
6637 {
6638 if (curr_pool->size < S390_POOL_CHUNK_MAX)
6639 continue;
6640
6641 s390_end_pool (curr_pool, NULL_RTX);
6642 curr_pool = NULL;
6643 }
6644 else
6645 {
6646 int chunk_size = INSN_ADDRESSES (INSN_UID (insn))
6647 - INSN_ADDRESSES (INSN_UID (curr_pool->first_insn))
6648 + extra_size;
6649
6650 /* We will later have to insert base register reload insns.
6651 Those will have an effect on code size, which we need to
6652 consider here. This calculation makes rather pessimistic
6653 worst-case assumptions. */
6654 if (GET_CODE (insn) == CODE_LABEL)
6655 extra_size += 6;
6656
6657 if (chunk_size < S390_POOL_CHUNK_MIN
6658 && curr_pool->size < S390_POOL_CHUNK_MIN
6659 && !section_switch_p)
6660 continue;
6661
6662 /* Pool chunks can only be inserted after BARRIERs ... */
6663 if (GET_CODE (insn) == BARRIER)
6664 {
6665 s390_end_pool (curr_pool, insn);
6666 curr_pool = NULL;
6667 extra_size = 0;
6668 }
6669
6670 /* ... so if we don't find one in time, create one. */
6671 else if (chunk_size > S390_POOL_CHUNK_MAX
6672 || curr_pool->size > S390_POOL_CHUNK_MAX
6673 || section_switch_p)
6674 {
6675 rtx label, jump, barrier;
6676
6677 if (!section_switch_p)
6678 {
6679 /* We can insert the barrier only after a 'real' insn. */
6680 if (GET_CODE (insn) != INSN && GET_CODE (insn) != CALL_INSN)
6681 continue;
6682 if (get_attr_length (insn) == 0)
6683 continue;
6684 /* Don't separate LTREL_BASE from the corresponding
6685 LTREL_OFFSET load. */
6686 if (pending_ltrel)
6687 continue;
6688 }
6689 else
6690 {
6691 gcc_assert (!pending_ltrel);
6692
6693 /* The old pool has to end before the section switch
6694 note in order to make it part of the current
6695 section. */
6696 insn = PREV_INSN (insn);
6697 }
6698
6699 label = gen_label_rtx ();
6700 jump = emit_jump_insn_after (gen_jump (label), insn);
6701 barrier = emit_barrier_after (jump);
6702 insn = emit_label_after (label, barrier);
6703 JUMP_LABEL (jump) = label;
6704 LABEL_NUSES (label) = 1;
6705
6706 INSN_ADDRESSES_NEW (jump, -1);
6707 INSN_ADDRESSES_NEW (barrier, -1);
6708 INSN_ADDRESSES_NEW (insn, -1);
6709
6710 s390_end_pool (curr_pool, barrier);
6711 curr_pool = NULL;
6712 extra_size = 0;
6713 }
6714 }
6715 }
6716
6717 if (curr_pool)
6718 s390_end_pool (curr_pool, NULL_RTX);
6719 gcc_assert (!pending_ltrel);
6720
6721 /* Find all labels that are branched into
6722 from an insn belonging to a different chunk. */
6723
6724 far_labels = BITMAP_ALLOC (NULL);
6725
6726 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6727 {
6728	      /* Labels marked with LABEL_PRESERVE_P can be targets
6729		 of non-local jumps, so we have to mark them.
6730 The same holds for named labels.
6731
6732 Don't do that, however, if it is the label before
6733 a jump table. */
6734
6735 if (GET_CODE (insn) == CODE_LABEL
6736 && (LABEL_PRESERVE_P (insn) || LABEL_NAME (insn)))
6737 {
6738 rtx vec_insn = next_real_insn (insn);
6739 rtx vec_pat = vec_insn && GET_CODE (vec_insn) == JUMP_INSN ?
6740 PATTERN (vec_insn) : NULL_RTX;
6741 if (!vec_pat
6742 || !(GET_CODE (vec_pat) == ADDR_VEC
6743 || GET_CODE (vec_pat) == ADDR_DIFF_VEC))
6744 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (insn));
6745 }
6746
6747 /* If we have a direct jump (conditional or unconditional)
6748 or a casesi jump, check all potential targets. */
6749 else if (GET_CODE (insn) == JUMP_INSN)
6750 {
6751 rtx pat = PATTERN (insn);
6752 if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 2)
6753 pat = XVECEXP (pat, 0, 0);
6754
6755 if (GET_CODE (pat) == SET)
6756 {
6757 rtx label = JUMP_LABEL (insn);
6758 if (label)
6759 {
6760 if (s390_find_pool (pool_list, label)
6761 != s390_find_pool (pool_list, insn))
6762 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
6763 }
6764 }
6765 else if (GET_CODE (pat) == PARALLEL
6766 && XVECLEN (pat, 0) == 2
6767 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
6768 && GET_CODE (XVECEXP (pat, 0, 1)) == USE
6769 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == LABEL_REF)
6770 {
6771 /* Find the jump table used by this casesi jump. */
6772 rtx vec_label = XEXP (XEXP (XVECEXP (pat, 0, 1), 0), 0);
6773 rtx vec_insn = next_real_insn (vec_label);
6774 rtx vec_pat = vec_insn && GET_CODE (vec_insn) == JUMP_INSN ?
6775 PATTERN (vec_insn) : NULL_RTX;
6776 if (vec_pat
6777 && (GET_CODE (vec_pat) == ADDR_VEC
6778 || GET_CODE (vec_pat) == ADDR_DIFF_VEC))
6779 {
6780 int i, diff_p = GET_CODE (vec_pat) == ADDR_DIFF_VEC;
6781
6782 for (i = 0; i < XVECLEN (vec_pat, diff_p); i++)
6783 {
6784 rtx label = XEXP (XVECEXP (vec_pat, diff_p, i), 0);
6785
6786 if (s390_find_pool (pool_list, label)
6787 != s390_find_pool (pool_list, insn))
6788 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
6789 }
6790 }
6791 }
6792 }
6793 }
6794
6795 /* Insert base register reload insns before every pool. */
6796
6797 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
6798 {
6799 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
6800 curr_pool->label);
6801 rtx insn = curr_pool->first_insn;
6802 INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
6803 }
6804
6805 /* Insert base register reload insns at every far label. */
6806
6807 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6808 if (GET_CODE (insn) == CODE_LABEL
6809 && bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
6810 {
6811 struct constant_pool *pool = s390_find_pool (pool_list, insn);
6812 if (pool)
6813 {
6814 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
6815 pool->label);
6816 INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
6817 }
6818 }
6819
6820
6821 BITMAP_FREE (far_labels);
6822
6823
6824 /* Recompute insn addresses. */
6825
6826 init_insn_lengths ();
6827 shorten_branches (get_insns ());
6828
6829 return pool_list;
6830 }
6831
6832 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
6833 After we have decided to use this list, finish implementing
6834 all changes to the current function as required. */
6835
6836 static void
6837 s390_chunkify_finish (struct constant_pool *pool_list)
6838 {
6839 struct constant_pool *curr_pool = NULL;
6840 rtx insn;
6841
6842
6843 /* Replace all literal pool references. */
6844
6845 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6846 {
6847 if (INSN_P (insn))
6848 replace_ltrel_base (&PATTERN (insn));
6849
6850 curr_pool = s390_find_pool (pool_list, insn);
6851 if (!curr_pool)
6852 continue;
6853
6854 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6855 {
6856 rtx addr, pool_ref = NULL_RTX;
6857 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6858 if (pool_ref)
6859 {
6860 if (s390_execute_label (insn))
6861 addr = s390_find_execute (curr_pool, insn);
6862 else
6863 addr = s390_find_constant (curr_pool,
6864 get_pool_constant (pool_ref),
6865 get_pool_mode (pool_ref));
6866
6867 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
6868 INSN_CODE (insn) = -1;
6869 }
6870 }
6871 }
6872
6873 /* Dump out all literal pools. */
6874
6875 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
6876 s390_dump_pool (curr_pool, 0);
6877
6878 /* Free pool list. */
6879
6880 while (pool_list)
6881 {
6882 struct constant_pool *next = pool_list->next;
6883 s390_free_pool (pool_list);
6884 pool_list = next;
6885 }
6886 }
6887
6888 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
6889 We have decided we cannot use this list, so revert all changes
6890 to the current function that were done by s390_chunkify_start. */
6891
6892 static void
6893 s390_chunkify_cancel (struct constant_pool *pool_list)
6894 {
6895 struct constant_pool *curr_pool = NULL;
6896 rtx insn;
6897
6898 /* Remove all pool placeholder insns. */
6899
6900 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
6901 {
6902 /* Did we insert an extra barrier? Remove it. */
6903 rtx barrier = PREV_INSN (curr_pool->pool_insn);
6904	      rtx jump = barrier ? PREV_INSN (barrier) : NULL_RTX;
6905 rtx label = NEXT_INSN (curr_pool->pool_insn);
6906
6907 if (jump && GET_CODE (jump) == JUMP_INSN
6908 && barrier && GET_CODE (barrier) == BARRIER
6909 && label && GET_CODE (label) == CODE_LABEL
6910 && GET_CODE (PATTERN (jump)) == SET
6911 && SET_DEST (PATTERN (jump)) == pc_rtx
6912 && GET_CODE (SET_SRC (PATTERN (jump))) == LABEL_REF
6913 && XEXP (SET_SRC (PATTERN (jump)), 0) == label)
6914 {
6915 remove_insn (jump);
6916 remove_insn (barrier);
6917 remove_insn (label);
6918 }
6919
6920 remove_insn (curr_pool->pool_insn);
6921 }
6922
6923 /* Remove all base register reload insns. */
6924
6925 for (insn = get_insns (); insn; )
6926 {
6927 rtx next_insn = NEXT_INSN (insn);
6928
6929 if (GET_CODE (insn) == INSN
6930 && GET_CODE (PATTERN (insn)) == SET
6931 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
6932 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_RELOAD_BASE)
6933 remove_insn (insn);
6934
6935 insn = next_insn;
6936 }
6937
6938 /* Free pool list. */
6939
6940 while (pool_list)
6941 {
6942 struct constant_pool *next = pool_list->next;
6943 s390_free_pool (pool_list);
6944 pool_list = next;
6945 }
6946 }
6947
6948 /* Output the constant pool entry EXP in mode MODE with alignment ALIGN. */
6949
6950 void
6951 s390_output_pool_entry (rtx exp, enum machine_mode mode, unsigned int align)
6952 {
6953 REAL_VALUE_TYPE r;
6954
6955 switch (GET_MODE_CLASS (mode))
6956 {
6957 case MODE_FLOAT:
6958 case MODE_DECIMAL_FLOAT:
6959 gcc_assert (GET_CODE (exp) == CONST_DOUBLE);
6960
6961 REAL_VALUE_FROM_CONST_DOUBLE (r, exp);
6962 assemble_real (r, mode, align);
6963 break;
6964
6965 case MODE_INT:
6966 assemble_integer (exp, GET_MODE_SIZE (mode), align, 1);
6967 mark_symbol_refs_as_used (exp);
6968 break;
6969
6970 default:
6971 gcc_unreachable ();
6972 }
6973 }
6974
6975
6976 /* Return an RTL expression representing the value of the return address
6977 for the frame COUNT steps up from the current frame. FRAME is the
6978 frame pointer of that frame. */
6979
6980 rtx
6981 s390_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
6982 {
6983 int offset;
6984 rtx addr;
6985
6986 /* Without backchain, we fail for all but the current frame. */
6987
6988 if (!TARGET_BACKCHAIN && count > 0)
6989 return NULL_RTX;
6990
6991 /* For the current frame, we need to make sure the initial
6992 value of RETURN_REGNUM is actually saved. */
6993
6994 if (count == 0)
6995 {
6996 /* On non-z architectures branch splitting could overwrite r14. */
6997 if (TARGET_CPU_ZARCH)
6998 return get_hard_reg_initial_val (Pmode, RETURN_REGNUM);
6999 else
7000 {
7001 cfun_frame_layout.save_return_addr_p = true;
7002 return gen_rtx_MEM (Pmode, return_address_pointer_rtx);
7003 }
7004 }
7005
7006 if (TARGET_PACKED_STACK)
7007 offset = -2 * UNITS_PER_LONG;
7008 else
7009 offset = RETURN_REGNUM * UNITS_PER_LONG;
7010
7011 addr = plus_constant (frame, offset);
7012 addr = memory_address (Pmode, addr);
7013 return gen_rtx_MEM (Pmode, addr);
7014 }
7015
7016 /* Return an RTL expression representing the back chain stored in
7017 the current stack frame. */
7018
7019 rtx
7020 s390_back_chain_rtx (void)
7021 {
7022 rtx chain;
7023
7024 gcc_assert (TARGET_BACKCHAIN);
7025
7026 if (TARGET_PACKED_STACK)
7027 chain = plus_constant (stack_pointer_rtx,
7028 STACK_POINTER_OFFSET - UNITS_PER_LONG);
7029 else
7030 chain = stack_pointer_rtx;
7031
7032 chain = gen_rtx_MEM (Pmode, chain);
7033 return chain;
7034 }
7035
7036	/* Find the first call-clobbered register (GPRs 0-5) unused in the
7037	   current function.  This could be used as a base register in a leaf
7038	   function or for holding the return address before the epilogue.  */
7039
7040 static int
7041 find_unused_clobbered_reg (void)
7042 {
7043 int i;
7044 for (i = 0; i < 6; i++)
7045 if (!df_regs_ever_live_p (i))
7046 return i;
7047 return 0;
7048 }
7049
7050
7051 /* Helper function for s390_regs_ever_clobbered. Sets the fields in DATA for all
7052 clobbered hard regs in SETREG. */
7053
7054 static void
7055 s390_reg_clobbered_rtx (rtx setreg, const_rtx set_insn ATTRIBUTE_UNUSED, void *data)
7056 {
7057 int *regs_ever_clobbered = (int *)data;
7058 unsigned int i, regno;
7059 enum machine_mode mode = GET_MODE (setreg);
7060
7061 if (GET_CODE (setreg) == SUBREG)
7062 {
7063 rtx inner = SUBREG_REG (setreg);
7064 if (!GENERAL_REG_P (inner))
7065 return;
7066 regno = subreg_regno (setreg);
7067 }
7068 else if (GENERAL_REG_P (setreg))
7069 regno = REGNO (setreg);
7070 else
7071 return;
7072
7073 for (i = regno;
7074 i < regno + HARD_REGNO_NREGS (regno, mode);
7075 i++)
7076 regs_ever_clobbered[i] = 1;
7077 }
7078
7079 /* Walks through all basic blocks of the current function looking
7080 for clobbered hard regs using s390_reg_clobbered_rtx. The fields
7081 of the passed integer array REGS_EVER_CLOBBERED are set to one for
7082 each of those regs. */
7083
7084 static void
7085 s390_regs_ever_clobbered (int *regs_ever_clobbered)
7086 {
7087 basic_block cur_bb;
7088 rtx cur_insn;
7089 unsigned int i;
7090
7091 memset (regs_ever_clobbered, 0, 16 * sizeof (int));
7092
7093 /* For non-leaf functions we have to consider all call clobbered regs to be
7094 clobbered. */
7095 if (!current_function_is_leaf)
7096 {
7097 for (i = 0; i < 16; i++)
7098 regs_ever_clobbered[i] = call_really_used_regs[i];
7099 }
7100
7101 /* Make the "magic" eh_return registers live if necessary. For regs_ever_live
7102 this work is done by liveness analysis (mark_regs_live_at_end).
7103 Special care is needed for functions containing landing pads. Landing pads
7104 may use the eh registers, but the code which sets these registers is not
7105 contained in that function. Hence s390_regs_ever_clobbered is not able to
7106 deal with this automatically. */
7107 if (crtl->calls_eh_return || cfun->machine->has_landing_pad_p)
7108 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM ; i++)
7109 if (crtl->calls_eh_return
7110 || (cfun->machine->has_landing_pad_p
7111 && df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
7112 regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;
7113
7114 /* For nonlocal gotos all call-saved registers have to be saved.
7115 This flag is also set for the unwinding code in libgcc.
7116 See expand_builtin_unwind_init. For regs_ever_live this is done by
7117 reload. */
7118 if (cfun->has_nonlocal_label)
7119 for (i = 0; i < 16; i++)
7120 if (!call_really_used_regs[i])
7121 regs_ever_clobbered[i] = 1;
7122
7123 FOR_EACH_BB (cur_bb)
7124 {
7125 FOR_BB_INSNS (cur_bb, cur_insn)
7126 {
7127 if (INSN_P (cur_insn))
7128 note_stores (PATTERN (cur_insn),
7129 s390_reg_clobbered_rtx,
7130 regs_ever_clobbered);
7131 }
7132 }
7133 }
7134
7135 /* Determine the frame area which actually has to be accessed
7136 in the function epilogue. The values are stored at the
7137 given pointers AREA_BOTTOM (address of the lowest used stack
7138 address) and AREA_TOP (address of the first item which does
7139 not belong to the stack frame). */
7140
7141 static void
7142 s390_frame_area (int *area_bottom, int *area_top)
7143 {
7144 int b, t;
7145 int i;
7146
7147 b = INT_MAX;
7148 t = INT_MIN;
7149
7150 if (cfun_frame_layout.first_restore_gpr != -1)
7151 {
7152 b = (cfun_frame_layout.gprs_offset
7153 + cfun_frame_layout.first_restore_gpr * UNITS_PER_LONG);
7154 t = b + (cfun_frame_layout.last_restore_gpr
7155 - cfun_frame_layout.first_restore_gpr + 1) * UNITS_PER_LONG;
7156 }
7157
7158 if (TARGET_64BIT && cfun_save_high_fprs_p)
7159 {
7160 b = MIN (b, cfun_frame_layout.f8_offset);
7161 t = MAX (t, (cfun_frame_layout.f8_offset
7162 + cfun_frame_layout.high_fprs * 8));
7163 }
7164
7165 if (!TARGET_64BIT)
7166 for (i = 2; i < 4; i++)
7167 if (cfun_fpr_bit_p (i))
7168 {
7169 b = MIN (b, cfun_frame_layout.f4_offset + (i - 2) * 8);
7170 t = MAX (t, cfun_frame_layout.f4_offset + (i - 1) * 8);
7171 }
7172
7173 *area_bottom = b;
7174 *area_top = t;
7175 }
7176
7177 /* Fill cfun->machine with info about register usage of current function.
7178 Return in CLOBBERED_REGS which GPRs are currently considered set. */
7179
7180 static void
7181 s390_register_info (int clobbered_regs[])
7182 {
7183 int i, j;
7184
7185	  /* fprs 8 - 15 are call saved under the 64-bit ABI.  */
7186 cfun_frame_layout.fpr_bitmap = 0;
7187 cfun_frame_layout.high_fprs = 0;
7188 if (TARGET_64BIT)
7189 for (i = 24; i < 32; i++)
7190 if (df_regs_ever_live_p (i) && !global_regs[i])
7191 {
7192 cfun_set_fpr_bit (i - 16);
7193 cfun_frame_layout.high_fprs++;
7194 }
7195
7196 /* Find first and last gpr to be saved. We trust regs_ever_live
7197 data, except that we don't save and restore global registers.
7198
7199 Also, all registers with special meaning to the compiler need
7200	     to be handled specially.  */
7201
7202 s390_regs_ever_clobbered (clobbered_regs);
7203
7204 for (i = 0; i < 16; i++)
7205 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i] && !fixed_regs[i];
7206
7207 if (frame_pointer_needed)
7208 clobbered_regs[HARD_FRAME_POINTER_REGNUM] = 1;
7209
7210 if (flag_pic)
7211 clobbered_regs[PIC_OFFSET_TABLE_REGNUM]
7212 |= df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM);
7213
7214 clobbered_regs[BASE_REGNUM]
7215 |= (cfun->machine->base_reg
7216 && REGNO (cfun->machine->base_reg) == BASE_REGNUM);
7217
7218 clobbered_regs[RETURN_REGNUM]
7219 |= (!current_function_is_leaf
7220 || TARGET_TPF_PROFILING
7221 || cfun->machine->split_branches_pending_p
7222 || cfun_frame_layout.save_return_addr_p
7223 || crtl->calls_eh_return
7224 || cfun->stdarg);
7225
7226 clobbered_regs[STACK_POINTER_REGNUM]
7227 |= (!current_function_is_leaf
7228 || TARGET_TPF_PROFILING
7229 || cfun_save_high_fprs_p
7230 || get_frame_size () > 0
7231 || cfun->calls_alloca
7232 || cfun->stdarg);
7233
7234 for (i = 6; i < 16; i++)
7235 if (df_regs_ever_live_p (i) || clobbered_regs[i])
7236 break;
7237 for (j = 15; j > i; j--)
7238 if (df_regs_ever_live_p (j) || clobbered_regs[j])
7239 break;
7240
7241 if (i == 16)
7242 {
7243 /* Nothing to save/restore. */
7244 cfun_frame_layout.first_save_gpr_slot = -1;
7245 cfun_frame_layout.last_save_gpr_slot = -1;
7246 cfun_frame_layout.first_save_gpr = -1;
7247 cfun_frame_layout.first_restore_gpr = -1;
7248 cfun_frame_layout.last_save_gpr = -1;
7249 cfun_frame_layout.last_restore_gpr = -1;
7250 }
7251 else
7252 {
7253 /* Save slots for gprs from i to j. */
7254 cfun_frame_layout.first_save_gpr_slot = i;
7255 cfun_frame_layout.last_save_gpr_slot = j;
7256
7257 for (i = cfun_frame_layout.first_save_gpr_slot;
7258 i < cfun_frame_layout.last_save_gpr_slot + 1;
7259 i++)
7260 if (clobbered_regs[i])
7261 break;
7262
7263 for (j = cfun_frame_layout.last_save_gpr_slot; j > i; j--)
7264 if (clobbered_regs[j])
7265 break;
7266
7267 if (i == cfun_frame_layout.last_save_gpr_slot + 1)
7268 {
7269 /* Nothing to save/restore. */
7270 cfun_frame_layout.first_save_gpr = -1;
7271 cfun_frame_layout.first_restore_gpr = -1;
7272 cfun_frame_layout.last_save_gpr = -1;
7273 cfun_frame_layout.last_restore_gpr = -1;
7274 }
7275 else
7276 {
7277 /* Save / Restore from gpr i to j. */
7278 cfun_frame_layout.first_save_gpr = i;
7279 cfun_frame_layout.first_restore_gpr = i;
7280 cfun_frame_layout.last_save_gpr = j;
7281 cfun_frame_layout.last_restore_gpr = j;
7282 }
7283 }
7284
7285 if (cfun->stdarg)
7286 {
7287 /* Varargs functions need to save gprs 2 to 6. */
7288 if (cfun->va_list_gpr_size
7289 && crtl->args.info.gprs < GP_ARG_NUM_REG)
7290 {
7291 int min_gpr = crtl->args.info.gprs;
7292 int max_gpr = min_gpr + cfun->va_list_gpr_size;
7293 if (max_gpr > GP_ARG_NUM_REG)
7294 max_gpr = GP_ARG_NUM_REG;
7295
7296 if (cfun_frame_layout.first_save_gpr == -1
7297 || cfun_frame_layout.first_save_gpr > 2 + min_gpr)
7298 {
7299 cfun_frame_layout.first_save_gpr = 2 + min_gpr;
7300 cfun_frame_layout.first_save_gpr_slot = 2 + min_gpr;
7301 }
7302
7303 if (cfun_frame_layout.last_save_gpr == -1
7304 || cfun_frame_layout.last_save_gpr < 2 + max_gpr - 1)
7305 {
7306 cfun_frame_layout.last_save_gpr = 2 + max_gpr - 1;
7307 cfun_frame_layout.last_save_gpr_slot = 2 + max_gpr - 1;
7308 }
7309 }
7310
7311 /* Mark f0, f2 for 31 bit and f0-f4 for 64 bit to be saved. */
7312 if (TARGET_HARD_FLOAT && cfun->va_list_fpr_size
7313 && crtl->args.info.fprs < FP_ARG_NUM_REG)
7314 {
7315 int min_fpr = crtl->args.info.fprs;
7316 int max_fpr = min_fpr + cfun->va_list_fpr_size;
7317 if (max_fpr > FP_ARG_NUM_REG)
7318 max_fpr = FP_ARG_NUM_REG;
7319
7320 /* ??? This is currently required to ensure proper location
7321 of the fpr save slots within the va_list save area. */
7322 if (TARGET_PACKED_STACK)
7323 min_fpr = 0;
7324
7325 for (i = min_fpr; i < max_fpr; i++)
7326 cfun_set_fpr_bit (i);
7327 }
7328 }
7329
7330 if (!TARGET_64BIT)
7331 for (i = 2; i < 4; i++)
7332 if (df_regs_ever_live_p (i + 16) && !global_regs[i + 16])
7333 cfun_set_fpr_bit (i);
7334 }
7335
7336 /* Fill cfun->machine with info about frame of current function. */
7337
7338 static void
7339 s390_frame_info (void)
7340 {
7341 int i;
7342
7343 cfun_frame_layout.frame_size = get_frame_size ();
7344 if (!TARGET_64BIT && cfun_frame_layout.frame_size > 0x7fff0000)
7345 fatal_error ("total size of local variables exceeds architecture limit");
7346
7347 if (!TARGET_PACKED_STACK)
7348 {
7349 cfun_frame_layout.backchain_offset = 0;
7350 cfun_frame_layout.f0_offset = 16 * UNITS_PER_LONG;
7351 cfun_frame_layout.f4_offset = cfun_frame_layout.f0_offset + 2 * 8;
7352 cfun_frame_layout.f8_offset = -cfun_frame_layout.high_fprs * 8;
7353 cfun_frame_layout.gprs_offset = (cfun_frame_layout.first_save_gpr_slot
7354 * UNITS_PER_LONG);
7355 }
7356 else if (TARGET_BACKCHAIN) /* kernel stack layout */
7357 {
7358 cfun_frame_layout.backchain_offset = (STACK_POINTER_OFFSET
7359 - UNITS_PER_LONG);
7360 cfun_frame_layout.gprs_offset
7361 = (cfun_frame_layout.backchain_offset
7362 - (STACK_POINTER_REGNUM - cfun_frame_layout.first_save_gpr_slot + 1)
7363 * UNITS_PER_LONG);
7364
7365 if (TARGET_64BIT)
7366 {
7367 cfun_frame_layout.f4_offset
7368 = (cfun_frame_layout.gprs_offset
7369 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7370
7371 cfun_frame_layout.f0_offset
7372 = (cfun_frame_layout.f4_offset
7373 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7374 }
7375 else
7376 {
7377	  /* On 31 bit we have to take care of the alignment of the
7378	     floating point regs to provide the fastest access.  */
7379 cfun_frame_layout.f0_offset
7380 = ((cfun_frame_layout.gprs_offset
7381 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1))
7382 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7383
7384 cfun_frame_layout.f4_offset
7385 = (cfun_frame_layout.f0_offset
7386 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7387 }
7388 }
7389 else /* no backchain */
7390 {
7391 cfun_frame_layout.f4_offset
7392 = (STACK_POINTER_OFFSET
7393 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7394
7395 cfun_frame_layout.f0_offset
7396 = (cfun_frame_layout.f4_offset
7397 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7398
7399 cfun_frame_layout.gprs_offset
7400 = cfun_frame_layout.f0_offset - cfun_gprs_save_area_size;
7401 }
7402
7403 if (current_function_is_leaf
7404 && !TARGET_TPF_PROFILING
7405 && cfun_frame_layout.frame_size == 0
7406 && !cfun_save_high_fprs_p
7407 && !cfun->calls_alloca
7408 && !cfun->stdarg)
7409 return;
7410
7411 if (!TARGET_PACKED_STACK)
7412 cfun_frame_layout.frame_size += (STACK_POINTER_OFFSET
7413 + crtl->outgoing_args_size
7414 + cfun_frame_layout.high_fprs * 8);
7415 else
7416 {
7417 if (TARGET_BACKCHAIN)
7418 cfun_frame_layout.frame_size += UNITS_PER_LONG;
7419
7420 /* No alignment trouble here because f8-f15 are only saved under
7421 64 bit. */
7422 cfun_frame_layout.f8_offset = (MIN (MIN (cfun_frame_layout.f0_offset,
7423 cfun_frame_layout.f4_offset),
7424 cfun_frame_layout.gprs_offset)
7425 - cfun_frame_layout.high_fprs * 8);
7426
7427 cfun_frame_layout.frame_size += cfun_frame_layout.high_fprs * 8;
7428
7429 for (i = 0; i < 8; i++)
7430 if (cfun_fpr_bit_p (i))
7431 cfun_frame_layout.frame_size += 8;
7432
7433 cfun_frame_layout.frame_size += cfun_gprs_save_area_size;
7434
7435	      /* If on 31 bit an odd number of gprs has to be saved, we have to adjust
7436		 the frame size to maintain 8-byte alignment of stack frames.  */
7437 cfun_frame_layout.frame_size = ((cfun_frame_layout.frame_size +
7438 STACK_BOUNDARY / BITS_PER_UNIT - 1)
7439 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1));
7440
7441 cfun_frame_layout.frame_size += crtl->outgoing_args_size;
7442 }
7443 }
7444
7445 /* Generate frame layout. Fills in register and frame data for the current
7446 function in cfun->machine. This routine can be called multiple times;
7447 it will re-do the complete frame layout every time. */
7448
7449 static void
7450 s390_init_frame_layout (void)
7451 {
7452 HOST_WIDE_INT frame_size;
7453 int base_used;
7454 int clobbered_regs[16];
7455
7456 /* On S/390 machines, we may need to perform branch splitting, which
7457	     will require both the base and the return address register.  We have no
7458 choice but to assume we're going to need them until right at the
7459 end of the machine dependent reorg phase. */
7460 if (!TARGET_CPU_ZARCH)
7461 cfun->machine->split_branches_pending_p = true;
7462
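  /* Whether the literal pool base register is needed depends on the final
     frame size (see the DISP_IN_RANGE test below), and the frame size in
     turn depends on which registers have to be saved, so iterate until
     the layout reaches a fixed point.  */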
7463 do
7464 {
7465 frame_size = cfun_frame_layout.frame_size;
7466
7467 /* Try to predict whether we'll need the base register. */
7468 base_used = cfun->machine->split_branches_pending_p
7469 || crtl->uses_const_pool
7470 || (!DISP_IN_RANGE (frame_size)
7471 && !CONST_OK_FOR_K (frame_size));
7472
7473 /* Decide which register to use as literal pool base. In small
7474 leaf functions, try to use an unused call-clobbered register
7475 as base register to avoid save/restore overhead. */
7476 if (!base_used)
7477 cfun->machine->base_reg = NULL_RTX;
7478 else if (current_function_is_leaf && !df_regs_ever_live_p (5))
7479 cfun->machine->base_reg = gen_rtx_REG (Pmode, 5);
7480 else
7481 cfun->machine->base_reg = gen_rtx_REG (Pmode, BASE_REGNUM);
7482
7483 s390_register_info (clobbered_regs);
7484 s390_frame_info ();
7485 }
7486 while (frame_size != cfun_frame_layout.frame_size);
7487 }
7488
7489 /* Update frame layout. Recompute actual register save data based on
7490 current info and update regs_ever_live for the special registers.
7491 May be called multiple times, but may never cause *more* registers
7492 to be saved than s390_init_frame_layout allocated room for. */
7493
7494 static void
7495 s390_update_frame_layout (void)
7496 {
7497 int clobbered_regs[16];
7498
7499 s390_register_info (clobbered_regs);
7500
7501 df_set_regs_ever_live (BASE_REGNUM,
7502 clobbered_regs[BASE_REGNUM] ? true : false);
7503 df_set_regs_ever_live (RETURN_REGNUM,
7504 clobbered_regs[RETURN_REGNUM] ? true : false);
7505 df_set_regs_ever_live (STACK_POINTER_REGNUM,
7506 clobbered_regs[STACK_POINTER_REGNUM] ? true : false);
7507
7508 if (cfun->machine->base_reg)
7509 df_set_regs_ever_live (REGNO (cfun->machine->base_reg), true);
7510 }
7511
7512 /* Return true if it is legal to put a value with MODE into REGNO. */
7513
7514 bool
7515 s390_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
7516 {
7517 switch (REGNO_REG_CLASS (regno))
7518 {
7519 case FP_REGS:
7520 if (REGNO_PAIR_OK (regno, mode))
7521 {
7522 if (mode == SImode || mode == DImode)
7523 return true;
7524
7525 if (FLOAT_MODE_P (mode) && GET_MODE_CLASS (mode) != MODE_VECTOR_FLOAT)
7526 return true;
7527 }
7528 break;
7529 case ADDR_REGS:
7530 if (FRAME_REGNO_P (regno) && mode == Pmode)
7531 return true;
7532
7533 /* fallthrough */
7534 case GENERAL_REGS:
7535 if (REGNO_PAIR_OK (regno, mode))
7536 {
7537 if (TARGET_ZARCH
7538 || (mode != TFmode && mode != TCmode && mode != TDmode))
7539 return true;
7540 }
7541 break;
7542 case CC_REGS:
7543 if (GET_MODE_CLASS (mode) == MODE_CC)
7544 return true;
7545 break;
7546 case ACCESS_REGS:
7547 if (REGNO_PAIR_OK (regno, mode))
7548 {
7549 if (mode == SImode || mode == Pmode)
7550 return true;
7551 }
7552 break;
7553 default:
7554 return false;
7555 }
7556
7557 return false;
7558 }
7559
7560 /* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */
7561
7562 bool
7563 s390_hard_regno_rename_ok (unsigned int old_reg, unsigned int new_reg)
7564 {
7565 /* Once we've decided upon a register to use as base register, it must
7566 no longer be used for any other purpose. */
7567 if (cfun->machine->base_reg)
7568 if (REGNO (cfun->machine->base_reg) == old_reg
7569 || REGNO (cfun->machine->base_reg) == new_reg)
7570 return false;
7571
7572 return true;
7573 }
7574
7575 /* Maximum number of registers to represent a value of mode MODE
7576 in a register of class RCLASS. */
7577
7578 int
7579 s390_class_max_nregs (enum reg_class rclass, enum machine_mode mode)
7580 {
7581 switch (rclass)
7582 {
7583 case FP_REGS:
7584 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
7585 return 2 * ((GET_MODE_SIZE (mode) / 2 + 8 - 1) / 8);
7586 else
7587 return (GET_MODE_SIZE (mode) + 8 - 1) / 8;
7588 case ACCESS_REGS:
7589 return (GET_MODE_SIZE (mode) + 4 - 1) / 4;
7590 default:
7591 break;
7592 }
7593 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
7594 }
7595
7596 /* Return true if register FROM can be eliminated via register TO. */
7597
7598 static bool
7599 s390_can_eliminate (const int from, const int to)
7600 {
7601 /* On zSeries machines, we have not marked the base register as fixed.
7602 Instead, we have an elimination rule BASE_REGNUM -> BASE_REGNUM.
7603 If a function requires the base register, we say here that this
7604 elimination cannot be performed. This will cause reload to free
7605 up the base register (as if it were fixed). On the other hand,
7606 if the current function does *not* require the base register, we
7607 say here the elimination succeeds, which in turn allows reload
7608 to allocate the base register for any other purpose. */
7609 if (from == BASE_REGNUM && to == BASE_REGNUM)
7610 {
7611 if (TARGET_CPU_ZARCH)
7612 {
7613 s390_init_frame_layout ();
7614 return cfun->machine->base_reg == NULL_RTX;
7615 }
7616
7617 return false;
7618 }
7619
7620 /* Everything else must point into the stack frame. */
7621 gcc_assert (to == STACK_POINTER_REGNUM
7622 || to == HARD_FRAME_POINTER_REGNUM);
7623
7624 gcc_assert (from == FRAME_POINTER_REGNUM
7625 || from == ARG_POINTER_REGNUM
7626 || from == RETURN_ADDRESS_POINTER_REGNUM);
7627
7628 /* Make sure we actually saved the return address. */
7629 if (from == RETURN_ADDRESS_POINTER_REGNUM)
7630 if (!crtl->calls_eh_return
7631 && !cfun->stdarg
7632 && !cfun_frame_layout.save_return_addr_p)
7633 return false;
7634
7635 return true;
7636 }
7637
7638	/* Return the offset between registers FROM and TO initially after the prologue.  */
7639
7640 HOST_WIDE_INT
7641 s390_initial_elimination_offset (int from, int to)
7642 {
7643 HOST_WIDE_INT offset;
7644 int index;
7645
7646 /* ??? Why are we called for non-eliminable pairs? */
7647 if (!s390_can_eliminate (from, to))
7648 return 0;
7649
7650 switch (from)
7651 {
7652 case FRAME_POINTER_REGNUM:
7653 offset = (get_frame_size()
7654 + STACK_POINTER_OFFSET
7655 + crtl->outgoing_args_size);
7656 break;
7657
7658 case ARG_POINTER_REGNUM:
7659 s390_init_frame_layout ();
7660 offset = cfun_frame_layout.frame_size + STACK_POINTER_OFFSET;
7661 break;
7662
7663 case RETURN_ADDRESS_POINTER_REGNUM:
7664 s390_init_frame_layout ();
7665 index = RETURN_REGNUM - cfun_frame_layout.first_save_gpr_slot;
7666 gcc_assert (index >= 0);
7667 offset = cfun_frame_layout.frame_size + cfun_frame_layout.gprs_offset;
7668 offset += index * UNITS_PER_LONG;
7669 break;
7670
7671 case BASE_REGNUM:
7672 offset = 0;
7673 break;
7674
7675 default:
7676 gcc_unreachable ();
7677 }
7678
7679 return offset;
7680 }
7681
7682 /* Emit insn to save fpr REGNUM at offset OFFSET relative
7683 to register BASE. Return generated insn. */
7684
7685 static rtx
7686 save_fpr (rtx base, int offset, int regnum)
7687 {
7688 rtx addr;
7689 addr = gen_rtx_MEM (DFmode, plus_constant (base, offset));
7690
7691 if (regnum >= 16 && regnum <= (16 + FP_ARG_NUM_REG))
7692 set_mem_alias_set (addr, get_varargs_alias_set ());
7693 else
7694 set_mem_alias_set (addr, get_frame_alias_set ());
7695
7696 return emit_move_insn (addr, gen_rtx_REG (DFmode, regnum));
7697 }
7698
7699 /* Emit insn to restore fpr REGNUM from offset OFFSET relative
7700 to register BASE. Return generated insn. */
7701
7702 static rtx
7703 restore_fpr (rtx base, int offset, int regnum)
7704 {
7705 rtx addr;
7706 addr = gen_rtx_MEM (DFmode, plus_constant (base, offset));
7707 set_mem_alias_set (addr, get_frame_alias_set ());
7708
7709 return emit_move_insn (gen_rtx_REG (DFmode, regnum), addr);
7710 }
7711
7712 /* Return true if REGNO is a global register, but not one
7713	   of the special ones that need to be saved/restored anyway.  */
7714
7715 static inline bool
7716 global_not_special_regno_p (int regno)
7717 {
7718 return (global_regs[regno]
7719 /* These registers are special and need to be
7720 restored in any case. */
7721 && !(regno == STACK_POINTER_REGNUM
7722 || regno == RETURN_REGNUM
7723 || regno == BASE_REGNUM
7724 || (flag_pic && regno == (int)PIC_OFFSET_TABLE_REGNUM)));
7725 }
7726
7727 /* Generate insn to save registers FIRST to LAST into
7728 the register save area located at offset OFFSET
7729 relative to register BASE. */
7730
7731 static rtx
7732 save_gprs (rtx base, int offset, int first, int last)
7733 {
7734 rtx addr, insn, note;
7735 int i;
7736
7737 addr = plus_constant (base, offset);
7738 addr = gen_rtx_MEM (Pmode, addr);
7739
7740 set_mem_alias_set (addr, get_frame_alias_set ());
7741
7742 /* Special-case single register. */
7743 if (first == last)
7744 {
7745 if (TARGET_64BIT)
7746 insn = gen_movdi (addr, gen_rtx_REG (Pmode, first));
7747 else
7748 insn = gen_movsi (addr, gen_rtx_REG (Pmode, first));
7749
7750 if (!global_not_special_regno_p (first))
7751 RTX_FRAME_RELATED_P (insn) = 1;
7752 return insn;
7753 }
7754
7755
7756 insn = gen_store_multiple (addr,
7757 gen_rtx_REG (Pmode, first),
7758 GEN_INT (last - first + 1));
7759
7760 if (first <= 6 && cfun->stdarg)
7761 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
7762 {
7763 rtx mem = XEXP (XVECEXP (PATTERN (insn), 0, i), 0);
7764
7765 if (first + i <= 6)
7766 set_mem_alias_set (mem, get_varargs_alias_set ());
7767 }
7768
7769 /* We need to set the FRAME_RELATED flag on all SETs
7770 inside the store-multiple pattern.
7771
7772 However, we must not emit DWARF records for registers 2..5
7773 if they are stored for use by variable arguments ...
7774
7775 ??? Unfortunately, it is not enough to simply not set the
7776 FRAME_RELATED flags for those SETs, because the first SET
7777 of the PARALLEL is always treated as if it had the flag
7778 set, even if it does not. Therefore we emit a new pattern
7779 without those registers as a REG_FRAME_RELATED_EXPR note. */
7780
7781 if (first >= 6 && !global_not_special_regno_p (first))
7782 {
7783 rtx pat = PATTERN (insn);
7784
7785 for (i = 0; i < XVECLEN (pat, 0); i++)
7786 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
7787 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (pat,
7788 0, i)))))
7789 RTX_FRAME_RELATED_P (XVECEXP (pat, 0, i)) = 1;
7790
7791 RTX_FRAME_RELATED_P (insn) = 1;
7792 }
7793 else if (last >= 6)
7794 {
7795 int start;
7796
7797 for (start = first >= 6 ? first : 6; start <= last; start++)
7798 if (!global_not_special_regno_p (start))
7799 break;
7800
7801 if (start > last)
7802 return insn;
7803
7804 addr = plus_constant (base, offset + (start - first) * UNITS_PER_LONG);
7805 note = gen_store_multiple (gen_rtx_MEM (Pmode, addr),
7806 gen_rtx_REG (Pmode, start),
7807 GEN_INT (last - start + 1));
7808 note = PATTERN (note);
7809
7810 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
7811
7812 for (i = 0; i < XVECLEN (note, 0); i++)
7813 if (GET_CODE (XVECEXP (note, 0, i)) == SET
7814 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (note,
7815 0, i)))))
7816 RTX_FRAME_RELATED_P (XVECEXP (note, 0, i)) = 1;
7817
7818 RTX_FRAME_RELATED_P (insn) = 1;
7819 }
7820
7821 return insn;
7822 }
7823
7824 /* Generate insn to restore registers FIRST to LAST from
7825 the register save area located at offset OFFSET
7826 relative to register BASE. */
7827
7828 static rtx
7829 restore_gprs (rtx base, int offset, int first, int last)
7830 {
7831 rtx addr, insn;
7832
7833 addr = plus_constant (base, offset);
7834 addr = gen_rtx_MEM (Pmode, addr);
7835 set_mem_alias_set (addr, get_frame_alias_set ());
7836
7837 /* Special-case single register. */
7838 if (first == last)
7839 {
7840 if (TARGET_64BIT)
7841 insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
7842 else
7843 insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);
7844
7845 return insn;
7846 }
7847
7848 insn = gen_load_multiple (gen_rtx_REG (Pmode, first),
7849 addr,
7850 GEN_INT (last - first + 1));
7851 return insn;
7852 }
7853
7854 /* Return insn sequence to load the GOT register. */
7855
7856 static GTY(()) rtx got_symbol;
7857 rtx
7858 s390_load_got (void)
7859 {
7860 rtx insns;
7861
7862 if (!got_symbol)
7863 {
7864 got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
7865 SYMBOL_REF_FLAGS (got_symbol) = SYMBOL_FLAG_LOCAL;
7866 }
7867
7868 start_sequence ();
7869
7870 if (TARGET_CPU_ZARCH)
7871 {
7872 emit_move_insn (pic_offset_table_rtx, got_symbol);
7873 }
7874 else
7875 {
7876 rtx offset;
7877
7878 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got_symbol),
7879 UNSPEC_LTREL_OFFSET);
7880 offset = gen_rtx_CONST (Pmode, offset);
7881 offset = force_const_mem (Pmode, offset);
7882
7883 emit_move_insn (pic_offset_table_rtx, offset);
7884
7885 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (offset, 0)),
7886 UNSPEC_LTREL_BASE);
7887 offset = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, offset);
7888
7889 emit_move_insn (pic_offset_table_rtx, offset);
7890 }
7891
7892 insns = get_insns ();
7893 end_sequence ();
7894 return insns;
7895 }
7896
7897 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
7898 and the change to the stack pointer. */
7899
7900 static void
7901 s390_emit_stack_tie (void)
7902 {
7903 rtx mem = gen_frame_mem (BLKmode,
7904 gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
7905
7906 emit_insn (gen_stack_tie (mem));
7907 }
7908
7909 /* Expand the prologue into a bunch of separate insns. */
7910
7911 void
7912 s390_emit_prologue (void)
7913 {
7914 rtx insn, addr;
7915 rtx temp_reg;
7916 int i;
7917 int offset;
7918 int next_fpr = 0;
7919
7920 /* Complete frame layout. */
7921
7922 s390_update_frame_layout ();
7923
7924 /* Annotate all constant pool references to let the scheduler know
7925 they implicitly use the base register. */
7926
7927 push_topmost_sequence ();
7928
7929 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7930 if (INSN_P (insn))
7931 {
7932 annotate_constant_pool_refs (&PATTERN (insn));
7933 df_insn_rescan (insn);
7934 }
7935
7936 pop_topmost_sequence ();
7937
7938 /* Choose the best register to use as a temporary within the prologue.
7939 See below for why TPF must use register 1. */
7940
7941 if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
7942 && !current_function_is_leaf
7943 && !TARGET_TPF_PROFILING)
7944 temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
7945 else
7946 temp_reg = gen_rtx_REG (Pmode, 1);
7947
7948 /* Save call saved gprs. */
7949 if (cfun_frame_layout.first_save_gpr != -1)
7950 {
7951 insn = save_gprs (stack_pointer_rtx,
7952 cfun_frame_layout.gprs_offset +
7953 UNITS_PER_LONG * (cfun_frame_layout.first_save_gpr
7954 - cfun_frame_layout.first_save_gpr_slot),
7955 cfun_frame_layout.first_save_gpr,
7956 cfun_frame_layout.last_save_gpr);
7957 emit_insn (insn);
7958 }
7959
7960 /* Dummy insn to mark literal pool slot. */
7961
7962 if (cfun->machine->base_reg)
7963 emit_insn (gen_main_pool (cfun->machine->base_reg));
7964
7965 offset = cfun_frame_layout.f0_offset;
7966
7967 /* Save f0 and f2. */
7968 for (i = 0; i < 2; i++)
7969 {
7970 if (cfun_fpr_bit_p (i))
7971 {
7972 save_fpr (stack_pointer_rtx, offset, i + 16);
7973 offset += 8;
7974 }
7975 else if (!TARGET_PACKED_STACK)
7976 offset += 8;
7977 }
7978
7979 /* Save f4 and f6. */
7980 offset = cfun_frame_layout.f4_offset;
7981 for (i = 2; i < 4; i++)
7982 {
7983 if (cfun_fpr_bit_p (i))
7984 {
7985 insn = save_fpr (stack_pointer_rtx, offset, i + 16);
7986 offset += 8;
7987
7988 /* If f4 and f6 are call-clobbered they are saved due to stdarg and
7989 therefore are not frame related. */
7990 if (!call_really_used_regs[i + 16])
7991 RTX_FRAME_RELATED_P (insn) = 1;
7992 }
7993 else if (!TARGET_PACKED_STACK)
7994 offset += 8;
7995 }
7996
7997 if (TARGET_PACKED_STACK
7998 && cfun_save_high_fprs_p
7999 && cfun_frame_layout.f8_offset + cfun_frame_layout.high_fprs * 8 > 0)
8000 {
8001 offset = (cfun_frame_layout.f8_offset
8002 + (cfun_frame_layout.high_fprs - 1) * 8);
8003
8004 for (i = 15; i > 7 && offset >= 0; i--)
8005 if (cfun_fpr_bit_p (i))
8006 {
8007 insn = save_fpr (stack_pointer_rtx, offset, i + 16);
8008
8009 RTX_FRAME_RELATED_P (insn) = 1;
8010 offset -= 8;
8011 }
8012 if (offset >= cfun_frame_layout.f8_offset)
8013 next_fpr = i + 16;
8014 }
8015
8016 if (!TARGET_PACKED_STACK)
8017 next_fpr = cfun_save_high_fprs_p ? 31 : 0;
8018
8019 if (flag_stack_usage_info)
8020 current_function_static_stack_size = cfun_frame_layout.frame_size;
8021
8022 /* Decrement stack pointer. */
8023
8024 if (cfun_frame_layout.frame_size > 0)
8025 {
8026 rtx frame_off = GEN_INT (-cfun_frame_layout.frame_size);
8027 rtx real_frame_off;
8028
8029 if (s390_stack_size)
8030 {
8031 HOST_WIDE_INT stack_guard;
8032
8033 if (s390_stack_guard)
8034 stack_guard = s390_stack_guard;
8035 else
8036 {
8037 /* If no value for the stack guard is provided, the smallest power of 2
8038 larger than the current frame size is chosen. */
8039 stack_guard = 1;
8040 while (stack_guard < cfun_frame_layout.frame_size)
8041 stack_guard <<= 1;
8042 }
8043
8044 if (cfun_frame_layout.frame_size >= s390_stack_size)
8045 {
8046 warning (0, "frame size of function %qs is %wd"
8047 " bytes exceeding user provided stack limit of "
8048 "%d bytes. "
8049 "An unconditional trap is added.",
8050 current_function_name(), cfun_frame_layout.frame_size,
8051 s390_stack_size);
8052 emit_insn (gen_trap ());
8053 }
8054 else
8055 {
8056 /* stack_guard has to be smaller than s390_stack_size.
8057 Otherwise we would emit an AND with zero which would
8058 not match the test under mask pattern. */
8059 if (stack_guard >= s390_stack_size)
8060 {
8061 warning (0, "frame size of function %qs is %wd"
8062 " bytes which is more than half the stack size. "
8063 "The dynamic check would not be reliable. "
8064 "No check emitted for this function.",
8065 current_function_name(),
8066 cfun_frame_layout.frame_size);
8067 }
8068 else
8069 {
8070 HOST_WIDE_INT stack_check_mask = ((s390_stack_size - 1)
8071 & ~(stack_guard - 1));
8072
8073 rtx t = gen_rtx_AND (Pmode, stack_pointer_rtx,
8074 GEN_INT (stack_check_mask));
8075 if (TARGET_64BIT)
8076 emit_insn (gen_ctrapdi4 (gen_rtx_EQ (VOIDmode,
8077 t, const0_rtx),
8078 t, const0_rtx, const0_rtx));
8079 else
8080 emit_insn (gen_ctrapsi4 (gen_rtx_EQ (VOIDmode,
8081 t, const0_rtx),
8082 t, const0_rtx, const0_rtx));
8083 }
8084 }
8085 }
8086
8087 if (s390_warn_framesize > 0
8088 && cfun_frame_layout.frame_size >= s390_warn_framesize)
8089 warning (0, "frame size of %qs is %wd bytes",
8090 current_function_name (), cfun_frame_layout.frame_size);
8091
8092 if (s390_warn_dynamicstack_p && cfun->calls_alloca)
8093 warning (0, "%qs uses dynamic stack allocation", current_function_name ());
8094
8095 /* Save incoming stack pointer into temp reg. */
8096 if (TARGET_BACKCHAIN || next_fpr)
8097 insn = emit_insn (gen_move_insn (temp_reg, stack_pointer_rtx));
8098
8099 /* Subtract frame size from stack pointer. */
8100
8101 if (DISP_IN_RANGE (INTVAL (frame_off)))
8102 {
8103 insn = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8104 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8105 frame_off));
8106 insn = emit_insn (insn);
8107 }
8108 else
8109 {
8110 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
8111 frame_off = force_const_mem (Pmode, frame_off);
8112
8113 insn = emit_insn (gen_add2_insn (stack_pointer_rtx, frame_off));
8114 annotate_constant_pool_refs (&PATTERN (insn));
8115 }
8116
8117 RTX_FRAME_RELATED_P (insn) = 1;
8118 real_frame_off = GEN_INT (-cfun_frame_layout.frame_size);
8119 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
8120 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8121 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8122 real_frame_off)));
8123
8124 /* Set backchain. */
8125
8126 if (TARGET_BACKCHAIN)
8127 {
8128 if (cfun_frame_layout.backchain_offset)
8129 addr = gen_rtx_MEM (Pmode,
8130 plus_constant (stack_pointer_rtx,
8131 cfun_frame_layout.backchain_offset));
8132 else
8133 addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
8134 set_mem_alias_set (addr, get_frame_alias_set ());
8135 insn = emit_insn (gen_move_insn (addr, temp_reg));
8136 }
8137
8138 /* If we support non-call exceptions (e.g. for Java),
8139 we need to make sure the backchain pointer is set up
8140 before any possibly trapping memory access. */
8141 if (TARGET_BACKCHAIN && cfun->can_throw_non_call_exceptions)
8142 {
8143 addr = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode));
8144 emit_clobber (addr);
8145 }
8146 }
8147
8148 /* Save fprs 8 - 15 (64 bit ABI). */
8149
8150 if (cfun_save_high_fprs_p && next_fpr)
8151 {
8152 /* If the stack might be accessed through a different register
8153 we have to make sure that the stack pointer decrement is not
8154 moved below the use of the stack slots. */
8155 s390_emit_stack_tie ();
8156
8157 insn = emit_insn (gen_add2_insn (temp_reg,
8158 GEN_INT (cfun_frame_layout.f8_offset)));
8159
8160 offset = 0;
8161
8162 for (i = 24; i <= next_fpr; i++)
8163 if (cfun_fpr_bit_p (i - 16))
8164 {
8165 rtx addr = plus_constant (stack_pointer_rtx,
8166 cfun_frame_layout.frame_size
8167 + cfun_frame_layout.f8_offset
8168 + offset);
8169
8170 insn = save_fpr (temp_reg, offset, i);
8171 offset += 8;
8172 RTX_FRAME_RELATED_P (insn) = 1;
8173 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
8174 gen_rtx_SET (VOIDmode,
8175 gen_rtx_MEM (DFmode, addr),
8176 gen_rtx_REG (DFmode, i)));
8177 }
8178 }
8179
8180 /* Set frame pointer, if needed. */
8181
8182 if (frame_pointer_needed)
8183 {
8184 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
8185 RTX_FRAME_RELATED_P (insn) = 1;
8186 }
8187
8188 /* Set up got pointer, if needed. */
8189
8190 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
8191 {
8192 rtx insns = s390_load_got ();
8193
8194 for (insn = insns; insn; insn = NEXT_INSN (insn))
8195 annotate_constant_pool_refs (&PATTERN (insn));
8196
8197 emit_insn (insns);
8198 }
8199
8200 if (TARGET_TPF_PROFILING)
8201 {
8202 /* Generate a BAS instruction to serve as a function
8203 entry intercept to facilitate the use of tracing
8204 algorithms located at the branch target. */
8205 emit_insn (gen_prologue_tpf ());
8206
8207 /* Emit a blockage here so that all code
8208 lies between the profiling mechanisms. */
8209 emit_insn (gen_blockage ());
8210 }
8211 }
8212
8213 /* Expand the epilogue into a bunch of separate insns. */
8214
8215 void
8216 s390_emit_epilogue (bool sibcall)
8217 {
8218 rtx frame_pointer, return_reg, cfa_restores = NULL_RTX;
8219 int area_bottom, area_top, offset = 0;
8220 int next_offset;
8221 rtvec p;
8222 int i;
8223
8224 if (TARGET_TPF_PROFILING)
8225 {
8226
8227 /* Generate a BAS instruction to serve as a function
8228 exit intercept to facilitate the use of tracing
8229 algorithms located at the branch target. */
8230
8231 /* Emit a blockage here so that all code
8232 lies between the profiling mechanisms. */
8233 emit_insn (gen_blockage ());
8234
8235 emit_insn (gen_epilogue_tpf ());
8236 }
8237
8238 /* Check whether to use frame or stack pointer for restore. */
8239
8240 frame_pointer = (frame_pointer_needed
8241 ? hard_frame_pointer_rtx : stack_pointer_rtx);
8242
8243 s390_frame_area (&area_bottom, &area_top);
8244
8245 /* Check whether we can access the register save area.
8246 If not, increment the frame pointer as required. */
8247
8248 if (area_top <= area_bottom)
8249 {
8250 /* Nothing to restore. */
8251 }
8252 else if (DISP_IN_RANGE (cfun_frame_layout.frame_size + area_bottom)
8253 && DISP_IN_RANGE (cfun_frame_layout.frame_size + area_top - 1))
8254 {
8255 /* Area is in range. */
8256 offset = cfun_frame_layout.frame_size;
8257 }
8258 else
8259 {
8260 rtx insn, frame_off, cfa;
8261
8262 offset = area_bottom < 0 ? -area_bottom : 0;
8263 frame_off = GEN_INT (cfun_frame_layout.frame_size - offset);
8264
8265 cfa = gen_rtx_SET (VOIDmode, frame_pointer,
8266 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
8267 if (DISP_IN_RANGE (INTVAL (frame_off)))
8268 {
8269 insn = gen_rtx_SET (VOIDmode, frame_pointer,
8270 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
8271 insn = emit_insn (insn);
8272 }
8273 else
8274 {
8275 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
8276 frame_off = force_const_mem (Pmode, frame_off);
8277
8278 insn = emit_insn (gen_add2_insn (frame_pointer, frame_off));
8279 annotate_constant_pool_refs (&PATTERN (insn));
8280 }
8281 add_reg_note (insn, REG_CFA_ADJUST_CFA, cfa);
8282 RTX_FRAME_RELATED_P (insn) = 1;
8283 }
8284
8285 /* Restore call saved fprs. */
8286
8287 if (TARGET_64BIT)
8288 {
8289 if (cfun_save_high_fprs_p)
8290 {
8291 next_offset = cfun_frame_layout.f8_offset;
8292 for (i = 24; i < 32; i++)
8293 {
8294 if (cfun_fpr_bit_p (i - 16))
8295 {
8296 restore_fpr (frame_pointer,
8297 offset + next_offset, i);
8298 cfa_restores
8299 = alloc_reg_note (REG_CFA_RESTORE,
8300 gen_rtx_REG (DFmode, i), cfa_restores);
8301 next_offset += 8;
8302 }
8303 }
8304 }
8305
8306 }
8307 else
8308 {
8309 next_offset = cfun_frame_layout.f4_offset;
8310 for (i = 18; i < 20; i++)
8311 {
8312 if (cfun_fpr_bit_p (i - 16))
8313 {
8314 restore_fpr (frame_pointer,
8315 offset + next_offset, i);
8316 cfa_restores
8317 = alloc_reg_note (REG_CFA_RESTORE,
8318 gen_rtx_REG (DFmode, i), cfa_restores);
8319 next_offset += 8;
8320 }
8321 else if (!TARGET_PACKED_STACK)
8322 next_offset += 8;
8323 }
8324
8325 }
8326
8327 /* Return register. */
8328
8329 return_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
8330
8331 /* Restore call saved gprs. */
8332
8333 if (cfun_frame_layout.first_restore_gpr != -1)
8334 {
8335 rtx insn, addr;
8336 int i;
8337
8338 /* Check for global registers and save them
8339 to the stack locations from which they are restored. */
8340
8341 for (i = cfun_frame_layout.first_restore_gpr;
8342 i <= cfun_frame_layout.last_restore_gpr;
8343 i++)
8344 {
8345 if (global_not_special_regno_p (i))
8346 {
8347 addr = plus_constant (frame_pointer,
8348 offset + cfun_frame_layout.gprs_offset
8349 + (i - cfun_frame_layout.first_save_gpr_slot)
8350 * UNITS_PER_LONG);
8351 addr = gen_rtx_MEM (Pmode, addr);
8352 set_mem_alias_set (addr, get_frame_alias_set ());
8353 emit_move_insn (addr, gen_rtx_REG (Pmode, i));
8354 }
8355 else
8356 cfa_restores
8357 = alloc_reg_note (REG_CFA_RESTORE,
8358 gen_rtx_REG (Pmode, i), cfa_restores);
8359 }
8360
8361 if (! sibcall)
8362 {
8363 /* Fetch return address from stack before load multiple;
8364 this helps scheduling. */
8365
8366 if (cfun_frame_layout.save_return_addr_p
8367 || (cfun_frame_layout.first_restore_gpr < BASE_REGNUM
8368 && cfun_frame_layout.last_restore_gpr > RETURN_REGNUM))
8369 {
8370 int return_regnum = find_unused_clobbered_reg();
8371 if (!return_regnum)
8372 return_regnum = 4;
8373 return_reg = gen_rtx_REG (Pmode, return_regnum);
8374
8375 addr = plus_constant (frame_pointer,
8376 offset + cfun_frame_layout.gprs_offset
8377 + (RETURN_REGNUM
8378 - cfun_frame_layout.first_save_gpr_slot)
8379 * UNITS_PER_LONG);
8380 addr = gen_rtx_MEM (Pmode, addr);
8381 set_mem_alias_set (addr, get_frame_alias_set ());
8382 emit_move_insn (return_reg, addr);
8383 }
8384 }
8385
8386 insn = restore_gprs (frame_pointer,
8387 offset + cfun_frame_layout.gprs_offset
8388 + (cfun_frame_layout.first_restore_gpr
8389 - cfun_frame_layout.first_save_gpr_slot)
8390 * UNITS_PER_LONG,
8391 cfun_frame_layout.first_restore_gpr,
8392 cfun_frame_layout.last_restore_gpr);
8393 insn = emit_insn (insn);
8394 REG_NOTES (insn) = cfa_restores;
8395 add_reg_note (insn, REG_CFA_DEF_CFA,
8396 plus_constant (stack_pointer_rtx, STACK_POINTER_OFFSET));
8397 RTX_FRAME_RELATED_P (insn) = 1;
8398 }
8399
8400 if (! sibcall)
8401 {
8402
8403 /* Return to caller. */
8404
8405 p = rtvec_alloc (2);
8406
8407 RTVEC_ELT (p, 0) = ret_rtx;
8408 RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode, return_reg);
8409 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
8410 }
8411 }
8412
8413
8414 /* Return the size in bytes of a function argument of
8415 type TYPE and/or mode MODE. At least one of TYPE or
8416 MODE must be specified. */
8417
8418 static int
8419 s390_function_arg_size (enum machine_mode mode, const_tree type)
8420 {
8421 if (type)
8422 return int_size_in_bytes (type);
8423
8424 /* No type info available for some library calls ... */
8425 if (mode != BLKmode)
8426 return GET_MODE_SIZE (mode);
8427
8428 /* If we have neither type nor mode, abort. */
8429 gcc_unreachable ();
8430 }
8431
8432 /* Return true if a function argument of type TYPE and mode MODE
8433 is to be passed in a floating-point register, if available. */
8434
8435 static bool
8436 s390_function_arg_float (enum machine_mode mode, const_tree type)
8437 {
8438 int size = s390_function_arg_size (mode, type);
8439 if (size > 8)
8440 return false;
8441
8442 /* Soft-float changes the ABI: no floating-point registers are used. */
8443 if (TARGET_SOFT_FLOAT)
8444 return false;
8445
8446 /* No type info available for some library calls ... */
8447 if (!type)
8448 return mode == SFmode || mode == DFmode || mode == SDmode || mode == DDmode;
8449
8450 /* The ABI says that record types with a single member are treated
8451 just like that member would be. */
8452 while (TREE_CODE (type) == RECORD_TYPE)
8453 {
8454 tree field, single = NULL_TREE;
8455
8456 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
8457 {
8458 if (TREE_CODE (field) != FIELD_DECL)
8459 continue;
8460
8461 if (single == NULL_TREE)
8462 single = TREE_TYPE (field);
8463 else
8464 return false;
8465 }
8466
8467 if (single == NULL_TREE)
8468 return false;
8469 else
8470 type = single;
8471 }
8472
8473 return TREE_CODE (type) == REAL_TYPE;
8474 }
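
/* To illustrate the single-member record rule above with hypothetical
   types (hard float assumed):

     struct wrap { double d; };           treated like a plain double,
                                          so it is a float argument
     struct pair { double d; float f; };  two members, so it is not

   s390_function_arg_float therefore returns true for the first type
   and false for the second.  */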
8475
8476 /* Return true if a function argument of type TYPE and mode MODE
8477 is to be passed in an integer register, or a pair of integer
8478 registers, if available. */
8479
8480 static bool
8481 s390_function_arg_integer (enum machine_mode mode, const_tree type)
8482 {
8483 int size = s390_function_arg_size (mode, type);
8484 if (size > 8)
8485 return false;
8486
8487 /* No type info available for some library calls ... */
8488 if (!type)
8489 return GET_MODE_CLASS (mode) == MODE_INT
8490 || (TARGET_SOFT_FLOAT && SCALAR_FLOAT_MODE_P (mode));
8491
8492 /* We accept small integral (and similar) types. */
8493 if (INTEGRAL_TYPE_P (type)
8494 || POINTER_TYPE_P (type)
8495 || TREE_CODE (type) == NULLPTR_TYPE
8496 || TREE_CODE (type) == OFFSET_TYPE
8497 || (TARGET_SOFT_FLOAT && TREE_CODE (type) == REAL_TYPE))
8498 return true;
8499
8500 /* We also accept structs of size 1, 2, 4, or 8 that are not
8501 passed in floating-point registers. */
8502 if (AGGREGATE_TYPE_P (type)
8503 && exact_log2 (size) >= 0
8504 && !s390_function_arg_float (mode, type))
8505 return true;
8506
8507 return false;
8508 }
8509
8510 /* Return 1 if a function argument of type TYPE and mode MODE
8511 is to be passed by reference. The ABI specifies that only
8512 structures of size 1, 2, 4, or 8 bytes are passed by value,
8513 all other structures (and complex numbers) are passed by
8514 reference. */
8515
8516 static bool
8517 s390_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
8518 enum machine_mode mode, const_tree type,
8519 bool named ATTRIBUTE_UNUSED)
8520 {
8521 int size = s390_function_arg_size (mode, type);
8522 if (size > 8)
8523 return true;
8524
8525 if (type)
8526 {
8527 if (AGGREGATE_TYPE_P (type) && exact_log2 (size) < 0)
8528 return 1;
8529
8530 if (TREE_CODE (type) == COMPLEX_TYPE
8531 || TREE_CODE (type) == VECTOR_TYPE)
8532 return 1;
8533 }
8534
8535 return 0;
8536 }
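
/* A short sketch of how the rule above plays out for hypothetical
   argument types (sizes as indicated):

     struct s8  { long long a; };   size 8,  passed by value
     struct s12 { int a, b, c; };   size 12, passed by reference
     _Complex double                passed by reference
     double                         passed by value

   Only aggregates of size 1, 2, 4, or 8 bytes are passed by value;
   larger aggregates as well as complex and vector types are passed
   by reference.  */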
8537
8538 /* Update the data in CUM to advance over an argument of mode MODE and
8539 data type TYPE. (TYPE is null for libcalls where that information
8540 may not be available.). The boolean NAMED specifies whether the
8541 argument is a named argument (as opposed to an unnamed argument
8542 matching an ellipsis). */
8543
8544 static void
8545 s390_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
8546 const_tree type, bool named ATTRIBUTE_UNUSED)
8547 {
8548 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
8549
8550 if (s390_function_arg_float (mode, type))
8551 {
8552 cum->fprs += 1;
8553 }
8554 else if (s390_function_arg_integer (mode, type))
8555 {
8556 int size = s390_function_arg_size (mode, type);
8557 cum->gprs += ((size + UNITS_PER_LONG - 1) / UNITS_PER_LONG);
8558 }
8559 else
8560 gcc_unreachable ();
8561 }
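
/* For illustration (hypothetical arguments, hard float assumed): a
   double advances cum->fprs by 1; an int or a pointer advances
   cum->gprs by 1; an 8-byte integer compiled with -m31
   (UNITS_PER_LONG == 4) advances cum->gprs by 2.  */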
8562
8563 /* Define where to put the arguments to a function.
8564 Value is zero to push the argument on the stack,
8565 or a hard register in which to store the argument.
8566
8567 MODE is the argument's machine mode.
8568 TYPE is the data type of the argument (as a tree).
8569 This is null for libcalls where that information may
8570 not be available.
8571 CUM is a variable of type CUMULATIVE_ARGS which gives info about
8572 the preceding args and about the function being called.
8573 NAMED is nonzero if this argument is a named parameter
8574 (otherwise it is an extra parameter matching an ellipsis).
8575
8576 On S/390, we use general purpose registers 2 through 6 to
8577 pass integer, pointer, and certain structure arguments, and
8578 floating point registers 0 and 2 (0, 2, 4, and 6 on 64-bit)
8579 to pass floating point arguments. All remaining arguments
8580 are pushed to the stack. */
8581
8582 static rtx
8583 s390_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
8584 const_tree type, bool named ATTRIBUTE_UNUSED)
8585 {
8586 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
8587
8588 if (s390_function_arg_float (mode, type))
8589 {
8590 if (cum->fprs + 1 > FP_ARG_NUM_REG)
8591 return 0;
8592 else
8593 return gen_rtx_REG (mode, cum->fprs + 16);
8594 }
8595 else if (s390_function_arg_integer (mode, type))
8596 {
8597 int size = s390_function_arg_size (mode, type);
8598 int n_gprs = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
8599
8600 if (cum->gprs + n_gprs > GP_ARG_NUM_REG)
8601 return 0;
8602 else if (n_gprs == 1 || UNITS_PER_WORD == UNITS_PER_LONG)
8603 return gen_rtx_REG (mode, cum->gprs + 2);
8604 else if (n_gprs == 2)
8605 {
8606 rtvec p = rtvec_alloc (2);
8607
8608 RTVEC_ELT (p, 0)
8609 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 2),
8610 const0_rtx);
8611 RTVEC_ELT (p, 1)
8612 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 3),
8613 GEN_INT (4));
8614
8615 return gen_rtx_PARALLEL (mode, p);
8616 }
8617 }
8618
8619 /* After the real arguments, expand_call calls us once again
8620 with a void_type_node type. Whatever we return here is
8621 passed as operand 2 to the call expanders.
8622
8623 We don't need this feature ... */
8624 else if (type == void_type_node)
8625 return const0_rtx;
8626
8627 gcc_unreachable ();
8628 }
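
/* A worked example of the conventions described above, for a
   hypothetical prototype compiled for the 64-bit ABI with hard float:

     void f (int a, double d, long b, float x, void *p);

   a -> %r2, d -> %f0, b -> %r3, x -> %f2, p -> %r4.  Two further
   integer arguments would still fit into %r5 and %r6; anything beyond
   that (or beyond %f6 for floating-point arguments) is pushed to the
   stack.  */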
8629
8630 /* Return true if return values of type TYPE should be returned
8631 in a memory buffer whose address is passed by the caller as
8632 hidden first argument. */
8633
8634 static bool
8635 s390_return_in_memory (const_tree type, const_tree fundecl ATTRIBUTE_UNUSED)
8636 {
8637 /* We accept small integral (and similar) types. */
8638 if (INTEGRAL_TYPE_P (type)
8639 || POINTER_TYPE_P (type)
8640 || TREE_CODE (type) == OFFSET_TYPE
8641 || TREE_CODE (type) == REAL_TYPE)
8642 return int_size_in_bytes (type) > 8;
8643
8644 /* Aggregates and similar constructs are always returned
8645 in memory. */
8646 if (AGGREGATE_TYPE_P (type)
8647 || TREE_CODE (type) == COMPLEX_TYPE
8648 || TREE_CODE (type) == VECTOR_TYPE)
8649 return true;
8650
8651 /* ??? We get called on all sorts of random stuff from
8652 aggregate_value_p. We can't abort, but it's not clear
8653 what's safe to return. Pretend it's a struct I guess. */
8654 return true;
8655 }
8656
8657 /* Function arguments and return values are promoted to word size. */
8658
8659 static enum machine_mode
8660 s390_promote_function_mode (const_tree type, enum machine_mode mode,
8661 int *punsignedp,
8662 const_tree fntype ATTRIBUTE_UNUSED,
8663 int for_return ATTRIBUTE_UNUSED)
8664 {
8665 if (INTEGRAL_MODE_P (mode)
8666 && GET_MODE_SIZE (mode) < UNITS_PER_LONG)
8667 {
8668 if (type != NULL_TREE && POINTER_TYPE_P (type))
8669 *punsignedp = POINTERS_EXTEND_UNSIGNED;
8670 return Pmode;
8671 }
8672
8673 return mode;
8674 }
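
/* Sketch of the promotion rule above for hypothetical values: on a
   64-bit target (UNITS_PER_LONG == 8) a char, short or int argument
   or return value is promoted to Pmode (DImode), and a pointer is in
   addition marked for unsigned extension (POINTERS_EXTEND_UNSIGNED).
   A DImode value is already word sized and is returned unchanged.  */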
8675
8676 /* Define where to return a (scalar) value of type RET_TYPE.
8677 If RET_TYPE is null, define where to return a (scalar)
8678 value of mode MODE from a libcall. */
8679
8680 static rtx
8681 s390_function_and_libcall_value (enum machine_mode mode,
8682 const_tree ret_type,
8683 const_tree fntype_or_decl,
8684 bool outgoing ATTRIBUTE_UNUSED)
8685 {
8686 /* For normal functions perform the promotion as
8687 promote_function_mode would do. */
8688 if (ret_type)
8689 {
8690 int unsignedp = TYPE_UNSIGNED (ret_type);
8691 mode = promote_function_mode (ret_type, mode, &unsignedp,
8692 fntype_or_decl, 1);
8693 }
8694
8695 gcc_assert (GET_MODE_CLASS (mode) == MODE_INT || SCALAR_FLOAT_MODE_P (mode));
8696 gcc_assert (GET_MODE_SIZE (mode) <= 8);
8697
8698 if (TARGET_HARD_FLOAT && SCALAR_FLOAT_MODE_P (mode))
8699 return gen_rtx_REG (mode, 16);
8700 else if (GET_MODE_SIZE (mode) <= UNITS_PER_LONG
8701 || UNITS_PER_LONG == UNITS_PER_WORD)
8702 return gen_rtx_REG (mode, 2);
8703 else if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_LONG)
8704 {
8705 /* This case is triggered when returning a 64 bit value with
8706 -m31 -mzarch. Although the value would fit into a single
8707 register it has to be forced into a 32 bit register pair in
8708 order to match the ABI. */
8709 rtvec p = rtvec_alloc (2);
8710
8711 RTVEC_ELT (p, 0)
8712 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 2), const0_rtx);
8713 RTVEC_ELT (p, 1)
8714 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 3), GEN_INT (4));
8715
8716 return gen_rtx_PARALLEL (mode, p);
8717 }
8718
8719 gcc_unreachable ();
8720 }
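
/* Illustrative placements implied by the code above (hypothetical
   return types, hard float assumed): an int or long comes back in
   %r2, a float or double in %f0 (hard register 16), and a 64-bit
   integer compiled with -m31 -mzarch in the %r2/%r3 pair described
   by the PARALLEL built above.  */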
8721
8722 /* Define where to return a scalar return value of type RET_TYPE. */
8723
8724 static rtx
8725 s390_function_value (const_tree ret_type, const_tree fn_decl_or_type,
8726 bool outgoing)
8727 {
8728 return s390_function_and_libcall_value (TYPE_MODE (ret_type), ret_type,
8729 fn_decl_or_type, outgoing);
8730 }
8731
8732 /* Define where to return a scalar libcall return value of mode
8733 MODE. */
8734
8735 static rtx
8736 s390_libcall_value (enum machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
8737 {
8738 return s390_function_and_libcall_value (mode, NULL_TREE,
8739 NULL_TREE, true);
8740 }
8741
8742
8743 /* Create and return the va_list datatype.
8744
8745 On S/390, va_list is an array type equivalent to
8746
8747 typedef struct __va_list_tag
8748 {
8749 long __gpr;
8750 long __fpr;
8751 void *__overflow_arg_area;
8752 void *__reg_save_area;
8753 } va_list[1];
8754
8755 where __gpr and __fpr hold the number of general purpose
8756 or floating point arguments used up to now, respectively,
8757 __overflow_arg_area points to the stack location of the
8758 next argument passed on the stack, and __reg_save_area
8759 always points to the start of the register area in the
8760 call frame of the current function. The function prologue
8761 saves all registers used for argument passing into this
8762 area if the function uses variable arguments. */
8763
8764 static tree
8765 s390_build_builtin_va_list (void)
8766 {
8767 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
8768
8769 record = lang_hooks.types.make_type (RECORD_TYPE);
8770
8771 type_decl =
8772 build_decl (BUILTINS_LOCATION,
8773 TYPE_DECL, get_identifier ("__va_list_tag"), record);
8774
8775 f_gpr = build_decl (BUILTINS_LOCATION,
8776 FIELD_DECL, get_identifier ("__gpr"),
8777 long_integer_type_node);
8778 f_fpr = build_decl (BUILTINS_LOCATION,
8779 FIELD_DECL, get_identifier ("__fpr"),
8780 long_integer_type_node);
8781 f_ovf = build_decl (BUILTINS_LOCATION,
8782 FIELD_DECL, get_identifier ("__overflow_arg_area"),
8783 ptr_type_node);
8784 f_sav = build_decl (BUILTINS_LOCATION,
8785 FIELD_DECL, get_identifier ("__reg_save_area"),
8786 ptr_type_node);
8787
8788 va_list_gpr_counter_field = f_gpr;
8789 va_list_fpr_counter_field = f_fpr;
8790
8791 DECL_FIELD_CONTEXT (f_gpr) = record;
8792 DECL_FIELD_CONTEXT (f_fpr) = record;
8793 DECL_FIELD_CONTEXT (f_ovf) = record;
8794 DECL_FIELD_CONTEXT (f_sav) = record;
8795
8796 TYPE_STUB_DECL (record) = type_decl;
8797 TYPE_NAME (record) = type_decl;
8798 TYPE_FIELDS (record) = f_gpr;
8799 DECL_CHAIN (f_gpr) = f_fpr;
8800 DECL_CHAIN (f_fpr) = f_ovf;
8801 DECL_CHAIN (f_ovf) = f_sav;
8802
8803 layout_type (record);
8804
8805 /* The correct type is an array type of one element. */
8806 return build_array_type (record, build_index_type (size_zero_node));
8807 }
8808
8809 /* Implement va_start by filling the va_list structure VALIST.
8810 STDARG_P is always true, and ignored.
8811 NEXTARG points to the first anonymous stack argument.
8812
8813 The following global variables are used to initialize
8814 the va_list structure:
8815
8816 crtl->args.info:
8817 holds number of gprs and fprs used for named arguments.
8818 crtl->args.arg_offset_rtx:
8819 holds the offset of the first anonymous stack argument
8820 (relative to the virtual arg pointer). */
8821
8822 static void
8823 s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
8824 {
8825 HOST_WIDE_INT n_gpr, n_fpr;
8826 int off;
8827 tree f_gpr, f_fpr, f_ovf, f_sav;
8828 tree gpr, fpr, ovf, sav, t;
8829
8830 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
8831 f_fpr = DECL_CHAIN (f_gpr);
8832 f_ovf = DECL_CHAIN (f_fpr);
8833 f_sav = DECL_CHAIN (f_ovf);
8834
8835 valist = build_simple_mem_ref (valist);
8836 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
8837 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
8838 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
8839 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
8840
8841 /* Count number of gp and fp argument registers used. */
8842
8843 n_gpr = crtl->args.info.gprs;
8844 n_fpr = crtl->args.info.fprs;
8845
8846 if (cfun->va_list_gpr_size)
8847 {
8848 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
8849 build_int_cst (NULL_TREE, n_gpr));
8850 TREE_SIDE_EFFECTS (t) = 1;
8851 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8852 }
8853
8854 if (cfun->va_list_fpr_size)
8855 {
8856 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
8857 build_int_cst (NULL_TREE, n_fpr));
8858 TREE_SIDE_EFFECTS (t) = 1;
8859 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8860 }
8861
8862 /* Find the overflow area. */
8863 if (n_gpr + cfun->va_list_gpr_size > GP_ARG_NUM_REG
8864 || n_fpr + cfun->va_list_fpr_size > FP_ARG_NUM_REG)
8865 {
8866 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
8867
8868 off = INTVAL (crtl->args.arg_offset_rtx);
8869 off = off < 0 ? 0 : off;
8870 if (TARGET_DEBUG_ARG)
8871 fprintf (stderr, "va_start: n_gpr = %d, n_fpr = %d off %d\n",
8872 (int)n_gpr, (int)n_fpr, off);
8873
8874 t = fold_build_pointer_plus_hwi (t, off);
8875
8876 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
8877 TREE_SIDE_EFFECTS (t) = 1;
8878 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8879 }
8880
8881 /* Find the register save area. */
8882 if ((cfun->va_list_gpr_size && n_gpr < GP_ARG_NUM_REG)
8883 || (cfun->va_list_fpr_size && n_fpr < FP_ARG_NUM_REG))
8884 {
8885 t = make_tree (TREE_TYPE (sav), return_address_pointer_rtx);
8886 t = fold_build_pointer_plus_hwi (t, -RETURN_REGNUM * UNITS_PER_LONG);
8887
8888 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
8889 TREE_SIDE_EFFECTS (t) = 1;
8890 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8891 }
8892 }
8893
8894 /* Implement va_arg by updating the va_list structure
8895 VALIST as required to retrieve an argument of type
8896 TYPE, and returning that argument.
8897
8898 Generates code equivalent to:
8899
8900 if (integral value) {
8901 if (size <= 4 && args.gpr < 5 ||
8902 size > 4 && args.gpr < 4 )
8903 ret = args.reg_save_area[args.gpr+8]
8904 else
8905 ret = *args.overflow_arg_area++;
8906 } else if (float value) {
8907 if (args.fpr < 2)
8908 ret = args.reg_save_area[args.fpr+64]
8909 else
8910 ret = *args.overflow_arg_area++;
8911 } else if (aggregate value) {
8912 if (args.gpr < 5)
8913 ret = *args.reg_save_area[args.gpr]
8914 else
8915 ret = **args.overflow_arg_area++;
8916 } */
8917
8918 static tree
8919 s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
8920 gimple_seq *post_p ATTRIBUTE_UNUSED)
8921 {
8922 tree f_gpr, f_fpr, f_ovf, f_sav;
8923 tree gpr, fpr, ovf, sav, reg, t, u;
8924 int indirect_p, size, n_reg, sav_ofs, sav_scale, max_reg;
8925 tree lab_false, lab_over, addr;
8926
8927 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
8928 f_fpr = DECL_CHAIN (f_gpr);
8929 f_ovf = DECL_CHAIN (f_fpr);
8930 f_sav = DECL_CHAIN (f_ovf);
8931
8932 valist = build_va_arg_indirect_ref (valist);
8933 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
8934 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
8935 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
8936
8937 /* The tree for args* cannot be shared between gpr/fpr and ovf since
8938 both appear on a lhs. */
8939 valist = unshare_expr (valist);
8940 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
8941
8942 size = int_size_in_bytes (type);
8943
8944 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
8945 {
8946 if (TARGET_DEBUG_ARG)
8947 {
8948 fprintf (stderr, "va_arg: aggregate type");
8949 debug_tree (type);
8950 }
8951
8952 /* Aggregates are passed by reference. */
8953 indirect_p = 1;
8954 reg = gpr;
8955 n_reg = 1;
8956
8957 /* Kernel stack layout on 31 bit: it is assumed here that no padding
8958 will be added by s390_frame_info because for va_args an even number
8959 of GPRs always has to be saved (r15-r2 = 14 regs). */
8960 sav_ofs = 2 * UNITS_PER_LONG;
8961 sav_scale = UNITS_PER_LONG;
8962 size = UNITS_PER_LONG;
8963 max_reg = GP_ARG_NUM_REG - n_reg;
8964 }
8965 else if (s390_function_arg_float (TYPE_MODE (type), type))
8966 {
8967 if (TARGET_DEBUG_ARG)
8968 {
8969 fprintf (stderr, "va_arg: float type");
8970 debug_tree (type);
8971 }
8972
8973 /* FP args go in FP registers, if present. */
8974 indirect_p = 0;
8975 reg = fpr;
8976 n_reg = 1;
8977 sav_ofs = 16 * UNITS_PER_LONG;
8978 sav_scale = 8;
8979 max_reg = FP_ARG_NUM_REG - n_reg;
8980 }
8981 else
8982 {
8983 if (TARGET_DEBUG_ARG)
8984 {
8985 fprintf (stderr, "va_arg: other type");
8986 debug_tree (type);
8987 }
8988
8989 /* Otherwise into GP registers. */
8990 indirect_p = 0;
8991 reg = gpr;
8992 n_reg = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
8993
8994 /* Kernel stack layout on 31 bit: it is assumed here that no padding
8995 will be added by s390_frame_info because for va_args an even number
8996 of GPRs always has to be saved (r15-r2 = 14 regs). */
8997 sav_ofs = 2 * UNITS_PER_LONG;
8998
8999 if (size < UNITS_PER_LONG)
9000 sav_ofs += UNITS_PER_LONG - size;
9001
9002 sav_scale = UNITS_PER_LONG;
9003 max_reg = GP_ARG_NUM_REG - n_reg;
9004 }
9005
9006 /* Pull the value out of the saved registers ... */
9007
9008 lab_false = create_artificial_label (UNKNOWN_LOCATION);
9009 lab_over = create_artificial_label (UNKNOWN_LOCATION);
9010 addr = create_tmp_var (ptr_type_node, "addr");
9011
9012 t = fold_convert (TREE_TYPE (reg), size_int (max_reg));
9013 t = build2 (GT_EXPR, boolean_type_node, reg, t);
9014 u = build1 (GOTO_EXPR, void_type_node, lab_false);
9015 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
9016 gimplify_and_add (t, pre_p);
9017
9018 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
9019 u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
9020 fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
9021 t = fold_build_pointer_plus (t, u);
9022
9023 gimplify_assign (addr, t, pre_p);
9024
9025 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
9026
9027 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
9028
9029
9030 /* ... Otherwise out of the overflow area. */
9031
9032 t = ovf;
9033 if (size < UNITS_PER_LONG)
9034 t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG - size);
9035
9036 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
9037
9038 gimplify_assign (addr, t, pre_p);
9039
9040 t = fold_build_pointer_plus_hwi (t, size);
9041 gimplify_assign (ovf, t, pre_p);
9042
9043 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
9044
9045
9046 /* Increment register save count. */
9047
9048 u = build2 (PREINCREMENT_EXPR, TREE_TYPE (reg), reg,
9049 fold_convert (TREE_TYPE (reg), size_int (n_reg)));
9050 gimplify_and_add (u, pre_p);
9051
9052 if (indirect_p)
9053 {
9054 t = build_pointer_type_for_mode (build_pointer_type (type),
9055 ptr_mode, true);
9056 addr = fold_convert (t, addr);
9057 addr = build_va_arg_indirect_ref (addr);
9058 }
9059 else
9060 {
9061 t = build_pointer_type_for_mode (type, ptr_mode, true);
9062 addr = fold_convert (t, addr);
9063 }
9064
9065 return build_va_arg_indirect_ref (addr);
9066 }
9067
9068
9069 /* Builtins. */
9070
9071 enum s390_builtin
9072 {
9073 S390_BUILTIN_THREAD_POINTER,
9074 S390_BUILTIN_SET_THREAD_POINTER,
9075
9076 S390_BUILTIN_max
9077 };
9078
9079 static enum insn_code const code_for_builtin_64[S390_BUILTIN_max] = {
9080 CODE_FOR_get_tp_64,
9081 CODE_FOR_set_tp_64
9082 };
9083
9084 static enum insn_code const code_for_builtin_31[S390_BUILTIN_max] = {
9085 CODE_FOR_get_tp_31,
9086 CODE_FOR_set_tp_31
9087 };
9088
9089 static void
9090 s390_init_builtins (void)
9091 {
9092 tree ftype;
9093
9094 ftype = build_function_type_list (ptr_type_node, NULL_TREE);
9095 add_builtin_function ("__builtin_thread_pointer", ftype,
9096 S390_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
9097 NULL, NULL_TREE);
9098
9099 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
9100 add_builtin_function ("__builtin_set_thread_pointer", ftype,
9101 S390_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
9102 NULL, NULL_TREE);
9103 }
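
/* A minimal usage sketch for the two builtins registered above
   (hypothetical user code, not part of this file):

     void *tp = __builtin_thread_pointer ();
     __builtin_set_thread_pointer (tp);

   The first call expands via CODE_FOR_get_tp_64 or CODE_FOR_get_tp_31,
   the second via CODE_FOR_set_tp_64 or CODE_FOR_set_tp_31, depending
   on TARGET_64BIT (see s390_expand_builtin below).  */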
9104
9105 /* Expand an expression EXP that calls a built-in function,
9106 with result going to TARGET if that's convenient
9107 (and in mode MODE if that's convenient).
9108 SUBTARGET may be used as the target for computing one of EXP's operands.
9109 IGNORE is nonzero if the value is to be ignored. */
9110
9111 static rtx
9112 s390_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
9113 enum machine_mode mode ATTRIBUTE_UNUSED,
9114 int ignore ATTRIBUTE_UNUSED)
9115 {
9116 #define MAX_ARGS 2
9117
9118 enum insn_code const *code_for_builtin =
9119 TARGET_64BIT ? code_for_builtin_64 : code_for_builtin_31;
9120
9121 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
9122 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
9123 enum insn_code icode;
9124 rtx op[MAX_ARGS], pat;
9125 int arity;
9126 bool nonvoid;
9127 tree arg;
9128 call_expr_arg_iterator iter;
9129
9130 if (fcode >= S390_BUILTIN_max)
9131 internal_error ("bad builtin fcode");
9132 icode = code_for_builtin[fcode];
9133 if (icode == 0)
9134 internal_error ("bad builtin fcode");
9135
9136 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
9137
9138 arity = 0;
9139 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
9140 {
9141 const struct insn_operand_data *insn_op;
9142
9143 if (arg == error_mark_node)
9144 return NULL_RTX;
9145 if (arity > MAX_ARGS)
9146 return NULL_RTX;
9147
9148 insn_op = &insn_data[icode].operand[arity + nonvoid];
9149
9150 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
9151
9152 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
9153 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
9154 arity++;
9155 }
9156
9157 if (nonvoid)
9158 {
9159 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9160 if (!target
9161 || GET_MODE (target) != tmode
9162 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
9163 target = gen_reg_rtx (tmode);
9164 }
9165
9166 switch (arity)
9167 {
9168 case 0:
9169 pat = GEN_FCN (icode) (target);
9170 break;
9171 case 1:
9172 if (nonvoid)
9173 pat = GEN_FCN (icode) (target, op[0]);
9174 else
9175 pat = GEN_FCN (icode) (op[0]);
9176 break;
9177 case 2:
9178 pat = GEN_FCN (icode) (target, op[0], op[1]);
9179 break;
9180 default:
9181 gcc_unreachable ();
9182 }
9183 if (!pat)
9184 return NULL_RTX;
9185 emit_insn (pat);
9186
9187 if (nonvoid)
9188 return target;
9189 else
9190 return const0_rtx;
9191 }
9192
9193
9194 /* Output assembly code for the trampoline template to
9195 stdio stream FILE.
9196
9197 On S/390, we use gpr 1 internally in the trampoline code;
9198 gpr 0 is used to hold the static chain. */
9199
9200 static void
9201 s390_asm_trampoline_template (FILE *file)
9202 {
9203 rtx op[2];
9204 op[0] = gen_rtx_REG (Pmode, 0);
9205 op[1] = gen_rtx_REG (Pmode, 1);
9206
9207 if (TARGET_64BIT)
9208 {
9209 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
9210 output_asm_insn ("lmg\t%0,%1,14(%1)", op); /* 6 byte */
9211 output_asm_insn ("br\t%1", op); /* 2 byte */
9212 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 10));
9213 }
9214 else
9215 {
9216 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
9217 output_asm_insn ("lm\t%0,%1,6(%1)", op); /* 4 byte */
9218 output_asm_insn ("br\t%1", op); /* 2 byte */
9219 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 8));
9220 }
9221 }
9222
9223 /* Emit RTL insns to initialize the variable parts of a trampoline.
9224 FNDECL is the declaration of the target function whose address is
9225 stored in the trampoline. CXT is an RTX for the static chain value. */
9226
9227 static void
9228 s390_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
9229 {
9230 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
9231 rtx mem;
9232
9233 emit_block_move (m_tramp, assemble_trampoline_template (),
9234 GEN_INT (2 * UNITS_PER_LONG), BLOCK_OP_NORMAL);
9235
9236 mem = adjust_address (m_tramp, Pmode, 2 * UNITS_PER_LONG);
9237 emit_move_insn (mem, cxt);
9238 mem = adjust_address (m_tramp, Pmode, 3 * UNITS_PER_LONG);
9239 emit_move_insn (mem, fnaddr);
9240 }
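
/* Resulting trampoline layout, sketched from the code above (offsets
   in multiples of UNITS_PER_LONG):

     0 .. 2*UNITS_PER_LONG - 1   copy of the assembler template
     2*UNITS_PER_LONG            static chain value (loaded into %r0)
     3*UNITS_PER_LONG            target function address (loaded into %r1)

   The template's lmg/lm instruction fetches both values relative to
   %r1 as set up by the initial basr, and br %r1 then enters the
   target function.  */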
9241
9242 /* Output assembler code to FILE to increment profiler label # LABELNO
9243 for profiling a function entry. */
9244
9245 void
9246 s390_function_profiler (FILE *file, int labelno)
9247 {
9248 rtx op[7];
9249
9250 char label[128];
9251 ASM_GENERATE_INTERNAL_LABEL (label, "LP", labelno);
9252
9253 fprintf (file, "# function profiler \n");
9254
9255 op[0] = gen_rtx_REG (Pmode, RETURN_REGNUM);
9256 op[1] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
9257 op[1] = gen_rtx_MEM (Pmode, plus_constant (op[1], UNITS_PER_LONG));
9258
9259 op[2] = gen_rtx_REG (Pmode, 1);
9260 op[3] = gen_rtx_SYMBOL_REF (Pmode, label);
9261 SYMBOL_REF_FLAGS (op[3]) = SYMBOL_FLAG_LOCAL;
9262
9263 op[4] = gen_rtx_SYMBOL_REF (Pmode, "_mcount");
9264 if (flag_pic)
9265 {
9266 op[4] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[4]), UNSPEC_PLT);
9267 op[4] = gen_rtx_CONST (Pmode, op[4]);
9268 }
9269
9270 if (TARGET_64BIT)
9271 {
9272 output_asm_insn ("stg\t%0,%1", op);
9273 output_asm_insn ("larl\t%2,%3", op);
9274 output_asm_insn ("brasl\t%0,%4", op);
9275 output_asm_insn ("lg\t%0,%1", op);
9276 }
9277 else if (!flag_pic)
9278 {
9279 op[6] = gen_label_rtx ();
9280
9281 output_asm_insn ("st\t%0,%1", op);
9282 output_asm_insn ("bras\t%2,%l6", op);
9283 output_asm_insn (".long\t%4", op);
9284 output_asm_insn (".long\t%3", op);
9285 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
9286 output_asm_insn ("l\t%0,0(%2)", op);
9287 output_asm_insn ("l\t%2,4(%2)", op);
9288 output_asm_insn ("basr\t%0,%0", op);
9289 output_asm_insn ("l\t%0,%1", op);
9290 }
9291 else
9292 {
9293 op[5] = gen_label_rtx ();
9294 op[6] = gen_label_rtx ();
9295
9296 output_asm_insn ("st\t%0,%1", op);
9297 output_asm_insn ("bras\t%2,%l6", op);
9298 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[5]));
9299 output_asm_insn (".long\t%4-%l5", op);
9300 output_asm_insn (".long\t%3-%l5", op);
9301 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
9302 output_asm_insn ("lr\t%0,%2", op);
9303 output_asm_insn ("a\t%0,0(%2)", op);
9304 output_asm_insn ("a\t%2,4(%2)", op);
9305 output_asm_insn ("basr\t%0,%0", op);
9306 output_asm_insn ("l\t%0,%1", op);
9307 }
9308 }
9309
9310 /* Encode symbol attributes (local vs. global, tls model) of a SYMBOL_REF
9311 into its SYMBOL_REF_FLAGS. */
9312
9313 static void
9314 s390_encode_section_info (tree decl, rtx rtl, int first)
9315 {
9316 default_encode_section_info (decl, rtl, first);
9317
9318 if (TREE_CODE (decl) == VAR_DECL)
9319 {
9320 /* If a variable has a forced alignment to < 2 bytes, mark it
9321 with SYMBOL_FLAG_ALIGN1 to prevent it from being used as a LARL
9322 operand. */
9323 if (DECL_USER_ALIGN (decl) && DECL_ALIGN (decl) < 16)
9324 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_ALIGN1;
9325 if (!DECL_SIZE (decl)
9326 || !DECL_ALIGN (decl)
9327 || !host_integerp (DECL_SIZE (decl), 0)
9328 || (DECL_ALIGN (decl) <= 64
9329 && DECL_ALIGN (decl) != tree_low_cst (DECL_SIZE (decl), 0)))
9330 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
9331 }
9332
9333 /* Literal pool references don't have a decl so they are handled
9334 differently here. We rely on the information in the MEM_ALIGN
9335 entry to decide upon natural alignment. */
9336 if (MEM_P (rtl)
9337 && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
9338 && TREE_CONSTANT_POOL_ADDRESS_P (XEXP (rtl, 0))
9339 && (MEM_ALIGN (rtl) == 0
9340 || GET_MODE_BITSIZE (GET_MODE (rtl)) == 0
9341 || MEM_ALIGN (rtl) < GET_MODE_BITSIZE (GET_MODE (rtl))))
9342 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
9343 }
9344
9345 /* Output thunk to FILE that implements a C++ virtual function call (with
9346 multiple inheritance) to FUNCTION. The thunk adjusts the this pointer
9347 by DELTA, and unless VCALL_OFFSET is zero, applies an additional adjustment
9348 stored at VCALL_OFFSET in the vtable whose address is located at offset 0
9349 relative to the resulting this pointer. */
9350
9351 static void
9352 s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
9353 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
9354 tree function)
9355 {
9356 rtx op[10];
9357 int nonlocal = 0;
9358
9359 /* Make sure unwind info is emitted for the thunk if needed. */
9360 final_start_function (emit_barrier (), file, 1);
9361
9362 /* Operand 0 is the target function. */
9363 op[0] = XEXP (DECL_RTL (function), 0);
9364 if (flag_pic && !SYMBOL_REF_LOCAL_P (op[0]))
9365 {
9366 nonlocal = 1;
9367 op[0] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[0]),
9368 TARGET_64BIT ? UNSPEC_PLT : UNSPEC_GOT);
9369 op[0] = gen_rtx_CONST (Pmode, op[0]);
9370 }
9371
9372 /* Operand 1 is the 'this' pointer. */
9373 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
9374 op[1] = gen_rtx_REG (Pmode, 3);
9375 else
9376 op[1] = gen_rtx_REG (Pmode, 2);
9377
9378 /* Operand 2 is the delta. */
9379 op[2] = GEN_INT (delta);
9380
9381 /* Operand 3 is the vcall_offset. */
9382 op[3] = GEN_INT (vcall_offset);
9383
9384 /* Operand 4 is the temporary register. */
9385 op[4] = gen_rtx_REG (Pmode, 1);
9386
9387 /* Operands 5 to 8 can be used as labels. */
9388 op[5] = NULL_RTX;
9389 op[6] = NULL_RTX;
9390 op[7] = NULL_RTX;
9391 op[8] = NULL_RTX;
9392
9393 /* Operand 9 can be used for temporary register. */
9394 op[9] = NULL_RTX;
9395
9396 /* Generate code. */
9397 if (TARGET_64BIT)
9398 {
9399 /* Setup literal pool pointer if required. */
9400 if ((!DISP_IN_RANGE (delta)
9401 && !CONST_OK_FOR_K (delta)
9402 && !CONST_OK_FOR_Os (delta))
9403 || (!DISP_IN_RANGE (vcall_offset)
9404 && !CONST_OK_FOR_K (vcall_offset)
9405 && !CONST_OK_FOR_Os (vcall_offset)))
9406 {
9407 op[5] = gen_label_rtx ();
9408 output_asm_insn ("larl\t%4,%5", op);
9409 }
9410
9411 /* Add DELTA to this pointer. */
9412 if (delta)
9413 {
9414 if (CONST_OK_FOR_J (delta))
9415 output_asm_insn ("la\t%1,%2(%1)", op);
9416 else if (DISP_IN_RANGE (delta))
9417 output_asm_insn ("lay\t%1,%2(%1)", op);
9418 else if (CONST_OK_FOR_K (delta))
9419 output_asm_insn ("aghi\t%1,%2", op);
9420 else if (CONST_OK_FOR_Os (delta))
9421 output_asm_insn ("agfi\t%1,%2", op);
9422 else
9423 {
9424 op[6] = gen_label_rtx ();
9425 output_asm_insn ("agf\t%1,%6-%5(%4)", op);
9426 }
9427 }
9428
9429 /* Perform vcall adjustment. */
9430 if (vcall_offset)
9431 {
9432 if (DISP_IN_RANGE (vcall_offset))
9433 {
9434 output_asm_insn ("lg\t%4,0(%1)", op);
9435 output_asm_insn ("ag\t%1,%3(%4)", op);
9436 }
9437 else if (CONST_OK_FOR_K (vcall_offset))
9438 {
9439 output_asm_insn ("lghi\t%4,%3", op);
9440 output_asm_insn ("ag\t%4,0(%1)", op);
9441 output_asm_insn ("ag\t%1,0(%4)", op);
9442 }
9443 else if (CONST_OK_FOR_Os (vcall_offset))
9444 {
9445 output_asm_insn ("lgfi\t%4,%3", op);
9446 output_asm_insn ("ag\t%4,0(%1)", op);
9447 output_asm_insn ("ag\t%1,0(%4)", op);
9448 }
9449 else
9450 {
9451 op[7] = gen_label_rtx ();
9452 output_asm_insn ("llgf\t%4,%7-%5(%4)", op);
9453 output_asm_insn ("ag\t%4,0(%1)", op);
9454 output_asm_insn ("ag\t%1,0(%4)", op);
9455 }
9456 }
9457
9458 /* Jump to target. */
9459 output_asm_insn ("jg\t%0", op);
9460
9461 /* Output literal pool if required. */
9462 if (op[5])
9463 {
9464 output_asm_insn (".align\t4", op);
9465 targetm.asm_out.internal_label (file, "L",
9466 CODE_LABEL_NUMBER (op[5]));
9467 }
9468 if (op[6])
9469 {
9470 targetm.asm_out.internal_label (file, "L",
9471 CODE_LABEL_NUMBER (op[6]));
9472 output_asm_insn (".long\t%2", op);
9473 }
9474 if (op[7])
9475 {
9476 targetm.asm_out.internal_label (file, "L",
9477 CODE_LABEL_NUMBER (op[7]));
9478 output_asm_insn (".long\t%3", op);
9479 }
9480 }
9481 else
9482 {
9483 /* Setup base pointer if required. */
9484 if (!vcall_offset
9485 || (!DISP_IN_RANGE (delta)
9486 && !CONST_OK_FOR_K (delta)
9487 && !CONST_OK_FOR_Os (delta))
9488 || (!DISP_IN_RANGE (delta)
9489 && !CONST_OK_FOR_K (vcall_offset)
9490 && !CONST_OK_FOR_Os (vcall_offset)))
9491 {
9492 op[5] = gen_label_rtx ();
9493 output_asm_insn ("basr\t%4,0", op);
9494 targetm.asm_out.internal_label (file, "L",
9495 CODE_LABEL_NUMBER (op[5]));
9496 }
9497
9498 /* Add DELTA to this pointer. */
9499 if (delta)
9500 {
9501 if (CONST_OK_FOR_J (delta))
9502 output_asm_insn ("la\t%1,%2(%1)", op);
9503 else if (DISP_IN_RANGE (delta))
9504 output_asm_insn ("lay\t%1,%2(%1)", op);
9505 else if (CONST_OK_FOR_K (delta))
9506 output_asm_insn ("ahi\t%1,%2", op);
9507 else if (CONST_OK_FOR_Os (delta))
9508 output_asm_insn ("afi\t%1,%2", op);
9509 else
9510 {
9511 op[6] = gen_label_rtx ();
9512 output_asm_insn ("a\t%1,%6-%5(%4)", op);
9513 }
9514 }
9515
9516 /* Perform vcall adjustment. */
9517 if (vcall_offset)
9518 {
9519 if (CONST_OK_FOR_J (vcall_offset))
9520 {
9521 output_asm_insn ("l\t%4,0(%1)", op);
9522 output_asm_insn ("a\t%1,%3(%4)", op);
9523 }
9524 else if (DISP_IN_RANGE (vcall_offset))
9525 {
9526 output_asm_insn ("l\t%4,0(%1)", op);
9527 output_asm_insn ("ay\t%1,%3(%4)", op);
9528 }
9529 else if (CONST_OK_FOR_K (vcall_offset))
9530 {
9531 output_asm_insn ("lhi\t%4,%3", op);
9532 output_asm_insn ("a\t%4,0(%1)", op);
9533 output_asm_insn ("a\t%1,0(%4)", op);
9534 }
9535 else if (CONST_OK_FOR_Os (vcall_offset))
9536 {
9537 output_asm_insn ("iilf\t%4,%3", op);
9538 output_asm_insn ("a\t%4,0(%1)", op);
9539 output_asm_insn ("a\t%1,0(%4)", op);
9540 }
9541 else
9542 {
9543 op[7] = gen_label_rtx ();
9544 output_asm_insn ("l\t%4,%7-%5(%4)", op);
9545 output_asm_insn ("a\t%4,0(%1)", op);
9546 output_asm_insn ("a\t%1,0(%4)", op);
9547 }
9548
9549 /* We had to clobber the base pointer register.
9550 Re-setup the base pointer (with a different base). */
9551 op[5] = gen_label_rtx ();
9552 output_asm_insn ("basr\t%4,0", op);
9553 targetm.asm_out.internal_label (file, "L",
9554 CODE_LABEL_NUMBER (op[5]));
9555 }
9556
9557 /* Jump to target. */
9558 op[8] = gen_label_rtx ();
9559
9560 if (!flag_pic)
9561 output_asm_insn ("l\t%4,%8-%5(%4)", op);
9562 else if (!nonlocal)
9563 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9564 /* We cannot call through .plt, since .plt requires %r12 loaded. */
9565 else if (flag_pic == 1)
9566 {
9567 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9568 output_asm_insn ("l\t%4,%0(%4)", op);
9569 }
9570 else if (flag_pic == 2)
9571 {
9572 op[9] = gen_rtx_REG (Pmode, 0);
9573 output_asm_insn ("l\t%9,%8-4-%5(%4)", op);
9574 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9575 output_asm_insn ("ar\t%4,%9", op);
9576 output_asm_insn ("l\t%4,0(%4)", op);
9577 }
9578
9579 output_asm_insn ("br\t%4", op);
9580
9581 /* Output literal pool. */
9582 output_asm_insn (".align\t4", op);
9583
9584 if (nonlocal && flag_pic == 2)
9585 output_asm_insn (".long\t%0", op);
9586 if (nonlocal)
9587 {
9588 op[0] = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
9589 SYMBOL_REF_FLAGS (op[0]) = SYMBOL_FLAG_LOCAL;
9590 }
9591
9592 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[8]));
9593 if (!flag_pic)
9594 output_asm_insn (".long\t%0", op);
9595 else
9596 output_asm_insn (".long\t%0-%5", op);
9597
9598 if (op[6])
9599 {
9600 targetm.asm_out.internal_label (file, "L",
9601 CODE_LABEL_NUMBER (op[6]));
9602 output_asm_insn (".long\t%2", op);
9603 }
9604 if (op[7])
9605 {
9606 targetm.asm_out.internal_label (file, "L",
9607 CODE_LABEL_NUMBER (op[7]));
9608 output_asm_insn (".long\t%3", op);
9609 }
9610 }
9611 final_end_function ();
9612 }
9613
9614 static bool
9615 s390_valid_pointer_mode (enum machine_mode mode)
9616 {
9617 return (mode == SImode || (TARGET_64BIT && mode == DImode));
9618 }
9619
9620 /* Checks whether the given CALL_EXPR would use a call-saved
9621 register. This is used to decide whether sibling call
9622 optimization could be performed on the respective function
9623 call. */
9624
9625 static bool
9626 s390_call_saved_register_used (tree call_expr)
9627 {
9628 CUMULATIVE_ARGS cum_v;
9629 cumulative_args_t cum;
9630 tree parameter;
9631 enum machine_mode mode;
9632 tree type;
9633 rtx parm_rtx;
9634 int reg, i;
9635
9636 INIT_CUMULATIVE_ARGS (cum_v, NULL, NULL, 0, 0);
9637 cum = pack_cumulative_args (&cum_v);
9638
9639 for (i = 0; i < call_expr_nargs (call_expr); i++)
9640 {
9641 parameter = CALL_EXPR_ARG (call_expr, i);
9642 gcc_assert (parameter);
9643
9644 /* For an undeclared variable passed as parameter we will get
9645 an ERROR_MARK node here. */
9646 if (TREE_CODE (parameter) == ERROR_MARK)
9647 return true;
9648
9649 type = TREE_TYPE (parameter);
9650 gcc_assert (type);
9651
9652 mode = TYPE_MODE (type);
9653 gcc_assert (mode);
9654
9655 if (pass_by_reference (&cum_v, mode, type, true))
9656 {
9657 mode = Pmode;
9658 type = build_pointer_type (type);
9659 }
9660
9661 parm_rtx = s390_function_arg (cum, mode, type, 0);
9662
9663 s390_function_arg_advance (cum, mode, type, 0);
9664
9665 if (!parm_rtx)
9666 continue;
9667
9668 if (REG_P (parm_rtx))
9669 {
9670 for (reg = 0;
9671 reg < HARD_REGNO_NREGS (REGNO (parm_rtx), GET_MODE (parm_rtx));
9672 reg++)
9673 if (!call_used_regs[reg + REGNO (parm_rtx)])
9674 return true;
9675 }
9676
9677 if (GET_CODE (parm_rtx) == PARALLEL)
9678 {
9679 int i;
9680
9681 for (i = 0; i < XVECLEN (parm_rtx, 0); i++)
9682 {
9683 rtx r = XEXP (XVECEXP (parm_rtx, 0, i), 0);
9684
9685 gcc_assert (REG_P (r));
9686
9687 for (reg = 0;
9688 reg < HARD_REGNO_NREGS (REGNO (r), GET_MODE (r));
9689 reg++)
9690 if (!call_used_regs[reg + REGNO (r)])
9691 return true;
9692 }
9693 }
9694
9695 }
9696 return false;
9697 }
9698
9699 /* Return true if the given call expression can be
9700 turned into a sibling call.
9701 DECL holds the declaration of the function to be called whereas
9702 EXP is the call expression itself. */
9703
9704 static bool
9705 s390_function_ok_for_sibcall (tree decl, tree exp)
9706 {
9707 /* The TPF epilogue uses register 1. */
9708 if (TARGET_TPF_PROFILING)
9709 return false;
9710
9711 /* The 31 bit PLT code uses register 12 (GOT pointer - caller saved)
9712 which would have to be restored before the sibcall. */
9713 if (!TARGET_64BIT && flag_pic && decl && !targetm.binds_local_p (decl))
9714 return false;
9715
9716 /* Register 6 on s390 is available as an argument register but is
9717 unfortunately call-saved, i.e. the callee must preserve it. This makes
9718 functions needing this register for arguments not suitable for sibcalls. */
9719 return !s390_call_saved_register_used (exp);
9720 }
9721
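/* Illustrative note (not from the original sources, assuming the usual
   s390 ELF ABI): the first five integer/pointer arguments are passed in
   %r2-%r6, and %r6 must be preserved by the callee.  A call passing five
   such arguments, e.g. f (a, b, c, d, e), therefore puts its fifth
   argument into %r6, s390_call_saved_register_used returns true, and the
   call is not turned into a sibcall.  */
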
9722 /* Return the fixed registers used for condition codes. */
9723
9724 static bool
9725 s390_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
9726 {
9727 *p1 = CC_REGNUM;
9728 *p2 = INVALID_REGNUM;
9729
9730 return true;
9731 }
9732
9733 /* This function is used by the call expanders of the machine description.
9734 It emits the call insn itself together with the necessary operations
9735 to adjust the target address and returns the emitted insn.
9736 ADDR_LOCATION is the target address rtx
9737 TLS_CALL the location of the thread-local symbol
9738 RESULT_REG the register where the result of the call should be stored
9739 RETADDR_REG the register where the return address should be stored
9740 If this parameter is NULL_RTX the call is considered
9741 to be a sibling call. */
9742
9743 rtx
9744 s390_emit_call (rtx addr_location, rtx tls_call, rtx result_reg,
9745 rtx retaddr_reg)
9746 {
9747 bool plt_call = false;
9748 rtx insn;
9749 rtx call;
9750 rtx clobber;
9751 rtvec vec;
9752
9753 /* Direct function calls need special treatment. */
9754 if (GET_CODE (addr_location) == SYMBOL_REF)
9755 {
9756 /* When calling a global routine in PIC mode, we must
9757 replace the symbol itself with the PLT stub. */
9758 if (flag_pic && !SYMBOL_REF_LOCAL_P (addr_location))
9759 {
9760 if (retaddr_reg != NULL_RTX)
9761 {
9762 addr_location = gen_rtx_UNSPEC (Pmode,
9763 gen_rtvec (1, addr_location),
9764 UNSPEC_PLT);
9765 addr_location = gen_rtx_CONST (Pmode, addr_location);
9766 plt_call = true;
9767 }
9768 else
9769 /* For -fpic code the PLT entries might use r12 which is
9770 call-saved. Therefore we cannot do a sibcall when
9771 calling directly using a symbol ref. When reaching
9772 this point we decided (in s390_function_ok_for_sibcall)
9773 to do a sibcall for a function pointer but one of the
9774 optimizers was able to get rid of the function pointer
9775 by propagating the symbol ref into the call. This
9776 optimization is illegal for S/390 so we turn the direct
9777 call into an indirect call again. */
9778 addr_location = force_reg (Pmode, addr_location);
9779 }
9780
9781 /* Unless we can use the bras(l) insn, force the
9782 routine address into a register. */
9783 if (!TARGET_SMALL_EXEC && !TARGET_CPU_ZARCH)
9784 {
9785 if (flag_pic)
9786 addr_location = legitimize_pic_address (addr_location, 0);
9787 else
9788 addr_location = force_reg (Pmode, addr_location);
9789 }
9790 }
9791
9792 /* If it is already an indirect call or the code above moved the
9793 SYMBOL_REF to somewhere else, make sure the address can be found in
9794 register 1. */
9795 if (retaddr_reg == NULL_RTX
9796 && GET_CODE (addr_location) != SYMBOL_REF
9797 && !plt_call)
9798 {
9799 emit_move_insn (gen_rtx_REG (Pmode, SIBCALL_REGNUM), addr_location);
9800 addr_location = gen_rtx_REG (Pmode, SIBCALL_REGNUM);
9801 }
9802
9803 addr_location = gen_rtx_MEM (QImode, addr_location);
9804 call = gen_rtx_CALL (VOIDmode, addr_location, const0_rtx);
9805
9806 if (result_reg != NULL_RTX)
9807 call = gen_rtx_SET (VOIDmode, result_reg, call);
9808
9809 if (retaddr_reg != NULL_RTX)
9810 {
9811 clobber = gen_rtx_CLOBBER (VOIDmode, retaddr_reg);
9812
9813 if (tls_call != NULL_RTX)
9814 vec = gen_rtvec (3, call, clobber,
9815 gen_rtx_USE (VOIDmode, tls_call));
9816 else
9817 vec = gen_rtvec (2, call, clobber);
9818
9819 call = gen_rtx_PARALLEL (VOIDmode, vec);
9820 }
9821
9822 insn = emit_call_insn (call);
9823
9824 /* 31-bit PLT stubs and tls calls use the GOT register implicitly. */
9825 if ((!TARGET_64BIT && plt_call) || tls_call != NULL_RTX)
9826 {
9827 /* s390_function_ok_for_sibcall should
9828 have denied sibcalls in this case. */
9829 gcc_assert (retaddr_reg != NULL_RTX);
9830
9831 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
9832 }
9833 return insn;
9834 }
9835
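/* A minimal usage sketch (hypothetical, for illustration only; ADDR and
   RESULT_REG are made-up names): a normal value-returning call could be
   emitted as

     s390_emit_call (addr, NULL_RTX, result_reg,
                     gen_rtx_REG (Pmode, RETURN_REGNUM));

   while a sibling call passes NULL_RTX as RETADDR_REG, so no link
   register is clobbered and a non-symbolic address is forced into %r1.  */
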
9836 /* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
9837
9838 static void
9839 s390_conditional_register_usage (void)
9840 {
9841 int i;
9842
9843 if (flag_pic)
9844 {
9845 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
9846 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
9847 }
9848 if (TARGET_CPU_ZARCH)
9849 {
9850 fixed_regs[BASE_REGNUM] = 0;
9851 call_used_regs[BASE_REGNUM] = 0;
9852 fixed_regs[RETURN_REGNUM] = 0;
9853 call_used_regs[RETURN_REGNUM] = 0;
9854 }
9855 if (TARGET_64BIT)
9856 {
9857 for (i = 24; i < 32; i++)
9858 call_used_regs[i] = call_really_used_regs[i] = 0;
9859 }
9860 else
9861 {
9862 for (i = 18; i < 20; i++)
9863 call_used_regs[i] = call_really_used_regs[i] = 0;
9864 }
9865
9866 if (TARGET_SOFT_FLOAT)
9867 {
9868 for (i = 16; i < 32; i++)
9869 call_used_regs[i] = fixed_regs[i] = 1;
9870 }
9871 }
9872
9873 /* Corresponding function to eh_return expander. */
9874
9875 static GTY(()) rtx s390_tpf_eh_return_symbol;
9876 void
9877 s390_emit_tpf_eh_return (rtx target)
9878 {
9879 rtx insn, reg;
9880
9881 if (!s390_tpf_eh_return_symbol)
9882 s390_tpf_eh_return_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tpf_eh_return");
9883
9884 reg = gen_rtx_REG (Pmode, 2);
9885
9886 emit_move_insn (reg, target);
9887 insn = s390_emit_call (s390_tpf_eh_return_symbol, NULL_RTX, reg,
9888 gen_rtx_REG (Pmode, RETURN_REGNUM));
9889 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
9890
9891 emit_move_insn (EH_RETURN_HANDLER_RTX, reg);
9892 }
9893
9894 /* Rework the prologue/epilogue to avoid saving/restoring
9895 registers unnecessarily. */
9896
9897 static void
9898 s390_optimize_prologue (void)
9899 {
9900 rtx insn, new_insn, next_insn;
9901
9902 /* Do a final recompute of the frame-related data. */
9903
9904 s390_update_frame_layout ();
9905
9906 /* If all special registers are in fact used, there's nothing we
9907 can do, so no point in walking the insn list. */
9908
9909 if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
9910 && cfun_frame_layout.last_save_gpr >= BASE_REGNUM
9911 && (TARGET_CPU_ZARCH
9912 || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
9913 && cfun_frame_layout.last_save_gpr >= RETURN_REGNUM)))
9914 return;
9915
9916 /* Search for prologue/epilogue insns and replace them. */
9917
9918 for (insn = get_insns (); insn; insn = next_insn)
9919 {
9920 int first, last, off;
9921 rtx set, base, offset;
9922
9923 next_insn = NEXT_INSN (insn);
9924
9925 if (GET_CODE (insn) != INSN)
9926 continue;
9927
9928 if (GET_CODE (PATTERN (insn)) == PARALLEL
9929 && store_multiple_operation (PATTERN (insn), VOIDmode))
9930 {
9931 set = XVECEXP (PATTERN (insn), 0, 0);
9932 first = REGNO (SET_SRC (set));
9933 last = first + XVECLEN (PATTERN (insn), 0) - 1;
9934 offset = const0_rtx;
9935 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
9936 off = INTVAL (offset);
9937
9938 if (GET_CODE (base) != REG || off < 0)
9939 continue;
9940 if (cfun_frame_layout.first_save_gpr != -1
9941 && (cfun_frame_layout.first_save_gpr < first
9942 || cfun_frame_layout.last_save_gpr > last))
9943 continue;
9944 if (REGNO (base) != STACK_POINTER_REGNUM
9945 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
9946 continue;
9947 if (first > BASE_REGNUM || last < BASE_REGNUM)
9948 continue;
9949
9950 if (cfun_frame_layout.first_save_gpr != -1)
9951 {
9952 new_insn = save_gprs (base,
9953 off + (cfun_frame_layout.first_save_gpr
9954 - first) * UNITS_PER_LONG,
9955 cfun_frame_layout.first_save_gpr,
9956 cfun_frame_layout.last_save_gpr);
9957 new_insn = emit_insn_before (new_insn, insn);
9958 INSN_ADDRESSES_NEW (new_insn, -1);
9959 }
9960
9961 remove_insn (insn);
9962 continue;
9963 }
9964
9965 if (cfun_frame_layout.first_save_gpr == -1
9966 && GET_CODE (PATTERN (insn)) == SET
9967 && GET_CODE (SET_SRC (PATTERN (insn))) == REG
9968 && (REGNO (SET_SRC (PATTERN (insn))) == BASE_REGNUM
9969 || (!TARGET_CPU_ZARCH
9970 && REGNO (SET_SRC (PATTERN (insn))) == RETURN_REGNUM))
9971 && GET_CODE (SET_DEST (PATTERN (insn))) == MEM)
9972 {
9973 set = PATTERN (insn);
9974 first = REGNO (SET_SRC (set));
9975 offset = const0_rtx;
9976 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
9977 off = INTVAL (offset);
9978
9979 if (GET_CODE (base) != REG || off < 0)
9980 continue;
9981 if (REGNO (base) != STACK_POINTER_REGNUM
9982 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
9983 continue;
9984
9985 remove_insn (insn);
9986 continue;
9987 }
9988
9989 if (GET_CODE (PATTERN (insn)) == PARALLEL
9990 && load_multiple_operation (PATTERN (insn), VOIDmode))
9991 {
9992 set = XVECEXP (PATTERN (insn), 0, 0);
9993 first = REGNO (SET_DEST (set));
9994 last = first + XVECLEN (PATTERN (insn), 0) - 1;
9995 offset = const0_rtx;
9996 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
9997 off = INTVAL (offset);
9998
9999 if (GET_CODE (base) != REG || off < 0)
10000 continue;
10001 if (cfun_frame_layout.first_restore_gpr != -1
10002 && (cfun_frame_layout.first_restore_gpr < first
10003 || cfun_frame_layout.last_restore_gpr > last))
10004 continue;
10005 if (REGNO (base) != STACK_POINTER_REGNUM
10006 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10007 continue;
10008 if (first > BASE_REGNUM || last < BASE_REGNUM)
10009 continue;
10010
10011 if (cfun_frame_layout.first_restore_gpr != -1)
10012 {
10013 new_insn = restore_gprs (base,
10014 off + (cfun_frame_layout.first_restore_gpr
10015 - first) * UNITS_PER_LONG,
10016 cfun_frame_layout.first_restore_gpr,
10017 cfun_frame_layout.last_restore_gpr);
10018 new_insn = emit_insn_before (new_insn, insn);
10019 INSN_ADDRESSES_NEW (new_insn, -1);
10020 }
10021
10022 remove_insn (insn);
10023 continue;
10024 }
10025
10026 if (cfun_frame_layout.first_restore_gpr == -1
10027 && GET_CODE (PATTERN (insn)) == SET
10028 && GET_CODE (SET_DEST (PATTERN (insn))) == REG
10029 && (REGNO (SET_DEST (PATTERN (insn))) == BASE_REGNUM
10030 || (!TARGET_CPU_ZARCH
10031 && REGNO (SET_DEST (PATTERN (insn))) == RETURN_REGNUM))
10032 && GET_CODE (SET_SRC (PATTERN (insn))) == MEM)
10033 {
10034 set = PATTERN (insn);
10035 first = REGNO (SET_DEST (set));
10036 offset = const0_rtx;
10037 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
10038 off = INTVAL (offset);
10039
10040 if (GET_CODE (base) != REG || off < 0)
10041 continue;
10042 if (REGNO (base) != STACK_POINTER_REGNUM
10043 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10044 continue;
10045
10046 remove_insn (insn);
10047 continue;
10048 }
10049 }
10050 }
10051
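/* Sketch of the intended effect (offsets assume the 64-bit ABI register
   save area at 48(%r15) and UNITS_PER_LONG == 8): a prologue that
   conservatively emitted

     stmg    %r6,%r15,48(%r15)

   is narrowed, once the final frame layout shows that only %r14 and %r15
   actually need saving, to

     stmg    %r14,%r15,112(%r15)

   i.e. 48 + (14 - 6) * 8, and the matching load-multiple in the epilogue
   is narrowed the same way.  */
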
10052 /* On z10 and later the dynamic branch prediction must see the
10053 backward jump within a certain window. If not, it falls back to
10054 the static prediction. This function rearranges the loop backward
10055 branch in a way which makes the static prediction always correct.
10056 The function returns true if it added an instruction. */
10057 static bool
10058 s390_fix_long_loop_prediction (rtx insn)
10059 {
10060 rtx set = single_set (insn);
10061 rtx code_label, label_ref, new_label;
10062 rtx uncond_jump;
10063 rtx cur_insn;
10064 rtx tmp;
10065 int distance;
10066
10067 /* This will exclude branch on count and branch on index patterns
10068 since these are correctly statically predicted. */
10069 if (!set
10070 || SET_DEST (set) != pc_rtx
10071 || GET_CODE (SET_SRC(set)) != IF_THEN_ELSE)
10072 return false;
10073
10074 label_ref = (GET_CODE (XEXP (SET_SRC (set), 1)) == LABEL_REF ?
10075 XEXP (SET_SRC (set), 1) : XEXP (SET_SRC (set), 2));
10076
10077 gcc_assert (GET_CODE (label_ref) == LABEL_REF);
10078
10079 code_label = XEXP (label_ref, 0);
10080
10081 if (INSN_ADDRESSES (INSN_UID (code_label)) == -1
10082 || INSN_ADDRESSES (INSN_UID (insn)) == -1
10083 || (INSN_ADDRESSES (INSN_UID (insn))
10084 - INSN_ADDRESSES (INSN_UID (code_label)) < PREDICT_DISTANCE))
10085 return false;
10086
10087 for (distance = 0, cur_insn = PREV_INSN (insn);
10088 distance < PREDICT_DISTANCE - 6;
10089 distance += get_attr_length (cur_insn), cur_insn = PREV_INSN (cur_insn))
10090 if (!cur_insn || JUMP_P (cur_insn) || LABEL_P (cur_insn))
10091 return false;
10092
10093 new_label = gen_label_rtx ();
10094 uncond_jump = emit_jump_insn_after (
10095 gen_rtx_SET (VOIDmode, pc_rtx,
10096 gen_rtx_LABEL_REF (VOIDmode, code_label)),
10097 insn);
10098 emit_label_after (new_label, uncond_jump);
10099
10100 tmp = XEXP (SET_SRC (set), 1);
10101 XEXP (SET_SRC (set), 1) = XEXP (SET_SRC (set), 2);
10102 XEXP (SET_SRC (set), 2) = tmp;
10103 INSN_CODE (insn) = -1;
10104
10105 XEXP (label_ref, 0) = new_label;
10106 JUMP_LABEL (insn) = new_label;
10107 JUMP_LABEL (uncond_jump) = code_label;
10108
10109 return true;
10110 }
10111
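/* Illustration (labels and condition are made up): a far backward
   conditional branch at the bottom of a loop,

     jne     .Lloop_top

   is rewritten into a short forward conditional branch over an
   unconditional backward jump,

     je      .Lskip
     j       .Lloop_top
   .Lskip:

   The backward jump is now unconditional, and the usually-not-taken exit
   test is a short forward branch, which static prediction gets right.  */
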
10112 /* Returns 1 if INSN reads the value of REG for purposes not related
10113 to addressing of memory, and 0 otherwise. */
10114 static int
10115 s390_non_addr_reg_read_p (rtx reg, rtx insn)
10116 {
10117 return reg_referenced_p (reg, PATTERN (insn))
10118 && !reg_used_in_mem_p (REGNO (reg), PATTERN (insn));
10119 }
10120
10121 /* Starting from INSN find_cond_jump looks downwards in the insn
10122 stream for a single jump insn which is the last user of the
10123 condition code set in INSN. */
10124 static rtx
10125 find_cond_jump (rtx insn)
10126 {
10127 for (; insn; insn = NEXT_INSN (insn))
10128 {
10129 rtx ite, cc;
10130
10131 if (LABEL_P (insn))
10132 break;
10133
10134 if (!JUMP_P (insn))
10135 {
10136 if (reg_mentioned_p (gen_rtx_REG (CCmode, CC_REGNUM), insn))
10137 break;
10138 continue;
10139 }
10140
10141 /* This will be triggered by a return. */
10142 if (GET_CODE (PATTERN (insn)) != SET)
10143 break;
10144
10145 gcc_assert (SET_DEST (PATTERN (insn)) == pc_rtx);
10146 ite = SET_SRC (PATTERN (insn));
10147
10148 if (GET_CODE (ite) != IF_THEN_ELSE)
10149 break;
10150
10151 cc = XEXP (XEXP (ite, 0), 0);
10152 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc)))
10153 break;
10154
10155 if (find_reg_note (insn, REG_DEAD, cc))
10156 return insn;
10157 break;
10158 }
10159
10160 return NULL_RTX;
10161 }
10162
10163 /* Swap the condition in COND and the operands in OP0 and OP1 so that
10164 the semantics does not change. If NULL_RTX is passed as COND the
10165 function tries to find the conditional jump starting with INSN. */
10166 static void
10167 s390_swap_cmp (rtx cond, rtx *op0, rtx *op1, rtx insn)
10168 {
10169 rtx tmp = *op0;
10170
10171 if (cond == NULL_RTX)
10172 {
10173 rtx jump = find_cond_jump (NEXT_INSN (insn));
10174 jump = jump ? single_set (jump) : NULL_RTX;
10175
10176 if (jump == NULL_RTX)
10177 return;
10178
10179 cond = XEXP (XEXP (jump, 1), 0);
10180 }
10181
10182 *op0 = *op1;
10183 *op1 = tmp;
10184 PUT_CODE (cond, swap_condition (GET_CODE (cond)));
10185 }
10186
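/* Example (illustrative): for a compare (compare (reg A) (reg B)) whose
   user jumps on GT, swapping the operands to (compare (reg B) (reg A))
   requires the condition to become LT, which is what swap_condition
   yields; EQ and NE are unchanged by the swap.  */
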
10187 /* On z10, instructions of the compare-and-branch family have the
10188 property of accessing the register occurring as second operand with
10189 its bits complemented. If such a compare is grouped with a second
10190 instruction that accesses the same register non-complemented, and
10191 if that register's value is delivered via a bypass, then the
10192 pipeline recycles, thereby causing significant performance decline.
10193 This function locates such situations and exchanges the two
10194 operands of the compare. The function returns true whenever it
10195 added an insn. */
10196 static bool
10197 s390_z10_optimize_cmp (rtx insn)
10198 {
10199 rtx prev_insn, next_insn;
10200 bool insn_added_p = false;
10201 rtx cond, *op0, *op1;
10202
10203 if (GET_CODE (PATTERN (insn)) == PARALLEL)
10204 {
10205 /* Handle compare and branch and branch on count
10206 instructions. */
10207 rtx pattern = single_set (insn);
10208
10209 if (!pattern
10210 || SET_DEST (pattern) != pc_rtx
10211 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE)
10212 return false;
10213
10214 cond = XEXP (SET_SRC (pattern), 0);
10215 op0 = &XEXP (cond, 0);
10216 op1 = &XEXP (cond, 1);
10217 }
10218 else if (GET_CODE (PATTERN (insn)) == SET)
10219 {
10220 rtx src, dest;
10221
10222 /* Handle normal compare instructions. */
10223 src = SET_SRC (PATTERN (insn));
10224 dest = SET_DEST (PATTERN (insn));
10225
10226 if (!REG_P (dest)
10227 || !CC_REGNO_P (REGNO (dest))
10228 || GET_CODE (src) != COMPARE)
10229 return false;
10230
10231 /* s390_swap_cmp will try to find the conditional
10232 jump when passing NULL_RTX as condition. */
10233 cond = NULL_RTX;
10234 op0 = &XEXP (src, 0);
10235 op1 = &XEXP (src, 1);
10236 }
10237 else
10238 return false;
10239
10240 if (!REG_P (*op0) || !REG_P (*op1))
10241 return false;
10242
10243 if (GET_MODE_CLASS (GET_MODE (*op0)) != MODE_INT)
10244 return false;
10245
10246 /* Swap the COMPARE arguments and its mask if there is a
10247 conflicting access in the previous insn. */
10248 prev_insn = prev_active_insn (insn);
10249 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
10250 && reg_referenced_p (*op1, PATTERN (prev_insn)))
10251 s390_swap_cmp (cond, op0, op1, insn);
10252
10253 /* Check if there is a conflict with the next insn. If there
10254 was no conflict with the previous insn, then swap the
10255 COMPARE arguments and its mask. If we already swapped
10256 the operands, or if swapping them would cause a conflict
10257 with the previous insn, issue a NOP after the COMPARE in
10258 order to separate the two instructions. */
10259 next_insn = next_active_insn (insn);
10260 if (next_insn != NULL_RTX && INSN_P (next_insn)
10261 && s390_non_addr_reg_read_p (*op1, next_insn))
10262 {
10263 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
10264 && s390_non_addr_reg_read_p (*op0, prev_insn))
10265 {
10266 if (REGNO (*op1) == 0)
10267 emit_insn_after (gen_nop1 (), insn);
10268 else
10269 emit_insn_after (gen_nop (), insn);
10270 insn_added_p = true;
10271 }
10272 else
10273 s390_swap_cmp (cond, op0, op1, insn);
10274 }
10275 return insn_added_p;
10276 }
10277
10278 /* Perform machine-dependent processing. */
10279
10280 static void
10281 s390_reorg (void)
10282 {
10283 bool pool_overflow = false;
10284
10285 /* Make sure all splits have been performed; splits after
10286 machine_dependent_reorg might confuse insn length counts. */
10287 split_all_insns_noflow ();
10288
10289 /* Install the main literal pool and the associated base
10290 register load insns.
10291
10292 In addition, there are two problematic situations we need
10293 to correct:
10294
10295 - the literal pool might be > 4096 bytes in size, so that
10296 some of its elements cannot be directly accessed
10297
10298 - a branch target might be > 64K away from the branch, so that
10299 it is not possible to use a PC-relative instruction.
10300
10301 To fix those, we split the single literal pool into multiple
10302 pool chunks, reloading the pool base register at various
10303 points throughout the function to ensure it always points to
10304 the pool chunk the following code expects, and / or replace
10305 PC-relative branches by absolute branches.
10306
10307 However, the two problems are interdependent: splitting the
10308 literal pool can move a branch further away from its target,
10309 causing the 64K limit to overflow, and on the other hand,
10310 replacing a PC-relative branch by an absolute branch means
10311 we need to put the branch target address into the literal
10312 pool, possibly causing it to overflow.
10313
10314 So, we loop trying to fix up both problems until we manage
10315 to satisfy both conditions at the same time. Note that the
10316 loop is guaranteed to terminate as every pass of the loop
10317 strictly decreases the total number of PC-relative branches
10318 in the function. (This is not completely true as there
10319 might be branch-over-pool insns introduced by chunkify_start.
10320 Those never need to be split however.) */
10321
10322 for (;;)
10323 {
10324 struct constant_pool *pool = NULL;
10325
10326 /* Collect the literal pool. */
10327 if (!pool_overflow)
10328 {
10329 pool = s390_mainpool_start ();
10330 if (!pool)
10331 pool_overflow = true;
10332 }
10333
10334 /* If literal pool overflowed, start to chunkify it. */
10335 if (pool_overflow)
10336 pool = s390_chunkify_start ();
10337
10338 /* Split out-of-range branches. If this has created new
10339 literal pool entries, cancel current chunk list and
10340 recompute it. zSeries machines have large branch
10341 instructions, so we never need to split a branch. */
10342 if (!TARGET_CPU_ZARCH && s390_split_branches ())
10343 {
10344 if (pool_overflow)
10345 s390_chunkify_cancel (pool);
10346 else
10347 s390_mainpool_cancel (pool);
10348
10349 continue;
10350 }
10351
10352 /* If we made it up to here, both conditions are satisfied.
10353 Finish up literal pool related changes. */
10354 if (pool_overflow)
10355 s390_chunkify_finish (pool);
10356 else
10357 s390_mainpool_finish (pool);
10358
10359 /* We're done splitting branches. */
10360 cfun->machine->split_branches_pending_p = false;
10361 break;
10362 }
10363
10364 /* Generate out-of-pool execute target insns. */
10365 if (TARGET_CPU_ZARCH)
10366 {
10367 rtx insn, label, target;
10368
10369 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10370 {
10371 label = s390_execute_label (insn);
10372 if (!label)
10373 continue;
10374
10375 gcc_assert (label != const0_rtx);
10376
10377 target = emit_label (XEXP (label, 0));
10378 INSN_ADDRESSES_NEW (target, -1);
10379
10380 target = emit_insn (s390_execute_target (insn));
10381 INSN_ADDRESSES_NEW (target, -1);
10382 }
10383 }
10384
10385 /* Try to optimize prologue and epilogue further. */
10386 s390_optimize_prologue ();
10387
10388 /* Walk over the insns and do some >=z10 specific changes. */
10389 if (s390_tune == PROCESSOR_2097_Z10
10390 || s390_tune == PROCESSOR_2817_Z196)
10391 {
10392 rtx insn;
10393 bool insn_added_p = false;
10394
10395 /* The insn lengths and addresses have to be up to date for the
10396 following manipulations. */
10397 shorten_branches (get_insns ());
10398
10399 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10400 {
10401 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
10402 continue;
10403
10404 if (JUMP_P (insn))
10405 insn_added_p |= s390_fix_long_loop_prediction (insn);
10406
10407 if ((GET_CODE (PATTERN (insn)) == PARALLEL
10408 || GET_CODE (PATTERN (insn)) == SET)
10409 && s390_tune == PROCESSOR_2097_Z10)
10410 insn_added_p |= s390_z10_optimize_cmp (insn);
10411 }
10412
10413 /* Adjust branches if we added new instructions. */
10414 if (insn_added_p)
10415 shorten_branches (get_insns ());
10416 }
10417 }
10418
10419 /* Return true if INSN is an fp load insn writing register REGNO. */
10420 static inline bool
10421 s390_fpload_toreg (rtx insn, unsigned int regno)
10422 {
10423 rtx set;
10424 enum attr_type flag = s390_safe_attr_type (insn);
10425
10426 if (flag != TYPE_FLOADSF && flag != TYPE_FLOADDF)
10427 return false;
10428
10429 set = single_set (insn);
10430
10431 if (set == NULL_RTX)
10432 return false;
10433
10434 if (!REG_P (SET_DEST (set)) || !MEM_P (SET_SRC (set)))
10435 return false;
10436
10437 if (REGNO (SET_DEST (set)) != regno)
10438 return false;
10439
10440 return true;
10441 }
10442
10443 /* This value describes the distance to be avoided between an
10444 arithmetic fp instruction and an fp load writing the same register.
10445 Z10_EARLYLOAD_DISTANCE - 1 as well as Z10_EARLYLOAD_DISTANCE + 1 are
10446 fine, but the exact value has to be avoided. Otherwise the FP
10447 pipeline will throw an exception causing a major penalty. */
10448 #define Z10_EARLYLOAD_DISTANCE 7
10449
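/* For example, with the value 7 an fp load that writes the destination
   register of an arithmetic fp instruction issued exactly seven active
   insns earlier hits the penalty, while a separation of six or eight
   insns does not; the reordering below therefore only has to break the
   exact distance.  */
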
10450 /* Rearrange the ready list in order to avoid the situation described
10451 for Z10_EARLYLOAD_DISTANCE. A problematic load instruction is
10452 moved to the very end of the ready list. */
10453 static void
10454 s390_z10_prevent_earlyload_conflicts (rtx *ready, int *nready_p)
10455 {
10456 unsigned int regno;
10457 int nready = *nready_p;
10458 rtx tmp;
10459 int i;
10460 rtx insn;
10461 rtx set;
10462 enum attr_type flag;
10463 int distance;
10464
10465 /* Skip DISTANCE - 1 active insns. */
10466 for (insn = last_scheduled_insn, distance = Z10_EARLYLOAD_DISTANCE - 1;
10467 distance > 0 && insn != NULL_RTX;
10468 distance--, insn = prev_active_insn (insn))
10469 if (CALL_P (insn) || JUMP_P (insn))
10470 return;
10471
10472 if (insn == NULL_RTX)
10473 return;
10474
10475 set = single_set (insn);
10476
10477 if (set == NULL_RTX || !REG_P (SET_DEST (set))
10478 || GET_MODE_CLASS (GET_MODE (SET_DEST (set))) != MODE_FLOAT)
10479 return;
10480
10481 flag = s390_safe_attr_type (insn);
10482
10483 if (flag == TYPE_FLOADSF || flag == TYPE_FLOADDF)
10484 return;
10485
10486 regno = REGNO (SET_DEST (set));
10487 i = nready - 1;
10488
10489 while (!s390_fpload_toreg (ready[i], regno) && i > 0)
10490 i--;
10491
10492 if (!i)
10493 return;
10494
10495 tmp = ready[i];
10496 memmove (&ready[1], &ready[0], sizeof (rtx) * i);
10497 ready[0] = tmp;
10498 }
10499
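/* Worked example (hypothetical insns): suppose "adbr %f2,%f4" was issued
   Z10_EARLYLOAD_DISTANCE - 1 active insns ago and the ready list still
   contains an fp load that writes %f2.  That load is moved to ready[0],
   the slot the scheduler issues last, so at least one other ready insn
   goes out first and the critical distance of exactly seven insns between
   the arithmetic instruction and the load is avoided.  */
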
10500 /* This function is called via hook TARGET_SCHED_REORDER before
10501 issuing one insn from list READY which contains *NREADYP entries.
10502 For target z10 it reorders load instructions to avoid early load
10503 conflicts in the floating point pipeline. */
10504 static int
10505 s390_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
10506 rtx *ready, int *nreadyp, int clock ATTRIBUTE_UNUSED)
10507 {
10508 if (s390_tune == PROCESSOR_2097_Z10)
10509 if (reload_completed && *nreadyp > 1)
10510 s390_z10_prevent_earlyload_conflicts (ready, nreadyp);
10511
10512 return s390_issue_rate ();
10513 }
10514
10515 /* This function is called via hook TARGET_SCHED_VARIABLE_ISSUE after
10516 the scheduler has issued INSN. It stores the last issued insn into
10517 last_scheduled_insn in order to make it available for
10518 s390_sched_reorder. */
10519 static int
10520 s390_sched_variable_issue (FILE *file ATTRIBUTE_UNUSED,
10521 int verbose ATTRIBUTE_UNUSED,
10522 rtx insn, int more)
10523 {
10524 last_scheduled_insn = insn;
10525
10526 if (GET_CODE (PATTERN (insn)) != USE
10527 && GET_CODE (PATTERN (insn)) != CLOBBER)
10528 return more - 1;
10529 else
10530 return more;
10531 }
10532
10533 static void
10534 s390_sched_init (FILE *file ATTRIBUTE_UNUSED,
10535 int verbose ATTRIBUTE_UNUSED,
10536 int max_ready ATTRIBUTE_UNUSED)
10537 {
10538 last_scheduled_insn = NULL_RTX;
10539 }
10540
10541 /* This function checks the whole of insn X for memory references. The
10542 function always returns zero because the framework it is called
10543 from would stop recursively analyzing the insn upon a return value
10544 other than zero. The real result of this function is updating
10545 counter variable MEM_COUNT. */
10546 static int
10547 check_dpu (rtx *x, unsigned *mem_count)
10548 {
10549 if (*x != NULL_RTX && MEM_P (*x))
10550 (*mem_count)++;
10551 return 0;
10552 }
10553
10554 /* This target hook implementation for TARGET_LOOP_UNROLL_ADJUST calculates
10555 how many times struct loop *loop should be unrolled when tuning for cpus
10556 with a built-in stride prefetcher.
10557 The loop is analyzed for memory accesses by calling check_dpu for
10558 each rtx of the loop. Depending on the loop depth and the number of
10559 memory accesses, a new unroll count <= nunroll is returned to improve the
10560 behaviour of the hardware prefetch unit. */
10561 static unsigned
10562 s390_loop_unroll_adjust (unsigned nunroll, struct loop *loop)
10563 {
10564 basic_block *bbs;
10565 rtx insn;
10566 unsigned i;
10567 unsigned mem_count = 0;
10568
10569 if (s390_tune != PROCESSOR_2097_Z10 && s390_tune != PROCESSOR_2817_Z196)
10570 return nunroll;
10571
10572 /* Count the number of memory references within the loop body. */
10573 bbs = get_loop_body (loop);
10574 for (i = 0; i < loop->num_nodes; i++)
10575 {
10576 for (insn = BB_HEAD (bbs[i]); insn != BB_END (bbs[i]); insn = NEXT_INSN (insn))
10577 if (INSN_P (insn) && INSN_CODE (insn) != -1)
10578 for_each_rtx (&insn, (rtx_function) check_dpu, &mem_count);
10579 }
10580 free (bbs);
10581
10582 /* Prevent division by zero; nunroll needs no adjustment in this case. */
10583 if (mem_count == 0)
10584 return nunroll;
10585
10586 switch (loop_depth(loop))
10587 {
10588 case 1:
10589 return MIN (nunroll, 28 / mem_count);
10590 case 2:
10591 return MIN (nunroll, 22 / mem_count);
10592 default:
10593 return MIN (nunroll, 16 / mem_count);
10594 }
10595 }
10596
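/* Worked example: for a depth-1 loop containing 4 memory references and a
   requested unroll factor of 8, the hook returns MIN (8, 28 / 4) = 7; a
   doubly nested loop with the same counts would get MIN (8, 22 / 4) = 5.  */
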
10597 /* Initialize GCC target structure. */
10598
10599 #undef TARGET_ASM_ALIGNED_HI_OP
10600 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
10601 #undef TARGET_ASM_ALIGNED_DI_OP
10602 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
10603 #undef TARGET_ASM_INTEGER
10604 #define TARGET_ASM_INTEGER s390_assemble_integer
10605
10606 #undef TARGET_ASM_OPEN_PAREN
10607 #define TARGET_ASM_OPEN_PAREN ""
10608
10609 #undef TARGET_ASM_CLOSE_PAREN
10610 #define TARGET_ASM_CLOSE_PAREN ""
10611
10612 #undef TARGET_OPTION_OVERRIDE
10613 #define TARGET_OPTION_OVERRIDE s390_option_override
10614
10615 #undef TARGET_ENCODE_SECTION_INFO
10616 #define TARGET_ENCODE_SECTION_INFO s390_encode_section_info
10617
10618 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10619 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
10620
10621 #ifdef HAVE_AS_TLS
10622 #undef TARGET_HAVE_TLS
10623 #define TARGET_HAVE_TLS true
10624 #endif
10625 #undef TARGET_CANNOT_FORCE_CONST_MEM
10626 #define TARGET_CANNOT_FORCE_CONST_MEM s390_cannot_force_const_mem
10627
10628 #undef TARGET_DELEGITIMIZE_ADDRESS
10629 #define TARGET_DELEGITIMIZE_ADDRESS s390_delegitimize_address
10630
10631 #undef TARGET_LEGITIMIZE_ADDRESS
10632 #define TARGET_LEGITIMIZE_ADDRESS s390_legitimize_address
10633
10634 #undef TARGET_RETURN_IN_MEMORY
10635 #define TARGET_RETURN_IN_MEMORY s390_return_in_memory
10636
10637 #undef TARGET_INIT_BUILTINS
10638 #define TARGET_INIT_BUILTINS s390_init_builtins
10639 #undef TARGET_EXPAND_BUILTIN
10640 #define TARGET_EXPAND_BUILTIN s390_expand_builtin
10641
10642 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
10643 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA s390_output_addr_const_extra
10644
10645 #undef TARGET_ASM_OUTPUT_MI_THUNK
10646 #define TARGET_ASM_OUTPUT_MI_THUNK s390_output_mi_thunk
10647 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
10648 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
10649
10650 #undef TARGET_SCHED_ADJUST_PRIORITY
10651 #define TARGET_SCHED_ADJUST_PRIORITY s390_adjust_priority
10652 #undef TARGET_SCHED_ISSUE_RATE
10653 #define TARGET_SCHED_ISSUE_RATE s390_issue_rate
10654 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
10655 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD s390_first_cycle_multipass_dfa_lookahead
10656
10657 #undef TARGET_SCHED_VARIABLE_ISSUE
10658 #define TARGET_SCHED_VARIABLE_ISSUE s390_sched_variable_issue
10659 #undef TARGET_SCHED_REORDER
10660 #define TARGET_SCHED_REORDER s390_sched_reorder
10661 #undef TARGET_SCHED_INIT
10662 #define TARGET_SCHED_INIT s390_sched_init
10663
10664 #undef TARGET_CANNOT_COPY_INSN_P
10665 #define TARGET_CANNOT_COPY_INSN_P s390_cannot_copy_insn_p
10666 #undef TARGET_RTX_COSTS
10667 #define TARGET_RTX_COSTS s390_rtx_costs
10668 #undef TARGET_ADDRESS_COST
10669 #define TARGET_ADDRESS_COST s390_address_cost
10670 #undef TARGET_REGISTER_MOVE_COST
10671 #define TARGET_REGISTER_MOVE_COST s390_register_move_cost
10672 #undef TARGET_MEMORY_MOVE_COST
10673 #define TARGET_MEMORY_MOVE_COST s390_memory_move_cost
10674
10675 #undef TARGET_MACHINE_DEPENDENT_REORG
10676 #define TARGET_MACHINE_DEPENDENT_REORG s390_reorg
10677
10678 #undef TARGET_VALID_POINTER_MODE
10679 #define TARGET_VALID_POINTER_MODE s390_valid_pointer_mode
10680
10681 #undef TARGET_BUILD_BUILTIN_VA_LIST
10682 #define TARGET_BUILD_BUILTIN_VA_LIST s390_build_builtin_va_list
10683 #undef TARGET_EXPAND_BUILTIN_VA_START
10684 #define TARGET_EXPAND_BUILTIN_VA_START s390_va_start
10685 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
10686 #define TARGET_GIMPLIFY_VA_ARG_EXPR s390_gimplify_va_arg
10687
10688 #undef TARGET_PROMOTE_FUNCTION_MODE
10689 #define TARGET_PROMOTE_FUNCTION_MODE s390_promote_function_mode
10690 #undef TARGET_PASS_BY_REFERENCE
10691 #define TARGET_PASS_BY_REFERENCE s390_pass_by_reference
10692
10693 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
10694 #define TARGET_FUNCTION_OK_FOR_SIBCALL s390_function_ok_for_sibcall
10695 #undef TARGET_FUNCTION_ARG
10696 #define TARGET_FUNCTION_ARG s390_function_arg
10697 #undef TARGET_FUNCTION_ARG_ADVANCE
10698 #define TARGET_FUNCTION_ARG_ADVANCE s390_function_arg_advance
10699 #undef TARGET_FUNCTION_VALUE
10700 #define TARGET_FUNCTION_VALUE s390_function_value
10701 #undef TARGET_LIBCALL_VALUE
10702 #define TARGET_LIBCALL_VALUE s390_libcall_value
10703
10704 #undef TARGET_FIXED_CONDITION_CODE_REGS
10705 #define TARGET_FIXED_CONDITION_CODE_REGS s390_fixed_condition_code_regs
10706
10707 #undef TARGET_CC_MODES_COMPATIBLE
10708 #define TARGET_CC_MODES_COMPATIBLE s390_cc_modes_compatible
10709
10710 #undef TARGET_INVALID_WITHIN_DOLOOP
10711 #define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_null
10712
10713 #ifdef HAVE_AS_TLS
10714 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
10715 #define TARGET_ASM_OUTPUT_DWARF_DTPREL s390_output_dwarf_dtprel
10716 #endif
10717
10718 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
10719 #undef TARGET_MANGLE_TYPE
10720 #define TARGET_MANGLE_TYPE s390_mangle_type
10721 #endif
10722
10723 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10724 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
10725
10726 #undef TARGET_PREFERRED_RELOAD_CLASS
10727 #define TARGET_PREFERRED_RELOAD_CLASS s390_preferred_reload_class
10728
10729 #undef TARGET_SECONDARY_RELOAD
10730 #define TARGET_SECONDARY_RELOAD s390_secondary_reload
10731
10732 #undef TARGET_LIBGCC_CMP_RETURN_MODE
10733 #define TARGET_LIBGCC_CMP_RETURN_MODE s390_libgcc_cmp_return_mode
10734
10735 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
10736 #define TARGET_LIBGCC_SHIFT_COUNT_MODE s390_libgcc_shift_count_mode
10737
10738 #undef TARGET_LEGITIMATE_ADDRESS_P
10739 #define TARGET_LEGITIMATE_ADDRESS_P s390_legitimate_address_p
10740
10741 #undef TARGET_LEGITIMATE_CONSTANT_P
10742 #define TARGET_LEGITIMATE_CONSTANT_P s390_legitimate_constant_p
10743
10744 #undef TARGET_CAN_ELIMINATE
10745 #define TARGET_CAN_ELIMINATE s390_can_eliminate
10746
10747 #undef TARGET_CONDITIONAL_REGISTER_USAGE
10748 #define TARGET_CONDITIONAL_REGISTER_USAGE s390_conditional_register_usage
10749
10750 #undef TARGET_LOOP_UNROLL_ADJUST
10751 #define TARGET_LOOP_UNROLL_ADJUST s390_loop_unroll_adjust
10752
10753 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
10754 #define TARGET_ASM_TRAMPOLINE_TEMPLATE s390_asm_trampoline_template
10755 #undef TARGET_TRAMPOLINE_INIT
10756 #define TARGET_TRAMPOLINE_INIT s390_trampoline_init
10757
10758 #undef TARGET_UNWIND_WORD_MODE
10759 #define TARGET_UNWIND_WORD_MODE s390_unwind_word_mode
10760
10761 struct gcc_target targetm = TARGET_INITIALIZER;
10762
10763 #include "gt-s390.h"