[gcc.git] / gcc / config / s390 / s390.c
1 /* Subroutines used for code generation on IBM S/390 and zSeries
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006,
3 2007, 2008, 2009 Free Software Foundation, Inc.
4 Contributed by Hartmut Penner (hpenner@de.ibm.com) and
5 Ulrich Weigand (uweigand@de.ibm.com) and
6 Andreas Krebbel (Andreas.Krebbel@de.ibm.com).
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "tm_p.h"
31 #include "regs.h"
32 #include "hard-reg-set.h"
33 #include "real.h"
34 #include "insn-config.h"
35 #include "conditions.h"
36 #include "output.h"
37 #include "insn-attr.h"
38 #include "flags.h"
39 #include "except.h"
40 #include "function.h"
41 #include "recog.h"
42 #include "expr.h"
43 #include "reload.h"
44 #include "toplev.h"
45 #include "basic-block.h"
46 #include "integrate.h"
47 #include "ggc.h"
48 #include "target.h"
49 #include "target-def.h"
50 #include "debug.h"
51 #include "langhooks.h"
52 #include "optabs.h"
53 #include "gimple.h"
54 #include "df.h"
55 #include "params.h"
56
57
58 /* Define the specific costs for a given cpu. */
59
60 struct processor_costs
61 {
62 /* multiplication */
63 const int m; /* cost of an M instruction. */
64 const int mghi; /* cost of an MGHI instruction. */
65 const int mh; /* cost of an MH instruction. */
66 const int mhi; /* cost of an MHI instruction. */
67 const int ml; /* cost of an ML instruction. */
68 const int mr; /* cost of an MR instruction. */
69 const int ms; /* cost of an MS instruction. */
70 const int msg; /* cost of an MSG instruction. */
71 const int msgf; /* cost of an MSGF instruction. */
72 const int msgfr; /* cost of an MSGFR instruction. */
73 const int msgr; /* cost of an MSGR instruction. */
74 const int msr; /* cost of an MSR instruction. */
75 const int mult_df; /* cost of multiplication in DFmode. */
76 const int mxbr; /* cost of an MXBR instruction. */
77 /* square root */
78 const int sqxbr; /* cost of square root in TFmode. */
79 const int sqdbr; /* cost of square root in DFmode. */
80 const int sqebr; /* cost of square root in SFmode. */
81 /* multiply and add */
82 const int madbr; /* cost of multiply and add in DFmode. */
83 const int maebr; /* cost of multiply and add in SFmode. */
84 /* division */
85 const int dxbr; /* cost of a DXBR instruction. */
86 const int ddbr; /* cost of a DDBR instruction. */
87 const int debr; /* cost of a DEBR instruction. */
88 const int dlgr; /* cost of a DLGR instruction. */
89 const int dlr; /* cost of a DLR instruction. */
90 const int dr; /* cost of a DR instruction. */
91 const int dsgfr; /* cost of a DSGFR instruction. */
92 const int dsgr; /* cost of a DSGR instruction. */
93 };
94
95 const struct processor_costs *s390_cost;
96
97 static const
98 struct processor_costs z900_cost =
99 {
100 COSTS_N_INSNS (5), /* M */
101 COSTS_N_INSNS (10), /* MGHI */
102 COSTS_N_INSNS (5), /* MH */
103 COSTS_N_INSNS (4), /* MHI */
104 COSTS_N_INSNS (5), /* ML */
105 COSTS_N_INSNS (5), /* MR */
106 COSTS_N_INSNS (4), /* MS */
107 COSTS_N_INSNS (15), /* MSG */
108 COSTS_N_INSNS (7), /* MSGF */
109 COSTS_N_INSNS (7), /* MSGFR */
110 COSTS_N_INSNS (10), /* MSGR */
111 COSTS_N_INSNS (4), /* MSR */
112 COSTS_N_INSNS (7), /* multiplication in DFmode */
113 COSTS_N_INSNS (13), /* MXBR */
114 COSTS_N_INSNS (136), /* SQXBR */
115 COSTS_N_INSNS (44), /* SQDBR */
116 COSTS_N_INSNS (35), /* SQEBR */
117 COSTS_N_INSNS (18), /* MADBR */
118 COSTS_N_INSNS (13), /* MAEBR */
119 COSTS_N_INSNS (134), /* DXBR */
120 COSTS_N_INSNS (30), /* DDBR */
121 COSTS_N_INSNS (27), /* DEBR */
122 COSTS_N_INSNS (220), /* DLGR */
123 COSTS_N_INSNS (34), /* DLR */
124 COSTS_N_INSNS (34), /* DR */
125 COSTS_N_INSNS (32), /* DSGFR */
126 COSTS_N_INSNS (32), /* DSGR */
127 };
128
129 static const
130 struct processor_costs z990_cost =
131 {
132 COSTS_N_INSNS (4), /* M */
133 COSTS_N_INSNS (2), /* MGHI */
134 COSTS_N_INSNS (2), /* MH */
135 COSTS_N_INSNS (2), /* MHI */
136 COSTS_N_INSNS (4), /* ML */
137 COSTS_N_INSNS (4), /* MR */
138 COSTS_N_INSNS (5), /* MS */
139 COSTS_N_INSNS (6), /* MSG */
140 COSTS_N_INSNS (4), /* MSGF */
141 COSTS_N_INSNS (4), /* MSGFR */
142 COSTS_N_INSNS (4), /* MSGR */
143 COSTS_N_INSNS (4), /* MSR */
144 COSTS_N_INSNS (1), /* multiplication in DFmode */
145 COSTS_N_INSNS (28), /* MXBR */
146 COSTS_N_INSNS (130), /* SQXBR */
147 COSTS_N_INSNS (66), /* SQDBR */
148 COSTS_N_INSNS (38), /* SQEBR */
149 COSTS_N_INSNS (1), /* MADBR */
150 COSTS_N_INSNS (1), /* MAEBR */
151 COSTS_N_INSNS (60), /* DXBR */
152 COSTS_N_INSNS (40), /* DDBR */
153 COSTS_N_INSNS (26), /* DEBR */
154 COSTS_N_INSNS (176), /* DLGR */
155 COSTS_N_INSNS (31), /* DLR */
156 COSTS_N_INSNS (31), /* DR */
157 COSTS_N_INSNS (31), /* DSGFR */
158 COSTS_N_INSNS (31), /* DSGR */
159 };
160
161 static const
162 struct processor_costs z9_109_cost =
163 {
164 COSTS_N_INSNS (4), /* M */
165 COSTS_N_INSNS (2), /* MGHI */
166 COSTS_N_INSNS (2), /* MH */
167 COSTS_N_INSNS (2), /* MHI */
168 COSTS_N_INSNS (4), /* ML */
169 COSTS_N_INSNS (4), /* MR */
170 COSTS_N_INSNS (5), /* MS */
171 COSTS_N_INSNS (6), /* MSG */
172 COSTS_N_INSNS (4), /* MSGF */
173 COSTS_N_INSNS (4), /* MSGFR */
174 COSTS_N_INSNS (4), /* MSGR */
175 COSTS_N_INSNS (4), /* MSR */
176 COSTS_N_INSNS (1), /* multiplication in DFmode */
177 COSTS_N_INSNS (28), /* MXBR */
178 COSTS_N_INSNS (130), /* SQXBR */
179 COSTS_N_INSNS (66), /* SQDBR */
180 COSTS_N_INSNS (38), /* SQEBR */
181 COSTS_N_INSNS (1), /* MADBR */
182 COSTS_N_INSNS (1), /* MAEBR */
183 COSTS_N_INSNS (60), /* DXBR */
184 COSTS_N_INSNS (40), /* DDBR */
185 COSTS_N_INSNS (26), /* DEBR */
186 COSTS_N_INSNS (30), /* DLGR */
187 COSTS_N_INSNS (23), /* DLR */
188 COSTS_N_INSNS (23), /* DR */
189 COSTS_N_INSNS (24), /* DSGFR */
190 COSTS_N_INSNS (24), /* DSGR */
191 };
192
193 static const
194 struct processor_costs z10_cost =
195 {
196 COSTS_N_INSNS (10), /* M */
197 COSTS_N_INSNS (10), /* MGHI */
198 COSTS_N_INSNS (10), /* MH */
199 COSTS_N_INSNS (10), /* MHI */
200 COSTS_N_INSNS (10), /* ML */
201 COSTS_N_INSNS (10), /* MR */
202 COSTS_N_INSNS (10), /* MS */
203 COSTS_N_INSNS (10), /* MSG */
204 COSTS_N_INSNS (10), /* MSGF */
205 COSTS_N_INSNS (10), /* MSGFR */
206 COSTS_N_INSNS (10), /* MSGR */
207 COSTS_N_INSNS (10), /* MSR */
208 COSTS_N_INSNS (1) , /* multiplication in DFmode */
209 COSTS_N_INSNS (50), /* MXBR */
210 COSTS_N_INSNS (120), /* SQXBR */
211 COSTS_N_INSNS (52), /* SQDBR */
212 COSTS_N_INSNS (38), /* SQEBR */
213 COSTS_N_INSNS (1), /* MADBR */
214 COSTS_N_INSNS (1), /* MAEBR */
215 COSTS_N_INSNS (111), /* DXBR */
216 COSTS_N_INSNS (39), /* DDBR */
217 COSTS_N_INSNS (32), /* DEBR */
218 COSTS_N_INSNS (160), /* DLGR */
219 COSTS_N_INSNS (71), /* DLR */
220 COSTS_N_INSNS (71), /* DR */
221 COSTS_N_INSNS (71), /* DSGFR */
222 COSTS_N_INSNS (71), /* DSGR */
223 };
224
225 extern int reload_completed;
226
227 /* Structure used to hold the components of an S/390 memory
228 address. A legitimate address on S/390 is of the general
229 form
230 base + index + displacement
231 where any of the components is optional.
232
233 base and index are registers of the class ADDR_REGS,
234 displacement is an unsigned 12-bit immediate constant. */
235
236 struct s390_address
237 {
238 rtx base;
239 rtx indx;
240 rtx disp;
241 bool pointer;
242 bool literal_pool;
243 };
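/* For example, a full base + index + displacement address is expected in
   the canonical form (plus (plus (reg) (reg)) (const_int N)); a single
   (reg) acts as a base only, and a lone constant is a pure displacement.
   See s390_decompose_address below for the exact decomposition rules.  */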
244
245 /* Which cpu are we tuning for. */
246 enum processor_type s390_tune = PROCESSOR_max;
247 int s390_tune_flags;
248 /* Which instruction set architecture to use. */
249 enum processor_type s390_arch;
250 int s390_arch_flags;
251
252 HOST_WIDE_INT s390_warn_framesize = 0;
253 HOST_WIDE_INT s390_stack_size = 0;
254 HOST_WIDE_INT s390_stack_guard = 0;
255
256 /* The following structure is embedded in the machine
257 specific part of struct function. */
258
259 struct GTY (()) s390_frame_layout
260 {
261 /* Offset within stack frame. */
262 HOST_WIDE_INT gprs_offset;
263 HOST_WIDE_INT f0_offset;
264 HOST_WIDE_INT f4_offset;
265 HOST_WIDE_INT f8_offset;
266 HOST_WIDE_INT backchain_offset;
267
268 /* Numbers of the first and last gpr for which slots in the
269 register save area are reserved. */
270 int first_save_gpr_slot;
271 int last_save_gpr_slot;
272
273 /* Number of first and last gpr to be saved, restored. */
274 int first_save_gpr;
275 int first_restore_gpr;
276 int last_save_gpr;
277 int last_restore_gpr;
278
279 /* Bits standing for floating point registers. Set, if the
280 respective register has to be saved. Starting with reg 16 (f0)
281 at the rightmost bit.
282 Bit 15 - 8 7 6 5 4 3 2 1 0
283 fpr 15 - 8 7 5 3 1 6 4 2 0
284 reg 31 - 24 23 22 21 20 19 18 17 16 */
285 unsigned int fpr_bitmap;
286
287 /* Number of floating point registers f8-f15 which must be saved. */
288 int high_fprs;
289
290 /* Set if return address needs to be saved.
291 This flag is set by s390_return_addr_rtx if it could not use
292 the initial value of r14 and therefore depends on r14 saved
293 to the stack. */
294 bool save_return_addr_p;
295
296 /* Size of stack frame. */
297 HOST_WIDE_INT frame_size;
298 };
299
300 /* Define the structure for the machine field in struct function. */
301
302 struct GTY(()) machine_function
303 {
304 struct s390_frame_layout frame_layout;
305
306 /* Literal pool base register. */
307 rtx base_reg;
308
309 /* True if we may need to perform branch splitting. */
310 bool split_branches_pending_p;
311
312 /* Some local-dynamic TLS symbol name. */
313 const char *some_ld_name;
314
315 bool has_landing_pad_p;
316 };
317
318 /* A few accessor macros for struct cfun->machine->s390_frame_layout. */
319
320 #define cfun_frame_layout (cfun->machine->frame_layout)
321 #define cfun_save_high_fprs_p (!!cfun_frame_layout.high_fprs)
322 #define cfun_gprs_save_area_size ((cfun_frame_layout.last_save_gpr_slot - \
323 cfun_frame_layout.first_save_gpr_slot + 1) * UNITS_PER_WORD)
324 #define cfun_set_fpr_bit(BITNUM) (cfun->machine->frame_layout.fpr_bitmap |= \
325 (1 << (BITNUM)))
326 #define cfun_fpr_bit_p(BITNUM) (!!(cfun->machine->frame_layout.fpr_bitmap & \
327 (1 << (BITNUM))))
328
329 /* Number of GPRs and FPRs used for argument passing. */
330 #define GP_ARG_NUM_REG 5
331 #define FP_ARG_NUM_REG (TARGET_64BIT? 4 : 2)
332
333 /* A couple of shortcuts. */
334 #define CONST_OK_FOR_J(x) \
335 CONST_OK_FOR_CONSTRAINT_P((x), 'J', "J")
336 #define CONST_OK_FOR_K(x) \
337 CONST_OK_FOR_CONSTRAINT_P((x), 'K', "K")
338 #define CONST_OK_FOR_Os(x) \
339 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
340 #define CONST_OK_FOR_Op(x) \
341 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
342 #define CONST_OK_FOR_On(x) \
343 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")
344
345 #define REGNO_PAIR_OK(REGNO, MODE) \
346 (HARD_REGNO_NREGS ((REGNO), (MODE)) == 1 || !((REGNO) & 1))
347
348 /* That's the read ahead of the dynamic branch prediction unit in
349 bytes on a z10 CPU. */
350 #define Z10_PREDICT_DISTANCE 384
351
352 static enum machine_mode
353 s390_libgcc_cmp_return_mode (void)
354 {
355 return TARGET_64BIT ? DImode : SImode;
356 }
357
358 static enum machine_mode
359 s390_libgcc_shift_count_mode (void)
360 {
361 return TARGET_64BIT ? DImode : SImode;
362 }
363
364 /* Return true if the back end supports mode MODE. */
365 static bool
366 s390_scalar_mode_supported_p (enum machine_mode mode)
367 {
368 if (DECIMAL_FLOAT_MODE_P (mode))
369 return true;
370 else
371 return default_scalar_mode_supported_p (mode);
372 }
373
374 /* Set the has_landing_pad_p flag in struct machine_function to VALUE. */
375
376 void
377 s390_set_has_landing_pad_p (bool value)
378 {
379 cfun->machine->has_landing_pad_p = value;
380 }
381
382 /* If two condition code modes are compatible, return a condition code
383 mode which is compatible with both. Otherwise, return
384 VOIDmode. */
385
386 static enum machine_mode
387 s390_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
388 {
389 if (m1 == m2)
390 return m1;
391
392 switch (m1)
393 {
394 case CCZmode:
395 if (m2 == CCUmode || m2 == CCTmode || m2 == CCZ1mode
396 || m2 == CCSmode || m2 == CCSRmode || m2 == CCURmode)
397 return m2;
398 return VOIDmode;
399
400 case CCSmode:
401 case CCUmode:
402 case CCTmode:
403 case CCSRmode:
404 case CCURmode:
405 case CCZ1mode:
406 if (m2 == CCZmode)
407 return m1;
408
409 return VOIDmode;
410
411 default:
412 return VOIDmode;
413 }
414 return VOIDmode;
415 }
416
417 /* Return true if SET either doesn't set the CC register, or else
418 the source and destination have matching CC modes and that
419 CC mode is at least as constrained as REQ_MODE. */
420
421 static bool
422 s390_match_ccmode_set (rtx set, enum machine_mode req_mode)
423 {
424 enum machine_mode set_mode;
425
426 gcc_assert (GET_CODE (set) == SET);
427
428 if (GET_CODE (SET_DEST (set)) != REG || !CC_REGNO_P (REGNO (SET_DEST (set))))
429 return 1;
430
431 set_mode = GET_MODE (SET_DEST (set));
432 switch (set_mode)
433 {
434 case CCSmode:
435 case CCSRmode:
436 case CCUmode:
437 case CCURmode:
438 case CCLmode:
439 case CCL1mode:
440 case CCL2mode:
441 case CCL3mode:
442 case CCT1mode:
443 case CCT2mode:
444 case CCT3mode:
445 if (req_mode != set_mode)
446 return 0;
447 break;
448
449 case CCZmode:
450 if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode
451 && req_mode != CCSRmode && req_mode != CCURmode)
452 return 0;
453 break;
454
455 case CCAPmode:
456 case CCANmode:
457 if (req_mode != CCAmode)
458 return 0;
459 break;
460
461 default:
462 gcc_unreachable ();
463 }
464
465 return (GET_MODE (SET_SRC (set)) == set_mode);
466 }
467
468 /* Return true if every SET in INSN that sets the CC register
469 has source and destination with matching CC modes and that
470 CC mode is at least as constrained as REQ_MODE.
471 If REQ_MODE is VOIDmode, always return false. */
472
473 bool
474 s390_match_ccmode (rtx insn, enum machine_mode req_mode)
475 {
476 int i;
477
478 /* s390_tm_ccmode returns VOIDmode to indicate failure. */
479 if (req_mode == VOIDmode)
480 return false;
481
482 if (GET_CODE (PATTERN (insn)) == SET)
483 return s390_match_ccmode_set (PATTERN (insn), req_mode);
484
485 if (GET_CODE (PATTERN (insn)) == PARALLEL)
486 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
487 {
488 rtx set = XVECEXP (PATTERN (insn), 0, i);
489 if (GET_CODE (set) == SET)
490 if (!s390_match_ccmode_set (set, req_mode))
491 return false;
492 }
493
494 return true;
495 }
496
497 /* If a test-under-mask instruction can be used to implement
498 (compare (and ... OP1) OP2), return the CC mode required
499 to do that. Otherwise, return VOIDmode.
500 MIXED is true if the instruction can distinguish between
501 CC1 and CC2 for mixed selected bits (TMxx); it is false
502 if the instruction cannot (TM). */
503
504 enum machine_mode
505 s390_tm_ccmode (rtx op1, rtx op2, bool mixed)
506 {
507 int bit0, bit1;
508
509 /* ??? Fixme: should work on CONST_DOUBLE as well. */
510 if (GET_CODE (op1) != CONST_INT || GET_CODE (op2) != CONST_INT)
511 return VOIDmode;
512
513 /* Selected bits all zero: CC0.
514 e.g.: int a; if ((a & (16 + 128)) == 0) */
515 if (INTVAL (op2) == 0)
516 return CCTmode;
517
518 /* Selected bits all one: CC3.
519 e.g.: int a; if ((a & (16 + 128)) == 16 + 128) */
520 if (INTVAL (op2) == INTVAL (op1))
521 return CCT3mode;
522
523 /* Exactly two bits selected, mixed zeroes and ones: CC1 or CC2. e.g.:
524 int a;
525 if ((a & (16 + 128)) == 16) -> CCT1
526 if ((a & (16 + 128)) == 128) -> CCT2 */
527 if (mixed)
528 {
529 bit1 = exact_log2 (INTVAL (op2));
530 bit0 = exact_log2 (INTVAL (op1) ^ INTVAL (op2));
531 if (bit0 != -1 && bit1 != -1)
532 return bit0 > bit1 ? CCT1mode : CCT2mode;
533 }
534
535 return VOIDmode;
536 }
537
538 /* Given a comparison code OP (EQ, NE, etc.) and the operands
539 OP0 and OP1 of a COMPARE, return the mode to be used for the
540 comparison. */
541
542 enum machine_mode
543 s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
544 {
545 switch (code)
546 {
547 case EQ:
548 case NE:
549 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
550 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
551 return CCAPmode;
552 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
553 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
554 return CCAPmode;
555 if ((GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
556 || GET_CODE (op1) == NEG)
557 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
558 return CCLmode;
559
560 if (GET_CODE (op0) == AND)
561 {
562 /* Check whether we can potentially do it via TM. */
563 enum machine_mode ccmode;
564 ccmode = s390_tm_ccmode (XEXP (op0, 1), op1, 1);
565 if (ccmode != VOIDmode)
566 {
567 /* Relax CCTmode to CCZmode to allow fall-back to AND
568 if that turns out to be beneficial. */
569 return ccmode == CCTmode ? CCZmode : ccmode;
570 }
571 }
572
573 if (register_operand (op0, HImode)
574 && GET_CODE (op1) == CONST_INT
575 && (INTVAL (op1) == -1 || INTVAL (op1) == 65535))
576 return CCT3mode;
577 if (register_operand (op0, QImode)
578 && GET_CODE (op1) == CONST_INT
579 && (INTVAL (op1) == -1 || INTVAL (op1) == 255))
580 return CCT3mode;
581
582 return CCZmode;
583
584 case LE:
585 case LT:
586 case GE:
587 case GT:
588 /* The only overflow condition of NEG and ABS happens when
589 INT_MIN is used as parameter, whose result then stays negative. So
590 we have an overflow from a positive value to a negative.
591 Using CCAP mode the resulting cc can be used for comparisons. */
592 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
593 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
594 return CCAPmode;
595
596 /* If constants are involved in an add instruction it is possible to use
597 the resulting cc for comparisons with zero. Knowing the sign of the
598 constant, the overflow behavior gets predictable. e.g.:
599 int a, b; if ((b = a + c) > 0)
600 with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP */
601 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
602 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
603 {
604 if (INTVAL (XEXP((op0), 1)) < 0)
605 return CCANmode;
606 else
607 return CCAPmode;
608 }
609 /* Fall through. */
610 case UNORDERED:
611 case ORDERED:
612 case UNEQ:
613 case UNLE:
614 case UNLT:
615 case UNGE:
616 case UNGT:
617 case LTGT:
618 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
619 && GET_CODE (op1) != CONST_INT)
620 return CCSRmode;
621 return CCSmode;
622
623 case LTU:
624 case GEU:
625 if (GET_CODE (op0) == PLUS
626 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
627 return CCL1mode;
628
629 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
630 && GET_CODE (op1) != CONST_INT)
631 return CCURmode;
632 return CCUmode;
633
634 case LEU:
635 case GTU:
636 if (GET_CODE (op0) == MINUS
637 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
638 return CCL2mode;
639
640 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
641 && GET_CODE (op1) != CONST_INT)
642 return CCURmode;
643 return CCUmode;
644
645 default:
646 gcc_unreachable ();
647 }
648 }
649
650 /* Replace the comparison OP0 CODE OP1 by a semantically equivalent one
651 that we can implement more efficiently. */
652
653 void
654 s390_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1)
655 {
656 /* Convert ZERO_EXTRACT back to AND to enable TM patterns. */
657 if ((*code == EQ || *code == NE)
658 && *op1 == const0_rtx
659 && GET_CODE (*op0) == ZERO_EXTRACT
660 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
661 && GET_CODE (XEXP (*op0, 2)) == CONST_INT
662 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
663 {
664 rtx inner = XEXP (*op0, 0);
665 HOST_WIDE_INT modesize = GET_MODE_BITSIZE (GET_MODE (inner));
666 HOST_WIDE_INT len = INTVAL (XEXP (*op0, 1));
667 HOST_WIDE_INT pos = INTVAL (XEXP (*op0, 2));
668
669 if (len > 0 && len < modesize
670 && pos >= 0 && pos + len <= modesize
671 && modesize <= HOST_BITS_PER_WIDE_INT)
672 {
673 unsigned HOST_WIDE_INT block;
674 block = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
675 block <<= modesize - pos - len;
676
677 *op0 = gen_rtx_AND (GET_MODE (inner), inner,
678 gen_int_mode (block, GET_MODE (inner)));
679 }
680 }
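/* Illustration: for a ZERO_EXTRACT of LEN 2 at POS 4 from an SImode
   value, block becomes ((1 << 2) - 1) << (32 - 4 - 2) == 0x0c000000,
   so the comparison turns into (x & 0x0c000000) == 0, which the TM
   patterns can handle.  */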
681
682 /* Narrow AND of memory against immediate to enable TM. */
683 if ((*code == EQ || *code == NE)
684 && *op1 == const0_rtx
685 && GET_CODE (*op0) == AND
686 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
687 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
688 {
689 rtx inner = XEXP (*op0, 0);
690 rtx mask = XEXP (*op0, 1);
691
692 /* Ignore paradoxical SUBREGs if all extra bits are masked out. */
693 if (GET_CODE (inner) == SUBREG
694 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (inner)))
695 && (GET_MODE_SIZE (GET_MODE (inner))
696 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
697 && ((INTVAL (mask)
698 & GET_MODE_MASK (GET_MODE (inner))
699 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
700 == 0))
701 inner = SUBREG_REG (inner);
702
703 /* Do not change volatile MEMs. */
704 if (MEM_P (inner) && !MEM_VOLATILE_P (inner))
705 {
706 int part = s390_single_part (XEXP (*op0, 1),
707 GET_MODE (inner), QImode, 0);
708 if (part >= 0)
709 {
710 mask = gen_int_mode (s390_extract_part (mask, QImode, 0), QImode);
711 inner = adjust_address_nv (inner, QImode, part);
712 *op0 = gen_rtx_AND (QImode, inner, mask);
713 }
714 }
715 }
716
717 /* Narrow comparisons against 0xffff to HImode if possible. */
718 if ((*code == EQ || *code == NE)
719 && GET_CODE (*op1) == CONST_INT
720 && INTVAL (*op1) == 0xffff
721 && SCALAR_INT_MODE_P (GET_MODE (*op0))
722 && (nonzero_bits (*op0, GET_MODE (*op0))
723 & ~(unsigned HOST_WIDE_INT) 0xffff) == 0)
724 {
725 *op0 = gen_lowpart (HImode, *op0);
726 *op1 = constm1_rtx;
727 }
728
729 /* Remove redundant UNSPEC_CCU_TO_INT conversions if possible. */
730 if (GET_CODE (*op0) == UNSPEC
731 && XINT (*op0, 1) == UNSPEC_CCU_TO_INT
732 && XVECLEN (*op0, 0) == 1
733 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCUmode
734 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
735 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
736 && *op1 == const0_rtx)
737 {
738 enum rtx_code new_code = UNKNOWN;
739 switch (*code)
740 {
741 case EQ: new_code = EQ; break;
742 case NE: new_code = NE; break;
743 case LT: new_code = GTU; break;
744 case GT: new_code = LTU; break;
745 case LE: new_code = GEU; break;
746 case GE: new_code = LEU; break;
747 default: break;
748 }
749
750 if (new_code != UNKNOWN)
751 {
752 *op0 = XVECEXP (*op0, 0, 0);
753 *code = new_code;
754 }
755 }
756
757 /* Remove redundant UNSPEC_CCZ_TO_INT conversions if possible. */
758 if (GET_CODE (*op0) == UNSPEC
759 && XINT (*op0, 1) == UNSPEC_CCZ_TO_INT
760 && XVECLEN (*op0, 0) == 1
761 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCZmode
762 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
763 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
764 && *op1 == const0_rtx)
765 {
766 enum rtx_code new_code = UNKNOWN;
767 switch (*code)
768 {
769 case EQ: new_code = EQ; break;
770 case NE: new_code = NE; break;
771 default: break;
772 }
773
774 if (new_code != UNKNOWN)
775 {
776 *op0 = XVECEXP (*op0, 0, 0);
777 *code = new_code;
778 }
779 }
780
781 /* Simplify cascaded EQ, NE with const0_rtx. */
782 if ((*code == NE || *code == EQ)
783 && (GET_CODE (*op0) == EQ || GET_CODE (*op0) == NE)
784 && GET_MODE (*op0) == SImode
785 && GET_MODE (XEXP (*op0, 0)) == CCZ1mode
786 && REG_P (XEXP (*op0, 0))
787 && XEXP (*op0, 1) == const0_rtx
788 && *op1 == const0_rtx)
789 {
790 if ((*code == EQ && GET_CODE (*op0) == NE)
791 || (*code == NE && GET_CODE (*op0) == EQ))
792 *code = EQ;
793 else
794 *code = NE;
795 *op0 = XEXP (*op0, 0);
796 }
797
798 /* Prefer register over memory as first operand. */
799 if (MEM_P (*op0) && REG_P (*op1))
800 {
801 rtx tem = *op0; *op0 = *op1; *op1 = tem;
802 *code = swap_condition (*code);
803 }
804 }
805
806 /* Emit a compare instruction suitable to implement the comparison
807 OP0 CODE OP1. Return the correct condition RTL to be placed in
808 the IF_THEN_ELSE of the conditional branch testing the result. */
809
810 rtx
811 s390_emit_compare (enum rtx_code code, rtx op0, rtx op1)
812 {
813 enum machine_mode mode = s390_select_ccmode (code, op0, op1);
814 rtx cc;
815
816 /* Do not output a redundant compare instruction if a compare_and_swap
817 pattern already computed the result and the machine modes are compatible. */
818 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
819 {
820 gcc_assert (s390_cc_modes_compatible (GET_MODE (op0), mode)
821 == GET_MODE (op0));
822 cc = op0;
823 }
824 else
825 {
826 cc = gen_rtx_REG (mode, CC_REGNUM);
827 emit_insn (gen_rtx_SET (VOIDmode, cc, gen_rtx_COMPARE (mode, op0, op1)));
828 }
829
830 return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
831 }
832
833 /* Emit a SImode compare and swap instruction setting MEM to NEW_RTX if the
834 current value of MEM matches CMP; OLD receives the previous value of MEM.
835 Return the correct condition RTL to be placed in the IF_THEN_ELSE of the
836 conditional branch testing the result. */
837
838 static rtx
839 s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem, rtx cmp, rtx new_rtx)
840 {
841 emit_insn (gen_sync_compare_and_swapsi (old, mem, cmp, new_rtx));
842 return s390_emit_compare (code, gen_rtx_REG (CCZ1mode, CC_REGNUM), const0_rtx);
843 }
844
845 /* Emit a jump instruction to TARGET. If COND is NULL_RTX, emit an
846 unconditional jump, else a conditional jump under condition COND. */
847
848 void
849 s390_emit_jump (rtx target, rtx cond)
850 {
851 rtx insn;
852
853 target = gen_rtx_LABEL_REF (VOIDmode, target);
854 if (cond)
855 target = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, target, pc_rtx);
856
857 insn = gen_rtx_SET (VOIDmode, pc_rtx, target);
858 emit_jump_insn (insn);
859 }
860
861 /* Return branch condition mask to implement a branch
862 specified by CODE. Return -1 for invalid comparisons. */
863
864 int
865 s390_branch_condition_mask (rtx code)
866 {
867 const int CC0 = 1 << 3;
868 const int CC1 = 1 << 2;
869 const int CC2 = 1 << 1;
870 const int CC3 = 1 << 0;
871
872 gcc_assert (GET_CODE (XEXP (code, 0)) == REG);
873 gcc_assert (REGNO (XEXP (code, 0)) == CC_REGNUM);
874 gcc_assert (XEXP (code, 1) == const0_rtx);
875
876 switch (GET_MODE (XEXP (code, 0)))
877 {
878 case CCZmode:
879 case CCZ1mode:
880 switch (GET_CODE (code))
881 {
882 case EQ: return CC0;
883 case NE: return CC1 | CC2 | CC3;
884 default: return -1;
885 }
886 break;
887
888 case CCT1mode:
889 switch (GET_CODE (code))
890 {
891 case EQ: return CC1;
892 case NE: return CC0 | CC2 | CC3;
893 default: return -1;
894 }
895 break;
896
897 case CCT2mode:
898 switch (GET_CODE (code))
899 {
900 case EQ: return CC2;
901 case NE: return CC0 | CC1 | CC3;
902 default: return -1;
903 }
904 break;
905
906 case CCT3mode:
907 switch (GET_CODE (code))
908 {
909 case EQ: return CC3;
910 case NE: return CC0 | CC1 | CC2;
911 default: return -1;
912 }
913 break;
914
915 case CCLmode:
916 switch (GET_CODE (code))
917 {
918 case EQ: return CC0 | CC2;
919 case NE: return CC1 | CC3;
920 default: return -1;
921 }
922 break;
923
924 case CCL1mode:
925 switch (GET_CODE (code))
926 {
927 case LTU: return CC2 | CC3; /* carry */
928 case GEU: return CC0 | CC1; /* no carry */
929 default: return -1;
930 }
931 break;
932
933 case CCL2mode:
934 switch (GET_CODE (code))
935 {
936 case GTU: return CC0 | CC1; /* borrow */
937 case LEU: return CC2 | CC3; /* no borrow */
938 default: return -1;
939 }
940 break;
941
942 case CCL3mode:
943 switch (GET_CODE (code))
944 {
945 case EQ: return CC0 | CC2;
946 case NE: return CC1 | CC3;
947 case LTU: return CC1;
948 case GTU: return CC3;
949 case LEU: return CC1 | CC2;
950 case GEU: return CC2 | CC3;
951 default: return -1;
952 }
953
954 case CCUmode:
955 switch (GET_CODE (code))
956 {
957 case EQ: return CC0;
958 case NE: return CC1 | CC2 | CC3;
959 case LTU: return CC1;
960 case GTU: return CC2;
961 case LEU: return CC0 | CC1;
962 case GEU: return CC0 | CC2;
963 default: return -1;
964 }
965 break;
966
967 case CCURmode:
968 switch (GET_CODE (code))
969 {
970 case EQ: return CC0;
971 case NE: return CC2 | CC1 | CC3;
972 case LTU: return CC2;
973 case GTU: return CC1;
974 case LEU: return CC0 | CC2;
975 case GEU: return CC0 | CC1;
976 default: return -1;
977 }
978 break;
979
980 case CCAPmode:
981 switch (GET_CODE (code))
982 {
983 case EQ: return CC0;
984 case NE: return CC1 | CC2 | CC3;
985 case LT: return CC1 | CC3;
986 case GT: return CC2;
987 case LE: return CC0 | CC1 | CC3;
988 case GE: return CC0 | CC2;
989 default: return -1;
990 }
991 break;
992
993 case CCANmode:
994 switch (GET_CODE (code))
995 {
996 case EQ: return CC0;
997 case NE: return CC1 | CC2 | CC3;
998 case LT: return CC1;
999 case GT: return CC2 | CC3;
1000 case LE: return CC0 | CC1;
1001 case GE: return CC0 | CC2 | CC3;
1002 default: return -1;
1003 }
1004 break;
1005
1006 case CCSmode:
1007 switch (GET_CODE (code))
1008 {
1009 case EQ: return CC0;
1010 case NE: return CC1 | CC2 | CC3;
1011 case LT: return CC1;
1012 case GT: return CC2;
1013 case LE: return CC0 | CC1;
1014 case GE: return CC0 | CC2;
1015 case UNORDERED: return CC3;
1016 case ORDERED: return CC0 | CC1 | CC2;
1017 case UNEQ: return CC0 | CC3;
1018 case UNLT: return CC1 | CC3;
1019 case UNGT: return CC2 | CC3;
1020 case UNLE: return CC0 | CC1 | CC3;
1021 case UNGE: return CC0 | CC2 | CC3;
1022 case LTGT: return CC1 | CC2;
1023 default: return -1;
1024 }
1025 break;
1026
1027 case CCSRmode:
1028 switch (GET_CODE (code))
1029 {
1030 case EQ: return CC0;
1031 case NE: return CC2 | CC1 | CC3;
1032 case LT: return CC2;
1033 case GT: return CC1;
1034 case LE: return CC0 | CC2;
1035 case GE: return CC0 | CC1;
1036 case UNORDERED: return CC3;
1037 case ORDERED: return CC0 | CC2 | CC1;
1038 case UNEQ: return CC0 | CC3;
1039 case UNLT: return CC2 | CC3;
1040 case UNGT: return CC1 | CC3;
1041 case UNLE: return CC0 | CC2 | CC3;
1042 case UNGE: return CC0 | CC1 | CC3;
1043 case LTGT: return CC2 | CC1;
1044 default: return -1;
1045 }
1046 break;
1047
1048 default:
1049 return -1;
1050 }
1051 }
1052
1053
1054 /* Return branch condition mask to implement a compare and branch
1055 specified by CODE. Return -1 for invalid comparisons. */
1056
1057 int
1058 s390_compare_and_branch_condition_mask (rtx code)
1059 {
1060 const int CC0 = 1 << 3;
1061 const int CC1 = 1 << 2;
1062 const int CC2 = 1 << 1;
1063
1064 switch (GET_CODE (code))
1065 {
1066 case EQ:
1067 return CC0;
1068 case NE:
1069 return CC1 | CC2;
1070 case LT:
1071 case LTU:
1072 return CC1;
1073 case GT:
1074 case GTU:
1075 return CC2;
1076 case LE:
1077 case LEU:
1078 return CC0 | CC1;
1079 case GE:
1080 case GEU:
1081 return CC0 | CC2;
1082 default:
1083 gcc_unreachable ();
1084 }
1085 return -1;
1086 }
1087
1088 /* If INV is false, return assembler mnemonic string to implement
1089 a branch specified by CODE. If INV is true, return mnemonic
1090 for the corresponding inverted branch. */
1091
1092 static const char *
1093 s390_branch_condition_mnemonic (rtx code, int inv)
1094 {
1095 int mask;
1096
1097 static const char *const mnemonic[16] =
1098 {
1099 NULL, "o", "h", "nle",
1100 "l", "nhe", "lh", "ne",
1101 "e", "nlh", "he", "nl",
1102 "le", "nh", "no", NULL
1103 };
1104
1105 if (GET_CODE (XEXP (code, 0)) == REG
1106 && REGNO (XEXP (code, 0)) == CC_REGNUM
1107 && XEXP (code, 1) == const0_rtx)
1108 mask = s390_branch_condition_mask (code);
1109 else
1110 mask = s390_compare_and_branch_condition_mask (code);
1111
1112 gcc_assert (mask >= 0);
1113
1114 if (inv)
1115 mask ^= 15;
1116
1117 gcc_assert (mask >= 1 && mask <= 14);
1118
1119 return mnemonic[mask];
1120 }
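/* Example: in CCZmode an EQ test yields mask CC0 == 8, so the mnemonic
   emitted is mnemonic[8] == "e"; NE yields CC1 | CC2 | CC3 == 7 and thus
   "ne".  With INV set the mask is complemented, swapping the two.  */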
1121
1122 /* Return the part of op which has a value different from def.
1123 The size of the part is determined by mode.
1124 Use this function only if you already know that op really
1125 contains such a part. */
1126
1127 unsigned HOST_WIDE_INT
1128 s390_extract_part (rtx op, enum machine_mode mode, int def)
1129 {
1130 unsigned HOST_WIDE_INT value = 0;
1131 int max_parts = HOST_BITS_PER_WIDE_INT / GET_MODE_BITSIZE (mode);
1132 int part_bits = GET_MODE_BITSIZE (mode);
1133 unsigned HOST_WIDE_INT part_mask
1134 = ((unsigned HOST_WIDE_INT)1 << part_bits) - 1;
1135 int i;
1136
1137 for (i = 0; i < max_parts; i++)
1138 {
1139 if (i == 0)
1140 value = (unsigned HOST_WIDE_INT) INTVAL (op);
1141 else
1142 value >>= part_bits;
1143
1144 if ((value & part_mask) != (def & part_mask))
1145 return value & part_mask;
1146 }
1147
1148 gcc_unreachable ();
1149 }
1150
1151 /* If OP is an integer constant of mode MODE with exactly one
1152 part of mode PART_MODE unequal to DEF, return the number of that
1153 part. Otherwise, return -1. */
1154
1155 int
1156 s390_single_part (rtx op,
1157 enum machine_mode mode,
1158 enum machine_mode part_mode,
1159 int def)
1160 {
1161 unsigned HOST_WIDE_INT value = 0;
1162 int n_parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (part_mode);
1163 unsigned HOST_WIDE_INT part_mask
1164 = ((unsigned HOST_WIDE_INT)1 << GET_MODE_BITSIZE (part_mode)) - 1;
1165 int i, part = -1;
1166
1167 if (GET_CODE (op) != CONST_INT)
1168 return -1;
1169
1170 for (i = 0; i < n_parts; i++)
1171 {
1172 if (i == 0)
1173 value = (unsigned HOST_WIDE_INT) INTVAL (op);
1174 else
1175 value >>= GET_MODE_BITSIZE (part_mode);
1176
1177 if ((value & part_mask) != (def & part_mask))
1178 {
1179 if (part != -1)
1180 return -1;
1181 else
1182 part = i;
1183 }
1184 }
1185 return part == -1 ? -1 : n_parts - 1 - part;
1186 }
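/* Example: for OP = 0x0000ff00 in SImode with QImode parts and DEF 0,
   exactly one part differs from DEF and the function returns 2; parts
   are numbered starting from the most significant one, which matches the
   byte offset of that part within the value in (big-endian) memory.  */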
1187
1188 /* Return true if IN contains a contiguous bitfield in the lower SIZE
1189 bits and no other bits are set in IN. POS and LENGTH can be used
1190 to obtain the start position and the length of the bitfield.
1191
1192 POS gives the position of the first bit of the bitfield counting
1193 from the lowest order bit starting with zero. In order to use this
1194 value for S/390 instructions this has to be converted to "bits big
1195 endian" style. */
1196
1197 bool
1198 s390_contiguous_bitmask_p (unsigned HOST_WIDE_INT in, int size,
1199 int *pos, int *length)
1200 {
1201 int tmp_pos = 0;
1202 int tmp_length = 0;
1203 int i;
1204 unsigned HOST_WIDE_INT mask = 1ULL;
1205 bool contiguous = false;
1206
1207 for (i = 0; i < size; mask <<= 1, i++)
1208 {
1209 if (contiguous)
1210 {
1211 if (mask & in)
1212 tmp_length++;
1213 else
1214 break;
1215 }
1216 else
1217 {
1218 if (mask & in)
1219 {
1220 contiguous = true;
1221 tmp_length++;
1222 }
1223 else
1224 tmp_pos++;
1225 }
1226 }
1227
1228 if (!tmp_length)
1229 return false;
1230
1231 /* Calculate a mask for all bits beyond the contiguous bits. */
1232 mask = (-1LL & ~(((1ULL << (tmp_length + tmp_pos - 1)) << 1) - 1));
1233
1234 if (mask & in)
1235 return false;
1236
1237 if (tmp_length + tmp_pos - 1 > size)
1238 return false;
1239
1240 if (length)
1241 *length = tmp_length;
1242
1243 if (pos)
1244 *pos = tmp_pos;
1245
1246 return true;
1247 }
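/* Example: for IN = 0x0ff0 and SIZE = 16 this returns true with
   *POS = 4 and *LENGTH = 8; POS counts from the least significant bit
   as described above.  */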
1248
1249 /* Check whether we can (and want to) split a double-word
1250 move in mode MODE from SRC to DST into two single-word
1251 moves, moving the subword FIRST_SUBWORD first. */
1252
1253 bool
1254 s390_split_ok_p (rtx dst, rtx src, enum machine_mode mode, int first_subword)
1255 {
1256 /* Floating point registers cannot be split. */
1257 if (FP_REG_P (src) || FP_REG_P (dst))
1258 return false;
1259
1260 /* We don't need to split if operands are directly accessible. */
1261 if (s_operand (src, mode) || s_operand (dst, mode))
1262 return false;
1263
1264 /* Non-offsettable memory references cannot be split. */
1265 if ((GET_CODE (src) == MEM && !offsettable_memref_p (src))
1266 || (GET_CODE (dst) == MEM && !offsettable_memref_p (dst)))
1267 return false;
1268
1269 /* Moving the first subword must not clobber a register
1270 needed to move the second subword. */
1271 if (register_operand (dst, mode))
1272 {
1273 rtx subreg = operand_subword (dst, first_subword, 0, mode);
1274 if (reg_overlap_mentioned_p (subreg, src))
1275 return false;
1276 }
1277
1278 return true;
1279 }
1280
1281 /* Return true if it can be proven that [MEM1, MEM1 + SIZE]
1282 and [MEM2, MEM2 + SIZE] do overlap and false
1283 otherwise. */
1284
1285 bool
1286 s390_overlap_p (rtx mem1, rtx mem2, HOST_WIDE_INT size)
1287 {
1288 rtx addr1, addr2, addr_delta;
1289 HOST_WIDE_INT delta;
1290
1291 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
1292 return true;
1293
1294 if (size == 0)
1295 return false;
1296
1297 addr1 = XEXP (mem1, 0);
1298 addr2 = XEXP (mem2, 0);
1299
1300 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
1301
1302 /* This overlapping check is used by peepholes merging memory block operations.
1303 Overlapping operations would otherwise be recognized by the S/390 hardware
1304 and would fall back to a slower implementation. Allowing overlapping
1305 operations would lead to slow code but not to wrong code. Therefore we are
1306 somewhat optimistic if we cannot prove that the memory blocks are
1307 overlapping.
1308 That's why we return false here although this may accept operations on
1309 overlapping memory areas. */
1310 if (!addr_delta || GET_CODE (addr_delta) != CONST_INT)
1311 return false;
1312
1313 delta = INTVAL (addr_delta);
1314
1315 if (delta == 0
1316 || (delta > 0 && delta < size)
1317 || (delta < 0 && -delta < size))
1318 return true;
1319
1320 return false;
1321 }
1322
1323 /* Check whether the address of memory reference MEM2 equals exactly
1324 the address of memory reference MEM1 plus DELTA. Return true if
1325 we can prove this to be the case, false otherwise. */
1326
1327 bool
1328 s390_offset_p (rtx mem1, rtx mem2, rtx delta)
1329 {
1330 rtx addr1, addr2, addr_delta;
1331
1332 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
1333 return false;
1334
1335 addr1 = XEXP (mem1, 0);
1336 addr2 = XEXP (mem2, 0);
1337
1338 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
1339 if (!addr_delta || !rtx_equal_p (addr_delta, delta))
1340 return false;
1341
1342 return true;
1343 }
1344
1345 /* Expand logical operator CODE in mode MODE with operands OPERANDS. */
1346
1347 void
1348 s390_expand_logical_operator (enum rtx_code code, enum machine_mode mode,
1349 rtx *operands)
1350 {
1351 enum machine_mode wmode = mode;
1352 rtx dst = operands[0];
1353 rtx src1 = operands[1];
1354 rtx src2 = operands[2];
1355 rtx op, clob, tem;
1356
1357 /* If we cannot handle the operation directly, use a temp register. */
1358 if (!s390_logical_operator_ok_p (operands))
1359 dst = gen_reg_rtx (mode);
1360
1361 /* QImode and HImode patterns make sense only if we have a destination
1362 in memory. Otherwise perform the operation in SImode. */
1363 if ((mode == QImode || mode == HImode) && GET_CODE (dst) != MEM)
1364 wmode = SImode;
1365
1366 /* Widen operands if required. */
1367 if (mode != wmode)
1368 {
1369 if (GET_CODE (dst) == SUBREG
1370 && (tem = simplify_subreg (wmode, dst, mode, 0)) != 0)
1371 dst = tem;
1372 else if (REG_P (dst))
1373 dst = gen_rtx_SUBREG (wmode, dst, 0);
1374 else
1375 dst = gen_reg_rtx (wmode);
1376
1377 if (GET_CODE (src1) == SUBREG
1378 && (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)
1379 src1 = tem;
1380 else if (GET_MODE (src1) != VOIDmode)
1381 src1 = gen_rtx_SUBREG (wmode, force_reg (mode, src1), 0);
1382
1383 if (GET_CODE (src2) == SUBREG
1384 && (tem = simplify_subreg (wmode, src2, mode, 0)) != 0)
1385 src2 = tem;
1386 else if (GET_MODE (src2) != VOIDmode)
1387 src2 = gen_rtx_SUBREG (wmode, force_reg (mode, src2), 0);
1388 }
1389
1390 /* Emit the instruction. */
1391 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, wmode, src1, src2));
1392 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
1393 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
1394
1395 /* Fix up the destination if needed. */
1396 if (dst != operands[0])
1397 emit_move_insn (operands[0], gen_lowpart (mode, dst));
1398 }
1399
1400 /* Check whether OPERANDS are OK for a logical operation (AND, IOR, XOR). */
1401
1402 bool
1403 s390_logical_operator_ok_p (rtx *operands)
1404 {
1405 /* If the destination operand is in memory, it needs to coincide
1406 with one of the source operands. After reload, it has to be
1407 the first source operand. */
1408 if (GET_CODE (operands[0]) == MEM)
1409 return rtx_equal_p (operands[0], operands[1])
1410 || (!reload_completed && rtx_equal_p (operands[0], operands[2]));
1411
1412 return true;
1413 }
1414
1415 /* Narrow logical operation CODE of memory operand MEMOP with immediate
1416 operand IMMOP to switch from SS to SI type instructions. */
1417
1418 void
1419 s390_narrow_logical_operator (enum rtx_code code, rtx *memop, rtx *immop)
1420 {
1421 int def = code == AND ? -1 : 0;
1422 HOST_WIDE_INT mask;
1423 int part;
1424
1425 gcc_assert (GET_CODE (*memop) == MEM);
1426 gcc_assert (!MEM_VOLATILE_P (*memop));
1427
1428 mask = s390_extract_part (*immop, QImode, def);
1429 part = s390_single_part (*immop, GET_MODE (*memop), QImode, def);
1430 gcc_assert (part >= 0);
1431
1432 *memop = adjust_address (*memop, QImode, part);
1433 *immop = gen_int_mode (mask, QImode);
1434 }
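/* Example: an SImode AND of a memory operand with the immediate
   0xffff00ff differs from the AND default (-1) in a single byte, so the
   operation is narrowed to a QImode AND of the byte at offset 2 with
   mask 0x00, matching an SI-type instruction (NI).  */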
1435
1436
1437 /* How to allocate a 'struct machine_function'. */
1438
1439 static struct machine_function *
1440 s390_init_machine_status (void)
1441 {
1442 return GGC_CNEW (struct machine_function);
1443 }
1444
1445 /* Change optimizations to be performed, depending on the
1446 optimization level.
1447
1448 LEVEL is the optimization level specified; 2 if `-O2' is
1449 specified, 1 if `-O' is specified, and 0 if neither is specified.
1450
1451 SIZE is nonzero if `-Os' is specified and zero otherwise. */
1452
1453 void
1454 optimization_options (int level ATTRIBUTE_UNUSED, int size ATTRIBUTE_UNUSED)
1455 {
1456 /* ??? There are apparently still problems with -fcaller-saves. */
1457 flag_caller_saves = 0;
1458
1459 /* By default, always emit DWARF-2 unwind info. This allows debugging
1460 without maintaining a stack frame back-chain. */
1461 flag_asynchronous_unwind_tables = 1;
1462
1463 /* Use MVCLE instructions to decrease code size if requested. */
1464 if (size != 0)
1465 target_flags |= MASK_MVCLE;
1466 }
1467
1468 /* Return true if ARG is the name of a processor. Set *TYPE and *FLAGS
1469 to the associated processor_type and processor_flags if so. */
1470
1471 static bool
1472 s390_handle_arch_option (const char *arg,
1473 enum processor_type *type,
1474 int *flags)
1475 {
1476 static struct pta
1477 {
1478 const char *const name; /* processor name or nickname. */
1479 const enum processor_type processor;
1480 const int flags; /* From enum processor_flags. */
1481 }
1482 const processor_alias_table[] =
1483 {
1484 {"g5", PROCESSOR_9672_G5, PF_IEEE_FLOAT},
1485 {"g6", PROCESSOR_9672_G6, PF_IEEE_FLOAT},
1486 {"z900", PROCESSOR_2064_Z900, PF_IEEE_FLOAT | PF_ZARCH},
1487 {"z990", PROCESSOR_2084_Z990, PF_IEEE_FLOAT | PF_ZARCH
1488 | PF_LONG_DISPLACEMENT},
1489 {"z9-109", PROCESSOR_2094_Z9_109, PF_IEEE_FLOAT | PF_ZARCH
1490 | PF_LONG_DISPLACEMENT | PF_EXTIMM},
1491 {"z9-ec", PROCESSOR_2094_Z9_109, PF_IEEE_FLOAT | PF_ZARCH
1492 | PF_LONG_DISPLACEMENT | PF_EXTIMM | PF_DFP },
1493 {"z10", PROCESSOR_2097_Z10, PF_IEEE_FLOAT | PF_ZARCH
1494 | PF_LONG_DISPLACEMENT | PF_EXTIMM | PF_DFP | PF_Z10},
1495 };
1496 size_t i;
1497
1498 for (i = 0; i < ARRAY_SIZE (processor_alias_table); i++)
1499 if (strcmp (arg, processor_alias_table[i].name) == 0)
1500 {
1501 *type = processor_alias_table[i].processor;
1502 *flags = processor_alias_table[i].flags;
1503 return true;
1504 }
1505 return false;
1506 }
1507
1508 /* Implement TARGET_HANDLE_OPTION. */
1509
1510 static bool
1511 s390_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
1512 {
1513 switch (code)
1514 {
1515 case OPT_march_:
1516 return s390_handle_arch_option (arg, &s390_arch, &s390_arch_flags);
1517
1518 case OPT_mstack_guard_:
1519 if (sscanf (arg, HOST_WIDE_INT_PRINT_DEC, &s390_stack_guard) != 1)
1520 return false;
1521 if (exact_log2 (s390_stack_guard) == -1)
1522 error ("stack guard value must be an exact power of 2");
1523 return true;
1524
1525 case OPT_mstack_size_:
1526 if (sscanf (arg, HOST_WIDE_INT_PRINT_DEC, &s390_stack_size) != 1)
1527 return false;
1528 if (exact_log2 (s390_stack_size) == -1)
1529 error ("stack size must be an exact power of 2");
1530 return true;
1531
1532 case OPT_mtune_:
1533 return s390_handle_arch_option (arg, &s390_tune, &s390_tune_flags);
1534
1535 case OPT_mwarn_framesize_:
1536 return sscanf (arg, HOST_WIDE_INT_PRINT_DEC, &s390_warn_framesize) == 1;
1537
1538 default:
1539 return true;
1540 }
1541 }
1542
1543 void
1544 override_options (void)
1545 {
1546 /* Set up function hooks. */
1547 init_machine_status = s390_init_machine_status;
1548
1549 /* Architecture mode defaults according to ABI. */
1550 if (!(target_flags_explicit & MASK_ZARCH))
1551 {
1552 if (TARGET_64BIT)
1553 target_flags |= MASK_ZARCH;
1554 else
1555 target_flags &= ~MASK_ZARCH;
1556 }
1557
1558 /* Determine processor architectural level. */
1559 if (!s390_arch_string)
1560 {
1561 s390_arch_string = TARGET_ZARCH? "z900" : "g5";
1562 s390_handle_arch_option (s390_arch_string, &s390_arch, &s390_arch_flags);
1563 }
1564
1565 /* Determine processor to tune for. */
1566 if (s390_tune == PROCESSOR_max)
1567 {
1568 s390_tune = s390_arch;
1569 s390_tune_flags = s390_arch_flags;
1570 }
1571
1572 /* Sanity checks. */
1573 if (TARGET_ZARCH && !TARGET_CPU_ZARCH)
1574 error ("z/Architecture mode not supported on %s", s390_arch_string);
1575 if (TARGET_64BIT && !TARGET_ZARCH)
1576 error ("64-bit ABI not supported in ESA/390 mode");
1577
1578 if (TARGET_HARD_DFP && !TARGET_DFP)
1579 {
1580 if (target_flags_explicit & MASK_HARD_DFP)
1581 {
1582 if (!TARGET_CPU_DFP)
1583 error ("Hardware decimal floating point instructions"
1584 " not available on %s", s390_arch_string);
1585 if (!TARGET_ZARCH)
1586 error ("Hardware decimal floating point instructions"
1587 " not available in ESA/390 mode");
1588 }
1589 else
1590 target_flags &= ~MASK_HARD_DFP;
1591 }
1592
1593 if ((target_flags_explicit & MASK_SOFT_FLOAT) && TARGET_SOFT_FLOAT)
1594 {
1595 if ((target_flags_explicit & MASK_HARD_DFP) && TARGET_HARD_DFP)
1596 error ("-mhard-dfp can't be used in conjunction with -msoft-float");
1597
1598 target_flags &= ~MASK_HARD_DFP;
1599 }
1600
1601 /* Set processor cost function. */
1602 switch (s390_tune)
1603 {
1604 case PROCESSOR_2084_Z990:
1605 s390_cost = &z990_cost;
1606 break;
1607 case PROCESSOR_2094_Z9_109:
1608 s390_cost = &z9_109_cost;
1609 break;
1610 case PROCESSOR_2097_Z10:
1611 s390_cost = &z10_cost;
1612 break;
1613 default:
1614 s390_cost = &z900_cost;
1615 }
1616
1617 if (TARGET_BACKCHAIN && TARGET_PACKED_STACK && TARGET_HARD_FLOAT)
1618 error ("-mbackchain -mpacked-stack -mhard-float are not supported "
1619 "in combination");
1620
1621 if (s390_stack_size)
1622 {
1623 if (s390_stack_guard >= s390_stack_size)
1624 error ("stack size must be greater than the stack guard value");
1625 else if (s390_stack_size > 1 << 16)
1626 error ("stack size must not be greater than 64k");
1627 }
1628 else if (s390_stack_guard)
1629 error ("-mstack-guard implies use of -mstack-size");
1630
1631 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
1632 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
1633 target_flags |= MASK_LONG_DOUBLE_128;
1634 #endif
1635
1636 if (s390_tune == PROCESSOR_2097_Z10
1637 && !PARAM_SET_P (PARAM_MAX_UNROLLED_INSNS))
1638 set_param_value ("max-unrolled-insns", 100);
1639 }
1640
1641 /* Map for smallest class containing reg regno. */
1642
1643 const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
1644 { GENERAL_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1645 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1646 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1647 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1648 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1649 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1650 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1651 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1652 ADDR_REGS, CC_REGS, ADDR_REGS, ADDR_REGS,
1653 ACCESS_REGS, ACCESS_REGS
1654 };
1655
1656 /* Return attribute type of insn. */
1657
1658 static enum attr_type
1659 s390_safe_attr_type (rtx insn)
1660 {
1661 if (recog_memoized (insn) >= 0)
1662 return get_attr_type (insn);
1663 else
1664 return TYPE_NONE;
1665 }
1666
1667 /* Return true if DISP is a valid short displacement. */
1668
1669 static bool
1670 s390_short_displacement (rtx disp)
1671 {
1672 /* No displacement is OK. */
1673 if (!disp)
1674 return true;
1675
1676 /* Without the long displacement facility we don't need to
677 distinguish between long and short displacement. */
1678 if (!TARGET_LONG_DISPLACEMENT)
1679 return true;
1680
1681 /* Integer displacement in range. */
1682 if (GET_CODE (disp) == CONST_INT)
1683 return INTVAL (disp) >= 0 && INTVAL (disp) < 4096;
1684
1685 /* GOT offset is not OK, the GOT can be large. */
1686 if (GET_CODE (disp) == CONST
1687 && GET_CODE (XEXP (disp, 0)) == UNSPEC
1688 && (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
1689 || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
1690 return false;
1691
1692 /* All other symbolic constants are literal pool references,
1693 which are OK as the literal pool must be small. */
1694 if (GET_CODE (disp) == CONST)
1695 return true;
1696
1697 return false;
1698 }
1699
1700 /* Decompose a RTL expression ADDR for a memory address into
1701 its components, returned in OUT.
1702
1703 Returns false if ADDR is not a valid memory address, true
1704 otherwise. If OUT is NULL, don't return the components,
1705 but check for validity only.
1706
1707 Note: Only addresses in canonical form are recognized.
1708 LEGITIMIZE_ADDRESS should convert non-canonical forms to the
1709 canonical form so that they will be recognized. */
1710
1711 static int
1712 s390_decompose_address (rtx addr, struct s390_address *out)
1713 {
1714 HOST_WIDE_INT offset = 0;
1715 rtx base = NULL_RTX;
1716 rtx indx = NULL_RTX;
1717 rtx disp = NULL_RTX;
1718 rtx orig_disp;
1719 bool pointer = false;
1720 bool base_ptr = false;
1721 bool indx_ptr = false;
1722 bool literal_pool = false;
1723
1724 /* We may need to substitute the literal pool base register into the address
1725 below. However, at this point we do not know which register is going to
1726 be used as base, so we substitute the arg pointer register. This is going
1727 to be treated as holding a pointer below -- it shouldn't be used for any
1728 other purpose. */
1729 rtx fake_pool_base = gen_rtx_REG (Pmode, ARG_POINTER_REGNUM);
1730
1731 /* Decompose address into base + index + displacement. */
1732
1733 if (GET_CODE (addr) == REG || GET_CODE (addr) == UNSPEC)
1734 base = addr;
1735
1736 else if (GET_CODE (addr) == PLUS)
1737 {
1738 rtx op0 = XEXP (addr, 0);
1739 rtx op1 = XEXP (addr, 1);
1740 enum rtx_code code0 = GET_CODE (op0);
1741 enum rtx_code code1 = GET_CODE (op1);
1742
1743 if (code0 == REG || code0 == UNSPEC)
1744 {
1745 if (code1 == REG || code1 == UNSPEC)
1746 {
1747 indx = op0; /* index + base */
1748 base = op1;
1749 }
1750
1751 else
1752 {
1753 base = op0; /* base + displacement */
1754 disp = op1;
1755 }
1756 }
1757
1758 else if (code0 == PLUS)
1759 {
1760 indx = XEXP (op0, 0); /* index + base + disp */
1761 base = XEXP (op0, 1);
1762 disp = op1;
1763 }
1764
1765 else
1766 {
1767 return false;
1768 }
1769 }
1770
1771 else
1772 disp = addr; /* displacement */
1773
1774 /* Extract integer part of displacement. */
1775 orig_disp = disp;
1776 if (disp)
1777 {
1778 if (GET_CODE (disp) == CONST_INT)
1779 {
1780 offset = INTVAL (disp);
1781 disp = NULL_RTX;
1782 }
1783 else if (GET_CODE (disp) == CONST
1784 && GET_CODE (XEXP (disp, 0)) == PLUS
1785 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
1786 {
1787 offset = INTVAL (XEXP (XEXP (disp, 0), 1));
1788 disp = XEXP (XEXP (disp, 0), 0);
1789 }
1790 }
1791
1792 /* Strip off CONST here to avoid special case tests later. */
1793 if (disp && GET_CODE (disp) == CONST)
1794 disp = XEXP (disp, 0);
1795
1796 /* We can convert literal pool addresses to
1797 displacements by basing them off the base register. */
1798 if (disp && GET_CODE (disp) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (disp))
1799 {
1800 /* Either base or index must be free to hold the base register. */
1801 if (!base)
1802 base = fake_pool_base, literal_pool = true;
1803 else if (!indx)
1804 indx = fake_pool_base, literal_pool = true;
1805 else
1806 return false;
1807
1808 /* Mark up the displacement. */
1809 disp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, disp),
1810 UNSPEC_LTREL_OFFSET);
1811 }
1812
1813 /* Validate base register. */
1814 if (base)
1815 {
1816 if (GET_CODE (base) == UNSPEC)
1817 switch (XINT (base, 1))
1818 {
1819 case UNSPEC_LTREF:
1820 if (!disp)
1821 disp = gen_rtx_UNSPEC (Pmode,
1822 gen_rtvec (1, XVECEXP (base, 0, 0)),
1823 UNSPEC_LTREL_OFFSET);
1824 else
1825 return false;
1826
1827 base = XVECEXP (base, 0, 1);
1828 break;
1829
1830 case UNSPEC_LTREL_BASE:
1831 if (XVECLEN (base, 0) == 1)
1832 base = fake_pool_base, literal_pool = true;
1833 else
1834 base = XVECEXP (base, 0, 1);
1835 break;
1836
1837 default:
1838 return false;
1839 }
1840
1841 if (!REG_P (base)
1842 || (GET_MODE (base) != SImode
1843 && GET_MODE (base) != Pmode))
1844 return false;
1845
1846 if (REGNO (base) == STACK_POINTER_REGNUM
1847 || REGNO (base) == FRAME_POINTER_REGNUM
1848 || ((reload_completed || reload_in_progress)
1849 && frame_pointer_needed
1850 && REGNO (base) == HARD_FRAME_POINTER_REGNUM)
1851 || REGNO (base) == ARG_POINTER_REGNUM
1852 || (flag_pic
1853 && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
1854 pointer = base_ptr = true;
1855
1856 if ((reload_completed || reload_in_progress)
1857 && base == cfun->machine->base_reg)
1858 pointer = base_ptr = literal_pool = true;
1859 }
1860
1861 /* Validate index register. */
1862 if (indx)
1863 {
1864 if (GET_CODE (indx) == UNSPEC)
1865 switch (XINT (indx, 1))
1866 {
1867 case UNSPEC_LTREF:
1868 if (!disp)
1869 disp = gen_rtx_UNSPEC (Pmode,
1870 gen_rtvec (1, XVECEXP (indx, 0, 0)),
1871 UNSPEC_LTREL_OFFSET);
1872 else
1873 return false;
1874
1875 indx = XVECEXP (indx, 0, 1);
1876 break;
1877
1878 case UNSPEC_LTREL_BASE:
1879 if (XVECLEN (indx, 0) == 1)
1880 indx = fake_pool_base, literal_pool = true;
1881 else
1882 indx = XVECEXP (indx, 0, 1);
1883 break;
1884
1885 default:
1886 return false;
1887 }
1888
1889 if (!REG_P (indx)
1890 || (GET_MODE (indx) != SImode
1891 && GET_MODE (indx) != Pmode))
1892 return false;
1893
1894 if (REGNO (indx) == STACK_POINTER_REGNUM
1895 || REGNO (indx) == FRAME_POINTER_REGNUM
1896 || ((reload_completed || reload_in_progress)
1897 && frame_pointer_needed
1898 && REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
1899 || REGNO (indx) == ARG_POINTER_REGNUM
1900 || (flag_pic
1901 && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
1902 pointer = indx_ptr = true;
1903
1904 if ((reload_completed || reload_in_progress)
1905 && indx == cfun->machine->base_reg)
1906 pointer = indx_ptr = literal_pool = true;
1907 }
1908
1909 /* Prefer to use pointer as base, not index. */
1910 if (base && indx && !base_ptr
1911 && (indx_ptr || (!REG_POINTER (base) && REG_POINTER (indx))))
1912 {
1913 rtx tmp = base;
1914 base = indx;
1915 indx = tmp;
1916 }
1917
1918 /* Validate displacement. */
1919 if (!disp)
1920 {
1921 /* If virtual registers are involved, the displacement will change later
1922 anyway as the virtual registers get eliminated. This could make a
1923 valid displacement invalid, but it is more likely to make an invalid
1924 displacement valid, because we sometimes access the register save area
1925 via negative offsets to one of those registers.
1926 Thus we don't check the displacement for validity here. If after
1927 elimination the displacement turns out to be invalid after all,
1928 this is fixed up by reload in any case. */
1929 if (base != arg_pointer_rtx
1930 && indx != arg_pointer_rtx
1931 && base != return_address_pointer_rtx
1932 && indx != return_address_pointer_rtx
1933 && base != frame_pointer_rtx
1934 && indx != frame_pointer_rtx
1935 && base != virtual_stack_vars_rtx
1936 && indx != virtual_stack_vars_rtx)
1937 if (!DISP_IN_RANGE (offset))
1938 return false;
1939 }
1940 else
1941 {
1942 /* All the special cases are pointers. */
1943 pointer = true;
1944
1945 /* In the small-PIC case, the linker converts @GOT
1946 and @GOTNTPOFF offsets to possible displacements. */
1947 if (GET_CODE (disp) == UNSPEC
1948 && (XINT (disp, 1) == UNSPEC_GOT
1949 || XINT (disp, 1) == UNSPEC_GOTNTPOFF)
1950 && flag_pic == 1)
1951 {
1952 ;
1953 }
1954
1955 /* Accept pool label offsets. */
1956 else if (GET_CODE (disp) == UNSPEC
1957 && XINT (disp, 1) == UNSPEC_POOL_OFFSET)
1958 ;
1959
1960 /* Accept literal pool references. */
1961 else if (GET_CODE (disp) == UNSPEC
1962 && XINT (disp, 1) == UNSPEC_LTREL_OFFSET)
1963 {
1964 orig_disp = gen_rtx_CONST (Pmode, disp);
1965 if (offset)
1966 {
1967 /* If we have an offset, make sure it does not
1968 exceed the size of the constant pool entry. */
1969 rtx sym = XVECEXP (disp, 0, 0);
1970 if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))
1971 return false;
1972
1973 orig_disp = plus_constant (orig_disp, offset);
1974 }
1975 }
1976
1977 else
1978 return false;
1979 }
1980
1981 if (!base && !indx)
1982 pointer = true;
1983
1984 if (out)
1985 {
1986 out->base = base;
1987 out->indx = indx;
1988 out->disp = orig_disp;
1989 out->pointer = pointer;
1990 out->literal_pool = literal_pool;
1991 }
1992
1993 return true;
1994 }
1995
1996 /* Decompose a RTL expression OP for a shift count into its components,
1997 and return the base register in BASE and the offset in OFFSET.
1998
1999 Return true if OP is a valid shift count, false if not. */
2000
2001 bool
2002 s390_decompose_shift_count (rtx op, rtx *base, HOST_WIDE_INT *offset)
2003 {
2004 HOST_WIDE_INT off = 0;
2005
2006 /* We can have an integer constant, an address register,
2007 or a sum of the two. */
2008 if (GET_CODE (op) == CONST_INT)
2009 {
2010 off = INTVAL (op);
2011 op = NULL_RTX;
2012 }
2013 if (op && GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT)
2014 {
2015 off = INTVAL (XEXP (op, 1));
2016 op = XEXP (op, 0);
2017 }
2018 while (op && GET_CODE (op) == SUBREG)
2019 op = SUBREG_REG (op);
2020
2021 if (op && GET_CODE (op) != REG)
2022 return false;
2023
2024 if (offset)
2025 *offset = off;
2026 if (base)
2027 *base = op;
2028
2029 return true;
2030 }
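/* For illustration, two hypothetical operands and how the routine above
   decomposes them:

     (const_int 3)                   -> *base = NULL_RTX, *offset = 3
     (plus (reg:SI 2) (const_int 7)) -> *base = (reg:SI 2), *offset = 7

   A PLUS whose second operand is not a CONST_INT, or whose
   (subreg-stripped) first operand is not a REG, is rejected. */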
2031
2032
2033 /* Return true if OP is a valid address without index. */
2034
2035 bool
2036 s390_legitimate_address_without_index_p (rtx op)
2037 {
2038 struct s390_address addr;
2039
2040 if (!s390_decompose_address (XEXP (op, 0), &addr))
2041 return false;
2042 if (addr.indx)
2043 return false;
2044
2045 return true;
2046 }
2047
2048
2049 /* Return true if ADDR is of kind symbol_ref or symbol_ref + const_int
2050 and return these parts in SYMREF and ADDEND. You can pass NULL in
2051 SYMREF and/or ADDEND if you are not interested in these values. */
2052
2053 static bool
2054 s390_symref_operand_p (rtx addr, rtx *symref, HOST_WIDE_INT *addend)
2055 {
2056 HOST_WIDE_INT tmpaddend = 0;
2057
2058 if (GET_CODE (addr) == CONST)
2059 addr = XEXP (addr, 0);
2060
2061 if (GET_CODE (addr) == PLUS)
2062 {
2063 if (GET_CODE (XEXP (addr, 0)) == SYMBOL_REF
2064 && CONST_INT_P (XEXP (addr, 1)))
2065 {
2066 tmpaddend = INTVAL (XEXP (addr, 1));
2067 addr = XEXP (addr, 0);
2068 }
2069 else
2070 return false;
2071 }
2072 else
2073 if (GET_CODE (addr) != SYMBOL_REF)
2074 return false;
2075
2076 if (symref)
2077 *symref = addr;
2078 if (addend)
2079 *addend = tmpaddend;
2080
2081 return true;
2082 }
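/* Illustration with a hypothetical symbol "foo"; the routine above accepts:

     (symbol_ref "foo")                               -> *symref = foo, *addend = 0
     (const (plus (symbol_ref "foo") (const_int 12))) -> *symref = foo, *addend = 12

   A PLUS whose first operand is not a SYMBOL_REF or whose second operand
   is not a CONST_INT is rejected. */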
2083
2084
2085 /* Return true if the address in OP is valid for constraint letter C
2086 if wrapped in a MEM rtx. Set LIT_POOL_OK to true if literal
2087 pool MEMs should be accepted. Only the Q, R, S, T constraint
2088 letters are allowed for C. */
2089
2090 static int
2091 s390_check_qrst_address (char c, rtx op, bool lit_pool_ok)
2092 {
2093 struct s390_address addr;
2094 bool decomposed = false;
2095
2096 /* This check makes sure that no symbolic addresses (except literal
2097 pool references) are accepted by the R or T constraints. */
2098 if (s390_symref_operand_p (op, NULL, NULL))
2099 {
2100 if (!lit_pool_ok)
2101 return 0;
2102 if (!s390_decompose_address (op, &addr))
2103 return 0;
2104 if (!addr.literal_pool)
2105 return 0;
2106 decomposed = true;
2107 }
2108
2109 switch (c)
2110 {
2111 case 'Q': /* no index short displacement */
2112 if (!decomposed && !s390_decompose_address (op, &addr))
2113 return 0;
2114 if (addr.indx)
2115 return 0;
2116 if (!s390_short_displacement (addr.disp))
2117 return 0;
2118 break;
2119
2120 case 'R': /* with index short displacement */
2121 if (TARGET_LONG_DISPLACEMENT)
2122 {
2123 if (!decomposed && !s390_decompose_address (op, &addr))
2124 return 0;
2125 if (!s390_short_displacement (addr.disp))
2126 return 0;
2127 }
2128 /* Any invalid address here will be fixed up by reload,
2129 so accept it for the most generic constraint. */
2130 break;
2131
2132 case 'S': /* no index long displacement */
2133 if (!TARGET_LONG_DISPLACEMENT)
2134 return 0;
2135 if (!decomposed && !s390_decompose_address (op, &addr))
2136 return 0;
2137 if (addr.indx)
2138 return 0;
2139 if (s390_short_displacement (addr.disp))
2140 return 0;
2141 break;
2142
2143 case 'T': /* with index long displacement */
2144 if (!TARGET_LONG_DISPLACEMENT)
2145 return 0;
2146 /* Any invalid address here will be fixed up by reload,
2147 so accept it for the most generic constraint. */
2148 if ((decomposed || s390_decompose_address (op, &addr))
2149 && s390_short_displacement (addr.disp))
2150 return 0;
2151 break;
2152 default:
2153 return 0;
2154 }
2155 return 1;
2156 }
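/* Illustration of the checks above, assuming TARGET_LONG_DISPLACEMENT and
   a hypothetical address (plus (reg %r2) (const_int N)):

     N = 40   (fits the 12-bit unsigned field) : Q and R match, S and T do not
     N = 8000 (needs the 20-bit signed field)  : S and T match, Q and R do not

   Q and S additionally require that no index register is present; R and T
   allow one. */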
2157
2158
2159 /* Evaluates constraint strings described by the regular expression
2160 ([A|B|Z](Q|R|S|T))|U|W|Y and returns 1 if OP is a valid operand for
2161 the constraint given in STR, and 0 otherwise. */
2162
2163 int
2164 s390_mem_constraint (const char *str, rtx op)
2165 {
2166 char c = str[0];
2167
2168 switch (c)
2169 {
2170 case 'A':
2171 /* Check for offsettable variants of memory constraints. */
2172 if (!MEM_P (op) || MEM_VOLATILE_P (op))
2173 return 0;
2174 if ((reload_completed || reload_in_progress)
2175 ? !offsettable_memref_p (op) : !offsettable_nonstrict_memref_p (op))
2176 return 0;
2177 return s390_check_qrst_address (str[1], XEXP (op, 0), true);
2178 case 'B':
2179 /* Check for non-literal-pool variants of memory constraints. */
2180 if (!MEM_P (op))
2181 return 0;
2182 return s390_check_qrst_address (str[1], XEXP (op, 0), false);
2183 case 'Q':
2184 case 'R':
2185 case 'S':
2186 case 'T':
2187 if (GET_CODE (op) != MEM)
2188 return 0;
2189 return s390_check_qrst_address (c, XEXP (op, 0), true);
2190 case 'U':
2191 return (s390_check_qrst_address ('Q', op, true)
2192 || s390_check_qrst_address ('R', op, true));
2193 case 'W':
2194 return (s390_check_qrst_address ('S', op, true)
2195 || s390_check_qrst_address ('T', op, true));
2196 case 'Y':
2197 /* Simply check for the basic form of a shift count. Reload will
2198 take care of making sure we have a proper base register. */
2199 if (!s390_decompose_shift_count (op, NULL, NULL))
2200 return 0;
2201 break;
2202 case 'Z':
2203 return s390_check_qrst_address (str[1], op, true);
2204 default:
2205 return 0;
2206 }
2207 return 1;
2208 }
2209
2210
2211 /* Evaluates constraint strings starting with letter O. Input
2212 parameter C is the letter following the "O" in the constraint
2213 string. Returns 1 if VALUE meets the respective constraint and 0
2214 otherwise. */
2215
2216 int
2217 s390_O_constraint_str (const char c, HOST_WIDE_INT value)
2218 {
2219 if (!TARGET_EXTIMM)
2220 return 0;
2221
2222 switch (c)
2223 {
2224 case 's':
2225 return trunc_int_for_mode (value, SImode) == value;
2226
2227 case 'p':
2228 return value == 0
2229 || s390_single_part (GEN_INT (value), DImode, SImode, 0) == 1;
2230
2231 case 'n':
2232 return s390_single_part (GEN_INT (value - 1), DImode, SImode, -1) == 1;
2233
2234 default:
2235 gcc_unreachable ();
2236 }
2237 }
2238
2239
2240 /* Evaluates constraint strings starting with letter N. Parameter STR
2241 contains the letters following letter "N" in the constraint string.
2242 Returns true if VALUE matches the constraint. */
2243
2244 int
2245 s390_N_constraint_str (const char *str, HOST_WIDE_INT value)
2246 {
2247 enum machine_mode mode, part_mode;
2248 int def;
2249 int part, part_goal;
2250
2251
2252 if (str[0] == 'x')
2253 part_goal = -1;
2254 else
2255 part_goal = str[0] - '0';
2256
2257 switch (str[1])
2258 {
2259 case 'Q':
2260 part_mode = QImode;
2261 break;
2262 case 'H':
2263 part_mode = HImode;
2264 break;
2265 case 'S':
2266 part_mode = SImode;
2267 break;
2268 default:
2269 return 0;
2270 }
2271
2272 switch (str[2])
2273 {
2274 case 'H':
2275 mode = HImode;
2276 break;
2277 case 'S':
2278 mode = SImode;
2279 break;
2280 case 'D':
2281 mode = DImode;
2282 break;
2283 default:
2284 return 0;
2285 }
2286
2287 switch (str[3])
2288 {
2289 case '0':
2290 def = 0;
2291 break;
2292 case 'F':
2293 def = -1;
2294 break;
2295 default:
2296 return 0;
2297 }
2298
2299 if (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (part_mode))
2300 return 0;
2301
2302 part = s390_single_part (GEN_INT (value), mode, part_mode, def);
2303 if (part < 0)
2304 return 0;
2305 if (part_goal != -1 && part_goal != part)
2306 return 0;
2307
2308 return 1;
2309 }
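/* Sketch of how a hypothetical constraint string "3HD0" (i.e. "N3HD0")
   is decoded by the routine above:

     str[0] = '3' : the single part allowed to differ is part number 3
     str[1] = 'H' : the parts are HImode (16-bit) pieces
     str[2] = 'D' : the value is interpreted in DImode
     str[3] = '0' : the other parts are expected to equal 0
                    ('F' selects an all-ones default instead)

   The part mode must be strictly smaller than the containing mode, and
   s390_single_part must report exactly the requested part. */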
2310
2311
2312 /* Returns true if the input parameter VALUE is a float zero. */
2313
2314 int
2315 s390_float_const_zero_p (rtx value)
2316 {
2317 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
2318 && value == CONST0_RTX (GET_MODE (value)));
2319 }
2320
2321
2322 /* Compute a (partial) cost for rtx X. Return true if the complete
2323 cost has been computed, and false if subexpressions should be
2324 scanned. In either case, *TOTAL contains the cost result.
2325 CODE contains GET_CODE (x), OUTER_CODE contains the code
2326 of the superexpression of x. */
2327
2328 static bool
2329 s390_rtx_costs (rtx x, int code, int outer_code, int *total,
2330 bool speed ATTRIBUTE_UNUSED)
2331 {
2332 switch (code)
2333 {
2334 case CONST:
2335 case CONST_INT:
2336 case LABEL_REF:
2337 case SYMBOL_REF:
2338 case CONST_DOUBLE:
2339 case MEM:
2340 *total = 0;
2341 return true;
2342
2343 case ASHIFT:
2344 case ASHIFTRT:
2345 case LSHIFTRT:
2346 case ROTATE:
2347 case ROTATERT:
2348 case AND:
2349 case IOR:
2350 case XOR:
2351 case NEG:
2352 case NOT:
2353 *total = COSTS_N_INSNS (1);
2354 return false;
2355
2356 case PLUS:
2357 case MINUS:
2358 /* Check for multiply and add. */
2359 if ((GET_MODE (x) == DFmode || GET_MODE (x) == SFmode)
2360 && GET_CODE (XEXP (x, 0)) == MULT
2361 && TARGET_HARD_FLOAT && TARGET_FUSED_MADD)
2362 {
2363 /* This is the multiply and add case. */
2364 if (GET_MODE (x) == DFmode)
2365 *total = s390_cost->madbr;
2366 else
2367 *total = s390_cost->maebr;
2368 *total += (rtx_cost (XEXP (XEXP (x, 0), 0), MULT, speed)
2369 + rtx_cost (XEXP (XEXP (x, 0), 1), MULT, speed)
2370 + rtx_cost (XEXP (x, 1), (enum rtx_code) code, speed));
2371 return true; /* Do not do an additional recursive descent. */
2372 }
2373 *total = COSTS_N_INSNS (1);
2374 return false;
2375
2376 case MULT:
2377 switch (GET_MODE (x))
2378 {
2379 case SImode:
2380 {
2381 rtx left = XEXP (x, 0);
2382 rtx right = XEXP (x, 1);
2383 if (GET_CODE (right) == CONST_INT
2384 && CONST_OK_FOR_K (INTVAL (right)))
2385 *total = s390_cost->mhi;
2386 else if (GET_CODE (left) == SIGN_EXTEND)
2387 *total = s390_cost->mh;
2388 else
2389 *total = s390_cost->ms; /* msr, ms, msy */
2390 break;
2391 }
2392 case DImode:
2393 {
2394 rtx left = XEXP (x, 0);
2395 rtx right = XEXP (x, 1);
2396 if (TARGET_64BIT)
2397 {
2398 if (GET_CODE (right) == CONST_INT
2399 && CONST_OK_FOR_K (INTVAL (right)))
2400 *total = s390_cost->mghi;
2401 else if (GET_CODE (left) == SIGN_EXTEND)
2402 *total = s390_cost->msgf;
2403 else
2404 *total = s390_cost->msg; /* msgr, msg */
2405 }
2406 else /* TARGET_31BIT */
2407 {
2408 if (GET_CODE (left) == SIGN_EXTEND
2409 && GET_CODE (right) == SIGN_EXTEND)
2410 /* mulsidi case: mr, m */
2411 *total = s390_cost->m;
2412 else if (GET_CODE (left) == ZERO_EXTEND
2413 && GET_CODE (right) == ZERO_EXTEND
2414 && TARGET_CPU_ZARCH)
2415 /* umulsidi case: ml, mlr */
2416 *total = s390_cost->ml;
2417 else
2418 /* Complex calculation is required. */
2419 *total = COSTS_N_INSNS (40);
2420 }
2421 break;
2422 }
2423 case SFmode:
2424 case DFmode:
2425 *total = s390_cost->mult_df;
2426 break;
2427 case TFmode:
2428 *total = s390_cost->mxbr;
2429 break;
2430 default:
2431 return false;
2432 }
2433 return false;
2434
2435 case UDIV:
2436 case UMOD:
2437 if (GET_MODE (x) == TImode) /* 128 bit division */
2438 *total = s390_cost->dlgr;
2439 else if (GET_MODE (x) == DImode)
2440 {
2441 rtx right = XEXP (x, 1);
2442 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
2443 *total = s390_cost->dlr;
2444 else /* 64 by 64 bit division */
2445 *total = s390_cost->dlgr;
2446 }
2447 else if (GET_MODE (x) == SImode) /* 32 bit division */
2448 *total = s390_cost->dlr;
2449 return false;
2450
2451 case DIV:
2452 case MOD:
2453 if (GET_MODE (x) == DImode)
2454 {
2455 rtx right = XEXP (x, 1);
2456 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
2457 if (TARGET_64BIT)
2458 *total = s390_cost->dsgfr;
2459 else
2460 *total = s390_cost->dr;
2461 else /* 64 by 64 bit division */
2462 *total = s390_cost->dsgr;
2463 }
2464 else if (GET_MODE (x) == SImode) /* 32 bit division */
2465 *total = s390_cost->dlr;
2466 else if (GET_MODE (x) == SFmode)
2467 {
2468 *total = s390_cost->debr;
2469 }
2470 else if (GET_MODE (x) == DFmode)
2471 {
2472 *total = s390_cost->ddbr;
2473 }
2474 else if (GET_MODE (x) == TFmode)
2475 {
2476 *total = s390_cost->dxbr;
2477 }
2478 return false;
2479
2480 case SQRT:
2481 if (GET_MODE (x) == SFmode)
2482 *total = s390_cost->sqebr;
2483 else if (GET_MODE (x) == DFmode)
2484 *total = s390_cost->sqdbr;
2485 else /* TFmode */
2486 *total = s390_cost->sqxbr;
2487 return false;
2488
2489 case SIGN_EXTEND:
2490 case ZERO_EXTEND:
2491 if (outer_code == MULT || outer_code == DIV || outer_code == MOD
2492 || outer_code == PLUS || outer_code == MINUS
2493 || outer_code == COMPARE)
2494 *total = 0;
2495 return false;
2496
2497 case COMPARE:
2498 *total = COSTS_N_INSNS (1);
2499 if (GET_CODE (XEXP (x, 0)) == AND
2500 && GET_CODE (XEXP (x, 1)) == CONST_INT
2501 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
2502 {
2503 rtx op0 = XEXP (XEXP (x, 0), 0);
2504 rtx op1 = XEXP (XEXP (x, 0), 1);
2505 rtx op2 = XEXP (x, 1);
2506
2507 if (memory_operand (op0, GET_MODE (op0))
2508 && s390_tm_ccmode (op1, op2, 0) != VOIDmode)
2509 return true;
2510 if (register_operand (op0, GET_MODE (op0))
2511 && s390_tm_ccmode (op1, op2, 1) != VOIDmode)
2512 return true;
2513 }
2514 return false;
2515
2516 default:
2517 return false;
2518 }
2519 }
2520
2521 /* Return the cost of an address rtx ADDR. */
2522
2523 static int
2524 s390_address_cost (rtx addr, bool speed ATTRIBUTE_UNUSED)
2525 {
2526 struct s390_address ad;
2527 if (!s390_decompose_address (addr, &ad))
2528 return 1000;
2529
2530 return ad.indx? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (1);
2531 }
2532
2533 /* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
2534 otherwise return 0. */
2535
2536 int
2537 tls_symbolic_operand (rtx op)
2538 {
2539 if (GET_CODE (op) != SYMBOL_REF)
2540 return 0;
2541 return SYMBOL_REF_TLS_MODEL (op);
2542 }
2543 \f
2544 /* Split DImode access register reference REG (on 64-bit) into its constituent
2545 low and high parts, and store them into LO and HI. Note that gen_lowpart/
2546 gen_highpart cannot be used as they assume all registers are word-sized,
2547 while our access registers have only half that size. */
2548
2549 void
2550 s390_split_access_reg (rtx reg, rtx *lo, rtx *hi)
2551 {
2552 gcc_assert (TARGET_64BIT);
2553 gcc_assert (ACCESS_REG_P (reg));
2554 gcc_assert (GET_MODE (reg) == DImode);
2555 gcc_assert (!(REGNO (reg) & 1));
2556
2557 *lo = gen_rtx_REG (SImode, REGNO (reg) + 1);
2558 *hi = gen_rtx_REG (SImode, REGNO (reg));
2559 }
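/* Illustration: for an access-register pair starting at hard register
   number N (N must be even), the DImode reference is split as

     *hi = (reg:SI N)       -- high part
     *lo = (reg:SI N + 1)   -- low part

   matching the big-endian ordering of the two 32-bit access registers
   that make up the 64-bit value. */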
2560
2561 /* Return true if OP contains a symbol reference. */
2562
2563 bool
2564 symbolic_reference_mentioned_p (rtx op)
2565 {
2566 const char *fmt;
2567 int i;
2568
2569 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
2570 return 1;
2571
2572 fmt = GET_RTX_FORMAT (GET_CODE (op));
2573 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2574 {
2575 if (fmt[i] == 'E')
2576 {
2577 int j;
2578
2579 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2580 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2581 return 1;
2582 }
2583
2584 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
2585 return 1;
2586 }
2587
2588 return 0;
2589 }
2590
2591 /* Return true if OP contains a reference to a thread-local symbol. */
2592
2593 bool
2594 tls_symbolic_reference_mentioned_p (rtx op)
2595 {
2596 const char *fmt;
2597 int i;
2598
2599 if (GET_CODE (op) == SYMBOL_REF)
2600 return tls_symbolic_operand (op);
2601
2602 fmt = GET_RTX_FORMAT (GET_CODE (op));
2603 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2604 {
2605 if (fmt[i] == 'E')
2606 {
2607 int j;
2608
2609 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2610 if (tls_symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2611 return true;
2612 }
2613
2614 else if (fmt[i] == 'e' && tls_symbolic_reference_mentioned_p (XEXP (op, i)))
2615 return true;
2616 }
2617
2618 return false;
2619 }
2620
2621
2622 /* Return true if OP is a legitimate general operand when
2623 generating PIC code. It is given that flag_pic is on
2624 and that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2625
2626 int
2627 legitimate_pic_operand_p (rtx op)
2628 {
2629 /* Accept all non-symbolic constants. */
2630 if (!SYMBOLIC_CONST (op))
2631 return 1;
2632
2633 /* Reject everything else; must be handled
2634 via emit_symbolic_move. */
2635 return 0;
2636 }
2637
2638 /* Returns true if the constant value OP is a legitimate general operand.
2639 It is given that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2640
2641 int
2642 legitimate_constant_p (rtx op)
2643 {
2644 /* Accept all non-symbolic constants. */
2645 if (!SYMBOLIC_CONST (op))
2646 return 1;
2647
2648 /* Accept immediate LARL operands. */
2649 if (TARGET_CPU_ZARCH && larl_operand (op, VOIDmode))
2650 return 1;
2651
2652 /* Thread-local symbols are never legal constants. This is
2653 so that emit_call knows that computing such addresses
2654 might require a function call. */
2655 if (TLS_SYMBOLIC_CONST (op))
2656 return 0;
2657
2658 /* In the PIC case, symbolic constants must *not* be
2659 forced into the literal pool. We accept them here,
2660 so that they will be handled by emit_symbolic_move. */
2661 if (flag_pic)
2662 return 1;
2663
2664 /* All remaining non-PIC symbolic constants are
2665 forced into the literal pool. */
2666 return 0;
2667 }
2668
2669 /* Determine if it's legal to put X into the constant pool. This
2670 is not possible if X contains the address of a symbol that is
2671 not constant (TLS) or not known at final link time (PIC). */
2672
2673 static bool
2674 s390_cannot_force_const_mem (rtx x)
2675 {
2676 switch (GET_CODE (x))
2677 {
2678 case CONST_INT:
2679 case CONST_DOUBLE:
2680 /* Accept all non-symbolic constants. */
2681 return false;
2682
2683 case LABEL_REF:
2684 /* Labels are OK iff we are non-PIC. */
2685 return flag_pic != 0;
2686
2687 case SYMBOL_REF:
2688 /* 'Naked' TLS symbol references are never OK,
2689 non-TLS symbols are OK iff we are non-PIC. */
2690 if (tls_symbolic_operand (x))
2691 return true;
2692 else
2693 return flag_pic != 0;
2694
2695 case CONST:
2696 return s390_cannot_force_const_mem (XEXP (x, 0));
2697 case PLUS:
2698 case MINUS:
2699 return s390_cannot_force_const_mem (XEXP (x, 0))
2700 || s390_cannot_force_const_mem (XEXP (x, 1));
2701
2702 case UNSPEC:
2703 switch (XINT (x, 1))
2704 {
2705 /* Only lt-relative or GOT-relative UNSPECs are OK. */
2706 case UNSPEC_LTREL_OFFSET:
2707 case UNSPEC_GOT:
2708 case UNSPEC_GOTOFF:
2709 case UNSPEC_PLTOFF:
2710 case UNSPEC_TLSGD:
2711 case UNSPEC_TLSLDM:
2712 case UNSPEC_NTPOFF:
2713 case UNSPEC_DTPOFF:
2714 case UNSPEC_GOTNTPOFF:
2715 case UNSPEC_INDNTPOFF:
2716 return false;
2717
2718 /* If the literal pool shares the code section, execute
2719 template placeholders may be put into the pool as well. */
2720 case UNSPEC_INSN:
2721 return TARGET_CPU_ZARCH;
2722
2723 default:
2724 return true;
2725 }
2726 break;
2727
2728 default:
2729 gcc_unreachable ();
2730 }
2731 }
2732
2733 /* Returns true if the constant value OP is a legitimate general
2734 operand during and after reload. The difference to
2735 legitimate_constant_p is that this function will not accept
2736 a constant that would need to be forced to the literal pool
2737 before it can be used as operand. */
2738
2739 bool
2740 legitimate_reload_constant_p (rtx op)
2741 {
2742 /* Accept la(y) operands. */
2743 if (GET_CODE (op) == CONST_INT
2744 && DISP_IN_RANGE (INTVAL (op)))
2745 return true;
2746
2747 /* Accept l(g)hi/l(g)fi operands. */
2748 if (GET_CODE (op) == CONST_INT
2749 && (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_Os (INTVAL (op))))
2750 return true;
2751
2752 /* Accept lliXX operands. */
2753 if (TARGET_ZARCH
2754 && GET_CODE (op) == CONST_INT
2755 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2756 && s390_single_part (op, word_mode, HImode, 0) >= 0)
2757 return true;
2758
2759 if (TARGET_EXTIMM
2760 && GET_CODE (op) == CONST_INT
2761 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2762 && s390_single_part (op, word_mode, SImode, 0) >= 0)
2763 return true;
2764
2765 /* Accept larl operands. */
2766 if (TARGET_CPU_ZARCH
2767 && larl_operand (op, VOIDmode))
2768 return true;
2769
2770 /* Accept lzXX operands. */
2771 if (GET_CODE (op) == CONST_DOUBLE
2772 && CONST_DOUBLE_OK_FOR_CONSTRAINT_P (op, 'G', "G"))
2773 return true;
2774
2775 /* Accept double-word operands that can be split. */
2776 if (GET_CODE (op) == CONST_INT
2777 && trunc_int_for_mode (INTVAL (op), word_mode) != INTVAL (op))
2778 {
2779 enum machine_mode dword_mode = word_mode == SImode ? DImode : TImode;
2780 rtx hi = operand_subword (op, 0, 0, dword_mode);
2781 rtx lo = operand_subword (op, 1, 0, dword_mode);
2782 return legitimate_reload_constant_p (hi)
2783 && legitimate_reload_constant_p (lo);
2784 }
2785
2786 /* Everything else cannot be handled without reload. */
2787 return false;
2788 }
2789
2790 /* Given an rtx OP being reloaded into a reg required to be in class RCLASS,
2791 return the class of reg to actually use. */
2792
2793 enum reg_class
2794 s390_preferred_reload_class (rtx op, enum reg_class rclass)
2795 {
2796 switch (GET_CODE (op))
2797 {
2798 /* Constants we cannot reload must be forced into the
2799 literal pool. */
2800
2801 case CONST_DOUBLE:
2802 case CONST_INT:
2803 if (legitimate_reload_constant_p (op))
2804 return rclass;
2805 else
2806 return NO_REGS;
2807
2808 /* If a symbolic constant or a PLUS is reloaded,
2809 it is most likely being used as an address, so
2810 prefer ADDR_REGS. If RCLASS is not a superset
2811 of ADDR_REGS, e.g. FP_REGS, reject this reload. */
2812 case PLUS:
2813 case LABEL_REF:
2814 case SYMBOL_REF:
2815 case CONST:
2816 if (reg_class_subset_p (ADDR_REGS, rclass))
2817 return ADDR_REGS;
2818 else
2819 return NO_REGS;
2820
2821 default:
2822 break;
2823 }
2824
2825 return rclass;
2826 }
2827
2828 /* Return true if ADDR is SYMBOL_REF + addend with addend being a
2829 multiple of ALIGNMENT and the SYMBOL_REF being naturally
2830 aligned. */
2831
2832 bool
2833 s390_check_symref_alignment (rtx addr, HOST_WIDE_INT alignment)
2834 {
2835 HOST_WIDE_INT addend;
2836 rtx symref;
2837
2838 if (!s390_symref_operand_p (addr, &symref, &addend))
2839 return false;
2840
2841 return (!SYMBOL_REF_NOT_NATURALLY_ALIGNED_P (symref)
2842 && !(addend & (alignment - 1)));
2843 }
2844
2845 /* ADDR is moved into REG using larl. If ADDR isn't a valid larl
2846 operand, SCRATCH is used to load the even part of the address, and
2847 one is then added to form the final value. */
2848
2849 void
2850 s390_reload_larl_operand (rtx reg, rtx addr, rtx scratch)
2851 {
2852 HOST_WIDE_INT addend;
2853 rtx symref;
2854
2855 if (!s390_symref_operand_p (addr, &symref, &addend))
2856 gcc_unreachable ();
2857
2858 if (!(addend & 1))
2859 /* Easy case. The addend is even so larl will do fine. */
2860 emit_move_insn (reg, addr);
2861 else
2862 {
2863 /* We can leave the scratch register untouched if the target
2864 register is a valid base register. */
2865 if (REGNO (reg) < FIRST_PSEUDO_REGISTER
2866 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS)
2867 scratch = reg;
2868
2869 gcc_assert (REGNO (scratch) < FIRST_PSEUDO_REGISTER);
2870 gcc_assert (REGNO_REG_CLASS (REGNO (scratch)) == ADDR_REGS);
2871
2872 if (addend != 1)
2873 emit_move_insn (scratch,
2874 gen_rtx_CONST (Pmode,
2875 gen_rtx_PLUS (Pmode, symref,
2876 GEN_INT (addend - 1))));
2877 else
2878 emit_move_insn (scratch, symref);
2879
2880 /* Increment the address using la in order to avoid clobbering cc. */
2881 emit_move_insn (reg, gen_rtx_PLUS (Pmode, scratch, const1_rtx));
2882 }
2883 }
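/* Worked example for the odd-addend path above, with a hypothetical
   symbol "sym":

     reg <- sym + 5   becomes   larl scratch, sym+4
                                la   reg, 1(scratch)

   The even part sym+4 is loaded with LARL, and the remaining 1 is added
   with LA, which does not clobber the condition code. */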
2884
2885 /* Generate what is necessary to move between REG and MEM using
2886 SCRATCH. The direction is given by TOMEM. */
2887
2888 void
2889 s390_reload_symref_address (rtx reg, rtx mem, rtx scratch, bool tomem)
2890 {
2891 /* Reload might have pulled a constant out of the literal pool.
2892 Force it back in. */
2893 if (CONST_INT_P (mem) || GET_CODE (mem) == CONST_DOUBLE
2894 || GET_CODE (mem) == CONST)
2895 mem = force_const_mem (GET_MODE (reg), mem);
2896
2897 gcc_assert (MEM_P (mem));
2898
2899 /* For a load from memory we can leave the scratch register
2900 untouched if the target register is a valid base register. */
2901 if (!tomem
2902 && REGNO (reg) < FIRST_PSEUDO_REGISTER
2903 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS
2904 && GET_MODE (reg) == GET_MODE (scratch))
2905 scratch = reg;
2906
2907 /* Load address into scratch register. Since we can't have a
2908 secondary reload for a secondary reload we have to cover the case
2909 where larl would need a secondary reload here as well. */
2910 s390_reload_larl_operand (scratch, XEXP (mem, 0), scratch);
2911
2912 /* Now we can use a standard load/store to do the move. */
2913 if (tomem)
2914 emit_move_insn (replace_equiv_address (mem, scratch), reg);
2915 else
2916 emit_move_insn (reg, replace_equiv_address (mem, scratch));
2917 }
2918
2919 /* Inform reload about cases where moving X with a mode MODE to a register in
2920 RCLASS requires an extra scratch or immediate register. Return the class
2921 needed for the immediate register. */
2922
2923 static enum reg_class
2924 s390_secondary_reload (bool in_p, rtx x, enum reg_class rclass,
2925 enum machine_mode mode, secondary_reload_info *sri)
2926 {
2927 /* Intermediate register needed. */
2928 if (reg_classes_intersect_p (CC_REGS, rclass))
2929 return GENERAL_REGS;
2930
2931 if (TARGET_Z10)
2932 {
2933 /* On z10 several optimizer steps may generate larl operands with
2934 an odd addend. */
2935 if (in_p
2936 && s390_symref_operand_p (x, NULL, NULL)
2937 && mode == Pmode
2938 && !s390_check_symref_alignment (x, 2))
2939 sri->icode = ((mode == DImode) ? CODE_FOR_reloaddi_larl_odd_addend_z10
2940 : CODE_FOR_reloadsi_larl_odd_addend_z10);
2941
2942 /* On z10 we need a scratch register when moving QI, TI or floating
2943 point mode values from or to a memory location with a SYMBOL_REF
2944 or if the symref addend of a SI or DI move is not aligned to the
2945 width of the access. */
2946 if (MEM_P (x)
2947 && s390_symref_operand_p (XEXP (x, 0), NULL, NULL)
2948 && (mode == QImode || mode == TImode || FLOAT_MODE_P (mode)
2949 || (!TARGET_64BIT && mode == DImode)
2950 || ((mode == HImode || mode == SImode || mode == DImode)
2951 && (!s390_check_symref_alignment (XEXP (x, 0),
2952 GET_MODE_SIZE (mode))))))
2953 {
2954 #define __SECONDARY_RELOAD_CASE(M,m) \
2955 case M##mode: \
2956 if (TARGET_64BIT) \
2957 sri->icode = in_p ? CODE_FOR_reload##m##di_toreg_z10 : \
2958 CODE_FOR_reload##m##di_tomem_z10; \
2959 else \
2960 sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 : \
2961 CODE_FOR_reload##m##si_tomem_z10; \
2962 break;
2963
2964 switch (GET_MODE (x))
2965 {
2966 __SECONDARY_RELOAD_CASE (QI, qi);
2967 __SECONDARY_RELOAD_CASE (HI, hi);
2968 __SECONDARY_RELOAD_CASE (SI, si);
2969 __SECONDARY_RELOAD_CASE (DI, di);
2970 __SECONDARY_RELOAD_CASE (TI, ti);
2971 __SECONDARY_RELOAD_CASE (SF, sf);
2972 __SECONDARY_RELOAD_CASE (DF, df);
2973 __SECONDARY_RELOAD_CASE (TF, tf);
2974 __SECONDARY_RELOAD_CASE (SD, sd);
2975 __SECONDARY_RELOAD_CASE (DD, dd);
2976 __SECONDARY_RELOAD_CASE (TD, td);
2977
2978 default:
2979 gcc_unreachable ();
2980 }
2981 #undef __SECONDARY_RELOAD_CASE
2982 }
2983 }
2984
2985 /* We need a scratch register when loading a PLUS expression which
2986 is not a legitimate operand of the LOAD ADDRESS instruction. */
2987 if (in_p && s390_plus_operand (x, mode))
2988 sri->icode = (TARGET_64BIT ?
2989 CODE_FOR_reloaddi_plus : CODE_FOR_reloadsi_plus);
2990
2991 /* Performing a multiword move from or to memory we have to make sure the
2992 second chunk in memory is addressable without causing a displacement
2993 overflow. If that would be the case we calculate the address in
2994 a scratch register. */
2995 if (MEM_P (x)
2996 && GET_CODE (XEXP (x, 0)) == PLUS
2997 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2998 && !DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1))
2999 + GET_MODE_SIZE (mode) - 1))
3000 {
3001 /* For GENERAL_REGS a displacement overflow is no problem if occurring
3002 in an s_operand address since we may fall back to lm/stm. So we only
3003 have to care about overflows in the b+i+d case. */
3004 if ((reg_classes_intersect_p (GENERAL_REGS, rclass)
3005 && s390_class_max_nregs (GENERAL_REGS, mode) > 1
3006 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
3007 /* For FP_REGS no lm/stm is available so this check is triggered
3008 for displacement overflows in b+i+d and b+d like addresses. */
3009 || (reg_classes_intersect_p (FP_REGS, rclass)
3010 && s390_class_max_nregs (FP_REGS, mode) > 1))
3011 {
3012 if (in_p)
3013 sri->icode = (TARGET_64BIT ?
3014 CODE_FOR_reloaddi_nonoffmem_in :
3015 CODE_FOR_reloadsi_nonoffmem_in);
3016 else
3017 sri->icode = (TARGET_64BIT ?
3018 CODE_FOR_reloaddi_nonoffmem_out :
3019 CODE_FOR_reloadsi_nonoffmem_out);
3020 }
3021 }
3022
3023 /* A scratch address register is needed when a symbolic constant is
3024 copied to r0 when compiling with -fPIC. In other cases the target
3025 register might be used as temporary (see legitimize_pic_address). */
3026 if (in_p && SYMBOLIC_CONST (x) && flag_pic == 2 && rclass != ADDR_REGS)
3027 sri->icode = (TARGET_64BIT ?
3028 CODE_FOR_reloaddi_PIC_addr :
3029 CODE_FOR_reloadsi_PIC_addr);
3030
3031 /* Either scratch or no register needed. */
3032 return NO_REGS;
3033 }
3034
3035 /* Generate code to load SRC, which is a PLUS that is not a
3036 legitimate operand for the LA instruction, into TARGET.
3037 SCRATCH may be used as scratch register. */
3038
3039 void
3040 s390_expand_plus_operand (rtx target, rtx src,
3041 rtx scratch)
3042 {
3043 rtx sum1, sum2;
3044 struct s390_address ad;
3045
3046 /* src must be a PLUS; get its two operands. */
3047 gcc_assert (GET_CODE (src) == PLUS);
3048 gcc_assert (GET_MODE (src) == Pmode);
3049
3050 /* Check if any of the two operands is already scheduled
3051 for replacement by reload. This can happen e.g. when
3052 float registers occur in an address. */
3053 sum1 = find_replacement (&XEXP (src, 0));
3054 sum2 = find_replacement (&XEXP (src, 1));
3055 src = gen_rtx_PLUS (Pmode, sum1, sum2);
3056
3057 /* If the address is already strictly valid, there's nothing to do. */
3058 if (!s390_decompose_address (src, &ad)
3059 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
3060 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
3061 {
3062 /* Otherwise, one of the operands cannot be an address register;
3063 we reload its value into the scratch register. */
3064 if (true_regnum (sum1) < 1 || true_regnum (sum1) > 15)
3065 {
3066 emit_move_insn (scratch, sum1);
3067 sum1 = scratch;
3068 }
3069 if (true_regnum (sum2) < 1 || true_regnum (sum2) > 15)
3070 {
3071 emit_move_insn (scratch, sum2);
3072 sum2 = scratch;
3073 }
3074
3075 /* According to the way these invalid addresses are generated
3076 in reload.c, it should never happen (at least on s390) that
3077 *neither* of the PLUS components, after find_replacement
3078 was applied, is an address register. */
3079 if (sum1 == scratch && sum2 == scratch)
3080 {
3081 debug_rtx (src);
3082 gcc_unreachable ();
3083 }
3084
3085 src = gen_rtx_PLUS (Pmode, sum1, sum2);
3086 }
3087
3088 /* Emit the LOAD ADDRESS pattern. Note that reload of PLUS
3089 is only ever performed on addresses, so we can mark the
3090 sum as legitimate for LA in any case. */
3091 s390_load_address (target, src);
3092 }
3093
3094
3095 /* Return true if ADDR is a valid memory address.
3096 STRICT specifies whether strict register checking applies. */
3097
3098 static bool
3099 s390_legitimate_address_p (enum machine_mode mode, rtx addr, bool strict)
3100 {
3101 struct s390_address ad;
3102
3103 if (TARGET_Z10
3104 && larl_operand (addr, VOIDmode)
3105 && (mode == VOIDmode
3106 || s390_check_symref_alignment (addr, GET_MODE_SIZE (mode))))
3107 return true;
3108
3109 if (!s390_decompose_address (addr, &ad))
3110 return false;
3111
3112 if (strict)
3113 {
3114 if (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
3115 return false;
3116
3117 if (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx)))
3118 return false;
3119 }
3120 else
3121 {
3122 if (ad.base
3123 && !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
3124 || REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
3125 return false;
3126
3127 if (ad.indx
3128 && !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
3129 || REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
3130 return false;
3131 }
3132 return true;
3133 }
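/* In short: with STRICT checking the base and index must be hard
   registers that satisfy REGNO_OK_FOR_BASE_P / REGNO_OK_FOR_INDEX_P;
   without it, pseudo registers (not yet assigned a hard register) are
   accepted as well, alongside hard registers of class ADDR_REGS. */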
3134
3135 /* Return true if OP is a valid operand for the LA instruction.
3136 In 31-bit, we need to prove that the result is used as an
3137 address, as LA performs only a 31-bit addition. */
3138
3139 bool
3140 legitimate_la_operand_p (rtx op)
3141 {
3142 struct s390_address addr;
3143 if (!s390_decompose_address (op, &addr))
3144 return false;
3145
3146 return (TARGET_64BIT || addr.pointer);
3147 }
3148
3149 /* Return true if it is valid *and* preferable to use LA to
3150 compute the sum of OP1 and OP2. */
3151
3152 bool
3153 preferred_la_operand_p (rtx op1, rtx op2)
3154 {
3155 struct s390_address addr;
3156
3157 if (op2 != const0_rtx)
3158 op1 = gen_rtx_PLUS (Pmode, op1, op2);
3159
3160 if (!s390_decompose_address (op1, &addr))
3161 return false;
3162 if (addr.base && !REGNO_OK_FOR_BASE_P (REGNO (addr.base)))
3163 return false;
3164 if (addr.indx && !REGNO_OK_FOR_INDEX_P (REGNO (addr.indx)))
3165 return false;
3166
3167 if (!TARGET_64BIT && !addr.pointer)
3168 return false;
3169
3170 if (addr.pointer)
3171 return true;
3172
3173 if ((addr.base && REG_P (addr.base) && REG_POINTER (addr.base))
3174 || (addr.indx && REG_P (addr.indx) && REG_POINTER (addr.indx)))
3175 return true;
3176
3177 return false;
3178 }
3179
3180 /* Emit a forced load-address operation to load SRC into DST.
3181 This will use the LOAD ADDRESS instruction even in situations
3182 where legitimate_la_operand_p (SRC) returns false. */
3183
3184 void
3185 s390_load_address (rtx dst, rtx src)
3186 {
3187 if (TARGET_64BIT)
3188 emit_move_insn (dst, src);
3189 else
3190 emit_insn (gen_force_la_31 (dst, src));
3191 }
3192
3193 /* Return a legitimate reference for ORIG (an address) using the
3194 register REG. If REG is 0, a new pseudo is generated.
3195
3196 There are two types of references that must be handled:
3197
3198 1. Global data references must load the address from the GOT, via
3199 the PIC reg. An insn is emitted to do this load, and the reg is
3200 returned.
3201
3202 2. Static data references, constant pool addresses, and code labels
3203 compute the address as an offset from the GOT, whose base is in
3204 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
3205 differentiate them from global data objects. The returned
3206 address is the PIC reg + an unspec constant.
3207
3208 TARGET_LEGITIMATE_ADDRESS_P rejects symbolic references unless the PIC
3209 reg also appears in the address. */
3210
3211 rtx
3212 legitimize_pic_address (rtx orig, rtx reg)
3213 {
3214 rtx addr = orig;
3215 rtx new_rtx = orig;
3216 rtx base;
3217
3218 gcc_assert (!TLS_SYMBOLIC_CONST (addr));
3219
3220 if (GET_CODE (addr) == LABEL_REF
3221 || (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (addr)))
3222 {
3223 /* This is a local symbol. */
3224 if (TARGET_CPU_ZARCH && larl_operand (addr, VOIDmode))
3225 {
3226 /* Access local symbols PC-relative via LARL.
3227 This is the same as in the non-PIC case, so it is
3228 handled automatically ... */
3229 }
3230 else
3231 {
3232 /* Access local symbols relative to the GOT. */
3233
3234 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3235
3236 if (reload_in_progress || reload_completed)
3237 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3238
3239 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
3240 addr = gen_rtx_CONST (Pmode, addr);
3241 addr = force_const_mem (Pmode, addr);
3242 emit_move_insn (temp, addr);
3243
3244 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3245 if (reg != 0)
3246 {
3247 s390_load_address (reg, new_rtx);
3248 new_rtx = reg;
3249 }
3250 }
3251 }
3252 else if (GET_CODE (addr) == SYMBOL_REF)
3253 {
3254 if (reg == 0)
3255 reg = gen_reg_rtx (Pmode);
3256
3257 if (flag_pic == 1)
3258 {
3259 /* Assume GOT offset < 4k. This is handled the same way
3260 in both 31- and 64-bit code (@GOT). */
3261
3262 if (reload_in_progress || reload_completed)
3263 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3264
3265 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3266 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3267 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
3268 new_rtx = gen_const_mem (Pmode, new_rtx);
3269 emit_move_insn (reg, new_rtx);
3270 new_rtx = reg;
3271 }
3272 else if (TARGET_CPU_ZARCH)
3273 {
3274 /* If the GOT offset might be >= 4k, we determine the position
3275 of the GOT entry via a PC-relative LARL (@GOTENT). */
3276
3277 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3278
3279 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
3280 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
3281
3282 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
3283 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3284 emit_move_insn (temp, new_rtx);
3285
3286 new_rtx = gen_const_mem (Pmode, temp);
3287 emit_move_insn (reg, new_rtx);
3288 new_rtx = reg;
3289 }
3290 else
3291 {
3292 /* If the GOT offset might be >= 4k, we have to load it
3293 from the literal pool (@GOT). */
3294
3295 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3296
3297 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
3298 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
3299
3300 if (reload_in_progress || reload_completed)
3301 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3302
3303 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3304 addr = gen_rtx_CONST (Pmode, addr);
3305 addr = force_const_mem (Pmode, addr);
3306 emit_move_insn (temp, addr);
3307
3308 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3309 new_rtx = gen_const_mem (Pmode, new_rtx);
3310 emit_move_insn (reg, new_rtx);
3311 new_rtx = reg;
3312 }
3313 }
3314 else
3315 {
3316 if (GET_CODE (addr) == CONST)
3317 {
3318 addr = XEXP (addr, 0);
3319 if (GET_CODE (addr) == UNSPEC)
3320 {
3321 gcc_assert (XVECLEN (addr, 0) == 1);
3322 switch (XINT (addr, 1))
3323 {
3324 /* If someone moved a GOT-relative UNSPEC
3325 out of the literal pool, force them back in. */
3326 case UNSPEC_GOTOFF:
3327 case UNSPEC_PLTOFF:
3328 new_rtx = force_const_mem (Pmode, orig);
3329 break;
3330
3331 /* @GOT is OK as is if small. */
3332 case UNSPEC_GOT:
3333 if (flag_pic == 2)
3334 new_rtx = force_const_mem (Pmode, orig);
3335 break;
3336
3337 /* @GOTENT is OK as is. */
3338 case UNSPEC_GOTENT:
3339 break;
3340
3341 /* @PLT is OK as is on 64-bit, must be converted to
3342 GOT-relative @PLTOFF on 31-bit. */
3343 case UNSPEC_PLT:
3344 if (!TARGET_CPU_ZARCH)
3345 {
3346 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3347
3348 if (reload_in_progress || reload_completed)
3349 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3350
3351 addr = XVECEXP (addr, 0, 0);
3352 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr),
3353 UNSPEC_PLTOFF);
3354 addr = gen_rtx_CONST (Pmode, addr);
3355 addr = force_const_mem (Pmode, addr);
3356 emit_move_insn (temp, addr);
3357
3358 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3359 if (reg != 0)
3360 {
3361 s390_load_address (reg, new_rtx);
3362 new_rtx = reg;
3363 }
3364 }
3365 break;
3366
3367 /* Everything else cannot happen. */
3368 default:
3369 gcc_unreachable ();
3370 }
3371 }
3372 else
3373 gcc_assert (GET_CODE (addr) == PLUS);
3374 }
3375 if (GET_CODE (addr) == PLUS)
3376 {
3377 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
3378
3379 gcc_assert (!TLS_SYMBOLIC_CONST (op0));
3380 gcc_assert (!TLS_SYMBOLIC_CONST (op1));
3381
3382 /* Check first to see if this is a constant offset
3383 from a local symbol reference. */
3384 if ((GET_CODE (op0) == LABEL_REF
3385 || (GET_CODE (op0) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (op0)))
3386 && GET_CODE (op1) == CONST_INT)
3387 {
3388 if (TARGET_CPU_ZARCH
3389 && larl_operand (op0, VOIDmode)
3390 && INTVAL (op1) < (HOST_WIDE_INT)1 << 31
3391 && INTVAL (op1) >= -((HOST_WIDE_INT)1 << 31))
3392 {
3393 if (INTVAL (op1) & 1)
3394 {
3395 /* LARL can't handle odd offsets, so emit a
3396 pair of LARL and LA. */
3397 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3398
3399 if (!DISP_IN_RANGE (INTVAL (op1)))
3400 {
3401 HOST_WIDE_INT even = INTVAL (op1) - 1;
3402 op0 = gen_rtx_PLUS (Pmode, op0, GEN_INT (even));
3403 op0 = gen_rtx_CONST (Pmode, op0);
3404 op1 = const1_rtx;
3405 }
3406
3407 emit_move_insn (temp, op0);
3408 new_rtx = gen_rtx_PLUS (Pmode, temp, op1);
3409
3410 if (reg != 0)
3411 {
3412 s390_load_address (reg, new_rtx);
3413 new_rtx = reg;
3414 }
3415 }
3416 else
3417 {
3418 /* If the offset is even, we can just use LARL.
3419 This will happen automatically. */
3420 }
3421 }
3422 else
3423 {
3424 /* Access local symbols relative to the GOT. */
3425
3426 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3427
3428 if (reload_in_progress || reload_completed)
3429 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3430
3431 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
3432 UNSPEC_GOTOFF);
3433 addr = gen_rtx_PLUS (Pmode, addr, op1);
3434 addr = gen_rtx_CONST (Pmode, addr);
3435 addr = force_const_mem (Pmode, addr);
3436 emit_move_insn (temp, addr);
3437
3438 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3439 if (reg != 0)
3440 {
3441 s390_load_address (reg, new_rtx);
3442 new_rtx = reg;
3443 }
3444 }
3445 }
3446
3447 /* Now, check whether it is a GOT relative symbol plus offset
3448 that was pulled out of the literal pool. Force it back in. */
3449
3450 else if (GET_CODE (op0) == UNSPEC
3451 && GET_CODE (op1) == CONST_INT
3452 && XINT (op0, 1) == UNSPEC_GOTOFF)
3453 {
3454 gcc_assert (XVECLEN (op0, 0) == 1);
3455
3456 new_rtx = force_const_mem (Pmode, orig);
3457 }
3458
3459 /* Otherwise, compute the sum. */
3460 else
3461 {
3462 base = legitimize_pic_address (XEXP (addr, 0), reg);
3463 new_rtx = legitimize_pic_address (XEXP (addr, 1),
3464 base == reg ? NULL_RTX : reg);
3465 if (GET_CODE (new_rtx) == CONST_INT)
3466 new_rtx = plus_constant (base, INTVAL (new_rtx));
3467 else
3468 {
3469 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
3470 {
3471 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
3472 new_rtx = XEXP (new_rtx, 1);
3473 }
3474 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
3475 }
3476
3477 if (GET_CODE (new_rtx) == CONST)
3478 new_rtx = XEXP (new_rtx, 0);
3479 new_rtx = force_operand (new_rtx, 0);
3480 }
3481 }
3482 }
3483 return new_rtx;
3484 }
3485
3486 /* Load the thread pointer into a register. */
3487
3488 rtx
3489 s390_get_thread_pointer (void)
3490 {
3491 rtx tp = gen_reg_rtx (Pmode);
3492
3493 emit_move_insn (tp, gen_rtx_REG (Pmode, TP_REGNUM));
3494 mark_reg_pointer (tp, BITS_PER_WORD);
3495
3496 return tp;
3497 }
3498
3499 /* Emit a tls call insn. The call target is the SYMBOL_REF stored
3500 in s390_tls_symbol which always refers to __tls_get_offset.
3501 The returned offset is written to RESULT_REG and a USE rtx is
3502 generated for TLS_CALL. */
3503
3504 static GTY(()) rtx s390_tls_symbol;
3505
3506 static void
3507 s390_emit_tls_call_insn (rtx result_reg, rtx tls_call)
3508 {
3509 rtx insn;
3510
3511 gcc_assert (flag_pic);
3512
3513 if (!s390_tls_symbol)
3514 s390_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_offset");
3515
3516 insn = s390_emit_call (s390_tls_symbol, tls_call, result_reg,
3517 gen_rtx_REG (Pmode, RETURN_REGNUM));
3518
3519 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), result_reg);
3520 RTL_CONST_CALL_P (insn) = 1;
3521 }
3522
3523 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3524 this (thread-local) address. REG may be used as temporary. */
3525
3526 static rtx
3527 legitimize_tls_address (rtx addr, rtx reg)
3528 {
3529 rtx new_rtx, tls_call, temp, base, r2, insn;
3530
3531 if (GET_CODE (addr) == SYMBOL_REF)
3532 switch (tls_symbolic_operand (addr))
3533 {
3534 case TLS_MODEL_GLOBAL_DYNAMIC:
3535 start_sequence ();
3536 r2 = gen_rtx_REG (Pmode, 2);
3537 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_TLSGD);
3538 new_rtx = gen_rtx_CONST (Pmode, tls_call);
3539 new_rtx = force_const_mem (Pmode, new_rtx);
3540 emit_move_insn (r2, new_rtx);
3541 s390_emit_tls_call_insn (r2, tls_call);
3542 insn = get_insns ();
3543 end_sequence ();
3544
3545 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3546 temp = gen_reg_rtx (Pmode);
3547 emit_libcall_block (insn, temp, r2, new_rtx);
3548
3549 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3550 if (reg != 0)
3551 {
3552 s390_load_address (reg, new_rtx);
3553 new_rtx = reg;
3554 }
3555 break;
3556
3557 case TLS_MODEL_LOCAL_DYNAMIC:
3558 start_sequence ();
3559 r2 = gen_rtx_REG (Pmode, 2);
3560 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM);
3561 new_rtx = gen_rtx_CONST (Pmode, tls_call);
3562 new_rtx = force_const_mem (Pmode, new_rtx);
3563 emit_move_insn (r2, new_rtx);
3564 s390_emit_tls_call_insn (r2, tls_call);
3565 insn = get_insns ();
3566 end_sequence ();
3567
3568 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM_NTPOFF);
3569 temp = gen_reg_rtx (Pmode);
3570 emit_libcall_block (insn, temp, r2, new_rtx);
3571
3572 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3573 base = gen_reg_rtx (Pmode);
3574 s390_load_address (base, new_rtx);
3575
3576 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_DTPOFF);
3577 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3578 new_rtx = force_const_mem (Pmode, new_rtx);
3579 temp = gen_reg_rtx (Pmode);
3580 emit_move_insn (temp, new_rtx);
3581
3582 new_rtx = gen_rtx_PLUS (Pmode, base, temp);
3583 if (reg != 0)
3584 {
3585 s390_load_address (reg, new_rtx);
3586 new_rtx = reg;
3587 }
3588 break;
3589
3590 case TLS_MODEL_INITIAL_EXEC:
3591 if (flag_pic == 1)
3592 {
3593 /* Assume GOT offset < 4k. This is handled the same way
3594 in both 31- and 64-bit code. */
3595
3596 if (reload_in_progress || reload_completed)
3597 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3598
3599 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3600 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3601 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
3602 new_rtx = gen_const_mem (Pmode, new_rtx);
3603 temp = gen_reg_rtx (Pmode);
3604 emit_move_insn (temp, new_rtx);
3605 }
3606 else if (TARGET_CPU_ZARCH)
3607 {
3608 /* If the GOT offset might be >= 4k, we determine the position
3609 of the GOT entry via a PC-relative LARL. */
3610
3611 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3612 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3613 temp = gen_reg_rtx (Pmode);
3614 emit_move_insn (temp, new_rtx);
3615
3616 new_rtx = gen_const_mem (Pmode, temp);
3617 temp = gen_reg_rtx (Pmode);
3618 emit_move_insn (temp, new_rtx);
3619 }
3620 else if (flag_pic)
3621 {
3622 /* If the GOT offset might be >= 4k, we have to load it
3623 from the literal pool. */
3624
3625 if (reload_in_progress || reload_completed)
3626 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3627
3628 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3629 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3630 new_rtx = force_const_mem (Pmode, new_rtx);
3631 temp = gen_reg_rtx (Pmode);
3632 emit_move_insn (temp, new_rtx);
3633
3634 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3635 new_rtx = gen_const_mem (Pmode, new_rtx);
3636
3637 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
3638 temp = gen_reg_rtx (Pmode);
3639 emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
3640 }
3641 else
3642 {
3643 /* In position-dependent code, load the absolute address of
3644 the GOT entry from the literal pool. */
3645
3646 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3647 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3648 new_rtx = force_const_mem (Pmode, new_rtx);
3649 temp = gen_reg_rtx (Pmode);
3650 emit_move_insn (temp, new_rtx);
3651
3652 new_rtx = temp;
3653 new_rtx = gen_const_mem (Pmode, new_rtx);
3654 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
3655 temp = gen_reg_rtx (Pmode);
3656 emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
3657 }
3658
3659 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3660 if (reg != 0)
3661 {
3662 s390_load_address (reg, new_rtx);
3663 new_rtx = reg;
3664 }
3665 break;
3666
3667 case TLS_MODEL_LOCAL_EXEC:
3668 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3669 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3670 new_rtx = force_const_mem (Pmode, new_rtx);
3671 temp = gen_reg_rtx (Pmode);
3672 emit_move_insn (temp, new_rtx);
3673
3674 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3675 if (reg != 0)
3676 {
3677 s390_load_address (reg, new_rtx);
3678 new_rtx = reg;
3679 }
3680 break;
3681
3682 default:
3683 gcc_unreachable ();
3684 }
3685
3686 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == UNSPEC)
3687 {
3688 switch (XINT (XEXP (addr, 0), 1))
3689 {
3690 case UNSPEC_INDNTPOFF:
3691 gcc_assert (TARGET_CPU_ZARCH);
3692 new_rtx = addr;
3693 break;
3694
3695 default:
3696 gcc_unreachable ();
3697 }
3698 }
3699
3700 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
3701 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
3702 {
3703 new_rtx = XEXP (XEXP (addr, 0), 0);
3704 if (GET_CODE (new_rtx) != SYMBOL_REF)
3705 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3706
3707 new_rtx = legitimize_tls_address (new_rtx, reg);
3708 new_rtx = plus_constant (new_rtx, INTVAL (XEXP (XEXP (addr, 0), 1)));
3709 new_rtx = force_operand (new_rtx, 0);
3710 }
3711
3712 else
3713 gcc_unreachable (); /* for now ... */
3714
3715 return new_rtx;
3716 }
3717
3718 /* Emit insns making the address in operands[1] valid for a standard
3719 move to operands[0]. operands[1] is replaced by an address which
3720 should be used instead of the former RTX to emit the move
3721 pattern. */
3722
3723 void
3724 emit_symbolic_move (rtx *operands)
3725 {
3726 rtx temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
3727
3728 if (GET_CODE (operands[0]) == MEM)
3729 operands[1] = force_reg (Pmode, operands[1]);
3730 else if (TLS_SYMBOLIC_CONST (operands[1]))
3731 operands[1] = legitimize_tls_address (operands[1], temp);
3732 else if (flag_pic)
3733 operands[1] = legitimize_pic_address (operands[1], temp);
3734 }
3735
3736 /* Try machine-dependent ways of modifying an illegitimate address X
3737 to be legitimate. If we find one, return the new, valid address.
3738
3739 OLDX is the address as it was before break_out_memory_refs was called.
3740 In some cases it is useful to look at this to decide what needs to be done.
3741
3742 MODE is the mode of the operand pointed to by X.
3743
3744 When -fpic is used, special handling is needed for symbolic references.
3745 See comments by legitimize_pic_address for details. */
3746
3747 static rtx
3748 s390_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
3749 enum machine_mode mode ATTRIBUTE_UNUSED)
3750 {
3751 rtx constant_term = const0_rtx;
3752
3753 if (TLS_SYMBOLIC_CONST (x))
3754 {
3755 x = legitimize_tls_address (x, 0);
3756
3757 if (s390_legitimate_address_p (mode, x, FALSE))
3758 return x;
3759 }
3760 else if (GET_CODE (x) == PLUS
3761 && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
3762 || TLS_SYMBOLIC_CONST (XEXP (x, 1))))
3763 {
3764 return x;
3765 }
3766 else if (flag_pic)
3767 {
3768 if (SYMBOLIC_CONST (x)
3769 || (GET_CODE (x) == PLUS
3770 && (SYMBOLIC_CONST (XEXP (x, 0))
3771 || SYMBOLIC_CONST (XEXP (x, 1)))))
3772 x = legitimize_pic_address (x, 0);
3773
3774 if (s390_legitimate_address_p (mode, x, FALSE))
3775 return x;
3776 }
3777
3778 x = eliminate_constant_term (x, &constant_term);
3779
3780 /* Optimize loading of large displacements by splitting them
3781 into the multiple of 4K and the rest; this allows the
3782 former to be CSE'd if possible.
3783
3784 Don't do this if the displacement is added to a register
3785 pointing into the stack frame, as the offsets will
3786 change later anyway. */
3787
3788 if (GET_CODE (constant_term) == CONST_INT
3789 && !TARGET_LONG_DISPLACEMENT
3790 && !DISP_IN_RANGE (INTVAL (constant_term))
3791 && !(REG_P (x) && REGNO_PTR_FRAME_P (REGNO (x))))
3792 {
3793 HOST_WIDE_INT lower = INTVAL (constant_term) & 0xfff;
3794 HOST_WIDE_INT upper = INTVAL (constant_term) ^ lower;
3795
3796 rtx temp = gen_reg_rtx (Pmode);
3797 rtx val = force_operand (GEN_INT (upper), temp);
3798 if (val != temp)
3799 emit_move_insn (temp, val);
3800
3801 x = gen_rtx_PLUS (Pmode, x, temp);
3802 constant_term = GEN_INT (lower);
3803 }
3804
3805 if (GET_CODE (x) == PLUS)
3806 {
3807 if (GET_CODE (XEXP (x, 0)) == REG)
3808 {
3809 rtx temp = gen_reg_rtx (Pmode);
3810 rtx val = force_operand (XEXP (x, 1), temp);
3811 if (val != temp)
3812 emit_move_insn (temp, val);
3813
3814 x = gen_rtx_PLUS (Pmode, XEXP (x, 0), temp);
3815 }
3816
3817 else if (GET_CODE (XEXP (x, 1)) == REG)
3818 {
3819 rtx temp = gen_reg_rtx (Pmode);
3820 rtx val = force_operand (XEXP (x, 0), temp);
3821 if (val != temp)
3822 emit_move_insn (temp, val);
3823
3824 x = gen_rtx_PLUS (Pmode, temp, XEXP (x, 1));
3825 }
3826 }
3827
3828 if (constant_term != const0_rtx)
3829 x = gen_rtx_PLUS (Pmode, x, constant_term);
3830
3831 return x;
3832 }
3833
3834 /* Try a machine-dependent way of reloading an illegitimate address AD
3835 operand. If we find one, push the reload and return the new address.
3836
3837 MODE is the mode of the enclosing MEM. OPNUM is the operand number
3838 and TYPE is the reload type of the current reload. */
3839
3840 rtx
3841 legitimize_reload_address (rtx ad, enum machine_mode mode ATTRIBUTE_UNUSED,
3842 int opnum, int type)
3843 {
3844 if (!optimize || TARGET_LONG_DISPLACEMENT)
3845 return NULL_RTX;
3846
3847 if (GET_CODE (ad) == PLUS)
3848 {
3849 rtx tem = simplify_binary_operation (PLUS, Pmode,
3850 XEXP (ad, 0), XEXP (ad, 1));
3851 if (tem)
3852 ad = tem;
3853 }
3854
3855 if (GET_CODE (ad) == PLUS
3856 && GET_CODE (XEXP (ad, 0)) == REG
3857 && GET_CODE (XEXP (ad, 1)) == CONST_INT
3858 && !DISP_IN_RANGE (INTVAL (XEXP (ad, 1))))
3859 {
3860 HOST_WIDE_INT lower = INTVAL (XEXP (ad, 1)) & 0xfff;
3861 HOST_WIDE_INT upper = INTVAL (XEXP (ad, 1)) ^ lower;
3862 rtx cst, tem, new_rtx;
3863
3864 cst = GEN_INT (upper);
3865 if (!legitimate_reload_constant_p (cst))
3866 cst = force_const_mem (Pmode, cst);
3867
3868 tem = gen_rtx_PLUS (Pmode, XEXP (ad, 0), cst);
3869 new_rtx = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));
3870
3871 push_reload (XEXP (tem, 1), 0, &XEXP (tem, 1), 0,
3872 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
3873 opnum, (enum reload_type) type);
3874 return new_rtx;
3875 }
3876
3877 return NULL_RTX;
3878 }
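/* Worked example for the split above, using a hypothetical displacement
   of 0x12345:

     lower = 0x12345 & 0xfff  =  0x345
     upper = 0x12345 ^ lower  =  0x12000

   UPPER, a multiple of 4K, is pushed as a separate reload into a base
   register, while LOWER fits the 12-bit displacement field of the final
   address. */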
3879
3880 /* Emit code to copy LEN bytes from SRC to DST. */
3881
3882 void
3883 s390_expand_movmem (rtx dst, rtx src, rtx len)
3884 {
3885 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
3886 {
3887 if (INTVAL (len) > 0)
3888 emit_insn (gen_movmem_short (dst, src, GEN_INT (INTVAL (len) - 1)));
3889 }
3890
3891 else if (TARGET_MVCLE)
3892 {
3893 emit_insn (gen_movmem_long (dst, src, convert_to_mode (Pmode, len, 1)));
3894 }
3895
3896 else
3897 {
3898 rtx dst_addr, src_addr, count, blocks, temp;
3899 rtx loop_start_label = gen_label_rtx ();
3900 rtx loop_end_label = gen_label_rtx ();
3901 rtx end_label = gen_label_rtx ();
3902 enum machine_mode mode;
3903
3904 mode = GET_MODE (len);
3905 if (mode == VOIDmode)
3906 mode = Pmode;
3907
3908 dst_addr = gen_reg_rtx (Pmode);
3909 src_addr = gen_reg_rtx (Pmode);
3910 count = gen_reg_rtx (mode);
3911 blocks = gen_reg_rtx (mode);
3912
3913 convert_move (count, len, 1);
3914 emit_cmp_and_jump_insns (count, const0_rtx,
3915 EQ, NULL_RTX, mode, 1, end_label);
3916
3917 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
3918 emit_move_insn (src_addr, force_operand (XEXP (src, 0), NULL_RTX));
3919 dst = change_address (dst, VOIDmode, dst_addr);
3920 src = change_address (src, VOIDmode, src_addr);
3921
3922 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
3923 OPTAB_DIRECT);
3924 if (temp != count)
3925 emit_move_insn (count, temp);
3926
3927 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
3928 OPTAB_DIRECT);
3929 if (temp != blocks)
3930 emit_move_insn (blocks, temp);
3931
3932 emit_cmp_and_jump_insns (blocks, const0_rtx,
3933 EQ, NULL_RTX, mode, 1, loop_end_label);
3934
3935 emit_label (loop_start_label);
3936
3937 emit_insn (gen_movmem_short (dst, src, GEN_INT (255)));
3938 s390_load_address (dst_addr,
3939 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
3940 s390_load_address (src_addr,
3941 gen_rtx_PLUS (Pmode, src_addr, GEN_INT (256)));
3942
3943 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
3944 OPTAB_DIRECT);
3945 if (temp != blocks)
3946 emit_move_insn (blocks, temp);
3947
3948 emit_cmp_and_jump_insns (blocks, const0_rtx,
3949 EQ, NULL_RTX, mode, 1, loop_end_label);
3950
3951 emit_jump (loop_start_label);
3952 emit_label (loop_end_label);
3953
3954 emit_insn (gen_movmem_short (dst, src,
3955 convert_to_mode (Pmode, count, 1)));
3956 emit_label (end_label);
3957 }
3958 }
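
/* Illustrative sketch, not part of the original sources: the block
   decomposition performed by the loop above, expressed in plain C with a
   hypothetical helper.  MVC moves at most 256 bytes and encodes the length
   as LEN - 1, hence the "count = len - 1" bookkeeping and the 256-byte
   chunks.  */

static void
example_block_move (unsigned char *dst, const unsigned char *src,
                    unsigned long len)
{
  unsigned long count, blocks, i;

  if (len == 0)
    return;

  count = len - 1;              /* length as encoded in the MVC pattern   */
  blocks = count >> 8;          /* number of full 256-byte chunks         */

  while (blocks != 0)
    {
      for (i = 0; i < 256; i++) /* stands in for one MVC of 256 bytes     */
        dst[i] = src[i];
      dst += 256;
      src += 256;
      blocks--;
    }

  for (i = 0; i <= (count & 0xff); i++)  /* trailing chunk of 1..256 bytes */
    dst[i] = src[i];
}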
3959
3960 /* Emit code to set LEN bytes at DST to VAL.
3961 Make use of clrmem if VAL is zero. */
3962
3963 void
3964 s390_expand_setmem (rtx dst, rtx len, rtx val)
3965 {
3966 if (GET_CODE (len) == CONST_INT && INTVAL (len) == 0)
3967 return;
3968
3969 gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);
3970
3971 if (GET_CODE (len) == CONST_INT && INTVAL (len) > 0 && INTVAL (len) <= 257)
3972 {
3973 if (val == const0_rtx && INTVAL (len) <= 256)
3974 emit_insn (gen_clrmem_short (dst, GEN_INT (INTVAL (len) - 1)));
3975 else
3976 {
3977 /* Initialize memory by storing the first byte. */
3978 emit_move_insn (adjust_address (dst, QImode, 0), val);
3979
3980 if (INTVAL (len) > 1)
3981 {
3982 /* Initiate 1 byte overlap move.
3983 The first byte of DST is propagated through DSTP1.
3984 Prepare a movmem for: DST+1 = DST (length = LEN - 1).
3985 DST is set to size 1 so the rest of the memory location
3986 does not count as source operand. */
3987 rtx dstp1 = adjust_address (dst, VOIDmode, 1);
3988 set_mem_size (dst, const1_rtx);
3989
3990 emit_insn (gen_movmem_short (dstp1, dst,
3991 GEN_INT (INTVAL (len) - 2)));
3992 }
3993 }
3994 }
3995
3996 else if (TARGET_MVCLE)
3997 {
3998 val = force_not_mem (convert_modes (Pmode, QImode, val, 1));
3999 emit_insn (gen_setmem_long (dst, convert_to_mode (Pmode, len, 1), val));
4000 }
4001
4002 else
4003 {
4004 rtx dst_addr, src_addr, count, blocks, temp, dstp1 = NULL_RTX;
4005 rtx loop_start_label = gen_label_rtx ();
4006 rtx loop_end_label = gen_label_rtx ();
4007 rtx end_label = gen_label_rtx ();
4008 enum machine_mode mode;
4009
4010 mode = GET_MODE (len);
4011 if (mode == VOIDmode)
4012 mode = Pmode;
4013
4014 dst_addr = gen_reg_rtx (Pmode);
4015 src_addr = gen_reg_rtx (Pmode);
4016 count = gen_reg_rtx (mode);
4017 blocks = gen_reg_rtx (mode);
4018
4019 convert_move (count, len, 1);
4020 emit_cmp_and_jump_insns (count, const0_rtx,
4021 EQ, NULL_RTX, mode, 1, end_label);
4022
4023 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
4024 dst = change_address (dst, VOIDmode, dst_addr);
4025
4026 if (val == const0_rtx)
4027 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4028 OPTAB_DIRECT);
4029 else
4030 {
4031 dstp1 = adjust_address (dst, VOIDmode, 1);
4032 set_mem_size (dst, const1_rtx);
4033
4034 /* Initialize memory by storing the first byte. */
4035 emit_move_insn (adjust_address (dst, QImode, 0), val);
4036
4037 /* If count is 1 we are done. */
4038 emit_cmp_and_jump_insns (count, const1_rtx,
4039 EQ, NULL_RTX, mode, 1, end_label);
4040
4041 temp = expand_binop (mode, add_optab, count, GEN_INT (-2), count, 1,
4042 OPTAB_DIRECT);
4043 }
4044 if (temp != count)
4045 emit_move_insn (count, temp);
4046
4047 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4048 OPTAB_DIRECT);
4049 if (temp != blocks)
4050 emit_move_insn (blocks, temp);
4051
4052 emit_cmp_and_jump_insns (blocks, const0_rtx,
4053 EQ, NULL_RTX, mode, 1, loop_end_label);
4054
4055 emit_label (loop_start_label);
4056
4057 if (val == const0_rtx)
4058 emit_insn (gen_clrmem_short (dst, GEN_INT (255)));
4059 else
4060 emit_insn (gen_movmem_short (dstp1, dst, GEN_INT (255)));
4061 s390_load_address (dst_addr,
4062 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
4063
4064 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4065 OPTAB_DIRECT);
4066 if (temp != blocks)
4067 emit_move_insn (blocks, temp);
4068
4069 emit_cmp_and_jump_insns (blocks, const0_rtx,
4070 EQ, NULL_RTX, mode, 1, loop_end_label);
4071
4072 emit_jump (loop_start_label);
4073 emit_label (loop_end_label);
4074
4075 if (val == const0_rtx)
4076 emit_insn (gen_clrmem_short (dst, convert_to_mode (Pmode, count, 1)));
4077 else
4078 emit_insn (gen_movmem_short (dstp1, dst, convert_to_mode (Pmode, count, 1)));
4079 emit_label (end_label);
4080 }
4081 }
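
/* Illustrative sketch, not part of the original sources: the overlapping
   move trick used above, as plain C on a hypothetical helper.  After the
   first byte has been stored, copying DST to DST + 1 byte by byte (the
   effect of the destructively overlapping MVC) propagates that byte over
   the remaining LEN - 1 bytes.  */

static void
example_overlap_memset (unsigned char *dst, unsigned char val,
                        unsigned long len)
{
  unsigned long i;

  if (len == 0)
    return;

  dst[0] = val;                 /* initialize memory with the first byte  */
  for (i = 0; i + 1 < len; i++) /* DST + 1 = DST with length LEN - 1      */
    dst[i + 1] = dst[i];
}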
4082
4083 /* Emit code to compare LEN bytes at OP0 with those at OP1,
4084 and return the result in TARGET. */
4085
4086 void
4087 s390_expand_cmpmem (rtx target, rtx op0, rtx op1, rtx len)
4088 {
4089 rtx ccreg = gen_rtx_REG (CCUmode, CC_REGNUM);
4090 rtx tmp;
4091
4092 /* As the result of CMPINT is inverted compared to what we need,
4093 we have to swap the operands. */
4094 tmp = op0; op0 = op1; op1 = tmp;
4095
4096 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
4097 {
4098 if (INTVAL (len) > 0)
4099 {
4100 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (INTVAL (len) - 1)));
4101 emit_insn (gen_cmpint (target, ccreg));
4102 }
4103 else
4104 emit_move_insn (target, const0_rtx);
4105 }
4106 else if (TARGET_MVCLE)
4107 {
4108 emit_insn (gen_cmpmem_long (op0, op1, convert_to_mode (Pmode, len, 1)));
4109 emit_insn (gen_cmpint (target, ccreg));
4110 }
4111 else
4112 {
4113 rtx addr0, addr1, count, blocks, temp;
4114 rtx loop_start_label = gen_label_rtx ();
4115 rtx loop_end_label = gen_label_rtx ();
4116 rtx end_label = gen_label_rtx ();
4117 enum machine_mode mode;
4118
4119 mode = GET_MODE (len);
4120 if (mode == VOIDmode)
4121 mode = Pmode;
4122
4123 addr0 = gen_reg_rtx (Pmode);
4124 addr1 = gen_reg_rtx (Pmode);
4125 count = gen_reg_rtx (mode);
4126 blocks = gen_reg_rtx (mode);
4127
4128 convert_move (count, len, 1);
4129 emit_cmp_and_jump_insns (count, const0_rtx,
4130 EQ, NULL_RTX, mode, 1, end_label);
4131
4132 emit_move_insn (addr0, force_operand (XEXP (op0, 0), NULL_RTX));
4133 emit_move_insn (addr1, force_operand (XEXP (op1, 0), NULL_RTX));
4134 op0 = change_address (op0, VOIDmode, addr0);
4135 op1 = change_address (op1, VOIDmode, addr1);
4136
4137 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4138 OPTAB_DIRECT);
4139 if (temp != count)
4140 emit_move_insn (count, temp);
4141
4142 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4143 OPTAB_DIRECT);
4144 if (temp != blocks)
4145 emit_move_insn (blocks, temp);
4146
4147 emit_cmp_and_jump_insns (blocks, const0_rtx,
4148 EQ, NULL_RTX, mode, 1, loop_end_label);
4149
4150 emit_label (loop_start_label);
4151
4152 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (255)));
4153 temp = gen_rtx_NE (VOIDmode, ccreg, const0_rtx);
4154 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
4155 gen_rtx_LABEL_REF (VOIDmode, end_label), pc_rtx);
4156 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
4157 emit_jump_insn (temp);
4158
4159 s390_load_address (addr0,
4160 gen_rtx_PLUS (Pmode, addr0, GEN_INT (256)));
4161 s390_load_address (addr1,
4162 gen_rtx_PLUS (Pmode, addr1, GEN_INT (256)));
4163
4164 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4165 OPTAB_DIRECT);
4166 if (temp != blocks)
4167 emit_move_insn (blocks, temp);
4168
4169 emit_cmp_and_jump_insns (blocks, const0_rtx,
4170 EQ, NULL_RTX, mode, 1, loop_end_label);
4171
4172 emit_jump (loop_start_label);
4173 emit_label (loop_end_label);
4174
4175 emit_insn (gen_cmpmem_short (op0, op1,
4176 convert_to_mode (Pmode, count, 1)));
4177 emit_label (end_label);
4178
4179 emit_insn (gen_cmpint (target, ccreg));
4180 }
4181 }
4182
4183
4184 /* Expand conditional increment or decrement using alc/slb instructions.
4185 Should generate code setting DST to either SRC or SRC + INCREMENT,
4186 depending on the result of the comparison CMP_OP0 CMP_CODE CMP_OP1.
4187 Returns true if successful, false otherwise.
4188
4189 That makes it possible to implement some if-constructs without jumps e.g.:
4190 (borrow = CC0 | CC1 and carry = CC2 | CC3)
4191 unsigned int a, b, c;
4192 if (a < b) c++; -> CCU b > a -> CC2; c += carry;
4193 if (a < b) c--; -> CCL3 a - b -> borrow; c -= borrow;
4194 if (a <= b) c++; -> CCL3 b - a -> borrow; c += carry;
4195 if (a <= b) c--; -> CCU a <= b -> borrow; c -= borrow;
4196
4197 Checks for EQ and NE with a nonzero value need an additional xor e.g.:
4198 if (a == b) c++; -> CCL3 a ^= b; 0 - a -> borrow; c += carry;
4199 if (a == b) c--; -> CCU a ^= b; a <= 0 -> CC0 | CC1; c -= borrow;
4200 if (a != b) c++; -> CCU a ^= b; a > 0 -> CC2; c += carry;
4201 if (a != b) c--; -> CCL3 a ^= b; 0 - a -> borrow; c -= borrow; */
4202
4203 bool
4204 s390_expand_addcc (enum rtx_code cmp_code, rtx cmp_op0, rtx cmp_op1,
4205 rtx dst, rtx src, rtx increment)
4206 {
4207 enum machine_mode cmp_mode;
4208 enum machine_mode cc_mode;
4209 rtx op_res;
4210 rtx insn;
4211 rtvec p;
4212 int ret;
4213
4214 if ((GET_MODE (cmp_op0) == SImode || GET_MODE (cmp_op0) == VOIDmode)
4215 && (GET_MODE (cmp_op1) == SImode || GET_MODE (cmp_op1) == VOIDmode))
4216 cmp_mode = SImode;
4217 else if ((GET_MODE (cmp_op0) == DImode || GET_MODE (cmp_op0) == VOIDmode)
4218 && (GET_MODE (cmp_op1) == DImode || GET_MODE (cmp_op1) == VOIDmode))
4219 cmp_mode = DImode;
4220 else
4221 return false;
4222
4223 /* Try ADD LOGICAL WITH CARRY. */
4224 if (increment == const1_rtx)
4225 {
4226 /* Determine CC mode to use. */
4227 if (cmp_code == EQ || cmp_code == NE)
4228 {
4229 if (cmp_op1 != const0_rtx)
4230 {
4231 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
4232 NULL_RTX, 0, OPTAB_WIDEN);
4233 cmp_op1 = const0_rtx;
4234 }
4235
4236 cmp_code = cmp_code == EQ ? LEU : GTU;
4237 }
4238
4239 if (cmp_code == LTU || cmp_code == LEU)
4240 {
4241 rtx tem = cmp_op0;
4242 cmp_op0 = cmp_op1;
4243 cmp_op1 = tem;
4244 cmp_code = swap_condition (cmp_code);
4245 }
4246
4247 switch (cmp_code)
4248 {
4249 case GTU:
4250 cc_mode = CCUmode;
4251 break;
4252
4253 case GEU:
4254 cc_mode = CCL3mode;
4255 break;
4256
4257 default:
4258 return false;
4259 }
4260
4261 /* Emit comparison instruction pattern. */
4262 if (!register_operand (cmp_op0, cmp_mode))
4263 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4264
4265 insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
4266 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4267 /* We use insn_invalid_p here to add clobbers if required. */
4268 ret = insn_invalid_p (emit_insn (insn));
4269 gcc_assert (!ret);
4270
4271 /* Emit ALC instruction pattern. */
4272 op_res = gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4273 gen_rtx_REG (cc_mode, CC_REGNUM),
4274 const0_rtx);
4275
4276 if (src != const0_rtx)
4277 {
4278 if (!register_operand (src, GET_MODE (dst)))
4279 src = force_reg (GET_MODE (dst), src);
4280
4281 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, src);
4282 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, const0_rtx);
4283 }
4284
4285 p = rtvec_alloc (2);
4286 RTVEC_ELT (p, 0) =
4287 gen_rtx_SET (VOIDmode, dst, op_res);
4288 RTVEC_ELT (p, 1) =
4289 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4290 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
4291
4292 return true;
4293 }
4294
4295 /* Try SUBTRACT LOGICAL WITH BORROW. */
4296 if (increment == constm1_rtx)
4297 {
4298 /* Determine CC mode to use. */
4299 if (cmp_code == EQ || cmp_code == NE)
4300 {
4301 if (cmp_op1 != const0_rtx)
4302 {
4303 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
4304 NULL_RTX, 0, OPTAB_WIDEN);
4305 cmp_op1 = const0_rtx;
4306 }
4307
4308 cmp_code = cmp_code == EQ ? LEU : GTU;
4309 }
4310
4311 if (cmp_code == GTU || cmp_code == GEU)
4312 {
4313 rtx tem = cmp_op0;
4314 cmp_op0 = cmp_op1;
4315 cmp_op1 = tem;
4316 cmp_code = swap_condition (cmp_code);
4317 }
4318
4319 switch (cmp_code)
4320 {
4321 case LEU:
4322 cc_mode = CCUmode;
4323 break;
4324
4325 case LTU:
4326 cc_mode = CCL3mode;
4327 break;
4328
4329 default:
4330 return false;
4331 }
4332
4333 /* Emit comparison instruction pattern. */
4334 if (!register_operand (cmp_op0, cmp_mode))
4335 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4336
4337 insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
4338 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4339 /* We use insn_invalid_p here to add clobbers if required. */
4340 ret = insn_invalid_p (emit_insn (insn));
4341 gcc_assert (!ret);
4342
4343 /* Emit SLB instruction pattern. */
4344 if (!register_operand (src, GET_MODE (dst)))
4345 src = force_reg (GET_MODE (dst), src);
4346
4347 op_res = gen_rtx_MINUS (GET_MODE (dst),
4348 gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
4349 gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4350 gen_rtx_REG (cc_mode, CC_REGNUM),
4351 const0_rtx));
4352 p = rtvec_alloc (2);
4353 RTVEC_ELT (p, 0) =
4354 gen_rtx_SET (VOIDmode, dst, op_res);
4355 RTVEC_ELT (p, 1) =
4356 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4357 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
4358
4359 return true;
4360 }
4361
4362 return false;
4363 }
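
/* Illustrative sketch, not part of the original sources: the effect of the
   ALC/SLB sequences built by s390_expand_addcc, written in portable C with
   hypothetical helpers.  The unsigned comparison plays the role of the
   carry respectively borrow consumed by ADD LOGICAL WITH CARRY and
   SUBTRACT LOGICAL WITH BORROW.  */

static unsigned int
example_cond_incr (unsigned int a, unsigned int b, unsigned int c)
{
  /* "if (a < b) c++;" without a branch: the comparison b > a produces
     the carry that is added to C.  */
  return c + (a < b);
}

static unsigned int
example_cond_decr (unsigned int a, unsigned int b, unsigned int c)
{
  /* "if (a < b) c--;" without a branch: a - b underflows and produces
     the borrow that is subtracted from C.  */
  return c - (a < b);
}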
4364
4365 /* Expand code for the insv template. Return true if successful. */
4366
4367 bool
4368 s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
4369 {
4370 int bitsize = INTVAL (op1);
4371 int bitpos = INTVAL (op2);
4372
4373 /* On z10 we can use the risbg instruction to implement insv. */
4374 if (TARGET_Z10
4375 && ((GET_MODE (dest) == DImode && GET_MODE (src) == DImode)
4376 || (GET_MODE (dest) == SImode && GET_MODE (src) == SImode)))
4377 {
4378 rtx op;
4379 rtx clobber;
4380
4381 op = gen_rtx_SET (GET_MODE(src),
4382 gen_rtx_ZERO_EXTRACT (GET_MODE (dest), dest, op1, op2),
4383 src);
4384 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4385 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
4386
4387 return true;
4388 }
4389
4390 /* We need byte alignment. */
4391 if (bitsize % BITS_PER_UNIT)
4392 return false;
4393
4394 if (bitpos == 0
4395 && memory_operand (dest, VOIDmode)
4396 && (register_operand (src, word_mode)
4397 || const_int_operand (src, VOIDmode)))
4398 {
4399 /* Emit standard pattern if possible. */
4400 enum machine_mode mode = smallest_mode_for_size (bitsize, MODE_INT);
4401 if (GET_MODE_BITSIZE (mode) == bitsize)
4402 emit_move_insn (adjust_address (dest, mode, 0), gen_lowpart (mode, src));
4403
4404 /* (set (ze (mem)) (const_int)). */
4405 else if (const_int_operand (src, VOIDmode))
4406 {
4407 int size = bitsize / BITS_PER_UNIT;
4408 rtx src_mem = adjust_address (force_const_mem (word_mode, src), BLKmode,
4409 GET_MODE_SIZE (word_mode) - size);
4410
4411 dest = adjust_address (dest, BLKmode, 0);
4412 set_mem_size (dest, GEN_INT (size));
4413 s390_expand_movmem (dest, src_mem, GEN_INT (size));
4414 }
4415
4416 /* (set (ze (mem)) (reg)). */
4417 else if (register_operand (src, word_mode))
4418 {
4419 if (bitsize <= GET_MODE_BITSIZE (SImode))
4420 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, op1,
4421 const0_rtx), src);
4422 else
4423 {
4424 /* Emit st,stcmh sequence. */
4425 int stcmh_width = bitsize - GET_MODE_BITSIZE (SImode);
4426 int size = stcmh_width / BITS_PER_UNIT;
4427
4428 emit_move_insn (adjust_address (dest, SImode, size),
4429 gen_lowpart (SImode, src));
4430 set_mem_size (dest, GEN_INT (size));
4431 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, GEN_INT
4432 (stcmh_width), const0_rtx),
4433 gen_rtx_LSHIFTRT (word_mode, src, GEN_INT
4434 (GET_MODE_BITSIZE (SImode))));
4435 }
4436 }
4437 else
4438 return false;
4439
4440 return true;
4441 }
4442
4443 /* (set (ze (reg)) (const_int)). */
4444 if (TARGET_ZARCH
4445 && register_operand (dest, word_mode)
4446 && (bitpos % 16) == 0
4447 && (bitsize % 16) == 0
4448 && const_int_operand (src, VOIDmode))
4449 {
4450 HOST_WIDE_INT val = INTVAL (src);
4451 int regpos = bitpos + bitsize;
4452
4453 while (regpos > bitpos)
4454 {
4455 enum machine_mode putmode;
4456 int putsize;
4457
4458 if (TARGET_EXTIMM && (regpos % 32 == 0) && (regpos >= bitpos + 32))
4459 putmode = SImode;
4460 else
4461 putmode = HImode;
4462
4463 putsize = GET_MODE_BITSIZE (putmode);
4464 regpos -= putsize;
4465 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
4466 GEN_INT (putsize),
4467 GEN_INT (regpos)),
4468 gen_int_mode (val, putmode));
4469 val >>= putsize;
4470 }
4471 gcc_assert (regpos == bitpos);
4472 return true;
4473 }
4474
4475 return false;
4476 }
4477
4478 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic which returns a
4479 register that holds VAL of mode MODE shifted by COUNT bits. */
4480
4481 static inline rtx
4482 s390_expand_mask_and_shift (rtx val, enum machine_mode mode, rtx count)
4483 {
4484 val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
4485 NULL_RTX, 1, OPTAB_DIRECT);
4486 return expand_simple_binop (SImode, ASHIFT, val, count,
4487 NULL_RTX, 1, OPTAB_DIRECT);
4488 }
4489
4490 /* Structure to hold the initial parameters for a compare_and_swap operation
4491 in HImode and QImode. */
4492
4493 struct alignment_context
4494 {
4495 rtx memsi; /* SI aligned memory location. */
4496 rtx shift; /* Bit offset with regard to lsb. */
4497 rtx modemask; /* Mask of the HQImode shifted by SHIFT bits. */
4498 rtx modemaski; /* ~modemask */
4499 bool aligned; /* True if memory is aligned, false else. */
4500 };
4501
4502 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize
4503 the structure AC for transparent simplification, if the memory alignment
4504 is known to be at least 32 bits. MEM is the memory location for the actual
4505 operation and MODE its mode. */
4506
4507 static void
4508 init_alignment_context (struct alignment_context *ac, rtx mem,
4509 enum machine_mode mode)
4510 {
4511 ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
4512 ac->aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));
4513
4514 if (ac->aligned)
4515 ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned. */
4516 else
4517 {
4518 /* Alignment is unknown. */
4519 rtx byteoffset, addr, align;
4520
4521 /* Force the address into a register. */
4522 addr = force_reg (Pmode, XEXP (mem, 0));
4523
4524 /* Align it to SImode. */
4525 align = expand_simple_binop (Pmode, AND, addr,
4526 GEN_INT (-GET_MODE_SIZE (SImode)),
4527 NULL_RTX, 1, OPTAB_DIRECT);
4528 /* Generate MEM. */
4529 ac->memsi = gen_rtx_MEM (SImode, align);
4530 MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
4531 set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
4532 set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));
4533
4534 /* Calculate shiftcount. */
4535 byteoffset = expand_simple_binop (Pmode, AND, addr,
4536 GEN_INT (GET_MODE_SIZE (SImode) - 1),
4537 NULL_RTX, 1, OPTAB_DIRECT);
4538 /* As we already have some offset, evaluate the remaining distance. */
4539 ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
4540 NULL_RTX, 1, OPTAB_DIRECT);
4541
4542 }
4543 /* Shift is the byte count, but we need the bitcount. */
4544 ac->shift = expand_simple_binop (SImode, MULT, ac->shift, GEN_INT (BITS_PER_UNIT),
4545 NULL_RTX, 1, OPTAB_DIRECT);
4546 /* Calculate masks. */
4547 ac->modemask = expand_simple_binop (SImode, ASHIFT,
4548 GEN_INT (GET_MODE_MASK (mode)), ac->shift,
4549 NULL_RTX, 1, OPTAB_DIRECT);
4550 ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask, NULL_RTX, 1);
4551 }
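
/* Illustrative sketch, not part of the original sources: the bookkeeping
   done by init_alignment_context for a QImode access, with assumed inputs
   and a hypothetical helper.  ADDR is the byte address; the SImode word
   containing it starts at ADDR & -4, and SHIFT is the distance of the byte
   from the least significant bit of that (big-endian) word.  */

static void
example_alignment_context (unsigned long addr, unsigned long *aligned_addr,
                           unsigned int *shift, unsigned int *mask)
{
  unsigned long byteoffset = addr & 3;   /* position inside the word       */

  *aligned_addr = addr & ~3UL;           /* SImode aligned address         */
  *shift = ((4 - 1) - byteoffset) * 8;   /* (word size - mode size
                                            - byte offset) in bits         */
  *mask = 0xffu << *shift;               /* QImode mask shifted into place */

  /* E.g. addr == 0x1002: byteoffset == 2, *shift == 8, *mask == 0xff00.  */
}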
4552
4553 /* Expand an atomic compare and swap operation for HImode and QImode. MEM is
4554 the memory location, CMP the old value to compare MEM with and NEW_RTX the value
4555 to set if CMP == MEM.
4556 CMP is never in memory for compare_and_swap_cc because
4557 expand_bool_compare_and_swap puts it into a register for later compare. */
4558
4559 void
4560 s390_expand_cs_hqi (enum machine_mode mode, rtx target, rtx mem, rtx cmp, rtx new_rtx)
4561 {
4562 struct alignment_context ac;
4563 rtx cmpv, newv, val, resv, cc;
4564 rtx res = gen_reg_rtx (SImode);
4565 rtx csloop = gen_label_rtx ();
4566 rtx csend = gen_label_rtx ();
4567
4568 gcc_assert (register_operand (target, VOIDmode));
4569 gcc_assert (MEM_P (mem));
4570
4571 init_alignment_context (&ac, mem, mode);
4572
4573 /* Shift the values to the correct bit positions. */
4574 if (!(ac.aligned && MEM_P (cmp)))
4575 cmp = s390_expand_mask_and_shift (cmp, mode, ac.shift);
4576 if (!(ac.aligned && MEM_P (new_rtx)))
4577 new_rtx = s390_expand_mask_and_shift (new_rtx, mode, ac.shift);
4578
4579 /* Load full word. Subsequent loads are performed by CS. */
4580 val = expand_simple_binop (SImode, AND, ac.memsi, ac.modemaski,
4581 NULL_RTX, 1, OPTAB_DIRECT);
4582
4583 /* Start CS loop. */
4584 emit_label (csloop);
4585 /* val = "<mem>00..0<mem>"
4586 * cmp = "00..0<cmp>00..0"
4587 * new = "00..0<new>00..0"
4588 */
4589
4590 /* Patch cmp and new with val at correct position. */
4591 if (ac.aligned && MEM_P (cmp))
4592 {
4593 cmpv = force_reg (SImode, val);
4594 store_bit_field (cmpv, GET_MODE_BITSIZE (mode), 0, SImode, cmp);
4595 }
4596 else
4597 cmpv = force_reg (SImode, expand_simple_binop (SImode, IOR, cmp, val,
4598 NULL_RTX, 1, OPTAB_DIRECT));
4599 if (ac.aligned && MEM_P (new_rtx))
4600 {
4601 newv = force_reg (SImode, val);
4602 store_bit_field (newv, GET_MODE_BITSIZE (mode), 0, SImode, new_rtx);
4603 }
4604 else
4605 newv = force_reg (SImode, expand_simple_binop (SImode, IOR, new_rtx, val,
4606 NULL_RTX, 1, OPTAB_DIRECT));
4607
4608 /* Jump to end if we're done (likely?). */
4609 s390_emit_jump (csend, s390_emit_compare_and_swap (EQ, res, ac.memsi,
4610 cmpv, newv));
4611
4612 /* Check for changes outside mode. */
4613 resv = expand_simple_binop (SImode, AND, res, ac.modemaski,
4614 NULL_RTX, 1, OPTAB_DIRECT);
4615 cc = s390_emit_compare (NE, resv, val);
4616 emit_move_insn (val, resv);
4617 /* Loop internal if so. */
4618 s390_emit_jump (csloop, cc);
4619
4620 emit_label (csend);
4621
4622 /* Return the correct part of the bitfield. */
4623 convert_move (target, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
4624 NULL_RTX, 1, OPTAB_DIRECT), 1);
4625 }
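
/* Illustrative sketch, not part of the original sources: the word-wide
   compare-and-swap loop generated above, written as plain C for a single
   byte with GCC's __sync_val_compare_and_swap standing in for the CS
   instruction.  WORD and SHIFT are assumed to come from an alignment
   context as sketched after init_alignment_context.  */

static unsigned char
example_cas_byte (unsigned int *word, unsigned int shift,
                  unsigned char expected, unsigned char desired)
{
  unsigned int mask = 0xffu << shift;
  unsigned int val = *word & ~mask;      /* surrounding bytes, field zeroed */
  unsigned int res;

  for (;;)
    {
      unsigned int cmpv = val | ((unsigned int) expected << shift);
      unsigned int newv = val | ((unsigned int) desired << shift);

      res = __sync_val_compare_and_swap (word, cmpv, newv);
      if (res == cmpv)
        break;                           /* CS succeeded                    */
      if ((res & ~mask) == val)
        break;                           /* our byte differed: CAS failed   */
      val = res & ~mask;                 /* only outside bytes changed:
                                            retry with the fresh contents   */
    }

  return (unsigned char) (res >> shift); /* previous value of the byte      */
}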
4626
4627 /* Expand an atomic operation CODE of mode MODE. MEM is the memory location
4628 and VAL the value to play with. If AFTER is true then store the value
4629 MEM holds after the operation, if AFTER is false then store the value MEM
4630 holds before the operation. If TARGET is zero then discard that value, else
4631 store it to TARGET. */
4632
4633 void
4634 s390_expand_atomic (enum machine_mode mode, enum rtx_code code,
4635 rtx target, rtx mem, rtx val, bool after)
4636 {
4637 struct alignment_context ac;
4638 rtx cmp;
4639 rtx new_rtx = gen_reg_rtx (SImode);
4640 rtx orig = gen_reg_rtx (SImode);
4641 rtx csloop = gen_label_rtx ();
4642
4643 gcc_assert (!target || register_operand (target, VOIDmode));
4644 gcc_assert (MEM_P (mem));
4645
4646 init_alignment_context (&ac, mem, mode);
4647
4648 /* Shift val to the correct bit positions.
4649 Preserve "icm", but prevent "ex icm". */
4650 if (!(ac.aligned && code == SET && MEM_P (val)))
4651 val = s390_expand_mask_and_shift (val, mode, ac.shift);
4652
4653 /* Further preparation insns. */
4654 if (code == PLUS || code == MINUS)
4655 emit_move_insn (orig, val);
4656 else if (code == MULT || code == AND) /* val = "11..1<val>11..1" */
4657 val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
4658 NULL_RTX, 1, OPTAB_DIRECT);
4659
4660 /* Load full word. Subsequent loads are performed by CS. */
4661 cmp = force_reg (SImode, ac.memsi);
4662
4663 /* Start CS loop. */
4664 emit_label (csloop);
4665 emit_move_insn (new_rtx, cmp);
4666
4667 /* Patch new with val at correct position. */
4668 switch (code)
4669 {
4670 case PLUS:
4671 case MINUS:
4672 val = expand_simple_binop (SImode, code, new_rtx, orig,
4673 NULL_RTX, 1, OPTAB_DIRECT);
4674 val = expand_simple_binop (SImode, AND, val, ac.modemask,
4675 NULL_RTX, 1, OPTAB_DIRECT);
4676 /* FALLTHRU */
4677 case SET:
4678 if (ac.aligned && MEM_P (val))
4679 store_bit_field (new_rtx, GET_MODE_BITSIZE (mode), 0, SImode, val);
4680 else
4681 {
4682 new_rtx = expand_simple_binop (SImode, AND, new_rtx, ac.modemaski,
4683 NULL_RTX, 1, OPTAB_DIRECT);
4684 new_rtx = expand_simple_binop (SImode, IOR, new_rtx, val,
4685 NULL_RTX, 1, OPTAB_DIRECT);
4686 }
4687 break;
4688 case AND:
4689 case IOR:
4690 case XOR:
4691 new_rtx = expand_simple_binop (SImode, code, new_rtx, val,
4692 NULL_RTX, 1, OPTAB_DIRECT);
4693 break;
4694 case MULT: /* NAND */
4695 new_rtx = expand_simple_binop (SImode, AND, new_rtx, val,
4696 NULL_RTX, 1, OPTAB_DIRECT);
4697 new_rtx = expand_simple_binop (SImode, XOR, new_rtx, ac.modemask,
4698 NULL_RTX, 1, OPTAB_DIRECT);
4699 break;
4700 default:
4701 gcc_unreachable ();
4702 }
4703
4704 s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, cmp,
4705 ac.memsi, cmp, new_rtx));
4706
4707 /* Return the correct part of the bitfield. */
4708 if (target)
4709 convert_move (target, expand_simple_binop (SImode, LSHIFTRT,
4710 after ? new_rtx : cmp, ac.shift,
4711 NULL_RTX, 1, OPTAB_DIRECT), 1);
4712 }
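
/* Illustrative sketch, not part of the original sources: the
   read-modify-write loop generated above for a sub-word atomic operation,
   here a byte-wide OR, again with GCC's __sync_val_compare_and_swap
   standing in for the CS instruction and a hypothetical helper name.  */

static unsigned char
example_atomic_or_byte (unsigned int *word, unsigned int shift,
                        unsigned char val, int after)
{
  unsigned int sval = (unsigned int) val << shift;  /* shifted operand */
  unsigned int cmp = *word;                         /* initial load    */
  unsigned int new_word, res;

  for (;;)
    {
      new_word = cmp | sval;                        /* patch new value */
      res = __sync_val_compare_and_swap (word, cmp, new_word);
      if (res == cmp)
        break;                                      /* CS succeeded    */
      cmp = res;                                    /* retry with the
                                                       fresh contents  */
    }

  /* Return the byte either after or before the operation, as requested.  */
  return (unsigned char) ((after ? new_word : cmp) >> shift);
}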
4713
4714 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
4715 We need to emit DTP-relative relocations. */
4716
4717 static void s390_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
4718
4719 static void
4720 s390_output_dwarf_dtprel (FILE *file, int size, rtx x)
4721 {
4722 switch (size)
4723 {
4724 case 4:
4725 fputs ("\t.long\t", file);
4726 break;
4727 case 8:
4728 fputs ("\t.quad\t", file);
4729 break;
4730 default:
4731 gcc_unreachable ();
4732 }
4733 output_addr_const (file, x);
4734 fputs ("@DTPOFF", file);
4735 }
4736
4737 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
4738 /* Implement TARGET_MANGLE_TYPE. */
4739
4740 static const char *
4741 s390_mangle_type (const_tree type)
4742 {
4743 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
4744 && TARGET_LONG_DOUBLE_128)
4745 return "g";
4746
4747 /* For all other types, use normal C++ mangling. */
4748 return NULL;
4749 }
4750 #endif
4751
4752 /* In the name of slightly smaller debug output, and to cater to
4753 general assembler lossage, recognize various UNSPEC sequences
4754 and turn them back into a direct symbol reference. */
4755
4756 static rtx
4757 s390_delegitimize_address (rtx orig_x)
4758 {
4759 rtx x = orig_x, y;
4760
4761 if (GET_CODE (x) != MEM)
4762 return orig_x;
4763
4764 x = XEXP (x, 0);
4765 if (GET_CODE (x) == PLUS
4766 && GET_CODE (XEXP (x, 1)) == CONST
4767 && GET_CODE (XEXP (x, 0)) == REG
4768 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
4769 {
4770 y = XEXP (XEXP (x, 1), 0);
4771 if (GET_CODE (y) == UNSPEC
4772 && XINT (y, 1) == UNSPEC_GOT)
4773 return XVECEXP (y, 0, 0);
4774 return orig_x;
4775 }
4776
4777 if (GET_CODE (x) == CONST)
4778 {
4779 y = XEXP (x, 0);
4780 if (GET_CODE (y) == UNSPEC
4781 && XINT (y, 1) == UNSPEC_GOTENT)
4782 return XVECEXP (y, 0, 0);
4783 return orig_x;
4784 }
4785
4786 return orig_x;
4787 }
4788
4789 /* Output operand OP to stdio stream FILE.
4790 OP is an address (register + offset) which is not used to address data;
4791 instead the rightmost bits are interpreted as the value. */
4792
4793 static void
4794 print_shift_count_operand (FILE *file, rtx op)
4795 {
4796 HOST_WIDE_INT offset;
4797 rtx base;
4798
4799 /* Extract base register and offset. */
4800 if (!s390_decompose_shift_count (op, &base, &offset))
4801 gcc_unreachable ();
4802
4803 /* Sanity check. */
4804 if (base)
4805 {
4806 gcc_assert (GET_CODE (base) == REG);
4807 gcc_assert (REGNO (base) < FIRST_PSEUDO_REGISTER);
4808 gcc_assert (REGNO_REG_CLASS (REGNO (base)) == ADDR_REGS);
4809 }
4810
4811 /* Offsets are restricted to twelve bits. */
4812 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset & ((1 << 12) - 1));
4813 if (base)
4814 fprintf (file, "(%s)", reg_names[REGNO (base)]);
4815 }
4816
4817 /* See 'get_some_local_dynamic_name'. */
4818
4819 static int
4820 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
4821 {
4822 rtx x = *px;
4823
4824 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
4825 {
4826 x = get_pool_constant (x);
4827 return for_each_rtx (&x, get_some_local_dynamic_name_1, 0);
4828 }
4829
4830 if (GET_CODE (x) == SYMBOL_REF
4831 && tls_symbolic_operand (x) == TLS_MODEL_LOCAL_DYNAMIC)
4832 {
4833 cfun->machine->some_ld_name = XSTR (x, 0);
4834 return 1;
4835 }
4836
4837 return 0;
4838 }
4839
4840 /* Locate some local-dynamic symbol still in use by this function
4841 so that we can print its name in local-dynamic base patterns. */
4842
4843 static const char *
4844 get_some_local_dynamic_name (void)
4845 {
4846 rtx insn;
4847
4848 if (cfun->machine->some_ld_name)
4849 return cfun->machine->some_ld_name;
4850
4851 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
4852 if (INSN_P (insn)
4853 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
4854 return cfun->machine->some_ld_name;
4855
4856 gcc_unreachable ();
4857 }
4858
4859 /* Output machine-dependent UNSPECs occurring in address constant X
4860 in assembler syntax to stdio stream FILE. Returns true if the
4861 constant X could be recognized, false otherwise. */
4862
4863 bool
4864 s390_output_addr_const_extra (FILE *file, rtx x)
4865 {
4866 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
4867 switch (XINT (x, 1))
4868 {
4869 case UNSPEC_GOTENT:
4870 output_addr_const (file, XVECEXP (x, 0, 0));
4871 fprintf (file, "@GOTENT");
4872 return true;
4873 case UNSPEC_GOT:
4874 output_addr_const (file, XVECEXP (x, 0, 0));
4875 fprintf (file, "@GOT");
4876 return true;
4877 case UNSPEC_GOTOFF:
4878 output_addr_const (file, XVECEXP (x, 0, 0));
4879 fprintf (file, "@GOTOFF");
4880 return true;
4881 case UNSPEC_PLT:
4882 output_addr_const (file, XVECEXP (x, 0, 0));
4883 fprintf (file, "@PLT");
4884 return true;
4885 case UNSPEC_PLTOFF:
4886 output_addr_const (file, XVECEXP (x, 0, 0));
4887 fprintf (file, "@PLTOFF");
4888 return true;
4889 case UNSPEC_TLSGD:
4890 output_addr_const (file, XVECEXP (x, 0, 0));
4891 fprintf (file, "@TLSGD");
4892 return true;
4893 case UNSPEC_TLSLDM:
4894 assemble_name (file, get_some_local_dynamic_name ());
4895 fprintf (file, "@TLSLDM");
4896 return true;
4897 case UNSPEC_DTPOFF:
4898 output_addr_const (file, XVECEXP (x, 0, 0));
4899 fprintf (file, "@DTPOFF");
4900 return true;
4901 case UNSPEC_NTPOFF:
4902 output_addr_const (file, XVECEXP (x, 0, 0));
4903 fprintf (file, "@NTPOFF");
4904 return true;
4905 case UNSPEC_GOTNTPOFF:
4906 output_addr_const (file, XVECEXP (x, 0, 0));
4907 fprintf (file, "@GOTNTPOFF");
4908 return true;
4909 case UNSPEC_INDNTPOFF:
4910 output_addr_const (file, XVECEXP (x, 0, 0));
4911 fprintf (file, "@INDNTPOFF");
4912 return true;
4913 }
4914
4915 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 2)
4916 switch (XINT (x, 1))
4917 {
4918 case UNSPEC_POOL_OFFSET:
4919 x = gen_rtx_MINUS (GET_MODE (x), XVECEXP (x, 0, 0), XVECEXP (x, 0, 1));
4920 output_addr_const (file, x);
4921 return true;
4922 }
4923 return false;
4924 }
4925
4926 /* Output address operand ADDR in assembler syntax to
4927 stdio stream FILE. */
4928
4929 void
4930 print_operand_address (FILE *file, rtx addr)
4931 {
4932 struct s390_address ad;
4933
4934 if (s390_symref_operand_p (addr, NULL, NULL))
4935 {
4936 gcc_assert (TARGET_Z10);
4937 output_addr_const (file, addr);
4938 return;
4939 }
4940
4941 if (!s390_decompose_address (addr, &ad)
4942 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
4943 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
4944 output_operand_lossage ("cannot decompose address");
4945
4946 if (ad.disp)
4947 output_addr_const (file, ad.disp);
4948 else
4949 fprintf (file, "0");
4950
4951 if (ad.base && ad.indx)
4952 fprintf (file, "(%s,%s)", reg_names[REGNO (ad.indx)],
4953 reg_names[REGNO (ad.base)]);
4954 else if (ad.base)
4955 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
4956 }
4957
4958 /* Output operand X in assembler syntax to stdio stream FILE.
4959 CODE specified the format flag. The following format flags
4960 are recognized:
4961
4962 'C': print opcode suffix for branch condition.
4963 'D': print opcode suffix for inverse branch condition.
4964 'E': print opcode suffix for branch on index instruction.
4965 'J': print tls_load/tls_gdcall/tls_ldcall suffix
4966 'G': print the size of the operand in bytes.
4967 'O': print only the displacement of a memory reference.
4968 'R': print only the base register of a memory reference.
4969 'S': print S-type memory reference (base+displacement).
4970 'N': print the second word of a DImode operand.
4971 'M': print the second word of a TImode operand.
4972 'Y': print shift count operand.
4973
4974 'b': print integer X as if it's an unsigned byte.
4975 'c': print integer X as if it's a signed byte.
4976 'x': print integer X as if it's an unsigned halfword.
4977 'h': print integer X as if it's a signed halfword.
4978 'i': print the first nonzero HImode part of X.
4979 'j': print the first HImode part unequal to -1 of X.
4980 'k': print the first nonzero SImode part of X.
4981 'm': print the first SImode part unequal to -1 of X.
4982 'o': print integer X as if it's an unsigned 32-bit word. */
4983
4984 void
4985 print_operand (FILE *file, rtx x, int code)
4986 {
4987 switch (code)
4988 {
4989 case 'C':
4990 fprintf (file, s390_branch_condition_mnemonic (x, FALSE));
4991 return;
4992
4993 case 'D':
4994 fprintf (file, s390_branch_condition_mnemonic (x, TRUE));
4995 return;
4996
4997 case 'E':
4998 if (GET_CODE (x) == LE)
4999 fprintf (file, "l");
5000 else if (GET_CODE (x) == GT)
5001 fprintf (file, "h");
5002 else
5003 gcc_unreachable ();
5004 return;
5005
5006 case 'J':
5007 if (GET_CODE (x) == SYMBOL_REF)
5008 {
5009 fprintf (file, "%s", ":tls_load:");
5010 output_addr_const (file, x);
5011 }
5012 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
5013 {
5014 fprintf (file, "%s", ":tls_gdcall:");
5015 output_addr_const (file, XVECEXP (x, 0, 0));
5016 }
5017 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM)
5018 {
5019 fprintf (file, "%s", ":tls_ldcall:");
5020 assemble_name (file, get_some_local_dynamic_name ());
5021 }
5022 else
5023 gcc_unreachable ();
5024 return;
5025
5026 case 'G':
5027 fprintf (file, "%u", GET_MODE_SIZE (GET_MODE (x)));
5028 return;
5029
5030 case 'O':
5031 {
5032 struct s390_address ad;
5033 int ret;
5034
5035 gcc_assert (GET_CODE (x) == MEM);
5036 ret = s390_decompose_address (XEXP (x, 0), &ad);
5037 gcc_assert (ret);
5038 gcc_assert (!ad.base || REGNO_OK_FOR_BASE_P (REGNO (ad.base)));
5039 gcc_assert (!ad.indx);
5040
5041 if (ad.disp)
5042 output_addr_const (file, ad.disp);
5043 else
5044 fprintf (file, "0");
5045 }
5046 return;
5047
5048 case 'R':
5049 {
5050 struct s390_address ad;
5051 int ret;
5052
5053 gcc_assert (GET_CODE (x) == MEM);
5054 ret = s390_decompose_address (XEXP (x, 0), &ad);
5055 gcc_assert (ret);
5056 gcc_assert (!ad.base || REGNO_OK_FOR_BASE_P (REGNO (ad.base)));
5057 gcc_assert (!ad.indx);
5058
5059 if (ad.base)
5060 fprintf (file, "%s", reg_names[REGNO (ad.base)]);
5061 else
5062 fprintf (file, "0");
5063 }
5064 return;
5065
5066 case 'S':
5067 {
5068 struct s390_address ad;
5069 int ret;
5070
5071 gcc_assert (GET_CODE (x) == MEM);
5072 ret = s390_decompose_address (XEXP (x, 0), &ad);
5073 gcc_assert (ret);
5074 gcc_assert (!ad.base || REGNO_OK_FOR_BASE_P (REGNO (ad.base)));
5075 gcc_assert (!ad.indx);
5076
5077 if (ad.disp)
5078 output_addr_const (file, ad.disp);
5079 else
5080 fprintf (file, "0");
5081
5082 if (ad.base)
5083 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
5084 }
5085 return;
5086
5087 case 'N':
5088 if (GET_CODE (x) == REG)
5089 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
5090 else if (GET_CODE (x) == MEM)
5091 x = change_address (x, VOIDmode, plus_constant (XEXP (x, 0), 4));
5092 else
5093 gcc_unreachable ();
5094 break;
5095
5096 case 'M':
5097 if (GET_CODE (x) == REG)
5098 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
5099 else if (GET_CODE (x) == MEM)
5100 x = change_address (x, VOIDmode, plus_constant (XEXP (x, 0), 8));
5101 else
5102 gcc_unreachable ();
5103 break;
5104
5105 case 'Y':
5106 print_shift_count_operand (file, x);
5107 return;
5108 }
5109
5110 switch (GET_CODE (x))
5111 {
5112 case REG:
5113 fprintf (file, "%s", reg_names[REGNO (x)]);
5114 break;
5115
5116 case MEM:
5117 output_address (XEXP (x, 0));
5118 break;
5119
5120 case CONST:
5121 case CODE_LABEL:
5122 case LABEL_REF:
5123 case SYMBOL_REF:
5124 output_addr_const (file, x);
5125 break;
5126
5127 case CONST_INT:
5128 if (code == 'b')
5129 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xff);
5130 else if (code == 'c')
5131 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ((INTVAL (x) & 0xff) ^ 0x80) - 0x80);
5132 else if (code == 'x')
5133 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xffff);
5134 else if (code == 'h')
5135 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
5136 else if (code == 'i')
5137 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5138 s390_extract_part (x, HImode, 0));
5139 else if (code == 'j')
5140 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5141 s390_extract_part (x, HImode, -1));
5142 else if (code == 'k')
5143 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5144 s390_extract_part (x, SImode, 0));
5145 else if (code == 'm')
5146 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5147 s390_extract_part (x, SImode, -1));
5148 else if (code == 'o')
5149 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xffffffff);
5150 else
5151 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
5152 break;
5153
5154 case CONST_DOUBLE:
5155 gcc_assert (GET_MODE (x) == VOIDmode);
5156 if (code == 'b')
5157 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xff);
5158 else if (code == 'x')
5159 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xffff);
5160 else if (code == 'h')
5161 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ((CONST_DOUBLE_LOW (x) & 0xffff) ^ 0x8000) - 0x8000);
5162 else
5163 gcc_unreachable ();
5164 break;
5165
5166 default:
5167 fatal_insn ("UNKNOWN in print_operand !?", x);
5168 break;
5169 }
5170 }
5171
5172 /* Target hook for assembling integer objects. We need to define it
5173 here to work around a bug in some versions of GAS, which couldn't
5174 handle values smaller than INT_MIN when printed in decimal. */
5175
5176 static bool
5177 s390_assemble_integer (rtx x, unsigned int size, int aligned_p)
5178 {
5179 if (size == 8 && aligned_p
5180 && GET_CODE (x) == CONST_INT && INTVAL (x) < INT_MIN)
5181 {
5182 fprintf (asm_out_file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n",
5183 INTVAL (x));
5184 return true;
5185 }
5186 return default_assemble_integer (x, size, aligned_p);
5187 }
5188
5189 /* Returns true if register REGNO is used for forming
5190 a memory address in expression X. */
5191
5192 static bool
5193 reg_used_in_mem_p (int regno, rtx x)
5194 {
5195 enum rtx_code code = GET_CODE (x);
5196 int i, j;
5197 const char *fmt;
5198
5199 if (code == MEM)
5200 {
5201 if (refers_to_regno_p (regno, regno+1,
5202 XEXP (x, 0), 0))
5203 return true;
5204 }
5205 else if (code == SET
5206 && GET_CODE (SET_DEST (x)) == PC)
5207 {
5208 if (refers_to_regno_p (regno, regno+1,
5209 SET_SRC (x), 0))
5210 return true;
5211 }
5212
5213 fmt = GET_RTX_FORMAT (code);
5214 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5215 {
5216 if (fmt[i] == 'e'
5217 && reg_used_in_mem_p (regno, XEXP (x, i)))
5218 return true;
5219
5220 else if (fmt[i] == 'E')
5221 for (j = 0; j < XVECLEN (x, i); j++)
5222 if (reg_used_in_mem_p (regno, XVECEXP (x, i, j)))
5223 return true;
5224 }
5225 return false;
5226 }
5227
5228 /* Returns true if expression DEP_RTX sets an address register
5229 used by instruction INSN to address memory. */
5230
5231 static bool
5232 addr_generation_dependency_p (rtx dep_rtx, rtx insn)
5233 {
5234 rtx target, pat;
5235
5236 if (GET_CODE (dep_rtx) == INSN)
5237 dep_rtx = PATTERN (dep_rtx);
5238
5239 if (GET_CODE (dep_rtx) == SET)
5240 {
5241 target = SET_DEST (dep_rtx);
5242 if (GET_CODE (target) == STRICT_LOW_PART)
5243 target = XEXP (target, 0);
5244 while (GET_CODE (target) == SUBREG)
5245 target = SUBREG_REG (target);
5246
5247 if (GET_CODE (target) == REG)
5248 {
5249 int regno = REGNO (target);
5250
5251 if (s390_safe_attr_type (insn) == TYPE_LA)
5252 {
5253 pat = PATTERN (insn);
5254 if (GET_CODE (pat) == PARALLEL)
5255 {
5256 gcc_assert (XVECLEN (pat, 0) == 2);
5257 pat = XVECEXP (pat, 0, 0);
5258 }
5259 gcc_assert (GET_CODE (pat) == SET);
5260 return refers_to_regno_p (regno, regno+1, SET_SRC (pat), 0);
5261 }
5262 else if (get_attr_atype (insn) == ATYPE_AGEN)
5263 return reg_used_in_mem_p (regno, PATTERN (insn));
5264 }
5265 }
5266 return false;
5267 }
5268
5269 /* Return 1 if DEP_INSN sets a register used by INSN in the agen unit. */
5270
5271 int
5272 s390_agen_dep_p (rtx dep_insn, rtx insn)
5273 {
5274 rtx dep_rtx = PATTERN (dep_insn);
5275 int i;
5276
5277 if (GET_CODE (dep_rtx) == SET
5278 && addr_generation_dependency_p (dep_rtx, insn))
5279 return 1;
5280 else if (GET_CODE (dep_rtx) == PARALLEL)
5281 {
5282 for (i = 0; i < XVECLEN (dep_rtx, 0); i++)
5283 {
5284 if (addr_generation_dependency_p (XVECEXP (dep_rtx, 0, i), insn))
5285 return 1;
5286 }
5287 }
5288 return 0;
5289 }
5290
5291
5292 /* A C statement (sans semicolon) to update the integer scheduling priority
5293 INSN_PRIORITY (INSN). Increase the priority to execute the INSN earlier,
5294 reduce the priority to execute INSN later. Do not define this macro if
5295 you do not need to adjust the scheduling priorities of insns.
5296
5297 A STD instruction should be scheduled earlier,
5298 in order to use the bypass. */
5299
5300
5301 static int
5302 s390_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
5303 {
5304 if (! INSN_P (insn))
5305 return priority;
5306
5307 if (s390_tune != PROCESSOR_2084_Z990
5308 && s390_tune != PROCESSOR_2094_Z9_109
5309 && s390_tune != PROCESSOR_2097_Z10)
5310 return priority;
5311
5312 switch (s390_safe_attr_type (insn))
5313 {
5314 case TYPE_FSTOREDF:
5315 case TYPE_FSTORESF:
5316 priority = priority << 3;
5317 break;
5318 case TYPE_STORE:
5319 case TYPE_STM:
5320 priority = priority << 1;
5321 break;
5322 default:
5323 break;
5324 }
5325 return priority;
5326 }
5327
5328
5329 /* The number of instructions that can be issued per cycle. */
5330
5331 static int
5332 s390_issue_rate (void)
5333 {
5334 switch (s390_tune)
5335 {
5336 case PROCESSOR_2084_Z990:
5337 case PROCESSOR_2094_Z9_109:
5338 return 3;
5339 case PROCESSOR_2097_Z10:
5340 return 2;
5341 default:
5342 return 1;
5343 }
5344 }
5345
5346 static int
5347 s390_first_cycle_multipass_dfa_lookahead (void)
5348 {
5349 return 4;
5350 }
5351
5352
5353 /* Annotate every literal pool reference in X by an UNSPEC_LTREF expression.
5354 Fix up MEMs as required. */
5355
5356 static void
5357 annotate_constant_pool_refs (rtx *x)
5358 {
5359 int i, j;
5360 const char *fmt;
5361
5362 gcc_assert (GET_CODE (*x) != SYMBOL_REF
5363 || !CONSTANT_POOL_ADDRESS_P (*x));
5364
5365 /* Literal pool references can only occur inside a MEM ... */
5366 if (GET_CODE (*x) == MEM)
5367 {
5368 rtx memref = XEXP (*x, 0);
5369
5370 if (GET_CODE (memref) == SYMBOL_REF
5371 && CONSTANT_POOL_ADDRESS_P (memref))
5372 {
5373 rtx base = cfun->machine->base_reg;
5374 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, memref, base),
5375 UNSPEC_LTREF);
5376
5377 *x = replace_equiv_address (*x, addr);
5378 return;
5379 }
5380
5381 if (GET_CODE (memref) == CONST
5382 && GET_CODE (XEXP (memref, 0)) == PLUS
5383 && GET_CODE (XEXP (XEXP (memref, 0), 1)) == CONST_INT
5384 && GET_CODE (XEXP (XEXP (memref, 0), 0)) == SYMBOL_REF
5385 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (memref, 0), 0)))
5386 {
5387 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (memref, 0), 1));
5388 rtx sym = XEXP (XEXP (memref, 0), 0);
5389 rtx base = cfun->machine->base_reg;
5390 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5391 UNSPEC_LTREF);
5392
5393 *x = replace_equiv_address (*x, plus_constant (addr, off));
5394 return;
5395 }
5396 }
5397
5398 /* ... or a load-address type pattern. */
5399 if (GET_CODE (*x) == SET)
5400 {
5401 rtx addrref = SET_SRC (*x);
5402
5403 if (GET_CODE (addrref) == SYMBOL_REF
5404 && CONSTANT_POOL_ADDRESS_P (addrref))
5405 {
5406 rtx base = cfun->machine->base_reg;
5407 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addrref, base),
5408 UNSPEC_LTREF);
5409
5410 SET_SRC (*x) = addr;
5411 return;
5412 }
5413
5414 if (GET_CODE (addrref) == CONST
5415 && GET_CODE (XEXP (addrref, 0)) == PLUS
5416 && GET_CODE (XEXP (XEXP (addrref, 0), 1)) == CONST_INT
5417 && GET_CODE (XEXP (XEXP (addrref, 0), 0)) == SYMBOL_REF
5418 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (addrref, 0), 0)))
5419 {
5420 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (addrref, 0), 1));
5421 rtx sym = XEXP (XEXP (addrref, 0), 0);
5422 rtx base = cfun->machine->base_reg;
5423 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5424 UNSPEC_LTREF);
5425
5426 SET_SRC (*x) = plus_constant (addr, off);
5427 return;
5428 }
5429 }
5430
5431 /* Annotate LTREL_BASE as well. */
5432 if (GET_CODE (*x) == UNSPEC
5433 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
5434 {
5435 rtx base = cfun->machine->base_reg;
5436 *x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XVECEXP (*x, 0, 0), base),
5437 UNSPEC_LTREL_BASE);
5438 return;
5439 }
5440
5441 fmt = GET_RTX_FORMAT (GET_CODE (*x));
5442 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
5443 {
5444 if (fmt[i] == 'e')
5445 {
5446 annotate_constant_pool_refs (&XEXP (*x, i));
5447 }
5448 else if (fmt[i] == 'E')
5449 {
5450 for (j = 0; j < XVECLEN (*x, i); j++)
5451 annotate_constant_pool_refs (&XVECEXP (*x, i, j));
5452 }
5453 }
5454 }
5455
5456 /* Split all branches that exceed the maximum distance.
5457 Returns true if this created a new literal pool entry. */
5458
5459 static int
5460 s390_split_branches (void)
5461 {
5462 rtx temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
5463 int new_literal = 0, ret;
5464 rtx insn, pat, tmp, target;
5465 rtx *label;
5466
5467 /* We need correct insn addresses. */
5468
5469 shorten_branches (get_insns ());
5470
5471 /* Find all branches that exceed 64KB, and split them. */
5472
5473 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5474 {
5475 if (GET_CODE (insn) != JUMP_INSN)
5476 continue;
5477
5478 pat = PATTERN (insn);
5479 if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 2)
5480 pat = XVECEXP (pat, 0, 0);
5481 if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
5482 continue;
5483
5484 if (GET_CODE (SET_SRC (pat)) == LABEL_REF)
5485 {
5486 label = &SET_SRC (pat);
5487 }
5488 else if (GET_CODE (SET_SRC (pat)) == IF_THEN_ELSE)
5489 {
5490 if (GET_CODE (XEXP (SET_SRC (pat), 1)) == LABEL_REF)
5491 label = &XEXP (SET_SRC (pat), 1);
5492 else if (GET_CODE (XEXP (SET_SRC (pat), 2)) == LABEL_REF)
5493 label = &XEXP (SET_SRC (pat), 2);
5494 else
5495 continue;
5496 }
5497 else
5498 continue;
5499
5500 if (get_attr_length (insn) <= 4)
5501 continue;
5502
5503 /* We are going to use the return register as scratch register,
5504 make sure it will be saved/restored by the prologue/epilogue. */
5505 cfun_frame_layout.save_return_addr_p = 1;
5506
5507 if (!flag_pic)
5508 {
5509 new_literal = 1;
5510 tmp = force_const_mem (Pmode, *label);
5511 tmp = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, tmp), insn);
5512 INSN_ADDRESSES_NEW (tmp, -1);
5513 annotate_constant_pool_refs (&PATTERN (tmp));
5514
5515 target = temp_reg;
5516 }
5517 else
5518 {
5519 new_literal = 1;
5520 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, *label),
5521 UNSPEC_LTREL_OFFSET);
5522 target = gen_rtx_CONST (Pmode, target);
5523 target = force_const_mem (Pmode, target);
5524 tmp = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, target), insn);
5525 INSN_ADDRESSES_NEW (tmp, -1);
5526 annotate_constant_pool_refs (&PATTERN (tmp));
5527
5528 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XEXP (target, 0),
5529 cfun->machine->base_reg),
5530 UNSPEC_LTREL_BASE);
5531 target = gen_rtx_PLUS (Pmode, temp_reg, target);
5532 }
5533
5534 ret = validate_change (insn, label, target, 0);
5535 gcc_assert (ret);
5536 }
5537
5538 return new_literal;
5539 }
5540
5541
5542 /* Find an annotated literal pool symbol referenced in RTX X,
5543 and store it at REF. Will abort if X contains references to
5544 more than one such pool symbol; multiple references to the same
5545 symbol are allowed, however.
5546
5547 The rtx pointed to by REF must be initialized to NULL_RTX
5548 by the caller before calling this routine. */
5549
5550 static void
5551 find_constant_pool_ref (rtx x, rtx *ref)
5552 {
5553 int i, j;
5554 const char *fmt;
5555
5556 /* Ignore LTREL_BASE references. */
5557 if (GET_CODE (x) == UNSPEC
5558 && XINT (x, 1) == UNSPEC_LTREL_BASE)
5559 return;
5560 /* Likewise POOL_ENTRY insns. */
5561 if (GET_CODE (x) == UNSPEC_VOLATILE
5562 && XINT (x, 1) == UNSPECV_POOL_ENTRY)
5563 return;
5564
5565 gcc_assert (GET_CODE (x) != SYMBOL_REF
5566 || !CONSTANT_POOL_ADDRESS_P (x));
5567
5568 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_LTREF)
5569 {
5570 rtx sym = XVECEXP (x, 0, 0);
5571 gcc_assert (GET_CODE (sym) == SYMBOL_REF
5572 && CONSTANT_POOL_ADDRESS_P (sym));
5573
5574 if (*ref == NULL_RTX)
5575 *ref = sym;
5576 else
5577 gcc_assert (*ref == sym);
5578
5579 return;
5580 }
5581
5582 fmt = GET_RTX_FORMAT (GET_CODE (x));
5583 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5584 {
5585 if (fmt[i] == 'e')
5586 {
5587 find_constant_pool_ref (XEXP (x, i), ref);
5588 }
5589 else if (fmt[i] == 'E')
5590 {
5591 for (j = 0; j < XVECLEN (x, i); j++)
5592 find_constant_pool_ref (XVECEXP (x, i, j), ref);
5593 }
5594 }
5595 }
5596
5597 /* Replace every reference to the annotated literal pool
5598 symbol REF in X by its base plus OFFSET. */
5599
5600 static void
5601 replace_constant_pool_ref (rtx *x, rtx ref, rtx offset)
5602 {
5603 int i, j;
5604 const char *fmt;
5605
5606 gcc_assert (*x != ref);
5607
5608 if (GET_CODE (*x) == UNSPEC
5609 && XINT (*x, 1) == UNSPEC_LTREF
5610 && XVECEXP (*x, 0, 0) == ref)
5611 {
5612 *x = gen_rtx_PLUS (Pmode, XVECEXP (*x, 0, 1), offset);
5613 return;
5614 }
5615
5616 if (GET_CODE (*x) == PLUS
5617 && GET_CODE (XEXP (*x, 1)) == CONST_INT
5618 && GET_CODE (XEXP (*x, 0)) == UNSPEC
5619 && XINT (XEXP (*x, 0), 1) == UNSPEC_LTREF
5620 && XVECEXP (XEXP (*x, 0), 0, 0) == ref)
5621 {
5622 rtx addr = gen_rtx_PLUS (Pmode, XVECEXP (XEXP (*x, 0), 0, 1), offset);
5623 *x = plus_constant (addr, INTVAL (XEXP (*x, 1)));
5624 return;
5625 }
5626
5627 fmt = GET_RTX_FORMAT (GET_CODE (*x));
5628 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
5629 {
5630 if (fmt[i] == 'e')
5631 {
5632 replace_constant_pool_ref (&XEXP (*x, i), ref, offset);
5633 }
5634 else if (fmt[i] == 'E')
5635 {
5636 for (j = 0; j < XVECLEN (*x, i); j++)
5637 replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, offset);
5638 }
5639 }
5640 }
5641
5642 /* Check whether X contains an UNSPEC_LTREL_BASE.
5643 Return its constant pool symbol if found, NULL_RTX otherwise. */
5644
5645 static rtx
5646 find_ltrel_base (rtx x)
5647 {
5648 int i, j;
5649 const char *fmt;
5650
5651 if (GET_CODE (x) == UNSPEC
5652 && XINT (x, 1) == UNSPEC_LTREL_BASE)
5653 return XVECEXP (x, 0, 0);
5654
5655 fmt = GET_RTX_FORMAT (GET_CODE (x));
5656 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5657 {
5658 if (fmt[i] == 'e')
5659 {
5660 rtx fnd = find_ltrel_base (XEXP (x, i));
5661 if (fnd)
5662 return fnd;
5663 }
5664 else if (fmt[i] == 'E')
5665 {
5666 for (j = 0; j < XVECLEN (x, i); j++)
5667 {
5668 rtx fnd = find_ltrel_base (XVECEXP (x, i, j));
5669 if (fnd)
5670 return fnd;
5671 }
5672 }
5673 }
5674
5675 return NULL_RTX;
5676 }
5677
5678 /* Replace any occurrence of UNSPEC_LTREL_BASE in X with its base. */
5679
5680 static void
5681 replace_ltrel_base (rtx *x)
5682 {
5683 int i, j;
5684 const char *fmt;
5685
5686 if (GET_CODE (*x) == UNSPEC
5687 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
5688 {
5689 *x = XVECEXP (*x, 0, 1);
5690 return;
5691 }
5692
5693 fmt = GET_RTX_FORMAT (GET_CODE (*x));
5694 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
5695 {
5696 if (fmt[i] == 'e')
5697 {
5698 replace_ltrel_base (&XEXP (*x, i));
5699 }
5700 else if (fmt[i] == 'E')
5701 {
5702 for (j = 0; j < XVECLEN (*x, i); j++)
5703 replace_ltrel_base (&XVECEXP (*x, i, j));
5704 }
5705 }
5706 }
5707
5708
5709 /* We keep a list of constants which we have to add to internal
5710 constant tables in the middle of large functions. */
5711
5712 #define NR_C_MODES 11
5713 enum machine_mode constant_modes[NR_C_MODES] =
5714 {
5715 TFmode, TImode, TDmode,
5716 DFmode, DImode, DDmode,
5717 SFmode, SImode, SDmode,
5718 HImode,
5719 QImode
5720 };
5721
5722 struct constant
5723 {
5724 struct constant *next;
5725 rtx value;
5726 rtx label;
5727 };
5728
5729 struct constant_pool
5730 {
5731 struct constant_pool *next;
5732 rtx first_insn;
5733 rtx pool_insn;
5734 bitmap insns;
5735 rtx emit_pool_after;
5736
5737 struct constant *constants[NR_C_MODES];
5738 struct constant *execute;
5739 rtx label;
5740 int size;
5741 };
5742
5743 /* Allocate new constant_pool structure. */
5744
5745 static struct constant_pool *
5746 s390_alloc_pool (void)
5747 {
5748 struct constant_pool *pool;
5749 int i;
5750
5751 pool = (struct constant_pool *) xmalloc (sizeof *pool);
5752 pool->next = NULL;
5753 for (i = 0; i < NR_C_MODES; i++)
5754 pool->constants[i] = NULL;
5755
5756 pool->execute = NULL;
5757 pool->label = gen_label_rtx ();
5758 pool->first_insn = NULL_RTX;
5759 pool->pool_insn = NULL_RTX;
5760 pool->insns = BITMAP_ALLOC (NULL);
5761 pool->size = 0;
5762 pool->emit_pool_after = NULL_RTX;
5763
5764 return pool;
5765 }
5766
5767 /* Create new constant pool covering instructions starting at INSN
5768 and chain it to the end of POOL_LIST. */
5769
5770 static struct constant_pool *
5771 s390_start_pool (struct constant_pool **pool_list, rtx insn)
5772 {
5773 struct constant_pool *pool, **prev;
5774
5775 pool = s390_alloc_pool ();
5776 pool->first_insn = insn;
5777
5778 for (prev = pool_list; *prev; prev = &(*prev)->next)
5779 ;
5780 *prev = pool;
5781
5782 return pool;
5783 }
5784
5785 /* End range of instructions covered by POOL at INSN and emit
5786 placeholder insn representing the pool. */
5787
5788 static void
5789 s390_end_pool (struct constant_pool *pool, rtx insn)
5790 {
5791 rtx pool_size = GEN_INT (pool->size + 8 /* alignment slop */);
5792
5793 if (!insn)
5794 insn = get_last_insn ();
5795
5796 pool->pool_insn = emit_insn_after (gen_pool (pool_size), insn);
5797 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
5798 }
5799
5800 /* Add INSN to the list of insns covered by POOL. */
5801
5802 static void
5803 s390_add_pool_insn (struct constant_pool *pool, rtx insn)
5804 {
5805 bitmap_set_bit (pool->insns, INSN_UID (insn));
5806 }
5807
5808 /* Return pool out of POOL_LIST that covers INSN. */
5809
5810 static struct constant_pool *
5811 s390_find_pool (struct constant_pool *pool_list, rtx insn)
5812 {
5813 struct constant_pool *pool;
5814
5815 for (pool = pool_list; pool; pool = pool->next)
5816 if (bitmap_bit_p (pool->insns, INSN_UID (insn)))
5817 break;
5818
5819 return pool;
5820 }
5821
5822 /* Add constant VAL of mode MODE to the constant pool POOL. */
5823
5824 static void
5825 s390_add_constant (struct constant_pool *pool, rtx val, enum machine_mode mode)
5826 {
5827 struct constant *c;
5828 int i;
5829
5830 for (i = 0; i < NR_C_MODES; i++)
5831 if (constant_modes[i] == mode)
5832 break;
5833 gcc_assert (i != NR_C_MODES);
5834
5835 for (c = pool->constants[i]; c != NULL; c = c->next)
5836 if (rtx_equal_p (val, c->value))
5837 break;
5838
5839 if (c == NULL)
5840 {
5841 c = (struct constant *) xmalloc (sizeof *c);
5842 c->value = val;
5843 c->label = gen_label_rtx ();
5844 c->next = pool->constants[i];
5845 pool->constants[i] = c;
5846 pool->size += GET_MODE_SIZE (mode);
5847 }
5848 }
5849
5850 /* Return an rtx that represents the offset of X from the start of
5851 pool POOL. */
5852
5853 static rtx
5854 s390_pool_offset (struct constant_pool *pool, rtx x)
5855 {
5856 rtx label;
5857
5858 label = gen_rtx_LABEL_REF (GET_MODE (x), pool->label);
5859 x = gen_rtx_UNSPEC (GET_MODE (x), gen_rtvec (2, x, label),
5860 UNSPEC_POOL_OFFSET);
5861 return gen_rtx_CONST (GET_MODE (x), x);
5862 }
5863
5864 /* Find constant VAL of mode MODE in the constant pool POOL.
5865 Return an RTX describing the distance from the start of
5866 the pool to the location of the new constant. */
5867
5868 static rtx
5869 s390_find_constant (struct constant_pool *pool, rtx val,
5870 enum machine_mode mode)
5871 {
5872 struct constant *c;
5873 int i;
5874
5875 for (i = 0; i < NR_C_MODES; i++)
5876 if (constant_modes[i] == mode)
5877 break;
5878 gcc_assert (i != NR_C_MODES);
5879
5880 for (c = pool->constants[i]; c != NULL; c = c->next)
5881 if (rtx_equal_p (val, c->value))
5882 break;
5883
5884 gcc_assert (c);
5885
5886 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
5887 }
5888
5889 /* Check whether INSN is an execute. Return the label_ref to its
5890 execute target template if so, NULL_RTX otherwise. */
5891
5892 static rtx
5893 s390_execute_label (rtx insn)
5894 {
5895 if (GET_CODE (insn) == INSN
5896 && GET_CODE (PATTERN (insn)) == PARALLEL
5897 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == UNSPEC
5898 && XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_EXECUTE)
5899 return XVECEXP (XVECEXP (PATTERN (insn), 0, 0), 0, 2);
5900
5901 return NULL_RTX;
5902 }
5903
5904 /* Add execute target for INSN to the constant pool POOL. */
5905
5906 static void
5907 s390_add_execute (struct constant_pool *pool, rtx insn)
5908 {
5909 struct constant *c;
5910
5911 for (c = pool->execute; c != NULL; c = c->next)
5912 if (INSN_UID (insn) == INSN_UID (c->value))
5913 break;
5914
5915 if (c == NULL)
5916 {
5917 c = (struct constant *) xmalloc (sizeof *c);
5918 c->value = insn;
5919 c->label = gen_label_rtx ();
5920 c->next = pool->execute;
5921 pool->execute = c;
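	  /* 6 bytes is the longest s390 instruction, so this conservatively
	     reserves enough pool space for any execute target template.  */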
5922 pool->size += 6;
5923 }
5924 }
5925
5926 /* Find execute target for INSN in the constant pool POOL.
5927 Return an RTX describing the distance from the start of
5928 the pool to the location of the execute target. */
5929
5930 static rtx
5931 s390_find_execute (struct constant_pool *pool, rtx insn)
5932 {
5933 struct constant *c;
5934
5935 for (c = pool->execute; c != NULL; c = c->next)
5936 if (INSN_UID (insn) == INSN_UID (c->value))
5937 break;
5938
5939 gcc_assert (c);
5940
5941 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
5942 }
5943
5944 /* For an execute INSN, extract the execute target template. */
5945
5946 static rtx
5947 s390_execute_target (rtx insn)
5948 {
5949 rtx pattern = PATTERN (insn);
5950 gcc_assert (s390_execute_label (insn));
5951
5952 if (XVECLEN (pattern, 0) == 2)
5953 {
5954 pattern = copy_rtx (XVECEXP (pattern, 0, 1));
5955 }
5956 else
5957 {
5958 rtvec vec = rtvec_alloc (XVECLEN (pattern, 0) - 1);
5959 int i;
5960
5961 for (i = 0; i < XVECLEN (pattern, 0) - 1; i++)
5962 RTVEC_ELT (vec, i) = copy_rtx (XVECEXP (pattern, 0, i + 1));
5963
5964 pattern = gen_rtx_PARALLEL (VOIDmode, vec);
5965 }
5966
5967 return pattern;
5968 }
5969
5970 /* Indicate that INSN cannot be duplicated. This is the case for
5971 execute insns that carry a unique label. */
5972
5973 static bool
5974 s390_cannot_copy_insn_p (rtx insn)
5975 {
5976 rtx label = s390_execute_label (insn);
5977 return label && label != const0_rtx;
5978 }
5979
5980 /* Dump out the constants in POOL. If REMOTE_LABEL is true,
5981 do not emit the pool base label. */
5982
5983 static void
5984 s390_dump_pool (struct constant_pool *pool, bool remote_label)
5985 {
5986 struct constant *c;
5987 rtx insn = pool->pool_insn;
5988 int i;
5989
5990 /* Switch to rodata section. */
5991 if (TARGET_CPU_ZARCH)
5992 {
5993 insn = emit_insn_after (gen_pool_section_start (), insn);
5994 INSN_ADDRESSES_NEW (insn, -1);
5995 }
5996
5997 /* Ensure minimum pool alignment. */
5998 if (TARGET_CPU_ZARCH)
5999 insn = emit_insn_after (gen_pool_align (GEN_INT (8)), insn);
6000 else
6001 insn = emit_insn_after (gen_pool_align (GEN_INT (4)), insn);
6002 INSN_ADDRESSES_NEW (insn, -1);
6003
6004 /* Emit pool base label. */
6005 if (!remote_label)
6006 {
6007 insn = emit_label_after (pool->label, insn);
6008 INSN_ADDRESSES_NEW (insn, -1);
6009 }
6010
6011 /* Dump constants in descending alignment requirement order,
6012 ensuring proper alignment for every constant. */
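  /* This relies on constant_modes[] being ordered from modes with the
     largest alignment requirement down to the smallest.  */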
6013 for (i = 0; i < NR_C_MODES; i++)
6014 for (c = pool->constants[i]; c; c = c->next)
6015 {
6016 /* Convert UNSPEC_LTREL_OFFSET unspecs to pool-relative references. */
6017 rtx value = copy_rtx (c->value);
6018 if (GET_CODE (value) == CONST
6019 && GET_CODE (XEXP (value, 0)) == UNSPEC
6020 && XINT (XEXP (value, 0), 1) == UNSPEC_LTREL_OFFSET
6021 && XVECLEN (XEXP (value, 0), 0) == 1)
6022 value = s390_pool_offset (pool, XVECEXP (XEXP (value, 0), 0, 0));
6023
6024 insn = emit_label_after (c->label, insn);
6025 INSN_ADDRESSES_NEW (insn, -1);
6026
6027 value = gen_rtx_UNSPEC_VOLATILE (constant_modes[i],
6028 gen_rtvec (1, value),
6029 UNSPECV_POOL_ENTRY);
6030 insn = emit_insn_after (value, insn);
6031 INSN_ADDRESSES_NEW (insn, -1);
6032 }
6033
6034 /* Ensure minimum alignment for instructions. */
6035 insn = emit_insn_after (gen_pool_align (GEN_INT (2)), insn);
6036 INSN_ADDRESSES_NEW (insn, -1);
6037
6038 /* Output in-pool execute template insns. */
6039 for (c = pool->execute; c; c = c->next)
6040 {
6041 insn = emit_label_after (c->label, insn);
6042 INSN_ADDRESSES_NEW (insn, -1);
6043
6044 insn = emit_insn_after (s390_execute_target (c->value), insn);
6045 INSN_ADDRESSES_NEW (insn, -1);
6046 }
6047
6048 /* Switch back to previous section. */
6049 if (TARGET_CPU_ZARCH)
6050 {
6051 insn = emit_insn_after (gen_pool_section_end (), insn);
6052 INSN_ADDRESSES_NEW (insn, -1);
6053 }
6054
6055 insn = emit_barrier_after (insn);
6056 INSN_ADDRESSES_NEW (insn, -1);
6057
6058 /* Remove placeholder insn. */
6059 remove_insn (pool->pool_insn);
6060 }
6061
6062 /* Free all memory used by POOL. */
6063
6064 static void
6065 s390_free_pool (struct constant_pool *pool)
6066 {
6067 struct constant *c, *next;
6068 int i;
6069
6070 for (i = 0; i < NR_C_MODES; i++)
6071 for (c = pool->constants[i]; c; c = next)
6072 {
6073 next = c->next;
6074 free (c);
6075 }
6076
6077 for (c = pool->execute; c; c = next)
6078 {
6079 next = c->next;
6080 free (c);
6081 }
6082
6083 BITMAP_FREE (pool->insns);
6084 free (pool);
6085 }
6086
6087
6088 /* Collect main literal pool. Return NULL on overflow. */
6089
6090 static struct constant_pool *
6091 s390_mainpool_start (void)
6092 {
6093 struct constant_pool *pool;
6094 rtx insn;
6095
6096 pool = s390_alloc_pool ();
6097
6098 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6099 {
6100 if (GET_CODE (insn) == INSN
6101 && GET_CODE (PATTERN (insn)) == SET
6102 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC_VOLATILE
6103 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPECV_MAIN_POOL)
6104 {
6105 gcc_assert (!pool->pool_insn);
6106 pool->pool_insn = insn;
6107 }
6108
6109 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
6110 {
6111 s390_add_execute (pool, insn);
6112 }
6113 else if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6114 {
6115 rtx pool_ref = NULL_RTX;
6116 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6117 if (pool_ref)
6118 {
6119 rtx constant = get_pool_constant (pool_ref);
6120 enum machine_mode mode = get_pool_mode (pool_ref);
6121 s390_add_constant (pool, constant, mode);
6122 }
6123 }
6124
6125 /* If hot/cold partitioning is enabled we have to make sure that
6126 the literal pool is emitted in the same section where the
6127 initialization of the literal pool base pointer takes place.
6128 emit_pool_after is only used in the non-overflow case on non-Z
6129 CPUs, where we can emit the literal pool at the end of the
6130 function body within the text section. */
6131 if (NOTE_P (insn)
6132 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS
6133 && !pool->emit_pool_after)
6134 pool->emit_pool_after = PREV_INSN (insn);
6135 }
6136
6137 gcc_assert (pool->pool_insn || pool->size == 0);
6138
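  /* A single literal pool can only be addressed via the base register's
     12-bit unsigned displacement, i.e. within 4 KB; larger pools have to
     be split into chunks (see s390_chunkify_start).  */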
6139 if (pool->size >= 4096)
6140 {
6141 /* We're going to chunkify the pool, so remove the main
6142 pool placeholder insn. */
6143 remove_insn (pool->pool_insn);
6144
6145 s390_free_pool (pool);
6146 pool = NULL;
6147 }
6148
6149 /* If the function ends with the section where the literal pool
6150 should be emitted, set the marker to its end. */
6151 if (pool && !pool->emit_pool_after)
6152 pool->emit_pool_after = get_last_insn ();
6153
6154 return pool;
6155 }
6156
6157 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6158 Modify the current function to output the pool constants as well as
6159 the pool register setup instruction. */
6160
6161 static void
6162 s390_mainpool_finish (struct constant_pool *pool)
6163 {
6164 rtx base_reg = cfun->machine->base_reg;
6165 rtx insn;
6166
6167 /* If the pool is empty, we're done. */
6168 if (pool->size == 0)
6169 {
6170 /* We don't actually need a base register after all. */
6171 cfun->machine->base_reg = NULL_RTX;
6172
6173 if (pool->pool_insn)
6174 remove_insn (pool->pool_insn);
6175 s390_free_pool (pool);
6176 return;
6177 }
6178
6179 /* We need correct insn addresses. */
6180 shorten_branches (get_insns ());
6181
6182 /* On zSeries, we use a LARL to load the pool register. The pool is
6183 located in the .rodata section, so we emit it after the function. */
6184 if (TARGET_CPU_ZARCH)
6185 {
6186 insn = gen_main_base_64 (base_reg, pool->label);
6187 insn = emit_insn_after (insn, pool->pool_insn);
6188 INSN_ADDRESSES_NEW (insn, -1);
6189 remove_insn (pool->pool_insn);
6190
6191 insn = get_last_insn ();
6192 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6193 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6194
6195 s390_dump_pool (pool, 0);
6196 }
6197
6198 /* On S/390, if the total size of the function's code plus literal pool
6199 does not exceed 4096 bytes, we use BASR to set up a function base
6200 pointer, and emit the literal pool at the end of the function. */
6201 else if (INSN_ADDRESSES (INSN_UID (pool->emit_pool_after))
6202 + pool->size + 8 /* alignment slop */ < 4096)
6203 {
6204 insn = gen_main_base_31_small (base_reg, pool->label);
6205 insn = emit_insn_after (insn, pool->pool_insn);
6206 INSN_ADDRESSES_NEW (insn, -1);
6207 remove_insn (pool->pool_insn);
6208
6209 insn = emit_label_after (pool->label, insn);
6210 INSN_ADDRESSES_NEW (insn, -1);
6211
6212 /* emit_pool_after will be set by s390_mainpool_start to the
6213 last insn of the section where the literal pool should be
6214 emitted. */
6215 insn = pool->emit_pool_after;
6216
6217 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6218 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6219
6220 s390_dump_pool (pool, 1);
6221 }
6222
6223 /* Otherwise, we emit an inline literal pool and use BASR to branch
6224 over it, setting up the pool register at the same time. */
6225 else
6226 {
6227 rtx pool_end = gen_label_rtx ();
6228
6229 insn = gen_main_base_31_large (base_reg, pool->label, pool_end);
6230 insn = emit_insn_after (insn, pool->pool_insn);
6231 INSN_ADDRESSES_NEW (insn, -1);
6232 remove_insn (pool->pool_insn);
6233
6234 insn = emit_label_after (pool->label, insn);
6235 INSN_ADDRESSES_NEW (insn, -1);
6236
6237 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6238 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6239
6240 insn = emit_label_after (pool_end, pool->pool_insn);
6241 INSN_ADDRESSES_NEW (insn, -1);
6242
6243 s390_dump_pool (pool, 1);
6244 }
6245
6246
6247 /* Replace all literal pool references. */
6248
6249 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6250 {
6251 if (INSN_P (insn))
6252 replace_ltrel_base (&PATTERN (insn));
6253
6254 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6255 {
6256 rtx addr, pool_ref = NULL_RTX;
6257 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6258 if (pool_ref)
6259 {
6260 if (s390_execute_label (insn))
6261 addr = s390_find_execute (pool, insn);
6262 else
6263 addr = s390_find_constant (pool, get_pool_constant (pool_ref),
6264 get_pool_mode (pool_ref));
6265
6266 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
6267 INSN_CODE (insn) = -1;
6268 }
6269 }
6270 }
6271
6272
6273 /* Free the pool. */
6274 s390_free_pool (pool);
6275 }
6276
6277 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6278 We have decided we cannot use this pool, so revert all changes
6279 to the current function that were done by s390_mainpool_start. */
6280 static void
6281 s390_mainpool_cancel (struct constant_pool *pool)
6282 {
6283 /* We didn't actually change the instruction stream, so simply
6284 free the pool memory. */
6285 s390_free_pool (pool);
6286 }
6287
6288
6289 /* Chunkify the literal pool. */
6290
6291 #define S390_POOL_CHUNK_MIN 0xc00
6292 #define S390_POOL_CHUNK_MAX 0xe00
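/* Both bounds lie well below the 4 KB displacement range of the pool base
   register; the slack absorbs alignment padding and the fact that a chunk
   may keep growing until a suitable emission point (a BARRIER) is found.  */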
6293
6294 static struct constant_pool *
6295 s390_chunkify_start (void)
6296 {
6297 struct constant_pool *curr_pool = NULL, *pool_list = NULL;
6298 int extra_size = 0;
6299 bitmap far_labels;
6300 rtx pending_ltrel = NULL_RTX;
6301 rtx insn;
6302
6303 rtx (*gen_reload_base) (rtx, rtx) =
6304 TARGET_CPU_ZARCH? gen_reload_base_64 : gen_reload_base_31;
6305
6306
6307 /* We need correct insn addresses. */
6308
6309 shorten_branches (get_insns ());
6310
6311 /* Scan all insns and move literals to pool chunks. */
6312
6313 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6314 {
6315 bool section_switch_p = false;
6316
6317 /* Check for pending LTREL_BASE. */
6318 if (INSN_P (insn))
6319 {
6320 rtx ltrel_base = find_ltrel_base (PATTERN (insn));
6321 if (ltrel_base)
6322 {
6323 gcc_assert (ltrel_base == pending_ltrel);
6324 pending_ltrel = NULL_RTX;
6325 }
6326 }
6327
6328 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
6329 {
6330 if (!curr_pool)
6331 curr_pool = s390_start_pool (&pool_list, insn);
6332
6333 s390_add_execute (curr_pool, insn);
6334 s390_add_pool_insn (curr_pool, insn);
6335 }
6336 else if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6337 {
6338 rtx pool_ref = NULL_RTX;
6339 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6340 if (pool_ref)
6341 {
6342 rtx constant = get_pool_constant (pool_ref);
6343 enum machine_mode mode = get_pool_mode (pool_ref);
6344
6345 if (!curr_pool)
6346 curr_pool = s390_start_pool (&pool_list, insn);
6347
6348 s390_add_constant (curr_pool, constant, mode);
6349 s390_add_pool_insn (curr_pool, insn);
6350
6351 /* Don't split the pool chunk between a LTREL_OFFSET load
6352 and the corresponding LTREL_BASE. */
6353 if (GET_CODE (constant) == CONST
6354 && GET_CODE (XEXP (constant, 0)) == UNSPEC
6355 && XINT (XEXP (constant, 0), 1) == UNSPEC_LTREL_OFFSET)
6356 {
6357 gcc_assert (!pending_ltrel);
6358 pending_ltrel = pool_ref;
6359 }
6360 }
6361 }
6362
6363 if (GET_CODE (insn) == JUMP_INSN || GET_CODE (insn) == CODE_LABEL)
6364 {
6365 if (curr_pool)
6366 s390_add_pool_insn (curr_pool, insn);
6367 /* An LTREL_BASE must follow within the same basic block. */
6368 gcc_assert (!pending_ltrel);
6369 }
6370
6371 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
6372 section_switch_p = true;
6373
6374 if (!curr_pool
6375 || INSN_ADDRESSES_SIZE () <= (size_t) INSN_UID (insn)
6376 || INSN_ADDRESSES (INSN_UID (insn)) == -1)
6377 continue;
6378
6379 if (TARGET_CPU_ZARCH)
6380 {
6381 if (curr_pool->size < S390_POOL_CHUNK_MAX)
6382 continue;
6383
6384 s390_end_pool (curr_pool, NULL_RTX);
6385 curr_pool = NULL;
6386 }
6387 else
6388 {
6389 int chunk_size = INSN_ADDRESSES (INSN_UID (insn))
6390 - INSN_ADDRESSES (INSN_UID (curr_pool->first_insn))
6391 + extra_size;
6392
6393 /* We will later have to insert base register reload insns.
6394 Those will have an effect on code size, which we need to
6395 consider here. This calculation makes rather pessimistic
6396 worst-case assumptions. */
6397 if (GET_CODE (insn) == CODE_LABEL)
6398 extra_size += 6;
6399
6400 if (chunk_size < S390_POOL_CHUNK_MIN
6401 && curr_pool->size < S390_POOL_CHUNK_MIN
6402 && !section_switch_p)
6403 continue;
6404
6405 /* Pool chunks can only be inserted after BARRIERs ... */
6406 if (GET_CODE (insn) == BARRIER)
6407 {
6408 s390_end_pool (curr_pool, insn);
6409 curr_pool = NULL;
6410 extra_size = 0;
6411 }
6412
6413 /* ... so if we don't find one in time, create one. */
6414 else if (chunk_size > S390_POOL_CHUNK_MAX
6415 || curr_pool->size > S390_POOL_CHUNK_MAX
6416 || section_switch_p)
6417 {
6418 rtx label, jump, barrier;
6419
6420 if (!section_switch_p)
6421 {
6422 /* We can insert the barrier only after a 'real' insn. */
6423 if (GET_CODE (insn) != INSN && GET_CODE (insn) != CALL_INSN)
6424 continue;
6425 if (get_attr_length (insn) == 0)
6426 continue;
6427 /* Don't separate LTREL_BASE from the corresponding
6428 LTREL_OFFSET load. */
6429 if (pending_ltrel)
6430 continue;
6431 }
6432 else
6433 {
6434 gcc_assert (!pending_ltrel);
6435
6436 /* The old pool has to end before the section switch
6437 note in order to make it part of the current
6438 section. */
6439 insn = PREV_INSN (insn);
6440 }
6441
6442 label = gen_label_rtx ();
6443 jump = emit_jump_insn_after (gen_jump (label), insn);
6444 barrier = emit_barrier_after (jump);
6445 insn = emit_label_after (label, barrier);
6446 JUMP_LABEL (jump) = label;
6447 LABEL_NUSES (label) = 1;
6448
6449 INSN_ADDRESSES_NEW (jump, -1);
6450 INSN_ADDRESSES_NEW (barrier, -1);
6451 INSN_ADDRESSES_NEW (insn, -1);
6452
6453 s390_end_pool (curr_pool, barrier);
6454 curr_pool = NULL;
6455 extra_size = 0;
6456 }
6457 }
6458 }
6459
6460 if (curr_pool)
6461 s390_end_pool (curr_pool, NULL_RTX);
6462 gcc_assert (!pending_ltrel);
6463
6464 /* Find all labels that are branched into
6465 from an insn belonging to a different chunk. */
6466
6467 far_labels = BITMAP_ALLOC (NULL);
6468
6469 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6470 {
6471 /* Labels marked with LABEL_PRESERVE_P can be target
6472 of non-local jumps, so we have to mark them.
6473 The same holds for named labels.
6474
6475 Don't do that, however, if it is the label before
6476 a jump table. */
6477
6478 if (GET_CODE (insn) == CODE_LABEL
6479 && (LABEL_PRESERVE_P (insn) || LABEL_NAME (insn)))
6480 {
6481 rtx vec_insn = next_real_insn (insn);
6482 rtx vec_pat = vec_insn && GET_CODE (vec_insn) == JUMP_INSN ?
6483 PATTERN (vec_insn) : NULL_RTX;
6484 if (!vec_pat
6485 || !(GET_CODE (vec_pat) == ADDR_VEC
6486 || GET_CODE (vec_pat) == ADDR_DIFF_VEC))
6487 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (insn));
6488 }
6489
6490 /* If we have a direct jump (conditional or unconditional)
6491 or a casesi jump, check all potential targets. */
6492 else if (GET_CODE (insn) == JUMP_INSN)
6493 {
6494 rtx pat = PATTERN (insn);
6495 if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 2)
6496 pat = XVECEXP (pat, 0, 0);
6497
6498 if (GET_CODE (pat) == SET)
6499 {
6500 rtx label = JUMP_LABEL (insn);
6501 if (label)
6502 {
6503 if (s390_find_pool (pool_list, label)
6504 != s390_find_pool (pool_list, insn))
6505 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
6506 }
6507 }
6508 else if (GET_CODE (pat) == PARALLEL
6509 && XVECLEN (pat, 0) == 2
6510 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
6511 && GET_CODE (XVECEXP (pat, 0, 1)) == USE
6512 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == LABEL_REF)
6513 {
6514 /* Find the jump table used by this casesi jump. */
6515 rtx vec_label = XEXP (XEXP (XVECEXP (pat, 0, 1), 0), 0);
6516 rtx vec_insn = next_real_insn (vec_label);
6517 rtx vec_pat = vec_insn && GET_CODE (vec_insn) == JUMP_INSN ?
6518 PATTERN (vec_insn) : NULL_RTX;
6519 if (vec_pat
6520 && (GET_CODE (vec_pat) == ADDR_VEC
6521 || GET_CODE (vec_pat) == ADDR_DIFF_VEC))
6522 {
6523 int i, diff_p = GET_CODE (vec_pat) == ADDR_DIFF_VEC;
6524
6525 for (i = 0; i < XVECLEN (vec_pat, diff_p); i++)
6526 {
6527 rtx label = XEXP (XVECEXP (vec_pat, diff_p, i), 0);
6528
6529 if (s390_find_pool (pool_list, label)
6530 != s390_find_pool (pool_list, insn))
6531 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
6532 }
6533 }
6534 }
6535 }
6536 }
6537
6538 /* Insert base register reload insns before every pool. */
6539
6540 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
6541 {
6542 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
6543 curr_pool->label);
6544 rtx insn = curr_pool->first_insn;
6545 INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
6546 }
6547
6548 /* Insert base register reload insns at every far label. */
6549
6550 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6551 if (GET_CODE (insn) == CODE_LABEL
6552 && bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
6553 {
6554 struct constant_pool *pool = s390_find_pool (pool_list, insn);
6555 if (pool)
6556 {
6557 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
6558 pool->label);
6559 INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
6560 }
6561 }
6562
6563
6564 BITMAP_FREE (far_labels);
6565
6566
6567 /* Recompute insn addresses. */
6568
6569 init_insn_lengths ();
6570 shorten_branches (get_insns ());
6571
6572 return pool_list;
6573 }
6574
6575 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
6576 After we have decided to use this list, finish implementing
6577 all changes to the current function as required. */
6578
6579 static void
6580 s390_chunkify_finish (struct constant_pool *pool_list)
6581 {
6582 struct constant_pool *curr_pool = NULL;
6583 rtx insn;
6584
6585
6586 /* Replace all literal pool references. */
6587
6588 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6589 {
6590 if (INSN_P (insn))
6591 replace_ltrel_base (&PATTERN (insn));
6592
6593 curr_pool = s390_find_pool (pool_list, insn);
6594 if (!curr_pool)
6595 continue;
6596
6597 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6598 {
6599 rtx addr, pool_ref = NULL_RTX;
6600 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6601 if (pool_ref)
6602 {
6603 if (s390_execute_label (insn))
6604 addr = s390_find_execute (curr_pool, insn);
6605 else
6606 addr = s390_find_constant (curr_pool,
6607 get_pool_constant (pool_ref),
6608 get_pool_mode (pool_ref));
6609
6610 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
6611 INSN_CODE (insn) = -1;
6612 }
6613 }
6614 }
6615
6616 /* Dump out all literal pools. */
6617
6618 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
6619 s390_dump_pool (curr_pool, 0);
6620
6621 /* Free pool list. */
6622
6623 while (pool_list)
6624 {
6625 struct constant_pool *next = pool_list->next;
6626 s390_free_pool (pool_list);
6627 pool_list = next;
6628 }
6629 }
6630
6631 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
6632 We have decided we cannot use this list, so revert all changes
6633 to the current function that were done by s390_chunkify_start. */
6634
6635 static void
6636 s390_chunkify_cancel (struct constant_pool *pool_list)
6637 {
6638 struct constant_pool *curr_pool = NULL;
6639 rtx insn;
6640
6641 /* Remove all pool placeholder insns. */
6642
6643 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
6644 {
6645 /* Did we insert an extra barrier? Remove it. */
6646 rtx barrier = PREV_INSN (curr_pool->pool_insn);
6647 rtx jump = barrier? PREV_INSN (barrier) : NULL_RTX;
6648 rtx label = NEXT_INSN (curr_pool->pool_insn);
6649
6650 if (jump && GET_CODE (jump) == JUMP_INSN
6651 && barrier && GET_CODE (barrier) == BARRIER
6652 && label && GET_CODE (label) == CODE_LABEL
6653 && GET_CODE (PATTERN (jump)) == SET
6654 && SET_DEST (PATTERN (jump)) == pc_rtx
6655 && GET_CODE (SET_SRC (PATTERN (jump))) == LABEL_REF
6656 && XEXP (SET_SRC (PATTERN (jump)), 0) == label)
6657 {
6658 remove_insn (jump);
6659 remove_insn (barrier);
6660 remove_insn (label);
6661 }
6662
6663 remove_insn (curr_pool->pool_insn);
6664 }
6665
6666 /* Remove all base register reload insns. */
6667
6668 for (insn = get_insns (); insn; )
6669 {
6670 rtx next_insn = NEXT_INSN (insn);
6671
6672 if (GET_CODE (insn) == INSN
6673 && GET_CODE (PATTERN (insn)) == SET
6674 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
6675 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_RELOAD_BASE)
6676 remove_insn (insn);
6677
6678 insn = next_insn;
6679 }
6680
6681 /* Free pool list. */
6682
6683 while (pool_list)
6684 {
6685 struct constant_pool *next = pool_list->next;
6686 s390_free_pool (pool_list);
6687 pool_list = next;
6688 }
6689 }
6690
6691 /* Output the constant pool entry EXP in mode MODE with alignment ALIGN. */
6692
6693 void
6694 s390_output_pool_entry (rtx exp, enum machine_mode mode, unsigned int align)
6695 {
6696 REAL_VALUE_TYPE r;
6697
6698 switch (GET_MODE_CLASS (mode))
6699 {
6700 case MODE_FLOAT:
6701 case MODE_DECIMAL_FLOAT:
6702 gcc_assert (GET_CODE (exp) == CONST_DOUBLE);
6703
6704 REAL_VALUE_FROM_CONST_DOUBLE (r, exp);
6705 assemble_real (r, mode, align);
6706 break;
6707
6708 case MODE_INT:
6709 assemble_integer (exp, GET_MODE_SIZE (mode), align, 1);
6710 mark_symbol_refs_as_used (exp);
6711 break;
6712
6713 default:
6714 gcc_unreachable ();
6715 }
6716 }
6717
6718
6719 /* Return an RTL expression representing the value of the return address
6720 for the frame COUNT steps up from the current frame. FRAME is the
6721 frame pointer of that frame. */
6722
6723 rtx
6724 s390_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
6725 {
6726 int offset;
6727 rtx addr;
6728
6729 /* Without backchain, we fail for all but the current frame. */
6730
6731 if (!TARGET_BACKCHAIN && count > 0)
6732 return NULL_RTX;
6733
6734 /* For the current frame, we need to make sure the initial
6735 value of RETURN_REGNUM is actually saved. */
6736
6737 if (count == 0)
6738 {
6739 /* On non-z architectures branch splitting could overwrite r14. */
6740 if (TARGET_CPU_ZARCH)
6741 return get_hard_reg_initial_val (Pmode, RETURN_REGNUM);
6742 else
6743 {
6744 cfun_frame_layout.save_return_addr_p = true;
6745 return gen_rtx_MEM (Pmode, return_address_pointer_rtx);
6746 }
6747 }
6748
6749 if (TARGET_PACKED_STACK)
6750 offset = -2 * UNITS_PER_WORD;
6751 else
6752 offset = RETURN_REGNUM * UNITS_PER_WORD;
6753
6754 addr = plus_constant (frame, offset);
6755 addr = memory_address (Pmode, addr);
6756 return gen_rtx_MEM (Pmode, addr);
6757 }
6758
6759 /* Return an RTL expression representing the back chain stored in
6760 the current stack frame. */
6761
6762 rtx
6763 s390_back_chain_rtx (void)
6764 {
6765 rtx chain;
6766
6767 gcc_assert (TARGET_BACKCHAIN);
6768
6769 if (TARGET_PACKED_STACK)
6770 chain = plus_constant (stack_pointer_rtx,
6771 STACK_POINTER_OFFSET - UNITS_PER_WORD);
6772 else
6773 chain = stack_pointer_rtx;
6774
6775 chain = gen_rtx_MEM (Pmode, chain);
6776 return chain;
6777 }
6778
6779 /* Find first call clobbered register unused in a function.
6780 This could be used as base register in a leaf function
6781 or for holding the return address before epilogue. */
6782
6783 static int
6784 find_unused_clobbered_reg (void)
6785 {
6786 int i;
6787 for (i = 0; i < 6; i++)
6788 if (!df_regs_ever_live_p (i))
6789 return i;
6790 return 0;
6791 }
6792
6793
6794 /* Helper function for s390_regs_ever_clobbered. Sets the fields in DATA for all
6795 clobbered hard regs in SETREG. */
6796
6797 static void
6798 s390_reg_clobbered_rtx (rtx setreg, const_rtx set_insn ATTRIBUTE_UNUSED, void *data)
6799 {
6800 int *regs_ever_clobbered = (int *)data;
6801 unsigned int i, regno;
6802 enum machine_mode mode = GET_MODE (setreg);
6803
6804 if (GET_CODE (setreg) == SUBREG)
6805 {
6806 rtx inner = SUBREG_REG (setreg);
6807 if (!GENERAL_REG_P (inner))
6808 return;
6809 regno = subreg_regno (setreg);
6810 }
6811 else if (GENERAL_REG_P (setreg))
6812 regno = REGNO (setreg);
6813 else
6814 return;
6815
6816 for (i = regno;
6817 i < regno + HARD_REGNO_NREGS (regno, mode);
6818 i++)
6819 regs_ever_clobbered[i] = 1;
6820 }
6821
6822 /* Walks through all basic blocks of the current function looking
6823 for clobbered hard regs using s390_reg_clobbered_rtx. The fields
6824 of the passed integer array REGS_EVER_CLOBBERED are set to one for
6825 each of those regs. */
6826
6827 static void
6828 s390_regs_ever_clobbered (int *regs_ever_clobbered)
6829 {
6830 basic_block cur_bb;
6831 rtx cur_insn;
6832 unsigned int i;
6833
6834 memset (regs_ever_clobbered, 0, 16 * sizeof (int));
6835
6836 /* For non-leaf functions we have to consider all call clobbered regs to be
6837 clobbered. */
6838 if (!current_function_is_leaf)
6839 {
6840 for (i = 0; i < 16; i++)
6841 regs_ever_clobbered[i] = call_really_used_regs[i];
6842 }
6843
6844 /* Make the "magic" eh_return registers live if necessary. For regs_ever_live
6845 this work is done by liveness analysis (mark_regs_live_at_end).
6846 Special care is needed for functions containing landing pads. Landing pads
6847 may use the eh registers, but the code which sets these registers is not
6848 contained in that function. Hence s390_regs_ever_clobbered is not able to
6849 deal with this automatically. */
6850 if (crtl->calls_eh_return || cfun->machine->has_landing_pad_p)
6851 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM ; i++)
6852 if (crtl->calls_eh_return
6853 || (cfun->machine->has_landing_pad_p
6854 && df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
6855 regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;
6856
6857 /* For nonlocal gotos all call-saved registers have to be saved.
6858 This flag is also set for the unwinding code in libgcc.
6859 See expand_builtin_unwind_init. For regs_ever_live this is done by
6860 reload. */
6861 if (cfun->has_nonlocal_label)
6862 for (i = 0; i < 16; i++)
6863 if (!call_really_used_regs[i])
6864 regs_ever_clobbered[i] = 1;
6865
6866 FOR_EACH_BB (cur_bb)
6867 {
6868 FOR_BB_INSNS (cur_bb, cur_insn)
6869 {
6870 if (INSN_P (cur_insn))
6871 note_stores (PATTERN (cur_insn),
6872 s390_reg_clobbered_rtx,
6873 regs_ever_clobbered);
6874 }
6875 }
6876 }
6877
6878 /* Determine the frame area which actually has to be accessed
6879 in the function epilogue. The values are stored at the
6880 given pointers AREA_BOTTOM (address of the lowest used stack
6881 address) and AREA_TOP (address of the first item which does
6882 not belong to the stack frame). */
6883
6884 static void
6885 s390_frame_area (int *area_bottom, int *area_top)
6886 {
6887 int b, t;
6888 int i;
6889
6890 b = INT_MAX;
6891 t = INT_MIN;
6892
6893 if (cfun_frame_layout.first_restore_gpr != -1)
6894 {
6895 b = (cfun_frame_layout.gprs_offset
6896 + cfun_frame_layout.first_restore_gpr * UNITS_PER_WORD);
6897 t = b + (cfun_frame_layout.last_restore_gpr
6898 - cfun_frame_layout.first_restore_gpr + 1) * UNITS_PER_WORD;
6899 }
6900
6901 if (TARGET_64BIT && cfun_save_high_fprs_p)
6902 {
6903 b = MIN (b, cfun_frame_layout.f8_offset);
6904 t = MAX (t, (cfun_frame_layout.f8_offset
6905 + cfun_frame_layout.high_fprs * 8));
6906 }
6907
6908 if (!TARGET_64BIT)
6909 for (i = 2; i < 4; i++)
6910 if (cfun_fpr_bit_p (i))
6911 {
6912 b = MIN (b, cfun_frame_layout.f4_offset + (i - 2) * 8);
6913 t = MAX (t, cfun_frame_layout.f4_offset + (i - 1) * 8);
6914 }
6915
6916 *area_bottom = b;
6917 *area_top = t;
6918 }
6919
6920 /* Fill cfun->machine with info about register usage of current function.
6921 Return in CLOBBERED_REGS which GPRs are currently considered set. */
6922
6923 static void
6924 s390_register_info (int clobbered_regs[])
6925 {
6926 int i, j;
6927
6928 /* fprs 8 - 15 are call saved for 64 Bit ABI. */
6929 cfun_frame_layout.fpr_bitmap = 0;
6930 cfun_frame_layout.high_fprs = 0;
6931 if (TARGET_64BIT)
6932 for (i = 24; i < 32; i++)
6933 if (df_regs_ever_live_p (i) && !global_regs[i])
6934 {
6935 cfun_set_fpr_bit (i - 16);
6936 cfun_frame_layout.high_fprs++;
6937 }
6938
6939 /* Find first and last gpr to be saved. We trust regs_ever_live
6940 data, except that we don't save and restore global registers.
6941
6942 Also, all registers with special meaning to the compiler need
6943 to be handled specially. */
6944
6945 s390_regs_ever_clobbered (clobbered_regs);
6946
6947 for (i = 0; i < 16; i++)
6948 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i] && !fixed_regs[i];
6949
6950 if (frame_pointer_needed)
6951 clobbered_regs[HARD_FRAME_POINTER_REGNUM] = 1;
6952
6953 if (flag_pic)
6954 clobbered_regs[PIC_OFFSET_TABLE_REGNUM]
6955 |= df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM);
6956
6957 clobbered_regs[BASE_REGNUM]
6958 |= (cfun->machine->base_reg
6959 && REGNO (cfun->machine->base_reg) == BASE_REGNUM);
6960
6961 clobbered_regs[RETURN_REGNUM]
6962 |= (!current_function_is_leaf
6963 || TARGET_TPF_PROFILING
6964 || cfun->machine->split_branches_pending_p
6965 || cfun_frame_layout.save_return_addr_p
6966 || crtl->calls_eh_return
6967 || cfun->stdarg);
6968
6969 clobbered_regs[STACK_POINTER_REGNUM]
6970 |= (!current_function_is_leaf
6971 || TARGET_TPF_PROFILING
6972 || cfun_save_high_fprs_p
6973 || get_frame_size () > 0
6974 || cfun->calls_alloca
6975 || cfun->stdarg);
6976
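  /* Scan GPRs 6..15 for the lowest and highest register that is live or
     clobbered; these delimit the contiguous range saved and restored with
     the store-/load-multiple instructions in the prologue/epilogue.  */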
6977 for (i = 6; i < 16; i++)
6978 if (df_regs_ever_live_p (i) || clobbered_regs[i])
6979 break;
6980 for (j = 15; j > i; j--)
6981 if (df_regs_ever_live_p (j) || clobbered_regs[j])
6982 break;
6983
6984 if (i == 16)
6985 {
6986 /* Nothing to save/restore. */
6987 cfun_frame_layout.first_save_gpr_slot = -1;
6988 cfun_frame_layout.last_save_gpr_slot = -1;
6989 cfun_frame_layout.first_save_gpr = -1;
6990 cfun_frame_layout.first_restore_gpr = -1;
6991 cfun_frame_layout.last_save_gpr = -1;
6992 cfun_frame_layout.last_restore_gpr = -1;
6993 }
6994 else
6995 {
6996 /* Save slots for gprs from i to j. */
6997 cfun_frame_layout.first_save_gpr_slot = i;
6998 cfun_frame_layout.last_save_gpr_slot = j;
6999
7000 for (i = cfun_frame_layout.first_save_gpr_slot;
7001 i < cfun_frame_layout.last_save_gpr_slot + 1;
7002 i++)
7003 if (clobbered_regs[i])
7004 break;
7005
7006 for (j = cfun_frame_layout.last_save_gpr_slot; j > i; j--)
7007 if (clobbered_regs[j])
7008 break;
7009
7010 if (i == cfun_frame_layout.last_save_gpr_slot + 1)
7011 {
7012 /* Nothing to save/restore. */
7013 cfun_frame_layout.first_save_gpr = -1;
7014 cfun_frame_layout.first_restore_gpr = -1;
7015 cfun_frame_layout.last_save_gpr = -1;
7016 cfun_frame_layout.last_restore_gpr = -1;
7017 }
7018 else
7019 {
7020 /* Save / Restore from gpr i to j. */
7021 cfun_frame_layout.first_save_gpr = i;
7022 cfun_frame_layout.first_restore_gpr = i;
7023 cfun_frame_layout.last_save_gpr = j;
7024 cfun_frame_layout.last_restore_gpr = j;
7025 }
7026 }
7027
7028 if (cfun->stdarg)
7029 {
7030 /* Varargs functions need to save gprs 2 to 6. */
7031 if (cfun->va_list_gpr_size
7032 && crtl->args.info.gprs < GP_ARG_NUM_REG)
7033 {
7034 int min_gpr = crtl->args.info.gprs;
7035 int max_gpr = min_gpr + cfun->va_list_gpr_size;
7036 if (max_gpr > GP_ARG_NUM_REG)
7037 max_gpr = GP_ARG_NUM_REG;
7038
7039 if (cfun_frame_layout.first_save_gpr == -1
7040 || cfun_frame_layout.first_save_gpr > 2 + min_gpr)
7041 {
7042 cfun_frame_layout.first_save_gpr = 2 + min_gpr;
7043 cfun_frame_layout.first_save_gpr_slot = 2 + min_gpr;
7044 }
7045
7046 if (cfun_frame_layout.last_save_gpr == -1
7047 || cfun_frame_layout.last_save_gpr < 2 + max_gpr - 1)
7048 {
7049 cfun_frame_layout.last_save_gpr = 2 + max_gpr - 1;
7050 cfun_frame_layout.last_save_gpr_slot = 2 + max_gpr - 1;
7051 }
7052 }
7053
7054 /* Mark f0, f2 for 31 bit and f0, f2, f4, f6 for 64 bit to be saved. */
7055 if (TARGET_HARD_FLOAT && cfun->va_list_fpr_size
7056 && crtl->args.info.fprs < FP_ARG_NUM_REG)
7057 {
7058 int min_fpr = crtl->args.info.fprs;
7059 int max_fpr = min_fpr + cfun->va_list_fpr_size;
7060 if (max_fpr > FP_ARG_NUM_REG)
7061 max_fpr = FP_ARG_NUM_REG;
7062
7063 /* ??? This is currently required to ensure proper location
7064 of the fpr save slots within the va_list save area. */
7065 if (TARGET_PACKED_STACK)
7066 min_fpr = 0;
7067
7068 for (i = min_fpr; i < max_fpr; i++)
7069 cfun_set_fpr_bit (i);
7070 }
7071 }
7072
7073 if (!TARGET_64BIT)
7074 for (i = 2; i < 4; i++)
7075 if (df_regs_ever_live_p (i + 16) && !global_regs[i + 16])
7076 cfun_set_fpr_bit (i);
7077 }
7078
7079 /* Fill cfun->machine with info about frame of current function. */
7080
7081 static void
7082 s390_frame_info (void)
7083 {
7084 int i;
7085
7086 cfun_frame_layout.frame_size = get_frame_size ();
7087 if (!TARGET_64BIT && cfun_frame_layout.frame_size > 0x7fff0000)
7088 fatal_error ("total size of local variables exceeds architecture limit");
7089
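  /* Compute the save area offsets for one of three layouts: the standard
     ABI layout with its fixed register save area, the packed-stack layout
     with backchain (kernel), and the packed-stack layout without backchain,
     where the save slots are packed directly below STACK_POINTER_OFFSET.  */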
7090 if (!TARGET_PACKED_STACK)
7091 {
7092 cfun_frame_layout.backchain_offset = 0;
7093 cfun_frame_layout.f0_offset = 16 * UNITS_PER_WORD;
7094 cfun_frame_layout.f4_offset = cfun_frame_layout.f0_offset + 2 * 8;
7095 cfun_frame_layout.f8_offset = -cfun_frame_layout.high_fprs * 8;
7096 cfun_frame_layout.gprs_offset = (cfun_frame_layout.first_save_gpr_slot
7097 * UNITS_PER_WORD);
7098 }
7099 else if (TARGET_BACKCHAIN) /* kernel stack layout */
7100 {
7101 cfun_frame_layout.backchain_offset = (STACK_POINTER_OFFSET
7102 - UNITS_PER_WORD);
7103 cfun_frame_layout.gprs_offset
7104 = (cfun_frame_layout.backchain_offset
7105 - (STACK_POINTER_REGNUM - cfun_frame_layout.first_save_gpr_slot + 1)
7106 * UNITS_PER_WORD);
7107
7108 if (TARGET_64BIT)
7109 {
7110 cfun_frame_layout.f4_offset
7111 = (cfun_frame_layout.gprs_offset
7112 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7113
7114 cfun_frame_layout.f0_offset
7115 = (cfun_frame_layout.f4_offset
7116 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7117 }
7118 else
7119 {
7120 /* On 31 bit we have to take care of the alignment of the
7121 floating point regs to provide fastest access. */
7122 cfun_frame_layout.f0_offset
7123 = ((cfun_frame_layout.gprs_offset
7124 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1))
7125 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7126
7127 cfun_frame_layout.f4_offset
7128 = (cfun_frame_layout.f0_offset
7129 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7130 }
7131 }
7132 else /* no backchain */
7133 {
7134 cfun_frame_layout.f4_offset
7135 = (STACK_POINTER_OFFSET
7136 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7137
7138 cfun_frame_layout.f0_offset
7139 = (cfun_frame_layout.f4_offset
7140 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7141
7142 cfun_frame_layout.gprs_offset
7143 = cfun_frame_layout.f0_offset - cfun_gprs_save_area_size;
7144 }
7145
7146 if (current_function_is_leaf
7147 && !TARGET_TPF_PROFILING
7148 && cfun_frame_layout.frame_size == 0
7149 && !cfun_save_high_fprs_p
7150 && !cfun->calls_alloca
7151 && !cfun->stdarg)
7152 return;
7153
7154 if (!TARGET_PACKED_STACK)
7155 cfun_frame_layout.frame_size += (STACK_POINTER_OFFSET
7156 + crtl->outgoing_args_size
7157 + cfun_frame_layout.high_fprs * 8);
7158 else
7159 {
7160 if (TARGET_BACKCHAIN)
7161 cfun_frame_layout.frame_size += UNITS_PER_WORD;
7162
7163 /* No alignment trouble here because f8-f15 are only saved under
7164 64 bit. */
7165 cfun_frame_layout.f8_offset = (MIN (MIN (cfun_frame_layout.f0_offset,
7166 cfun_frame_layout.f4_offset),
7167 cfun_frame_layout.gprs_offset)
7168 - cfun_frame_layout.high_fprs * 8);
7169
7170 cfun_frame_layout.frame_size += cfun_frame_layout.high_fprs * 8;
7171
7172 for (i = 0; i < 8; i++)
7173 if (cfun_fpr_bit_p (i))
7174 cfun_frame_layout.frame_size += 8;
7175
7176 cfun_frame_layout.frame_size += cfun_gprs_save_area_size;
7177
7178 /* If under 31 bit an odd number of gprs has to be saved, we have to adjust
7179 the frame size to sustain 8 byte alignment of stack frames. */
7180 cfun_frame_layout.frame_size = ((cfun_frame_layout.frame_size +
7181 STACK_BOUNDARY / BITS_PER_UNIT - 1)
7182 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1));
7183
7184 cfun_frame_layout.frame_size += crtl->outgoing_args_size;
7185 }
7186 }
7187
7188 /* Generate frame layout. Fills in register and frame data for the current
7189 function in cfun->machine. This routine can be called multiple times;
7190 it will re-do the complete frame layout every time. */
7191
7192 static void
7193 s390_init_frame_layout (void)
7194 {
7195 HOST_WIDE_INT frame_size;
7196 int base_used;
7197 int clobbered_regs[16];
7198
7199 /* On S/390 machines, we may need to perform branch splitting, which
7200 will require both base and return address register. We have no
7201 choice but to assume we're going to need them until right at the
7202 end of the machine dependent reorg phase. */
7203 if (!TARGET_CPU_ZARCH)
7204 cfun->machine->split_branches_pending_p = true;
7205
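  /* Iterate until the frame size reaches a fixed point: whether the base
     register is needed depends on the frame size, and reserving a base
     register in turn changes the register save area and thus the frame
     size again.  */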
7206 do
7207 {
7208 frame_size = cfun_frame_layout.frame_size;
7209
7210 /* Try to predict whether we'll need the base register. */
7211 base_used = cfun->machine->split_branches_pending_p
7212 || crtl->uses_const_pool
7213 || (!DISP_IN_RANGE (frame_size)
7214 && !CONST_OK_FOR_K (frame_size));
7215
7216 /* Decide which register to use as literal pool base. In small
7217 leaf functions, try to use an unused call-clobbered register
7218 as base register to avoid save/restore overhead. */
7219 if (!base_used)
7220 cfun->machine->base_reg = NULL_RTX;
7221 else if (current_function_is_leaf && !df_regs_ever_live_p (5))
7222 cfun->machine->base_reg = gen_rtx_REG (Pmode, 5);
7223 else
7224 cfun->machine->base_reg = gen_rtx_REG (Pmode, BASE_REGNUM);
7225
7226 s390_register_info (clobbered_regs);
7227 s390_frame_info ();
7228 }
7229 while (frame_size != cfun_frame_layout.frame_size);
7230 }
7231
7232 /* Update frame layout. Recompute actual register save data based on
7233 current info and update regs_ever_live for the special registers.
7234 May be called multiple times, but may never cause *more* registers
7235 to be saved than s390_init_frame_layout allocated room for. */
7236
7237 static void
7238 s390_update_frame_layout (void)
7239 {
7240 int clobbered_regs[16];
7241
7242 s390_register_info (clobbered_regs);
7243
7244 df_set_regs_ever_live (BASE_REGNUM,
7245 clobbered_regs[BASE_REGNUM] ? true : false);
7246 df_set_regs_ever_live (RETURN_REGNUM,
7247 clobbered_regs[RETURN_REGNUM] ? true : false);
7248 df_set_regs_ever_live (STACK_POINTER_REGNUM,
7249 clobbered_regs[STACK_POINTER_REGNUM] ? true : false);
7250
7251 if (cfun->machine->base_reg)
7252 df_set_regs_ever_live (REGNO (cfun->machine->base_reg), true);
7253 }
7254
7255 /* Return true if it is legal to put a value with MODE into REGNO. */
7256
7257 bool
7258 s390_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
7259 {
7260 switch (REGNO_REG_CLASS (regno))
7261 {
7262 case FP_REGS:
7263 if (REGNO_PAIR_OK (regno, mode))
7264 {
7265 if (mode == SImode || mode == DImode)
7266 return true;
7267
7268 if (FLOAT_MODE_P (mode) && GET_MODE_CLASS (mode) != MODE_VECTOR_FLOAT)
7269 return true;
7270 }
7271 break;
7272 case ADDR_REGS:
7273 if (FRAME_REGNO_P (regno) && mode == Pmode)
7274 return true;
7275
7276 /* fallthrough */
7277 case GENERAL_REGS:
7278 if (REGNO_PAIR_OK (regno, mode))
7279 {
7280 if (TARGET_64BIT
7281 || (mode != TFmode && mode != TCmode && mode != TDmode))
7282 return true;
7283 }
7284 break;
7285 case CC_REGS:
7286 if (GET_MODE_CLASS (mode) == MODE_CC)
7287 return true;
7288 break;
7289 case ACCESS_REGS:
7290 if (REGNO_PAIR_OK (regno, mode))
7291 {
7292 if (mode == SImode || mode == Pmode)
7293 return true;
7294 }
7295 break;
7296 default:
7297 return false;
7298 }
7299
7300 return false;
7301 }
7302
7303 /* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */
7304
7305 bool
7306 s390_hard_regno_rename_ok (unsigned int old_reg, unsigned int new_reg)
7307 {
7308 /* Once we've decided upon a register to use as base register, it must
7309 no longer be used for any other purpose. */
7310 if (cfun->machine->base_reg)
7311 if (REGNO (cfun->machine->base_reg) == old_reg
7312 || REGNO (cfun->machine->base_reg) == new_reg)
7313 return false;
7314
7315 return true;
7316 }
7317
7318 /* Maximum number of registers to represent a value of mode MODE
7319 in a register of class RCLASS. */
7320
7321 bool
7322 s390_class_max_nregs (enum reg_class rclass, enum machine_mode mode)
7323 {
7324 switch (rclass)
7325 {
7326 case FP_REGS:
7327 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
7328 return 2 * ((GET_MODE_SIZE (mode) / 2 + 8 - 1) / 8);
7329 else
7330 return (GET_MODE_SIZE (mode) + 8 - 1) / 8;
7331 case ACCESS_REGS:
7332 return (GET_MODE_SIZE (mode) + 4 - 1) / 4;
7333 default:
7334 break;
7335 }
7336 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
7337 }
7338
7339 /* Return true if register FROM can be eliminated via register TO. */
7340
7341 static bool
7342 s390_can_eliminate (const int from, const int to)
7343 {
7344 /* On zSeries machines, we have not marked the base register as fixed.
7345 Instead, we have an elimination rule BASE_REGNUM -> BASE_REGNUM.
7346 If a function requires the base register, we say here that this
7347 elimination cannot be performed. This will cause reload to free
7348 up the base register (as if it were fixed). On the other hand,
7349 if the current function does *not* require the base register, we
7350 say here the elimination succeeds, which in turn allows reload
7351 to allocate the base register for any other purpose. */
7352 if (from == BASE_REGNUM && to == BASE_REGNUM)
7353 {
7354 if (TARGET_CPU_ZARCH)
7355 {
7356 s390_init_frame_layout ();
7357 return cfun->machine->base_reg == NULL_RTX;
7358 }
7359
7360 return false;
7361 }
7362
7363 /* Everything else must point into the stack frame. */
7364 gcc_assert (to == STACK_POINTER_REGNUM
7365 || to == HARD_FRAME_POINTER_REGNUM);
7366
7367 gcc_assert (from == FRAME_POINTER_REGNUM
7368 || from == ARG_POINTER_REGNUM
7369 || from == RETURN_ADDRESS_POINTER_REGNUM);
7370
7371 /* Make sure we actually saved the return address. */
7372 if (from == RETURN_ADDRESS_POINTER_REGNUM)
7373 if (!crtl->calls_eh_return
7374 && !cfun->stdarg
7375 && !cfun_frame_layout.save_return_addr_p)
7376 return false;
7377
7378 return true;
7379 }
7380
7381 /* Return offset between register FROM and TO initially after prolog. */
7382
7383 HOST_WIDE_INT
7384 s390_initial_elimination_offset (int from, int to)
7385 {
7386 HOST_WIDE_INT offset;
7387 int index;
7388
7389 /* ??? Why are we called for non-eliminable pairs? */
7390 if (!s390_can_eliminate (from, to))
7391 return 0;
7392
7393 switch (from)
7394 {
7395 case FRAME_POINTER_REGNUM:
7396 offset = (get_frame_size()
7397 + STACK_POINTER_OFFSET
7398 + crtl->outgoing_args_size);
7399 break;
7400
7401 case ARG_POINTER_REGNUM:
7402 s390_init_frame_layout ();
7403 offset = cfun_frame_layout.frame_size + STACK_POINTER_OFFSET;
7404 break;
7405
7406 case RETURN_ADDRESS_POINTER_REGNUM:
7407 s390_init_frame_layout ();
7408 index = RETURN_REGNUM - cfun_frame_layout.first_save_gpr_slot;
7409 gcc_assert (index >= 0);
7410 offset = cfun_frame_layout.frame_size + cfun_frame_layout.gprs_offset;
7411 offset += index * UNITS_PER_WORD;
7412 break;
7413
7414 case BASE_REGNUM:
7415 offset = 0;
7416 break;
7417
7418 default:
7419 gcc_unreachable ();
7420 }
7421
7422 return offset;
7423 }
7424
7425 /* Emit insn to save fpr REGNUM at offset OFFSET relative
7426 to register BASE. Return generated insn. */
7427
7428 static rtx
7429 save_fpr (rtx base, int offset, int regnum)
7430 {
7431 rtx addr;
7432 addr = gen_rtx_MEM (DFmode, plus_constant (base, offset));
7433
7434 if (regnum >= 16 && regnum <= (16 + FP_ARG_NUM_REG))
7435 set_mem_alias_set (addr, get_varargs_alias_set ());
7436 else
7437 set_mem_alias_set (addr, get_frame_alias_set ());
7438
7439 return emit_move_insn (addr, gen_rtx_REG (DFmode, regnum));
7440 }
7441
7442 /* Emit insn to restore fpr REGNUM from offset OFFSET relative
7443 to register BASE. Return generated insn. */
7444
7445 static rtx
7446 restore_fpr (rtx base, int offset, int regnum)
7447 {
7448 rtx addr;
7449 addr = gen_rtx_MEM (DFmode, plus_constant (base, offset));
7450 set_mem_alias_set (addr, get_frame_alias_set ());
7451
7452 return emit_move_insn (gen_rtx_REG (DFmode, regnum), addr);
7453 }
7454
7455 /* Return true if REGNO is a global register, but not one
7456 of the special ones that need to be saved/restored anyway. */
7457
7458 static inline bool
7459 global_not_special_regno_p (int regno)
7460 {
7461 return (global_regs[regno]
7462 /* These registers are special and need to be
7463 restored in any case. */
7464 && !(regno == STACK_POINTER_REGNUM
7465 || regno == RETURN_REGNUM
7466 || regno == BASE_REGNUM
7467 || (flag_pic && regno == (int)PIC_OFFSET_TABLE_REGNUM)));
7468 }
7469
7470 /* Generate insn to save registers FIRST to LAST into
7471 the register save area located at offset OFFSET
7472 relative to register BASE. */
7473
7474 static rtx
7475 save_gprs (rtx base, int offset, int first, int last)
7476 {
7477 rtx addr, insn, note;
7478 int i;
7479
7480 addr = plus_constant (base, offset);
7481 addr = gen_rtx_MEM (Pmode, addr);
7482
7483 set_mem_alias_set (addr, get_frame_alias_set ());
7484
7485 /* Special-case single register. */
7486 if (first == last)
7487 {
7488 if (TARGET_64BIT)
7489 insn = gen_movdi (addr, gen_rtx_REG (Pmode, first));
7490 else
7491 insn = gen_movsi (addr, gen_rtx_REG (Pmode, first));
7492
7493 if (!global_not_special_regno_p (first))
7494 RTX_FRAME_RELATED_P (insn) = 1;
7495 return insn;
7496 }
7497
7498
7499 insn = gen_store_multiple (addr,
7500 gen_rtx_REG (Pmode, first),
7501 GEN_INT (last - first + 1));
7502
7503 if (first <= 6 && cfun->stdarg)
7504 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
7505 {
7506 rtx mem = XEXP (XVECEXP (PATTERN (insn), 0, i), 0);
7507
7508 if (first + i <= 6)
7509 set_mem_alias_set (mem, get_varargs_alias_set ());
7510 }
7511
7512 /* We need to set the FRAME_RELATED flag on all SETs
7513 inside the store-multiple pattern.
7514
7515 However, we must not emit DWARF records for registers 2..5
7516 if they are stored for use by variable arguments ...
7517
7518 ??? Unfortunately, it is not enough to simply not set the
7519 FRAME_RELATED flags for those SETs, because the first SET
7520 of the PARALLEL is always treated as if it had the flag
7521 set, even if it does not. Therefore we emit a new pattern
7522 without those registers as REG_FRAME_RELATED_EXPR note. */
7523
7524 if (first >= 6 && !global_not_special_regno_p (first))
7525 {
7526 rtx pat = PATTERN (insn);
7527
7528 for (i = 0; i < XVECLEN (pat, 0); i++)
7529 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
7530 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (pat,
7531 0, i)))))
7532 RTX_FRAME_RELATED_P (XVECEXP (pat, 0, i)) = 1;
7533
7534 RTX_FRAME_RELATED_P (insn) = 1;
7535 }
7536 else if (last >= 6)
7537 {
7538 int start;
7539
7540 for (start = first >= 6 ? first : 6; start <= last; start++)
7541 if (!global_not_special_regno_p (start))
7542 break;
7543
7544 if (start > last)
7545 return insn;
7546
7547 addr = plus_constant (base, offset + (start - first) * UNITS_PER_WORD);
7548 note = gen_store_multiple (gen_rtx_MEM (Pmode, addr),
7549 gen_rtx_REG (Pmode, start),
7550 GEN_INT (last - start + 1));
7551 note = PATTERN (note);
7552
7553 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
7554
7555 for (i = 0; i < XVECLEN (note, 0); i++)
7556 if (GET_CODE (XVECEXP (note, 0, i)) == SET
7557 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (note,
7558 0, i)))))
7559 RTX_FRAME_RELATED_P (XVECEXP (note, 0, i)) = 1;
7560
7561 RTX_FRAME_RELATED_P (insn) = 1;
7562 }
7563
7564 return insn;
7565 }
7566
7567 /* Generate insn to restore registers FIRST to LAST from
7568 the register save area located at offset OFFSET
7569 relative to register BASE. */
7570
7571 static rtx
7572 restore_gprs (rtx base, int offset, int first, int last)
7573 {
7574 rtx addr, insn;
7575
7576 addr = plus_constant (base, offset);
7577 addr = gen_rtx_MEM (Pmode, addr);
7578 set_mem_alias_set (addr, get_frame_alias_set ());
7579
7580 /* Special-case single register. */
7581 if (first == last)
7582 {
7583 if (TARGET_64BIT)
7584 insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
7585 else
7586 insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);
7587
7588 return insn;
7589 }
7590
7591 insn = gen_load_multiple (gen_rtx_REG (Pmode, first),
7592 addr,
7593 GEN_INT (last - first + 1));
7594 return insn;
7595 }
7596
7597 /* Return insn sequence to load the GOT register. */
7598
7599 static GTY(()) rtx got_symbol;
7600 rtx
7601 s390_load_got (void)
7602 {
7603 rtx insns;
7604
7605 if (!got_symbol)
7606 {
7607 got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
7608 SYMBOL_REF_FLAGS (got_symbol) = SYMBOL_FLAG_LOCAL;
7609 }
7610
7611 start_sequence ();
7612
7613 if (TARGET_CPU_ZARCH)
7614 {
7615 emit_move_insn (pic_offset_table_rtx, got_symbol);
7616 }
7617 else
7618 {
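      /* Without LARL, the GOT address is obtained via the literal pool:
	 load the pool entry holding the pool-relative GOT offset
	 (UNSPEC_LTREL_OFFSET), then add the pool base (UNSPEC_LTREL_BASE)
	 to turn it into an absolute address.  */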
7619 rtx offset;
7620
7621 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got_symbol),
7622 UNSPEC_LTREL_OFFSET);
7623 offset = gen_rtx_CONST (Pmode, offset);
7624 offset = force_const_mem (Pmode, offset);
7625
7626 emit_move_insn (pic_offset_table_rtx, offset);
7627
7628 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (offset, 0)),
7629 UNSPEC_LTREL_BASE);
7630 offset = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, offset);
7631
7632 emit_move_insn (pic_offset_table_rtx, offset);
7633 }
7634
7635 insns = get_insns ();
7636 end_sequence ();
7637 return insns;
7638 }
7639
7640 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
7641 and the change to the stack pointer. */
7642
7643 static void
7644 s390_emit_stack_tie (void)
7645 {
7646 rtx mem = gen_frame_mem (BLKmode,
7647 gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
7648
7649 emit_insn (gen_stack_tie (mem));
7650 }
7651
7652 /* Expand the prologue into a bunch of separate insns. */
7653
7654 void
7655 s390_emit_prologue (void)
7656 {
7657 rtx insn, addr;
7658 rtx temp_reg;
7659 int i;
7660 int offset;
7661 int next_fpr = 0;
7662
7663 /* Complete frame layout. */
7664
7665 s390_update_frame_layout ();
7666
7667 /* Annotate all constant pool references to let the scheduler know
7668 they implicitly use the base register. */
7669
7670 push_topmost_sequence ();
7671
7672 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7673 if (INSN_P (insn))
7674 {
7675 annotate_constant_pool_refs (&PATTERN (insn));
7676 df_insn_rescan (insn);
7677 }
7678
7679 pop_topmost_sequence ();
7680
7681 /* Choose best register to use for temp use within prologue.
7682 See below for why TPF must use register 1. */
7683
7684 if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
7685 && !current_function_is_leaf
7686 && !TARGET_TPF_PROFILING)
7687 temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
7688 else
7689 temp_reg = gen_rtx_REG (Pmode, 1);
7690
7691 /* Save call saved gprs. */
7692 if (cfun_frame_layout.first_save_gpr != -1)
7693 {
7694 insn = save_gprs (stack_pointer_rtx,
7695 cfun_frame_layout.gprs_offset +
7696 UNITS_PER_WORD * (cfun_frame_layout.first_save_gpr
7697 - cfun_frame_layout.first_save_gpr_slot),
7698 cfun_frame_layout.first_save_gpr,
7699 cfun_frame_layout.last_save_gpr);
7700 emit_insn (insn);
7701 }
7702
7703 /* Dummy insn to mark literal pool slot. */
7704
7705 if (cfun->machine->base_reg)
7706 emit_insn (gen_main_pool (cfun->machine->base_reg));
7707
7708 offset = cfun_frame_layout.f0_offset;
7709
7710 /* Save f0 and f2. */
7711 for (i = 0; i < 2; i++)
7712 {
7713 if (cfun_fpr_bit_p (i))
7714 {
7715 save_fpr (stack_pointer_rtx, offset, i + 16);
7716 offset += 8;
7717 }
7718 else if (!TARGET_PACKED_STACK)
7719 offset += 8;
7720 }
7721
7722 /* Save f4 and f6. */
7723 offset = cfun_frame_layout.f4_offset;
7724 for (i = 2; i < 4; i++)
7725 {
7726 if (cfun_fpr_bit_p (i))
7727 {
7728 insn = save_fpr (stack_pointer_rtx, offset, i + 16);
7729 offset += 8;
7730
7731 /* If f4 and f6 are call clobbered they are saved due to stdargs and
7732 therefore are not frame related. */
7733 if (!call_really_used_regs[i + 16])
7734 RTX_FRAME_RELATED_P (insn) = 1;
7735 }
7736 else if (!TARGET_PACKED_STACK)
7737 offset += 8;
7738 }
7739
7740 if (TARGET_PACKED_STACK
7741 && cfun_save_high_fprs_p
7742 && cfun_frame_layout.f8_offset + cfun_frame_layout.high_fprs * 8 > 0)
7743 {
7744 offset = (cfun_frame_layout.f8_offset
7745 + (cfun_frame_layout.high_fprs - 1) * 8);
7746
7747 for (i = 15; i > 7 && offset >= 0; i--)
7748 if (cfun_fpr_bit_p (i))
7749 {
7750 insn = save_fpr (stack_pointer_rtx, offset, i + 16);
7751
7752 RTX_FRAME_RELATED_P (insn) = 1;
7753 offset -= 8;
7754 }
7755 if (offset >= cfun_frame_layout.f8_offset)
7756 next_fpr = i + 16;
7757 }
7758
7759 if (!TARGET_PACKED_STACK)
7760 next_fpr = cfun_save_high_fprs_p ? 31 : 0;
7761
7762 /* Decrement stack pointer. */
7763
7764 if (cfun_frame_layout.frame_size > 0)
7765 {
7766 rtx frame_off = GEN_INT (-cfun_frame_layout.frame_size);
7767 rtx real_frame_off;
7768
7769 if (s390_stack_size)
7770 {
7771 HOST_WIDE_INT stack_guard;
7772
7773 if (s390_stack_guard)
7774 stack_guard = s390_stack_guard;
7775 else
7776 {
7777 /* If no value for stack guard is provided, the smallest power of 2
7778 larger than the current frame size is chosen. */
7779 stack_guard = 1;
7780 while (stack_guard < cfun_frame_layout.frame_size)
7781 stack_guard <<= 1;
7782 }
7783
7784 if (cfun_frame_layout.frame_size >= s390_stack_size)
7785 {
7786 warning (0, "frame size of function %qs is "
7787 HOST_WIDE_INT_PRINT_DEC
7788 " bytes exceeding user provided stack limit of "
7789 HOST_WIDE_INT_PRINT_DEC " bytes. "
7790 "An unconditional trap is added.",
7791 current_function_name(), cfun_frame_layout.frame_size,
7792 s390_stack_size);
7793 emit_insn (gen_trap ());
7794 }
7795 else
7796 {
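	  /* The conditional trap below fires when all stack pointer bits
	     between the stack-guard boundary and the stack-size boundary are
	     zero, i.e. when fewer than stack_guard bytes of room are left in
	     the current stack segment.  Since stack_guard is at least as
	     large as the frame size, this catches the case where the stack
	     pointer decrement would exceed the user-provided limit.  */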
7797 HOST_WIDE_INT stack_check_mask = ((s390_stack_size - 1)
7798 & ~(stack_guard - 1));
7799 rtx t = gen_rtx_AND (Pmode, stack_pointer_rtx,
7800 GEN_INT (stack_check_mask));
7801 if (TARGET_64BIT)
7802 emit_insn (gen_ctrapdi4 (gen_rtx_EQ (VOIDmode, t, const0_rtx),
7803 t, const0_rtx, const0_rtx));
7804 else
7805 emit_insn (gen_ctrapsi4 (gen_rtx_EQ (VOIDmode, t, const0_rtx),
7806 t, const0_rtx, const0_rtx));
7807 }
7808 }
7809
7810 if (s390_warn_framesize > 0
7811 && cfun_frame_layout.frame_size >= s390_warn_framesize)
7812 warning (0, "frame size of %qs is " HOST_WIDE_INT_PRINT_DEC " bytes",
7813 current_function_name (), cfun_frame_layout.frame_size);
7814
7815 if (s390_warn_dynamicstack_p && cfun->calls_alloca)
7816 warning (0, "%qs uses dynamic stack allocation", current_function_name ());
7817
7818 /* Save incoming stack pointer into temp reg. */
7819 if (TARGET_BACKCHAIN || next_fpr)
7820 insn = emit_insn (gen_move_insn (temp_reg, stack_pointer_rtx));
7821
7822 /* Subtract frame size from stack pointer. */
7823
7824 if (DISP_IN_RANGE (INTVAL (frame_off)))
7825 {
7826 insn = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7827 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
7828 frame_off));
7829 insn = emit_insn (insn);
7830 }
7831 else
7832 {
7833 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
7834 frame_off = force_const_mem (Pmode, frame_off);
7835
7836 insn = emit_insn (gen_add2_insn (stack_pointer_rtx, frame_off));
7837 annotate_constant_pool_refs (&PATTERN (insn));
7838 }
7839
7840 RTX_FRAME_RELATED_P (insn) = 1;
7841 real_frame_off = GEN_INT (-cfun_frame_layout.frame_size);
7842 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
7843 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7844 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
7845 real_frame_off)));
7846
7847 /* Set backchain. */
7848
7849 if (TARGET_BACKCHAIN)
7850 {
7851 if (cfun_frame_layout.backchain_offset)
7852 addr = gen_rtx_MEM (Pmode,
7853 plus_constant (stack_pointer_rtx,
7854 cfun_frame_layout.backchain_offset));
7855 else
7856 addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
7857 set_mem_alias_set (addr, get_frame_alias_set ());
7858 insn = emit_insn (gen_move_insn (addr, temp_reg));
7859 }
7860
7861 /* If we support asynchronous exceptions (e.g. for Java),
7862 we need to make sure the backchain pointer is set up
7863 before any possibly trapping memory access. */
7864
7865 if (TARGET_BACKCHAIN && flag_non_call_exceptions)
7866 {
7867 addr = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode));
7868 emit_clobber (addr);
7869 }
7870 }
7871
7872 /* Save fprs 8 - 15 (64 bit ABI). */
7873
7874 if (cfun_save_high_fprs_p && next_fpr)
7875 {
7876 /* If the stack might be accessed through a different register
7877 we have to make sure that the stack pointer decrement is not
7878 moved below the use of the stack slots. */
7879 s390_emit_stack_tie ();
7880
7881 insn = emit_insn (gen_add2_insn (temp_reg,
7882 GEN_INT (cfun_frame_layout.f8_offset)));
7883
7884 offset = 0;
7885
7886 for (i = 24; i <= next_fpr; i++)
7887 if (cfun_fpr_bit_p (i - 16))
7888 {
7889 rtx addr = plus_constant (stack_pointer_rtx,
7890 cfun_frame_layout.frame_size
7891 + cfun_frame_layout.f8_offset
7892 + offset);
7893
7894 insn = save_fpr (temp_reg, offset, i);
7895 offset += 8;
7896 RTX_FRAME_RELATED_P (insn) = 1;
7897 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
7898 gen_rtx_SET (VOIDmode,
7899 gen_rtx_MEM (DFmode, addr),
7900 gen_rtx_REG (DFmode, i)));
7901 }
7902 }
7903
7904 /* Set frame pointer, if needed. */
7905
7906 if (frame_pointer_needed)
7907 {
7908 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
7909 RTX_FRAME_RELATED_P (insn) = 1;
7910 }
7911
7912 /* Set up got pointer, if needed. */
7913
7914 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
7915 {
7916 rtx insns = s390_load_got ();
7917
7918 for (insn = insns; insn; insn = NEXT_INSN (insn))
7919 annotate_constant_pool_refs (&PATTERN (insn));
7920
7921 emit_insn (insns);
7922 }
7923
7924 if (TARGET_TPF_PROFILING)
7925 {
7926 /* Generate a BAS instruction to serve as a function
7927 entry intercept to facilitate the use of tracing
7928 algorithms located at the branch target. */
7929 emit_insn (gen_prologue_tpf ());
7930
7931 /* Emit a blockage here so that all code
7932 lies between the profiling mechanisms. */
7933 emit_insn (gen_blockage ());
7934 }
7935 }
7936
7937 /* Expand the epilogue into a bunch of separate insns. */
7938
7939 void
7940 s390_emit_epilogue (bool sibcall)
7941 {
7942 rtx frame_pointer, return_reg, cfa_restores = NULL_RTX;
7943 int area_bottom, area_top, offset = 0;
7944 int next_offset;
7945 rtvec p;
7946 int i;
7947
7948 if (TARGET_TPF_PROFILING)
7949 {
7950
7951 /* Generate a BAS instruction to serve as a function
7952          exit intercept to facilitate the use of tracing
7953 algorithms located at the branch target. */
7954
7955 /* Emit a blockage here so that all code
7956 lies between the profiling mechanisms. */
7957 emit_insn (gen_blockage ());
7958
7959 emit_insn (gen_epilogue_tpf ());
7960 }
7961
7962 /* Check whether to use frame or stack pointer for restore. */
7963
7964 frame_pointer = (frame_pointer_needed
7965 ? hard_frame_pointer_rtx : stack_pointer_rtx);
7966
7967 s390_frame_area (&area_bottom, &area_top);
7968
7969 /* Check whether we can access the register save area.
7970 If not, increment the frame pointer as required. */
7971
7972 if (area_top <= area_bottom)
7973 {
7974 /* Nothing to restore. */
7975 }
7976 else if (DISP_IN_RANGE (cfun_frame_layout.frame_size + area_bottom)
7977 && DISP_IN_RANGE (cfun_frame_layout.frame_size + area_top - 1))
7978 {
7979 /* Area is in range. */
7980 offset = cfun_frame_layout.frame_size;
7981 }
7982 else
7983 {
7984 rtx insn, frame_off, cfa;
7985
7986 offset = area_bottom < 0 ? -area_bottom : 0;
7987 frame_off = GEN_INT (cfun_frame_layout.frame_size - offset);
7988
7989 cfa = gen_rtx_SET (VOIDmode, frame_pointer,
7990 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
7991 if (DISP_IN_RANGE (INTVAL (frame_off)))
7992 {
7993 insn = gen_rtx_SET (VOIDmode, frame_pointer,
7994 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
7995 insn = emit_insn (insn);
7996 }
7997 else
7998 {
7999 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
8000 frame_off = force_const_mem (Pmode, frame_off);
8001
8002 insn = emit_insn (gen_add2_insn (frame_pointer, frame_off));
8003 annotate_constant_pool_refs (&PATTERN (insn));
8004 }
8005 add_reg_note (insn, REG_CFA_ADJUST_CFA, cfa);
8006 RTX_FRAME_RELATED_P (insn) = 1;
8007 }
8008
8009 /* Restore call saved fprs. */
8010
8011 if (TARGET_64BIT)
8012 {
8013 if (cfun_save_high_fprs_p)
8014 {
8015 next_offset = cfun_frame_layout.f8_offset;
8016 for (i = 24; i < 32; i++)
8017 {
8018 if (cfun_fpr_bit_p (i - 16))
8019 {
8020 restore_fpr (frame_pointer,
8021 offset + next_offset, i);
8022 cfa_restores
8023 = alloc_reg_note (REG_CFA_RESTORE,
8024 gen_rtx_REG (DFmode, i), cfa_restores);
8025 next_offset += 8;
8026 }
8027 }
8028 }
8029
8030 }
8031 else
8032 {
8033 next_offset = cfun_frame_layout.f4_offset;
8034 for (i = 18; i < 20; i++)
8035 {
8036 if (cfun_fpr_bit_p (i - 16))
8037 {
8038 restore_fpr (frame_pointer,
8039 offset + next_offset, i);
8040 cfa_restores
8041 = alloc_reg_note (REG_CFA_RESTORE,
8042 gen_rtx_REG (DFmode, i), cfa_restores);
8043 next_offset += 8;
8044 }
8045 else if (!TARGET_PACKED_STACK)
8046 next_offset += 8;
8047 }
8048
8049 }
8050
8051 /* Return register. */
8052
8053 return_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
8054
8055 /* Restore call saved gprs. */
8056
8057 if (cfun_frame_layout.first_restore_gpr != -1)
8058 {
8059 rtx insn, addr;
8060 int i;
8061
8062       /* Check for global registers; their current values are stored into the
8063          stack slots from which the restore below will reload them.  */
8064
8065 for (i = cfun_frame_layout.first_restore_gpr;
8066 i <= cfun_frame_layout.last_restore_gpr;
8067 i++)
8068 {
8069 if (global_not_special_regno_p (i))
8070 {
8071 addr = plus_constant (frame_pointer,
8072 offset + cfun_frame_layout.gprs_offset
8073 + (i - cfun_frame_layout.first_save_gpr_slot)
8074 * UNITS_PER_WORD);
8075 addr = gen_rtx_MEM (Pmode, addr);
8076 set_mem_alias_set (addr, get_frame_alias_set ());
8077 emit_move_insn (addr, gen_rtx_REG (Pmode, i));
8078 }
8079 else
8080 cfa_restores
8081 = alloc_reg_note (REG_CFA_RESTORE,
8082 gen_rtx_REG (Pmode, i), cfa_restores);
8083 }
8084
8085 if (! sibcall)
8086 {
8087 	  /* Fetch the return address from the stack before the load multiple;
8088 	     this helps scheduling.  */
8089
8090 if (cfun_frame_layout.save_return_addr_p
8091 || (cfun_frame_layout.first_restore_gpr < BASE_REGNUM
8092 && cfun_frame_layout.last_restore_gpr > RETURN_REGNUM))
8093 {
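	      /* Either the return address was saved to a stack slot, or the
		 load multiple below will overwrite RETURN_REGNUM.  In both
		 cases the return address is reloaded here into a
		 call-clobbered register chosen by find_unused_clobbered_reg
		 so the final branch does not depend on the restore.  */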
8094 int return_regnum = find_unused_clobbered_reg();
8095 if (!return_regnum)
8096 return_regnum = 4;
8097 return_reg = gen_rtx_REG (Pmode, return_regnum);
8098
8099 addr = plus_constant (frame_pointer,
8100 offset + cfun_frame_layout.gprs_offset
8101 + (RETURN_REGNUM
8102 - cfun_frame_layout.first_save_gpr_slot)
8103 * UNITS_PER_WORD);
8104 addr = gen_rtx_MEM (Pmode, addr);
8105 set_mem_alias_set (addr, get_frame_alias_set ());
8106 emit_move_insn (return_reg, addr);
8107 }
8108 }
8109
8110 insn = restore_gprs (frame_pointer,
8111 offset + cfun_frame_layout.gprs_offset
8112 + (cfun_frame_layout.first_restore_gpr
8113 - cfun_frame_layout.first_save_gpr_slot)
8114 * UNITS_PER_WORD,
8115 cfun_frame_layout.first_restore_gpr,
8116 cfun_frame_layout.last_restore_gpr);
8117 insn = emit_insn (insn);
8118 REG_NOTES (insn) = cfa_restores;
8119 add_reg_note (insn, REG_CFA_DEF_CFA,
8120 plus_constant (stack_pointer_rtx, STACK_POINTER_OFFSET));
8121 RTX_FRAME_RELATED_P (insn) = 1;
8122 }
8123
8124 if (! sibcall)
8125 {
8126
8127 /* Return to caller. */
8128
8129 p = rtvec_alloc (2);
8130
8131 RTVEC_ELT (p, 0) = gen_rtx_RETURN (VOIDmode);
8132 RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode, return_reg);
8133 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
8134 }
8135 }
8136
8137
8138 /* Return the size in bytes of a function argument of
8139 type TYPE and/or mode MODE. At least one of TYPE or
8140 MODE must be specified. */
8141
8142 static int
8143 s390_function_arg_size (enum machine_mode mode, const_tree type)
8144 {
8145 if (type)
8146 return int_size_in_bytes (type);
8147
8148 /* No type info available for some library calls ... */
8149 if (mode != BLKmode)
8150 return GET_MODE_SIZE (mode);
8151
8152   /* If we have neither type nor mode, abort.  */
8153 gcc_unreachable ();
8154 }
8155
8156 /* Return true if a function argument of type TYPE and mode MODE
8157 is to be passed in a floating-point register, if available. */
8158
8159 static bool
8160 s390_function_arg_float (enum machine_mode mode, tree type)
8161 {
8162 int size = s390_function_arg_size (mode, type);
8163 if (size > 8)
8164 return false;
8165
8166 /* Soft-float changes the ABI: no floating-point registers are used. */
8167 if (TARGET_SOFT_FLOAT)
8168 return false;
8169
8170 /* No type info available for some library calls ... */
8171 if (!type)
8172 return mode == SFmode || mode == DFmode || mode == SDmode || mode == DDmode;
8173
8174 /* The ABI says that record types with a single member are treated
8175 just like that member would be. */
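  /* For example (illustrative): a record wrapping a single double, such as
     struct { double d; }, is passed the same way a plain double would be.  */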
8176 while (TREE_CODE (type) == RECORD_TYPE)
8177 {
8178 tree field, single = NULL_TREE;
8179
8180 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
8181 {
8182 if (TREE_CODE (field) != FIELD_DECL)
8183 continue;
8184
8185 if (single == NULL_TREE)
8186 single = TREE_TYPE (field);
8187 else
8188 return false;
8189 }
8190
8191 if (single == NULL_TREE)
8192 return false;
8193 else
8194 type = single;
8195 }
8196
8197 return TREE_CODE (type) == REAL_TYPE;
8198 }
8199
8200 /* Return true if a function argument of type TYPE and mode MODE
8201 is to be passed in an integer register, or a pair of integer
8202 registers, if available. */
8203
8204 static bool
8205 s390_function_arg_integer (enum machine_mode mode, tree type)
8206 {
8207 int size = s390_function_arg_size (mode, type);
8208 if (size > 8)
8209 return false;
8210
8211 /* No type info available for some library calls ... */
8212 if (!type)
8213 return GET_MODE_CLASS (mode) == MODE_INT
8214 || (TARGET_SOFT_FLOAT && SCALAR_FLOAT_MODE_P (mode));
8215
8216 /* We accept small integral (and similar) types. */
8217 if (INTEGRAL_TYPE_P (type)
8218 || POINTER_TYPE_P (type)
8219 || TREE_CODE (type) == OFFSET_TYPE
8220 || (TARGET_SOFT_FLOAT && TREE_CODE (type) == REAL_TYPE))
8221 return true;
8222
8223 /* We also accept structs of size 1, 2, 4, 8 that are not
8224 passed in floating-point registers. */
8225 if (AGGREGATE_TYPE_P (type)
8226 && exact_log2 (size) >= 0
8227 && !s390_function_arg_float (mode, type))
8228 return true;
8229
8230 return false;
8231 }
8232
8233 /* Return 1 if a function argument of type TYPE and mode MODE
8234 is to be passed by reference. The ABI specifies that only
8235 structures of size 1, 2, 4, or 8 bytes are passed by value,
8236 all other structures (and complex numbers) are passed by
8237 reference. */
8238
8239 static bool
8240 s390_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
8241 enum machine_mode mode, const_tree type,
8242 bool named ATTRIBUTE_UNUSED)
8243 {
8244 int size = s390_function_arg_size (mode, type);
8245 if (size > 8)
8246 return true;
8247
8248 if (type)
8249 {
8250 if (AGGREGATE_TYPE_P (type) && exact_log2 (size) < 0)
8251 return 1;
8252
8253 if (TREE_CODE (type) == COMPLEX_TYPE
8254 || TREE_CODE (type) == VECTOR_TYPE)
8255 return 1;
8256 }
8257
8258 return 0;
8259 }
8260
8261 /* Update the data in CUM to advance over an argument of mode MODE and
8262 data type TYPE. (TYPE is null for libcalls where that information
8263    may not be available.)  The boolean NAMED specifies whether the
8264 argument is a named argument (as opposed to an unnamed argument
8265 matching an ellipsis). */
8266
8267 void
8268 s390_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
8269 tree type, int named ATTRIBUTE_UNUSED)
8270 {
8271 if (s390_function_arg_float (mode, type))
8272 {
8273 cum->fprs += 1;
8274 }
8275 else if (s390_function_arg_integer (mode, type))
8276 {
8277 int size = s390_function_arg_size (mode, type);
8278 cum->gprs += ((size + UNITS_PER_WORD-1) / UNITS_PER_WORD);
8279 }
8280 else
8281 gcc_unreachable ();
8282 }
8283
8284 /* Define where to put the arguments to a function.
8285 Value is zero to push the argument on the stack,
8286 or a hard register in which to store the argument.
8287
8288 MODE is the argument's machine mode.
8289 TYPE is the data type of the argument (as a tree).
8290 This is null for libcalls where that information may
8291 not be available.
8292 CUM is a variable of type CUMULATIVE_ARGS which gives info about
8293 the preceding args and about the function being called.
8294 NAMED is nonzero if this argument is a named parameter
8295 (otherwise it is an extra parameter matching an ellipsis).
8296
8297 On S/390, we use general purpose registers 2 through 6 to
8298 pass integer, pointer, and certain structure arguments, and
8299 floating point registers 0 and 2 (0, 2, 4, and 6 on 64-bit)
8300 to pass floating point arguments. All remaining arguments
8301 are pushed to the stack. */
8302
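/* Illustrative example (hypothetical signature, 64-bit ABI): for a call
   f (int a, double b, long c), A is passed in %r2, B in %f0 and C in %r3;
   the GPR and FPR argument counts (cum->gprs and cum->fprs) advance
   independently of each other.  */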
8303 rtx
8304 s390_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, tree type,
8305 int named ATTRIBUTE_UNUSED)
8306 {
8307 if (s390_function_arg_float (mode, type))
8308 {
8309 if (cum->fprs + 1 > FP_ARG_NUM_REG)
8310 return 0;
8311 else
8312 return gen_rtx_REG (mode, cum->fprs + 16);
8313 }
8314 else if (s390_function_arg_integer (mode, type))
8315 {
8316 int size = s390_function_arg_size (mode, type);
8317 int n_gprs = (size + UNITS_PER_WORD-1) / UNITS_PER_WORD;
8318
8319 if (cum->gprs + n_gprs > GP_ARG_NUM_REG)
8320 return 0;
8321 else
8322 return gen_rtx_REG (mode, cum->gprs + 2);
8323 }
8324
8325 /* After the real arguments, expand_call calls us once again
8326 with a void_type_node type. Whatever we return here is
8327 passed as operand 2 to the call expanders.
8328
8329 We don't need this feature ... */
8330 else if (type == void_type_node)
8331 return const0_rtx;
8332
8333 gcc_unreachable ();
8334 }
8335
8336 /* Return true if return values of type TYPE should be returned
8337 in a memory buffer whose address is passed by the caller as
8338 hidden first argument. */
8339
8340 static bool
8341 s390_return_in_memory (const_tree type, const_tree fundecl ATTRIBUTE_UNUSED)
8342 {
8343 /* We accept small integral (and similar) types. */
8344 if (INTEGRAL_TYPE_P (type)
8345 || POINTER_TYPE_P (type)
8346 || TREE_CODE (type) == OFFSET_TYPE
8347 || TREE_CODE (type) == REAL_TYPE)
8348 return int_size_in_bytes (type) > 8;
8349
8350 /* Aggregates and similar constructs are always returned
8351 in memory. */
8352 if (AGGREGATE_TYPE_P (type)
8353 || TREE_CODE (type) == COMPLEX_TYPE
8354 || TREE_CODE (type) == VECTOR_TYPE)
8355 return true;
8356
8357 /* ??? We get called on all sorts of random stuff from
8358 aggregate_value_p. We can't abort, but it's not clear
8359 what's safe to return. Pretend it's a struct I guess. */
8360 return true;
8361 }
8362
8363 /* Function arguments and return values are promoted to word size. */
8364
8365 static enum machine_mode
8366 s390_promote_function_mode (const_tree type, enum machine_mode mode,
8367 int *punsignedp,
8368 const_tree fntype ATTRIBUTE_UNUSED,
8369 int for_return ATTRIBUTE_UNUSED)
8370 {
8371 if (INTEGRAL_MODE_P (mode)
8372 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
8373 {
8374 if (POINTER_TYPE_P (type))
8375 *punsignedp = POINTERS_EXTEND_UNSIGNED;
8376 return Pmode;
8377 }
8378
8379 return mode;
8380 }
8381
8382 /* Define where to return a (scalar) value of type TYPE.
8383 If TYPE is null, define where to return a (scalar)
8384 value of mode MODE from a libcall. */
8385
8386 rtx
8387 s390_function_value (const_tree type, const_tree fn, enum machine_mode mode)
8388 {
8389 if (type)
8390 {
8391 int unsignedp = TYPE_UNSIGNED (type);
8392 mode = promote_function_mode (type, TYPE_MODE (type), &unsignedp, fn, 1);
8393 }
8394
8395 gcc_assert (GET_MODE_CLASS (mode) == MODE_INT || SCALAR_FLOAT_MODE_P (mode));
8396 gcc_assert (GET_MODE_SIZE (mode) <= 8);
8397
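  /* With hardware floating point, scalar FP values are returned in %f0
     (hard register 16); all other values are returned in %r2.  */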
8398 if (TARGET_HARD_FLOAT && SCALAR_FLOAT_MODE_P (mode))
8399 return gen_rtx_REG (mode, 16);
8400 else
8401 return gen_rtx_REG (mode, 2);
8402 }
8403
8404
8405 /* Create and return the va_list datatype.
8406
8407 On S/390, va_list is an array type equivalent to
8408
8409 typedef struct __va_list_tag
8410 {
8411 long __gpr;
8412 long __fpr;
8413 void *__overflow_arg_area;
8414 void *__reg_save_area;
8415 } va_list[1];
8416
8417 where __gpr and __fpr hold the number of general purpose
8418 or floating point arguments used up to now, respectively,
8419 __overflow_arg_area points to the stack location of the
8420 next argument passed on the stack, and __reg_save_area
8421 always points to the start of the register area in the
8422 call frame of the current function. The function prologue
8423 saves all registers used for argument passing into this
8424 area if the function uses variable arguments. */
8425
8426 static tree
8427 s390_build_builtin_va_list (void)
8428 {
8429 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
8430
8431 record = lang_hooks.types.make_type (RECORD_TYPE);
8432
8433 type_decl =
8434 build_decl (BUILTINS_LOCATION,
8435 TYPE_DECL, get_identifier ("__va_list_tag"), record);
8436
8437 f_gpr = build_decl (BUILTINS_LOCATION,
8438 FIELD_DECL, get_identifier ("__gpr"),
8439 long_integer_type_node);
8440 f_fpr = build_decl (BUILTINS_LOCATION,
8441 FIELD_DECL, get_identifier ("__fpr"),
8442 long_integer_type_node);
8443 f_ovf = build_decl (BUILTINS_LOCATION,
8444 FIELD_DECL, get_identifier ("__overflow_arg_area"),
8445 ptr_type_node);
8446 f_sav = build_decl (BUILTINS_LOCATION,
8447 FIELD_DECL, get_identifier ("__reg_save_area"),
8448 ptr_type_node);
8449
8450 va_list_gpr_counter_field = f_gpr;
8451 va_list_fpr_counter_field = f_fpr;
8452
8453 DECL_FIELD_CONTEXT (f_gpr) = record;
8454 DECL_FIELD_CONTEXT (f_fpr) = record;
8455 DECL_FIELD_CONTEXT (f_ovf) = record;
8456 DECL_FIELD_CONTEXT (f_sav) = record;
8457
8458 TREE_CHAIN (record) = type_decl;
8459 TYPE_NAME (record) = type_decl;
8460 TYPE_FIELDS (record) = f_gpr;
8461 TREE_CHAIN (f_gpr) = f_fpr;
8462 TREE_CHAIN (f_fpr) = f_ovf;
8463 TREE_CHAIN (f_ovf) = f_sav;
8464
8465 layout_type (record);
8466
8467 /* The correct type is an array type of one element. */
8468 return build_array_type (record, build_index_type (size_zero_node));
8469 }
8470
8471 /* Implement va_start by filling the va_list structure VALIST.
8472    NEXTARG points to the first anonymous stack argument; it is not
8473    used here.
8474
8475 The following global variables are used to initialize
8476 the va_list structure:
8477
8478 crtl->args.info:
8479 holds number of gprs and fprs used for named arguments.
8480 crtl->args.arg_offset_rtx:
8481 holds the offset of the first anonymous stack argument
8482 (relative to the virtual arg pointer). */
8483
8484 static void
8485 s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
8486 {
8487 HOST_WIDE_INT n_gpr, n_fpr;
8488 int off;
8489 tree f_gpr, f_fpr, f_ovf, f_sav;
8490 tree gpr, fpr, ovf, sav, t;
8491
8492 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
8493 f_fpr = TREE_CHAIN (f_gpr);
8494 f_ovf = TREE_CHAIN (f_fpr);
8495 f_sav = TREE_CHAIN (f_ovf);
8496
8497 valist = build_va_arg_indirect_ref (valist);
8498 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
8499 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
8500 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
8501 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
8502
8503 /* Count number of gp and fp argument registers used. */
8504
8505 n_gpr = crtl->args.info.gprs;
8506 n_fpr = crtl->args.info.fprs;
8507
8508 if (cfun->va_list_gpr_size)
8509 {
8510 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
8511 build_int_cst (NULL_TREE, n_gpr));
8512 TREE_SIDE_EFFECTS (t) = 1;
8513 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8514 }
8515
8516 if (cfun->va_list_fpr_size)
8517 {
8518 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
8519 build_int_cst (NULL_TREE, n_fpr));
8520 TREE_SIDE_EFFECTS (t) = 1;
8521 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8522 }
8523
8524 /* Find the overflow area. */
8525 if (n_gpr + cfun->va_list_gpr_size > GP_ARG_NUM_REG
8526 || n_fpr + cfun->va_list_fpr_size > FP_ARG_NUM_REG)
8527 {
8528 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
8529
8530 off = INTVAL (crtl->args.arg_offset_rtx);
8531 off = off < 0 ? 0 : off;
8532 if (TARGET_DEBUG_ARG)
8533 fprintf (stderr, "va_start: n_gpr = %d, n_fpr = %d off %d\n",
8534 (int)n_gpr, (int)n_fpr, off);
8535
8536 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovf), t, size_int (off));
8537
8538 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
8539 TREE_SIDE_EFFECTS (t) = 1;
8540 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8541 }
8542
8543 /* Find the register save area. */
8544 if ((cfun->va_list_gpr_size && n_gpr < GP_ARG_NUM_REG)
8545 || (cfun->va_list_fpr_size && n_fpr < FP_ARG_NUM_REG))
8546 {
8547 t = make_tree (TREE_TYPE (sav), return_address_pointer_rtx);
8548 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (sav), t,
8549 size_int (-RETURN_REGNUM * UNITS_PER_WORD));
8550
8551 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
8552 TREE_SIDE_EFFECTS (t) = 1;
8553 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8554 }
8555 }
8556
8557 /* Implement va_arg by updating the va_list structure
8558 VALIST as required to retrieve an argument of type
8559 TYPE, and returning that argument.
8560
8561 Generates code equivalent to:
8562
8563 if (integral value) {
8564 if (size <= 4 && args.gpr < 5 ||
8565 size > 4 && args.gpr < 4 )
8566 ret = args.reg_save_area[args.gpr+8]
8567 else
8568 ret = *args.overflow_arg_area++;
8569 } else if (float value) {
8570 if (args.fgpr < 2)
8571 ret = args.reg_save_area[args.fpr+64]
8572 else
8573 ret = *args.overflow_arg_area++;
8574 } else if (aggregate value) {
8575 if (args.gpr < 5)
8576 ret = *args.reg_save_area[args.gpr]
8577 else
8578 ret = **args.overflow_arg_area++;
8579 } */
8580
8581 static tree
8582 s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
8583 gimple_seq *post_p ATTRIBUTE_UNUSED)
8584 {
8585 tree f_gpr, f_fpr, f_ovf, f_sav;
8586 tree gpr, fpr, ovf, sav, reg, t, u;
8587 int indirect_p, size, n_reg, sav_ofs, sav_scale, max_reg;
8588 tree lab_false, lab_over, addr;
8589
8590 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
8591 f_fpr = TREE_CHAIN (f_gpr);
8592 f_ovf = TREE_CHAIN (f_fpr);
8593 f_sav = TREE_CHAIN (f_ovf);
8594
8595 valist = build_va_arg_indirect_ref (valist);
8596 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
8597 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
8598 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
8599
8600 /* The tree for args* cannot be shared between gpr/fpr and ovf since
8601 both appear on a lhs. */
8602 valist = unshare_expr (valist);
8603 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
8604
8605 size = int_size_in_bytes (type);
8606
8607 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
8608 {
8609 if (TARGET_DEBUG_ARG)
8610 {
8611 fprintf (stderr, "va_arg: aggregate type");
8612 debug_tree (type);
8613 }
8614
8615 /* Aggregates are passed by reference. */
8616 indirect_p = 1;
8617 reg = gpr;
8618 n_reg = 1;
8619
8620       /* Kernel stack layout on 31 bit: it is assumed here that no padding
8621          will be added by s390_frame_info because for va_args an even number
8622          of GPRs (r15-r2 = 14 regs) always has to be saved.  */
8623 sav_ofs = 2 * UNITS_PER_WORD;
8624 sav_scale = UNITS_PER_WORD;
8625 size = UNITS_PER_WORD;
8626 max_reg = GP_ARG_NUM_REG - n_reg;
8627 }
8628 else if (s390_function_arg_float (TYPE_MODE (type), type))
8629 {
8630 if (TARGET_DEBUG_ARG)
8631 {
8632 fprintf (stderr, "va_arg: float type");
8633 debug_tree (type);
8634 }
8635
8636 /* FP args go in FP registers, if present. */
8637 indirect_p = 0;
8638 reg = fpr;
8639 n_reg = 1;
8640 sav_ofs = 16 * UNITS_PER_WORD;
8641 sav_scale = 8;
8642 max_reg = FP_ARG_NUM_REG - n_reg;
8643 }
8644 else
8645 {
8646 if (TARGET_DEBUG_ARG)
8647 {
8648 fprintf (stderr, "va_arg: other type");
8649 debug_tree (type);
8650 }
8651
8652 /* Otherwise into GP registers. */
8653 indirect_p = 0;
8654 reg = gpr;
8655 n_reg = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
8656
8657       /* Kernel stack layout on 31 bit: it is assumed here that no padding
8658          will be added by s390_frame_info because for va_args an even number
8659          of GPRs (r15-r2 = 14 regs) always has to be saved.  */
8660 sav_ofs = 2 * UNITS_PER_WORD;
8661
8662 if (size < UNITS_PER_WORD)
8663 sav_ofs += UNITS_PER_WORD - size;
8664
8665 sav_scale = UNITS_PER_WORD;
8666 max_reg = GP_ARG_NUM_REG - n_reg;
8667 }
8668
8669 /* Pull the value out of the saved registers ... */
8670
8671 lab_false = create_artificial_label (UNKNOWN_LOCATION);
8672 lab_over = create_artificial_label (UNKNOWN_LOCATION);
8673 addr = create_tmp_var (ptr_type_node, "addr");
8674
8675 t = fold_convert (TREE_TYPE (reg), size_int (max_reg));
8676 t = build2 (GT_EXPR, boolean_type_node, reg, t);
8677 u = build1 (GOTO_EXPR, void_type_node, lab_false);
8678 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
8679 gimplify_and_add (t, pre_p);
8680
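  /* addr = reg_save_area + sav_ofs + reg * sav_scale, i.e. the slot of the
     next available argument register within the register save area.  */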
8681 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav,
8682 size_int (sav_ofs));
8683 u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
8684 fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
8685 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t, fold_convert (sizetype, u));
8686
8687 gimplify_assign (addr, t, pre_p);
8688
8689 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
8690
8691 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
8692
8693
8694 /* ... Otherwise out of the overflow area. */
8695
8696 t = ovf;
8697 if (size < UNITS_PER_WORD)
8698 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
8699 size_int (UNITS_PER_WORD - size));
8700
8701 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
8702
8703 gimplify_assign (addr, t, pre_p);
8704
8705 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
8706 size_int (size));
8707 gimplify_assign (ovf, t, pre_p);
8708
8709 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
8710
8711
8712 /* Increment register save count. */
8713
8714 u = build2 (PREINCREMENT_EXPR, TREE_TYPE (reg), reg,
8715 fold_convert (TREE_TYPE (reg), size_int (n_reg)));
8716 gimplify_and_add (u, pre_p);
8717
8718 if (indirect_p)
8719 {
8720 t = build_pointer_type_for_mode (build_pointer_type (type),
8721 ptr_mode, true);
8722 addr = fold_convert (t, addr);
8723 addr = build_va_arg_indirect_ref (addr);
8724 }
8725 else
8726 {
8727 t = build_pointer_type_for_mode (type, ptr_mode, true);
8728 addr = fold_convert (t, addr);
8729 }
8730
8731 return build_va_arg_indirect_ref (addr);
8732 }
8733
8734
8735 /* Builtins. */
8736
8737 enum s390_builtin
8738 {
8739 S390_BUILTIN_THREAD_POINTER,
8740 S390_BUILTIN_SET_THREAD_POINTER,
8741
8742 S390_BUILTIN_max
8743 };
8744
8745 static enum insn_code const code_for_builtin_64[S390_BUILTIN_max] = {
8746 CODE_FOR_get_tp_64,
8747 CODE_FOR_set_tp_64
8748 };
8749
8750 static enum insn_code const code_for_builtin_31[S390_BUILTIN_max] = {
8751 CODE_FOR_get_tp_31,
8752 CODE_FOR_set_tp_31
8753 };
8754
8755 static void
8756 s390_init_builtins (void)
8757 {
8758 tree ftype;
8759
8760 ftype = build_function_type (ptr_type_node, void_list_node);
8761 add_builtin_function ("__builtin_thread_pointer", ftype,
8762 S390_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
8763 NULL, NULL_TREE);
8764
8765 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
8766 add_builtin_function ("__builtin_set_thread_pointer", ftype,
8767 S390_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
8768 NULL, NULL_TREE);
8769 }
8770
8771 /* Expand an expression EXP that calls a built-in function,
8772 with result going to TARGET if that's convenient
8773 (and in mode MODE if that's convenient).
8774 SUBTARGET may be used as the target for computing one of EXP's operands.
8775 IGNORE is nonzero if the value is to be ignored. */
8776
8777 static rtx
8778 s390_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
8779 enum machine_mode mode ATTRIBUTE_UNUSED,
8780 int ignore ATTRIBUTE_UNUSED)
8781 {
8782 #define MAX_ARGS 2
8783
8784 enum insn_code const *code_for_builtin =
8785 TARGET_64BIT ? code_for_builtin_64 : code_for_builtin_31;
8786
8787 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
8788 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
8789 enum insn_code icode;
8790 rtx op[MAX_ARGS], pat;
8791 int arity;
8792 bool nonvoid;
8793 tree arg;
8794 call_expr_arg_iterator iter;
8795
8796 if (fcode >= S390_BUILTIN_max)
8797 internal_error ("bad builtin fcode");
8798 icode = code_for_builtin[fcode];
8799 if (icode == 0)
8800 internal_error ("bad builtin fcode");
8801
8802 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
8803
8804 arity = 0;
8805 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
8806 {
8807 const struct insn_operand_data *insn_op;
8808
8809 if (arg == error_mark_node)
8810 return NULL_RTX;
8811 if (arity > MAX_ARGS)
8812 return NULL_RTX;
8813
8814 insn_op = &insn_data[icode].operand[arity + nonvoid];
8815
8816 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
8817
8818 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
8819 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
8820 arity++;
8821 }
8822
8823 if (nonvoid)
8824 {
8825 enum machine_mode tmode = insn_data[icode].operand[0].mode;
8826 if (!target
8827 || GET_MODE (target) != tmode
8828 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
8829 target = gen_reg_rtx (tmode);
8830 }
8831
8832 switch (arity)
8833 {
8834 case 0:
8835 pat = GEN_FCN (icode) (target);
8836 break;
8837 case 1:
8838 if (nonvoid)
8839 pat = GEN_FCN (icode) (target, op[0]);
8840 else
8841 pat = GEN_FCN (icode) (op[0]);
8842 break;
8843 case 2:
8844 pat = GEN_FCN (icode) (target, op[0], op[1]);
8845 break;
8846 default:
8847 gcc_unreachable ();
8848 }
8849 if (!pat)
8850 return NULL_RTX;
8851 emit_insn (pat);
8852
8853 if (nonvoid)
8854 return target;
8855 else
8856 return const0_rtx;
8857 }
8858
8859
8860 /* Output assembly code for the trampoline template to
8861 stdio stream FILE.
8862
8863 On S/390, we use gpr 1 internally in the trampoline code;
8864 gpr 0 is used to hold the static chain. */
8865
8866 void
8867 s390_trampoline_template (FILE *file)
8868 {
8869 rtx op[2];
8870 op[0] = gen_rtx_REG (Pmode, 0);
8871 op[1] = gen_rtx_REG (Pmode, 1);
8872
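  /* In both variants BASR loads the address following itself into %r1; the
     subsequent load multiple then fetches the static chain into %r0 and the
     target address into %r1 from the data words appended after the code
     (filled in by s390_initialize_trampoline).  */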
8873 if (TARGET_64BIT)
8874 {
8875 output_asm_insn ("basr\t%1,0", op);
8876 output_asm_insn ("lmg\t%0,%1,14(%1)", op);
8877 output_asm_insn ("br\t%1", op);
8878 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 10));
8879 }
8880 else
8881 {
8882 output_asm_insn ("basr\t%1,0", op);
8883 output_asm_insn ("lm\t%0,%1,6(%1)", op);
8884 output_asm_insn ("br\t%1", op);
8885 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 8));
8886 }
8887 }
8888
8889 /* Emit RTL insns to initialize the variable parts of a trampoline.
8890 FNADDR is an RTX for the address of the function's pure code.
8891 CXT is an RTX for the static chain value for the function. */
8892
8893 void
8894 s390_initialize_trampoline (rtx addr, rtx fnaddr, rtx cxt)
8895 {
8896 emit_move_insn (gen_rtx_MEM (Pmode,
8897 memory_address (Pmode,
8898 plus_constant (addr, (TARGET_64BIT ? 16 : 8)))), cxt);
8899 emit_move_insn (gen_rtx_MEM (Pmode,
8900 memory_address (Pmode,
8901 plus_constant (addr, (TARGET_64BIT ? 24 : 12)))), fnaddr);
8902 }
8903
8904 /* Output assembler code to FILE to increment profiler label # LABELNO
8905 for profiling a function entry. */
8906
8907 void
8908 s390_function_profiler (FILE *file, int labelno)
8909 {
8910 rtx op[7];
8911
8912 char label[128];
8913 ASM_GENERATE_INTERNAL_LABEL (label, "LP", labelno);
8914
8915 fprintf (file, "# function profiler \n");
8916
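  /* op[0] is the return address register; op[1] is the stack slot at offset
     UNITS_PER_WORD from the stack pointer, used to save and restore op[0]
     around the call to _mcount.  */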
8917 op[0] = gen_rtx_REG (Pmode, RETURN_REGNUM);
8918 op[1] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
8919 op[1] = gen_rtx_MEM (Pmode, plus_constant (op[1], UNITS_PER_WORD));
8920
8921 op[2] = gen_rtx_REG (Pmode, 1);
8922 op[3] = gen_rtx_SYMBOL_REF (Pmode, label);
8923 SYMBOL_REF_FLAGS (op[3]) = SYMBOL_FLAG_LOCAL;
8924
8925 op[4] = gen_rtx_SYMBOL_REF (Pmode, "_mcount");
8926 if (flag_pic)
8927 {
8928 op[4] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[4]), UNSPEC_PLT);
8929 op[4] = gen_rtx_CONST (Pmode, op[4]);
8930 }
8931
8932 if (TARGET_64BIT)
8933 {
8934 output_asm_insn ("stg\t%0,%1", op);
8935 output_asm_insn ("larl\t%2,%3", op);
8936 output_asm_insn ("brasl\t%0,%4", op);
8937 output_asm_insn ("lg\t%0,%1", op);
8938 }
8939 else if (!flag_pic)
8940 {
8941 op[6] = gen_label_rtx ();
8942
8943 output_asm_insn ("st\t%0,%1", op);
8944 output_asm_insn ("bras\t%2,%l6", op);
8945 output_asm_insn (".long\t%4", op);
8946 output_asm_insn (".long\t%3", op);
8947 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
8948 output_asm_insn ("l\t%0,0(%2)", op);
8949 output_asm_insn ("l\t%2,4(%2)", op);
8950 output_asm_insn ("basr\t%0,%0", op);
8951 output_asm_insn ("l\t%0,%1", op);
8952 }
8953 else
8954 {
8955 op[5] = gen_label_rtx ();
8956 op[6] = gen_label_rtx ();
8957
8958 output_asm_insn ("st\t%0,%1", op);
8959 output_asm_insn ("bras\t%2,%l6", op);
8960 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[5]));
8961 output_asm_insn (".long\t%4-%l5", op);
8962 output_asm_insn (".long\t%3-%l5", op);
8963 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
8964 output_asm_insn ("lr\t%0,%2", op);
8965 output_asm_insn ("a\t%0,0(%2)", op);
8966 output_asm_insn ("a\t%2,4(%2)", op);
8967 output_asm_insn ("basr\t%0,%0", op);
8968 output_asm_insn ("l\t%0,%1", op);
8969 }
8970 }
8971
8972 /* Encode symbol attributes (local vs. global, tls model) of a SYMBOL_REF
8973 into its SYMBOL_REF_FLAGS. */
8974
8975 static void
8976 s390_encode_section_info (tree decl, rtx rtl, int first)
8977 {
8978 default_encode_section_info (decl, rtl, first);
8979
8980 if (TREE_CODE (decl) == VAR_DECL)
8981 {
8982       /* If a variable has a forced alignment of less than 2 bytes, mark it
8983          with SYMBOL_FLAG_ALIGN1 to prevent it from being used as a LARL
8984          operand.  */
8985 if (DECL_USER_ALIGN (decl) && DECL_ALIGN (decl) < 16)
8986 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_ALIGN1;
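      /* If the size or the alignment of the decl is unknown, or if its
	 alignment (for objects of up to 8 bytes) does not match its size,
	 we cannot assume that the symbol is naturally aligned.  */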
8987 if (!DECL_SIZE (decl)
8988 || !DECL_ALIGN (decl)
8989 || !host_integerp (DECL_SIZE (decl), 0)
8990 || (DECL_ALIGN (decl) <= 64
8991 && DECL_ALIGN (decl) != tree_low_cst (DECL_SIZE (decl), 0)))
8992 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
8993 }
8994
8995 /* Literal pool references don't have a decl so they are handled
8996 differently here. We rely on the information in the MEM_ALIGN
8997 entry to decide upon natural alignment. */
8998 if (MEM_P (rtl)
8999 && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
9000 && TREE_CONSTANT_POOL_ADDRESS_P (XEXP (rtl, 0))
9001 && (MEM_ALIGN (rtl) == 0
9002 || MEM_ALIGN (rtl) < GET_MODE_BITSIZE (GET_MODE (rtl))))
9003 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
9004 }
9005
9006 /* Output thunk to FILE that implements a C++ virtual function call (with
9007 multiple inheritance) to FUNCTION. The thunk adjusts the this pointer
9008 by DELTA, and unless VCALL_OFFSET is zero, applies an additional adjustment
9009 stored at VCALL_OFFSET in the vtable whose address is located at offset 0
9010 relative to the resulting this pointer. */
9011
9012 static void
9013 s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
9014 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
9015 tree function)
9016 {
9017 rtx op[10];
9018 int nonlocal = 0;
9019
9020 /* Operand 0 is the target function. */
9021 op[0] = XEXP (DECL_RTL (function), 0);
9022 if (flag_pic && !SYMBOL_REF_LOCAL_P (op[0]))
9023 {
9024 nonlocal = 1;
9025 op[0] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[0]),
9026 TARGET_64BIT ? UNSPEC_PLT : UNSPEC_GOT);
9027 op[0] = gen_rtx_CONST (Pmode, op[0]);
9028 }
9029
9030 /* Operand 1 is the 'this' pointer. */
9031 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
9032 op[1] = gen_rtx_REG (Pmode, 3);
9033 else
9034 op[1] = gen_rtx_REG (Pmode, 2);
9035
9036 /* Operand 2 is the delta. */
9037 op[2] = GEN_INT (delta);
9038
9039 /* Operand 3 is the vcall_offset. */
9040 op[3] = GEN_INT (vcall_offset);
9041
9042 /* Operand 4 is the temporary register. */
9043 op[4] = gen_rtx_REG (Pmode, 1);
9044
9045 /* Operands 5 to 8 can be used as labels. */
9046 op[5] = NULL_RTX;
9047 op[6] = NULL_RTX;
9048 op[7] = NULL_RTX;
9049 op[8] = NULL_RTX;
9050
9051 /* Operand 9 can be used for temporary register. */
9052 op[9] = NULL_RTX;
9053
9054 /* Generate code. */
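  /* The generated thunk adds DELTA to the this pointer, optionally applies
     the vcall adjustment by loading the vtable slot at VCALL_OFFSET, and
     finally tail-jumps to the target function.  Constants that do not fit
     into the available immediate fields are materialized from a small
     literal pool emitted after the code.  */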
9055 if (TARGET_64BIT)
9056 {
9057 /* Setup literal pool pointer if required. */
9058 if ((!DISP_IN_RANGE (delta)
9059 && !CONST_OK_FOR_K (delta)
9060 && !CONST_OK_FOR_Os (delta))
9061 || (!DISP_IN_RANGE (vcall_offset)
9062 && !CONST_OK_FOR_K (vcall_offset)
9063 && !CONST_OK_FOR_Os (vcall_offset)))
9064 {
9065 op[5] = gen_label_rtx ();
9066 output_asm_insn ("larl\t%4,%5", op);
9067 }
9068
9069 /* Add DELTA to this pointer. */
9070 if (delta)
9071 {
9072 if (CONST_OK_FOR_J (delta))
9073 output_asm_insn ("la\t%1,%2(%1)", op);
9074 else if (DISP_IN_RANGE (delta))
9075 output_asm_insn ("lay\t%1,%2(%1)", op);
9076 else if (CONST_OK_FOR_K (delta))
9077 output_asm_insn ("aghi\t%1,%2", op);
9078 else if (CONST_OK_FOR_Os (delta))
9079 output_asm_insn ("agfi\t%1,%2", op);
9080 else
9081 {
9082 op[6] = gen_label_rtx ();
9083 output_asm_insn ("agf\t%1,%6-%5(%4)", op);
9084 }
9085 }
9086
9087 /* Perform vcall adjustment. */
9088 if (vcall_offset)
9089 {
9090 if (DISP_IN_RANGE (vcall_offset))
9091 {
9092 output_asm_insn ("lg\t%4,0(%1)", op);
9093 output_asm_insn ("ag\t%1,%3(%4)", op);
9094 }
9095 else if (CONST_OK_FOR_K (vcall_offset))
9096 {
9097 output_asm_insn ("lghi\t%4,%3", op);
9098 output_asm_insn ("ag\t%4,0(%1)", op);
9099 output_asm_insn ("ag\t%1,0(%4)", op);
9100 }
9101 else if (CONST_OK_FOR_Os (vcall_offset))
9102 {
9103 output_asm_insn ("lgfi\t%4,%3", op);
9104 output_asm_insn ("ag\t%4,0(%1)", op);
9105 output_asm_insn ("ag\t%1,0(%4)", op);
9106 }
9107 else
9108 {
9109 op[7] = gen_label_rtx ();
9110 output_asm_insn ("llgf\t%4,%7-%5(%4)", op);
9111 output_asm_insn ("ag\t%4,0(%1)", op);
9112 output_asm_insn ("ag\t%1,0(%4)", op);
9113 }
9114 }
9115
9116 /* Jump to target. */
9117 output_asm_insn ("jg\t%0", op);
9118
9119 /* Output literal pool if required. */
9120 if (op[5])
9121 {
9122 output_asm_insn (".align\t4", op);
9123 targetm.asm_out.internal_label (file, "L",
9124 CODE_LABEL_NUMBER (op[5]));
9125 }
9126 if (op[6])
9127 {
9128 targetm.asm_out.internal_label (file, "L",
9129 CODE_LABEL_NUMBER (op[6]));
9130 output_asm_insn (".long\t%2", op);
9131 }
9132 if (op[7])
9133 {
9134 targetm.asm_out.internal_label (file, "L",
9135 CODE_LABEL_NUMBER (op[7]));
9136 output_asm_insn (".long\t%3", op);
9137 }
9138 }
9139 else
9140 {
9141 /* Setup base pointer if required. */
9142 if (!vcall_offset
9143 || (!DISP_IN_RANGE (delta)
9144 && !CONST_OK_FOR_K (delta)
9145 && !CONST_OK_FOR_Os (delta))
9146           || (!DISP_IN_RANGE (vcall_offset)
9147 && !CONST_OK_FOR_K (vcall_offset)
9148 && !CONST_OK_FOR_Os (vcall_offset)))
9149 {
9150 op[5] = gen_label_rtx ();
9151 output_asm_insn ("basr\t%4,0", op);
9152 targetm.asm_out.internal_label (file, "L",
9153 CODE_LABEL_NUMBER (op[5]));
9154 }
9155
9156 /* Add DELTA to this pointer. */
9157 if (delta)
9158 {
9159 if (CONST_OK_FOR_J (delta))
9160 output_asm_insn ("la\t%1,%2(%1)", op);
9161 else if (DISP_IN_RANGE (delta))
9162 output_asm_insn ("lay\t%1,%2(%1)", op);
9163 else if (CONST_OK_FOR_K (delta))
9164 output_asm_insn ("ahi\t%1,%2", op);
9165 else if (CONST_OK_FOR_Os (delta))
9166 output_asm_insn ("afi\t%1,%2", op);
9167 else
9168 {
9169 op[6] = gen_label_rtx ();
9170 output_asm_insn ("a\t%1,%6-%5(%4)", op);
9171 }
9172 }
9173
9174 /* Perform vcall adjustment. */
9175 if (vcall_offset)
9176 {
9177 if (CONST_OK_FOR_J (vcall_offset))
9178 {
9179 output_asm_insn ("l\t%4,0(%1)", op);
9180 output_asm_insn ("a\t%1,%3(%4)", op);
9181 }
9182 else if (DISP_IN_RANGE (vcall_offset))
9183 {
9184 output_asm_insn ("l\t%4,0(%1)", op);
9185 output_asm_insn ("ay\t%1,%3(%4)", op);
9186 }
9187 else if (CONST_OK_FOR_K (vcall_offset))
9188 {
9189 output_asm_insn ("lhi\t%4,%3", op);
9190 output_asm_insn ("a\t%4,0(%1)", op);
9191 output_asm_insn ("a\t%1,0(%4)", op);
9192 }
9193 else if (CONST_OK_FOR_Os (vcall_offset))
9194 {
9195 output_asm_insn ("iilf\t%4,%3", op);
9196 output_asm_insn ("a\t%4,0(%1)", op);
9197 output_asm_insn ("a\t%1,0(%4)", op);
9198 }
9199 else
9200 {
9201 op[7] = gen_label_rtx ();
9202 output_asm_insn ("l\t%4,%7-%5(%4)", op);
9203 output_asm_insn ("a\t%4,0(%1)", op);
9204 output_asm_insn ("a\t%1,0(%4)", op);
9205 }
9206
9207 /* We had to clobber the base pointer register.
9208 Re-setup the base pointer (with a different base). */
9209 op[5] = gen_label_rtx ();
9210 output_asm_insn ("basr\t%4,0", op);
9211 targetm.asm_out.internal_label (file, "L",
9212 CODE_LABEL_NUMBER (op[5]));
9213 }
9214
9215 /* Jump to target. */
9216 op[8] = gen_label_rtx ();
9217
9218 if (!flag_pic)
9219 output_asm_insn ("l\t%4,%8-%5(%4)", op);
9220 else if (!nonlocal)
9221 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9222 /* We cannot call through .plt, since .plt requires %r12 loaded. */
9223 else if (flag_pic == 1)
9224 {
9225 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9226 output_asm_insn ("l\t%4,%0(%4)", op);
9227 }
9228 else if (flag_pic == 2)
9229 {
9230 op[9] = gen_rtx_REG (Pmode, 0);
9231 output_asm_insn ("l\t%9,%8-4-%5(%4)", op);
9232 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9233 output_asm_insn ("ar\t%4,%9", op);
9234 output_asm_insn ("l\t%4,0(%4)", op);
9235 }
9236
9237 output_asm_insn ("br\t%4", op);
9238
9239 /* Output literal pool. */
9240 output_asm_insn (".align\t4", op);
9241
9242 if (nonlocal && flag_pic == 2)
9243 output_asm_insn (".long\t%0", op);
9244 if (nonlocal)
9245 {
9246 op[0] = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
9247 SYMBOL_REF_FLAGS (op[0]) = SYMBOL_FLAG_LOCAL;
9248 }
9249
9250 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[8]));
9251 if (!flag_pic)
9252 output_asm_insn (".long\t%0", op);
9253 else
9254 output_asm_insn (".long\t%0-%5", op);
9255
9256 if (op[6])
9257 {
9258 targetm.asm_out.internal_label (file, "L",
9259 CODE_LABEL_NUMBER (op[6]));
9260 output_asm_insn (".long\t%2", op);
9261 }
9262 if (op[7])
9263 {
9264 targetm.asm_out.internal_label (file, "L",
9265 CODE_LABEL_NUMBER (op[7]));
9266 output_asm_insn (".long\t%3", op);
9267 }
9268 }
9269 }
9270
9271 static bool
9272 s390_valid_pointer_mode (enum machine_mode mode)
9273 {
9274 return (mode == SImode || (TARGET_64BIT && mode == DImode));
9275 }
9276
9277 /* Checks whether the given CALL_EXPR would use a call-saved
9278    register.  This is used to decide whether sibling call
9279 optimization could be performed on the respective function
9280 call. */
9281
9282 static bool
9283 s390_call_saved_register_used (tree call_expr)
9284 {
9285 CUMULATIVE_ARGS cum;
9286 tree parameter;
9287 enum machine_mode mode;
9288 tree type;
9289 rtx parm_rtx;
9290 int reg, i;
9291
9292 INIT_CUMULATIVE_ARGS (cum, NULL, NULL, 0, 0);
9293
9294 for (i = 0; i < call_expr_nargs (call_expr); i++)
9295 {
9296 parameter = CALL_EXPR_ARG (call_expr, i);
9297 gcc_assert (parameter);
9298
9299 /* For an undeclared variable passed as parameter we will get
9300 an ERROR_MARK node here. */
9301 if (TREE_CODE (parameter) == ERROR_MARK)
9302 return true;
9303
9304 type = TREE_TYPE (parameter);
9305 gcc_assert (type);
9306
9307 mode = TYPE_MODE (type);
9308 gcc_assert (mode);
9309
9310 if (pass_by_reference (&cum, mode, type, true))
9311 {
9312 mode = Pmode;
9313 type = build_pointer_type (type);
9314 }
9315
9316 parm_rtx = s390_function_arg (&cum, mode, type, 0);
9317
9318 s390_function_arg_advance (&cum, mode, type, 0);
9319
9320 if (parm_rtx && REG_P (parm_rtx))
9321 {
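	  /* If any hard register holding (part of) this argument is
	     call-saved, the caller's epilogue would restore it before the
	     sibcall jump, so the argument value could not survive; report
	     such a use.  */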
9322 for (reg = 0;
9323 reg < HARD_REGNO_NREGS (REGNO (parm_rtx), GET_MODE (parm_rtx));
9324 reg++)
9325 if (! call_used_regs[reg + REGNO (parm_rtx)])
9326 return true;
9327 }
9328 }
9329 return false;
9330 }
9331
9332 /* Return true if the given call expression can be
9333 turned into a sibling call.
9334 DECL holds the declaration of the function to be called whereas
9335 EXP is the call expression itself. */
9336
9337 static bool
9338 s390_function_ok_for_sibcall (tree decl, tree exp)
9339 {
9340 /* The TPF epilogue uses register 1. */
9341 if (TARGET_TPF_PROFILING)
9342 return false;
9343
9344 /* The 31 bit PLT code uses register 12 (GOT pointer - caller saved)
9345 which would have to be restored before the sibcall. */
9346 if (!TARGET_64BIT && flag_pic && decl && !targetm.binds_local_p (decl))
9347 return false;
9348
9349 /* Register 6 on s390 is available as an argument register but unfortunately
9350      "call saved".  This makes functions needing this register for arguments
9351 not suitable for sibcalls. */
9352 return !s390_call_saved_register_used (exp);
9353 }
9354
9355 /* Return the fixed registers used for condition codes. */
9356
9357 static bool
9358 s390_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
9359 {
9360 *p1 = CC_REGNUM;
9361 *p2 = INVALID_REGNUM;
9362
9363 return true;
9364 }
9365
9366 /* This function is used by the call expanders of the machine description.
9367 It emits the call insn itself together with the necessary operations
9368 to adjust the target address and returns the emitted insn.
9369 ADDR_LOCATION is the target address rtx
9370 TLS_CALL the location of the thread-local symbol
9371 RESULT_REG the register where the result of the call should be stored
9372 RETADDR_REG the register where the return address should be stored
9373 If this parameter is NULL_RTX the call is considered
9374 to be a sibling call. */
9375
9376 rtx
9377 s390_emit_call (rtx addr_location, rtx tls_call, rtx result_reg,
9378 rtx retaddr_reg)
9379 {
9380 bool plt_call = false;
9381 rtx insn;
9382 rtx call;
9383 rtx clobber;
9384 rtvec vec;
9385
9386 /* Direct function calls need special treatment. */
9387 if (GET_CODE (addr_location) == SYMBOL_REF)
9388 {
9389 /* When calling a global routine in PIC mode, we must
9390 replace the symbol itself with the PLT stub. */
9391 if (flag_pic && !SYMBOL_REF_LOCAL_P (addr_location))
9392 {
9393 addr_location = gen_rtx_UNSPEC (Pmode,
9394 gen_rtvec (1, addr_location),
9395 UNSPEC_PLT);
9396 addr_location = gen_rtx_CONST (Pmode, addr_location);
9397 plt_call = true;
9398 }
9399
9400 /* Unless we can use the bras(l) insn, force the
9401 routine address into a register. */
9402 if (!TARGET_SMALL_EXEC && !TARGET_CPU_ZARCH)
9403 {
9404 if (flag_pic)
9405 addr_location = legitimize_pic_address (addr_location, 0);
9406 else
9407 addr_location = force_reg (Pmode, addr_location);
9408 }
9409 }
9410
9411   /* If it is already an indirect call, or the code above moved the
9412      SYMBOL_REF somewhere else, make sure the address can be found in
9413      register 1.  */
9414 if (retaddr_reg == NULL_RTX
9415 && GET_CODE (addr_location) != SYMBOL_REF
9416 && !plt_call)
9417 {
9418 emit_move_insn (gen_rtx_REG (Pmode, SIBCALL_REGNUM), addr_location);
9419 addr_location = gen_rtx_REG (Pmode, SIBCALL_REGNUM);
9420 }
9421
9422 addr_location = gen_rtx_MEM (QImode, addr_location);
9423 call = gen_rtx_CALL (VOIDmode, addr_location, const0_rtx);
9424
9425 if (result_reg != NULL_RTX)
9426 call = gen_rtx_SET (VOIDmode, result_reg, call);
9427
9428 if (retaddr_reg != NULL_RTX)
9429 {
9430 clobber = gen_rtx_CLOBBER (VOIDmode, retaddr_reg);
9431
9432 if (tls_call != NULL_RTX)
9433 vec = gen_rtvec (3, call, clobber,
9434 gen_rtx_USE (VOIDmode, tls_call));
9435 else
9436 vec = gen_rtvec (2, call, clobber);
9437
9438 call = gen_rtx_PARALLEL (VOIDmode, vec);
9439 }
9440
9441 insn = emit_call_insn (call);
9442
9443 /* 31-bit PLT stubs and tls calls use the GOT register implicitly. */
9444 if ((!TARGET_64BIT && plt_call) || tls_call != NULL_RTX)
9445 {
9446 /* s390_function_ok_for_sibcall should
9447 have denied sibcalls in this case. */
9448 gcc_assert (retaddr_reg != NULL_RTX);
9449
9450 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
9451 }
9452 return insn;
9453 }
9454
9455 /* Implement CONDITIONAL_REGISTER_USAGE. */
9456
9457 void
9458 s390_conditional_register_usage (void)
9459 {
9460 int i;
9461
9462 if (flag_pic)
9463 {
9464 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
9465 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
9466 }
9467 if (TARGET_CPU_ZARCH)
9468 {
9469 fixed_regs[BASE_REGNUM] = 0;
9470 call_used_regs[BASE_REGNUM] = 0;
9471 fixed_regs[RETURN_REGNUM] = 0;
9472 call_used_regs[RETURN_REGNUM] = 0;
9473 }
9474 if (TARGET_64BIT)
9475 {
9476 for (i = 24; i < 32; i++)
9477 call_used_regs[i] = call_really_used_regs[i] = 0;
9478 }
9479 else
9480 {
9481 for (i = 18; i < 20; i++)
9482 call_used_regs[i] = call_really_used_regs[i] = 0;
9483 }
9484
9485 if (TARGET_SOFT_FLOAT)
9486 {
9487 for (i = 16; i < 32; i++)
9488 call_used_regs[i] = fixed_regs[i] = 1;
9489 }
9490 }
9491
9492 /* Corresponding function to eh_return expander. */
9493
9494 static GTY(()) rtx s390_tpf_eh_return_symbol;
9495 void
9496 s390_emit_tpf_eh_return (rtx target)
9497 {
9498 rtx insn, reg;
9499
9500 if (!s390_tpf_eh_return_symbol)
9501 s390_tpf_eh_return_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tpf_eh_return");
9502
9503 reg = gen_rtx_REG (Pmode, 2);
9504
9505 emit_move_insn (reg, target);
9506 insn = s390_emit_call (s390_tpf_eh_return_symbol, NULL_RTX, reg,
9507 gen_rtx_REG (Pmode, RETURN_REGNUM));
9508 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
9509
9510 emit_move_insn (EH_RETURN_HANDLER_RTX, reg);
9511 }
9512
9513 /* Rework the prologue/epilogue to avoid saving/restoring
9514 registers unnecessarily. */
9515
9516 static void
9517 s390_optimize_prologue (void)
9518 {
9519 rtx insn, new_insn, next_insn;
9520
9521 /* Do a final recompute of the frame-related data. */
9522
9523 s390_update_frame_layout ();
9524
9525 /* If all special registers are in fact used, there's nothing we
9526 can do, so no point in walking the insn list. */
9527
9528 if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
9529 && cfun_frame_layout.last_save_gpr >= BASE_REGNUM
9530 && (TARGET_CPU_ZARCH
9531 || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
9532 && cfun_frame_layout.last_save_gpr >= RETURN_REGNUM)))
9533 return;
9534
9535 /* Search for prologue/epilogue insns and replace them. */
9536
9537 for (insn = get_insns (); insn; insn = next_insn)
9538 {
9539 int first, last, off;
9540 rtx set, base, offset;
9541
9542 next_insn = NEXT_INSN (insn);
9543
9544 if (GET_CODE (insn) != INSN)
9545 continue;
9546
9547 if (GET_CODE (PATTERN (insn)) == PARALLEL
9548 && store_multiple_operation (PATTERN (insn), VOIDmode))
9549 {
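	  /* A store multiple emitted by the prologue expander is replaced by
	     one that covers only the GPRs that actually still need to be
	     saved (or it is removed entirely if none do).  */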
9550 set = XVECEXP (PATTERN (insn), 0, 0);
9551 first = REGNO (SET_SRC (set));
9552 last = first + XVECLEN (PATTERN (insn), 0) - 1;
9553 offset = const0_rtx;
9554 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
9555 off = INTVAL (offset);
9556
9557 if (GET_CODE (base) != REG || off < 0)
9558 continue;
9559 if (cfun_frame_layout.first_save_gpr != -1
9560 && (cfun_frame_layout.first_save_gpr < first
9561 || cfun_frame_layout.last_save_gpr > last))
9562 continue;
9563 if (REGNO (base) != STACK_POINTER_REGNUM
9564 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
9565 continue;
9566 if (first > BASE_REGNUM || last < BASE_REGNUM)
9567 continue;
9568
9569 if (cfun_frame_layout.first_save_gpr != -1)
9570 {
9571 new_insn = save_gprs (base,
9572 off + (cfun_frame_layout.first_save_gpr
9573 - first) * UNITS_PER_WORD,
9574 cfun_frame_layout.first_save_gpr,
9575 cfun_frame_layout.last_save_gpr);
9576 new_insn = emit_insn_before (new_insn, insn);
9577 INSN_ADDRESSES_NEW (new_insn, -1);
9578 }
9579
9580 remove_insn (insn);
9581 continue;
9582 }
9583
9584 if (cfun_frame_layout.first_save_gpr == -1
9585 && GET_CODE (PATTERN (insn)) == SET
9586 && GET_CODE (SET_SRC (PATTERN (insn))) == REG
9587 && (REGNO (SET_SRC (PATTERN (insn))) == BASE_REGNUM
9588 || (!TARGET_CPU_ZARCH
9589 && REGNO (SET_SRC (PATTERN (insn))) == RETURN_REGNUM))
9590 && GET_CODE (SET_DEST (PATTERN (insn))) == MEM)
9591 {
9592 set = PATTERN (insn);
9593 first = REGNO (SET_SRC (set));
9594 offset = const0_rtx;
9595 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
9596 off = INTVAL (offset);
9597
9598 if (GET_CODE (base) != REG || off < 0)
9599 continue;
9600 if (REGNO (base) != STACK_POINTER_REGNUM
9601 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
9602 continue;
9603
9604 remove_insn (insn);
9605 continue;
9606 }
9607
9608 if (GET_CODE (PATTERN (insn)) == PARALLEL
9609 && load_multiple_operation (PATTERN (insn), VOIDmode))
9610 {
9611 set = XVECEXP (PATTERN (insn), 0, 0);
9612 first = REGNO (SET_DEST (set));
9613 last = first + XVECLEN (PATTERN (insn), 0) - 1;
9614 offset = const0_rtx;
9615 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
9616 off = INTVAL (offset);
9617
9618 if (GET_CODE (base) != REG || off < 0)
9619 continue;
9620 if (cfun_frame_layout.first_restore_gpr != -1
9621 && (cfun_frame_layout.first_restore_gpr < first
9622 || cfun_frame_layout.last_restore_gpr > last))
9623 continue;
9624 if (REGNO (base) != STACK_POINTER_REGNUM
9625 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
9626 continue;
9627 if (first > BASE_REGNUM || last < BASE_REGNUM)
9628 continue;
9629
9630 if (cfun_frame_layout.first_restore_gpr != -1)
9631 {
9632 new_insn = restore_gprs (base,
9633 off + (cfun_frame_layout.first_restore_gpr
9634 - first) * UNITS_PER_WORD,
9635 cfun_frame_layout.first_restore_gpr,
9636 cfun_frame_layout.last_restore_gpr);
9637 new_insn = emit_insn_before (new_insn, insn);
9638 INSN_ADDRESSES_NEW (new_insn, -1);
9639 }
9640
9641 remove_insn (insn);
9642 continue;
9643 }
9644
9645 if (cfun_frame_layout.first_restore_gpr == -1
9646 && GET_CODE (PATTERN (insn)) == SET
9647 && GET_CODE (SET_DEST (PATTERN (insn))) == REG
9648 && (REGNO (SET_DEST (PATTERN (insn))) == BASE_REGNUM
9649 || (!TARGET_CPU_ZARCH
9650 && REGNO (SET_DEST (PATTERN (insn))) == RETURN_REGNUM))
9651 && GET_CODE (SET_SRC (PATTERN (insn))) == MEM)
9652 {
9653 set = PATTERN (insn);
9654 first = REGNO (SET_DEST (set));
9655 offset = const0_rtx;
9656 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
9657 off = INTVAL (offset);
9658
9659 if (GET_CODE (base) != REG || off < 0)
9660 continue;
9661 if (REGNO (base) != STACK_POINTER_REGNUM
9662 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
9663 continue;
9664
9665 remove_insn (insn);
9666 continue;
9667 }
9668 }
9669 }
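/* An illustrative sketch of the rewrite performed above (register
   numbers and offsets are invented for the example, assuming 64-bit
   mode with UNITS_PER_WORD == 8): if the prologue emitted

     stmg  %r6,%r15,48(%r15)

   but the final frame layout records first_save_gpr == 14 and
   last_save_gpr == 15, the store-multiple is replaced by the narrower

     stmg  %r14,%r15,112(%r15)

   i.e. the original offset 48 plus (14 - 6) * 8.  If no GPR needs to
   be saved at all, the insn is simply removed.  The epilogue
   load-multiple insns are treated analogously.  */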
9670
9671 /* On z10 the dynamic branch prediction must see the backward jump in
9672    a window of 384 bytes. If not, it falls back to the static
9673 prediction. This function rearranges the loop backward branch in a
9674 way which makes the static prediction always correct. The function
9675 returns true if it added an instruction. */
9676 static bool
9677 s390_z10_fix_long_loop_prediction (rtx insn)
9678 {
9679 rtx set = single_set (insn);
9680 rtx code_label, label_ref, new_label;
9681 rtx uncond_jump;
9682 rtx cur_insn;
9683 rtx tmp;
9684 int distance;
9685
9686 /* This will exclude branch on count and branch on index patterns
9687 since these are correctly statically predicted. */
9688 if (!set
9689 || SET_DEST (set) != pc_rtx
9690       || GET_CODE (SET_SRC (set)) != IF_THEN_ELSE)
9691 return false;
9692
9693 label_ref = (GET_CODE (XEXP (SET_SRC (set), 1)) == LABEL_REF ?
9694 XEXP (SET_SRC (set), 1) : XEXP (SET_SRC (set), 2));
9695
9696 gcc_assert (GET_CODE (label_ref) == LABEL_REF);
9697
9698 code_label = XEXP (label_ref, 0);
9699
9700 if (INSN_ADDRESSES (INSN_UID (code_label)) == -1
9701 || INSN_ADDRESSES (INSN_UID (insn)) == -1
9702 || (INSN_ADDRESSES (INSN_UID (insn))
9703 - INSN_ADDRESSES (INSN_UID (code_label)) < Z10_PREDICT_DISTANCE))
9704 return false;
9705
9706 for (distance = 0, cur_insn = PREV_INSN (insn);
9707 distance < Z10_PREDICT_DISTANCE - 6;
9708 distance += get_attr_length (cur_insn), cur_insn = PREV_INSN (cur_insn))
9709 if (!cur_insn || JUMP_P (cur_insn) || LABEL_P (cur_insn))
9710 return false;
9711
9712 new_label = gen_label_rtx ();
9713 uncond_jump = emit_jump_insn_after (
9714 gen_rtx_SET (VOIDmode, pc_rtx,
9715 gen_rtx_LABEL_REF (VOIDmode, code_label)),
9716 insn);
9717 emit_label_after (new_label, uncond_jump);
9718
9719 tmp = XEXP (SET_SRC (set), 1);
9720 XEXP (SET_SRC (set), 1) = XEXP (SET_SRC (set), 2);
9721 XEXP (SET_SRC (set), 2) = tmp;
9722 INSN_CODE (insn) = -1;
9723
9724 XEXP (label_ref, 0) = new_label;
9725 JUMP_LABEL (insn) = new_label;
9726 JUMP_LABEL (uncond_jump) = code_label;
9727
9728 return true;
9729 }
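/* A rough sketch of the transformation implemented above (assembly
   mnemonics are only illustrative, not generated output):

     before:                              after:

     loop_head:                           loop_head:
       ...                                  ...
       jCC   loop_head   # > 384 bytes      jNCC  new_label
                                            j     loop_head
                                          new_label:

   The far backward branch becomes an unconditional jump, which the
   static prediction always gets right, while the inverted condition
   now takes a short forward branch out of the loop.  */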
9730
9731 /* Returns 1 if INSN reads the value of REG for purposes not related
9732 to addressing of memory, and 0 otherwise. */
9733 static int
9734 s390_non_addr_reg_read_p (rtx reg, rtx insn)
9735 {
9736 return reg_referenced_p (reg, PATTERN (insn))
9737 && !reg_used_in_mem_p (REGNO (reg), PATTERN (insn));
9738 }
9739
9740 /* Starting from INSN, find_cond_jump looks downwards in the insn
9741 stream for a single jump insn which is the last user of the
9742 condition code set in INSN. */
9743 static rtx
9744 find_cond_jump (rtx insn)
9745 {
9746 for (; insn; insn = NEXT_INSN (insn))
9747 {
9748 rtx ite, cc;
9749
9750 if (LABEL_P (insn))
9751 break;
9752
9753 if (!JUMP_P (insn))
9754 {
9755 if (reg_mentioned_p (gen_rtx_REG (CCmode, CC_REGNUM), insn))
9756 break;
9757 continue;
9758 }
9759
9760 /* This will be triggered by a return. */
9761 if (GET_CODE (PATTERN (insn)) != SET)
9762 break;
9763
9764 gcc_assert (SET_DEST (PATTERN (insn)) == pc_rtx);
9765 ite = SET_SRC (PATTERN (insn));
9766
9767 if (GET_CODE (ite) != IF_THEN_ELSE)
9768 break;
9769
9770 cc = XEXP (XEXP (ite, 0), 0);
9771 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc)))
9772 break;
9773
9774 if (find_reg_note (insn, REG_DEAD, cc))
9775 return insn;
9776 break;
9777 }
9778
9779 return NULL_RTX;
9780 }
9781
9782 /* Swap the condition in COND and the operands in OP0 and OP1 so that
9783 the semantics does not change. If NULL_RTX is passed as COND the
9784 function tries to find the conditional jump starting with INSN. */
9785 static void
9786 s390_swap_cmp (rtx cond, rtx *op0, rtx *op1, rtx insn)
9787 {
9788 rtx tmp = *op0;
9789
9790 if (cond == NULL_RTX)
9791 {
9792 rtx jump = find_cond_jump (NEXT_INSN (insn));
9793 jump = jump ? single_set (jump) : NULL_RTX;
9794
9795 if (jump == NULL_RTX)
9796 return;
9797
9798 cond = XEXP (XEXP (jump, 1), 0);
9799 }
9800
9801 *op0 = *op1;
9802 *op1 = tmp;
9803 PUT_CODE (cond, swap_condition (GET_CODE (cond)));
9804 }
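/* For example (purely illustrative), a condition together with its
   operands such as

     (gt (reg A) (reg B))

   becomes

     (lt (reg B) (reg A))

   which tests the same relation with the operand roles exchanged.  */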
9805
9806 /* On z10, instructions of the compare-and-branch family have the
9807    property of accessing the register occurring as the second operand
9808 its bits complemented. If such a compare is grouped with a second
9809 instruction that accesses the same register non-complemented, and
9810 if that register's value is delivered via a bypass, then the
9811 pipeline recycles, thereby causing significant performance decline.
9812 This function locates such situations and exchanges the two
9813    operands of the compare.  The function returns true whenever it
9814 added an insn. */
9815 static bool
9816 s390_z10_optimize_cmp (rtx insn)
9817 {
9818 rtx prev_insn, next_insn;
9819 bool insn_added_p = false;
9820 rtx cond, *op0, *op1;
9821
9822 if (GET_CODE (PATTERN (insn)) == PARALLEL)
9823 {
9824 /* Handle compare and branch and branch on count
9825 instructions. */
9826 rtx pattern = single_set (insn);
9827
9828 if (!pattern
9829 || SET_DEST (pattern) != pc_rtx
9830 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE)
9831 return false;
9832
9833 cond = XEXP (SET_SRC (pattern), 0);
9834 op0 = &XEXP (cond, 0);
9835 op1 = &XEXP (cond, 1);
9836 }
9837 else if (GET_CODE (PATTERN (insn)) == SET)
9838 {
9839 rtx src, dest;
9840
9841 /* Handle normal compare instructions. */
9842 src = SET_SRC (PATTERN (insn));
9843 dest = SET_DEST (PATTERN (insn));
9844
9845 if (!REG_P (dest)
9846 || !CC_REGNO_P (REGNO (dest))
9847 || GET_CODE (src) != COMPARE)
9848 return false;
9849
9850 /* s390_swap_cmp will try to find the conditional
9851 jump when passing NULL_RTX as condition. */
9852 cond = NULL_RTX;
9853 op0 = &XEXP (src, 0);
9854 op1 = &XEXP (src, 1);
9855 }
9856 else
9857 return false;
9858
9859 if (!REG_P (*op0) || !REG_P (*op1))
9860 return false;
9861
9862       /* Swap the COMPARE arguments and the condition mask if there is a
9863 conflicting access in the previous insn. */
9864 prev_insn = PREV_INSN (insn);
9865 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
9866 && reg_referenced_p (*op1, PATTERN (prev_insn)))
9867 s390_swap_cmp (cond, op0, op1, insn);
9868
9869 /* Check if there is a conflict with the next insn. If there
9870 was no conflict with the previous insn, then swap the
9871          COMPARE arguments and the condition mask.  If we already swapped
9872 the operands, or if swapping them would cause a conflict
9873 with the previous insn, issue a NOP after the COMPARE in
9874          order to separate the two instructions. */
9875 next_insn = NEXT_INSN (insn);
9876 if (next_insn != NULL_RTX && INSN_P (next_insn)
9877 && s390_non_addr_reg_read_p (*op1, next_insn))
9878 {
9879 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
9880 && s390_non_addr_reg_read_p (*op0, prev_insn))
9881 {
9882 if (REGNO (*op1) == 0)
9883 emit_insn_after (gen_nop1 (), insn);
9884 else
9885 emit_insn_after (gen_nop (), insn);
9886 insn_added_p = true;
9887 }
9888 else
9889 s390_swap_cmp (cond, op0, op1, insn);
9890 }
9891 return insn_added_p;
9892 }
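/* An illustrative example (registers and extended mnemonics chosen
   only for the sketch): given a group such as

     lr    %r3,%r5          # %r3 is written and delivered via bypass
     crjh  %r2,%r3,label    # compare-and-branch reads %r3 as the
                            # (complemented) second operand

   the compare operands and the condition are swapped so that %r3 is
   read as the uncomplemented first operand:

     crjl  %r3,%r2,label

   If the swap would merely move the conflict to the other neighbouring
   insn, a NOP is emitted after the compare instead, separating the two
   instructions.  */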
9893
9894 /* Perform machine-dependent processing. */
9895
9896 static void
9897 s390_reorg (void)
9898 {
9899 bool pool_overflow = false;
9900
9901 /* Make sure all splits have been performed; splits after
9902 machine_dependent_reorg might confuse insn length counts. */
9903 split_all_insns_noflow ();
9904
9905 /* Install the main literal pool and the associated base
9906 register load insns.
9907
9908 In addition, there are two problematic situations we need
9909 to correct:
9910
9911 - the literal pool might be > 4096 bytes in size, so that
9912 some of its elements cannot be directly accessed
9913
9914 - a branch target might be > 64K away from the branch, so that
9915 it is not possible to use a PC-relative instruction.
9916
9917 To fix those, we split the single literal pool into multiple
9918 pool chunks, reloading the pool base register at various
9919 points throughout the function to ensure it always points to
9920 the pool chunk the following code expects, and / or replace
9921 PC-relative branches by absolute branches.
9922
9923 However, the two problems are interdependent: splitting the
9924 literal pool can move a branch further away from its target,
9925 causing the 64K limit to overflow, and on the other hand,
9926 replacing a PC-relative branch by an absolute branch means
9927 we need to put the branch target address into the literal
9928 pool, possibly causing it to overflow.
9929
9930 So, we loop trying to fix up both problems until we manage
9931 to satisfy both conditions at the same time. Note that the
9932 loop is guaranteed to terminate as every pass of the loop
9933 strictly decreases the total number of PC-relative branches
9934 in the function. (This is not completely true as there
9935 might be branch-over-pool insns introduced by chunkify_start.
9936      Those never need to be split, however.) */
9937
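  /* A concrete (illustrative) instance of the interaction described
     above: on a non-zarch target a relative branch has a signed 16-bit
     halfword displacement, i.e. a +-64K range.  s390_split_branches
     rewrites an out-of-range branch so that the target address is
     loaded from the literal pool and the branch goes via a register;
     the added address constant enlarges the pool and may push it past
     the 4096-byte limit, forcing another chunkification pass.  */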
9938 for (;;)
9939 {
9940 struct constant_pool *pool = NULL;
9941
9942 /* Collect the literal pool. */
9943 if (!pool_overflow)
9944 {
9945 pool = s390_mainpool_start ();
9946 if (!pool)
9947 pool_overflow = true;
9948 }
9949
9950 /* If literal pool overflowed, start to chunkify it. */
9951 if (pool_overflow)
9952 pool = s390_chunkify_start ();
9953
9954 /* Split out-of-range branches. If this has created new
9955 literal pool entries, cancel current chunk list and
9956 recompute it. zSeries machines have large branch
9957 instructions, so we never need to split a branch. */
9958 if (!TARGET_CPU_ZARCH && s390_split_branches ())
9959 {
9960 if (pool_overflow)
9961 s390_chunkify_cancel (pool);
9962 else
9963 s390_mainpool_cancel (pool);
9964
9965 continue;
9966 }
9967
9968 /* If we made it up to here, both conditions are satisfied.
9969 Finish up literal pool related changes. */
9970 if (pool_overflow)
9971 s390_chunkify_finish (pool);
9972 else
9973 s390_mainpool_finish (pool);
9974
9975 /* We're done splitting branches. */
9976 cfun->machine->split_branches_pending_p = false;
9977 break;
9978 }
9979
9980 /* Generate out-of-pool execute target insns. */
9981 if (TARGET_CPU_ZARCH)
9982 {
9983 rtx insn, label, target;
9984
9985 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9986 {
9987 label = s390_execute_label (insn);
9988 if (!label)
9989 continue;
9990
9991 gcc_assert (label != const0_rtx);
9992
9993 target = emit_label (XEXP (label, 0));
9994 INSN_ADDRESSES_NEW (target, -1);
9995
9996 target = emit_insn (s390_execute_target (insn));
9997 INSN_ADDRESSES_NEW (target, -1);
9998 }
9999 }
10000
10001 /* Try to optimize prologue and epilogue further. */
10002 s390_optimize_prologue ();
10003
10004 /* Walk over the insns and do some z10 specific changes. */
10005 if (s390_tune == PROCESSOR_2097_Z10)
10006 {
10007 rtx insn;
10008 bool insn_added_p = false;
10009
10010 /* The insn lengths and addresses have to be up to date for the
10011 following manipulations. */
10012 shorten_branches (get_insns ());
10013
10014 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10015 {
10016 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
10017 continue;
10018
10019 if (JUMP_P (insn))
10020 insn_added_p |= s390_z10_fix_long_loop_prediction (insn);
10021
10022 if (GET_CODE (PATTERN (insn)) == PARALLEL
10023 || GET_CODE (PATTERN (insn)) == SET)
10024 insn_added_p |= s390_z10_optimize_cmp (insn);
10025 }
10026
10027 /* Adjust branches if we added new instructions. */
10028 if (insn_added_p)
10029 shorten_branches (get_insns ());
10030 }
10031 }
10032
10033
10034 /* Initialize GCC target structure. */
10035
10036 #undef TARGET_ASM_ALIGNED_HI_OP
10037 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
10038 #undef TARGET_ASM_ALIGNED_DI_OP
10039 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
10040 #undef TARGET_ASM_INTEGER
10041 #define TARGET_ASM_INTEGER s390_assemble_integer
10042
10043 #undef TARGET_ASM_OPEN_PAREN
10044 #define TARGET_ASM_OPEN_PAREN ""
10045
10046 #undef TARGET_ASM_CLOSE_PAREN
10047 #define TARGET_ASM_CLOSE_PAREN ""
10048
10049 #undef TARGET_DEFAULT_TARGET_FLAGS
10050 #define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | MASK_FUSED_MADD)
10051 #undef TARGET_HANDLE_OPTION
10052 #define TARGET_HANDLE_OPTION s390_handle_option
10053
10054 #undef TARGET_ENCODE_SECTION_INFO
10055 #define TARGET_ENCODE_SECTION_INFO s390_encode_section_info
10056
10057 #ifdef HAVE_AS_TLS
10058 #undef TARGET_HAVE_TLS
10059 #define TARGET_HAVE_TLS true
10060 #endif
10061 #undef TARGET_CANNOT_FORCE_CONST_MEM
10062 #define TARGET_CANNOT_FORCE_CONST_MEM s390_cannot_force_const_mem
10063
10064 #undef TARGET_DELEGITIMIZE_ADDRESS
10065 #define TARGET_DELEGITIMIZE_ADDRESS s390_delegitimize_address
10066
10067 #undef TARGET_LEGITIMIZE_ADDRESS
10068 #define TARGET_LEGITIMIZE_ADDRESS s390_legitimize_address
10069
10070 #undef TARGET_RETURN_IN_MEMORY
10071 #define TARGET_RETURN_IN_MEMORY s390_return_in_memory
10072
10073 #undef TARGET_INIT_BUILTINS
10074 #define TARGET_INIT_BUILTINS s390_init_builtins
10075 #undef TARGET_EXPAND_BUILTIN
10076 #define TARGET_EXPAND_BUILTIN s390_expand_builtin
10077
10078 #undef TARGET_ASM_OUTPUT_MI_THUNK
10079 #define TARGET_ASM_OUTPUT_MI_THUNK s390_output_mi_thunk
10080 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
10081 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
10082
10083 #undef TARGET_SCHED_ADJUST_PRIORITY
10084 #define TARGET_SCHED_ADJUST_PRIORITY s390_adjust_priority
10085 #undef TARGET_SCHED_ISSUE_RATE
10086 #define TARGET_SCHED_ISSUE_RATE s390_issue_rate
10087 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
10088 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD s390_first_cycle_multipass_dfa_lookahead
10089
10090 #undef TARGET_CANNOT_COPY_INSN_P
10091 #define TARGET_CANNOT_COPY_INSN_P s390_cannot_copy_insn_p
10092 #undef TARGET_RTX_COSTS
10093 #define TARGET_RTX_COSTS s390_rtx_costs
10094 #undef TARGET_ADDRESS_COST
10095 #define TARGET_ADDRESS_COST s390_address_cost
10096
10097 #undef TARGET_MACHINE_DEPENDENT_REORG
10098 #define TARGET_MACHINE_DEPENDENT_REORG s390_reorg
10099
10100 #undef TARGET_VALID_POINTER_MODE
10101 #define TARGET_VALID_POINTER_MODE s390_valid_pointer_mode
10102
10103 #undef TARGET_BUILD_BUILTIN_VA_LIST
10104 #define TARGET_BUILD_BUILTIN_VA_LIST s390_build_builtin_va_list
10105 #undef TARGET_EXPAND_BUILTIN_VA_START
10106 #define TARGET_EXPAND_BUILTIN_VA_START s390_va_start
10107 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
10108 #define TARGET_GIMPLIFY_VA_ARG_EXPR s390_gimplify_va_arg
10109
10110 #undef TARGET_PROMOTE_FUNCTION_MODE
10111 #define TARGET_PROMOTE_FUNCTION_MODE s390_promote_function_mode
10112 #undef TARGET_PASS_BY_REFERENCE
10113 #define TARGET_PASS_BY_REFERENCE s390_pass_by_reference
10114
10115 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
10116 #define TARGET_FUNCTION_OK_FOR_SIBCALL s390_function_ok_for_sibcall
10117
10118 #undef TARGET_FIXED_CONDITION_CODE_REGS
10119 #define TARGET_FIXED_CONDITION_CODE_REGS s390_fixed_condition_code_regs
10120
10121 #undef TARGET_CC_MODES_COMPATIBLE
10122 #define TARGET_CC_MODES_COMPATIBLE s390_cc_modes_compatible
10123
10124 #undef TARGET_INVALID_WITHIN_DOLOOP
10125 #define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_null
10126
10127 #ifdef HAVE_AS_TLS
10128 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
10129 #define TARGET_ASM_OUTPUT_DWARF_DTPREL s390_output_dwarf_dtprel
10130 #endif
10131
10132 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
10133 #undef TARGET_MANGLE_TYPE
10134 #define TARGET_MANGLE_TYPE s390_mangle_type
10135 #endif
10136
10137 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10138 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
10139
10140 #undef TARGET_SECONDARY_RELOAD
10141 #define TARGET_SECONDARY_RELOAD s390_secondary_reload
10142
10143 #undef TARGET_LIBGCC_CMP_RETURN_MODE
10144 #define TARGET_LIBGCC_CMP_RETURN_MODE s390_libgcc_cmp_return_mode
10145
10146 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
10147 #define TARGET_LIBGCC_SHIFT_COUNT_MODE s390_libgcc_shift_count_mode
10148
10149 #undef TARGET_LEGITIMATE_ADDRESS_P
10150 #define TARGET_LEGITIMATE_ADDRESS_P s390_legitimate_address_p
10151
10152 #undef TARGET_CAN_ELIMINATE
10153 #define TARGET_CAN_ELIMINATE s390_can_eliminate
10154
10155 struct gcc_target targetm = TARGET_INITIALIZER;
10156
10157 #include "gt-s390.h"