Fix enum conversions which are invalid in C++:
[gcc.git] / gcc / config / s390 / s390.c
1 /* Subroutines used for code generation on IBM S/390 and zSeries
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006,
3 2007, 2008, 2009 Free Software Foundation, Inc.
4 Contributed by Hartmut Penner (hpenner@de.ibm.com) and
5 Ulrich Weigand (uweigand@de.ibm.com) and
6 Andreas Krebbel (Andreas.Krebbel@de.ibm.com).
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "tm_p.h"
31 #include "regs.h"
32 #include "hard-reg-set.h"
33 #include "real.h"
34 #include "insn-config.h"
35 #include "conditions.h"
36 #include "output.h"
37 #include "insn-attr.h"
38 #include "flags.h"
39 #include "except.h"
40 #include "function.h"
41 #include "recog.h"
42 #include "expr.h"
43 #include "reload.h"
44 #include "toplev.h"
45 #include "basic-block.h"
46 #include "integrate.h"
47 #include "ggc.h"
48 #include "target.h"
49 #include "target-def.h"
50 #include "debug.h"
51 #include "langhooks.h"
52 #include "optabs.h"
53 #include "gimple.h"
54 #include "df.h"
55
56
57 /* Define the specific costs for a given cpu. */
58
59 struct processor_costs
60 {
61 /* multiplication */
62 const int m; /* cost of an M instruction. */
63 const int mghi; /* cost of an MGHI instruction. */
64 const int mh; /* cost of an MH instruction. */
65 const int mhi; /* cost of an MHI instruction. */
66 const int ml; /* cost of an ML instruction. */
67 const int mr; /* cost of an MR instruction. */
68 const int ms; /* cost of an MS instruction. */
69 const int msg; /* cost of an MSG instruction. */
70 const int msgf; /* cost of an MSGF instruction. */
71 const int msgfr; /* cost of an MSGFR instruction. */
72 const int msgr; /* cost of an MSGR instruction. */
73 const int msr; /* cost of an MSR instruction. */
74 const int mult_df; /* cost of multiplication in DFmode. */
75 const int mxbr;
76 /* square root */
77 const int sqxbr; /* cost of square root in TFmode. */
78 const int sqdbr; /* cost of square root in DFmode. */
79 const int sqebr; /* cost of square root in SFmode. */
80 /* multiply and add */
81 const int madbr; /* cost of multiply and add in DFmode. */
82 const int maebr; /* cost of multiply and add in SFmode. */
83 /* division */
84 const int dxbr;
85 const int ddbr;
86 const int debr;
87 const int dlgr;
88 const int dlr;
89 const int dr;
90 const int dsgfr;
91 const int dsgr;
92 };
93
94 const struct processor_costs *s390_cost;
95
96 static const
97 struct processor_costs z900_cost =
98 {
99 COSTS_N_INSNS (5), /* M */
100 COSTS_N_INSNS (10), /* MGHI */
101 COSTS_N_INSNS (5), /* MH */
102 COSTS_N_INSNS (4), /* MHI */
103 COSTS_N_INSNS (5), /* ML */
104 COSTS_N_INSNS (5), /* MR */
105 COSTS_N_INSNS (4), /* MS */
106 COSTS_N_INSNS (15), /* MSG */
107 COSTS_N_INSNS (7), /* MSGF */
108 COSTS_N_INSNS (7), /* MSGFR */
109 COSTS_N_INSNS (10), /* MSGR */
110 COSTS_N_INSNS (4), /* MSR */
111 COSTS_N_INSNS (7), /* multiplication in DFmode */
112 COSTS_N_INSNS (13), /* MXBR */
113 COSTS_N_INSNS (136), /* SQXBR */
114 COSTS_N_INSNS (44), /* SQDBR */
115 COSTS_N_INSNS (35), /* SQEBR */
116 COSTS_N_INSNS (18), /* MADBR */
117 COSTS_N_INSNS (13), /* MAEBR */
118 COSTS_N_INSNS (134), /* DXBR */
119 COSTS_N_INSNS (30), /* DDBR */
120 COSTS_N_INSNS (27), /* DEBR */
121 COSTS_N_INSNS (220), /* DLGR */
122 COSTS_N_INSNS (34), /* DLR */
123 COSTS_N_INSNS (34), /* DR */
124 COSTS_N_INSNS (32), /* DSGFR */
125 COSTS_N_INSNS (32), /* DSGR */
126 };
127
128 static const
129 struct processor_costs z990_cost =
130 {
131 COSTS_N_INSNS (4), /* M */
132 COSTS_N_INSNS (2), /* MGHI */
133 COSTS_N_INSNS (2), /* MH */
134 COSTS_N_INSNS (2), /* MHI */
135 COSTS_N_INSNS (4), /* ML */
136 COSTS_N_INSNS (4), /* MR */
137 COSTS_N_INSNS (5), /* MS */
138 COSTS_N_INSNS (6), /* MSG */
139 COSTS_N_INSNS (4), /* MSGF */
140 COSTS_N_INSNS (4), /* MSGFR */
141 COSTS_N_INSNS (4), /* MSGR */
142 COSTS_N_INSNS (4), /* MSR */
143 COSTS_N_INSNS (1), /* multiplication in DFmode */
144 COSTS_N_INSNS (28), /* MXBR */
145 COSTS_N_INSNS (130), /* SQXBR */
146 COSTS_N_INSNS (66), /* SQDBR */
147 COSTS_N_INSNS (38), /* SQEBR */
148 COSTS_N_INSNS (1), /* MADBR */
149 COSTS_N_INSNS (1), /* MAEBR */
150 COSTS_N_INSNS (60), /* DXBR */
151 COSTS_N_INSNS (40), /* DDBR */
152 COSTS_N_INSNS (26), /* DEBR */
153 COSTS_N_INSNS (176), /* DLGR */
154 COSTS_N_INSNS (31), /* DLR */
155 COSTS_N_INSNS (31), /* DR */
156 COSTS_N_INSNS (31), /* DSGFR */
157 COSTS_N_INSNS (31), /* DSGR */
158 };
159
160 static const
161 struct processor_costs z9_109_cost =
162 {
163 COSTS_N_INSNS (4), /* M */
164 COSTS_N_INSNS (2), /* MGHI */
165 COSTS_N_INSNS (2), /* MH */
166 COSTS_N_INSNS (2), /* MHI */
167 COSTS_N_INSNS (4), /* ML */
168 COSTS_N_INSNS (4), /* MR */
169 COSTS_N_INSNS (5), /* MS */
170 COSTS_N_INSNS (6), /* MSG */
171 COSTS_N_INSNS (4), /* MSGF */
172 COSTS_N_INSNS (4), /* MSGFR */
173 COSTS_N_INSNS (4), /* MSGR */
174 COSTS_N_INSNS (4), /* MSR */
175 COSTS_N_INSNS (1), /* multiplication in DFmode */
176 COSTS_N_INSNS (28), /* MXBR */
177 COSTS_N_INSNS (130), /* SQXBR */
178 COSTS_N_INSNS (66), /* SQDBR */
179 COSTS_N_INSNS (38), /* SQEBR */
180 COSTS_N_INSNS (1), /* MADBR */
181 COSTS_N_INSNS (1), /* MAEBR */
182 COSTS_N_INSNS (60), /* DXBR */
183 COSTS_N_INSNS (40), /* DDBR */
184 COSTS_N_INSNS (26), /* DEBR */
185 COSTS_N_INSNS (30), /* DLGR */
186 COSTS_N_INSNS (23), /* DLR */
187 COSTS_N_INSNS (23), /* DR */
188 COSTS_N_INSNS (24), /* DSGFR */
189 COSTS_N_INSNS (24), /* DSGR */
190 };
191
192 static const
193 struct processor_costs z10_cost =
194 {
195 COSTS_N_INSNS (10), /* M */
196 COSTS_N_INSNS (10), /* MGHI */
197 COSTS_N_INSNS (10), /* MH */
198 COSTS_N_INSNS (10), /* MHI */
199 COSTS_N_INSNS (10), /* ML */
200 COSTS_N_INSNS (10), /* MR */
201 COSTS_N_INSNS (10), /* MS */
202 COSTS_N_INSNS (10), /* MSG */
203 COSTS_N_INSNS (10), /* MSGF */
204 COSTS_N_INSNS (10), /* MSGFR */
205 COSTS_N_INSNS (10), /* MSGR */
206 COSTS_N_INSNS (10), /* MSR */
207 COSTS_N_INSNS (10), /* multiplication in DFmode */
208 COSTS_N_INSNS (50), /* MXBR */
209 COSTS_N_INSNS (120), /* SQXBR */
210 COSTS_N_INSNS (52), /* SQDBR */
211 COSTS_N_INSNS (38), /* SQEBR */
212 COSTS_N_INSNS (10), /* MADBR */
213 COSTS_N_INSNS (10), /* MAEBR */
214 COSTS_N_INSNS (111), /* DXBR */
215 COSTS_N_INSNS (39), /* DDBR */
216 COSTS_N_INSNS (32), /* DEBR */
217 COSTS_N_INSNS (160), /* DLGR */
218 COSTS_N_INSNS (71), /* DLR */
219 COSTS_N_INSNS (71), /* DR */
220 COSTS_N_INSNS (71), /* DSGFR */
221 COSTS_N_INSNS (71), /* DSGR */
222 };
223
224 extern int reload_completed;
225
226 /* Save information from a "cmpxx" operation until the branch or scc is
227 emitted.  Once a compare has already been emitted, the pair instead holds
228 a MODE_CC register and const0_rtx.  */
229 rtx s390_compare_op0, s390_compare_op1;
230
231 /* Structure used to hold the components of a S/390 memory
232 address. A legitimate address on S/390 is of the general
233 form
234 base + index + displacement
235 where any of the components is optional.
236
237 base and index are registers of the class ADDR_REGS,
238 displacement is an unsigned 12-bit immediate constant. */
239
240 struct s390_address
241 {
242 rtx base;
243 rtx indx;
244 rtx disp;
245 bool pointer;
246 bool literal_pool;
247 };
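/* A worked example (register numbers chosen arbitrarily): the RTL address
   (plus (plus (reg 3) (reg 2)) (const_int 100))
   is decomposed by s390_decompose_address below into indx = (reg 3),
   base = (reg 2) and disp = (const_int 100), matching the
   base + index + displacement form described above.  */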
248
249 /* Which CPU we are tuning for.  */
250 enum processor_type s390_tune = PROCESSOR_max;
251 enum processor_flags s390_tune_flags;
252 /* Which instruction set architecture to use. */
253 enum processor_type s390_arch;
254 enum processor_flags s390_arch_flags;
255
256 HOST_WIDE_INT s390_warn_framesize = 0;
257 HOST_WIDE_INT s390_stack_size = 0;
258 HOST_WIDE_INT s390_stack_guard = 0;
259
260 /* The following structure is embedded in the machine
261 specific part of struct function. */
262
263 struct s390_frame_layout GTY (())
264 {
265 /* Offset within stack frame. */
266 HOST_WIDE_INT gprs_offset;
267 HOST_WIDE_INT f0_offset;
268 HOST_WIDE_INT f4_offset;
269 HOST_WIDE_INT f8_offset;
270 HOST_WIDE_INT backchain_offset;
271
272 /* Numbers of the first and last GPRs for which slots in the
273 register save area are reserved.  */
274 int first_save_gpr_slot;
275 int last_save_gpr_slot;
276
277 /* Numbers of the first and last GPRs to be saved and restored.  */
278 int first_save_gpr;
279 int first_restore_gpr;
280 int last_save_gpr;
281 int last_restore_gpr;
282
283 /* Bits standing for floating point registers. Set, if the
284 respective register has to be saved. Starting with reg 16 (f0)
285 at the rightmost bit.
286 Bit 15 -  8  7  6  5  4  3  2  1  0
287 fpr 15 -  8  7  5  3  1  6  4  2  0
288 reg 31 - 24 23 22 21 20 19 18 17 16  */
289 unsigned int fpr_bitmap;
290
291 /* Number of floating point registers f8-f15 which must be saved. */
292 int high_fprs;
293
294 /* Set if return address needs to be saved.
295 This flag is set by s390_return_addr_rtx if it could not use
296 the initial value of r14 and therefore depends on r14 saved
297 to the stack. */
298 bool save_return_addr_p;
299
300 /* Size of stack frame. */
301 HOST_WIDE_INT frame_size;
302 };
303
304 /* Define the structure for the machine field in struct function. */
305
306 struct machine_function GTY(())
307 {
308 struct s390_frame_layout frame_layout;
309
310 /* Literal pool base register. */
311 rtx base_reg;
312
313 /* True if we may need to perform branch splitting. */
314 bool split_branches_pending_p;
315
316 /* Some local-dynamic TLS symbol name. */
317 const char *some_ld_name;
318
319 bool has_landing_pad_p;
320 };
321
322 /* A few accessor macros for struct cfun->machine->s390_frame_layout.  */
323
324 #define cfun_frame_layout (cfun->machine->frame_layout)
325 #define cfun_save_high_fprs_p (!!cfun_frame_layout.high_fprs)
326 #define cfun_gprs_save_area_size ((cfun_frame_layout.last_save_gpr_slot - \
327 cfun_frame_layout.first_save_gpr_slot + 1) * UNITS_PER_WORD)
328 #define cfun_set_fpr_bit(BITNUM) (cfun->machine->frame_layout.fpr_bitmap |= \
329 (1 << (BITNUM)))
330 #define cfun_fpr_bit_p(BITNUM) (!!(cfun->machine->frame_layout.fpr_bitmap & \
331 (1 << (BITNUM))))
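/* Example of the fpr_bitmap accessors: following the layout documented in
   s390_frame_layout, cfun_set_fpr_bit (0) marks f0 (hard reg 16) as saved
   by setting bit 0, cfun_set_fpr_bit (8) marks f8 (hard reg 24) by setting
   bit 8, and cfun_fpr_bit_p (8) then returns true.  */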
332
333 /* Number of GPRs and FPRs used for argument passing. */
334 #define GP_ARG_NUM_REG 5
335 #define FP_ARG_NUM_REG (TARGET_64BIT? 4 : 2)
336
337 /* A couple of shortcuts. */
338 #define CONST_OK_FOR_J(x) \
339 CONST_OK_FOR_CONSTRAINT_P((x), 'J', "J")
340 #define CONST_OK_FOR_K(x) \
341 CONST_OK_FOR_CONSTRAINT_P((x), 'K', "K")
342 #define CONST_OK_FOR_Os(x) \
343 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
344 #define CONST_OK_FOR_Op(x) \
345 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
346 #define CONST_OK_FOR_On(x) \
347 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")
348
349 #define REGNO_PAIR_OK(REGNO, MODE) \
350 (HARD_REGNO_NREGS ((REGNO), (MODE)) == 1 || !((REGNO) & 1))
351
352 static enum machine_mode
353 s390_libgcc_cmp_return_mode (void)
354 {
355 return TARGET_64BIT ? DImode : SImode;
356 }
357
358 static enum machine_mode
359 s390_libgcc_shift_count_mode (void)
360 {
361 return TARGET_64BIT ? DImode : SImode;
362 }
363
364 /* Return true if the back end supports mode MODE. */
365 static bool
366 s390_scalar_mode_supported_p (enum machine_mode mode)
367 {
368 if (DECIMAL_FLOAT_MODE_P (mode))
369 return true;
370 else
371 return default_scalar_mode_supported_p (mode);
372 }
373
374 /* Set the has_landing_pad_p flag in struct machine_function to VALUE. */
375
376 void
377 s390_set_has_landing_pad_p (bool value)
378 {
379 cfun->machine->has_landing_pad_p = value;
380 }
381
382 /* If two condition code modes are compatible, return a condition code
383 mode which is compatible with both. Otherwise, return
384 VOIDmode. */
385
386 static enum machine_mode
387 s390_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
388 {
389 if (m1 == m2)
390 return m1;
391
392 switch (m1)
393 {
394 case CCZmode:
395 if (m2 == CCUmode || m2 == CCTmode || m2 == CCZ1mode
396 || m2 == CCSmode || m2 == CCSRmode || m2 == CCURmode)
397 return m2;
398 return VOIDmode;
399
400 case CCSmode:
401 case CCUmode:
402 case CCTmode:
403 case CCSRmode:
404 case CCURmode:
405 case CCZ1mode:
406 if (m2 == CCZmode)
407 return m1;
408
409 return VOIDmode;
410
411 default:
412 return VOIDmode;
413 }
414 return VOIDmode;
415 }
416
417 /* Return true if SET either doesn't set the CC register, or else
418 the source and destination have matching CC modes and that
419 CC mode is at least as constrained as REQ_MODE. */
420
421 static bool
422 s390_match_ccmode_set (rtx set, enum machine_mode req_mode)
423 {
424 enum machine_mode set_mode;
425
426 gcc_assert (GET_CODE (set) == SET);
427
428 if (GET_CODE (SET_DEST (set)) != REG || !CC_REGNO_P (REGNO (SET_DEST (set))))
429 return 1;
430
431 set_mode = GET_MODE (SET_DEST (set));
432 switch (set_mode)
433 {
434 case CCSmode:
435 case CCSRmode:
436 case CCUmode:
437 case CCURmode:
438 case CCLmode:
439 case CCL1mode:
440 case CCL2mode:
441 case CCL3mode:
442 case CCT1mode:
443 case CCT2mode:
444 case CCT3mode:
445 if (req_mode != set_mode)
446 return 0;
447 break;
448
449 case CCZmode:
450 if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode
451 && req_mode != CCSRmode && req_mode != CCURmode)
452 return 0;
453 break;
454
455 case CCAPmode:
456 case CCANmode:
457 if (req_mode != CCAmode)
458 return 0;
459 break;
460
461 default:
462 gcc_unreachable ();
463 }
464
465 return (GET_MODE (SET_SRC (set)) == set_mode);
466 }
467
468 /* Return true if every SET in INSN that sets the CC register
469 has source and destination with matching CC modes and that
470 CC mode is at least as constrained as REQ_MODE.
471 If REQ_MODE is VOIDmode, always return false. */
472
473 bool
474 s390_match_ccmode (rtx insn, enum machine_mode req_mode)
475 {
476 int i;
477
478 /* s390_tm_ccmode returns VOIDmode to indicate failure. */
479 if (req_mode == VOIDmode)
480 return false;
481
482 if (GET_CODE (PATTERN (insn)) == SET)
483 return s390_match_ccmode_set (PATTERN (insn), req_mode);
484
485 if (GET_CODE (PATTERN (insn)) == PARALLEL)
486 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
487 {
488 rtx set = XVECEXP (PATTERN (insn), 0, i);
489 if (GET_CODE (set) == SET)
490 if (!s390_match_ccmode_set (set, req_mode))
491 return false;
492 }
493
494 return true;
495 }
496
497 /* If a test-under-mask instruction can be used to implement
498 (compare (and ... OP1) OP2), return the CC mode required
499 to do that. Otherwise, return VOIDmode.
500 MIXED is true if the instruction can distinguish between
501 CC1 and CC2 for mixed selected bits (TMxx); it is false
502 if the instruction cannot (TM). */
503
504 enum machine_mode
505 s390_tm_ccmode (rtx op1, rtx op2, bool mixed)
506 {
507 int bit0, bit1;
508
509 /* ??? Fixme: should work on CONST_DOUBLE as well. */
510 if (GET_CODE (op1) != CONST_INT || GET_CODE (op2) != CONST_INT)
511 return VOIDmode;
512
513 /* Selected bits all zero: CC0.
514 e.g.: int a; if ((a & (16 + 128)) == 0) */
515 if (INTVAL (op2) == 0)
516 return CCTmode;
517
518 /* Selected bits all one: CC3.
519 e.g.: int a; if ((a & (16 + 128)) == 16 + 128) */
520 if (INTVAL (op2) == INTVAL (op1))
521 return CCT3mode;
522
523 /* Exactly two bits selected, mixed zeroes and ones: CC1 or CC2. e.g.:
524 int a;
525 if ((a & (16 + 128)) == 16) -> CCT1
526 if ((a & (16 + 128)) == 128) -> CCT2 */
527 if (mixed)
528 {
529 bit1 = exact_log2 (INTVAL (op2));
530 bit0 = exact_log2 (INTVAL (op1) ^ INTVAL (op2));
531 if (bit0 != -1 && bit1 != -1)
532 return bit0 > bit1 ? CCT1mode : CCT2mode;
533 }
534
535 return VOIDmode;
536 }
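/* Tracing the mixed case with the comment's example above: for
   if ((a & (16 + 128)) == 16) we have op1 = 144 and op2 = 16, so
   bit1 = exact_log2 (16) = 4 and bit0 = exact_log2 (144 ^ 16) = 7;
   bit0 > bit1, hence CCT1mode is returned, as documented.  */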
537
538 /* Given a comparison code OP (EQ, NE, etc.) and the operands
539 OP0 and OP1 of a COMPARE, return the mode to be used for the
540 comparison. */
541
542 enum machine_mode
543 s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
544 {
545 switch (code)
546 {
547 case EQ:
548 case NE:
549 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
550 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
551 return CCAPmode;
552 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
553 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
554 return CCAPmode;
555 if ((GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
556 || GET_CODE (op1) == NEG)
557 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
558 return CCLmode;
559
560 if (GET_CODE (op0) == AND)
561 {
562 /* Check whether we can potentially do it via TM. */
563 enum machine_mode ccmode;
564 ccmode = s390_tm_ccmode (XEXP (op0, 1), op1, 1);
565 if (ccmode != VOIDmode)
566 {
567 /* Relax CCTmode to CCZmode to allow fall-back to AND
568 if that turns out to be beneficial. */
569 return ccmode == CCTmode ? CCZmode : ccmode;
570 }
571 }
572
573 if (register_operand (op0, HImode)
574 && GET_CODE (op1) == CONST_INT
575 && (INTVAL (op1) == -1 || INTVAL (op1) == 65535))
576 return CCT3mode;
577 if (register_operand (op0, QImode)
578 && GET_CODE (op1) == CONST_INT
579 && (INTVAL (op1) == -1 || INTVAL (op1) == 255))
580 return CCT3mode;
581
582 return CCZmode;
583
584 case LE:
585 case LT:
586 case GE:
587 case GT:
588 /* The only overflow condition of NEG and ABS happens when
589 -INT_MAX is used as parameter, which stays negative. So
590 we have an overflow from a positive value to a negative.
591 Using CCAP mode the resulting cc can be used for comparisons. */
592 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
593 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
594 return CCAPmode;
595
596 /* If constants are involved in an add instruction it is possible to use
597 the resulting cc for comparisons with zero. Knowing the sign of the
598 constant the overflow behavior gets predictable. e.g.:
599 int a, b; if ((b = a + c) > 0)
600 with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP */
601 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
602 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
603 {
604 if (INTVAL (XEXP((op0), 1)) < 0)
605 return CCANmode;
606 else
607 return CCAPmode;
608 }
609 /* Fall through. */
610 case UNORDERED:
611 case ORDERED:
612 case UNEQ:
613 case UNLE:
614 case UNLT:
615 case UNGE:
616 case UNGT:
617 case LTGT:
618 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
619 && GET_CODE (op1) != CONST_INT)
620 return CCSRmode;
621 return CCSmode;
622
623 case LTU:
624 case GEU:
625 if (GET_CODE (op0) == PLUS
626 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
627 return CCL1mode;
628
629 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
630 && GET_CODE (op1) != CONST_INT)
631 return CCURmode;
632 return CCUmode;
633
634 case LEU:
635 case GTU:
636 if (GET_CODE (op0) == MINUS
637 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
638 return CCL2mode;
639
640 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
641 && GET_CODE (op1) != CONST_INT)
642 return CCURmode;
643 return CCUmode;
644
645 default:
646 gcc_unreachable ();
647 }
648 }
649
650 /* Replace the comparison OP0 CODE OP1 by a semantically equivalent one
651 that we can implement more efficiently. */
652
653 void
654 s390_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1)
655 {
656 /* Convert ZERO_EXTRACT back to AND to enable TM patterns. */
657 if ((*code == EQ || *code == NE)
658 && *op1 == const0_rtx
659 && GET_CODE (*op0) == ZERO_EXTRACT
660 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
661 && GET_CODE (XEXP (*op0, 2)) == CONST_INT
662 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
663 {
664 rtx inner = XEXP (*op0, 0);
665 HOST_WIDE_INT modesize = GET_MODE_BITSIZE (GET_MODE (inner));
666 HOST_WIDE_INT len = INTVAL (XEXP (*op0, 1));
667 HOST_WIDE_INT pos = INTVAL (XEXP (*op0, 2));
668
669 if (len > 0 && len < modesize
670 && pos >= 0 && pos + len <= modesize
671 && modesize <= HOST_BITS_PER_WIDE_INT)
672 {
673 unsigned HOST_WIDE_INT block;
674 block = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
675 block <<= modesize - pos - len;
676
677 *op0 = gen_rtx_AND (GET_MODE (inner), inner,
678 gen_int_mode (block, GET_MODE (inner)));
679 }
680 }
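/* For instance, comparing
   (zero_extract:SI (reg:SI x) (const_int 2) (const_int 4))
   against zero gives len = 2, pos = 4 and modesize = 32, so
   block = 3 << 26 = 0x0c000000 and the operand becomes
   (and:SI (reg:SI x) (const_int 0x0c000000)), which the TM patterns
   can match.  */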
681
682 /* Narrow AND of memory against immediate to enable TM. */
683 if ((*code == EQ || *code == NE)
684 && *op1 == const0_rtx
685 && GET_CODE (*op0) == AND
686 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
687 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
688 {
689 rtx inner = XEXP (*op0, 0);
690 rtx mask = XEXP (*op0, 1);
691
692 /* Ignore paradoxical SUBREGs if all extra bits are masked out. */
693 if (GET_CODE (inner) == SUBREG
694 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (inner)))
695 && (GET_MODE_SIZE (GET_MODE (inner))
696 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
697 && ((INTVAL (mask)
698 & GET_MODE_MASK (GET_MODE (inner))
699 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
700 == 0))
701 inner = SUBREG_REG (inner);
702
703 /* Do not change volatile MEMs. */
704 if (MEM_P (inner) && !MEM_VOLATILE_P (inner))
705 {
706 int part = s390_single_part (XEXP (*op0, 1),
707 GET_MODE (inner), QImode, 0);
708 if (part >= 0)
709 {
710 mask = gen_int_mode (s390_extract_part (mask, QImode, 0), QImode);
711 inner = adjust_address_nv (inner, QImode, part);
712 *op0 = gen_rtx_AND (QImode, inner, mask);
713 }
714 }
715 }
716
717 /* Narrow comparisons against 0xffff to HImode if possible. */
718 if ((*code == EQ || *code == NE)
719 && GET_CODE (*op1) == CONST_INT
720 && INTVAL (*op1) == 0xffff
721 && SCALAR_INT_MODE_P (GET_MODE (*op0))
722 && (nonzero_bits (*op0, GET_MODE (*op0))
723 & ~(unsigned HOST_WIDE_INT) 0xffff) == 0)
724 {
725 *op0 = gen_lowpart (HImode, *op0);
726 *op1 = constm1_rtx;
727 }
728
729 /* Remove redundant UNSPEC_CCU_TO_INT conversions if possible. */
730 if (GET_CODE (*op0) == UNSPEC
731 && XINT (*op0, 1) == UNSPEC_CCU_TO_INT
732 && XVECLEN (*op0, 0) == 1
733 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCUmode
734 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
735 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
736 && *op1 == const0_rtx)
737 {
738 enum rtx_code new_code = UNKNOWN;
739 switch (*code)
740 {
741 case EQ: new_code = EQ; break;
742 case NE: new_code = NE; break;
743 case LT: new_code = GTU; break;
744 case GT: new_code = LTU; break;
745 case LE: new_code = GEU; break;
746 case GE: new_code = LEU; break;
747 default: break;
748 }
749
750 if (new_code != UNKNOWN)
751 {
752 *op0 = XVECEXP (*op0, 0, 0);
753 *code = new_code;
754 }
755 }
756
757 /* Remove redundant UNSPEC_CCZ_TO_INT conversions if possible. */
758 if (GET_CODE (*op0) == UNSPEC
759 && XINT (*op0, 1) == UNSPEC_CCZ_TO_INT
760 && XVECLEN (*op0, 0) == 1
761 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCZmode
762 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
763 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
764 && *op1 == const0_rtx)
765 {
766 enum rtx_code new_code = UNKNOWN;
767 switch (*code)
768 {
769 case EQ: new_code = EQ; break;
770 case NE: new_code = NE; break;
771 default: break;
772 }
773
774 if (new_code != UNKNOWN)
775 {
776 *op0 = XVECEXP (*op0, 0, 0);
777 *code = new_code;
778 }
779 }
780
781 /* Simplify cascaded EQ, NE with const0_rtx. */
782 if ((*code == NE || *code == EQ)
783 && (GET_CODE (*op0) == EQ || GET_CODE (*op0) == NE)
784 && GET_MODE (*op0) == SImode
785 && GET_MODE (XEXP (*op0, 0)) == CCZ1mode
786 && REG_P (XEXP (*op0, 0))
787 && XEXP (*op0, 1) == const0_rtx
788 && *op1 == const0_rtx)
789 {
790 if ((*code == EQ && GET_CODE (*op0) == NE)
791 || (*code == NE && GET_CODE (*op0) == EQ))
792 *code = EQ;
793 else
794 *code = NE;
795 *op0 = XEXP (*op0, 0);
796 }
797
798 /* Prefer register over memory as first operand. */
799 if (MEM_P (*op0) && REG_P (*op1))
800 {
801 rtx tem = *op0; *op0 = *op1; *op1 = tem;
802 *code = swap_condition (*code);
803 }
804 }
805
806 /* Emit a compare instruction suitable to implement the comparison
807 OP0 CODE OP1. Return the correct condition RTL to be placed in
808 the IF_THEN_ELSE of the conditional branch testing the result. */
809
810 rtx
811 s390_emit_compare (enum rtx_code code, rtx op0, rtx op1)
812 {
813 enum machine_mode mode = s390_select_ccmode (code, op0, op1);
814 rtx cc;
815
816 /* Do not output a redundant compare instruction if a compare_and_swap
817 pattern already computed the result and the machine modes are compatible. */
818 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
819 {
820 gcc_assert (s390_cc_modes_compatible (GET_MODE (op0), mode)
821 == GET_MODE (op0));
822 cc = op0;
823 }
824 else
825 {
826 cc = gen_rtx_REG (mode, CC_REGNUM);
827 emit_insn (gen_rtx_SET (VOIDmode, cc, gen_rtx_COMPARE (mode, op0, op1)));
828 }
829
830 return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
831 }
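/* A small usage sketch: for an integer comparison a > b,
   s390_emit_compare (GT, a, b) selects CCSmode via s390_select_ccmode,
   emits (set (reg:CCS CC_REGNUM) (compare:CCS a b)) and returns
   (gt (reg:CCS CC_REGNUM) (const_int 0)) for the branch's IF_THEN_ELSE.  */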
832
833 /* Emit a SImode compare and swap instruction setting MEM to NEW_RTX if OLD
834 matches CMP.
835 Return the correct condition RTL to be placed in the IF_THEN_ELSE of the
836 conditional branch testing the result. */
837
838 static rtx
839 s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem, rtx cmp, rtx new_rtx)
840 {
841 emit_insn (gen_sync_compare_and_swapsi (old, mem, cmp, new_rtx));
842 return s390_emit_compare (code, gen_rtx_REG (CCZ1mode, CC_REGNUM), const0_rtx);
843 }
844
845 /* Emit a jump instruction to TARGET. If COND is NULL_RTX, emit an
846 unconditional jump, else a conditional jump under condition COND. */
847
848 void
849 s390_emit_jump (rtx target, rtx cond)
850 {
851 rtx insn;
852
853 target = gen_rtx_LABEL_REF (VOIDmode, target);
854 if (cond)
855 target = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, target, pc_rtx);
856
857 insn = gen_rtx_SET (VOIDmode, pc_rtx, target);
858 emit_jump_insn (insn);
859 }
860
861 /* Return branch condition mask to implement a branch
862 specified by CODE. Return -1 for invalid comparisons. */
863
864 int
865 s390_branch_condition_mask (rtx code)
866 {
867 const int CC0 = 1 << 3;
868 const int CC1 = 1 << 2;
869 const int CC2 = 1 << 1;
870 const int CC3 = 1 << 0;
871
872 gcc_assert (GET_CODE (XEXP (code, 0)) == REG);
873 gcc_assert (REGNO (XEXP (code, 0)) == CC_REGNUM);
874 gcc_assert (XEXP (code, 1) == const0_rtx);
875
876 switch (GET_MODE (XEXP (code, 0)))
877 {
878 case CCZmode:
879 case CCZ1mode:
880 switch (GET_CODE (code))
881 {
882 case EQ: return CC0;
883 case NE: return CC1 | CC2 | CC3;
884 default: return -1;
885 }
886 break;
887
888 case CCT1mode:
889 switch (GET_CODE (code))
890 {
891 case EQ: return CC1;
892 case NE: return CC0 | CC2 | CC3;
893 default: return -1;
894 }
895 break;
896
897 case CCT2mode:
898 switch (GET_CODE (code))
899 {
900 case EQ: return CC2;
901 case NE: return CC0 | CC1 | CC3;
902 default: return -1;
903 }
904 break;
905
906 case CCT3mode:
907 switch (GET_CODE (code))
908 {
909 case EQ: return CC3;
910 case NE: return CC0 | CC1 | CC2;
911 default: return -1;
912 }
913 break;
914
915 case CCLmode:
916 switch (GET_CODE (code))
917 {
918 case EQ: return CC0 | CC2;
919 case NE: return CC1 | CC3;
920 default: return -1;
921 }
922 break;
923
924 case CCL1mode:
925 switch (GET_CODE (code))
926 {
927 case LTU: return CC2 | CC3; /* carry */
928 case GEU: return CC0 | CC1; /* no carry */
929 default: return -1;
930 }
931 break;
932
933 case CCL2mode:
934 switch (GET_CODE (code))
935 {
936 case GTU: return CC0 | CC1; /* borrow */
937 case LEU: return CC2 | CC3; /* no borrow */
938 default: return -1;
939 }
940 break;
941
942 case CCL3mode:
943 switch (GET_CODE (code))
944 {
945 case EQ: return CC0 | CC2;
946 case NE: return CC1 | CC3;
947 case LTU: return CC1;
948 case GTU: return CC3;
949 case LEU: return CC1 | CC2;
950 case GEU: return CC2 | CC3;
951 default: return -1;
952 }
953
954 case CCUmode:
955 switch (GET_CODE (code))
956 {
957 case EQ: return CC0;
958 case NE: return CC1 | CC2 | CC3;
959 case LTU: return CC1;
960 case GTU: return CC2;
961 case LEU: return CC0 | CC1;
962 case GEU: return CC0 | CC2;
963 default: return -1;
964 }
965 break;
966
967 case CCURmode:
968 switch (GET_CODE (code))
969 {
970 case EQ: return CC0;
971 case NE: return CC2 | CC1 | CC3;
972 case LTU: return CC2;
973 case GTU: return CC1;
974 case LEU: return CC0 | CC2;
975 case GEU: return CC0 | CC1;
976 default: return -1;
977 }
978 break;
979
980 case CCAPmode:
981 switch (GET_CODE (code))
982 {
983 case EQ: return CC0;
984 case NE: return CC1 | CC2 | CC3;
985 case LT: return CC1 | CC3;
986 case GT: return CC2;
987 case LE: return CC0 | CC1 | CC3;
988 case GE: return CC0 | CC2;
989 default: return -1;
990 }
991 break;
992
993 case CCANmode:
994 switch (GET_CODE (code))
995 {
996 case EQ: return CC0;
997 case NE: return CC1 | CC2 | CC3;
998 case LT: return CC1;
999 case GT: return CC2 | CC3;
1000 case LE: return CC0 | CC1;
1001 case GE: return CC0 | CC2 | CC3;
1002 default: return -1;
1003 }
1004 break;
1005
1006 case CCSmode:
1007 switch (GET_CODE (code))
1008 {
1009 case EQ: return CC0;
1010 case NE: return CC1 | CC2 | CC3;
1011 case LT: return CC1;
1012 case GT: return CC2;
1013 case LE: return CC0 | CC1;
1014 case GE: return CC0 | CC2;
1015 case UNORDERED: return CC3;
1016 case ORDERED: return CC0 | CC1 | CC2;
1017 case UNEQ: return CC0 | CC3;
1018 case UNLT: return CC1 | CC3;
1019 case UNGT: return CC2 | CC3;
1020 case UNLE: return CC0 | CC1 | CC3;
1021 case UNGE: return CC0 | CC2 | CC3;
1022 case LTGT: return CC1 | CC2;
1023 default: return -1;
1024 }
1025 break;
1026
1027 case CCSRmode:
1028 switch (GET_CODE (code))
1029 {
1030 case EQ: return CC0;
1031 case NE: return CC2 | CC1 | CC3;
1032 case LT: return CC2;
1033 case GT: return CC1;
1034 case LE: return CC0 | CC2;
1035 case GE: return CC0 | CC1;
1036 case UNORDERED: return CC3;
1037 case ORDERED: return CC0 | CC2 | CC1;
1038 case UNEQ: return CC0 | CC3;
1039 case UNLT: return CC2 | CC3;
1040 case UNGT: return CC1 | CC3;
1041 case UNLE: return CC0 | CC2 | CC3;
1042 case UNGE: return CC0 | CC1 | CC3;
1043 case LTGT: return CC2 | CC1;
1044 default: return -1;
1045 }
1046 break;
1047
1048 default:
1049 return -1;
1050 }
1051 }
1052
1053
1054 /* Return branch condition mask to implement a compare and branch
1055 specified by CODE. Return -1 for invalid comparisons. */
1056
1057 int
1058 s390_compare_and_branch_condition_mask (rtx code)
1059 {
1060 const int CC0 = 1 << 3;
1061 const int CC1 = 1 << 2;
1062 const int CC2 = 1 << 1;
1063
1064 switch (GET_CODE (code))
1065 {
1066 case EQ:
1067 return CC0;
1068 case NE:
1069 return CC1 | CC2;
1070 case LT:
1071 case LTU:
1072 return CC1;
1073 case GT:
1074 case GTU:
1075 return CC2;
1076 case LE:
1077 case LEU:
1078 return CC0 | CC1;
1079 case GE:
1080 case GEU:
1081 return CC0 | CC2;
1082 default:
1083 gcc_unreachable ();
1084 }
1085 return -1;
1086 }
1087
1088 /* If INV is false, return assembler mnemonic string to implement
1089 a branch specified by CODE. If INV is true, return mnemonic
1090 for the corresponding inverted branch. */
1091
1092 static const char *
1093 s390_branch_condition_mnemonic (rtx code, int inv)
1094 {
1095 int mask;
1096
1097 static const char *const mnemonic[16] =
1098 {
1099 NULL, "o", "h", "nle",
1100 "l", "nhe", "lh", "ne",
1101 "e", "nlh", "he", "nl",
1102 "le", "nh", "no", NULL
1103 };
1104
1105 if (GET_CODE (XEXP (code, 0)) == REG
1106 && REGNO (XEXP (code, 0)) == CC_REGNUM
1107 && XEXP (code, 1) == const0_rtx)
1108 mask = s390_branch_condition_mask (code);
1109 else
1110 mask = s390_compare_and_branch_condition_mask (code);
1111
1112 gcc_assert (mask >= 0);
1113
1114 if (inv)
1115 mask ^= 15;
1116
1117 gcc_assert (mask >= 1 && mask <= 14);
1118
1119 return mnemonic[mask];
1120 }
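/* Example: a CCSmode GT test yields mask CC2 == 2, so mnemonic[2] == "h"
   is returned; with INV set the mask becomes 2 ^ 15 == 13 and the
   inverted mnemonic is "nh".  */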
1121
1122 /* Return the part of OP which has a value different from DEF.
1123 The size of the part is determined by MODE.
1124 Use this function only if you already know that OP really
1125 contains such a part.  */
1126
1127 unsigned HOST_WIDE_INT
1128 s390_extract_part (rtx op, enum machine_mode mode, int def)
1129 {
1130 unsigned HOST_WIDE_INT value = 0;
1131 int max_parts = HOST_BITS_PER_WIDE_INT / GET_MODE_BITSIZE (mode);
1132 int part_bits = GET_MODE_BITSIZE (mode);
1133 unsigned HOST_WIDE_INT part_mask
1134 = ((unsigned HOST_WIDE_INT)1 << part_bits) - 1;
1135 int i;
1136
1137 for (i = 0; i < max_parts; i++)
1138 {
1139 if (i == 0)
1140 value = (unsigned HOST_WIDE_INT) INTVAL (op);
1141 else
1142 value >>= part_bits;
1143
1144 if ((value & part_mask) != (def & part_mask))
1145 return value & part_mask;
1146 }
1147
1148 gcc_unreachable ();
1149 }
1150
1151 /* If OP is an integer constant of mode MODE with exactly one
1152 part of mode PART_MODE unequal to DEF, return the number of that
1153 part. Otherwise, return -1. */
1154
1155 int
1156 s390_single_part (rtx op,
1157 enum machine_mode mode,
1158 enum machine_mode part_mode,
1159 int def)
1160 {
1161 unsigned HOST_WIDE_INT value = 0;
1162 int n_parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (part_mode);
1163 unsigned HOST_WIDE_INT part_mask
1164 = ((unsigned HOST_WIDE_INT)1 << GET_MODE_BITSIZE (part_mode)) - 1;
1165 int i, part = -1;
1166
1167 if (GET_CODE (op) != CONST_INT)
1168 return -1;
1169
1170 for (i = 0; i < n_parts; i++)
1171 {
1172 if (i == 0)
1173 value = (unsigned HOST_WIDE_INT) INTVAL (op);
1174 else
1175 value >>= GET_MODE_BITSIZE (part_mode);
1176
1177 if ((value & part_mask) != (def & part_mask))
1178 {
1179 if (part != -1)
1180 return -1;
1181 else
1182 part = i;
1183 }
1184 }
1185 return part == -1 ? -1 : n_parts - 1 - part;
1186 }
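/* Worked example (an arbitrary constant): for op = 0x00ff0000 in SImode
   with part_mode = QImode and def = 0, s390_extract_part returns 0xff and
   s390_single_part returns 1 -- the parts are scanned from the least
   significant end, but the result is numbered from the most significant
   part (n_parts - 1 - part).  */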
1187
1188 /* Return true if IN contains a contiguous bitfield in the lower SIZE
1189 bits and no other bits are set in IN. POS and LENGTH can be used
1190 to obtain the start position and the length of the bitfield.
1191
1192 POS gives the position of the first bit of the bitfield counting
1193 from the lowest order bit starting with zero. In order to use this
1194 value for S/390 instructions this has to be converted to "bits big
1195 endian" style. */
1196
1197 bool
1198 s390_contiguous_bitmask_p (unsigned HOST_WIDE_INT in, int size,
1199 int *pos, int *length)
1200 {
1201 int tmp_pos = 0;
1202 int tmp_length = 0;
1203 int i;
1204 unsigned HOST_WIDE_INT mask = 1ULL;
1205 bool contiguous = false;
1206
1207 for (i = 0; i < size; mask <<= 1, i++)
1208 {
1209 if (contiguous)
1210 {
1211 if (mask & in)
1212 tmp_length++;
1213 else
1214 break;
1215 }
1216 else
1217 {
1218 if (mask & in)
1219 {
1220 contiguous = true;
1221 tmp_length++;
1222 }
1223 else
1224 tmp_pos++;
1225 }
1226 }
1227
1228 if (!tmp_length)
1229 return false;
1230
1231 /* Calculate a mask for all bits beyond the contiguous bits. */
1232 mask = (-1LL & ~(((1ULL << (tmp_length + tmp_pos - 1)) << 1) - 1));
1233
1234 if (mask & in)
1235 return false;
1236
1237 if (tmp_length + tmp_pos - 1 > size)
1238 return false;
1239
1240 if (length)
1241 *length = tmp_length;
1242
1243 if (pos)
1244 *pos = tmp_pos;
1245
1246 return true;
1247 }
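/* Example: s390_contiguous_bitmask_p (0x0ff0, 16, &pos, &len) succeeds
   with *pos = 4 and *len = 8, whereas 0x0f0f fails because further bits
   are set beyond the first contiguous run.  */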
1248
1249 /* Check whether we can (and want to) split a double-word
1250 move in mode MODE from SRC to DST into two single-word
1251 moves, moving the subword FIRST_SUBWORD first. */
1252
1253 bool
1254 s390_split_ok_p (rtx dst, rtx src, enum machine_mode mode, int first_subword)
1255 {
1256 /* Floating point registers cannot be split. */
1257 if (FP_REG_P (src) || FP_REG_P (dst))
1258 return false;
1259
1260 /* We don't need to split if operands are directly accessible. */
1261 if (s_operand (src, mode) || s_operand (dst, mode))
1262 return false;
1263
1264 /* Non-offsettable memory references cannot be split. */
1265 if ((GET_CODE (src) == MEM && !offsettable_memref_p (src))
1266 || (GET_CODE (dst) == MEM && !offsettable_memref_p (dst)))
1267 return false;
1268
1269 /* Moving the first subword must not clobber a register
1270 needed to move the second subword. */
1271 if (register_operand (dst, mode))
1272 {
1273 rtx subreg = operand_subword (dst, first_subword, 0, mode);
1274 if (reg_overlap_mentioned_p (subreg, src))
1275 return false;
1276 }
1277
1278 return true;
1279 }
1280
1281 /* Return true if it can be proven that [MEM1, MEM1 + SIZE]
1282 and [MEM2, MEM2 + SIZE] overlap, and false
1283 otherwise.  */
1284
1285 bool
1286 s390_overlap_p (rtx mem1, rtx mem2, HOST_WIDE_INT size)
1287 {
1288 rtx addr1, addr2, addr_delta;
1289 HOST_WIDE_INT delta;
1290
1291 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
1292 return true;
1293
1294 if (size == 0)
1295 return false;
1296
1297 addr1 = XEXP (mem1, 0);
1298 addr2 = XEXP (mem2, 0);
1299
1300 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
1301
1302 /* This overlapping check is used by peepholes merging memory block operations.
1303 Overlapping operations would otherwise be recognized by the S/390 hardware
1304 and would fall back to a slower implementation. Allowing overlapping
1305 operations would lead to slow code but not to wrong code. Therefore we are
1306 somewhat optimistic if we cannot prove that the memory blocks are
1307 overlapping.
1308 That's why we return false here although this may accept operations on
1309 overlapping memory areas. */
1310 if (!addr_delta || GET_CODE (addr_delta) != CONST_INT)
1311 return false;
1312
1313 delta = INTVAL (addr_delta);
1314
1315 if (delta == 0
1316 || (delta > 0 && delta < size)
1317 || (delta < 0 && -delta < size))
1318 return true;
1319
1320 return false;
1321 }
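/* Example: if the two addresses can be proven to differ by a constant 8
   (addr_delta is (const_int 8)) and size is 16, the blocks provably
   overlap and the function returns true; if the difference cannot be
   proven constant, the function optimistically returns false, as
   explained above.  */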
1322
1323 /* Check whether the address of memory reference MEM2 equals exactly
1324 the address of memory reference MEM1 plus DELTA. Return true if
1325 we can prove this to be the case, false otherwise. */
1326
1327 bool
1328 s390_offset_p (rtx mem1, rtx mem2, rtx delta)
1329 {
1330 rtx addr1, addr2, addr_delta;
1331
1332 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
1333 return false;
1334
1335 addr1 = XEXP (mem1, 0);
1336 addr2 = XEXP (mem2, 0);
1337
1338 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
1339 if (!addr_delta || !rtx_equal_p (addr_delta, delta))
1340 return false;
1341
1342 return true;
1343 }
1344
1345 /* Expand logical operator CODE in mode MODE with operands OPERANDS. */
1346
1347 void
1348 s390_expand_logical_operator (enum rtx_code code, enum machine_mode mode,
1349 rtx *operands)
1350 {
1351 enum machine_mode wmode = mode;
1352 rtx dst = operands[0];
1353 rtx src1 = operands[1];
1354 rtx src2 = operands[2];
1355 rtx op, clob, tem;
1356
1357 /* If we cannot handle the operation directly, use a temp register. */
1358 if (!s390_logical_operator_ok_p (operands))
1359 dst = gen_reg_rtx (mode);
1360
1361 /* QImode and HImode patterns make sense only if we have a destination
1362 in memory. Otherwise perform the operation in SImode. */
1363 if ((mode == QImode || mode == HImode) && GET_CODE (dst) != MEM)
1364 wmode = SImode;
1365
1366 /* Widen operands if required. */
1367 if (mode != wmode)
1368 {
1369 if (GET_CODE (dst) == SUBREG
1370 && (tem = simplify_subreg (wmode, dst, mode, 0)) != 0)
1371 dst = tem;
1372 else if (REG_P (dst))
1373 dst = gen_rtx_SUBREG (wmode, dst, 0);
1374 else
1375 dst = gen_reg_rtx (wmode);
1376
1377 if (GET_CODE (src1) == SUBREG
1378 && (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)
1379 src1 = tem;
1380 else if (GET_MODE (src1) != VOIDmode)
1381 src1 = gen_rtx_SUBREG (wmode, force_reg (mode, src1), 0);
1382
1383 if (GET_CODE (src2) == SUBREG
1384 && (tem = simplify_subreg (wmode, src2, mode, 0)) != 0)
1385 src2 = tem;
1386 else if (GET_MODE (src2) != VOIDmode)
1387 src2 = gen_rtx_SUBREG (wmode, force_reg (mode, src2), 0);
1388 }
1389
1390 /* Emit the instruction. */
1391 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, wmode, src1, src2));
1392 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
1393 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
1394
1395 /* Fix up the destination if needed. */
1396 if (dst != operands[0])
1397 emit_move_insn (operands[0], gen_lowpart (mode, dst));
1398 }
1399
1400 /* Check whether OPERANDS are OK for a logical operation (AND, IOR, XOR). */
1401
1402 bool
1403 s390_logical_operator_ok_p (rtx *operands)
1404 {
1405 /* If the destination operand is in memory, it needs to coincide
1406 with one of the source operands. After reload, it has to be
1407 the first source operand. */
1408 if (GET_CODE (operands[0]) == MEM)
1409 return rtx_equal_p (operands[0], operands[1])
1410 || (!reload_completed && rtx_equal_p (operands[0], operands[2]));
1411
1412 return true;
1413 }
1414
1415 /* Narrow logical operation CODE of memory operand MEMOP with immediate
1416 operand IMMOP to switch from SS to SI type instructions. */
1417
1418 void
1419 s390_narrow_logical_operator (enum rtx_code code, rtx *memop, rtx *immop)
1420 {
1421 int def = code == AND ? -1 : 0;
1422 HOST_WIDE_INT mask;
1423 int part;
1424
1425 gcc_assert (GET_CODE (*memop) == MEM);
1426 gcc_assert (!MEM_VOLATILE_P (*memop));
1427
1428 mask = s390_extract_part (*immop, QImode, def);
1429 part = s390_single_part (*immop, GET_MODE (*memop), QImode, def);
1430 gcc_assert (part >= 0);
1431
1432 *memop = adjust_address (*memop, QImode, part);
1433 *immop = gen_int_mode (mask, QImode);
1434 }
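/* Worked example: for an IOR of an SImode memory operand with the
   immediate 0x00ff0000, def is 0, s390_extract_part yields mask = 0xff
   and s390_single_part yields part = 1, so the access is narrowed to the
   QImode location at byte offset 1 and a single-byte SI-type instruction
   (e.g. OI) can be used instead of an SS-type one.  */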
1435
1436
1437 /* How to allocate a 'struct machine_function'. */
1438
1439 static struct machine_function *
1440 s390_init_machine_status (void)
1441 {
1442 return GGC_CNEW (struct machine_function);
1443 }
1444
1445 /* Change optimizations to be performed, depending on the
1446 optimization level.
1447
1448 LEVEL is the optimization level specified; 2 if `-O2' is
1449 specified, 1 if `-O' is specified, and 0 if neither is specified.
1450
1451 SIZE is nonzero if `-Os' is specified and zero otherwise. */
1452
1453 void
1454 optimization_options (int level ATTRIBUTE_UNUSED, int size ATTRIBUTE_UNUSED)
1455 {
1456 /* ??? There are apparently still problems with -fcaller-saves. */
1457 flag_caller_saves = 0;
1458
1459 /* By default, always emit DWARF-2 unwind info. This allows debugging
1460 without maintaining a stack frame back-chain. */
1461 flag_asynchronous_unwind_tables = 1;
1462
1463 /* Use MVCLE instructions to decrease code size if requested. */
1464 if (size != 0)
1465 target_flags |= MASK_MVCLE;
1466 }
1467
1468 /* Return true if ARG is the name of a processor. Set *TYPE and *FLAGS
1469 to the associated processor_type and processor_flags if so. */
1470
1471 static bool
1472 s390_handle_arch_option (const char *arg,
1473 enum processor_type *type,
1474 enum processor_flags *flags)
1475 {
1476 static struct pta
1477 {
1478 const char *const name; /* processor name or nickname. */
1479 const enum processor_type processor;
1480 const enum processor_flags flags;
1481 }
1482 const processor_alias_table[] =
1483 {
1484 {"g5", PROCESSOR_9672_G5, PF_IEEE_FLOAT},
1485 {"g6", PROCESSOR_9672_G6, PF_IEEE_FLOAT},
1486 {"z900", PROCESSOR_2064_Z900, PF_IEEE_FLOAT | PF_ZARCH},
1487 {"z990", PROCESSOR_2084_Z990, PF_IEEE_FLOAT | PF_ZARCH
1488 | PF_LONG_DISPLACEMENT},
1489 {"z9-109", PROCESSOR_2094_Z9_109, PF_IEEE_FLOAT | PF_ZARCH
1490 | PF_LONG_DISPLACEMENT | PF_EXTIMM},
1491 {"z9-ec", PROCESSOR_2094_Z9_109, PF_IEEE_FLOAT | PF_ZARCH
1492 | PF_LONG_DISPLACEMENT | PF_EXTIMM | PF_DFP },
1493 {"z10", PROCESSOR_2097_Z10, PF_IEEE_FLOAT | PF_ZARCH
1494 | PF_LONG_DISPLACEMENT | PF_EXTIMM | PF_DFP | PF_Z10},
1495 };
1496 size_t i;
1497
1498 for (i = 0; i < ARRAY_SIZE (processor_alias_table); i++)
1499 if (strcmp (arg, processor_alias_table[i].name) == 0)
1500 {
1501 *type = processor_alias_table[i].processor;
1502 *flags = processor_alias_table[i].flags;
1503 return true;
1504 }
1505 return false;
1506 }
1507
1508 /* Implement TARGET_HANDLE_OPTION. */
1509
1510 static bool
1511 s390_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
1512 {
1513 switch (code)
1514 {
1515 case OPT_march_:
1516 return s390_handle_arch_option (arg, &s390_arch, &s390_arch_flags);
1517
1518 case OPT_mstack_guard_:
1519 if (sscanf (arg, HOST_WIDE_INT_PRINT_DEC, &s390_stack_guard) != 1)
1520 return false;
1521 if (exact_log2 (s390_stack_guard) == -1)
1522 error ("stack guard value must be an exact power of 2");
1523 return true;
1524
1525 case OPT_mstack_size_:
1526 if (sscanf (arg, HOST_WIDE_INT_PRINT_DEC, &s390_stack_size) != 1)
1527 return false;
1528 if (exact_log2 (s390_stack_size) == -1)
1529 error ("stack size must be an exact power of 2");
1530 return true;
1531
1532 case OPT_mtune_:
1533 return s390_handle_arch_option (arg, &s390_tune, &s390_tune_flags);
1534
1535 case OPT_mwarn_framesize_:
1536 return sscanf (arg, HOST_WIDE_INT_PRINT_DEC, &s390_warn_framesize) == 1;
1537
1538 default:
1539 return true;
1540 }
1541 }
1542
1543 void
1544 override_options (void)
1545 {
1546 /* Set up function hooks. */
1547 init_machine_status = s390_init_machine_status;
1548
1549 /* Architecture mode defaults according to ABI. */
1550 if (!(target_flags_explicit & MASK_ZARCH))
1551 {
1552 if (TARGET_64BIT)
1553 target_flags |= MASK_ZARCH;
1554 else
1555 target_flags &= ~MASK_ZARCH;
1556 }
1557
1558 /* Determine processor architectural level. */
1559 if (!s390_arch_string)
1560 {
1561 s390_arch_string = TARGET_ZARCH? "z900" : "g5";
1562 s390_handle_arch_option (s390_arch_string, &s390_arch, &s390_arch_flags);
1563 }
1564
1565 /* Determine processor to tune for. */
1566 if (s390_tune == PROCESSOR_max)
1567 {
1568 s390_tune = s390_arch;
1569 s390_tune_flags = s390_arch_flags;
1570 }
1571
1572 /* Sanity checks. */
1573 if (TARGET_ZARCH && !TARGET_CPU_ZARCH)
1574 error ("z/Architecture mode not supported on %s", s390_arch_string);
1575 if (TARGET_64BIT && !TARGET_ZARCH)
1576 error ("64-bit ABI not supported in ESA/390 mode");
1577
1578 if (TARGET_HARD_DFP && !TARGET_DFP)
1579 {
1580 if (target_flags_explicit & MASK_HARD_DFP)
1581 {
1582 if (!TARGET_CPU_DFP)
1583 error ("Hardware decimal floating point instructions"
1584 " not available on %s", s390_arch_string);
1585 if (!TARGET_ZARCH)
1586 error ("Hardware decimal floating point instructions"
1587 " not available in ESA/390 mode");
1588 }
1589 else
1590 target_flags &= ~MASK_HARD_DFP;
1591 }
1592
1593 if ((target_flags_explicit & MASK_SOFT_FLOAT) && TARGET_SOFT_FLOAT)
1594 {
1595 if ((target_flags_explicit & MASK_HARD_DFP) && TARGET_HARD_DFP)
1596 error ("-mhard-dfp can't be used in conjunction with -msoft-float");
1597
1598 target_flags &= ~MASK_HARD_DFP;
1599 }
1600
1601 /* Set processor cost function. */
1602 switch (s390_tune)
1603 {
1604 case PROCESSOR_2084_Z990:
1605 s390_cost = &z990_cost;
1606 break;
1607 case PROCESSOR_2094_Z9_109:
1608 s390_cost = &z9_109_cost;
1609 break;
1610 case PROCESSOR_2097_Z10:
1611 s390_cost = &z10_cost;
1612 break;
1613 default:
1614 s390_cost = &z900_cost;
1615 }
1616
1617 if (TARGET_BACKCHAIN && TARGET_PACKED_STACK && TARGET_HARD_FLOAT)
1618 error ("-mbackchain -mpacked-stack -mhard-float are not supported "
1619 "in combination");
1620
1621 if (s390_stack_size)
1622 {
1623 if (s390_stack_guard >= s390_stack_size)
1624 error ("stack size must be greater than the stack guard value");
1625 else if (s390_stack_size > 1 << 16)
1626 error ("stack size must not be greater than 64k");
1627 }
1628 else if (s390_stack_guard)
1629 error ("-mstack-guard implies use of -mstack-size");
1630
1631 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
1632 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
1633 target_flags |= MASK_LONG_DOUBLE_128;
1634 #endif
1635 }
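/* A plausible default configuration as a sketch: compiling 64-bit with no
   explicit -march, MASK_ZARCH is enabled by the ABI default above,
   s390_arch_string defaults to "z900", and without -mtune the tuning
   parameters follow the architecture, so s390_cost ends up as z900_cost.  */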
1636
1637 /* Map for smallest class containing reg regno. */
1638
1639 const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
1640 { GENERAL_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1641 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1642 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1643 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1644 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1645 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1646 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1647 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1648 ADDR_REGS, CC_REGS, ADDR_REGS, ADDR_REGS,
1649 ACCESS_REGS, ACCESS_REGS
1650 };
1651
1652 /* Return attribute type of insn. */
1653
1654 static enum attr_type
1655 s390_safe_attr_type (rtx insn)
1656 {
1657 if (recog_memoized (insn) >= 0)
1658 return get_attr_type (insn);
1659 else
1660 return TYPE_NONE;
1661 }
1662
1663 /* Return true if DISP is a valid short displacement. */
1664
1665 static bool
1666 s390_short_displacement (rtx disp)
1667 {
1668 /* No displacement is OK. */
1669 if (!disp)
1670 return true;
1671
1672 /* Integer displacement in range. */
1673 if (GET_CODE (disp) == CONST_INT)
1674 return INTVAL (disp) >= 0 && INTVAL (disp) < 4096;
1675
1676 /* GOT offset is not OK, the GOT can be large. */
1677 if (GET_CODE (disp) == CONST
1678 && GET_CODE (XEXP (disp, 0)) == UNSPEC
1679 && (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
1680 || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
1681 return false;
1682
1683 /* All other symbolic constants are literal pool references,
1684 which are OK as the literal pool must be small. */
1685 if (GET_CODE (disp) == CONST)
1686 return true;
1687
1688 return false;
1689 }
1690
1691 /* Decompose a RTL expression ADDR for a memory address into
1692 its components, returned in OUT.
1693
1694 Returns false if ADDR is not a valid memory address, true
1695 otherwise. If OUT is NULL, don't return the components,
1696 but check for validity only.
1697
1698 Note: Only addresses in canonical form are recognized.
1699 LEGITIMIZE_ADDRESS should convert non-canonical forms to the
1700 canonical form so that they will be recognized. */
1701
1702 static int
1703 s390_decompose_address (rtx addr, struct s390_address *out)
1704 {
1705 HOST_WIDE_INT offset = 0;
1706 rtx base = NULL_RTX;
1707 rtx indx = NULL_RTX;
1708 rtx disp = NULL_RTX;
1709 rtx orig_disp;
1710 bool pointer = false;
1711 bool base_ptr = false;
1712 bool indx_ptr = false;
1713 bool literal_pool = false;
1714
1715 /* We may need to substitute the literal pool base register into the address
1716 below. However, at this point we do not know which register is going to
1717 be used as base, so we substitute the arg pointer register. This is going
1718 to be treated as holding a pointer below -- it shouldn't be used for any
1719 other purpose. */
1720 rtx fake_pool_base = gen_rtx_REG (Pmode, ARG_POINTER_REGNUM);
1721
1722 /* Decompose address into base + index + displacement. */
1723
1724 if (GET_CODE (addr) == REG || GET_CODE (addr) == UNSPEC)
1725 base = addr;
1726
1727 else if (GET_CODE (addr) == PLUS)
1728 {
1729 rtx op0 = XEXP (addr, 0);
1730 rtx op1 = XEXP (addr, 1);
1731 enum rtx_code code0 = GET_CODE (op0);
1732 enum rtx_code code1 = GET_CODE (op1);
1733
1734 if (code0 == REG || code0 == UNSPEC)
1735 {
1736 if (code1 == REG || code1 == UNSPEC)
1737 {
1738 indx = op0; /* index + base */
1739 base = op1;
1740 }
1741
1742 else
1743 {
1744 base = op0; /* base + displacement */
1745 disp = op1;
1746 }
1747 }
1748
1749 else if (code0 == PLUS)
1750 {
1751 indx = XEXP (op0, 0); /* index + base + disp */
1752 base = XEXP (op0, 1);
1753 disp = op1;
1754 }
1755
1756 else
1757 {
1758 return false;
1759 }
1760 }
1761
1762 else
1763 disp = addr; /* displacement */
1764
1765 /* Extract integer part of displacement. */
1766 orig_disp = disp;
1767 if (disp)
1768 {
1769 if (GET_CODE (disp) == CONST_INT)
1770 {
1771 offset = INTVAL (disp);
1772 disp = NULL_RTX;
1773 }
1774 else if (GET_CODE (disp) == CONST
1775 && GET_CODE (XEXP (disp, 0)) == PLUS
1776 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
1777 {
1778 offset = INTVAL (XEXP (XEXP (disp, 0), 1));
1779 disp = XEXP (XEXP (disp, 0), 0);
1780 }
1781 }
1782
1783 /* Strip off CONST here to avoid special case tests later. */
1784 if (disp && GET_CODE (disp) == CONST)
1785 disp = XEXP (disp, 0);
1786
1787 /* We can convert literal pool addresses to
1788 displacements by basing them off the base register. */
1789 if (disp && GET_CODE (disp) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (disp))
1790 {
1791 /* Either base or index must be free to hold the base register. */
1792 if (!base)
1793 base = fake_pool_base, literal_pool = true;
1794 else if (!indx)
1795 indx = fake_pool_base, literal_pool = true;
1796 else
1797 return false;
1798
1799 /* Mark up the displacement. */
1800 disp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, disp),
1801 UNSPEC_LTREL_OFFSET);
1802 }
1803
1804 /* Validate base register. */
1805 if (base)
1806 {
1807 if (GET_CODE (base) == UNSPEC)
1808 switch (XINT (base, 1))
1809 {
1810 case UNSPEC_LTREF:
1811 if (!disp)
1812 disp = gen_rtx_UNSPEC (Pmode,
1813 gen_rtvec (1, XVECEXP (base, 0, 0)),
1814 UNSPEC_LTREL_OFFSET);
1815 else
1816 return false;
1817
1818 base = XVECEXP (base, 0, 1);
1819 break;
1820
1821 case UNSPEC_LTREL_BASE:
1822 if (XVECLEN (base, 0) == 1)
1823 base = fake_pool_base, literal_pool = true;
1824 else
1825 base = XVECEXP (base, 0, 1);
1826 break;
1827
1828 default:
1829 return false;
1830 }
1831
1832 if (!REG_P (base)
1833 || (GET_MODE (base) != SImode
1834 && GET_MODE (base) != Pmode))
1835 return false;
1836
1837 if (REGNO (base) == STACK_POINTER_REGNUM
1838 || REGNO (base) == FRAME_POINTER_REGNUM
1839 || ((reload_completed || reload_in_progress)
1840 && frame_pointer_needed
1841 && REGNO (base) == HARD_FRAME_POINTER_REGNUM)
1842 || REGNO (base) == ARG_POINTER_REGNUM
1843 || (flag_pic
1844 && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
1845 pointer = base_ptr = true;
1846
1847 if ((reload_completed || reload_in_progress)
1848 && base == cfun->machine->base_reg)
1849 pointer = base_ptr = literal_pool = true;
1850 }
1851
1852 /* Validate index register. */
1853 if (indx)
1854 {
1855 if (GET_CODE (indx) == UNSPEC)
1856 switch (XINT (indx, 1))
1857 {
1858 case UNSPEC_LTREF:
1859 if (!disp)
1860 disp = gen_rtx_UNSPEC (Pmode,
1861 gen_rtvec (1, XVECEXP (indx, 0, 0)),
1862 UNSPEC_LTREL_OFFSET);
1863 else
1864 return false;
1865
1866 indx = XVECEXP (indx, 0, 1);
1867 break;
1868
1869 case UNSPEC_LTREL_BASE:
1870 if (XVECLEN (indx, 0) == 1)
1871 indx = fake_pool_base, literal_pool = true;
1872 else
1873 indx = XVECEXP (indx, 0, 1);
1874 break;
1875
1876 default:
1877 return false;
1878 }
1879
1880 if (!REG_P (indx)
1881 || (GET_MODE (indx) != SImode
1882 && GET_MODE (indx) != Pmode))
1883 return false;
1884
1885 if (REGNO (indx) == STACK_POINTER_REGNUM
1886 || REGNO (indx) == FRAME_POINTER_REGNUM
1887 || ((reload_completed || reload_in_progress)
1888 && frame_pointer_needed
1889 && REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
1890 || REGNO (indx) == ARG_POINTER_REGNUM
1891 || (flag_pic
1892 && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
1893 pointer = indx_ptr = true;
1894
1895 if ((reload_completed || reload_in_progress)
1896 && indx == cfun->machine->base_reg)
1897 pointer = indx_ptr = literal_pool = true;
1898 }
1899
1900 /* Prefer to use pointer as base, not index. */
1901 if (base && indx && !base_ptr
1902 && (indx_ptr || (!REG_POINTER (base) && REG_POINTER (indx))))
1903 {
1904 rtx tmp = base;
1905 base = indx;
1906 indx = tmp;
1907 }
1908
1909 /* Validate displacement. */
1910 if (!disp)
1911 {
1912 /* If virtual registers are involved, the displacement will change later
1913 anyway as the virtual registers get eliminated. This could make a
1914 valid displacement invalid, but it is more likely to make an invalid
1915 displacement valid, because we sometimes access the register save area
1916 via negative offsets to one of those registers.
1917 Thus we don't check the displacement for validity here. If after
1918 elimination the displacement turns out to be invalid after all,
1919 this is fixed up by reload in any case. */
1920 if (base != arg_pointer_rtx
1921 && indx != arg_pointer_rtx
1922 && base != return_address_pointer_rtx
1923 && indx != return_address_pointer_rtx
1924 && base != frame_pointer_rtx
1925 && indx != frame_pointer_rtx
1926 && base != virtual_stack_vars_rtx
1927 && indx != virtual_stack_vars_rtx)
1928 if (!DISP_IN_RANGE (offset))
1929 return false;
1930 }
1931 else
1932 {
1933 /* All the special cases are pointers. */
1934 pointer = true;
1935
1936 /* In the small-PIC case, the linker converts @GOT
1937 and @GOTNTPOFF offsets to possible displacements. */
1938 if (GET_CODE (disp) == UNSPEC
1939 && (XINT (disp, 1) == UNSPEC_GOT
1940 || XINT (disp, 1) == UNSPEC_GOTNTPOFF)
1941 && flag_pic == 1)
1942 {
1943 ;
1944 }
1945
1946 /* Accept pool label offsets. */
1947 else if (GET_CODE (disp) == UNSPEC
1948 && XINT (disp, 1) == UNSPEC_POOL_OFFSET)
1949 ;
1950
1951 /* Accept literal pool references. */
1952 else if (GET_CODE (disp) == UNSPEC
1953 && XINT (disp, 1) == UNSPEC_LTREL_OFFSET)
1954 {
1955 orig_disp = gen_rtx_CONST (Pmode, disp);
1956 if (offset)
1957 {
1958 /* If we have an offset, make sure it does not
1959 exceed the size of the constant pool entry. */
1960 rtx sym = XVECEXP (disp, 0, 0);
1961 if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))
1962 return false;
1963
1964 orig_disp = plus_constant (orig_disp, offset);
1965 }
1966 }
1967
1968 else
1969 return false;
1970 }
1971
1972 if (!base && !indx)
1973 pointer = true;
1974
1975 if (out)
1976 {
1977 out->base = base;
1978 out->indx = indx;
1979 out->disp = orig_disp;
1980 out->pointer = pointer;
1981 out->literal_pool = literal_pool;
1982 }
1983
1984 return true;
1985 }
1986
1987 /* Decompose an RTL expression OP for a shift count into its components,
1988 and return the base register in BASE and the offset in OFFSET.
1989
1990 Return true if OP is a valid shift count, false if not. */
1991
1992 bool
1993 s390_decompose_shift_count (rtx op, rtx *base, HOST_WIDE_INT *offset)
1994 {
1995 HOST_WIDE_INT off = 0;
1996
1997 /* We can have an integer constant, an address register,
1998 or a sum of the two. */
1999 if (GET_CODE (op) == CONST_INT)
2000 {
2001 off = INTVAL (op);
2002 op = NULL_RTX;
2003 }
2004 if (op && GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT)
2005 {
2006 off = INTVAL (XEXP (op, 1));
2007 op = XEXP (op, 0);
2008 }
2009 while (op && GET_CODE (op) == SUBREG)
2010 op = SUBREG_REG (op);
2011
2012 if (op && GET_CODE (op) != REG)
2013 return false;
2014
2015 if (offset)
2016 *offset = off;
2017 if (base)
2018 *base = op;
2019
2020 return true;
2021 }
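
/* Illustrative note (added; not part of the original source): the forms
   accepted above decompose as follows, assuming both result pointers are
   passed:

     (const_int 7)                        -> *base = NULL_RTX, *offset = 7
     (reg:SI 3)                           -> *base = (reg:SI 3), *offset = 0
     (plus:SI (reg:SI 3) (const_int 7))   -> *base = (reg:SI 3), *offset = 7

   Anything else, e.g. a MEM or a sum of two registers, makes the function
   return false.  */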
2022
2023
2024 /* Return true if OP is a valid address without index.  */
2025
2026 bool
2027 s390_legitimate_address_without_index_p (rtx op)
2028 {
2029 struct s390_address addr;
2030
2031 if (!s390_decompose_address (XEXP (op, 0), &addr))
2032 return false;
2033 if (addr.indx)
2034 return false;
2035
2036 return true;
2037 }
2038
2039
2040 /* Evaluates constraint strings described by the regular expression
2041 ([A|B](Q|R|S|T))|U|W|Y and returns 1 if OP is a valid operand for the
2042 constraint given in STR, and 0 otherwise.  */
2043
2044 int
2045 s390_mem_constraint (const char *str, rtx op)
2046 {
2047 struct s390_address addr;
2048 char c = str[0];
2049
2050 /* Check for offsettable variants of memory constraints. */
2051 if (c == 'A')
2052 {
2053 /* Only accept non-volatile MEMs. */
2054 if (!MEM_P (op) || MEM_VOLATILE_P (op))
2055 return 0;
2056
2057 if ((reload_completed || reload_in_progress)
2058 ? !offsettable_memref_p (op) : !offsettable_nonstrict_memref_p (op))
2059 return 0;
2060
2061 c = str[1];
2062 }
2063
2064 /* Check for non-literal-pool variants of memory constraints. */
2065 else if (c == 'B')
2066 {
2067 if (GET_CODE (op) != MEM)
2068 return 0;
2069 if (!s390_decompose_address (XEXP (op, 0), &addr))
2070 return 0;
2071 if (addr.literal_pool)
2072 return 0;
2073
2074 c = str[1];
2075 }
2076
2077 switch (c)
2078 {
2079 case 'Q':
2080 if (GET_CODE (op) != MEM)
2081 return 0;
2082 if (!s390_decompose_address (XEXP (op, 0), &addr))
2083 return 0;
2084 if (addr.indx)
2085 return 0;
2086
2087 if (TARGET_LONG_DISPLACEMENT)
2088 {
2089 if (!s390_short_displacement (addr.disp))
2090 return 0;
2091 }
2092 break;
2093
2094 case 'R':
2095 if (GET_CODE (op) != MEM)
2096 return 0;
2097
2098 if (TARGET_LONG_DISPLACEMENT)
2099 {
2100 if (!s390_decompose_address (XEXP (op, 0), &addr))
2101 return 0;
2102 if (!s390_short_displacement (addr.disp))
2103 return 0;
2104 }
2105 break;
2106
2107 case 'S':
2108 if (!TARGET_LONG_DISPLACEMENT)
2109 return 0;
2110 if (GET_CODE (op) != MEM)
2111 return 0;
2112 if (!s390_decompose_address (XEXP (op, 0), &addr))
2113 return 0;
2114 if (addr.indx)
2115 return 0;
2116 if (s390_short_displacement (addr.disp))
2117 return 0;
2118 break;
2119
2120 case 'T':
2121 if (!TARGET_LONG_DISPLACEMENT)
2122 return 0;
2123 if (GET_CODE (op) != MEM)
2124 return 0;
2125 if (!s390_decompose_address (XEXP (op, 0), &addr))
2126 return 0;
2127 if (s390_short_displacement (addr.disp))
2128 return 0;
2129 break;
2130
2131 case 'U':
2132 if (TARGET_LONG_DISPLACEMENT)
2133 {
2134 if (!s390_decompose_address (op, &addr))
2135 return 0;
2136 if (!s390_short_displacement (addr.disp))
2137 return 0;
2138 }
2139 break;
2140
2141 case 'W':
2142 if (!TARGET_LONG_DISPLACEMENT)
2143 return 0;
2144 if (!s390_decompose_address (op, &addr))
2145 return 0;
2146 if (s390_short_displacement (addr.disp))
2147 return 0;
2148 break;
2149
2150 case 'Y':
2151 /* Simply check for the basic form of a shift count. Reload will
2152 take care of making sure we have a proper base register. */
2153 if (!s390_decompose_shift_count (op, NULL, NULL))
2154 return 0;
2155 break;
2156
2157 default:
2158 return 0;
2159 }
2160
2161 return 1;
2162 }
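
/* Summary added for illustration (a reading of the cases above; not part
   of the original source):

     Q   base + displacement, no index, short (12-bit) displacement
     R   base + index + displacement, short displacement
     S   base + displacement, no index, requires the long (20-bit) form
     T   base + index + displacement, requires the long form
     U/W like R/T, but OP is an address rather than a MEM
     Y   shift count operand

   The 'A' prefix additionally requires a non-volatile, offsettable MEM;
   the 'B' prefix additionally excludes literal pool references.  */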
2163
2164
2165
2166 /* Evaluates constraint strings starting with letter O. Input
2167 parameter C is the letter immediately following the "O" in the constraint
2168 string. Returns 1 if VALUE meets the respective constraint and 0
2169 otherwise. */
2170
2171 int
2172 s390_O_constraint_str (const char c, HOST_WIDE_INT value)
2173 {
2174 if (!TARGET_EXTIMM)
2175 return 0;
2176
2177 switch (c)
2178 {
2179 case 's':
2180 return trunc_int_for_mode (value, SImode) == value;
2181
2182 case 'p':
2183 return value == 0
2184 || s390_single_part (GEN_INT (value), DImode, SImode, 0) == 1;
2185
2186 case 'n':
2187 return s390_single_part (GEN_INT (value - 1), DImode, SImode, -1) == 1;
2188
2189 default:
2190 gcc_unreachable ();
2191 }
2192 }
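
/* Example added for illustration (not part of the original source): under
   the 's' case a VALUE such as 0x7fffffff satisfies the constraint, since
   truncating it to SImode leaves it unchanged, whereas 0x100000000 does
   not.  The 'p' and 'n' cases delegate to s390_single_part, which checks
   how the value decomposes into 32-bit parts of a DImode constant.  */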
2193
2194
2195 /* Evaluates constraint strings starting with letter N. Parameter STR
2196 contains the letters following letter "N" in the constraint string.
2197 Returns true if VALUE matches the constraint. */
2198
2199 int
2200 s390_N_constraint_str (const char *str, HOST_WIDE_INT value)
2201 {
2202 enum machine_mode mode, part_mode;
2203 int def;
2204 int part, part_goal;
2205
2206
2207 if (str[0] == 'x')
2208 part_goal = -1;
2209 else
2210 part_goal = str[0] - '0';
2211
2212 switch (str[1])
2213 {
2214 case 'Q':
2215 part_mode = QImode;
2216 break;
2217 case 'H':
2218 part_mode = HImode;
2219 break;
2220 case 'S':
2221 part_mode = SImode;
2222 break;
2223 default:
2224 return 0;
2225 }
2226
2227 switch (str[2])
2228 {
2229 case 'H':
2230 mode = HImode;
2231 break;
2232 case 'S':
2233 mode = SImode;
2234 break;
2235 case 'D':
2236 mode = DImode;
2237 break;
2238 default:
2239 return 0;
2240 }
2241
2242 switch (str[3])
2243 {
2244 case '0':
2245 def = 0;
2246 break;
2247 case 'F':
2248 def = -1;
2249 break;
2250 default:
2251 return 0;
2252 }
2253
2254 if (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (part_mode))
2255 return 0;
2256
2257 part = s390_single_part (GEN_INT (value), mode, part_mode, def);
2258 if (part < 0)
2259 return 0;
2260 if (part_goal != -1 && part_goal != part)
2261 return 0;
2262
2263 return 1;
2264 }
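
/* Example added for illustration (not part of the original source): for
   the constraint "N" followed by STR = "xQD0", PART_GOAL is -1 (any part),
   PART_MODE is QImode, MODE is DImode and DEF is 0.  Roughly, the
   constraint then accepts a VALUE whose DImode representation differs from
   the all-zero background pattern ('F' would mean all-one) in exactly one
   QImode-sized chunk, as decided by s390_single_part.  */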
2265
2266
2267 /* Returns true if the input parameter VALUE is a float zero. */
2268
2269 int
2270 s390_float_const_zero_p (rtx value)
2271 {
2272 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
2273 && value == CONST0_RTX (GET_MODE (value)));
2274 }
2275
2276
2277 /* Compute a (partial) cost for rtx X. Return true if the complete
2278 cost has been computed, and false if subexpressions should be
2279 scanned. In either case, *TOTAL contains the cost result.
2280 CODE contains GET_CODE (x), OUTER_CODE contains the code
2281 of the superexpression of x. */
2282
2283 static bool
2284 s390_rtx_costs (rtx x, int code, int outer_code, int *total,
2285 bool speed ATTRIBUTE_UNUSED)
2286 {
2287 switch (code)
2288 {
2289 case CONST:
2290 case CONST_INT:
2291 case LABEL_REF:
2292 case SYMBOL_REF:
2293 case CONST_DOUBLE:
2294 case MEM:
2295 *total = 0;
2296 return true;
2297
2298 case ASHIFT:
2299 case ASHIFTRT:
2300 case LSHIFTRT:
2301 case ROTATE:
2302 case ROTATERT:
2303 case AND:
2304 case IOR:
2305 case XOR:
2306 case NEG:
2307 case NOT:
2308 *total = COSTS_N_INSNS (1);
2309 return false;
2310
2311 case PLUS:
2312 case MINUS:
2313 /* Check for multiply and add. */
2314 if ((GET_MODE (x) == DFmode || GET_MODE (x) == SFmode)
2315 && GET_CODE (XEXP (x, 0)) == MULT
2316 && TARGET_HARD_FLOAT && TARGET_FUSED_MADD)
2317 {
2318 /* This is the multiply and add case. */
2319 if (GET_MODE (x) == DFmode)
2320 *total = s390_cost->madbr;
2321 else
2322 *total = s390_cost->maebr;
2323 *total += (rtx_cost (XEXP (XEXP (x, 0), 0), MULT, speed)
2324 + rtx_cost (XEXP (XEXP (x, 0), 1), MULT, speed)
2325 + rtx_cost (XEXP (x, 1), (enum rtx_code) code, speed));
2326 return true; /* Do not do an additional recursive descent. */
2327 }
2328 *total = COSTS_N_INSNS (1);
2329 return false;
2330
2331 case MULT:
2332 switch (GET_MODE (x))
2333 {
2334 case SImode:
2335 {
2336 rtx left = XEXP (x, 0);
2337 rtx right = XEXP (x, 1);
2338 if (GET_CODE (right) == CONST_INT
2339 && CONST_OK_FOR_K (INTVAL (right)))
2340 *total = s390_cost->mhi;
2341 else if (GET_CODE (left) == SIGN_EXTEND)
2342 *total = s390_cost->mh;
2343 else
2344 *total = s390_cost->ms; /* msr, ms, msy */
2345 break;
2346 }
2347 case DImode:
2348 {
2349 rtx left = XEXP (x, 0);
2350 rtx right = XEXP (x, 1);
2351 if (TARGET_64BIT)
2352 {
2353 if (GET_CODE (right) == CONST_INT
2354 && CONST_OK_FOR_K (INTVAL (right)))
2355 *total = s390_cost->mghi;
2356 else if (GET_CODE (left) == SIGN_EXTEND)
2357 *total = s390_cost->msgf;
2358 else
2359 *total = s390_cost->msg; /* msgr, msg */
2360 }
2361 else /* TARGET_31BIT */
2362 {
2363 if (GET_CODE (left) == SIGN_EXTEND
2364 && GET_CODE (right) == SIGN_EXTEND)
2365 /* mulsidi case: mr, m */
2366 *total = s390_cost->m;
2367 else if (GET_CODE (left) == ZERO_EXTEND
2368 && GET_CODE (right) == ZERO_EXTEND
2369 && TARGET_CPU_ZARCH)
2370 /* umulsidi case: ml, mlr */
2371 *total = s390_cost->ml;
2372 else
2373 /* Complex calculation is required. */
2374 *total = COSTS_N_INSNS (40);
2375 }
2376 break;
2377 }
2378 case SFmode:
2379 case DFmode:
2380 *total = s390_cost->mult_df;
2381 break;
2382 case TFmode:
2383 *total = s390_cost->mxbr;
2384 break;
2385 default:
2386 return false;
2387 }
2388 return false;
2389
2390 case UDIV:
2391 case UMOD:
2392 if (GET_MODE (x) == TImode) /* 128 bit division */
2393 *total = s390_cost->dlgr;
2394 else if (GET_MODE (x) == DImode)
2395 {
2396 rtx right = XEXP (x, 1);
2397 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
2398 *total = s390_cost->dlr;
2399 else /* 64 by 64 bit division */
2400 *total = s390_cost->dlgr;
2401 }
2402 else if (GET_MODE (x) == SImode) /* 32 bit division */
2403 *total = s390_cost->dlr;
2404 return false;
2405
2406 case DIV:
2407 case MOD:
2408 if (GET_MODE (x) == DImode)
2409 {
2410 rtx right = XEXP (x, 1);
2411 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
2412 if (TARGET_64BIT)
2413 *total = s390_cost->dsgfr;
2414 else
2415 *total = s390_cost->dr;
2416 else /* 64 by 64 bit division */
2417 *total = s390_cost->dsgr;
2418 }
2419 else if (GET_MODE (x) == SImode) /* 32 bit division */
2420 *total = s390_cost->dlr;
2421 else if (GET_MODE (x) == SFmode)
2422 {
2423 *total = s390_cost->debr;
2424 }
2425 else if (GET_MODE (x) == DFmode)
2426 {
2427 *total = s390_cost->ddbr;
2428 }
2429 else if (GET_MODE (x) == TFmode)
2430 {
2431 *total = s390_cost->dxbr;
2432 }
2433 return false;
2434
2435 case SQRT:
2436 if (GET_MODE (x) == SFmode)
2437 *total = s390_cost->sqebr;
2438 else if (GET_MODE (x) == DFmode)
2439 *total = s390_cost->sqdbr;
2440 else /* TFmode */
2441 *total = s390_cost->sqxbr;
2442 return false;
2443
2444 case SIGN_EXTEND:
2445 case ZERO_EXTEND:
2446 if (outer_code == MULT || outer_code == DIV || outer_code == MOD
2447 || outer_code == PLUS || outer_code == MINUS
2448 || outer_code == COMPARE)
2449 *total = 0;
2450 return false;
2451
2452 case COMPARE:
2453 *total = COSTS_N_INSNS (1);
2454 if (GET_CODE (XEXP (x, 0)) == AND
2455 && GET_CODE (XEXP (x, 1)) == CONST_INT
2456 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
2457 {
2458 rtx op0 = XEXP (XEXP (x, 0), 0);
2459 rtx op1 = XEXP (XEXP (x, 0), 1);
2460 rtx op2 = XEXP (x, 1);
2461
2462 if (memory_operand (op0, GET_MODE (op0))
2463 && s390_tm_ccmode (op1, op2, 0) != VOIDmode)
2464 return true;
2465 if (register_operand (op0, GET_MODE (op0))
2466 && s390_tm_ccmode (op1, op2, 1) != VOIDmode)
2467 return true;
2468 }
2469 return false;
2470
2471 default:
2472 return false;
2473 }
2474 }
2475
2476 /* Return the cost of an address rtx ADDR. */
2477
2478 static int
2479 s390_address_cost (rtx addr, bool speed ATTRIBUTE_UNUSED)
2480 {
2481 struct s390_address ad;
2482 if (!s390_decompose_address (addr, &ad))
2483 return 1000;
2484
2485 return ad.indx? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (1);
2486 }
2487
2488 /* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
2489 otherwise return 0. */
2490
2491 int
2492 tls_symbolic_operand (rtx op)
2493 {
2494 if (GET_CODE (op) != SYMBOL_REF)
2495 return 0;
2496 return SYMBOL_REF_TLS_MODEL (op);
2497 }
2498 \f
2499 /* Split DImode access register reference REG (on 64-bit) into its constituent
2500 low and high parts, and store them into LO and HI. Note that gen_lowpart/
2501 gen_highpart cannot be used as they assume all registers are word-sized,
2502 while our access registers have only half that size. */
2503
2504 void
2505 s390_split_access_reg (rtx reg, rtx *lo, rtx *hi)
2506 {
2507 gcc_assert (TARGET_64BIT);
2508 gcc_assert (ACCESS_REG_P (reg));
2509 gcc_assert (GET_MODE (reg) == DImode);
2510 gcc_assert (!(REGNO (reg) & 1));
2511
2512 *lo = gen_rtx_REG (SImode, REGNO (reg) + 1);
2513 *hi = gen_rtx_REG (SImode, REGNO (reg));
2514 }
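
/* Example added for illustration (not part of the original source): for a
   DImode value living in the access register pair starting at %a0, the
   high SImode half is returned as %a0 (REGNO) and the low half as %a1
   (REGNO + 1).  */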
2515
2516 /* Return true if OP contains a symbol reference.  */
2517
2518 bool
2519 symbolic_reference_mentioned_p (rtx op)
2520 {
2521 const char *fmt;
2522 int i;
2523
2524 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
2525 return 1;
2526
2527 fmt = GET_RTX_FORMAT (GET_CODE (op));
2528 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2529 {
2530 if (fmt[i] == 'E')
2531 {
2532 int j;
2533
2534 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2535 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2536 return 1;
2537 }
2538
2539 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
2540 return 1;
2541 }
2542
2543 return 0;
2544 }
2545
2546 /* Return true if OP contains a reference to a thread-local symbol. */
2547
2548 bool
2549 tls_symbolic_reference_mentioned_p (rtx op)
2550 {
2551 const char *fmt;
2552 int i;
2553
2554 if (GET_CODE (op) == SYMBOL_REF)
2555 return tls_symbolic_operand (op);
2556
2557 fmt = GET_RTX_FORMAT (GET_CODE (op));
2558 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2559 {
2560 if (fmt[i] == 'E')
2561 {
2562 int j;
2563
2564 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2565 if (tls_symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2566 return true;
2567 }
2568
2569 else if (fmt[i] == 'e' && tls_symbolic_reference_mentioned_p (XEXP (op, i)))
2570 return true;
2571 }
2572
2573 return false;
2574 }
2575
2576
2577 /* Return true if OP is a legitimate general operand when
2578 generating PIC code. It is given that flag_pic is on
2579 and that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2580
2581 int
2582 legitimate_pic_operand_p (rtx op)
2583 {
2584 /* Accept all non-symbolic constants. */
2585 if (!SYMBOLIC_CONST (op))
2586 return 1;
2587
2588 /* Reject everything else; must be handled
2589 via emit_symbolic_move. */
2590 return 0;
2591 }
2592
2593 /* Returns true if the constant value OP is a legitimate general operand.
2594 It is given that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2595
2596 int
2597 legitimate_constant_p (rtx op)
2598 {
2599 /* Accept all non-symbolic constants. */
2600 if (!SYMBOLIC_CONST (op))
2601 return 1;
2602
2603 /* Accept immediate LARL operands. */
2604 if (TARGET_CPU_ZARCH && larl_operand (op, VOIDmode))
2605 return 1;
2606
2607 /* Thread-local symbols are never legal constants. This is
2608 so that emit_call knows that computing such addresses
2609 might require a function call. */
2610 if (TLS_SYMBOLIC_CONST (op))
2611 return 0;
2612
2613 /* In the PIC case, symbolic constants must *not* be
2614 forced into the literal pool. We accept them here,
2615 so that they will be handled by emit_symbolic_move. */
2616 if (flag_pic)
2617 return 1;
2618
2619 /* All remaining non-PIC symbolic constants are
2620 forced into the literal pool. */
2621 return 0;
2622 }
2623
2624 /* Determine if it's legal to put X into the constant pool. This
2625 is not possible if X contains the address of a symbol that is
2626 not constant (TLS) or not known at final link time (PIC). */
2627
2628 static bool
2629 s390_cannot_force_const_mem (rtx x)
2630 {
2631 switch (GET_CODE (x))
2632 {
2633 case CONST_INT:
2634 case CONST_DOUBLE:
2635 /* Accept all non-symbolic constants. */
2636 return false;
2637
2638 case LABEL_REF:
2639 /* Labels are OK iff we are non-PIC. */
2640 return flag_pic != 0;
2641
2642 case SYMBOL_REF:
2643 /* 'Naked' TLS symbol references are never OK,
2644 non-TLS symbols are OK iff we are non-PIC. */
2645 if (tls_symbolic_operand (x))
2646 return true;
2647 else
2648 return flag_pic != 0;
2649
2650 case CONST:
2651 return s390_cannot_force_const_mem (XEXP (x, 0));
2652 case PLUS:
2653 case MINUS:
2654 return s390_cannot_force_const_mem (XEXP (x, 0))
2655 || s390_cannot_force_const_mem (XEXP (x, 1));
2656
2657 case UNSPEC:
2658 switch (XINT (x, 1))
2659 {
2660 /* Only lt-relative or GOT-relative UNSPECs are OK. */
2661 case UNSPEC_LTREL_OFFSET:
2662 case UNSPEC_GOT:
2663 case UNSPEC_GOTOFF:
2664 case UNSPEC_PLTOFF:
2665 case UNSPEC_TLSGD:
2666 case UNSPEC_TLSLDM:
2667 case UNSPEC_NTPOFF:
2668 case UNSPEC_DTPOFF:
2669 case UNSPEC_GOTNTPOFF:
2670 case UNSPEC_INDNTPOFF:
2671 return false;
2672
2673 /* If the literal pool shares the code section, we put
2674 execute template placeholders into the pool as well. */
2675 case UNSPEC_INSN:
2676 return TARGET_CPU_ZARCH;
2677
2678 default:
2679 return true;
2680 }
2681 break;
2682
2683 default:
2684 gcc_unreachable ();
2685 }
2686 }
2687
2688 /* Returns true if the constant value OP is a legitimate general
2689 operand during and after reload. The difference to
2690 legitimate_constant_p is that this function will not accept
2691 a constant that would need to be forced to the literal pool
2692 before it can be used as operand. */
2693
2694 bool
2695 legitimate_reload_constant_p (rtx op)
2696 {
2697 /* Accept la(y) operands. */
2698 if (GET_CODE (op) == CONST_INT
2699 && DISP_IN_RANGE (INTVAL (op)))
2700 return true;
2701
2702 /* Accept l(g)hi/l(g)fi operands. */
2703 if (GET_CODE (op) == CONST_INT
2704 && (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_Os (INTVAL (op))))
2705 return true;
2706
2707 /* Accept lliXX operands. */
2708 if (TARGET_ZARCH
2709 && GET_CODE (op) == CONST_INT
2710 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2711 && s390_single_part (op, word_mode, HImode, 0) >= 0)
2712 return true;
2713
2714 if (TARGET_EXTIMM
2715 && GET_CODE (op) == CONST_INT
2716 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2717 && s390_single_part (op, word_mode, SImode, 0) >= 0)
2718 return true;
2719
2720 /* Accept larl operands. */
2721 if (TARGET_CPU_ZARCH
2722 && larl_operand (op, VOIDmode))
2723 return true;
2724
2725 /* Accept lzXX operands. */
2726 if (GET_CODE (op) == CONST_DOUBLE
2727 && CONST_DOUBLE_OK_FOR_CONSTRAINT_P (op, 'G', "G"))
2728 return true;
2729
2730 /* Accept double-word operands that can be split. */
2731 if (GET_CODE (op) == CONST_INT
2732 && trunc_int_for_mode (INTVAL (op), word_mode) != INTVAL (op))
2733 {
2734 enum machine_mode dword_mode = word_mode == SImode ? DImode : TImode;
2735 rtx hi = operand_subword (op, 0, 0, dword_mode);
2736 rtx lo = operand_subword (op, 1, 0, dword_mode);
2737 return legitimate_reload_constant_p (hi)
2738 && legitimate_reload_constant_p (lo);
2739 }
2740
2741 /* Everything else cannot be handled without reload. */
2742 return false;
2743 }
2744
2745 /* Given an rtx OP being reloaded into a reg required to be in class RCLASS,
2746 return the class of reg to actually use. */
2747
2748 enum reg_class
2749 s390_preferred_reload_class (rtx op, enum reg_class rclass)
2750 {
2751 switch (GET_CODE (op))
2752 {
2753 /* Constants we cannot reload must be forced into the
2754 literal pool. */
2755
2756 case CONST_DOUBLE:
2757 case CONST_INT:
2758 if (legitimate_reload_constant_p (op))
2759 return rclass;
2760 else
2761 return NO_REGS;
2762
2763 /* If a symbolic constant or a PLUS is reloaded,
2764 it is most likely being used as an address, so
2765 prefer ADDR_REGS.  If RCLASS is not a superset
2766 of ADDR_REGS, e.g. FP_REGS, reject this reload. */
2767 case PLUS:
2768 case LABEL_REF:
2769 case SYMBOL_REF:
2770 case CONST:
2771 if (reg_class_subset_p (ADDR_REGS, rclass))
2772 return ADDR_REGS;
2773 else
2774 return NO_REGS;
2775
2776 default:
2777 break;
2778 }
2779
2780 return rclass;
2781 }
2782
2783 /* Return true if ADDR is of kind symbol_ref or symbol_ref + const_int
2784 and return these parts in SYMREF and ADDEND. You can pass NULL in
2785 SYMREF and/or ADDEND if you are not interested in these values. */
2786
2787 static bool
2788 s390_symref_operand_p (rtx addr, rtx *symref, HOST_WIDE_INT *addend)
2789 {
2790 HOST_WIDE_INT tmpaddend = 0;
2791
2792 if (GET_CODE (addr) == CONST)
2793 addr = XEXP (addr, 0);
2794
2795 if (GET_CODE (addr) == PLUS)
2796 {
2797 if (GET_CODE (XEXP (addr, 0)) == SYMBOL_REF
2798 && CONST_INT_P (XEXP (addr, 1)))
2799 {
2800 tmpaddend = INTVAL (XEXP (addr, 1));
2801 addr = XEXP (addr, 0);
2802 }
2803 else
2804 return false;
2805 }
2806 else
2807 if (GET_CODE (addr) != SYMBOL_REF)
2808 return false;
2809
2810 if (symref)
2811 *symref = addr;
2812 if (addend)
2813 *addend = tmpaddend;
2814
2815 return true;
2816 }
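
/* Examples added for illustration (not part of the original source):

     (symbol_ref "foo")                               -> *symref = foo, *addend = 0
     (const (plus (symbol_ref "foo") (const_int 12))) -> *symref = foo, *addend = 12

   Anything else, e.g. (plus (reg ...) (const_int ...)), returns false.  */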
2817
2818 /* Return true if ADDR is SYMBOL_REF + addend with addend being a
2819 multiple of ALIGNMENT and the SYMBOL_REF being naturally
2820 aligned. */
2821
2822 bool
2823 s390_check_symref_alignment (rtx addr, HOST_WIDE_INT alignment)
2824 {
2825 HOST_WIDE_INT addend;
2826 rtx symref;
2827
2828 if (!s390_symref_operand_p (addr, &symref, &addend))
2829 return false;
2830
2831 return (!SYMBOL_REF_NOT_NATURALLY_ALIGNED_P (symref)
2832 && !(addend & (alignment - 1)));
2833 }
2834
2835 /* ADDR is moved into REG using larl.  If ADDR isn't a valid larl
2836 operand, SCRATCH is used to hold the even part of the address, which
2837 is then incremented by one.  */
2838
2839 void
2840 s390_reload_larl_operand (rtx reg, rtx addr, rtx scratch)
2841 {
2842 HOST_WIDE_INT addend;
2843 rtx symref;
2844
2845 if (!s390_symref_operand_p (addr, &symref, &addend))
2846 gcc_unreachable ();
2847
2848 if (!(addend & 1))
2849 /* Easy case. The addend is even so larl will do fine. */
2850 emit_move_insn (reg, addr);
2851 else
2852 {
2853 /* We can leave the scratch register untouched if the target
2854 register is a valid base register. */
2855 if (REGNO (reg) < FIRST_PSEUDO_REGISTER
2856 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS)
2857 scratch = reg;
2858
2859 gcc_assert (REGNO (scratch) < FIRST_PSEUDO_REGISTER);
2860 gcc_assert (REGNO_REG_CLASS (REGNO (scratch)) == ADDR_REGS);
2861
2862 if (addend != 1)
2863 emit_move_insn (scratch,
2864 gen_rtx_CONST (Pmode,
2865 gen_rtx_PLUS (Pmode, symref,
2866 GEN_INT (addend - 1))));
2867 else
2868 emit_move_insn (scratch, symref);
2869
2870 /* Increment the address using la in order to avoid clobbering cc. */
2871 emit_move_insn (reg, gen_rtx_PLUS (Pmode, scratch, const1_rtx));
2872 }
2873 }
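
/* Worked example added for illustration (not part of the original
   source): assume ADDR is (const (plus (symbol_ref "x") (const_int 5))).
   The addend is odd, so larl cannot load it directly; the code above first
   loads x + 4 (the even part) into SCRATCH and then emits
   REG = SCRATCH + 1 via la, producing x + 5 without clobbering the
   condition code.  */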
2874
2875 /* Generate what is necessary to move between REG and MEM using
2876 SCRATCH. The direction is given by TOMEM. */
2877
2878 void
2879 s390_reload_symref_address (rtx reg, rtx mem, rtx scratch, bool tomem)
2880 {
2881 /* Reload might have pulled a constant out of the literal pool.
2882 Force it back in. */
2883 if (CONST_INT_P (mem) || GET_CODE (mem) == CONST_DOUBLE
2884 || GET_CODE (mem) == CONST)
2885 mem = force_const_mem (GET_MODE (reg), mem);
2886
2887 gcc_assert (MEM_P (mem));
2888
2889 /* For a load from memory we can leave the scratch register
2890 untouched if the target register is a valid base register. */
2891 if (!tomem
2892 && REGNO (reg) < FIRST_PSEUDO_REGISTER
2893 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS
2894 && GET_MODE (reg) == GET_MODE (scratch))
2895 scratch = reg;
2896
2897 /* Load address into scratch register. Since we can't have a
2898 secondary reload for a secondary reload we have to cover the case
2899 where larl would need a secondary reload here as well. */
2900 s390_reload_larl_operand (scratch, XEXP (mem, 0), scratch);
2901
2902 /* Now we can use a standard load/store to do the move. */
2903 if (tomem)
2904 emit_move_insn (replace_equiv_address (mem, scratch), reg);
2905 else
2906 emit_move_insn (reg, replace_equiv_address (mem, scratch));
2907 }
2908
2909 /* Inform reload about cases where moving X with a mode MODE to a register in
2910 RCLASS requires an extra scratch or immediate register. Return the class
2911 needed for the immediate register. */
2912
2913 static enum reg_class
2914 s390_secondary_reload (bool in_p, rtx x, enum reg_class rclass,
2915 enum machine_mode mode, secondary_reload_info *sri)
2916 {
2917 /* Intermediate register needed. */
2918 if (reg_classes_intersect_p (CC_REGS, rclass))
2919 return GENERAL_REGS;
2920
2921 if (TARGET_Z10)
2922 {
2923 /* On z10 several optimizer steps may generate larl operands with
2924 an odd addend. */
2925 if (in_p
2926 && s390_symref_operand_p (x, NULL, NULL)
2927 && mode == Pmode
2928 && !s390_check_symref_alignment (x, 2))
2929 sri->icode = ((mode == DImode) ? CODE_FOR_reloaddi_larl_odd_addend_z10
2930 : CODE_FOR_reloadsi_larl_odd_addend_z10);
2931
2932 /* On z10 we need a scratch register when moving QI, TI or floating
2933 point mode values from or to a memory location with a SYMBOL_REF
2934 or if the symref addend of a SI or DI move is not aligned to the
2935 width of the access. */
2936 if (MEM_P (x)
2937 && s390_symref_operand_p (XEXP (x, 0), NULL, NULL)
2938 && (mode == QImode || mode == TImode || FLOAT_MODE_P (mode)
2939 || (!TARGET_64BIT && mode == DImode)
2940 || ((mode == HImode || mode == SImode || mode == DImode)
2941 && (!s390_check_symref_alignment (XEXP (x, 0),
2942 GET_MODE_SIZE (mode))))))
2943 {
2944 #define __SECONDARY_RELOAD_CASE(M,m) \
2945 case M##mode: \
2946 if (TARGET_64BIT) \
2947 sri->icode = in_p ? CODE_FOR_reload##m##di_toreg_z10 : \
2948 CODE_FOR_reload##m##di_tomem_z10; \
2949 else \
2950 sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 : \
2951 CODE_FOR_reload##m##si_tomem_z10; \
2952 break;
2953
2954 switch (GET_MODE (x))
2955 {
2956 __SECONDARY_RELOAD_CASE (QI, qi);
2957 __SECONDARY_RELOAD_CASE (HI, hi);
2958 __SECONDARY_RELOAD_CASE (SI, si);
2959 __SECONDARY_RELOAD_CASE (DI, di);
2960 __SECONDARY_RELOAD_CASE (TI, ti);
2961 __SECONDARY_RELOAD_CASE (SF, sf);
2962 __SECONDARY_RELOAD_CASE (DF, df);
2963 __SECONDARY_RELOAD_CASE (TF, tf);
2964 __SECONDARY_RELOAD_CASE (SD, sd);
2965 __SECONDARY_RELOAD_CASE (DD, dd);
2966 __SECONDARY_RELOAD_CASE (TD, td);
2967
2968 default:
2969 gcc_unreachable ();
2970 }
2971 #undef __SECONDARY_RELOAD_CASE
2972 }
2973 }
2974
2975 /* We need a scratch register when loading a PLUS expression which
2976 is not a legitimate operand of the LOAD ADDRESS instruction. */
2977 if (in_p && s390_plus_operand (x, mode))
2978 sri->icode = (TARGET_64BIT ?
2979 CODE_FOR_reloaddi_plus : CODE_FOR_reloadsi_plus);
2980
2981 /* Performing a multiword move from or to memory we have to make sure the
2982 second chunk in memory is addressable without causing a displacement
2983 overflow. If that would be the case we calculate the address in
2984 a scratch register. */
2985 if (MEM_P (x)
2986 && GET_CODE (XEXP (x, 0)) == PLUS
2987 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2988 && !DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1))
2989 + GET_MODE_SIZE (mode) - 1))
2990 {
2991 /* For GENERAL_REGS a displacement overflow is no problem if occurring
2992 in an s_operand address since we may fall back to lm/stm.  So we only
2993 have to care about overflows in the b+i+d case. */
2994 if ((reg_classes_intersect_p (GENERAL_REGS, rclass)
2995 && s390_class_max_nregs (GENERAL_REGS, mode) > 1
2996 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
2997 /* For FP_REGS no lm/stm is available so this check is triggered
2998 for displacement overflows in b+i+d and b+d like addresses. */
2999 || (reg_classes_intersect_p (FP_REGS, rclass)
3000 && s390_class_max_nregs (FP_REGS, mode) > 1))
3001 {
3002 if (in_p)
3003 sri->icode = (TARGET_64BIT ?
3004 CODE_FOR_reloaddi_nonoffmem_in :
3005 CODE_FOR_reloadsi_nonoffmem_in);
3006 else
3007 sri->icode = (TARGET_64BIT ?
3008 CODE_FOR_reloaddi_nonoffmem_out :
3009 CODE_FOR_reloadsi_nonoffmem_out);
3010 }
3011 }
3012
3013 /* A scratch address register is needed when a symbolic constant is
3014 copied to r0 when compiling with -fPIC.  In other cases the target
3015 register might be used as temporary (see legitimize_pic_address). */
3016 if (in_p && SYMBOLIC_CONST (x) && flag_pic == 2 && rclass != ADDR_REGS)
3017 sri->icode = (TARGET_64BIT ?
3018 CODE_FOR_reloaddi_PIC_addr :
3019 CODE_FOR_reloadsi_PIC_addr);
3020
3021 /* Either scratch or no register needed. */
3022 return NO_REGS;
3023 }
3024
3025 /* Generate code to load SRC, which is PLUS that is not a
3026 legitimate operand for the LA instruction, into TARGET.
3027 SCRATCH may be used as scratch register. */
3028
3029 void
3030 s390_expand_plus_operand (rtx target, rtx src,
3031 rtx scratch)
3032 {
3033 rtx sum1, sum2;
3034 struct s390_address ad;
3035
3036 /* src must be a PLUS; get its two operands. */
3037 gcc_assert (GET_CODE (src) == PLUS);
3038 gcc_assert (GET_MODE (src) == Pmode);
3039
3040 /* Check if any of the two operands is already scheduled
3041 for replacement by reload. This can happen e.g. when
3042 float registers occur in an address. */
3043 sum1 = find_replacement (&XEXP (src, 0));
3044 sum2 = find_replacement (&XEXP (src, 1));
3045 src = gen_rtx_PLUS (Pmode, sum1, sum2);
3046
3047 /* If the address is already strictly valid, there's nothing to do. */
3048 if (!s390_decompose_address (src, &ad)
3049 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
3050 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
3051 {
3052 /* Otherwise, one of the operands cannot be an address register;
3053 we reload its value into the scratch register. */
3054 if (true_regnum (sum1) < 1 || true_regnum (sum1) > 15)
3055 {
3056 emit_move_insn (scratch, sum1);
3057 sum1 = scratch;
3058 }
3059 if (true_regnum (sum2) < 1 || true_regnum (sum2) > 15)
3060 {
3061 emit_move_insn (scratch, sum2);
3062 sum2 = scratch;
3063 }
3064
3065 /* According to the way these invalid addresses are generated
3066 in reload.c, it should never happen (at least on s390) that
3067 *neither* of the PLUS components, after find_replacement
3068 was applied, is an address register. */
3069 if (sum1 == scratch && sum2 == scratch)
3070 {
3071 debug_rtx (src);
3072 gcc_unreachable ();
3073 }
3074
3075 src = gen_rtx_PLUS (Pmode, sum1, sum2);
3076 }
3077
3078 /* Emit the LOAD ADDRESS pattern. Note that reload of PLUS
3079 is only ever performed on addresses, so we can mark the
3080 sum as legitimate for LA in any case. */
3081 s390_load_address (target, src);
3082 }
3083
3084
3085 /* Return true if ADDR is a valid memory address.
3086 STRICT specifies whether strict register checking applies. */
3087
3088 bool
3089 legitimate_address_p (enum machine_mode mode, rtx addr, int strict)
3090 {
3091 struct s390_address ad;
3092
3093 if (TARGET_Z10
3094 && larl_operand (addr, VOIDmode)
3095 && (mode == VOIDmode
3096 || s390_check_symref_alignment (addr, GET_MODE_SIZE (mode))))
3097 return true;
3098
3099 if (!s390_decompose_address (addr, &ad))
3100 return false;
3101
3102 if (strict)
3103 {
3104 if (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
3105 return false;
3106
3107 if (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx)))
3108 return false;
3109 }
3110 else
3111 {
3112 if (ad.base
3113 && !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
3114 || REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
3115 return false;
3116
3117 if (ad.indx
3118 && !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
3119 || REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
3120 return false;
3121 }
3122 return true;
3123 }
3124
3125 /* Return true if OP is a valid operand for the LA instruction.
3126 In 31-bit, we need to prove that the result is used as an
3127 address, as LA performs only a 31-bit addition. */
3128
3129 bool
3130 legitimate_la_operand_p (rtx op)
3131 {
3132 struct s390_address addr;
3133 if (!s390_decompose_address (op, &addr))
3134 return false;
3135
3136 return (TARGET_64BIT || addr.pointer);
3137 }
3138
3139 /* Return true if it is valid *and* preferable to use LA to
3140 compute the sum of OP1 and OP2. */
3141
3142 bool
3143 preferred_la_operand_p (rtx op1, rtx op2)
3144 {
3145 struct s390_address addr;
3146
3147 if (op2 != const0_rtx)
3148 op1 = gen_rtx_PLUS (Pmode, op1, op2);
3149
3150 if (!s390_decompose_address (op1, &addr))
3151 return false;
3152 if (addr.base && !REGNO_OK_FOR_BASE_P (REGNO (addr.base)))
3153 return false;
3154 if (addr.indx && !REGNO_OK_FOR_INDEX_P (REGNO (addr.indx)))
3155 return false;
3156
3157 if (!TARGET_64BIT && !addr.pointer)
3158 return false;
3159
3160 if (addr.pointer)
3161 return true;
3162
3163 if ((addr.base && REG_P (addr.base) && REG_POINTER (addr.base))
3164 || (addr.indx && REG_P (addr.indx) && REG_POINTER (addr.indx)))
3165 return true;
3166
3167 return false;
3168 }
3169
3170 /* Emit a forced load-address operation to load SRC into DST.
3171 This will use the LOAD ADDRESS instruction even in situations
3172 where legitimate_la_operand_p (SRC) returns false. */
3173
3174 void
3175 s390_load_address (rtx dst, rtx src)
3176 {
3177 if (TARGET_64BIT)
3178 emit_move_insn (dst, src);
3179 else
3180 emit_insn (gen_force_la_31 (dst, src));
3181 }
3182
3183 /* Return a legitimate reference for ORIG (an address) using the
3184 register REG. If REG is 0, a new pseudo is generated.
3185
3186 There are two types of references that must be handled:
3187
3188 1. Global data references must load the address from the GOT, via
3189 the PIC reg. An insn is emitted to do this load, and the reg is
3190 returned.
3191
3192 2. Static data references, constant pool addresses, and code labels
3193 compute the address as an offset from the GOT, whose base is in
3194 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
3195 differentiate them from global data objects. The returned
3196 address is the PIC reg + an unspec constant.
3197
3198 GO_IF_LEGITIMATE_ADDRESS rejects symbolic references unless the PIC
3199 reg also appears in the address. */
3200
3201 rtx
3202 legitimize_pic_address (rtx orig, rtx reg)
3203 {
3204 rtx addr = orig;
3205 rtx new_rtx = orig;
3206 rtx base;
3207
3208 gcc_assert (!TLS_SYMBOLIC_CONST (addr));
3209
3210 if (GET_CODE (addr) == LABEL_REF
3211 || (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (addr)))
3212 {
3213 /* This is a local symbol. */
3214 if (TARGET_CPU_ZARCH && larl_operand (addr, VOIDmode))
3215 {
3216 /* Access local symbols PC-relative via LARL.
3217 This is the same as in the non-PIC case, so it is
3218 handled automatically ... */
3219 }
3220 else
3221 {
3222 /* Access local symbols relative to the GOT. */
3223
3224 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3225
3226 if (reload_in_progress || reload_completed)
3227 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3228
3229 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
3230 addr = gen_rtx_CONST (Pmode, addr);
3231 addr = force_const_mem (Pmode, addr);
3232 emit_move_insn (temp, addr);
3233
3234 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3235 if (reg != 0)
3236 {
3237 s390_load_address (reg, new_rtx);
3238 new_rtx = reg;
3239 }
3240 }
3241 }
3242 else if (GET_CODE (addr) == SYMBOL_REF)
3243 {
3244 if (reg == 0)
3245 reg = gen_reg_rtx (Pmode);
3246
3247 if (flag_pic == 1)
3248 {
3249 /* Assume GOT offset < 4k. This is handled the same way
3250 in both 31- and 64-bit code (@GOT). */
3251
3252 if (reload_in_progress || reload_completed)
3253 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3254
3255 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3256 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3257 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
3258 new_rtx = gen_const_mem (Pmode, new_rtx);
3259 emit_move_insn (reg, new_rtx);
3260 new_rtx = reg;
3261 }
3262 else if (TARGET_CPU_ZARCH)
3263 {
3264 /* If the GOT offset might be >= 4k, we determine the position
3265 of the GOT entry via a PC-relative LARL (@GOTENT). */
3266
3267 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3268
3269 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
3270 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
3271
3272 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
3273 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3274 emit_move_insn (temp, new_rtx);
3275
3276 new_rtx = gen_const_mem (Pmode, temp);
3277 emit_move_insn (reg, new_rtx);
3278 new_rtx = reg;
3279 }
3280 else
3281 {
3282 /* If the GOT offset might be >= 4k, we have to load it
3283 from the literal pool (@GOT). */
3284
3285 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3286
3287 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
3288 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
3289
3290 if (reload_in_progress || reload_completed)
3291 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3292
3293 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3294 addr = gen_rtx_CONST (Pmode, addr);
3295 addr = force_const_mem (Pmode, addr);
3296 emit_move_insn (temp, addr);
3297
3298 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3299 new_rtx = gen_const_mem (Pmode, new_rtx);
3300 emit_move_insn (reg, new_rtx);
3301 new_rtx = reg;
3302 }
3303 }
3304 else
3305 {
3306 if (GET_CODE (addr) == CONST)
3307 {
3308 addr = XEXP (addr, 0);
3309 if (GET_CODE (addr) == UNSPEC)
3310 {
3311 gcc_assert (XVECLEN (addr, 0) == 1);
3312 switch (XINT (addr, 1))
3313 {
3314 /* If someone moved a GOT-relative UNSPEC
3315 out of the literal pool, force them back in. */
3316 case UNSPEC_GOTOFF:
3317 case UNSPEC_PLTOFF:
3318 new_rtx = force_const_mem (Pmode, orig);
3319 break;
3320
3321 /* @GOT is OK as is if small. */
3322 case UNSPEC_GOT:
3323 if (flag_pic == 2)
3324 new_rtx = force_const_mem (Pmode, orig);
3325 break;
3326
3327 /* @GOTENT is OK as is. */
3328 case UNSPEC_GOTENT:
3329 break;
3330
3331 /* @PLT is OK as is on 64-bit, must be converted to
3332 GOT-relative @PLTOFF on 31-bit. */
3333 case UNSPEC_PLT:
3334 if (!TARGET_CPU_ZARCH)
3335 {
3336 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3337
3338 if (reload_in_progress || reload_completed)
3339 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3340
3341 addr = XVECEXP (addr, 0, 0);
3342 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr),
3343 UNSPEC_PLTOFF);
3344 addr = gen_rtx_CONST (Pmode, addr);
3345 addr = force_const_mem (Pmode, addr);
3346 emit_move_insn (temp, addr);
3347
3348 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3349 if (reg != 0)
3350 {
3351 s390_load_address (reg, new_rtx);
3352 new_rtx = reg;
3353 }
3354 }
3355 break;
3356
3357 /* Everything else cannot happen. */
3358 default:
3359 gcc_unreachable ();
3360 }
3361 }
3362 else
3363 gcc_assert (GET_CODE (addr) == PLUS);
3364 }
3365 if (GET_CODE (addr) == PLUS)
3366 {
3367 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
3368
3369 gcc_assert (!TLS_SYMBOLIC_CONST (op0));
3370 gcc_assert (!TLS_SYMBOLIC_CONST (op1));
3371
3372 /* Check first to see if this is a constant offset
3373 from a local symbol reference. */
3374 if ((GET_CODE (op0) == LABEL_REF
3375 || (GET_CODE (op0) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (op0)))
3376 && GET_CODE (op1) == CONST_INT)
3377 {
3378 if (TARGET_CPU_ZARCH
3379 && larl_operand (op0, VOIDmode)
3380 && INTVAL (op1) < (HOST_WIDE_INT)1 << 31
3381 && INTVAL (op1) >= -((HOST_WIDE_INT)1 << 31))
3382 {
3383 if (INTVAL (op1) & 1)
3384 {
3385 /* LARL can't handle odd offsets, so emit a
3386 pair of LARL and LA. */
3387 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3388
3389 if (!DISP_IN_RANGE (INTVAL (op1)))
3390 {
3391 HOST_WIDE_INT even = INTVAL (op1) - 1;
3392 op0 = gen_rtx_PLUS (Pmode, op0, GEN_INT (even));
3393 op0 = gen_rtx_CONST (Pmode, op0);
3394 op1 = const1_rtx;
3395 }
3396
3397 emit_move_insn (temp, op0);
3398 new_rtx = gen_rtx_PLUS (Pmode, temp, op1);
3399
3400 if (reg != 0)
3401 {
3402 s390_load_address (reg, new_rtx);
3403 new_rtx = reg;
3404 }
3405 }
3406 else
3407 {
3408 /* If the offset is even, we can just use LARL.
3409 This will happen automatically. */
3410 }
3411 }
3412 else
3413 {
3414 /* Access local symbols relative to the GOT. */
3415
3416 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3417
3418 if (reload_in_progress || reload_completed)
3419 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3420
3421 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
3422 UNSPEC_GOTOFF);
3423 addr = gen_rtx_PLUS (Pmode, addr, op1);
3424 addr = gen_rtx_CONST (Pmode, addr);
3425 addr = force_const_mem (Pmode, addr);
3426 emit_move_insn (temp, addr);
3427
3428 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3429 if (reg != 0)
3430 {
3431 s390_load_address (reg, new_rtx);
3432 new_rtx = reg;
3433 }
3434 }
3435 }
3436
3437 /* Now, check whether it is a GOT relative symbol plus offset
3438 that was pulled out of the literal pool. Force it back in. */
3439
3440 else if (GET_CODE (op0) == UNSPEC
3441 && GET_CODE (op1) == CONST_INT
3442 && XINT (op0, 1) == UNSPEC_GOTOFF)
3443 {
3444 gcc_assert (XVECLEN (op0, 0) == 1);
3445
3446 new_rtx = force_const_mem (Pmode, orig);
3447 }
3448
3449 /* Otherwise, compute the sum. */
3450 else
3451 {
3452 base = legitimize_pic_address (XEXP (addr, 0), reg);
3453 new_rtx = legitimize_pic_address (XEXP (addr, 1),
3454 base == reg ? NULL_RTX : reg);
3455 if (GET_CODE (new_rtx) == CONST_INT)
3456 new_rtx = plus_constant (base, INTVAL (new_rtx));
3457 else
3458 {
3459 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
3460 {
3461 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
3462 new_rtx = XEXP (new_rtx, 1);
3463 }
3464 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
3465 }
3466
3467 if (GET_CODE (new_rtx) == CONST)
3468 new_rtx = XEXP (new_rtx, 0);
3469 new_rtx = force_operand (new_rtx, 0);
3470 }
3471 }
3472 }
3473 return new_rtx;
3474 }
3475
3476 /* Load the thread pointer into a register. */
3477
3478 rtx
3479 s390_get_thread_pointer (void)
3480 {
3481 rtx tp = gen_reg_rtx (Pmode);
3482
3483 emit_move_insn (tp, gen_rtx_REG (Pmode, TP_REGNUM));
3484 mark_reg_pointer (tp, BITS_PER_WORD);
3485
3486 return tp;
3487 }
3488
3489 /* Emit a tls call insn. The call target is the SYMBOL_REF stored
3490 in s390_tls_symbol which always refers to __tls_get_offset.
3491 The returned offset is written to RESULT_REG and a USE rtx is
3492 generated for TLS_CALL. */
3493
3494 static GTY(()) rtx s390_tls_symbol;
3495
3496 static void
3497 s390_emit_tls_call_insn (rtx result_reg, rtx tls_call)
3498 {
3499 rtx insn;
3500
3501 gcc_assert (flag_pic);
3502
3503 if (!s390_tls_symbol)
3504 s390_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_offset");
3505
3506 insn = s390_emit_call (s390_tls_symbol, tls_call, result_reg,
3507 gen_rtx_REG (Pmode, RETURN_REGNUM));
3508
3509 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), result_reg);
3510 RTL_CONST_CALL_P (insn) = 1;
3511 }
3512
3513 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3514 this (thread-local) address. REG may be used as temporary. */
3515
3516 static rtx
3517 legitimize_tls_address (rtx addr, rtx reg)
3518 {
3519 rtx new_rtx, tls_call, temp, base, r2, insn;
3520
3521 if (GET_CODE (addr) == SYMBOL_REF)
3522 switch (tls_symbolic_operand (addr))
3523 {
3524 case TLS_MODEL_GLOBAL_DYNAMIC:
3525 start_sequence ();
3526 r2 = gen_rtx_REG (Pmode, 2);
3527 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_TLSGD);
3528 new_rtx = gen_rtx_CONST (Pmode, tls_call);
3529 new_rtx = force_const_mem (Pmode, new_rtx);
3530 emit_move_insn (r2, new_rtx);
3531 s390_emit_tls_call_insn (r2, tls_call);
3532 insn = get_insns ();
3533 end_sequence ();
3534
3535 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3536 temp = gen_reg_rtx (Pmode);
3537 emit_libcall_block (insn, temp, r2, new_rtx);
3538
3539 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3540 if (reg != 0)
3541 {
3542 s390_load_address (reg, new_rtx);
3543 new_rtx = reg;
3544 }
3545 break;
3546
3547 case TLS_MODEL_LOCAL_DYNAMIC:
3548 start_sequence ();
3549 r2 = gen_rtx_REG (Pmode, 2);
3550 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM);
3551 new_rtx = gen_rtx_CONST (Pmode, tls_call);
3552 new_rtx = force_const_mem (Pmode, new_rtx);
3553 emit_move_insn (r2, new_rtx);
3554 s390_emit_tls_call_insn (r2, tls_call);
3555 insn = get_insns ();
3556 end_sequence ();
3557
3558 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM_NTPOFF);
3559 temp = gen_reg_rtx (Pmode);
3560 emit_libcall_block (insn, temp, r2, new_rtx);
3561
3562 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3563 base = gen_reg_rtx (Pmode);
3564 s390_load_address (base, new_rtx);
3565
3566 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_DTPOFF);
3567 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3568 new_rtx = force_const_mem (Pmode, new_rtx);
3569 temp = gen_reg_rtx (Pmode);
3570 emit_move_insn (temp, new_rtx);
3571
3572 new_rtx = gen_rtx_PLUS (Pmode, base, temp);
3573 if (reg != 0)
3574 {
3575 s390_load_address (reg, new_rtx);
3576 new_rtx = reg;
3577 }
3578 break;
3579
3580 case TLS_MODEL_INITIAL_EXEC:
3581 if (flag_pic == 1)
3582 {
3583 /* Assume GOT offset < 4k. This is handled the same way
3584 in both 31- and 64-bit code. */
3585
3586 if (reload_in_progress || reload_completed)
3587 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3588
3589 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3590 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3591 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
3592 new_rtx = gen_const_mem (Pmode, new_rtx);
3593 temp = gen_reg_rtx (Pmode);
3594 emit_move_insn (temp, new_rtx);
3595 }
3596 else if (TARGET_CPU_ZARCH)
3597 {
3598 /* If the GOT offset might be >= 4k, we determine the position
3599 of the GOT entry via a PC-relative LARL. */
3600
3601 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3602 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3603 temp = gen_reg_rtx (Pmode);
3604 emit_move_insn (temp, new_rtx);
3605
3606 new_rtx = gen_const_mem (Pmode, temp);
3607 temp = gen_reg_rtx (Pmode);
3608 emit_move_insn (temp, new_rtx);
3609 }
3610 else if (flag_pic)
3611 {
3612 /* If the GOT offset might be >= 4k, we have to load it
3613 from the literal pool. */
3614
3615 if (reload_in_progress || reload_completed)
3616 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3617
3618 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3619 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3620 new_rtx = force_const_mem (Pmode, new_rtx);
3621 temp = gen_reg_rtx (Pmode);
3622 emit_move_insn (temp, new_rtx);
3623
3624 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3625 new_rtx = gen_const_mem (Pmode, new_rtx);
3626
3627 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
3628 temp = gen_reg_rtx (Pmode);
3629 emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
3630 }
3631 else
3632 {
3633 /* In position-dependent code, load the absolute address of
3634 the GOT entry from the literal pool. */
3635
3636 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3637 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3638 new_rtx = force_const_mem (Pmode, new_rtx);
3639 temp = gen_reg_rtx (Pmode);
3640 emit_move_insn (temp, new_rtx);
3641
3642 new_rtx = temp;
3643 new_rtx = gen_const_mem (Pmode, new_rtx);
3644 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
3645 temp = gen_reg_rtx (Pmode);
3646 emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
3647 }
3648
3649 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3650 if (reg != 0)
3651 {
3652 s390_load_address (reg, new_rtx);
3653 new_rtx = reg;
3654 }
3655 break;
3656
3657 case TLS_MODEL_LOCAL_EXEC:
3658 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3659 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3660 new_rtx = force_const_mem (Pmode, new_rtx);
3661 temp = gen_reg_rtx (Pmode);
3662 emit_move_insn (temp, new_rtx);
3663
3664 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3665 if (reg != 0)
3666 {
3667 s390_load_address (reg, new_rtx);
3668 new_rtx = reg;
3669 }
3670 break;
3671
3672 default:
3673 gcc_unreachable ();
3674 }
3675
3676 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == UNSPEC)
3677 {
3678 switch (XINT (XEXP (addr, 0), 1))
3679 {
3680 case UNSPEC_INDNTPOFF:
3681 gcc_assert (TARGET_CPU_ZARCH);
3682 new_rtx = addr;
3683 break;
3684
3685 default:
3686 gcc_unreachable ();
3687 }
3688 }
3689
3690 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
3691 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
3692 {
3693 new_rtx = XEXP (XEXP (addr, 0), 0);
3694 if (GET_CODE (new_rtx) != SYMBOL_REF)
3695 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3696
3697 new_rtx = legitimize_tls_address (new_rtx, reg);
3698 new_rtx = plus_constant (new_rtx, INTVAL (XEXP (XEXP (addr, 0), 1)));
3699 new_rtx = force_operand (new_rtx, 0);
3700 }
3701
3702 else
3703 gcc_unreachable (); /* for now ... */
3704
3705 return new_rtx;
3706 }
3707
3708 /* Emit insns making the address in operands[1] valid for a standard
3709 move to operands[0]. operands[1] is replaced by an address which
3710 should be used instead of the former RTX to emit the move
3711 pattern. */
3712
3713 void
3714 emit_symbolic_move (rtx *operands)
3715 {
3716 rtx temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
3717
3718 if (GET_CODE (operands[0]) == MEM)
3719 operands[1] = force_reg (Pmode, operands[1]);
3720 else if (TLS_SYMBOLIC_CONST (operands[1]))
3721 operands[1] = legitimize_tls_address (operands[1], temp);
3722 else if (flag_pic)
3723 operands[1] = legitimize_pic_address (operands[1], temp);
3724 }
3725
3726 /* Try machine-dependent ways of modifying an illegitimate address X
3727 to be legitimate. If we find one, return the new, valid address.
3728
3729 OLDX is the address as it was before break_out_memory_refs was called.
3730 In some cases it is useful to look at this to decide what needs to be done.
3731
3732 MODE is the mode of the operand pointed to by X.
3733
3734 When -fpic is used, special handling is needed for symbolic references.
3735 See comments by legitimize_pic_address for details. */
3736
3737 rtx
3738 legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
3739 enum machine_mode mode ATTRIBUTE_UNUSED)
3740 {
3741 rtx constant_term = const0_rtx;
3742
3743 if (TLS_SYMBOLIC_CONST (x))
3744 {
3745 x = legitimize_tls_address (x, 0);
3746
3747 if (legitimate_address_p (mode, x, FALSE))
3748 return x;
3749 }
3750 else if (GET_CODE (x) == PLUS
3751 && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
3752 || TLS_SYMBOLIC_CONST (XEXP (x, 1))))
3753 {
3754 return x;
3755 }
3756 else if (flag_pic)
3757 {
3758 if (SYMBOLIC_CONST (x)
3759 || (GET_CODE (x) == PLUS
3760 && (SYMBOLIC_CONST (XEXP (x, 0))
3761 || SYMBOLIC_CONST (XEXP (x, 1)))))
3762 x = legitimize_pic_address (x, 0);
3763
3764 if (legitimate_address_p (mode, x, FALSE))
3765 return x;
3766 }
3767
3768 x = eliminate_constant_term (x, &constant_term);
3769
3770 /* Optimize loading of large displacements by splitting them
3771 into the multiple of 4K and the rest; this allows the
3772 former to be CSE'd if possible.
3773
3774 Don't do this if the displacement is added to a register
3775 pointing into the stack frame, as the offsets will
3776 change later anyway. */
3777
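/* Worked example added for illustration (not part of the original
   source): a constant term of 0x12345 splits into lower = 0x345 and
   upper = 0x12000.  The 0x12000 part is loaded into a register (and can
   be CSE'd across references), while the remaining 0x345 fits into the
   12-bit displacement field.  */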
3778 if (GET_CODE (constant_term) == CONST_INT
3779 && !TARGET_LONG_DISPLACEMENT
3780 && !DISP_IN_RANGE (INTVAL (constant_term))
3781 && !(REG_P (x) && REGNO_PTR_FRAME_P (REGNO (x))))
3782 {
3783 HOST_WIDE_INT lower = INTVAL (constant_term) & 0xfff;
3784 HOST_WIDE_INT upper = INTVAL (constant_term) ^ lower;
3785
3786 rtx temp = gen_reg_rtx (Pmode);
3787 rtx val = force_operand (GEN_INT (upper), temp);
3788 if (val != temp)
3789 emit_move_insn (temp, val);
3790
3791 x = gen_rtx_PLUS (Pmode, x, temp);
3792 constant_term = GEN_INT (lower);
3793 }
3794
3795 if (GET_CODE (x) == PLUS)
3796 {
3797 if (GET_CODE (XEXP (x, 0)) == REG)
3798 {
3799 rtx temp = gen_reg_rtx (Pmode);
3800 rtx val = force_operand (XEXP (x, 1), temp);
3801 if (val != temp)
3802 emit_move_insn (temp, val);
3803
3804 x = gen_rtx_PLUS (Pmode, XEXP (x, 0), temp);
3805 }
3806
3807 else if (GET_CODE (XEXP (x, 1)) == REG)
3808 {
3809 rtx temp = gen_reg_rtx (Pmode);
3810 rtx val = force_operand (XEXP (x, 0), temp);
3811 if (val != temp)
3812 emit_move_insn (temp, val);
3813
3814 x = gen_rtx_PLUS (Pmode, temp, XEXP (x, 1));
3815 }
3816 }
3817
3818 if (constant_term != const0_rtx)
3819 x = gen_rtx_PLUS (Pmode, x, constant_term);
3820
3821 return x;
3822 }
3823
3824 /* Try a machine-dependent way of reloading an illegitimate address AD
3825 operand.  If we find one, push the reload and return the new address.
3826
3827 MODE is the mode of the enclosing MEM. OPNUM is the operand number
3828 and TYPE is the reload type of the current reload. */
3829
3830 rtx
3831 legitimize_reload_address (rtx ad, enum machine_mode mode ATTRIBUTE_UNUSED,
3832 int opnum, int type)
3833 {
3834 if (!optimize || TARGET_LONG_DISPLACEMENT)
3835 return NULL_RTX;
3836
3837 if (GET_CODE (ad) == PLUS)
3838 {
3839 rtx tem = simplify_binary_operation (PLUS, Pmode,
3840 XEXP (ad, 0), XEXP (ad, 1));
3841 if (tem)
3842 ad = tem;
3843 }
3844
3845 if (GET_CODE (ad) == PLUS
3846 && GET_CODE (XEXP (ad, 0)) == REG
3847 && GET_CODE (XEXP (ad, 1)) == CONST_INT
3848 && !DISP_IN_RANGE (INTVAL (XEXP (ad, 1))))
3849 {
3850 HOST_WIDE_INT lower = INTVAL (XEXP (ad, 1)) & 0xfff;
3851 HOST_WIDE_INT upper = INTVAL (XEXP (ad, 1)) ^ lower;
3852 rtx cst, tem, new_rtx;
3853
3854 cst = GEN_INT (upper);
3855 if (!legitimate_reload_constant_p (cst))
3856 cst = force_const_mem (Pmode, cst);
3857
3858 tem = gen_rtx_PLUS (Pmode, XEXP (ad, 0), cst);
3859 new_rtx = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));
3860
3861 push_reload (XEXP (tem, 1), 0, &XEXP (tem, 1), 0,
3862 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
3863 opnum, (enum reload_type) type);
3864 return new_rtx;
3865 }
3866
3867 return NULL_RTX;
3868 }
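
/* Example added for illustration (not part of the original source): for
   an address like (plus (reg 9) (const_int 0x12345)) the code above
   rewrites it as ((reg 9) + 0x12000) + 0x345 and pushes a reload that
   forces the 0x12000 constant into an address register, so the final
   address has the base + index + 12-bit displacement form.  */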
3869
3870 /* Emit code to move LEN bytes from SRC to DST.  */
3871
3872 void
3873 s390_expand_movmem (rtx dst, rtx src, rtx len)
3874 {
3875 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
3876 {
3877 if (INTVAL (len) > 0)
3878 emit_insn (gen_movmem_short (dst, src, GEN_INT (INTVAL (len) - 1)));
3879 }
3880
3881 else if (TARGET_MVCLE)
3882 {
3883 emit_insn (gen_movmem_long (dst, src, convert_to_mode (Pmode, len, 1)));
3884 }
3885
3886 else
3887 {
3888 rtx dst_addr, src_addr, count, blocks, temp;
3889 rtx loop_start_label = gen_label_rtx ();
3890 rtx loop_end_label = gen_label_rtx ();
3891 rtx end_label = gen_label_rtx ();
3892 enum machine_mode mode;
3893
3894 mode = GET_MODE (len);
3895 if (mode == VOIDmode)
3896 mode = Pmode;
3897
3898 dst_addr = gen_reg_rtx (Pmode);
3899 src_addr = gen_reg_rtx (Pmode);
3900 count = gen_reg_rtx (mode);
3901 blocks = gen_reg_rtx (mode);
3902
3903 convert_move (count, len, 1);
3904 emit_cmp_and_jump_insns (count, const0_rtx,
3905 EQ, NULL_RTX, mode, 1, end_label);
3906
3907 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
3908 emit_move_insn (src_addr, force_operand (XEXP (src, 0), NULL_RTX));
3909 dst = change_address (dst, VOIDmode, dst_addr);
3910 src = change_address (src, VOIDmode, src_addr);
3911
3912 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
3913 OPTAB_DIRECT);
3914 if (temp != count)
3915 emit_move_insn (count, temp);
3916
3917 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
3918 OPTAB_DIRECT);
3919 if (temp != blocks)
3920 emit_move_insn (blocks, temp);
3921
3922 emit_cmp_and_jump_insns (blocks, const0_rtx,
3923 EQ, NULL_RTX, mode, 1, loop_end_label);
3924
3925 emit_label (loop_start_label);
3926
3927 emit_insn (gen_movmem_short (dst, src, GEN_INT (255)));
3928 s390_load_address (dst_addr,
3929 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
3930 s390_load_address (src_addr,
3931 gen_rtx_PLUS (Pmode, src_addr, GEN_INT (256)));
3932
3933 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
3934 OPTAB_DIRECT);
3935 if (temp != blocks)
3936 emit_move_insn (blocks, temp);
3937
3938 emit_cmp_and_jump_insns (blocks, const0_rtx,
3939 EQ, NULL_RTX, mode, 1, loop_end_label);
3940
3941 emit_jump (loop_start_label);
3942 emit_label (loop_end_label);
3943
3944 emit_insn (gen_movmem_short (dst, src,
3945 convert_to_mode (Pmode, count, 1)));
3946 emit_label (end_label);
3947 }
3948 }
3949
3950 /* Emit code to set LEN bytes at DST to VAL.
3951 Make use of clrmem if VAL is zero. */
3952
3953 void
3954 s390_expand_setmem (rtx dst, rtx len, rtx val)
3955 {
3956 if (GET_CODE (len) == CONST_INT && INTVAL (len) == 0)
3957 return;
3958
3959 gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);
3960
3961 if (GET_CODE (len) == CONST_INT && INTVAL (len) > 0 && INTVAL (len) <= 257)
3962 {
3963 if (val == const0_rtx && INTVAL (len) <= 256)
3964 emit_insn (gen_clrmem_short (dst, GEN_INT (INTVAL (len) - 1)));
3965 else
3966 {
3967 /* Initialize memory by storing the first byte. */
3968 emit_move_insn (adjust_address (dst, QImode, 0), val);
3969
3970 if (INTVAL (len) > 1)
3971 {
3972 /* Initiate 1 byte overlap move.
3973 The first byte of DST is propagated through DSTP1.
3974 Prepare a movmem for: DST+1 = DST (length = LEN - 1).
3975 DST is set to size 1 so the rest of the memory location
3976 does not count as source operand. */
3977 rtx dstp1 = adjust_address (dst, VOIDmode, 1);
3978 set_mem_size (dst, const1_rtx);
3979
3980 emit_insn (gen_movmem_short (dstp1, dst,
3981 GEN_INT (INTVAL (len) - 2)));
3982 }
3983 }
3984 }
3985
3986 else if (TARGET_MVCLE)
3987 {
3988 val = force_not_mem (convert_modes (Pmode, QImode, val, 1));
3989 emit_insn (gen_setmem_long (dst, convert_to_mode (Pmode, len, 1), val));
3990 }
3991
3992 else
3993 {
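/* General case: fill 256 bytes per loop iteration.  For a nonzero
   VAL the first byte is stored once and then propagated through the
   destination by an overlapping move shifted by one byte.  */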
3994 rtx dst_addr, src_addr, count, blocks, temp, dstp1 = NULL_RTX;
3995 rtx loop_start_label = gen_label_rtx ();
3996 rtx loop_end_label = gen_label_rtx ();
3997 rtx end_label = gen_label_rtx ();
3998 enum machine_mode mode;
3999
4000 mode = GET_MODE (len);
4001 if (mode == VOIDmode)
4002 mode = Pmode;
4003
4004 dst_addr = gen_reg_rtx (Pmode);
4005 src_addr = gen_reg_rtx (Pmode);
4006 count = gen_reg_rtx (mode);
4007 blocks = gen_reg_rtx (mode);
4008
4009 convert_move (count, len, 1);
4010 emit_cmp_and_jump_insns (count, const0_rtx,
4011 EQ, NULL_RTX, mode, 1, end_label);
4012
4013 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
4014 dst = change_address (dst, VOIDmode, dst_addr);
4015
4016 if (val == const0_rtx)
4017 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4018 OPTAB_DIRECT);
4019 else
4020 {
4021 dstp1 = adjust_address (dst, VOIDmode, 1);
4022 set_mem_size (dst, const1_rtx);
4023
4024 /* Initialize memory by storing the first byte. */
4025 emit_move_insn (adjust_address (dst, QImode, 0), val);
4026
4027 /* If count is 1 we are done. */
4028 emit_cmp_and_jump_insns (count, const1_rtx,
4029 EQ, NULL_RTX, mode, 1, end_label);
4030
4031 temp = expand_binop (mode, add_optab, count, GEN_INT (-2), count, 1,
4032 OPTAB_DIRECT);
4033 }
4034 if (temp != count)
4035 emit_move_insn (count, temp);
4036
4037 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4038 OPTAB_DIRECT);
4039 if (temp != blocks)
4040 emit_move_insn (blocks, temp);
4041
4042 emit_cmp_and_jump_insns (blocks, const0_rtx,
4043 EQ, NULL_RTX, mode, 1, loop_end_label);
4044
4045 emit_label (loop_start_label);
4046
4047 if (val == const0_rtx)
4048 emit_insn (gen_clrmem_short (dst, GEN_INT (255)));
4049 else
4050 emit_insn (gen_movmem_short (dstp1, dst, GEN_INT (255)));
4051 s390_load_address (dst_addr,
4052 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
4053
4054 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4055 OPTAB_DIRECT);
4056 if (temp != blocks)
4057 emit_move_insn (blocks, temp);
4058
4059 emit_cmp_and_jump_insns (blocks, const0_rtx,
4060 EQ, NULL_RTX, mode, 1, loop_end_label);
4061
4062 emit_jump (loop_start_label);
4063 emit_label (loop_end_label);
4064
4065 if (val == const0_rtx)
4066 emit_insn (gen_clrmem_short (dst, convert_to_mode (Pmode, count, 1)));
4067 else
4068 emit_insn (gen_movmem_short (dstp1, dst, convert_to_mode (Pmode, count, 1)));
4069 emit_label (end_label);
4070 }
4071 }
4072
4073 /* Emit code to compare LEN bytes at OP0 with those at OP1,
4074 and return the result in TARGET. */
4075
4076 void
4077 s390_expand_cmpmem (rtx target, rtx op0, rtx op1, rtx len)
4078 {
4079 rtx ccreg = gen_rtx_REG (CCUmode, CC_REGNUM);
4080 rtx tmp;
4081
4082 /* As the result of CMPINT is inverted compared to what we need,
4083 we have to swap the operands. */
4084 tmp = op0; op0 = op1; op1 = tmp;
4085
4086 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
4087 {
4088 if (INTVAL (len) > 0)
4089 {
4090 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (INTVAL (len) - 1)));
4091 emit_insn (gen_cmpint (target, ccreg));
4092 }
4093 else
4094 emit_move_insn (target, const0_rtx);
4095 }
4096 else if (TARGET_MVCLE)
4097 {
4098 emit_insn (gen_cmpmem_long (op0, op1, convert_to_mode (Pmode, len, 1)));
4099 emit_insn (gen_cmpint (target, ccreg));
4100 }
4101 else
4102 {
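/* General case: compare 256 bytes per loop iteration and leave the
   loop as soon as a block differs (CC != 0).  */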
4103 rtx addr0, addr1, count, blocks, temp;
4104 rtx loop_start_label = gen_label_rtx ();
4105 rtx loop_end_label = gen_label_rtx ();
4106 rtx end_label = gen_label_rtx ();
4107 enum machine_mode mode;
4108
4109 mode = GET_MODE (len);
4110 if (mode == VOIDmode)
4111 mode = Pmode;
4112
4113 addr0 = gen_reg_rtx (Pmode);
4114 addr1 = gen_reg_rtx (Pmode);
4115 count = gen_reg_rtx (mode);
4116 blocks = gen_reg_rtx (mode);
4117
4118 convert_move (count, len, 1);
4119 emit_cmp_and_jump_insns (count, const0_rtx,
4120 EQ, NULL_RTX, mode, 1, end_label);
4121
4122 emit_move_insn (addr0, force_operand (XEXP (op0, 0), NULL_RTX));
4123 emit_move_insn (addr1, force_operand (XEXP (op1, 0), NULL_RTX));
4124 op0 = change_address (op0, VOIDmode, addr0);
4125 op1 = change_address (op1, VOIDmode, addr1);
4126
4127 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4128 OPTAB_DIRECT);
4129 if (temp != count)
4130 emit_move_insn (count, temp);
4131
4132 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4133 OPTAB_DIRECT);
4134 if (temp != blocks)
4135 emit_move_insn (blocks, temp);
4136
4137 emit_cmp_and_jump_insns (blocks, const0_rtx,
4138 EQ, NULL_RTX, mode, 1, loop_end_label);
4139
4140 emit_label (loop_start_label);
4141
4142 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (255)));
4143 temp = gen_rtx_NE (VOIDmode, ccreg, const0_rtx);
4144 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
4145 gen_rtx_LABEL_REF (VOIDmode, end_label), pc_rtx);
4146 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
4147 emit_jump_insn (temp);
4148
4149 s390_load_address (addr0,
4150 gen_rtx_PLUS (Pmode, addr0, GEN_INT (256)));
4151 s390_load_address (addr1,
4152 gen_rtx_PLUS (Pmode, addr1, GEN_INT (256)));
4153
4154 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4155 OPTAB_DIRECT);
4156 if (temp != blocks)
4157 emit_move_insn (blocks, temp);
4158
4159 emit_cmp_and_jump_insns (blocks, const0_rtx,
4160 EQ, NULL_RTX, mode, 1, loop_end_label);
4161
4162 emit_jump (loop_start_label);
4163 emit_label (loop_end_label);
4164
4165 emit_insn (gen_cmpmem_short (op0, op1,
4166 convert_to_mode (Pmode, count, 1)));
4167 emit_label (end_label);
4168
4169 emit_insn (gen_cmpint (target, ccreg));
4170 }
4171 }
4172
4173
4174 /* Expand conditional increment or decrement using alc/slb instructions.
4175 Should generate code setting DST to either SRC or SRC + INCREMENT,
4176 depending on the result of the comparison CMP_OP0 CMP_CODE CMP_OP1.
4177 Returns true if successful, false otherwise.
4178
4179 That makes it possible to implement some if-constructs without jumps e.g.:
4180 (borrow = CC0 | CC1 and carry = CC2 | CC3)
4181 unsigned int a, b, c;
4182 if (a < b) c++; -> CCU b > a -> CC2; c += carry;
4183 if (a < b) c--; -> CCL3 a - b -> borrow; c -= borrow;
4184 if (a <= b) c++; -> CCL3 b - a -> borrow; c += carry;
4185 if (a <= b) c--; -> CCU a <= b -> borrow; c -= borrow;
4186
4187 Checks for EQ and NE with a nonzero value need an additional xor e.g.:
4188 if (a == b) c++; -> CCL3 a ^= b; 0 - a -> borrow; c += carry;
4189 if (a == b) c--; -> CCU a ^= b; a <= 0 -> CC0 | CC1; c -= borrow;
4190 if (a != b) c++; -> CCU a ^= b; a > 0 -> CC2; c += carry;
4191 if (a != b) c--; -> CCL3 a ^= b; 0 - a -> borrow; c -= borrow; */
4192
4193 bool
4194 s390_expand_addcc (enum rtx_code cmp_code, rtx cmp_op0, rtx cmp_op1,
4195 rtx dst, rtx src, rtx increment)
4196 {
4197 enum machine_mode cmp_mode;
4198 enum machine_mode cc_mode;
4199 rtx op_res;
4200 rtx insn;
4201 rtvec p;
4202 int ret;
4203
4204 if ((GET_MODE (cmp_op0) == SImode || GET_MODE (cmp_op0) == VOIDmode)
4205 && (GET_MODE (cmp_op1) == SImode || GET_MODE (cmp_op1) == VOIDmode))
4206 cmp_mode = SImode;
4207 else if ((GET_MODE (cmp_op0) == DImode || GET_MODE (cmp_op0) == VOIDmode)
4208 && (GET_MODE (cmp_op1) == DImode || GET_MODE (cmp_op1) == VOIDmode))
4209 cmp_mode = DImode;
4210 else
4211 return false;
4212
4213 /* Try ADD LOGICAL WITH CARRY. */
4214 if (increment == const1_rtx)
4215 {
4216 /* Determine CC mode to use. */
4217 if (cmp_code == EQ || cmp_code == NE)
4218 {
4219 if (cmp_op1 != const0_rtx)
4220 {
4221 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
4222 NULL_RTX, 0, OPTAB_WIDEN);
4223 cmp_op1 = const0_rtx;
4224 }
4225
4226 cmp_code = cmp_code == EQ ? LEU : GTU;
4227 }
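/* With CMP_OP1 reduced to zero, a == 0 is equivalent to a <= 0 and
   a != 0 to a > 0 in the unsigned sense, so EQ/NE can be handled
   like LEU/GTU.  */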
4228
4229 if (cmp_code == LTU || cmp_code == LEU)
4230 {
4231 rtx tem = cmp_op0;
4232 cmp_op0 = cmp_op1;
4233 cmp_op1 = tem;
4234 cmp_code = swap_condition (cmp_code);
4235 }
4236
4237 switch (cmp_code)
4238 {
4239 case GTU:
4240 cc_mode = CCUmode;
4241 break;
4242
4243 case GEU:
4244 cc_mode = CCL3mode;
4245 break;
4246
4247 default:
4248 return false;
4249 }
4250
4251 /* Emit comparison instruction pattern. */
4252 if (!register_operand (cmp_op0, cmp_mode))
4253 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4254
4255 insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
4256 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4257 /* We use insn_invalid_p here to add clobbers if required. */
4258 ret = insn_invalid_p (emit_insn (insn));
4259 gcc_assert (!ret);
4260
4261 /* Emit ALC instruction pattern. */
4262 op_res = gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4263 gen_rtx_REG (cc_mode, CC_REGNUM),
4264 const0_rtx);
4265
4266 if (src != const0_rtx)
4267 {
4268 if (!register_operand (src, GET_MODE (dst)))
4269 src = force_reg (GET_MODE (dst), src);
4270
4271 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, src);
4272 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, const0_rtx);
4273 }
4274
4275 p = rtvec_alloc (2);
4276 RTVEC_ELT (p, 0) =
4277 gen_rtx_SET (VOIDmode, dst, op_res);
4278 RTVEC_ELT (p, 1) =
4279 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4280 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
4281
4282 return true;
4283 }
4284
4285 /* Try SUBTRACT LOGICAL WITH BORROW. */
4286 if (increment == constm1_rtx)
4287 {
4288 /* Determine CC mode to use. */
4289 if (cmp_code == EQ || cmp_code == NE)
4290 {
4291 if (cmp_op1 != const0_rtx)
4292 {
4293 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
4294 NULL_RTX, 0, OPTAB_WIDEN);
4295 cmp_op1 = const0_rtx;
4296 }
4297
4298 cmp_code = cmp_code == EQ ? LEU : GTU;
4299 }
4300
4301 if (cmp_code == GTU || cmp_code == GEU)
4302 {
4303 rtx tem = cmp_op0;
4304 cmp_op0 = cmp_op1;
4305 cmp_op1 = tem;
4306 cmp_code = swap_condition (cmp_code);
4307 }
4308
4309 switch (cmp_code)
4310 {
4311 case LEU:
4312 cc_mode = CCUmode;
4313 break;
4314
4315 case LTU:
4316 cc_mode = CCL3mode;
4317 break;
4318
4319 default:
4320 return false;
4321 }
4322
4323 /* Emit comparison instruction pattern. */
4324 if (!register_operand (cmp_op0, cmp_mode))
4325 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4326
4327 insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
4328 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4329 /* We use insn_invalid_p here to add clobbers if required. */
4330 ret = insn_invalid_p (emit_insn (insn));
4331 gcc_assert (!ret);
4332
4333 /* Emit SLB instruction pattern. */
4334 if (!register_operand (src, GET_MODE (dst)))
4335 src = force_reg (GET_MODE (dst), src);
4336
4337 op_res = gen_rtx_MINUS (GET_MODE (dst),
4338 gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
4339 gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4340 gen_rtx_REG (cc_mode, CC_REGNUM),
4341 const0_rtx));
4342 p = rtvec_alloc (2);
4343 RTVEC_ELT (p, 0) =
4344 gen_rtx_SET (VOIDmode, dst, op_res);
4345 RTVEC_ELT (p, 1) =
4346 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4347 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
4348
4349 return true;
4350 }
4351
4352 return false;
4353 }
4354
4355 /* Expand code for the insv template. Return true if successful. */
4356
4357 bool
4358 s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
4359 {
4360 int bitsize = INTVAL (op1);
4361 int bitpos = INTVAL (op2);
4362
4363 /* On z10 we can use the risbg instruction to implement insv. */
4364 if (TARGET_Z10
4365 && ((GET_MODE (dest) == DImode && GET_MODE (src) == DImode)
4366 || (GET_MODE (dest) == SImode && GET_MODE (src) == SImode)))
4367 {
4368 rtx op;
4369 rtx clobber;
4370
4371 op = gen_rtx_SET (GET_MODE(src),
4372 gen_rtx_ZERO_EXTRACT (GET_MODE (dest), dest, op1, op2),
4373 src);
4374 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4375 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
4376
4377 return true;
4378 }
4379
4380 /* We need byte alignment. */
4381 if (bitsize % BITS_PER_UNIT)
4382 return false;
4383
4384 if (bitpos == 0
4385 && memory_operand (dest, VOIDmode)
4386 && (register_operand (src, word_mode)
4387 || const_int_operand (src, VOIDmode)))
4388 {
4389 /* Emit standard pattern if possible. */
4390 enum machine_mode mode = smallest_mode_for_size (bitsize, MODE_INT);
4391 if (GET_MODE_BITSIZE (mode) == bitsize)
4392 emit_move_insn (adjust_address (dest, mode, 0), gen_lowpart (mode, src));
4393
4394 /* (set (ze (mem)) (const_int)). */
4395 else if (const_int_operand (src, VOIDmode))
4396 {
4397 int size = bitsize / BITS_PER_UNIT;
4398 rtx src_mem = adjust_address (force_const_mem (word_mode, src), BLKmode,
4399 GET_MODE_SIZE (word_mode) - size);
4400
4401 dest = adjust_address (dest, BLKmode, 0);
4402 set_mem_size (dest, GEN_INT (size));
4403 s390_expand_movmem (dest, src_mem, GEN_INT (size));
4404 }
4405
4406 /* (set (ze (mem)) (reg)). */
4407 else if (register_operand (src, word_mode))
4408 {
4409 if (bitsize <= GET_MODE_BITSIZE (SImode))
4410 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, op1,
4411 const0_rtx), src);
4412 else
4413 {
4414 /* Emit st,stcmh sequence. */
4415 int stcmh_width = bitsize - GET_MODE_BITSIZE (SImode);
4416 int size = stcmh_width / BITS_PER_UNIT;
4417
4418 emit_move_insn (adjust_address (dest, SImode, size),
4419 gen_lowpart (SImode, src));
4420 set_mem_size (dest, GEN_INT (size));
4421 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, GEN_INT
4422 (stcmh_width), const0_rtx),
4423 gen_rtx_LSHIFTRT (word_mode, src, GEN_INT
4424 (GET_MODE_BITSIZE (SImode))));
4425 }
4426 }
4427 else
4428 return false;
4429
4430 return true;
4431 }
4432
4433 /* (set (ze (reg)) (const_int)). */
4434 if (TARGET_ZARCH
4435 && register_operand (dest, word_mode)
4436 && (bitpos % 16) == 0
4437 && (bitsize % 16) == 0
4438 && const_int_operand (src, VOIDmode))
4439 {
4440 HOST_WIDE_INT val = INTVAL (src);
4441 int regpos = bitpos + bitsize;
4442
4443 while (regpos > bitpos)
4444 {
4445 enum machine_mode putmode;
4446 int putsize;
4447
4448 if (TARGET_EXTIMM && (regpos % 32 == 0) && (regpos >= bitpos + 32))
4449 putmode = SImode;
4450 else
4451 putmode = HImode;
4452
4453 putsize = GET_MODE_BITSIZE (putmode);
4454 regpos -= putsize;
4455 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
4456 GEN_INT (putsize),
4457 GEN_INT (regpos)),
4458 gen_int_mode (val, putmode));
4459 val >>= putsize;
4460 }
4461 gcc_assert (regpos == bitpos);
4462 return true;
4463 }
4464
4465 return false;
4466 }
4467
4468 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic which returns a
4469 register that holds VAL of mode MODE shifted by COUNT bits. */
4470
4471 static inline rtx
4472 s390_expand_mask_and_shift (rtx val, enum machine_mode mode, rtx count)
4473 {
4474 val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
4475 NULL_RTX, 1, OPTAB_DIRECT);
4476 return expand_simple_binop (SImode, ASHIFT, val, count,
4477 NULL_RTX, 1, OPTAB_DIRECT);
4478 }
4479
4480 /* Structure to hold the initial parameters for a compare_and_swap operation
4481 in HImode and QImode. */
4482
4483 struct alignment_context
4484 {
4485 rtx memsi; /* SI aligned memory location. */
4486 rtx shift; /* Bit offset with regard to lsb. */
4487 rtx modemask; /* Mask of the HQImode shifted by SHIFT bits. */
4488 rtx modemaski; /* ~modemask */
4489 bool aligned; /* True if memory is aligned, false otherwise. */
4490 };
4491
4492 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize
4493 structure AC for transparent simplification, if the memory alignment is known
4494 to be at least 32 bits. MEM is the memory location for the actual operation
4495 and MODE its mode. */
4496
4497 static void
4498 init_alignment_context (struct alignment_context *ac, rtx mem,
4499 enum machine_mode mode)
4500 {
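/* SHIFT is maintained as a byte count first: the distance of the
   HImode/QImode value from the least significant end of the
   containing SImode word (s390 is big-endian).  It is converted to
   a bit count below.  */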
4501 ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
4502 ac->aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));
4503
4504 if (ac->aligned)
4505 ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned. */
4506 else
4507 {
4508 /* Alignment is unknown. */
4509 rtx byteoffset, addr, align;
4510
4511 /* Force the address into a register. */
4512 addr = force_reg (Pmode, XEXP (mem, 0));
4513
4514 /* Align it to SImode. */
4515 align = expand_simple_binop (Pmode, AND, addr,
4516 GEN_INT (-GET_MODE_SIZE (SImode)),
4517 NULL_RTX, 1, OPTAB_DIRECT);
4518 /* Generate MEM. */
4519 ac->memsi = gen_rtx_MEM (SImode, align);
4520 MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
4521 set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
4522 set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));
4523
4524 /* Calculate shiftcount. */
4525 byteoffset = expand_simple_binop (Pmode, AND, addr,
4526 GEN_INT (GET_MODE_SIZE (SImode) - 1),
4527 NULL_RTX, 1, OPTAB_DIRECT);
4528 /* As we already have some offset, evaluate the remaining distance. */
4529 ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
4530 NULL_RTX, 1, OPTAB_DIRECT);
4531
4532 }
4533 /* Shift is the byte count, but we need the bitcount. */
4534 ac->shift = expand_simple_binop (SImode, MULT, ac->shift, GEN_INT (BITS_PER_UNIT),
4535 NULL_RTX, 1, OPTAB_DIRECT);
4536 /* Calculate masks. */
4537 ac->modemask = expand_simple_binop (SImode, ASHIFT,
4538 GEN_INT (GET_MODE_MASK (mode)), ac->shift,
4539 NULL_RTX, 1, OPTAB_DIRECT);
4540 ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask, NULL_RTX, 1);
4541 }
4542
4543 /* Expand an atomic compare and swap operation for HImode and QImode. MEM is
4544 the memory location, CMP the old value to compare MEM with and NEW_RTX the value
4545 to set if CMP == MEM.
4546 CMP is never in memory for compare_and_swap_cc because
4547 expand_bool_compare_and_swap puts it into a register for later compare. */
4548
4549 void
4550 s390_expand_cs_hqi (enum machine_mode mode, rtx target, rtx mem, rtx cmp, rtx new_rtx)
4551 {
4552 struct alignment_context ac;
4553 rtx cmpv, newv, val, resv, cc;
4554 rtx res = gen_reg_rtx (SImode);
4555 rtx csloop = gen_label_rtx ();
4556 rtx csend = gen_label_rtx ();
4557
4558 gcc_assert (register_operand (target, VOIDmode));
4559 gcc_assert (MEM_P (mem));
4560
4561 init_alignment_context (&ac, mem, mode);
4562
4563 /* Shift the values to the correct bit positions. */
4564 if (!(ac.aligned && MEM_P (cmp)))
4565 cmp = s390_expand_mask_and_shift (cmp, mode, ac.shift);
4566 if (!(ac.aligned && MEM_P (new_rtx)))
4567 new_rtx = s390_expand_mask_and_shift (new_rtx, mode, ac.shift);
4568
4569 /* Load full word. Subsequent loads are performed by CS. */
4570 val = expand_simple_binop (SImode, AND, ac.memsi, ac.modemaski,
4571 NULL_RTX, 1, OPTAB_DIRECT);
4572
4573 /* Start CS loop. */
4574 emit_label (csloop);
4575 /* val = "<mem>00..0<mem>"
4576 * cmp = "00..0<cmp>00..0"
4577 * new = "00..0<new>00..0"
4578 */
4579
4580 /* Patch cmp and new with val at correct position. */
4581 if (ac.aligned && MEM_P (cmp))
4582 {
4583 cmpv = force_reg (SImode, val);
4584 store_bit_field (cmpv, GET_MODE_BITSIZE (mode), 0, SImode, cmp);
4585 }
4586 else
4587 cmpv = force_reg (SImode, expand_simple_binop (SImode, IOR, cmp, val,
4588 NULL_RTX, 1, OPTAB_DIRECT));
4589 if (ac.aligned && MEM_P (new_rtx))
4590 {
4591 newv = force_reg (SImode, val);
4592 store_bit_field (newv, GET_MODE_BITSIZE (mode), 0, SImode, new_rtx);
4593 }
4594 else
4595 newv = force_reg (SImode, expand_simple_binop (SImode, IOR, new_rtx, val,
4596 NULL_RTX, 1, OPTAB_DIRECT));
4597
4598 /* Jump to end if we're done (likely?). */
4599 s390_emit_jump (csend, s390_emit_compare_and_swap (EQ, res, ac.memsi,
4600 cmpv, newv));
4601
4602 /* Check for changes outside mode. */
4603 resv = expand_simple_binop (SImode, AND, res, ac.modemaski,
4604 NULL_RTX, 1, OPTAB_DIRECT);
4605 cc = s390_emit_compare (NE, resv, val);
4606 emit_move_insn (val, resv);
4607 /* Loop again if so. */
4608 s390_emit_jump (csloop, cc);
4609
4610 emit_label (csend);
4611
4612 /* Return the correct part of the bitfield. */
4613 convert_move (target, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
4614 NULL_RTX, 1, OPTAB_DIRECT), 1);
4615 }
4616
4617 /* Expand an atomic operation CODE of mode MODE. MEM is the memory location
4618 and VAL the value to play with. If AFTER is true then store the value
4619 MEM holds after the operation, if AFTER is false then store the value MEM
4620 holds before the operation. If TARGET is zero then discard that value, else
4621 store it to TARGET. */
4622
4623 void
4624 s390_expand_atomic (enum machine_mode mode, enum rtx_code code,
4625 rtx target, rtx mem, rtx val, bool after)
4626 {
4627 struct alignment_context ac;
4628 rtx cmp;
4629 rtx new_rtx = gen_reg_rtx (SImode);
4630 rtx orig = gen_reg_rtx (SImode);
4631 rtx csloop = gen_label_rtx ();
4632
4633 gcc_assert (!target || register_operand (target, VOIDmode));
4634 gcc_assert (MEM_P (mem));
4635
4636 init_alignment_context (&ac, mem, mode);
4637
4638 /* Shift val to the correct bit positions.
4639 Preserve "icm", but prevent "ex icm". */
4640 if (!(ac.aligned && code == SET && MEM_P (val)))
4641 val = s390_expand_mask_and_shift (val, mode, ac.shift);
4642
4643 /* Further preparation insns. */
4644 if (code == PLUS || code == MINUS)
4645 emit_move_insn (orig, val);
4646 else if (code == MULT || code == AND) /* val = "11..1<val>11..1" */
4647 val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
4648 NULL_RTX, 1, OPTAB_DIRECT);
4649
4650 /* Load full word. Subsequent loads are performed by CS. */
4651 cmp = force_reg (SImode, ac.memsi);
4652
4653 /* Start CS loop. */
4654 emit_label (csloop);
4655 emit_move_insn (new_rtx, cmp);
4656
4657 /* Patch new with val at correct position. */
4658 switch (code)
4659 {
4660 case PLUS:
4661 case MINUS:
4662 val = expand_simple_binop (SImode, code, new_rtx, orig,
4663 NULL_RTX, 1, OPTAB_DIRECT);
4664 val = expand_simple_binop (SImode, AND, val, ac.modemask,
4665 NULL_RTX, 1, OPTAB_DIRECT);
4666 /* FALLTHRU */
4667 case SET:
4668 if (ac.aligned && MEM_P (val))
4669 store_bit_field (new_rtx, GET_MODE_BITSIZE (mode), 0, SImode, val);
4670 else
4671 {
4672 new_rtx = expand_simple_binop (SImode, AND, new_rtx, ac.modemaski,
4673 NULL_RTX, 1, OPTAB_DIRECT);
4674 new_rtx = expand_simple_binop (SImode, IOR, new_rtx, val,
4675 NULL_RTX, 1, OPTAB_DIRECT);
4676 }
4677 break;
4678 case AND:
4679 case IOR:
4680 case XOR:
4681 new_rtx = expand_simple_binop (SImode, code, new_rtx, val,
4682 NULL_RTX, 1, OPTAB_DIRECT);
4683 break;
4684 case MULT: /* NAND */
4685 new_rtx = expand_simple_binop (SImode, AND, new_rtx, val,
4686 NULL_RTX, 1, OPTAB_DIRECT);
4687 new_rtx = expand_simple_binop (SImode, XOR, new_rtx, ac.modemask,
4688 NULL_RTX, 1, OPTAB_DIRECT);
4689 break;
4690 default:
4691 gcc_unreachable ();
4692 }
4693
4694 s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, cmp,
4695 ac.memsi, cmp, new_rtx));
4696
4697 /* Return the correct part of the bitfield. */
4698 if (target)
4699 convert_move (target, expand_simple_binop (SImode, LSHIFTRT,
4700 after ? new_rtx : cmp, ac.shift,
4701 NULL_RTX, 1, OPTAB_DIRECT), 1);
4702 }
4703
4704 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
4705 We need to emit DTP-relative relocations. */
4706
4707 static void s390_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
4708
4709 static void
4710 s390_output_dwarf_dtprel (FILE *file, int size, rtx x)
4711 {
4712 switch (size)
4713 {
4714 case 4:
4715 fputs ("\t.long\t", file);
4716 break;
4717 case 8:
4718 fputs ("\t.quad\t", file);
4719 break;
4720 default:
4721 gcc_unreachable ();
4722 }
4723 output_addr_const (file, x);
4724 fputs ("@DTPOFF", file);
4725 }
4726
4727 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
4728 /* Implement TARGET_MANGLE_TYPE. */
4729
4730 static const char *
4731 s390_mangle_type (const_tree type)
4732 {
4733 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
4734 && TARGET_LONG_DOUBLE_128)
4735 return "g";
4736
4737 /* For all other types, use normal C++ mangling. */
4738 return NULL;
4739 }
4740 #endif
4741
4742 /* In the name of slightly smaller debug output, and to cater to
4743 general assembler lossage, recognize various UNSPEC sequences
4744 and turn them back into a direct symbol reference. */
4745
4746 static rtx
4747 s390_delegitimize_address (rtx orig_x)
4748 {
4749 rtx x = orig_x, y;
4750
4751 if (GET_CODE (x) != MEM)
4752 return orig_x;
4753
4754 x = XEXP (x, 0);
4755 if (GET_CODE (x) == PLUS
4756 && GET_CODE (XEXP (x, 1)) == CONST
4757 && GET_CODE (XEXP (x, 0)) == REG
4758 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
4759 {
4760 y = XEXP (XEXP (x, 1), 0);
4761 if (GET_CODE (y) == UNSPEC
4762 && XINT (y, 1) == UNSPEC_GOT)
4763 return XVECEXP (y, 0, 0);
4764 return orig_x;
4765 }
4766
4767 if (GET_CODE (x) == CONST)
4768 {
4769 y = XEXP (x, 0);
4770 if (GET_CODE (y) == UNSPEC
4771 && XINT (y, 1) == UNSPEC_GOTENT)
4772 return XVECEXP (y, 0, 0);
4773 return orig_x;
4774 }
4775
4776 return orig_x;
4777 }
4778
4779 /* Output operand OP to stdio stream FILE.
4780 OP is an address (register + offset) which is not used to address data;
4781 instead the rightmost bits are interpreted as the value. */
4782
4783 static void
4784 print_shift_count_operand (FILE *file, rtx op)
4785 {
4786 HOST_WIDE_INT offset;
4787 rtx base;
4788
4789 /* Extract base register and offset. */
4790 if (!s390_decompose_shift_count (op, &base, &offset))
4791 gcc_unreachable ();
4792
4793 /* Sanity check. */
4794 if (base)
4795 {
4796 gcc_assert (GET_CODE (base) == REG);
4797 gcc_assert (REGNO (base) < FIRST_PSEUDO_REGISTER);
4798 gcc_assert (REGNO_REG_CLASS (REGNO (base)) == ADDR_REGS);
4799 }
4800
4801 /* Offsets are restricted to twelve bits. */
4802 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset & ((1 << 12) - 1));
4803 if (base)
4804 fprintf (file, "(%s)", reg_names[REGNO (base)]);
4805 }
4806
4807 /* See 'get_some_local_dynamic_name'. */
4808
4809 static int
4810 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
4811 {
4812 rtx x = *px;
4813
4814 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
4815 {
4816 x = get_pool_constant (x);
4817 return for_each_rtx (&x, get_some_local_dynamic_name_1, 0);
4818 }
4819
4820 if (GET_CODE (x) == SYMBOL_REF
4821 && tls_symbolic_operand (x) == TLS_MODEL_LOCAL_DYNAMIC)
4822 {
4823 cfun->machine->some_ld_name = XSTR (x, 0);
4824 return 1;
4825 }
4826
4827 return 0;
4828 }
4829
4830 /* Locate some local-dynamic symbol still in use by this function
4831 so that we can print its name in local-dynamic base patterns. */
4832
4833 static const char *
4834 get_some_local_dynamic_name (void)
4835 {
4836 rtx insn;
4837
4838 if (cfun->machine->some_ld_name)
4839 return cfun->machine->some_ld_name;
4840
4841 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
4842 if (INSN_P (insn)
4843 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
4844 return cfun->machine->some_ld_name;
4845
4846 gcc_unreachable ();
4847 }
4848
4849 /* Output machine-dependent UNSPECs occurring in address constant X
4850 in assembler syntax to stdio stream FILE. Returns true if the
4851 constant X could be recognized, false otherwise. */
4852
4853 bool
4854 s390_output_addr_const_extra (FILE *file, rtx x)
4855 {
4856 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
4857 switch (XINT (x, 1))
4858 {
4859 case UNSPEC_GOTENT:
4860 output_addr_const (file, XVECEXP (x, 0, 0));
4861 fprintf (file, "@GOTENT");
4862 return true;
4863 case UNSPEC_GOT:
4864 output_addr_const (file, XVECEXP (x, 0, 0));
4865 fprintf (file, "@GOT");
4866 return true;
4867 case UNSPEC_GOTOFF:
4868 output_addr_const (file, XVECEXP (x, 0, 0));
4869 fprintf (file, "@GOTOFF");
4870 return true;
4871 case UNSPEC_PLT:
4872 output_addr_const (file, XVECEXP (x, 0, 0));
4873 fprintf (file, "@PLT");
4874 return true;
4875 case UNSPEC_PLTOFF:
4876 output_addr_const (file, XVECEXP (x, 0, 0));
4877 fprintf (file, "@PLTOFF");
4878 return true;
4879 case UNSPEC_TLSGD:
4880 output_addr_const (file, XVECEXP (x, 0, 0));
4881 fprintf (file, "@TLSGD");
4882 return true;
4883 case UNSPEC_TLSLDM:
4884 assemble_name (file, get_some_local_dynamic_name ());
4885 fprintf (file, "@TLSLDM");
4886 return true;
4887 case UNSPEC_DTPOFF:
4888 output_addr_const (file, XVECEXP (x, 0, 0));
4889 fprintf (file, "@DTPOFF");
4890 return true;
4891 case UNSPEC_NTPOFF:
4892 output_addr_const (file, XVECEXP (x, 0, 0));
4893 fprintf (file, "@NTPOFF");
4894 return true;
4895 case UNSPEC_GOTNTPOFF:
4896 output_addr_const (file, XVECEXP (x, 0, 0));
4897 fprintf (file, "@GOTNTPOFF");
4898 return true;
4899 case UNSPEC_INDNTPOFF:
4900 output_addr_const (file, XVECEXP (x, 0, 0));
4901 fprintf (file, "@INDNTPOFF");
4902 return true;
4903 }
4904
4905 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 2)
4906 switch (XINT (x, 1))
4907 {
4908 case UNSPEC_POOL_OFFSET:
4909 x = gen_rtx_MINUS (GET_MODE (x), XVECEXP (x, 0, 0), XVECEXP (x, 0, 1));
4910 output_addr_const (file, x);
4911 return true;
4912 }
4913 return false;
4914 }
4915
4916 /* Output address operand ADDR in assembler syntax to
4917 stdio stream FILE. */
4918
4919 void
4920 print_operand_address (FILE *file, rtx addr)
4921 {
4922 struct s390_address ad;
4923
4924 if (s390_symref_operand_p (addr, NULL, NULL))
4925 {
4926 gcc_assert (TARGET_Z10);
4927 output_addr_const (file, addr);
4928 return;
4929 }
4930
4931 if (!s390_decompose_address (addr, &ad)
4932 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
4933 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
4934 output_operand_lossage ("cannot decompose address");
4935
4936 if (ad.disp)
4937 output_addr_const (file, ad.disp);
4938 else
4939 fprintf (file, "0");
4940
4941 if (ad.base && ad.indx)
4942 fprintf (file, "(%s,%s)", reg_names[REGNO (ad.indx)],
4943 reg_names[REGNO (ad.base)]);
4944 else if (ad.base)
4945 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
4946 }
4947
4948 /* Output operand X in assembler syntax to stdio stream FILE.
4949 CODE specified the format flag. The following format flags
4950 are recognized:
4951
4952 'C': print opcode suffix for branch condition.
4953 'D': print opcode suffix for inverse branch condition.
4954 'J': print tls_load/tls_gdcall/tls_ldcall suffix
4955 'G': print the size of the operand in bytes.
4956 'O': print only the displacement of a memory reference.
4957 'R': print only the base register of a memory reference.
4958 'S': print S-type memory reference (base+displacement).
4959 'N': print the second word of a DImode operand.
4960 'M': print the second word of a TImode operand.
4961 'Y': print shift count operand.
4962
4963 'b': print integer X as if it's an unsigned byte.
4964 'c': print integer X as if it's a signed byte.
4965 'x': print integer X as if it's an unsigned halfword.
4966 'h': print integer X as if it's a signed halfword.
4967 'i': print the first nonzero HImode part of X.
4968 'j': print the first HImode part unequal to -1 of X.
4969 'k': print the first nonzero SImode part of X.
4970 'm': print the first SImode part unequal to -1 of X.
4971 'o': print integer X as if it's an unsigned 32-bit word. */
4972
4973 void
4974 print_operand (FILE *file, rtx x, int code)
4975 {
4976 switch (code)
4977 {
4978 case 'C':
4979 fprintf (file, s390_branch_condition_mnemonic (x, FALSE));
4980 return;
4981
4982 case 'D':
4983 fprintf (file, s390_branch_condition_mnemonic (x, TRUE));
4984 return;
4985
4986 case 'J':
4987 if (GET_CODE (x) == SYMBOL_REF)
4988 {
4989 fprintf (file, "%s", ":tls_load:");
4990 output_addr_const (file, x);
4991 }
4992 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
4993 {
4994 fprintf (file, "%s", ":tls_gdcall:");
4995 output_addr_const (file, XVECEXP (x, 0, 0));
4996 }
4997 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM)
4998 {
4999 fprintf (file, "%s", ":tls_ldcall:");
5000 assemble_name (file, get_some_local_dynamic_name ());
5001 }
5002 else
5003 gcc_unreachable ();
5004 return;
5005
5006 case 'G':
5007 fprintf (file, "%u", GET_MODE_SIZE (GET_MODE (x)));
5008 return;
5009
5010 case 'O':
5011 {
5012 struct s390_address ad;
5013 int ret;
5014
5015 gcc_assert (GET_CODE (x) == MEM);
5016 ret = s390_decompose_address (XEXP (x, 0), &ad);
5017 gcc_assert (ret);
5018 gcc_assert (!ad.base || REGNO_OK_FOR_BASE_P (REGNO (ad.base)));
5019 gcc_assert (!ad.indx);
5020
5021 if (ad.disp)
5022 output_addr_const (file, ad.disp);
5023 else
5024 fprintf (file, "0");
5025 }
5026 return;
5027
5028 case 'R':
5029 {
5030 struct s390_address ad;
5031 int ret;
5032
5033 gcc_assert (GET_CODE (x) == MEM);
5034 ret = s390_decompose_address (XEXP (x, 0), &ad);
5035 gcc_assert (ret);
5036 gcc_assert (!ad.base || REGNO_OK_FOR_BASE_P (REGNO (ad.base)));
5037 gcc_assert (!ad.indx);
5038
5039 if (ad.base)
5040 fprintf (file, "%s", reg_names[REGNO (ad.base)]);
5041 else
5042 fprintf (file, "0");
5043 }
5044 return;
5045
5046 case 'S':
5047 {
5048 struct s390_address ad;
5049 int ret;
5050
5051 gcc_assert (GET_CODE (x) == MEM);
5052 ret = s390_decompose_address (XEXP (x, 0), &ad);
5053 gcc_assert (ret);
5054 gcc_assert (!ad.base || REGNO_OK_FOR_BASE_P (REGNO (ad.base)));
5055 gcc_assert (!ad.indx);
5056
5057 if (ad.disp)
5058 output_addr_const (file, ad.disp);
5059 else
5060 fprintf (file, "0");
5061
5062 if (ad.base)
5063 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
5064 }
5065 return;
5066
5067 case 'N':
5068 if (GET_CODE (x) == REG)
5069 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
5070 else if (GET_CODE (x) == MEM)
5071 x = change_address (x, VOIDmode, plus_constant (XEXP (x, 0), 4));
5072 else
5073 gcc_unreachable ();
5074 break;
5075
5076 case 'M':
5077 if (GET_CODE (x) == REG)
5078 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
5079 else if (GET_CODE (x) == MEM)
5080 x = change_address (x, VOIDmode, plus_constant (XEXP (x, 0), 8));
5081 else
5082 gcc_unreachable ();
5083 break;
5084
5085 case 'Y':
5086 print_shift_count_operand (file, x);
5087 return;
5088 }
5089
5090 switch (GET_CODE (x))
5091 {
5092 case REG:
5093 fprintf (file, "%s", reg_names[REGNO (x)]);
5094 break;
5095
5096 case MEM:
5097 output_address (XEXP (x, 0));
5098 break;
5099
5100 case CONST:
5101 case CODE_LABEL:
5102 case LABEL_REF:
5103 case SYMBOL_REF:
5104 output_addr_const (file, x);
5105 break;
5106
5107 case CONST_INT:
5108 if (code == 'b')
5109 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xff);
5110 else if (code == 'c')
5111 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ((INTVAL (x) & 0xff) ^ 0x80) - 0x80);
5112 else if (code == 'x')
5113 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xffff);
5114 else if (code == 'h')
5115 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
5116 else if (code == 'i')
5117 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5118 s390_extract_part (x, HImode, 0));
5119 else if (code == 'j')
5120 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5121 s390_extract_part (x, HImode, -1));
5122 else if (code == 'k')
5123 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5124 s390_extract_part (x, SImode, 0));
5125 else if (code == 'm')
5126 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5127 s390_extract_part (x, SImode, -1));
5128 else if (code == 'o')
5129 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xffffffff);
5130 else
5131 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
5132 break;
5133
5134 case CONST_DOUBLE:
5135 gcc_assert (GET_MODE (x) == VOIDmode);
5136 if (code == 'b')
5137 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xff);
5138 else if (code == 'x')
5139 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xffff);
5140 else if (code == 'h')
5141 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ((CONST_DOUBLE_LOW (x) & 0xffff) ^ 0x8000) - 0x8000);
5142 else
5143 gcc_unreachable ();
5144 break;
5145
5146 default:
5147 fatal_insn ("UNKNOWN in print_operand !?", x);
5148 break;
5149 }
5150 }
5151
5152 /* Target hook for assembling integer objects. We need to define it
5153 here to work around a bug in some versions of GAS, which couldn't
5154 handle values smaller than INT_MIN when printed in decimal. */
5155
5156 static bool
5157 s390_assemble_integer (rtx x, unsigned int size, int aligned_p)
5158 {
5159 if (size == 8 && aligned_p
5160 && GET_CODE (x) == CONST_INT && INTVAL (x) < INT_MIN)
5161 {
5162 fprintf (asm_out_file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n",
5163 INTVAL (x));
5164 return true;
5165 }
5166 return default_assemble_integer (x, size, aligned_p);
5167 }
5168
5169 /* Returns true if register REGNO is used for forming
5170 a memory address in expression X. */
5171
5172 static bool
5173 reg_used_in_mem_p (int regno, rtx x)
5174 {
5175 enum rtx_code code = GET_CODE (x);
5176 int i, j;
5177 const char *fmt;
5178
5179 if (code == MEM)
5180 {
5181 if (refers_to_regno_p (regno, regno+1,
5182 XEXP (x, 0), 0))
5183 return true;
5184 }
5185 else if (code == SET
5186 && GET_CODE (SET_DEST (x)) == PC)
5187 {
5188 if (refers_to_regno_p (regno, regno+1,
5189 SET_SRC (x), 0))
5190 return true;
5191 }
5192
5193 fmt = GET_RTX_FORMAT (code);
5194 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5195 {
5196 if (fmt[i] == 'e'
5197 && reg_used_in_mem_p (regno, XEXP (x, i)))
5198 return true;
5199
5200 else if (fmt[i] == 'E')
5201 for (j = 0; j < XVECLEN (x, i); j++)
5202 if (reg_used_in_mem_p (regno, XVECEXP (x, i, j)))
5203 return true;
5204 }
5205 return false;
5206 }
5207
5208 /* Returns true if expression DEP_RTX sets an address register
5209 used by instruction INSN to address memory. */
5210
5211 static bool
5212 addr_generation_dependency_p (rtx dep_rtx, rtx insn)
5213 {
5214 rtx target, pat;
5215
5216 if (GET_CODE (dep_rtx) == INSN)
5217 dep_rtx = PATTERN (dep_rtx);
5218
5219 if (GET_CODE (dep_rtx) == SET)
5220 {
5221 target = SET_DEST (dep_rtx);
5222 if (GET_CODE (target) == STRICT_LOW_PART)
5223 target = XEXP (target, 0);
5224 while (GET_CODE (target) == SUBREG)
5225 target = SUBREG_REG (target);
5226
5227 if (GET_CODE (target) == REG)
5228 {
5229 int regno = REGNO (target);
5230
5231 if (s390_safe_attr_type (insn) == TYPE_LA)
5232 {
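/* For a load-address insn the SET_SRC is itself the address
   computation, so REGNO creates an address generation dependency
   whenever it appears there.  */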
5233 pat = PATTERN (insn);
5234 if (GET_CODE (pat) == PARALLEL)
5235 {
5236 gcc_assert (XVECLEN (pat, 0) == 2);
5237 pat = XVECEXP (pat, 0, 0);
5238 }
5239 gcc_assert (GET_CODE (pat) == SET);
5240 return refers_to_regno_p (regno, regno+1, SET_SRC (pat), 0);
5241 }
5242 else if (get_attr_atype (insn) == ATYPE_AGEN)
5243 return reg_used_in_mem_p (regno, PATTERN (insn));
5244 }
5245 }
5246 return false;
5247 }
5248
5249 /* Return 1 if DEP_INSN sets a register used by INSN in the address generation unit. */
5250
5251 int
5252 s390_agen_dep_p (rtx dep_insn, rtx insn)
5253 {
5254 rtx dep_rtx = PATTERN (dep_insn);
5255 int i;
5256
5257 if (GET_CODE (dep_rtx) == SET
5258 && addr_generation_dependency_p (dep_rtx, insn))
5259 return 1;
5260 else if (GET_CODE (dep_rtx) == PARALLEL)
5261 {
5262 for (i = 0; i < XVECLEN (dep_rtx, 0); i++)
5263 {
5264 if (addr_generation_dependency_p (XVECEXP (dep_rtx, 0, i), insn))
5265 return 1;
5266 }
5267 }
5268 return 0;
5269 }
5270
5271
5272 /* A C statement (sans semicolon) to update the integer scheduling priority
5273 INSN_PRIORITY (INSN). Increase the priority to execute the INSN earlier,
5274 reduce the priority to execute INSN later. Do not define this macro if
5275 you do not need to adjust the scheduling priorities of insns.
5276
5277 A STD instruction should be scheduled earlier,
5278 in order to use the bypass. */
5279
5280 static int
5281 s390_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
5282 {
5283 if (! INSN_P (insn))
5284 return priority;
5285
5286 if (s390_tune != PROCESSOR_2084_Z990
5287 && s390_tune != PROCESSOR_2094_Z9_109)
5288 return priority;
5289
5290 switch (s390_safe_attr_type (insn))
5291 {
5292 case TYPE_FSTOREDF:
5293 case TYPE_FSTORESF:
5294 priority = priority << 3;
5295 break;
5296 case TYPE_STORE:
5297 case TYPE_STM:
5298 priority = priority << 1;
5299 break;
5300 default:
5301 break;
5302 }
5303 return priority;
5304 }
5305
5306 /* The number of instructions that can be issued per cycle. */
5307
5308 static int
5309 s390_issue_rate (void)
5310 {
5311 switch (s390_tune)
5312 {
5313 case PROCESSOR_2084_Z990:
5314 case PROCESSOR_2094_Z9_109:
5315 return 3;
5316 case PROCESSOR_2097_Z10:
5317 return 2;
5318 default:
5319 return 1;
5320 }
5321 }
5322
5323 static int
5324 s390_first_cycle_multipass_dfa_lookahead (void)
5325 {
5326 return 4;
5327 }
5328
5329
5330 /* Annotate every literal pool reference in X by an UNSPEC_LTREF expression.
5331 Fix up MEMs as required. */
5332
5333 static void
5334 annotate_constant_pool_refs (rtx *x)
5335 {
5336 int i, j;
5337 const char *fmt;
5338
5339 gcc_assert (GET_CODE (*x) != SYMBOL_REF
5340 || !CONSTANT_POOL_ADDRESS_P (*x));
5341
5342 /* Literal pool references can only occur inside a MEM ... */
5343 if (GET_CODE (*x) == MEM)
5344 {
5345 rtx memref = XEXP (*x, 0);
5346
5347 if (GET_CODE (memref) == SYMBOL_REF
5348 && CONSTANT_POOL_ADDRESS_P (memref))
5349 {
5350 rtx base = cfun->machine->base_reg;
5351 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, memref, base),
5352 UNSPEC_LTREF);
5353
5354 *x = replace_equiv_address (*x, addr);
5355 return;
5356 }
5357
5358 if (GET_CODE (memref) == CONST
5359 && GET_CODE (XEXP (memref, 0)) == PLUS
5360 && GET_CODE (XEXP (XEXP (memref, 0), 1)) == CONST_INT
5361 && GET_CODE (XEXP (XEXP (memref, 0), 0)) == SYMBOL_REF
5362 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (memref, 0), 0)))
5363 {
5364 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (memref, 0), 1));
5365 rtx sym = XEXP (XEXP (memref, 0), 0);
5366 rtx base = cfun->machine->base_reg;
5367 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5368 UNSPEC_LTREF);
5369
5370 *x = replace_equiv_address (*x, plus_constant (addr, off));
5371 return;
5372 }
5373 }
5374
5375 /* ... or a load-address type pattern. */
5376 if (GET_CODE (*x) == SET)
5377 {
5378 rtx addrref = SET_SRC (*x);
5379
5380 if (GET_CODE (addrref) == SYMBOL_REF
5381 && CONSTANT_POOL_ADDRESS_P (addrref))
5382 {
5383 rtx base = cfun->machine->base_reg;
5384 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addrref, base),
5385 UNSPEC_LTREF);
5386
5387 SET_SRC (*x) = addr;
5388 return;
5389 }
5390
5391 if (GET_CODE (addrref) == CONST
5392 && GET_CODE (XEXP (addrref, 0)) == PLUS
5393 && GET_CODE (XEXP (XEXP (addrref, 0), 1)) == CONST_INT
5394 && GET_CODE (XEXP (XEXP (addrref, 0), 0)) == SYMBOL_REF
5395 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (addrref, 0), 0)))
5396 {
5397 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (addrref, 0), 1));
5398 rtx sym = XEXP (XEXP (addrref, 0), 0);
5399 rtx base = cfun->machine->base_reg;
5400 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5401 UNSPEC_LTREF);
5402
5403 SET_SRC (*x) = plus_constant (addr, off);
5404 return;
5405 }
5406 }
5407
5408 /* Annotate LTREL_BASE as well. */
5409 if (GET_CODE (*x) == UNSPEC
5410 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
5411 {
5412 rtx base = cfun->machine->base_reg;
5413 *x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XVECEXP (*x, 0, 0), base),
5414 UNSPEC_LTREL_BASE);
5415 return;
5416 }
5417
5418 fmt = GET_RTX_FORMAT (GET_CODE (*x));
5419 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
5420 {
5421 if (fmt[i] == 'e')
5422 {
5423 annotate_constant_pool_refs (&XEXP (*x, i));
5424 }
5425 else if (fmt[i] == 'E')
5426 {
5427 for (j = 0; j < XVECLEN (*x, i); j++)
5428 annotate_constant_pool_refs (&XVECEXP (*x, i, j));
5429 }
5430 }
5431 }
5432
5433 /* Split all branches that exceed the maximum distance.
5434 Returns true if this created a new literal pool entry. */
5435
5436 static int
5437 s390_split_branches (void)
5438 {
5439 rtx temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
5440 int new_literal = 0, ret;
5441 rtx insn, pat, tmp, target;
5442 rtx *label;
5443
5444 /* We need correct insn addresses. */
5445
5446 shorten_branches (get_insns ());
5447
5448 /* Find all branches that exceed 64KB, and split them. */
5449
5450 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5451 {
5452 if (GET_CODE (insn) != JUMP_INSN)
5453 continue;
5454
5455 pat = PATTERN (insn);
5456 if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 2)
5457 pat = XVECEXP (pat, 0, 0);
5458 if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
5459 continue;
5460
5461 if (GET_CODE (SET_SRC (pat)) == LABEL_REF)
5462 {
5463 label = &SET_SRC (pat);
5464 }
5465 else if (GET_CODE (SET_SRC (pat)) == IF_THEN_ELSE)
5466 {
5467 if (GET_CODE (XEXP (SET_SRC (pat), 1)) == LABEL_REF)
5468 label = &XEXP (SET_SRC (pat), 1);
5469 else if (GET_CODE (XEXP (SET_SRC (pat), 2)) == LABEL_REF)
5470 label = &XEXP (SET_SRC (pat), 2);
5471 else
5472 continue;
5473 }
5474 else
5475 continue;
5476
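/* Branches that still fit the short 4-byte encoding are left alone;
   only longer ones are rewritten below to go through a register.  */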
5477 if (get_attr_length (insn) <= 4)
5478 continue;
5479
5480 /* We are going to use the return register as a scratch register,
5481 so make sure it will be saved/restored by the prologue/epilogue. */
5482 cfun_frame_layout.save_return_addr_p = 1;
5483
5484 if (!flag_pic)
5485 {
5486 new_literal = 1;
5487 tmp = force_const_mem (Pmode, *label);
5488 tmp = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, tmp), insn);
5489 INSN_ADDRESSES_NEW (tmp, -1);
5490 annotate_constant_pool_refs (&PATTERN (tmp));
5491
5492 target = temp_reg;
5493 }
5494 else
5495 {
5496 new_literal = 1;
5497 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, *label),
5498 UNSPEC_LTREL_OFFSET);
5499 target = gen_rtx_CONST (Pmode, target);
5500 target = force_const_mem (Pmode, target);
5501 tmp = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, target), insn);
5502 INSN_ADDRESSES_NEW (tmp, -1);
5503 annotate_constant_pool_refs (&PATTERN (tmp));
5504
5505 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XEXP (target, 0),
5506 cfun->machine->base_reg),
5507 UNSPEC_LTREL_BASE);
5508 target = gen_rtx_PLUS (Pmode, temp_reg, target);
5509 }
5510
5511 ret = validate_change (insn, label, target, 0);
5512 gcc_assert (ret);
5513 }
5514
5515 return new_literal;
5516 }
5517
5518
5519 /* Find an annotated literal pool symbol referenced in RTX X,
5520 and store it at REF. Will abort if X contains references to
5521 more than one such pool symbol; multiple references to the same
5522 symbol are allowed, however.
5523
5524 The rtx pointed to by REF must be initialized to NULL_RTX
5525 by the caller before calling this routine. */
5526
5527 static void
5528 find_constant_pool_ref (rtx x, rtx *ref)
5529 {
5530 int i, j;
5531 const char *fmt;
5532
5533 /* Ignore LTREL_BASE references. */
5534 if (GET_CODE (x) == UNSPEC
5535 && XINT (x, 1) == UNSPEC_LTREL_BASE)
5536 return;
5537 /* Likewise POOL_ENTRY insns. */
5538 if (GET_CODE (x) == UNSPEC_VOLATILE
5539 && XINT (x, 1) == UNSPECV_POOL_ENTRY)
5540 return;
5541
5542 gcc_assert (GET_CODE (x) != SYMBOL_REF
5543 || !CONSTANT_POOL_ADDRESS_P (x));
5544
5545 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_LTREF)
5546 {
5547 rtx sym = XVECEXP (x, 0, 0);
5548 gcc_assert (GET_CODE (sym) == SYMBOL_REF
5549 && CONSTANT_POOL_ADDRESS_P (sym));
5550
5551 if (*ref == NULL_RTX)
5552 *ref = sym;
5553 else
5554 gcc_assert (*ref == sym);
5555
5556 return;
5557 }
5558
5559 fmt = GET_RTX_FORMAT (GET_CODE (x));
5560 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5561 {
5562 if (fmt[i] == 'e')
5563 {
5564 find_constant_pool_ref (XEXP (x, i), ref);
5565 }
5566 else if (fmt[i] == 'E')
5567 {
5568 for (j = 0; j < XVECLEN (x, i); j++)
5569 find_constant_pool_ref (XVECEXP (x, i, j), ref);
5570 }
5571 }
5572 }
5573
5574 /* Replace every reference to the annotated literal pool
5575 symbol REF in X by its base plus OFFSET. */
5576
5577 static void
5578 replace_constant_pool_ref (rtx *x, rtx ref, rtx offset)
5579 {
5580 int i, j;
5581 const char *fmt;
5582
5583 gcc_assert (*x != ref);
5584
5585 if (GET_CODE (*x) == UNSPEC
5586 && XINT (*x, 1) == UNSPEC_LTREF
5587 && XVECEXP (*x, 0, 0) == ref)
5588 {
5589 *x = gen_rtx_PLUS (Pmode, XVECEXP (*x, 0, 1), offset);
5590 return;
5591 }
5592
5593 if (GET_CODE (*x) == PLUS
5594 && GET_CODE (XEXP (*x, 1)) == CONST_INT
5595 && GET_CODE (XEXP (*x, 0)) == UNSPEC
5596 && XINT (XEXP (*x, 0), 1) == UNSPEC_LTREF
5597 && XVECEXP (XEXP (*x, 0), 0, 0) == ref)
5598 {
5599 rtx addr = gen_rtx_PLUS (Pmode, XVECEXP (XEXP (*x, 0), 0, 1), offset);
5600 *x = plus_constant (addr, INTVAL (XEXP (*x, 1)));
5601 return;
5602 }
5603
5604 fmt = GET_RTX_FORMAT (GET_CODE (*x));
5605 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
5606 {
5607 if (fmt[i] == 'e')
5608 {
5609 replace_constant_pool_ref (&XEXP (*x, i), ref, offset);
5610 }
5611 else if (fmt[i] == 'E')
5612 {
5613 for (j = 0; j < XVECLEN (*x, i); j++)
5614 replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, offset);
5615 }
5616 }
5617 }
5618
5619 /* Check whether X contains an UNSPEC_LTREL_BASE.
5620 Return its constant pool symbol if found, NULL_RTX otherwise. */
5621
5622 static rtx
5623 find_ltrel_base (rtx x)
5624 {
5625 int i, j;
5626 const char *fmt;
5627
5628 if (GET_CODE (x) == UNSPEC
5629 && XINT (x, 1) == UNSPEC_LTREL_BASE)
5630 return XVECEXP (x, 0, 0);
5631
5632 fmt = GET_RTX_FORMAT (GET_CODE (x));
5633 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5634 {
5635 if (fmt[i] == 'e')
5636 {
5637 rtx fnd = find_ltrel_base (XEXP (x, i));
5638 if (fnd)
5639 return fnd;
5640 }
5641 else if (fmt[i] == 'E')
5642 {
5643 for (j = 0; j < XVECLEN (x, i); j++)
5644 {
5645 rtx fnd = find_ltrel_base (XVECEXP (x, i, j));
5646 if (fnd)
5647 return fnd;
5648 }
5649 }
5650 }
5651
5652 return NULL_RTX;
5653 }
5654
5655 /* Replace any occurrence of UNSPEC_LTREL_BASE in X with its base. */
5656
5657 static void
5658 replace_ltrel_base (rtx *x)
5659 {
5660 int i, j;
5661 const char *fmt;
5662
5663 if (GET_CODE (*x) == UNSPEC
5664 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
5665 {
5666 *x = XVECEXP (*x, 0, 1);
5667 return;
5668 }
5669
5670 fmt = GET_RTX_FORMAT (GET_CODE (*x));
5671 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
5672 {
5673 if (fmt[i] == 'e')
5674 {
5675 replace_ltrel_base (&XEXP (*x, i));
5676 }
5677 else if (fmt[i] == 'E')
5678 {
5679 for (j = 0; j < XVECLEN (*x, i); j++)
5680 replace_ltrel_base (&XVECEXP (*x, i, j));
5681 }
5682 }
5683 }
5684
5685
5686 /* We keep a list of constants which we have to add to internal
5687 constant tables in the middle of large functions. */
5688
5689 #define NR_C_MODES 11
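/* Ordered by decreasing size so that emitting the pool in array
   order keeps every entry naturally aligned.  */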
5690 enum machine_mode constant_modes[NR_C_MODES] =
5691 {
5692 TFmode, TImode, TDmode,
5693 DFmode, DImode, DDmode,
5694 SFmode, SImode, SDmode,
5695 HImode,
5696 QImode
5697 };
5698
5699 struct constant
5700 {
5701 struct constant *next;
5702 rtx value;
5703 rtx label;
5704 };
5705
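/* A constant pool covers a range of insns (recorded in the INSNS
   bitmap), accumulates per-mode constant chains and execute-target
   templates, and remembers the placeholder POOL_INSN and the total
   SIZE in bytes.  */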
5706 struct constant_pool
5707 {
5708 struct constant_pool *next;
5709 rtx first_insn;
5710 rtx pool_insn;
5711 bitmap insns;
5712 rtx emit_pool_after;
5713
5714 struct constant *constants[NR_C_MODES];
5715 struct constant *execute;
5716 rtx label;
5717 int size;
5718 };
5719
5720 /* Allocate new constant_pool structure. */
5721
5722 static struct constant_pool *
5723 s390_alloc_pool (void)
5724 {
5725 struct constant_pool *pool;
5726 int i;
5727
5728 pool = (struct constant_pool *) xmalloc (sizeof *pool);
5729 pool->next = NULL;
5730 for (i = 0; i < NR_C_MODES; i++)
5731 pool->constants[i] = NULL;
5732
5733 pool->execute = NULL;
5734 pool->label = gen_label_rtx ();
5735 pool->first_insn = NULL_RTX;
5736 pool->pool_insn = NULL_RTX;
5737 pool->insns = BITMAP_ALLOC (NULL);
5738 pool->size = 0;
5739 pool->emit_pool_after = NULL_RTX;
5740
5741 return pool;
5742 }
5743
5744 /* Create new constant pool covering instructions starting at INSN
5745 and chain it to the end of POOL_LIST. */
5746
5747 static struct constant_pool *
5748 s390_start_pool (struct constant_pool **pool_list, rtx insn)
5749 {
5750 struct constant_pool *pool, **prev;
5751
5752 pool = s390_alloc_pool ();
5753 pool->first_insn = insn;
5754
5755 for (prev = pool_list; *prev; prev = &(*prev)->next)
5756 ;
5757 *prev = pool;
5758
5759 return pool;
5760 }
5761
5762 /* End range of instructions covered by POOL at INSN and emit
5763 placeholder insn representing the pool. */
5764
5765 static void
5766 s390_end_pool (struct constant_pool *pool, rtx insn)
5767 {
5768 rtx pool_size = GEN_INT (pool->size + 8 /* alignment slop */);
5769
5770 if (!insn)
5771 insn = get_last_insn ();
5772
5773 pool->pool_insn = emit_insn_after (gen_pool (pool_size), insn);
5774 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
5775 }
5776
5777 /* Add INSN to the list of insns covered by POOL. */
5778
5779 static void
5780 s390_add_pool_insn (struct constant_pool *pool, rtx insn)
5781 {
5782 bitmap_set_bit (pool->insns, INSN_UID (insn));
5783 }
5784
5785 /* Return pool out of POOL_LIST that covers INSN. */
5786
5787 static struct constant_pool *
5788 s390_find_pool (struct constant_pool *pool_list, rtx insn)
5789 {
5790 struct constant_pool *pool;
5791
5792 for (pool = pool_list; pool; pool = pool->next)
5793 if (bitmap_bit_p (pool->insns, INSN_UID (insn)))
5794 break;
5795
5796 return pool;
5797 }
5798
5799 /* Add constant VAL of mode MODE to the constant pool POOL. */
5800
5801 static void
5802 s390_add_constant (struct constant_pool *pool, rtx val, enum machine_mode mode)
5803 {
5804 struct constant *c;
5805 int i;
5806
5807 for (i = 0; i < NR_C_MODES; i++)
5808 if (constant_modes[i] == mode)
5809 break;
5810 gcc_assert (i != NR_C_MODES);
5811
5812 for (c = pool->constants[i]; c != NULL; c = c->next)
5813 if (rtx_equal_p (val, c->value))
5814 break;
5815
5816 if (c == NULL)
5817 {
5818 c = (struct constant *) xmalloc (sizeof *c);
5819 c->value = val;
5820 c->label = gen_label_rtx ();
5821 c->next = pool->constants[i];
5822 pool->constants[i] = c;
5823 pool->size += GET_MODE_SIZE (mode);
5824 }
5825 }
5826
5827 /* Return an rtx that represents the offset of X from the start of
5828 pool POOL. */
5829
5830 static rtx
5831 s390_pool_offset (struct constant_pool *pool, rtx x)
5832 {
5833 rtx label;
5834
5835 label = gen_rtx_LABEL_REF (GET_MODE (x), pool->label);
5836 x = gen_rtx_UNSPEC (GET_MODE (x), gen_rtvec (2, x, label),
5837 UNSPEC_POOL_OFFSET);
5838 return gen_rtx_CONST (GET_MODE (x), x);
5839 }
5840
5841 /* Find constant VAL of mode MODE in the constant pool POOL.
5842 Return an RTX describing the distance from the start of
5843 the pool to the location of the new constant. */
5844
5845 static rtx
5846 s390_find_constant (struct constant_pool *pool, rtx val,
5847 enum machine_mode mode)
5848 {
5849 struct constant *c;
5850 int i;
5851
5852 for (i = 0; i < NR_C_MODES; i++)
5853 if (constant_modes[i] == mode)
5854 break;
5855 gcc_assert (i != NR_C_MODES);
5856
5857 for (c = pool->constants[i]; c != NULL; c = c->next)
5858 if (rtx_equal_p (val, c->value))
5859 break;
5860
5861 gcc_assert (c);
5862
5863 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
5864 }
5865
5866 /* Check whether INSN is an execute. Return the label_ref to its
5867 execute target template if so, NULL_RTX otherwise. */
5868
5869 static rtx
5870 s390_execute_label (rtx insn)
5871 {
5872 if (GET_CODE (insn) == INSN
5873 && GET_CODE (PATTERN (insn)) == PARALLEL
5874 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == UNSPEC
5875 && XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_EXECUTE)
5876 return XVECEXP (XVECEXP (PATTERN (insn), 0, 0), 0, 2);
5877
5878 return NULL_RTX;
5879 }
5880
5881 /* Add execute target for INSN to the constant pool POOL. */
5882
5883 static void
5884 s390_add_execute (struct constant_pool *pool, rtx insn)
5885 {
5886 struct constant *c;
5887
5888 for (c = pool->execute; c != NULL; c = c->next)
5889 if (INSN_UID (insn) == INSN_UID (c->value))
5890 break;
5891
5892 if (c == NULL)
5893 {
5894 c = (struct constant *) xmalloc (sizeof *c);
5895 c->value = insn;
5896 c->label = gen_label_rtx ();
5897 c->next = pool->execute;
5898 pool->execute = c;
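/* Reserve space for the execute target template; a single S/390
   instruction occupies at most 6 bytes.  */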
5899 pool->size += 6;
5900 }
5901 }
5902
5903 /* Find execute target for INSN in the constant pool POOL.
5904 Return an RTX describing the distance from the start of
5905 the pool to the location of the execute target. */
5906
5907 static rtx
5908 s390_find_execute (struct constant_pool *pool, rtx insn)
5909 {
5910 struct constant *c;
5911
5912 for (c = pool->execute; c != NULL; c = c->next)
5913 if (INSN_UID (insn) == INSN_UID (c->value))
5914 break;
5915
5916 gcc_assert (c);
5917
5918 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
5919 }
5920
5921 /* For an execute INSN, extract the execute target template. */
5922
5923 static rtx
5924 s390_execute_target (rtx insn)
5925 {
5926 rtx pattern = PATTERN (insn);
5927 gcc_assert (s390_execute_label (insn));
5928
5929 if (XVECLEN (pattern, 0) == 2)
5930 {
5931 pattern = copy_rtx (XVECEXP (pattern, 0, 1));
5932 }
5933 else
5934 {
5935 rtvec vec = rtvec_alloc (XVECLEN (pattern, 0) - 1);
5936 int i;
5937
5938 for (i = 0; i < XVECLEN (pattern, 0) - 1; i++)
5939 RTVEC_ELT (vec, i) = copy_rtx (XVECEXP (pattern, 0, i + 1));
5940
5941 pattern = gen_rtx_PARALLEL (VOIDmode, vec);
5942 }
5943
5944 return pattern;
5945 }
5946
5947 /* Indicate that INSN cannot be duplicated. This is the case for
5948 execute insns that carry a unique label. */
5949
5950 static bool
5951 s390_cannot_copy_insn_p (rtx insn)
5952 {
5953 rtx label = s390_execute_label (insn);
5954 return label && label != const0_rtx;
5955 }
5956
5957 /* Dump out the constants in POOL. If REMOTE_LABEL is true,
5958 do not emit the pool base label. */
5959
5960 static void
5961 s390_dump_pool (struct constant_pool *pool, bool remote_label)
5962 {
5963 struct constant *c;
5964 rtx insn = pool->pool_insn;
5965 int i;
5966
5967 /* Switch to rodata section. */
5968 if (TARGET_CPU_ZARCH)
5969 {
5970 insn = emit_insn_after (gen_pool_section_start (), insn);
5971 INSN_ADDRESSES_NEW (insn, -1);
5972 }
5973
5974 /* Ensure minimum pool alignment. */
5975 if (TARGET_CPU_ZARCH)
5976 insn = emit_insn_after (gen_pool_align (GEN_INT (8)), insn);
5977 else
5978 insn = emit_insn_after (gen_pool_align (GEN_INT (4)), insn);
5979 INSN_ADDRESSES_NEW (insn, -1);
5980
5981 /* Emit pool base label. */
5982 if (!remote_label)
5983 {
5984 insn = emit_label_after (pool->label, insn);
5985 INSN_ADDRESSES_NEW (insn, -1);
5986 }
5987
5988 /* Dump constants in descending alignment requirement order,
5989 ensuring proper alignment for every constant. */
5990 for (i = 0; i < NR_C_MODES; i++)
5991 for (c = pool->constants[i]; c; c = c->next)
5992 {
5993 /* Convert UNSPEC_LTREL_OFFSET unspecs to pool-relative references. */
5994 rtx value = copy_rtx (c->value);
5995 if (GET_CODE (value) == CONST
5996 && GET_CODE (XEXP (value, 0)) == UNSPEC
5997 && XINT (XEXP (value, 0), 1) == UNSPEC_LTREL_OFFSET
5998 && XVECLEN (XEXP (value, 0), 0) == 1)
5999 value = s390_pool_offset (pool, XVECEXP (XEXP (value, 0), 0, 0));
6000
6001 insn = emit_label_after (c->label, insn);
6002 INSN_ADDRESSES_NEW (insn, -1);
6003
6004 value = gen_rtx_UNSPEC_VOLATILE (constant_modes[i],
6005 gen_rtvec (1, value),
6006 UNSPECV_POOL_ENTRY);
6007 insn = emit_insn_after (value, insn);
6008 INSN_ADDRESSES_NEW (insn, -1);
6009 }
6010
6011 /* Ensure minimum alignment for instructions. */
6012 insn = emit_insn_after (gen_pool_align (GEN_INT (2)), insn);
6013 INSN_ADDRESSES_NEW (insn, -1);
6014
6015 /* Output in-pool execute template insns. */
6016 for (c = pool->execute; c; c = c->next)
6017 {
6018 insn = emit_label_after (c->label, insn);
6019 INSN_ADDRESSES_NEW (insn, -1);
6020
6021 insn = emit_insn_after (s390_execute_target (c->value), insn);
6022 INSN_ADDRESSES_NEW (insn, -1);
6023 }
6024
6025 /* Switch back to previous section. */
6026 if (TARGET_CPU_ZARCH)
6027 {
6028 insn = emit_insn_after (gen_pool_section_end (), insn);
6029 INSN_ADDRESSES_NEW (insn, -1);
6030 }
6031
6032 insn = emit_barrier_after (insn);
6033 INSN_ADDRESSES_NEW (insn, -1);
6034
6035 /* Remove placeholder insn. */
6036 remove_insn (pool->pool_insn);
6037 }
6038
6039 /* Free all memory used by POOL. */
6040
6041 static void
6042 s390_free_pool (struct constant_pool *pool)
6043 {
6044 struct constant *c, *next;
6045 int i;
6046
6047 for (i = 0; i < NR_C_MODES; i++)
6048 for (c = pool->constants[i]; c; c = next)
6049 {
6050 next = c->next;
6051 free (c);
6052 }
6053
6054 for (c = pool->execute; c; c = next)
6055 {
6056 next = c->next;
6057 free (c);
6058 }
6059
6060 BITMAP_FREE (pool->insns);
6061 free (pool);
6062 }
6063
6064
6065 /* Collect main literal pool. Return NULL on overflow. */
6066
6067 static struct constant_pool *
6068 s390_mainpool_start (void)
6069 {
6070 struct constant_pool *pool;
6071 rtx insn;
6072
6073 pool = s390_alloc_pool ();
6074
6075 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6076 {
6077 if (GET_CODE (insn) == INSN
6078 && GET_CODE (PATTERN (insn)) == SET
6079 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC_VOLATILE
6080 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPECV_MAIN_POOL)
6081 {
6082 gcc_assert (!pool->pool_insn);
6083 pool->pool_insn = insn;
6084 }
6085
6086 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
6087 {
6088 s390_add_execute (pool, insn);
6089 }
6090 else if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6091 {
6092 rtx pool_ref = NULL_RTX;
6093 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6094 if (pool_ref)
6095 {
6096 rtx constant = get_pool_constant (pool_ref);
6097 enum machine_mode mode = get_pool_mode (pool_ref);
6098 s390_add_constant (pool, constant, mode);
6099 }
6100 }
6101
6102 /* If hot/cold partitioning is enabled, we have to make sure that
6103 the literal pool is emitted in the same section where the
6104 initialization of the literal pool base pointer takes place.
6105 emit_pool_after is only used in the non-overflow case on
6106 non-z/Architecture CPUs, where we can emit the literal pool
6107 at the end of the function body within the text section. */
6108 if (NOTE_P (insn)
6109 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS
6110 && !pool->emit_pool_after)
6111 pool->emit_pool_after = PREV_INSN (insn);
6112 }
6113
6114 gcc_assert (pool->pool_insn || pool->size == 0);
6115
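/* Pool constants are addressed via a 12-bit unsigned displacement
   from the pool base, i.e. offsets 0..4095, so a larger pool has
   to be split into chunks instead.  */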
6116 if (pool->size >= 4096)
6117 {
6118 /* We're going to chunkify the pool, so remove the main
6119 pool placeholder insn. */
6120 remove_insn (pool->pool_insn);
6121
6122 s390_free_pool (pool);
6123 pool = NULL;
6124 }
6125
6126 /* If the function ends with the section where the literal pool
6127 should be emitted, set the marker to its end. */
6128 if (pool && !pool->emit_pool_after)
6129 pool->emit_pool_after = get_last_insn ();
6130
6131 return pool;
6132 }
6133
6134 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6135 Modify the current function to output the pool constants as well as
6136 the pool register setup instruction. */
6137
6138 static void
6139 s390_mainpool_finish (struct constant_pool *pool)
6140 {
6141 rtx base_reg = cfun->machine->base_reg;
6142 rtx insn;
6143
6144 /* If the pool is empty, we're done. */
6145 if (pool->size == 0)
6146 {
6147 /* We don't actually need a base register after all. */
6148 cfun->machine->base_reg = NULL_RTX;
6149
6150 if (pool->pool_insn)
6151 remove_insn (pool->pool_insn);
6152 s390_free_pool (pool);
6153 return;
6154 }
6155
6156 /* We need correct insn addresses. */
6157 shorten_branches (get_insns ());
6158
6159 /* On zSeries, we use a LARL to load the pool register. The pool is
6160 located in the .rodata section, so we emit it after the function. */
6161 if (TARGET_CPU_ZARCH)
6162 {
6163 insn = gen_main_base_64 (base_reg, pool->label);
6164 insn = emit_insn_after (insn, pool->pool_insn);
6165 INSN_ADDRESSES_NEW (insn, -1);
6166 remove_insn (pool->pool_insn);
6167
6168 insn = get_last_insn ();
6169 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6170 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6171
6172 s390_dump_pool (pool, 0);
6173 }
6174
6175 /* On S/390, if the total size of the function's code plus literal pool
6176 does not exceed 4096 bytes, we use BASR to set up a function base
6177 pointer, and emit the literal pool at the end of the function. */
6178 else if (INSN_ADDRESSES (INSN_UID (pool->emit_pool_after))
6179 + pool->size + 8 /* alignment slop */ < 4096)
6180 {
6181 insn = gen_main_base_31_small (base_reg, pool->label);
6182 insn = emit_insn_after (insn, pool->pool_insn);
6183 INSN_ADDRESSES_NEW (insn, -1);
6184 remove_insn (pool->pool_insn);
6185
6186 insn = emit_label_after (pool->label, insn);
6187 INSN_ADDRESSES_NEW (insn, -1);
6188
6189 /* emit_pool_after will be set by s390_mainpool_start to the
6190 last insn of the section where the literal pool should be
6191 emitted. */
6192 insn = pool->emit_pool_after;
6193
6194 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6195 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6196
6197 s390_dump_pool (pool, 1);
6198 }
6199
6200 /* Otherwise, we emit an inline literal pool and use BASR to branch
6201 over it, setting up the pool register at the same time. */
6202 else
6203 {
6204 rtx pool_end = gen_label_rtx ();
6205
6206 insn = gen_main_base_31_large (base_reg, pool->label, pool_end);
6207 insn = emit_insn_after (insn, pool->pool_insn);
6208 INSN_ADDRESSES_NEW (insn, -1);
6209 remove_insn (pool->pool_insn);
6210
6211 insn = emit_label_after (pool->label, insn);
6212 INSN_ADDRESSES_NEW (insn, -1);
6213
6214 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6215 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6216
6217 insn = emit_label_after (pool_end, pool->pool_insn);
6218 INSN_ADDRESSES_NEW (insn, -1);
6219
6220 s390_dump_pool (pool, 1);
6221 }
6222
6223
6224 /* Replace all literal pool references. */
6225
6226 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6227 {
6228 if (INSN_P (insn))
6229 replace_ltrel_base (&PATTERN (insn));
6230
6231 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6232 {
6233 rtx addr, pool_ref = NULL_RTX;
6234 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6235 if (pool_ref)
6236 {
6237 if (s390_execute_label (insn))
6238 addr = s390_find_execute (pool, insn);
6239 else
6240 addr = s390_find_constant (pool, get_pool_constant (pool_ref),
6241 get_pool_mode (pool_ref));
6242
6243 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
6244 INSN_CODE (insn) = -1;
6245 }
6246 }
6247 }
6248
6249
6250 /* Free the pool. */
6251 s390_free_pool (pool);
6252 }
6253
6254 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6255 We have decided we cannot use this pool, so revert all changes
6256 to the current function that were done by s390_mainpool_start. */
6257 static void
6258 s390_mainpool_cancel (struct constant_pool *pool)
6259 {
6260 /* We didn't actually change the instruction stream, so simply
6261 free the pool memory. */
6262 s390_free_pool (pool);
6263 }
6264
6265
6266 /* Chunkify the literal pool. */
6267
6268 #define S390_POOL_CHUNK_MIN 0xc00
6269 #define S390_POOL_CHUNK_MAX 0xe00
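/* Note: both limits are kept well below the 4096-byte displacement
   range, presumably to leave headroom for alignment padding and for
   the base register reload insns inserted later.  */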
6270
6271 static struct constant_pool *
6272 s390_chunkify_start (void)
6273 {
6274 struct constant_pool *curr_pool = NULL, *pool_list = NULL;
6275 int extra_size = 0;
6276 bitmap far_labels;
6277 rtx pending_ltrel = NULL_RTX;
6278 rtx insn;
6279
6280 rtx (*gen_reload_base) (rtx, rtx) =
6281 TARGET_CPU_ZARCH? gen_reload_base_64 : gen_reload_base_31;
6282
6283
6284 /* We need correct insn addresses. */
6285
6286 shorten_branches (get_insns ());
6287
6288 /* Scan all insns and move literals to pool chunks. */
6289
6290 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6291 {
6292 bool section_switch_p = false;
6293
6294 /* Check for pending LTREL_BASE. */
6295 if (INSN_P (insn))
6296 {
6297 rtx ltrel_base = find_ltrel_base (PATTERN (insn));
6298 if (ltrel_base)
6299 {
6300 gcc_assert (ltrel_base == pending_ltrel);
6301 pending_ltrel = NULL_RTX;
6302 }
6303 }
6304
6305 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
6306 {
6307 if (!curr_pool)
6308 curr_pool = s390_start_pool (&pool_list, insn);
6309
6310 s390_add_execute (curr_pool, insn);
6311 s390_add_pool_insn (curr_pool, insn);
6312 }
6313 else if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6314 {
6315 rtx pool_ref = NULL_RTX;
6316 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6317 if (pool_ref)
6318 {
6319 rtx constant = get_pool_constant (pool_ref);
6320 enum machine_mode mode = get_pool_mode (pool_ref);
6321
6322 if (!curr_pool)
6323 curr_pool = s390_start_pool (&pool_list, insn);
6324
6325 s390_add_constant (curr_pool, constant, mode);
6326 s390_add_pool_insn (curr_pool, insn);
6327
6328 /* Don't split the pool chunk between an LTREL_OFFSET load
6329 and the corresponding LTREL_BASE. */
6330 if (GET_CODE (constant) == CONST
6331 && GET_CODE (XEXP (constant, 0)) == UNSPEC
6332 && XINT (XEXP (constant, 0), 1) == UNSPEC_LTREL_OFFSET)
6333 {
6334 gcc_assert (!pending_ltrel);
6335 pending_ltrel = pool_ref;
6336 }
6337 }
6338 }
6339
6340 if (GET_CODE (insn) == JUMP_INSN || GET_CODE (insn) == CODE_LABEL)
6341 {
6342 if (curr_pool)
6343 s390_add_pool_insn (curr_pool, insn);
6344 /* An LTREL_BASE must follow within the same basic block. */
6345 gcc_assert (!pending_ltrel);
6346 }
6347
6348 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
6349 section_switch_p = true;
6350
6351 if (!curr_pool
6352 || INSN_ADDRESSES_SIZE () <= (size_t) INSN_UID (insn)
6353 || INSN_ADDRESSES (INSN_UID (insn)) == -1)
6354 continue;
6355
6356 if (TARGET_CPU_ZARCH)
6357 {
6358 if (curr_pool->size < S390_POOL_CHUNK_MAX)
6359 continue;
6360
6361 s390_end_pool (curr_pool, NULL_RTX);
6362 curr_pool = NULL;
6363 }
6364 else
6365 {
6366 int chunk_size = INSN_ADDRESSES (INSN_UID (insn))
6367 - INSN_ADDRESSES (INSN_UID (curr_pool->first_insn))
6368 + extra_size;
6369
6370 /* We will later have to insert base register reload insns.
6371 Those will have an effect on code size, which we need to
6372 consider here. This calculation makes rather pessimistic
6373 worst-case assumptions. */
6374 if (GET_CODE (insn) == CODE_LABEL)
6375 extra_size += 6;
6376
6377 if (chunk_size < S390_POOL_CHUNK_MIN
6378 && curr_pool->size < S390_POOL_CHUNK_MIN
6379 && !section_switch_p)
6380 continue;
6381
6382 /* Pool chunks can only be inserted after BARRIERs ... */
6383 if (GET_CODE (insn) == BARRIER)
6384 {
6385 s390_end_pool (curr_pool, insn);
6386 curr_pool = NULL;
6387 extra_size = 0;
6388 }
6389
6390 /* ... so if we don't find one in time, create one. */
6391 else if (chunk_size > S390_POOL_CHUNK_MAX
6392 || curr_pool->size > S390_POOL_CHUNK_MAX
6393 || section_switch_p)
6394 {
6395 rtx label, jump, barrier;
6396
6397 if (!section_switch_p)
6398 {
6399 /* We can insert the barrier only after a 'real' insn. */
6400 if (GET_CODE (insn) != INSN && GET_CODE (insn) != CALL_INSN)
6401 continue;
6402 if (get_attr_length (insn) == 0)
6403 continue;
6404 /* Don't separate LTREL_BASE from the corresponding
6405 LTREL_OFFSET load. */
6406 if (pending_ltrel)
6407 continue;
6408 }
6409 else
6410 {
6411 gcc_assert (!pending_ltrel);
6412
6413 /* The old pool has to end before the section switch
6414 note in order to make it part of the current
6415 section. */
6416 insn = PREV_INSN (insn);
6417 }
6418
6419 label = gen_label_rtx ();
6420 jump = emit_jump_insn_after (gen_jump (label), insn);
6421 barrier = emit_barrier_after (jump);
6422 insn = emit_label_after (label, barrier);
6423 JUMP_LABEL (jump) = label;
6424 LABEL_NUSES (label) = 1;
6425
6426 INSN_ADDRESSES_NEW (jump, -1);
6427 INSN_ADDRESSES_NEW (barrier, -1);
6428 INSN_ADDRESSES_NEW (insn, -1);
6429
6430 s390_end_pool (curr_pool, barrier);
6431 curr_pool = NULL;
6432 extra_size = 0;
6433 }
6434 }
6435 }
6436
6437 if (curr_pool)
6438 s390_end_pool (curr_pool, NULL_RTX);
6439 gcc_assert (!pending_ltrel);
6440
6441 /* Find all labels that are branched into
6442 from an insn belonging to a different chunk. */
6443
6444 far_labels = BITMAP_ALLOC (NULL);
6445
6446 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6447 {
6448 /* Labels marked with LABEL_PRESERVE_P can be the target
6449 of non-local jumps, so we have to mark them.
6450 The same holds for named labels.
6451
6452 Don't do that, however, if it is the label before
6453 a jump table. */
6454
6455 if (GET_CODE (insn) == CODE_LABEL
6456 && (LABEL_PRESERVE_P (insn) || LABEL_NAME (insn)))
6457 {
6458 rtx vec_insn = next_real_insn (insn);
6459 rtx vec_pat = vec_insn && GET_CODE (vec_insn) == JUMP_INSN ?
6460 PATTERN (vec_insn) : NULL_RTX;
6461 if (!vec_pat
6462 || !(GET_CODE (vec_pat) == ADDR_VEC
6463 || GET_CODE (vec_pat) == ADDR_DIFF_VEC))
6464 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (insn));
6465 }
6466
6467 /* If we have a direct jump (conditional or unconditional)
6468 or a casesi jump, check all potential targets. */
6469 else if (GET_CODE (insn) == JUMP_INSN)
6470 {
6471 rtx pat = PATTERN (insn);
6472 if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 2)
6473 pat = XVECEXP (pat, 0, 0);
6474
6475 if (GET_CODE (pat) == SET)
6476 {
6477 rtx label = JUMP_LABEL (insn);
6478 if (label)
6479 {
6480 if (s390_find_pool (pool_list, label)
6481 != s390_find_pool (pool_list, insn))
6482 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
6483 }
6484 }
6485 else if (GET_CODE (pat) == PARALLEL
6486 && XVECLEN (pat, 0) == 2
6487 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
6488 && GET_CODE (XVECEXP (pat, 0, 1)) == USE
6489 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == LABEL_REF)
6490 {
6491 /* Find the jump table used by this casesi jump. */
6492 rtx vec_label = XEXP (XEXP (XVECEXP (pat, 0, 1), 0), 0);
6493 rtx vec_insn = next_real_insn (vec_label);
6494 rtx vec_pat = vec_insn && GET_CODE (vec_insn) == JUMP_INSN ?
6495 PATTERN (vec_insn) : NULL_RTX;
6496 if (vec_pat
6497 && (GET_CODE (vec_pat) == ADDR_VEC
6498 || GET_CODE (vec_pat) == ADDR_DIFF_VEC))
6499 {
6500 int i, diff_p = GET_CODE (vec_pat) == ADDR_DIFF_VEC;
6501
6502 for (i = 0; i < XVECLEN (vec_pat, diff_p); i++)
6503 {
6504 rtx label = XEXP (XVECEXP (vec_pat, diff_p, i), 0);
6505
6506 if (s390_find_pool (pool_list, label)
6507 != s390_find_pool (pool_list, insn))
6508 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
6509 }
6510 }
6511 }
6512 }
6513 }
6514
6515 /* Insert base register reload insns before every pool. */
6516
6517 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
6518 {
6519 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
6520 curr_pool->label);
6521 rtx insn = curr_pool->first_insn;
6522 INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
6523 }
6524
6525 /* Insert base register reload insns at every far label. */
6526
6527 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6528 if (GET_CODE (insn) == CODE_LABEL
6529 && bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
6530 {
6531 struct constant_pool *pool = s390_find_pool (pool_list, insn);
6532 if (pool)
6533 {
6534 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
6535 pool->label);
6536 INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
6537 }
6538 }
6539
6540
6541 BITMAP_FREE (far_labels);
6542
6543
6544 /* Recompute insn addresses. */
6545
6546 init_insn_lengths ();
6547 shorten_branches (get_insns ());
6548
6549 return pool_list;
6550 }
6551
6552 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
6553 After we have decided to use this list, finish implementing
6554 all changes to the current function as required. */
6555
6556 static void
6557 s390_chunkify_finish (struct constant_pool *pool_list)
6558 {
6559 struct constant_pool *curr_pool = NULL;
6560 rtx insn;
6561
6562
6563 /* Replace all literal pool references. */
6564
6565 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6566 {
6567 if (INSN_P (insn))
6568 replace_ltrel_base (&PATTERN (insn));
6569
6570 curr_pool = s390_find_pool (pool_list, insn);
6571 if (!curr_pool)
6572 continue;
6573
6574 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6575 {
6576 rtx addr, pool_ref = NULL_RTX;
6577 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6578 if (pool_ref)
6579 {
6580 if (s390_execute_label (insn))
6581 addr = s390_find_execute (curr_pool, insn);
6582 else
6583 addr = s390_find_constant (curr_pool,
6584 get_pool_constant (pool_ref),
6585 get_pool_mode (pool_ref));
6586
6587 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
6588 INSN_CODE (insn) = -1;
6589 }
6590 }
6591 }
6592
6593 /* Dump out all literal pools. */
6594
6595 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
6596 s390_dump_pool (curr_pool, 0);
6597
6598 /* Free pool list. */
6599
6600 while (pool_list)
6601 {
6602 struct constant_pool *next = pool_list->next;
6603 s390_free_pool (pool_list);
6604 pool_list = next;
6605 }
6606 }
6607
6608 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
6609 We have decided we cannot use this list, so revert all changes
6610 to the current function that were done by s390_chunkify_start. */
6611
6612 static void
6613 s390_chunkify_cancel (struct constant_pool *pool_list)
6614 {
6615 struct constant_pool *curr_pool = NULL;
6616 rtx insn;
6617
6618 /* Remove all pool placeholder insns. */
6619
6620 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
6621 {
6622 /* Did we insert an extra barrier? Remove it. */
6623 rtx barrier = PREV_INSN (curr_pool->pool_insn);
6624 rtx jump = barrier? PREV_INSN (barrier) : NULL_RTX;
6625 rtx label = NEXT_INSN (curr_pool->pool_insn);
6626
6627 if (jump && GET_CODE (jump) == JUMP_INSN
6628 && barrier && GET_CODE (barrier) == BARRIER
6629 && label && GET_CODE (label) == CODE_LABEL
6630 && GET_CODE (PATTERN (jump)) == SET
6631 && SET_DEST (PATTERN (jump)) == pc_rtx
6632 && GET_CODE (SET_SRC (PATTERN (jump))) == LABEL_REF
6633 && XEXP (SET_SRC (PATTERN (jump)), 0) == label)
6634 {
6635 remove_insn (jump);
6636 remove_insn (barrier);
6637 remove_insn (label);
6638 }
6639
6640 remove_insn (curr_pool->pool_insn);
6641 }
6642
6643 /* Remove all base register reload insns. */
6644
6645 for (insn = get_insns (); insn; )
6646 {
6647 rtx next_insn = NEXT_INSN (insn);
6648
6649 if (GET_CODE (insn) == INSN
6650 && GET_CODE (PATTERN (insn)) == SET
6651 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
6652 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_RELOAD_BASE)
6653 remove_insn (insn);
6654
6655 insn = next_insn;
6656 }
6657
6658 /* Free pool list. */
6659
6660 while (pool_list)
6661 {
6662 struct constant_pool *next = pool_list->next;
6663 s390_free_pool (pool_list);
6664 pool_list = next;
6665 }
6666 }
6667
6668 /* Output the constant pool entry EXP in mode MODE with alignment ALIGN. */
6669
6670 void
6671 s390_output_pool_entry (rtx exp, enum machine_mode mode, unsigned int align)
6672 {
6673 REAL_VALUE_TYPE r;
6674
6675 switch (GET_MODE_CLASS (mode))
6676 {
6677 case MODE_FLOAT:
6678 case MODE_DECIMAL_FLOAT:
6679 gcc_assert (GET_CODE (exp) == CONST_DOUBLE);
6680
6681 REAL_VALUE_FROM_CONST_DOUBLE (r, exp);
6682 assemble_real (r, mode, align);
6683 break;
6684
6685 case MODE_INT:
6686 assemble_integer (exp, GET_MODE_SIZE (mode), align, 1);
6687 mark_symbol_refs_as_used (exp);
6688 break;
6689
6690 default:
6691 gcc_unreachable ();
6692 }
6693 }
6694
6695
6696 /* Return an RTL expression representing the value of the return address
6697 for the frame COUNT steps up from the current frame. FRAME is the
6698 frame pointer of that frame. */
6699
6700 rtx
6701 s390_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
6702 {
6703 int offset;
6704 rtx addr;
6705
6706 /* Without backchain, we fail for all but the current frame. */
6707
6708 if (!TARGET_BACKCHAIN && count > 0)
6709 return NULL_RTX;
6710
6711 /* For the current frame, we need to make sure the initial
6712 value of RETURN_REGNUM is actually saved. */
6713
6714 if (count == 0)
6715 {
6716 /* On non-z architectures branch splitting could overwrite r14. */
6717 if (TARGET_CPU_ZARCH)
6718 return get_hard_reg_initial_val (Pmode, RETURN_REGNUM);
6719 else
6720 {
6721 cfun_frame_layout.save_return_addr_p = true;
6722 return gen_rtx_MEM (Pmode, return_address_pointer_rtx);
6723 }
6724 }
6725
6726 if (TARGET_PACKED_STACK)
6727 offset = -2 * UNITS_PER_WORD;
6728 else
6729 offset = RETURN_REGNUM * UNITS_PER_WORD;
6730
6731 addr = plus_constant (frame, offset);
6732 addr = memory_address (Pmode, addr);
6733 return gen_rtx_MEM (Pmode, addr);
6734 }
6735
6736 /* Return an RTL expression representing the back chain stored in
6737 the current stack frame. */
6738
6739 rtx
6740 s390_back_chain_rtx (void)
6741 {
6742 rtx chain;
6743
6744 gcc_assert (TARGET_BACKCHAIN);
6745
6746 if (TARGET_PACKED_STACK)
6747 chain = plus_constant (stack_pointer_rtx,
6748 STACK_POINTER_OFFSET - UNITS_PER_WORD);
6749 else
6750 chain = stack_pointer_rtx;
6751
6752 chain = gen_rtx_MEM (Pmode, chain);
6753 return chain;
6754 }
6755
6756 /* Find first call clobbered register unused in a function.
6757 This could be used as base register in a leaf function
6758 or for holding the return address before epilogue. */
6759
6760 static int
6761 find_unused_clobbered_reg (void)
6762 {
6763 int i;
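/* Only gprs 0..5 are considered: they are call-clobbered and have no
   special role such as return address (r14) or stack pointer (r15).  */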
6764 for (i = 0; i < 6; i++)
6765 if (!df_regs_ever_live_p (i))
6766 return i;
6767 return 0;
6768 }
6769
6770
6771 /* Helper function for s390_regs_ever_clobbered. Sets the fields in DATA for all
6772 clobbered hard regs in SETREG. */
6773
6774 static void
6775 s390_reg_clobbered_rtx (rtx setreg, const_rtx set_insn ATTRIBUTE_UNUSED, void *data)
6776 {
6777 int *regs_ever_clobbered = (int *)data;
6778 unsigned int i, regno;
6779 enum machine_mode mode = GET_MODE (setreg);
6780
6781 if (GET_CODE (setreg) == SUBREG)
6782 {
6783 rtx inner = SUBREG_REG (setreg);
6784 if (!GENERAL_REG_P (inner))
6785 return;
6786 regno = subreg_regno (setreg);
6787 }
6788 else if (GENERAL_REG_P (setreg))
6789 regno = REGNO (setreg);
6790 else
6791 return;
6792
6793 for (i = regno;
6794 i < regno + HARD_REGNO_NREGS (regno, mode);
6795 i++)
6796 regs_ever_clobbered[i] = 1;
6797 }
6798
6799 /* Walks through all basic blocks of the current function looking
6800 for clobbered hard regs using s390_reg_clobbered_rtx. The fields
6801 of the passed integer array REGS_EVER_CLOBBERED are set to one for
6802 each of those regs. */
6803
6804 static void
6805 s390_regs_ever_clobbered (int *regs_ever_clobbered)
6806 {
6807 basic_block cur_bb;
6808 rtx cur_insn;
6809 unsigned int i;
6810
6811 memset (regs_ever_clobbered, 0, 16 * sizeof (int));
6812
6813 /* For non-leaf functions we have to consider all call clobbered regs to be
6814 clobbered. */
6815 if (!current_function_is_leaf)
6816 {
6817 for (i = 0; i < 16; i++)
6818 regs_ever_clobbered[i] = call_really_used_regs[i];
6819 }
6820
6821 /* Make the "magic" eh_return registers live if necessary. For regs_ever_live
6822 this work is done by liveness analysis (mark_regs_live_at_end).
6823 Special care is needed for functions containing landing pads. Landing pads
6824 may use the eh registers, but the code which sets these registers is not
6825 contained in that function. Hence s390_regs_ever_clobbered is not able to
6826 deal with this automatically. */
6827 if (crtl->calls_eh_return || cfun->machine->has_landing_pad_p)
6828 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM ; i++)
6829 if (crtl->calls_eh_return
6830 || (cfun->machine->has_landing_pad_p
6831 && df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
6832 regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;
6833
6834 /* For nonlocal gotos all call-saved registers have to be saved.
6835 This flag is also set for the unwinding code in libgcc.
6836 See expand_builtin_unwind_init. For regs_ever_live this is done by
6837 reload. */
6838 if (cfun->has_nonlocal_label)
6839 for (i = 0; i < 16; i++)
6840 if (!call_really_used_regs[i])
6841 regs_ever_clobbered[i] = 1;
6842
6843 FOR_EACH_BB (cur_bb)
6844 {
6845 FOR_BB_INSNS (cur_bb, cur_insn)
6846 {
6847 if (INSN_P (cur_insn))
6848 note_stores (PATTERN (cur_insn),
6849 s390_reg_clobbered_rtx,
6850 regs_ever_clobbered);
6851 }
6852 }
6853 }
6854
6855 /* Determine the frame area which actually has to be accessed
6856 in the function epilogue. The values are stored at the
6857 given pointers AREA_BOTTOM (address of the lowest used stack
6858 address) and AREA_TOP (address of the first item which does
6859 not belong to the stack frame). */
6860
6861 static void
6862 s390_frame_area (int *area_bottom, int *area_top)
6863 {
6864 int b, t;
6865 int i;
6866
6867 b = INT_MAX;
6868 t = INT_MIN;
6869
6870 if (cfun_frame_layout.first_restore_gpr != -1)
6871 {
6872 b = (cfun_frame_layout.gprs_offset
6873 + cfun_frame_layout.first_restore_gpr * UNITS_PER_WORD);
6874 t = b + (cfun_frame_layout.last_restore_gpr
6875 - cfun_frame_layout.first_restore_gpr + 1) * UNITS_PER_WORD;
6876 }
6877
6878 if (TARGET_64BIT && cfun_save_high_fprs_p)
6879 {
6880 b = MIN (b, cfun_frame_layout.f8_offset);
6881 t = MAX (t, (cfun_frame_layout.f8_offset
6882 + cfun_frame_layout.high_fprs * 8));
6883 }
6884
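/* Under the 31-bit ABI only f4 and f6 (fpr bits 2 and 3) are
   call-saved floating point registers.  */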
6885 if (!TARGET_64BIT)
6886 for (i = 2; i < 4; i++)
6887 if (cfun_fpr_bit_p (i))
6888 {
6889 b = MIN (b, cfun_frame_layout.f4_offset + (i - 2) * 8);
6890 t = MAX (t, cfun_frame_layout.f4_offset + (i - 1) * 8);
6891 }
6892
6893 *area_bottom = b;
6894 *area_top = t;
6895 }
6896
6897 /* Fill cfun->machine with info about register usage of current function.
6898 Return in CLOBBERED_REGS which GPRs are currently considered set. */
6899
6900 static void
6901 s390_register_info (int clobbered_regs[])
6902 {
6903 int i, j;
6904
6905 /* fprs 8 - 15 are call saved for 64 Bit ABI. */
6906 cfun_frame_layout.fpr_bitmap = 0;
6907 cfun_frame_layout.high_fprs = 0;
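/* Hard registers 16..31 are the fprs; regnos 24..31 make up the
   call-saved set f8..f15.  */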
6908 if (TARGET_64BIT)
6909 for (i = 24; i < 32; i++)
6910 if (df_regs_ever_live_p (i) && !global_regs[i])
6911 {
6912 cfun_set_fpr_bit (i - 16);
6913 cfun_frame_layout.high_fprs++;
6914 }
6915
6916 /* Find first and last gpr to be saved. We trust regs_ever_live
6917 data, except that we don't save and restore global registers.
6918
6919 Also, all registers with special meaning to the compiler need
6920 extra handling. */
6921
6922 s390_regs_ever_clobbered (clobbered_regs);
6923
6924 for (i = 0; i < 16; i++)
6925 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i] && !fixed_regs[i];
6926
6927 if (frame_pointer_needed)
6928 clobbered_regs[HARD_FRAME_POINTER_REGNUM] = 1;
6929
6930 if (flag_pic)
6931 clobbered_regs[PIC_OFFSET_TABLE_REGNUM]
6932 |= df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM);
6933
6934 clobbered_regs[BASE_REGNUM]
6935 |= (cfun->machine->base_reg
6936 && REGNO (cfun->machine->base_reg) == BASE_REGNUM);
6937
6938 clobbered_regs[RETURN_REGNUM]
6939 |= (!current_function_is_leaf
6940 || TARGET_TPF_PROFILING
6941 || cfun->machine->split_branches_pending_p
6942 || cfun_frame_layout.save_return_addr_p
6943 || crtl->calls_eh_return
6944 || cfun->stdarg);
6945
6946 clobbered_regs[STACK_POINTER_REGNUM]
6947 |= (!current_function_is_leaf
6948 || TARGET_TPF_PROFILING
6949 || cfun_save_high_fprs_p
6950 || get_frame_size () > 0
6951 || cfun->calls_alloca
6952 || cfun->stdarg);
6953
6954 for (i = 6; i < 16; i++)
6955 if (df_regs_ever_live_p (i) || clobbered_regs[i])
6956 break;
6957 for (j = 15; j > i; j--)
6958 if (df_regs_ever_live_p (j) || clobbered_regs[j])
6959 break;
6960
6961 if (i == 16)
6962 {
6963 /* Nothing to save/restore. */
6964 cfun_frame_layout.first_save_gpr_slot = -1;
6965 cfun_frame_layout.last_save_gpr_slot = -1;
6966 cfun_frame_layout.first_save_gpr = -1;
6967 cfun_frame_layout.first_restore_gpr = -1;
6968 cfun_frame_layout.last_save_gpr = -1;
6969 cfun_frame_layout.last_restore_gpr = -1;
6970 }
6971 else
6972 {
6973 /* Save slots for gprs from i to j. */
6974 cfun_frame_layout.first_save_gpr_slot = i;
6975 cfun_frame_layout.last_save_gpr_slot = j;
6976
6977 for (i = cfun_frame_layout.first_save_gpr_slot;
6978 i < cfun_frame_layout.last_save_gpr_slot + 1;
6979 i++)
6980 if (clobbered_regs[i])
6981 break;
6982
6983 for (j = cfun_frame_layout.last_save_gpr_slot; j > i; j--)
6984 if (clobbered_regs[j])
6985 break;
6986
6987 if (i == cfun_frame_layout.last_save_gpr_slot + 1)
6988 {
6989 /* Nothing to save/restore. */
6990 cfun_frame_layout.first_save_gpr = -1;
6991 cfun_frame_layout.first_restore_gpr = -1;
6992 cfun_frame_layout.last_save_gpr = -1;
6993 cfun_frame_layout.last_restore_gpr = -1;
6994 }
6995 else
6996 {
6997 /* Save / Restore from gpr i to j. */
6998 cfun_frame_layout.first_save_gpr = i;
6999 cfun_frame_layout.first_restore_gpr = i;
7000 cfun_frame_layout.last_save_gpr = j;
7001 cfun_frame_layout.last_restore_gpr = j;
7002 }
7003 }
7004
7005 if (cfun->stdarg)
7006 {
7007 /* Varargs functions need to save gprs 2 to 6. */
7008 if (cfun->va_list_gpr_size
7009 && crtl->args.info.gprs < GP_ARG_NUM_REG)
7010 {
7011 int min_gpr = crtl->args.info.gprs;
7012 int max_gpr = min_gpr + cfun->va_list_gpr_size;
7013 if (max_gpr > GP_ARG_NUM_REG)
7014 max_gpr = GP_ARG_NUM_REG;
7015
7016 if (cfun_frame_layout.first_save_gpr == -1
7017 || cfun_frame_layout.first_save_gpr > 2 + min_gpr)
7018 {
7019 cfun_frame_layout.first_save_gpr = 2 + min_gpr;
7020 cfun_frame_layout.first_save_gpr_slot = 2 + min_gpr;
7021 }
7022
7023 if (cfun_frame_layout.last_save_gpr == -1
7024 || cfun_frame_layout.last_save_gpr < 2 + max_gpr - 1)
7025 {
7026 cfun_frame_layout.last_save_gpr = 2 + max_gpr - 1;
7027 cfun_frame_layout.last_save_gpr_slot = 2 + max_gpr - 1;
7028 }
7029 }
7030
7031 /* Mark f0, f2 for 31 bit and f0-f4 for 64 bit to be saved. */
7032 if (TARGET_HARD_FLOAT && cfun->va_list_fpr_size
7033 && crtl->args.info.fprs < FP_ARG_NUM_REG)
7034 {
7035 int min_fpr = crtl->args.info.fprs;
7036 int max_fpr = min_fpr + cfun->va_list_fpr_size;
7037 if (max_fpr > FP_ARG_NUM_REG)
7038 max_fpr = FP_ARG_NUM_REG;
7039
7040 /* ??? This is currently required to ensure proper location
7041 of the fpr save slots within the va_list save area. */
7042 if (TARGET_PACKED_STACK)
7043 min_fpr = 0;
7044
7045 for (i = min_fpr; i < max_fpr; i++)
7046 cfun_set_fpr_bit (i);
7047 }
7048 }
7049
7050 if (!TARGET_64BIT)
7051 for (i = 2; i < 4; i++)
7052 if (df_regs_ever_live_p (i + 16) && !global_regs[i + 16])
7053 cfun_set_fpr_bit (i);
7054 }
7055
7056 /* Fill cfun->machine with info about frame of current function. */
7057
7058 static void
7059 s390_frame_info (void)
7060 {
7061 int i;
7062
7063 cfun_frame_layout.frame_size = get_frame_size ();
7064 if (!TARGET_64BIT && cfun_frame_layout.frame_size > 0x7fff0000)
7065 fatal_error ("total size of local variables exceeds architecture limit");
7066
7067 if (!TARGET_PACKED_STACK)
7068 {
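/* Standard ABI frame layout as expressed by these offsets: the
   backchain at offset 0, one word-sized save slot per gpr at
   regno * UNITS_PER_WORD, the f0/f2 and f4/f6 slots above the gpr
   area, and the high fprs (f8-f15) at negative offsets below it.  */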
7069 cfun_frame_layout.backchain_offset = 0;
7070 cfun_frame_layout.f0_offset = 16 * UNITS_PER_WORD;
7071 cfun_frame_layout.f4_offset = cfun_frame_layout.f0_offset + 2 * 8;
7072 cfun_frame_layout.f8_offset = -cfun_frame_layout.high_fprs * 8;
7073 cfun_frame_layout.gprs_offset = (cfun_frame_layout.first_save_gpr_slot
7074 * UNITS_PER_WORD);
7075 }
7076 else if (TARGET_BACKCHAIN) /* kernel stack layout */
7077 {
7078 cfun_frame_layout.backchain_offset = (STACK_POINTER_OFFSET
7079 - UNITS_PER_WORD);
7080 cfun_frame_layout.gprs_offset
7081 = (cfun_frame_layout.backchain_offset
7082 - (STACK_POINTER_REGNUM - cfun_frame_layout.first_save_gpr_slot + 1)
7083 * UNITS_PER_WORD);
7084
7085 if (TARGET_64BIT)
7086 {
7087 cfun_frame_layout.f4_offset
7088 = (cfun_frame_layout.gprs_offset
7089 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7090
7091 cfun_frame_layout.f0_offset
7092 = (cfun_frame_layout.f4_offset
7093 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7094 }
7095 else
7096 {
7097 /* On 31 bit we have to care about alignment of the
7098 floating point regs to provide fastest access. */
7099 cfun_frame_layout.f0_offset
7100 = ((cfun_frame_layout.gprs_offset
7101 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1))
7102 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7103
7104 cfun_frame_layout.f4_offset
7105 = (cfun_frame_layout.f0_offset
7106 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7107 }
7108 }
7109 else /* no backchain */
7110 {
7111 cfun_frame_layout.f4_offset
7112 = (STACK_POINTER_OFFSET
7113 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7114
7115 cfun_frame_layout.f0_offset
7116 = (cfun_frame_layout.f4_offset
7117 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7118
7119 cfun_frame_layout.gprs_offset
7120 = cfun_frame_layout.f0_offset - cfun_gprs_save_area_size;
7121 }
7122
7123 if (current_function_is_leaf
7124 && !TARGET_TPF_PROFILING
7125 && cfun_frame_layout.frame_size == 0
7126 && !cfun_save_high_fprs_p
7127 && !cfun->calls_alloca
7128 && !cfun->stdarg)
7129 return;
7130
7131 if (!TARGET_PACKED_STACK)
7132 cfun_frame_layout.frame_size += (STACK_POINTER_OFFSET
7133 + crtl->outgoing_args_size
7134 + cfun_frame_layout.high_fprs * 8);
7135 else
7136 {
7137 if (TARGET_BACKCHAIN)
7138 cfun_frame_layout.frame_size += UNITS_PER_WORD;
7139
7140 /* No alignment trouble here because f8-f15 are only saved under
7141 64 bit. */
7142 cfun_frame_layout.f8_offset = (MIN (MIN (cfun_frame_layout.f0_offset,
7143 cfun_frame_layout.f4_offset),
7144 cfun_frame_layout.gprs_offset)
7145 - cfun_frame_layout.high_fprs * 8);
7146
7147 cfun_frame_layout.frame_size += cfun_frame_layout.high_fprs * 8;
7148
7149 for (i = 0; i < 8; i++)
7150 if (cfun_fpr_bit_p (i))
7151 cfun_frame_layout.frame_size += 8;
7152
7153 cfun_frame_layout.frame_size += cfun_gprs_save_area_size;
7154
7155 /* If an odd number of gprs has to be saved under 31 bit, we have to
7156 adjust the frame size to sustain 8-byte alignment of stack frames. */
7157 cfun_frame_layout.frame_size = ((cfun_frame_layout.frame_size +
7158 STACK_BOUNDARY / BITS_PER_UNIT - 1)
7159 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1));
7160
7161 cfun_frame_layout.frame_size += crtl->outgoing_args_size;
7162 }
7163 }
7164
7165 /* Generate frame layout. Fills in register and frame data for the current
7166 function in cfun->machine. This routine can be called multiple times;
7167 it will re-do the complete frame layout every time. */
7168
7169 static void
7170 s390_init_frame_layout (void)
7171 {
7172 HOST_WIDE_INT frame_size;
7173 int base_used;
7174 int clobbered_regs[16];
7175
7176 /* On S/390 machines, we may need to perform branch splitting, which
7177 will require both base and return address register. We have no
7178 choice but to assume we're going to need them until right at the
7179 end of the machine dependent reorg phase. */
7180 if (!TARGET_CPU_ZARCH)
7181 cfun->machine->split_branches_pending_p = true;
7182
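/* Register usage and frame size depend on each other: the choice of
   base register influences the frame size and vice versa, so iterate
   until the frame size converges.  */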
7183 do
7184 {
7185 frame_size = cfun_frame_layout.frame_size;
7186
7187 /* Try to predict whether we'll need the base register. */
7188 base_used = cfun->machine->split_branches_pending_p
7189 || crtl->uses_const_pool
7190 || (!DISP_IN_RANGE (frame_size)
7191 && !CONST_OK_FOR_K (frame_size));
7192
7193 /* Decide which register to use as literal pool base. In small
7194 leaf functions, try to use an unused call-clobbered register
7195 as base register to avoid save/restore overhead. */
7196 if (!base_used)
7197 cfun->machine->base_reg = NULL_RTX;
7198 else if (current_function_is_leaf && !df_regs_ever_live_p (5))
7199 cfun->machine->base_reg = gen_rtx_REG (Pmode, 5);
7200 else
7201 cfun->machine->base_reg = gen_rtx_REG (Pmode, BASE_REGNUM);
7202
7203 s390_register_info (clobbered_regs);
7204 s390_frame_info ();
7205 }
7206 while (frame_size != cfun_frame_layout.frame_size);
7207 }
7208
7209 /* Update frame layout. Recompute actual register save data based on
7210 current info and update regs_ever_live for the special registers.
7211 May be called multiple times, but may never cause *more* registers
7212 to be saved than s390_init_frame_layout allocated room for. */
7213
7214 static void
7215 s390_update_frame_layout (void)
7216 {
7217 int clobbered_regs[16];
7218
7219 s390_register_info (clobbered_regs);
7220
7221 df_set_regs_ever_live (BASE_REGNUM,
7222 clobbered_regs[BASE_REGNUM] ? true : false);
7223 df_set_regs_ever_live (RETURN_REGNUM,
7224 clobbered_regs[RETURN_REGNUM] ? true : false);
7225 df_set_regs_ever_live (STACK_POINTER_REGNUM,
7226 clobbered_regs[STACK_POINTER_REGNUM] ? true : false);
7227
7228 if (cfun->machine->base_reg)
7229 df_set_regs_ever_live (REGNO (cfun->machine->base_reg), true);
7230 }
7231
7232 /* Return true if it is legal to put a value with MODE into REGNO. */
7233
7234 bool
7235 s390_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
7236 {
7237 switch (REGNO_REG_CLASS (regno))
7238 {
7239 case FP_REGS:
7240 if (REGNO_PAIR_OK (regno, mode))
7241 {
7242 if (mode == SImode || mode == DImode)
7243 return true;
7244
7245 if (FLOAT_MODE_P (mode) && GET_MODE_CLASS (mode) != MODE_VECTOR_FLOAT)
7246 return true;
7247 }
7248 break;
7249 case ADDR_REGS:
7250 if (FRAME_REGNO_P (regno) && mode == Pmode)
7251 return true;
7252
7253 /* fallthrough */
7254 case GENERAL_REGS:
7255 if (REGNO_PAIR_OK (regno, mode))
7256 {
7257 if (TARGET_64BIT
7258 || (mode != TFmode && mode != TCmode && mode != TDmode))
7259 return true;
7260 }
7261 break;
7262 case CC_REGS:
7263 if (GET_MODE_CLASS (mode) == MODE_CC)
7264 return true;
7265 break;
7266 case ACCESS_REGS:
7267 if (REGNO_PAIR_OK (regno, mode))
7268 {
7269 if (mode == SImode || mode == Pmode)
7270 return true;
7271 }
7272 break;
7273 default:
7274 return false;
7275 }
7276
7277 return false;
7278 }
7279
7280 /* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */
7281
7282 bool
7283 s390_hard_regno_rename_ok (unsigned int old_reg, unsigned int new_reg)
7284 {
7285 /* Once we've decided upon a register to use as base register, it must
7286 no longer be used for any other purpose. */
7287 if (cfun->machine->base_reg)
7288 if (REGNO (cfun->machine->base_reg) == old_reg
7289 || REGNO (cfun->machine->base_reg) == new_reg)
7290 return false;
7291
7292 return true;
7293 }
7294
7295 /* Maximum number of registers to represent a value of mode MODE
7296 in a register of class RCLASS. */
7297
7298 int
7299 s390_class_max_nregs (enum reg_class rclass, enum machine_mode mode)
7300 {
7301 switch (rclass)
7302 {
7303 case FP_REGS:
7304 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
7305 return 2 * ((GET_MODE_SIZE (mode) / 2 + 8 - 1) / 8);
7306 else
7307 return (GET_MODE_SIZE (mode) + 8 - 1) / 8;
7308 case ACCESS_REGS:
7309 return (GET_MODE_SIZE (mode) + 4 - 1) / 4;
7310 default:
7311 break;
7312 }
7313 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
7314 }
7315
7316 /* Return true if register FROM can be eliminated via register TO. */
7317
7318 bool
7319 s390_can_eliminate (int from, int to)
7320 {
7321 /* On zSeries machines, we have not marked the base register as fixed.
7322 Instead, we have an elimination rule BASE_REGNUM -> BASE_REGNUM.
7323 If a function requires the base register, we say here that this
7324 elimination cannot be performed. This will cause reload to free
7325 up the base register (as if it were fixed). On the other hand,
7326 if the current function does *not* require the base register, we
7327 say here the elimination succeeds, which in turn allows reload
7328 to allocate the base register for any other purpose. */
7329 if (from == BASE_REGNUM && to == BASE_REGNUM)
7330 {
7331 if (TARGET_CPU_ZARCH)
7332 {
7333 s390_init_frame_layout ();
7334 return cfun->machine->base_reg == NULL_RTX;
7335 }
7336
7337 return false;
7338 }
7339
7340 /* Everything else must point into the stack frame. */
7341 gcc_assert (to == STACK_POINTER_REGNUM
7342 || to == HARD_FRAME_POINTER_REGNUM);
7343
7344 gcc_assert (from == FRAME_POINTER_REGNUM
7345 || from == ARG_POINTER_REGNUM
7346 || from == RETURN_ADDRESS_POINTER_REGNUM);
7347
7348 /* Make sure we actually saved the return address. */
7349 if (from == RETURN_ADDRESS_POINTER_REGNUM)
7350 if (!crtl->calls_eh_return
7351 && !cfun->stdarg
7352 && !cfun_frame_layout.save_return_addr_p)
7353 return false;
7354
7355 return true;
7356 }
7357
7358 /* Return offset between register FROM and TO initially after prolog. */
7359
7360 HOST_WIDE_INT
7361 s390_initial_elimination_offset (int from, int to)
7362 {
7363 HOST_WIDE_INT offset;
7364 int index;
7365
7366 /* ??? Why are we called for non-eliminable pairs? */
7367 if (!s390_can_eliminate (from, to))
7368 return 0;
7369
7370 switch (from)
7371 {
7372 case FRAME_POINTER_REGNUM:
7373 offset = (get_frame_size()
7374 + STACK_POINTER_OFFSET
7375 + crtl->outgoing_args_size);
7376 break;
7377
7378 case ARG_POINTER_REGNUM:
7379 s390_init_frame_layout ();
7380 offset = cfun_frame_layout.frame_size + STACK_POINTER_OFFSET;
7381 break;
7382
7383 case RETURN_ADDRESS_POINTER_REGNUM:
7384 s390_init_frame_layout ();
7385 index = RETURN_REGNUM - cfun_frame_layout.first_save_gpr_slot;
7386 gcc_assert (index >= 0);
7387 offset = cfun_frame_layout.frame_size + cfun_frame_layout.gprs_offset;
7388 offset += index * UNITS_PER_WORD;
7389 break;
7390
7391 case BASE_REGNUM:
7392 offset = 0;
7393 break;
7394
7395 default:
7396 gcc_unreachable ();
7397 }
7398
7399 return offset;
7400 }
7401
7402 /* Emit insn to save fpr REGNUM at offset OFFSET relative
7403 to register BASE. Return generated insn. */
7404
7405 static rtx
7406 save_fpr (rtx base, int offset, int regnum)
7407 {
7408 rtx addr;
7409 addr = gen_rtx_MEM (DFmode, plus_constant (base, offset));
7410
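/* Saves of the fpr argument registers may also be accessed through
   the va_list machinery in stdarg functions, so give them the
   varargs alias set.  */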
7411 if (regnum >= 16 && regnum <= (16 + FP_ARG_NUM_REG))
7412 set_mem_alias_set (addr, get_varargs_alias_set ());
7413 else
7414 set_mem_alias_set (addr, get_frame_alias_set ());
7415
7416 return emit_move_insn (addr, gen_rtx_REG (DFmode, regnum));
7417 }
7418
7419 /* Emit insn to restore fpr REGNUM from offset OFFSET relative
7420 to register BASE. Return generated insn. */
7421
7422 static rtx
7423 restore_fpr (rtx base, int offset, int regnum)
7424 {
7425 rtx addr;
7426 addr = gen_rtx_MEM (DFmode, plus_constant (base, offset));
7427 set_mem_alias_set (addr, get_frame_alias_set ());
7428
7429 return emit_move_insn (gen_rtx_REG (DFmode, regnum), addr);
7430 }
7431
7432 /* Generate insn to save registers FIRST to LAST into
7433 the register save area located at offset OFFSET
7434 relative to register BASE. */
7435
7436 static rtx
7437 save_gprs (rtx base, int offset, int first, int last)
7438 {
7439 rtx addr, insn, note;
7440 int i;
7441
7442 addr = plus_constant (base, offset);
7443 addr = gen_rtx_MEM (Pmode, addr);
7444
7445 set_mem_alias_set (addr, get_frame_alias_set ());
7446
7447 /* Special-case single register. */
7448 if (first == last)
7449 {
7450 if (TARGET_64BIT)
7451 insn = gen_movdi (addr, gen_rtx_REG (Pmode, first));
7452 else
7453 insn = gen_movsi (addr, gen_rtx_REG (Pmode, first));
7454
7455 RTX_FRAME_RELATED_P (insn) = 1;
7456 return insn;
7457 }
7458
7459
7460 insn = gen_store_multiple (addr,
7461 gen_rtx_REG (Pmode, first),
7462 GEN_INT (last - first + 1));
7463
7464 if (first <= 6 && cfun->stdarg)
7465 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
7466 {
7467 rtx mem = XEXP (XVECEXP (PATTERN (insn), 0, i), 0);
7468
7469 if (first + i <= 6)
7470 set_mem_alias_set (mem, get_varargs_alias_set ());
7471 }
7472
7473 /* We need to set the FRAME_RELATED flag on all SETs
7474 inside the store-multiple pattern.
7475
7476 However, we must not emit DWARF records for registers 2..5
7477 if they are stored for use by variable arguments ...
7478
7479 ??? Unfortunately, it is not enough to simply not set the
7480 FRAME_RELATED flags for those SETs, because the first SET
7481 of the PARALLEL is always treated as if it had the flag
7482 set, even if it does not. Therefore we emit a new pattern
7483 without those registers as REG_FRAME_RELATED_EXPR note. */
7484
7485 if (first >= 6)
7486 {
7487 rtx pat = PATTERN (insn);
7488
7489 for (i = 0; i < XVECLEN (pat, 0); i++)
7490 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
7491 RTX_FRAME_RELATED_P (XVECEXP (pat, 0, i)) = 1;
7492
7493 RTX_FRAME_RELATED_P (insn) = 1;
7494 }
7495 else if (last >= 6)
7496 {
7497 addr = plus_constant (base, offset + (6 - first) * UNITS_PER_WORD);
7498 note = gen_store_multiple (gen_rtx_MEM (Pmode, addr),
7499 gen_rtx_REG (Pmode, 6),
7500 GEN_INT (last - 6 + 1));
7501 note = PATTERN (note);
7502
7503 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
7504
7505 for (i = 0; i < XVECLEN (note, 0); i++)
7506 if (GET_CODE (XVECEXP (note, 0, i)) == SET)
7507 RTX_FRAME_RELATED_P (XVECEXP (note, 0, i)) = 1;
7508
7509 RTX_FRAME_RELATED_P (insn) = 1;
7510 }
7511
7512 return insn;
7513 }
7514
7515 /* Generate insn to restore registers FIRST to LAST from
7516 the register save area located at offset OFFSET
7517 relative to register BASE. */
7518
7519 static rtx
7520 restore_gprs (rtx base, int offset, int first, int last)
7521 {
7522 rtx addr, insn;
7523
7524 addr = plus_constant (base, offset);
7525 addr = gen_rtx_MEM (Pmode, addr);
7526 set_mem_alias_set (addr, get_frame_alias_set ());
7527
7528 /* Special-case single register. */
7529 if (first == last)
7530 {
7531 if (TARGET_64BIT)
7532 insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
7533 else
7534 insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);
7535
7536 return insn;
7537 }
7538
7539 insn = gen_load_multiple (gen_rtx_REG (Pmode, first),
7540 addr,
7541 GEN_INT (last - first + 1));
7542 return insn;
7543 }
7544
7545 /* Return insn sequence to load the GOT register. */
7546
7547 static GTY(()) rtx got_symbol;
7548 rtx
7549 s390_load_got (void)
7550 {
7551 rtx insns;
7552
7553 if (!got_symbol)
7554 {
7555 got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
7556 SYMBOL_REF_FLAGS (got_symbol) = SYMBOL_FLAG_LOCAL;
7557 }
7558
7559 start_sequence ();
7560
7561 if (TARGET_CPU_ZARCH)
7562 {
7563 emit_move_insn (pic_offset_table_rtx, got_symbol);
7564 }
7565 else
7566 {
7567 rtx offset;
7568
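/* Without LARL, load the GOT address in two steps: fetch the
   literal-pool-relative offset of _GLOBAL_OFFSET_TABLE_ from the
   constant pool, then add the literal pool base (LTREL_BASE).  */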
7569 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got_symbol),
7570 UNSPEC_LTREL_OFFSET);
7571 offset = gen_rtx_CONST (Pmode, offset);
7572 offset = force_const_mem (Pmode, offset);
7573
7574 emit_move_insn (pic_offset_table_rtx, offset);
7575
7576 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (offset, 0)),
7577 UNSPEC_LTREL_BASE);
7578 offset = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, offset);
7579
7580 emit_move_insn (pic_offset_table_rtx, offset);
7581 }
7582
7583 insns = get_insns ();
7584 end_sequence ();
7585 return insns;
7586 }
7587
7588 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
7589 and the change to the stack pointer. */
7590
7591 static void
7592 s390_emit_stack_tie (void)
7593 {
7594 rtx mem = gen_frame_mem (BLKmode,
7595 gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
7596
7597 emit_insn (gen_stack_tie (mem));
7598 }
7599
7600 /* Expand the prologue into a bunch of separate insns. */
7601
7602 void
7603 s390_emit_prologue (void)
7604 {
7605 rtx insn, addr;
7606 rtx temp_reg;
7607 int i;
7608 int offset;
7609 int next_fpr = 0;
7610
7611 /* Complete frame layout. */
7612
7613 s390_update_frame_layout ();
7614
7615 /* Annotate all constant pool references to let the scheduler know
7616 they implicitly use the base register. */
7617
7618 push_topmost_sequence ();
7619
7620 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7621 if (INSN_P (insn))
7622 {
7623 annotate_constant_pool_refs (&PATTERN (insn));
7624 df_insn_rescan (insn);
7625 }
7626
7627 pop_topmost_sequence ();
7628
7629 /* Choose the best register for temporary use within the prologue.
7630 See below for why TPF must use register 1. */
7631
7632 if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
7633 && !current_function_is_leaf
7634 && !TARGET_TPF_PROFILING)
7635 temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
7636 else
7637 temp_reg = gen_rtx_REG (Pmode, 1);
7638
7639 /* Save call saved gprs. */
7640 if (cfun_frame_layout.first_save_gpr != -1)
7641 {
7642 insn = save_gprs (stack_pointer_rtx,
7643 cfun_frame_layout.gprs_offset +
7644 UNITS_PER_WORD * (cfun_frame_layout.first_save_gpr
7645 - cfun_frame_layout.first_save_gpr_slot),
7646 cfun_frame_layout.first_save_gpr,
7647 cfun_frame_layout.last_save_gpr);
7648 emit_insn (insn);
7649 }
7650
7651 /* Dummy insn to mark literal pool slot. */
7652
7653 if (cfun->machine->base_reg)
7654 emit_insn (gen_main_pool (cfun->machine->base_reg));
7655
7656 offset = cfun_frame_layout.f0_offset;
7657
7658 /* Save f0 and f2. */
7659 for (i = 0; i < 2; i++)
7660 {
7661 if (cfun_fpr_bit_p (i))
7662 {
7663 save_fpr (stack_pointer_rtx, offset, i + 16);
7664 offset += 8;
7665 }
7666 else if (!TARGET_PACKED_STACK)
7667 offset += 8;
7668 }
7669
7670 /* Save f4 and f6. */
7671 offset = cfun_frame_layout.f4_offset;
7672 for (i = 2; i < 4; i++)
7673 {
7674 if (cfun_fpr_bit_p (i))
7675 {
7676 insn = save_fpr (stack_pointer_rtx, offset, i + 16);
7677 offset += 8;
7678
7679 /* If f4 and f6 are call clobbered they are saved due to stdargs and
7680 therefore are not frame related. */
7681 if (!call_really_used_regs[i + 16])
7682 RTX_FRAME_RELATED_P (insn) = 1;
7683 }
7684 else if (!TARGET_PACKED_STACK)
7685 offset += 8;
7686 }
7687
7688 if (TARGET_PACKED_STACK
7689 && cfun_save_high_fprs_p
7690 && cfun_frame_layout.f8_offset + cfun_frame_layout.high_fprs * 8 > 0)
7691 {
7692 offset = (cfun_frame_layout.f8_offset
7693 + (cfun_frame_layout.high_fprs - 1) * 8);
7694
7695 for (i = 15; i > 7 && offset >= 0; i--)
7696 if (cfun_fpr_bit_p (i))
7697 {
7698 insn = save_fpr (stack_pointer_rtx, offset, i + 16);
7699
7700 RTX_FRAME_RELATED_P (insn) = 1;
7701 offset -= 8;
7702 }
7703 if (offset >= cfun_frame_layout.f8_offset)
7704 next_fpr = i + 16;
7705 }
7706
7707 if (!TARGET_PACKED_STACK)
7708 next_fpr = cfun_save_high_fprs_p ? 31 : 0;
7709
7710 /* Decrement stack pointer. */
7711
7712 if (cfun_frame_layout.frame_size > 0)
7713 {
7714 rtx frame_off = GEN_INT (-cfun_frame_layout.frame_size);
7715 rtx real_frame_off;
7716
7717 if (s390_stack_size)
7718 {
7719 HOST_WIDE_INT stack_guard;
7720
7721 if (s390_stack_guard)
7722 stack_guard = s390_stack_guard;
7723 else
7724 {
7725 /* If no value for the stack guard is provided, the smallest power
7726 of 2 larger than the current frame size is chosen. */
7727 stack_guard = 1;
7728 while (stack_guard < cfun_frame_layout.frame_size)
7729 stack_guard <<= 1;
7730 }
7731
7732 if (cfun_frame_layout.frame_size >= s390_stack_size)
7733 {
7734 warning (0, "frame size of function %qs is "
7735 HOST_WIDE_INT_PRINT_DEC
7736 " bytes exceeding user provided stack limit of "
7737 HOST_WIDE_INT_PRINT_DEC " bytes. "
7738 "An unconditional trap is added.",
7739 current_function_name(), cfun_frame_layout.frame_size,
7740 s390_stack_size);
7741 emit_insn (gen_trap ());
7742 }
7743 else
7744 {
7745 HOST_WIDE_INT stack_check_mask = ((s390_stack_size - 1)
7746 & ~(stack_guard - 1));
7747 rtx t = gen_rtx_AND (Pmode, stack_pointer_rtx,
7748 GEN_INT (stack_check_mask));
7749 if (TARGET_64BIT)
7750 gen_cmpdi (t, const0_rtx);
7751 else
7752 gen_cmpsi (t, const0_rtx);
7753
7754 emit_insn (gen_conditional_trap (gen_rtx_EQ (CCmode,
7755 gen_rtx_REG (CCmode,
7756 CC_REGNUM),
7757 const0_rtx),
7758 const0_rtx));
7759 }
7760 }
7761
7762 if (s390_warn_framesize > 0
7763 && cfun_frame_layout.frame_size >= s390_warn_framesize)
7764 warning (0, "frame size of %qs is " HOST_WIDE_INT_PRINT_DEC " bytes",
7765 current_function_name (), cfun_frame_layout.frame_size);
7766
7767 if (s390_warn_dynamicstack_p && cfun->calls_alloca)
7768 warning (0, "%qs uses dynamic stack allocation", current_function_name ());
7769
7770 /* Save incoming stack pointer into temp reg. */
7771 if (TARGET_BACKCHAIN || next_fpr)
7772 insn = emit_insn (gen_move_insn (temp_reg, stack_pointer_rtx));
7773
7774 /* Subtract frame size from stack pointer. */
7775
7776 if (DISP_IN_RANGE (INTVAL (frame_off)))
7777 {
7778 insn = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7779 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
7780 frame_off));
7781 insn = emit_insn (insn);
7782 }
7783 else
7784 {
7785 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
7786 frame_off = force_const_mem (Pmode, frame_off);
7787
7788 insn = emit_insn (gen_add2_insn (stack_pointer_rtx, frame_off));
7789 annotate_constant_pool_refs (&PATTERN (insn));
7790 }
7791
7792 RTX_FRAME_RELATED_P (insn) = 1;
7793 real_frame_off = GEN_INT (-cfun_frame_layout.frame_size);
7794 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
7795 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7796 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
7797 real_frame_off)));
7798
7799 /* Set backchain. */
7800
7801 if (TARGET_BACKCHAIN)
7802 {
7803 if (cfun_frame_layout.backchain_offset)
7804 addr = gen_rtx_MEM (Pmode,
7805 plus_constant (stack_pointer_rtx,
7806 cfun_frame_layout.backchain_offset));
7807 else
7808 addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
7809 set_mem_alias_set (addr, get_frame_alias_set ());
7810 insn = emit_insn (gen_move_insn (addr, temp_reg));
7811 }
7812
7813 /* If we support asynchronous exceptions (e.g. for Java),
7814 we need to make sure the backchain pointer is set up
7815 before any possibly trapping memory access. */
7816
7817 if (TARGET_BACKCHAIN && flag_non_call_exceptions)
7818 {
7819 addr = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode));
7820 emit_clobber (addr);
7821 }
7822 }
7823
7824 /* Save fprs 8 - 15 (64 bit ABI). */
7825
7826 if (cfun_save_high_fprs_p && next_fpr)
7827 {
7828 /* If the stack might be accessed through a different register
7829 we have to make sure that the stack pointer decrement is not
7830 moved below the use of the stack slots. */
7831 s390_emit_stack_tie ();
7832
7833 insn = emit_insn (gen_add2_insn (temp_reg,
7834 GEN_INT (cfun_frame_layout.f8_offset)));
7835
7836 offset = 0;
7837
7838 for (i = 24; i <= next_fpr; i++)
7839 if (cfun_fpr_bit_p (i - 16))
7840 {
7841 rtx addr = plus_constant (stack_pointer_rtx,
7842 cfun_frame_layout.frame_size
7843 + cfun_frame_layout.f8_offset
7844 + offset);
7845
7846 insn = save_fpr (temp_reg, offset, i);
7847 offset += 8;
7848 RTX_FRAME_RELATED_P (insn) = 1;
7849 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
7850 gen_rtx_SET (VOIDmode,
7851 gen_rtx_MEM (DFmode, addr),
7852 gen_rtx_REG (DFmode, i)));
7853 }
7854 }
7855
7856 /* Set frame pointer, if needed. */
7857
7858 if (frame_pointer_needed)
7859 {
7860 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
7861 RTX_FRAME_RELATED_P (insn) = 1;
7862 }
7863
7864 /* Set up got pointer, if needed. */
7865
7866 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
7867 {
7868 rtx insns = s390_load_got ();
7869
7870 for (insn = insns; insn; insn = NEXT_INSN (insn))
7871 annotate_constant_pool_refs (&PATTERN (insn));
7872
7873 emit_insn (insns);
7874 }
7875
7876 if (TARGET_TPF_PROFILING)
7877 {
7878 /* Generate a BAS instruction to serve as a function
7879 entry intercept to facilitate the use of tracing
7880 algorithms located at the branch target. */
7881 emit_insn (gen_prologue_tpf ());
7882
7883 /* Emit a blockage here so that all code
7884 lies between the profiling mechanisms. */
7885 emit_insn (gen_blockage ());
7886 }
7887 }
7888
7889 /* Expand the epilogue into a bunch of separate insns. */
7890
7891 void
7892 s390_emit_epilogue (bool sibcall)
7893 {
7894 rtx frame_pointer, return_reg;
7895 int area_bottom, area_top, offset = 0;
7896 int next_offset;
7897 rtvec p;
7898 int i;
7899
7900 if (TARGET_TPF_PROFILING)
7901 {
7902
7903 /* Generate a BAS instruction to serve as a function
7904 exit intercept to facilitate the use of tracing
7905 algorithms located at the branch target. */
7906
7907 /* Emit a blockage here so that all code
7908 lies between the profiling mechanisms. */
7909 emit_insn (gen_blockage ());
7910
7911 emit_insn (gen_epilogue_tpf ());
7912 }
7913
7914 /* Check whether to use frame or stack pointer for restore. */
7915
7916 frame_pointer = (frame_pointer_needed
7917 ? hard_frame_pointer_rtx : stack_pointer_rtx);
7918
7919 s390_frame_area (&area_bottom, &area_top);
7920
7921 /* Check whether we can access the register save area.
7922 If not, increment the frame pointer as required. */
7923
7924 if (area_top <= area_bottom)
7925 {
7926 /* Nothing to restore. */
7927 }
7928 else if (DISP_IN_RANGE (cfun_frame_layout.frame_size + area_bottom)
7929 && DISP_IN_RANGE (cfun_frame_layout.frame_size + area_top - 1))
7930 {
7931 /* Area is in range. */
7932 offset = cfun_frame_layout.frame_size;
7933 }
7934 else
7935 {
7936 rtx insn, frame_off;
7937
7938 offset = area_bottom < 0 ? -area_bottom : 0;
7939 frame_off = GEN_INT (cfun_frame_layout.frame_size - offset);
7940
7941 if (DISP_IN_RANGE (INTVAL (frame_off)))
7942 {
7943 insn = gen_rtx_SET (VOIDmode, frame_pointer,
7944 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
7945 insn = emit_insn (insn);
7946 }
7947 else
7948 {
7949 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
7950 frame_off = force_const_mem (Pmode, frame_off);
7951
7952 insn = emit_insn (gen_add2_insn (frame_pointer, frame_off));
7953 annotate_constant_pool_refs (&PATTERN (insn));
7954 }
7955 }
7956
7957 /* Restore call saved fprs. */
7958
7959 if (TARGET_64BIT)
7960 {
7961 if (cfun_save_high_fprs_p)
7962 {
7963 next_offset = cfun_frame_layout.f8_offset;
7964 for (i = 24; i < 32; i++)
7965 {
7966 if (cfun_fpr_bit_p (i - 16))
7967 {
7968 restore_fpr (frame_pointer,
7969 offset + next_offset, i);
7970 next_offset += 8;
7971 }
7972 }
7973 }
7974
7975 }
7976 else
7977 {
7978 next_offset = cfun_frame_layout.f4_offset;
7979 for (i = 18; i < 20; i++)
7980 {
7981 if (cfun_fpr_bit_p (i - 16))
7982 {
7983 restore_fpr (frame_pointer,
7984 offset + next_offset, i);
7985 next_offset += 8;
7986 }
7987 else if (!TARGET_PACKED_STACK)
7988 next_offset += 8;
7989 }
7990
7991 }
7992
7993 /* Return register. */
7994
7995 return_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
7996
7997 /* Restore call saved gprs. */
7998
7999 if (cfun_frame_layout.first_restore_gpr != -1)
8000 {
8001 rtx insn, addr;
8002 int i;
8003
8004 /* Check for global registers and save them
8005 to the stack locations from where they get restored. */
8006
8007 for (i = cfun_frame_layout.first_restore_gpr;
8008 i <= cfun_frame_layout.last_restore_gpr;
8009 i++)
8010 {
8011 /* These registers are special and need to be
8012 restored in any case. */
8013 if (i == STACK_POINTER_REGNUM
8014 || i == RETURN_REGNUM
8015 || i == BASE_REGNUM
8016 || (flag_pic && i == (int)PIC_OFFSET_TABLE_REGNUM))
8017 continue;
8018
8019 if (global_regs[i])
8020 {
8021 addr = plus_constant (frame_pointer,
8022 offset + cfun_frame_layout.gprs_offset
8023 + (i - cfun_frame_layout.first_save_gpr_slot)
8024 * UNITS_PER_WORD);
8025 addr = gen_rtx_MEM (Pmode, addr);
8026 set_mem_alias_set (addr, get_frame_alias_set ());
8027 emit_move_insn (addr, gen_rtx_REG (Pmode, i));
8028 }
8029 }
8030
8031 if (! sibcall)
8032 {
8033 /* Fetch the return address from the stack before the load multiple;
8034 this helps scheduling. */
8035
8036 if (cfun_frame_layout.save_return_addr_p
8037 || (cfun_frame_layout.first_restore_gpr < BASE_REGNUM
8038 && cfun_frame_layout.last_restore_gpr > RETURN_REGNUM))
8039 {
8040 int return_regnum = find_unused_clobbered_reg();
8041 if (!return_regnum)
8042 return_regnum = 4;
8043 return_reg = gen_rtx_REG (Pmode, return_regnum);
8044
8045 addr = plus_constant (frame_pointer,
8046 offset + cfun_frame_layout.gprs_offset
8047 + (RETURN_REGNUM
8048 - cfun_frame_layout.first_save_gpr_slot)
8049 * UNITS_PER_WORD);
8050 addr = gen_rtx_MEM (Pmode, addr);
8051 set_mem_alias_set (addr, get_frame_alias_set ());
8052 emit_move_insn (return_reg, addr);
8053 }
8054 }
8055
8056 insn = restore_gprs (frame_pointer,
8057 offset + cfun_frame_layout.gprs_offset
8058 + (cfun_frame_layout.first_restore_gpr
8059 - cfun_frame_layout.first_save_gpr_slot)
8060 * UNITS_PER_WORD,
8061 cfun_frame_layout.first_restore_gpr,
8062 cfun_frame_layout.last_restore_gpr);
8063 emit_insn (insn);
8064 }
8065
8066 if (! sibcall)
8067 {
8068
8069 /* Return to caller. */
8070
8071 p = rtvec_alloc (2);
8072
8073 RTVEC_ELT (p, 0) = gen_rtx_RETURN (VOIDmode);
8074 RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode, return_reg);
8075 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
8076 }
8077 }
8078
8079
8080 /* Return the size in bytes of a function argument of
8081 type TYPE and/or mode MODE. At least one of TYPE or
8082 MODE must be specified. */
8083
8084 static int
8085 s390_function_arg_size (enum machine_mode mode, const_tree type)
8086 {
8087 if (type)
8088 return int_size_in_bytes (type);
8089
8090 /* No type info available for some library calls ... */
8091 if (mode != BLKmode)
8092 return GET_MODE_SIZE (mode);
8093
8094 /* If we have neither type nor mode, abort. */
8095 gcc_unreachable ();
8096 }
8097
8098 /* Return true if a function argument of type TYPE and mode MODE
8099 is to be passed in a floating-point register, if available. */
8100
8101 static bool
8102 s390_function_arg_float (enum machine_mode mode, tree type)
8103 {
8104 int size = s390_function_arg_size (mode, type);
8105 if (size > 8)
8106 return false;
8107
8108 /* Soft-float changes the ABI: no floating-point registers are used. */
8109 if (TARGET_SOFT_FLOAT)
8110 return false;
8111
8112 /* No type info available for some library calls ... */
8113 if (!type)
8114 return mode == SFmode || mode == DFmode || mode == SDmode || mode == DDmode;
8115
8116 /* The ABI says that record types with a single member are treated
8117 just like that member would be. */
8118 while (TREE_CODE (type) == RECORD_TYPE)
8119 {
8120 tree field, single = NULL_TREE;
8121
8122 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
8123 {
8124 if (TREE_CODE (field) != FIELD_DECL)
8125 continue;
8126
8127 if (single == NULL_TREE)
8128 single = TREE_TYPE (field);
8129 else
8130 return false;
8131 }
8132
8133 if (single == NULL_TREE)
8134 return false;
8135 else
8136 type = single;
8137 }
8138
8139 return TREE_CODE (type) == REAL_TYPE;
8140 }
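/* For example, under the single-member record rule above a value of type

     struct wrap { double d; };

   is treated like a plain double and may go into a floating-point
   register, whereas

     struct pair { float f; int i; };

   has two members and therefore does not.  (Both types are made up for
   this comment and are not used elsewhere.)  */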
8141
8142 /* Return true if a function argument of type TYPE and mode MODE
8143 is to be passed in an integer register, or a pair of integer
8144 registers, if available. */
8145
8146 static bool
8147 s390_function_arg_integer (enum machine_mode mode, tree type)
8148 {
8149 int size = s390_function_arg_size (mode, type);
8150 if (size > 8)
8151 return false;
8152
8153 /* No type info available for some library calls ... */
8154 if (!type)
8155 return GET_MODE_CLASS (mode) == MODE_INT
8156 || (TARGET_SOFT_FLOAT && SCALAR_FLOAT_MODE_P (mode));
8157
8158 /* We accept small integral (and similar) types. */
8159 if (INTEGRAL_TYPE_P (type)
8160 || POINTER_TYPE_P (type)
8161 || TREE_CODE (type) == OFFSET_TYPE
8162 || (TARGET_SOFT_FLOAT && TREE_CODE (type) == REAL_TYPE))
8163 return true;
8164
8165 /* We also accept structs of size 1, 2, 4, 8 that are not
8166 passed in floating-point registers. */
8167 if (AGGREGATE_TYPE_P (type)
8168 && exact_log2 (size) >= 0
8169 && !s390_function_arg_float (mode, type))
8170 return true;
8171
8172 return false;
8173 }
8174
8175 /* Return 1 if a function argument of type TYPE and mode MODE
8176 is to be passed by reference. The ABI specifies that only
8177 structures of size 1, 2, 4, or 8 bytes are passed by value,
8178 all other structures (and complex numbers) are passed by
8179 reference. */
8180
8181 static bool
8182 s390_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
8183 enum machine_mode mode, const_tree type,
8184 bool named ATTRIBUTE_UNUSED)
8185 {
8186 int size = s390_function_arg_size (mode, type);
8187 if (size > 8)
8188 return true;
8189
8190 if (type)
8191 {
8192 if (AGGREGATE_TYPE_P (type) && exact_log2 (size) < 0)
8193 return 1;
8194
8195 if (TREE_CODE (type) == COMPLEX_TYPE
8196 || TREE_CODE (type) == VECTOR_TYPE)
8197 return 1;
8198 }
8199
8200 return 0;
8201 }
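/* A few worked examples of the rule above (all types made up for this
   comment only):

     struct { int i; }        size 4, power of two         -> by value
     struct { char c[3]; }    size 3, not a power of two   -> by reference
     struct { double d[2]; }  size 16, larger than 8 bytes -> by reference
     _Complex double          complex                      -> by reference  */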
8202
8203 /* Update the data in CUM to advance over an argument of mode MODE and
8204 data type TYPE. (TYPE is null for libcalls where that information
8205 may not be available). The boolean NAMED specifies whether the
8206 argument is a named argument (as opposed to an unnamed argument
8207 matching an ellipsis). */
8208
8209 void
8210 s390_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
8211 tree type, int named ATTRIBUTE_UNUSED)
8212 {
8213 if (s390_function_arg_float (mode, type))
8214 {
8215 cum->fprs += 1;
8216 }
8217 else if (s390_function_arg_integer (mode, type))
8218 {
8219 int size = s390_function_arg_size (mode, type);
8220 cum->gprs += ((size + UNITS_PER_WORD-1) / UNITS_PER_WORD);
8221 }
8222 else
8223 gcc_unreachable ();
8224 }
8225
8226 /* Define where to put the arguments to a function.
8227 Value is zero to push the argument on the stack,
8228 or a hard register in which to store the argument.
8229
8230 MODE is the argument's machine mode.
8231 TYPE is the data type of the argument (as a tree).
8232 This is null for libcalls where that information may
8233 not be available.
8234 CUM is a variable of type CUMULATIVE_ARGS which gives info about
8235 the preceding args and about the function being called.
8236 NAMED is nonzero if this argument is a named parameter
8237 (otherwise it is an extra parameter matching an ellipsis).
8238
8239 On S/390, we use general purpose registers 2 through 6 to
8240 pass integer, pointer, and certain structure arguments, and
8241 floating point registers 0 and 2 (0, 2, 4, and 6 on 64-bit)
8242 to pass floating point arguments. All remaining arguments
8243 are pushed to the stack. */
8244
8245 rtx
8246 s390_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, tree type,
8247 int named ATTRIBUTE_UNUSED)
8248 {
8249 if (s390_function_arg_float (mode, type))
8250 {
8251 if (cum->fprs + 1 > FP_ARG_NUM_REG)
8252 return 0;
8253 else
8254 return gen_rtx_REG (mode, cum->fprs + 16);
8255 }
8256 else if (s390_function_arg_integer (mode, type))
8257 {
8258 int size = s390_function_arg_size (mode, type);
8259 int n_gprs = (size + UNITS_PER_WORD-1) / UNITS_PER_WORD;
8260
8261 if (cum->gprs + n_gprs > GP_ARG_NUM_REG)
8262 return 0;
8263 else
8264 return gen_rtx_REG (mode, cum->gprs + 2);
8265 }
8266
8267 /* After the real arguments, expand_call calls us once again
8268 with a void_type_node type. Whatever we return here is
8269 passed as operand 2 to the call expanders.
8270
8271 We don't need this feature ... */
8272 else if (type == void_type_node)
8273 return const0_rtx;
8274
8275 gcc_unreachable ();
8276 }
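/* To illustrate the scheme above (the prototype is made up for this
   comment): for

     void f (int a, double b, int c, double d);

   the GPR and FPR counters advance independently, so A is passed in
   %r2, B in %f0, C in %r3 and D in %f2.  Once the five argument GPRs
   or the available argument FPRs are exhausted, further arguments are
   passed on the stack.  */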
8277
8278 /* Return true if return values of type TYPE should be returned
8279 in a memory buffer whose address is passed by the caller as
8280 hidden first argument. */
8281
8282 static bool
8283 s390_return_in_memory (const_tree type, const_tree fundecl ATTRIBUTE_UNUSED)
8284 {
8285 /* We accept small integral (and similar) types. */
8286 if (INTEGRAL_TYPE_P (type)
8287 || POINTER_TYPE_P (type)
8288 || TREE_CODE (type) == OFFSET_TYPE
8289 || TREE_CODE (type) == REAL_TYPE)
8290 return int_size_in_bytes (type) > 8;
8291
8292 /* Aggregates and similar constructs are always returned
8293 in memory. */
8294 if (AGGREGATE_TYPE_P (type)
8295 || TREE_CODE (type) == COMPLEX_TYPE
8296 || TREE_CODE (type) == VECTOR_TYPE)
8297 return true;
8298
8299 /* ??? We get called on all sorts of random stuff from
8300 aggregate_value_p. We can't abort, but it's not clear
8301 what's safe to return. Pretend it's a struct I guess. */
8302 return true;
8303 }
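/* Note the asymmetry with argument passing: even a small aggregate
   such as  struct { int i; }  is always returned in memory, whereas
   scalars of up to 8 bytes (e.g. long, double, pointers) are returned
   in registers.  (The type is illustrative only.)  */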
8304
8305 /* Define where to return a (scalar) value of type TYPE.
8306 If TYPE is null, define where to return a (scalar)
8307 value of mode MODE from a libcall. */
8308
8309 rtx
8310 s390_function_value (const_tree type, enum machine_mode mode)
8311 {
8312 if (type)
8313 {
8314 int unsignedp = TYPE_UNSIGNED (type);
8315 mode = promote_mode (type, TYPE_MODE (type), &unsignedp, 1);
8316 }
8317
8318 gcc_assert (GET_MODE_CLASS (mode) == MODE_INT || SCALAR_FLOAT_MODE_P (mode));
8319 gcc_assert (GET_MODE_SIZE (mode) <= 8);
8320
8321 if (TARGET_HARD_FLOAT && SCALAR_FLOAT_MODE_P (mode))
8322 return gen_rtx_REG (mode, 16);
8323 else
8324 return gen_rtx_REG (mode, 2);
8325 }
8326
8327
8328 /* Create and return the va_list datatype.
8329
8330 On S/390, va_list is an array type equivalent to
8331
8332 typedef struct __va_list_tag
8333 {
8334 long __gpr;
8335 long __fpr;
8336 void *__overflow_arg_area;
8337 void *__reg_save_area;
8338 } va_list[1];
8339
8340 where __gpr and __fpr hold the number of general purpose
8341 or floating point arguments used up to now, respectively,
8342 __overflow_arg_area points to the stack location of the
8343 next argument passed on the stack, and __reg_save_area
8344 always points to the start of the register area in the
8345 call frame of the current function. The function prologue
8346 saves all registers used for argument passing into this
8347 area if the function uses variable arguments. */
8348
8349 static tree
8350 s390_build_builtin_va_list (void)
8351 {
8352 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
8353
8354 record = lang_hooks.types.make_type (RECORD_TYPE);
8355
8356 type_decl =
8357 build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
8358
8359 f_gpr = build_decl (FIELD_DECL, get_identifier ("__gpr"),
8360 long_integer_type_node);
8361 f_fpr = build_decl (FIELD_DECL, get_identifier ("__fpr"),
8362 long_integer_type_node);
8363 f_ovf = build_decl (FIELD_DECL, get_identifier ("__overflow_arg_area"),
8364 ptr_type_node);
8365 f_sav = build_decl (FIELD_DECL, get_identifier ("__reg_save_area"),
8366 ptr_type_node);
8367
8368 va_list_gpr_counter_field = f_gpr;
8369 va_list_fpr_counter_field = f_fpr;
8370
8371 DECL_FIELD_CONTEXT (f_gpr) = record;
8372 DECL_FIELD_CONTEXT (f_fpr) = record;
8373 DECL_FIELD_CONTEXT (f_ovf) = record;
8374 DECL_FIELD_CONTEXT (f_sav) = record;
8375
8376 TREE_CHAIN (record) = type_decl;
8377 TYPE_NAME (record) = type_decl;
8378 TYPE_FIELDS (record) = f_gpr;
8379 TREE_CHAIN (f_gpr) = f_fpr;
8380 TREE_CHAIN (f_fpr) = f_ovf;
8381 TREE_CHAIN (f_ovf) = f_sav;
8382
8383 layout_type (record);
8384
8385 /* The correct type is an array type of one element. */
8386 return build_array_type (record, build_index_type (size_zero_node));
8387 }
8388
8389 /* Implement va_start by filling the va_list structure VALIST.
8390 STDARG_P is always true, and ignored.
8391 NEXTARG points to the first anonymous stack argument.
8392
8393 The following global variables are used to initialize
8394 the va_list structure:
8395
8396 crtl->args.info:
8397 holds number of gprs and fprs used for named arguments.
8398 crtl->args.arg_offset_rtx:
8399 holds the offset of the first anonymous stack argument
8400 (relative to the virtual arg pointer). */
8401
8402 static void
8403 s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
8404 {
8405 HOST_WIDE_INT n_gpr, n_fpr;
8406 int off;
8407 tree f_gpr, f_fpr, f_ovf, f_sav;
8408 tree gpr, fpr, ovf, sav, t;
8409
8410 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
8411 f_fpr = TREE_CHAIN (f_gpr);
8412 f_ovf = TREE_CHAIN (f_fpr);
8413 f_sav = TREE_CHAIN (f_ovf);
8414
8415 valist = build_va_arg_indirect_ref (valist);
8416 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
8417 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
8418 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
8419 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
8420
8421 /* Count number of gp and fp argument registers used. */
8422
8423 n_gpr = crtl->args.info.gprs;
8424 n_fpr = crtl->args.info.fprs;
8425
8426 if (cfun->va_list_gpr_size)
8427 {
8428 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
8429 build_int_cst (NULL_TREE, n_gpr));
8430 TREE_SIDE_EFFECTS (t) = 1;
8431 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8432 }
8433
8434 if (cfun->va_list_fpr_size)
8435 {
8436 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
8437 build_int_cst (NULL_TREE, n_fpr));
8438 TREE_SIDE_EFFECTS (t) = 1;
8439 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8440 }
8441
8442 /* Find the overflow area. */
8443 if (n_gpr + cfun->va_list_gpr_size > GP_ARG_NUM_REG
8444 || n_fpr + cfun->va_list_fpr_size > FP_ARG_NUM_REG)
8445 {
8446 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
8447
8448 off = INTVAL (crtl->args.arg_offset_rtx);
8449 off = off < 0 ? 0 : off;
8450 if (TARGET_DEBUG_ARG)
8451 fprintf (stderr, "va_start: n_gpr = %d, n_fpr = %d off %d\n",
8452 (int)n_gpr, (int)n_fpr, off);
8453
8454 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovf), t, size_int (off));
8455
8456 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
8457 TREE_SIDE_EFFECTS (t) = 1;
8458 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8459 }
8460
8461 /* Find the register save area. */
8462 if ((cfun->va_list_gpr_size && n_gpr < GP_ARG_NUM_REG)
8463 || (cfun->va_list_fpr_size && n_fpr < FP_ARG_NUM_REG))
8464 {
8465 t = make_tree (TREE_TYPE (sav), return_address_pointer_rtx);
8466 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (sav), t,
8467 size_int (-RETURN_REGNUM * UNITS_PER_WORD));
8468
8469 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
8470 TREE_SIDE_EFFECTS (t) = 1;
8471 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8472 }
8473 }
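/* For instance, in a function declared as

     int f (int a, ...);

   one GPR is consumed by the named argument, so va_start sets __gpr
   to 1 and __fpr to 0; __overflow_arg_area is made to point at the
   first anonymous argument passed on the stack, and __reg_save_area
   at the register save area of the current frame.  (The prototype is
   illustrative only.)  */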
8474
8475 /* Implement va_arg by updating the va_list structure
8476 VALIST as required to retrieve an argument of type
8477 TYPE, and returning that argument.
8478
8479 Generates code equivalent to:
8480
8481 if (integral value) {
8482 if (size <= 4 && args.gpr < 5 ||
8483 size > 4 && args.gpr < 4 )
8484 ret = args.reg_save_area[args.gpr+8]
8485 else
8486 ret = *args.overflow_arg_area++;
8487 } else if (float value) {
8488 if (args.fpr < 2)
8489 ret = args.reg_save_area[args.fpr+64]
8490 else
8491 ret = *args.overflow_arg_area++;
8492 } else if (aggregate value) {
8493 if (args.gpr < 5)
8494 ret = *args.reg_save_area[args.gpr]
8495 else
8496 ret = **args.overflow_arg_area++;
8497 } */
8498
8499 static tree
8500 s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
8501 gimple_seq *post_p ATTRIBUTE_UNUSED)
8502 {
8503 tree f_gpr, f_fpr, f_ovf, f_sav;
8504 tree gpr, fpr, ovf, sav, reg, t, u;
8505 int indirect_p, size, n_reg, sav_ofs, sav_scale, max_reg;
8506 tree lab_false, lab_over, addr;
8507
8508 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
8509 f_fpr = TREE_CHAIN (f_gpr);
8510 f_ovf = TREE_CHAIN (f_fpr);
8511 f_sav = TREE_CHAIN (f_ovf);
8512
8513 valist = build_va_arg_indirect_ref (valist);
8514 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
8515 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
8516 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
8517
8518 /* The tree for args* cannot be shared between gpr/fpr and ovf since
8519 both appear on a lhs. */
8520 valist = unshare_expr (valist);
8521 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
8522
8523 size = int_size_in_bytes (type);
8524
8525 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
8526 {
8527 if (TARGET_DEBUG_ARG)
8528 {
8529 fprintf (stderr, "va_arg: aggregate type");
8530 debug_tree (type);
8531 }
8532
8533 /* Aggregates are passed by reference. */
8534 indirect_p = 1;
8535 reg = gpr;
8536 n_reg = 1;
8537
8538 /* Kernel stack layout on 31 bit: it is assumed here that no padding
8539 will be added by s390_frame_info because for va_args an even
8540 number of gprs always has to be saved (r15-r2 = 14 regs). */
8541 sav_ofs = 2 * UNITS_PER_WORD;
8542 sav_scale = UNITS_PER_WORD;
8543 size = UNITS_PER_WORD;
8544 max_reg = GP_ARG_NUM_REG - n_reg;
8545 }
8546 else if (s390_function_arg_float (TYPE_MODE (type), type))
8547 {
8548 if (TARGET_DEBUG_ARG)
8549 {
8550 fprintf (stderr, "va_arg: float type");
8551 debug_tree (type);
8552 }
8553
8554 /* FP args go in FP registers, if present. */
8555 indirect_p = 0;
8556 reg = fpr;
8557 n_reg = 1;
8558 sav_ofs = 16 * UNITS_PER_WORD;
8559 sav_scale = 8;
8560 max_reg = FP_ARG_NUM_REG - n_reg;
8561 }
8562 else
8563 {
8564 if (TARGET_DEBUG_ARG)
8565 {
8566 fprintf (stderr, "va_arg: other type");
8567 debug_tree (type);
8568 }
8569
8570 /* Otherwise into GP registers. */
8571 indirect_p = 0;
8572 reg = gpr;
8573 n_reg = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
8574
8575 /* Kernel stack layout on 31 bit: it is assumed here that no padding
8576 will be added by s390_frame_info because for va_args an even
8577 number of gprs always has to be saved (r15-r2 = 14 regs). */
8578 sav_ofs = 2 * UNITS_PER_WORD;
8579
8580 if (size < UNITS_PER_WORD)
8581 sav_ofs += UNITS_PER_WORD - size;
8582
8583 sav_scale = UNITS_PER_WORD;
8584 max_reg = GP_ARG_NUM_REG - n_reg;
8585 }
8586
8587 /* Pull the value out of the saved registers ... */
8588
8589 lab_false = create_artificial_label ();
8590 lab_over = create_artificial_label ();
8591 addr = create_tmp_var (ptr_type_node, "addr");
8592 DECL_POINTER_ALIAS_SET (addr) = get_varargs_alias_set ();
8593
8594 t = fold_convert (TREE_TYPE (reg), size_int (max_reg));
8595 t = build2 (GT_EXPR, boolean_type_node, reg, t);
8596 u = build1 (GOTO_EXPR, void_type_node, lab_false);
8597 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
8598 gimplify_and_add (t, pre_p);
8599
8600 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav,
8601 size_int (sav_ofs));
8602 u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
8603 fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
8604 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t, fold_convert (sizetype, u));
8605
8606 gimplify_assign (addr, t, pre_p);
8607
8608 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
8609
8610 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
8611
8612
8613 /* ... Otherwise out of the overflow area. */
8614
8615 t = ovf;
8616 if (size < UNITS_PER_WORD)
8617 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
8618 size_int (UNITS_PER_WORD - size));
8619
8620 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
8621
8622 gimplify_assign (addr, t, pre_p);
8623
8624 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
8625 size_int (size));
8626 gimplify_assign (ovf, t, pre_p);
8627
8628 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
8629
8630
8631 /* Increment register save count. */
8632
8633 u = build2 (PREINCREMENT_EXPR, TREE_TYPE (reg), reg,
8634 fold_convert (TREE_TYPE (reg), size_int (n_reg)));
8635 gimplify_and_add (u, pre_p);
8636
8637 if (indirect_p)
8638 {
8639 t = build_pointer_type (build_pointer_type (type));
8640 addr = fold_convert (t, addr);
8641 addr = build_va_arg_indirect_ref (addr);
8642 }
8643 else
8644 {
8645 t = build_pointer_type (type);
8646 addr = fold_convert (t, addr);
8647 }
8648
8649 return build_va_arg_indirect_ref (addr);
8650 }
8651
8652
8653 /* Builtins. */
8654
8655 enum s390_builtin
8656 {
8657 S390_BUILTIN_THREAD_POINTER,
8658 S390_BUILTIN_SET_THREAD_POINTER,
8659
8660 S390_BUILTIN_max
8661 };
8662
8663 static unsigned int const code_for_builtin_64[S390_BUILTIN_max] = {
8664 CODE_FOR_get_tp_64,
8665 CODE_FOR_set_tp_64
8666 };
8667
8668 static unsigned int const code_for_builtin_31[S390_BUILTIN_max] = {
8669 CODE_FOR_get_tp_31,
8670 CODE_FOR_set_tp_31
8671 };
8672
8673 static void
8674 s390_init_builtins (void)
8675 {
8676 tree ftype;
8677
8678 ftype = build_function_type (ptr_type_node, void_list_node);
8679 add_builtin_function ("__builtin_thread_pointer", ftype,
8680 S390_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
8681 NULL, NULL_TREE);
8682
8683 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
8684 add_builtin_function ("__builtin_set_thread_pointer", ftype,
8685 S390_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
8686 NULL, NULL_TREE);
8687 }
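/* These builtins are available to user code.  A minimal usage sketch
   (not part of GCC itself):

     void *tp = __builtin_thread_pointer ();
     __builtin_set_thread_pointer (tp);

   i.e. the first reads the current thread pointer and the second
   installs a new one.  */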
8688
8689 /* Expand an expression EXP that calls a built-in function,
8690 with result going to TARGET if that's convenient
8691 (and in mode MODE if that's convenient).
8692 SUBTARGET may be used as the target for computing one of EXP's operands.
8693 IGNORE is nonzero if the value is to be ignored. */
8694
8695 static rtx
8696 s390_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
8697 enum machine_mode mode ATTRIBUTE_UNUSED,
8698 int ignore ATTRIBUTE_UNUSED)
8699 {
8700 #define MAX_ARGS 2
8701
8702 unsigned int const *code_for_builtin =
8703 TARGET_64BIT ? code_for_builtin_64 : code_for_builtin_31;
8704
8705 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
8706 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
8707 enum insn_code icode;
8708 rtx op[MAX_ARGS], pat;
8709 int arity;
8710 bool nonvoid;
8711 tree arg;
8712 call_expr_arg_iterator iter;
8713
8714 if (fcode >= S390_BUILTIN_max)
8715 internal_error ("bad builtin fcode");
8716 icode = code_for_builtin[fcode];
8717 if (icode == 0)
8718 internal_error ("bad builtin fcode");
8719
8720 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
8721
8722 arity = 0;
8723 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
8724 {
8725 const struct insn_operand_data *insn_op;
8726
8727 if (arg == error_mark_node)
8728 return NULL_RTX;
8729 if (arity >= MAX_ARGS)
8730 return NULL_RTX;
8731
8732 insn_op = &insn_data[icode].operand[arity + nonvoid];
8733
8734 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
8735
8736 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
8737 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
8738 arity++;
8739 }
8740
8741 if (nonvoid)
8742 {
8743 enum machine_mode tmode = insn_data[icode].operand[0].mode;
8744 if (!target
8745 || GET_MODE (target) != tmode
8746 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
8747 target = gen_reg_rtx (tmode);
8748 }
8749
8750 switch (arity)
8751 {
8752 case 0:
8753 pat = GEN_FCN (icode) (target);
8754 break;
8755 case 1:
8756 if (nonvoid)
8757 pat = GEN_FCN (icode) (target, op[0]);
8758 else
8759 pat = GEN_FCN (icode) (op[0]);
8760 break;
8761 case 2:
8762 pat = GEN_FCN (icode) (target, op[0], op[1]);
8763 break;
8764 default:
8765 gcc_unreachable ();
8766 }
8767 if (!pat)
8768 return NULL_RTX;
8769 emit_insn (pat);
8770
8771 if (nonvoid)
8772 return target;
8773 else
8774 return const0_rtx;
8775 }
8776
8777
8778 /* Output assembly code for the trampoline template to
8779 stdio stream FILE.
8780
8781 On S/390, we use gpr 1 internally in the trampoline code;
8782 gpr 0 is used to hold the static chain. */
8783
8784 void
8785 s390_trampoline_template (FILE *file)
8786 {
8787 rtx op[2];
8788 op[0] = gen_rtx_REG (Pmode, 0);
8789 op[1] = gen_rtx_REG (Pmode, 1);
8790
8791 if (TARGET_64BIT)
8792 {
8793 output_asm_insn ("basr\t%1,0", op);
8794 output_asm_insn ("lmg\t%0,%1,14(%1)", op);
8795 output_asm_insn ("br\t%1", op);
8796 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 10));
8797 }
8798 else
8799 {
8800 output_asm_insn ("basr\t%1,0", op);
8801 output_asm_insn ("lm\t%0,%1,6(%1)", op);
8802 output_asm_insn ("br\t%1", op);
8803 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 8));
8804 }
8805 }
8806
8807 /* Emit RTL insns to initialize the variable parts of a trampoline.
8808 FNADDR is an RTX for the address of the function's pure code.
8809 CXT is an RTX for the static chain value for the function. */
8810
8811 void
8812 s390_initialize_trampoline (rtx addr, rtx fnaddr, rtx cxt)
8813 {
8814 emit_move_insn (gen_rtx_MEM (Pmode,
8815 memory_address (Pmode,
8816 plus_constant (addr, (TARGET_64BIT ? 16 : 8)))), cxt);
8817 emit_move_insn (gen_rtx_MEM (Pmode,
8818 memory_address (Pmode,
8819 plus_constant (addr, (TARGET_64BIT ? 24 : 12)))), fnaddr);
8820 }
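/* Together with s390_trampoline_template above, the initialized
   trampoline therefore has the following layout (byte offsets;
   sketch only):

     64-bit                          31-bit
       0: basr %r1,0                   0: basr %r1,0
       2: lmg  %r0,%r1,14(%r1)         2: lm   %r0,%r1,6(%r1)
       8: br   %r1                     6: br   %r1
      16: static chain value           8: static chain value
      24: target function address     12: target function address

   so %r0 receives the static chain and %r1 the target address before
   control is transferred.  */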
8821
8822 /* Output assembler code to FILE to increment profiler label # LABELNO
8823 for profiling a function entry. */
8824
8825 void
8826 s390_function_profiler (FILE *file, int labelno)
8827 {
8828 rtx op[7];
8829
8830 char label[128];
8831 ASM_GENERATE_INTERNAL_LABEL (label, "LP", labelno);
8832
8833 fprintf (file, "# function profiler \n");
8834
8835 op[0] = gen_rtx_REG (Pmode, RETURN_REGNUM);
8836 op[1] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
8837 op[1] = gen_rtx_MEM (Pmode, plus_constant (op[1], UNITS_PER_WORD));
8838
8839 op[2] = gen_rtx_REG (Pmode, 1);
8840 op[3] = gen_rtx_SYMBOL_REF (Pmode, label);
8841 SYMBOL_REF_FLAGS (op[3]) = SYMBOL_FLAG_LOCAL;
8842
8843 op[4] = gen_rtx_SYMBOL_REF (Pmode, "_mcount");
8844 if (flag_pic)
8845 {
8846 op[4] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[4]), UNSPEC_PLT);
8847 op[4] = gen_rtx_CONST (Pmode, op[4]);
8848 }
8849
8850 if (TARGET_64BIT)
8851 {
8852 output_asm_insn ("stg\t%0,%1", op);
8853 output_asm_insn ("larl\t%2,%3", op);
8854 output_asm_insn ("brasl\t%0,%4", op);
8855 output_asm_insn ("lg\t%0,%1", op);
8856 }
8857 else if (!flag_pic)
8858 {
8859 op[6] = gen_label_rtx ();
8860
8861 output_asm_insn ("st\t%0,%1", op);
8862 output_asm_insn ("bras\t%2,%l6", op);
8863 output_asm_insn (".long\t%4", op);
8864 output_asm_insn (".long\t%3", op);
8865 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
8866 output_asm_insn ("l\t%0,0(%2)", op);
8867 output_asm_insn ("l\t%2,4(%2)", op);
8868 output_asm_insn ("basr\t%0,%0", op);
8869 output_asm_insn ("l\t%0,%1", op);
8870 }
8871 else
8872 {
8873 op[5] = gen_label_rtx ();
8874 op[6] = gen_label_rtx ();
8875
8876 output_asm_insn ("st\t%0,%1", op);
8877 output_asm_insn ("bras\t%2,%l6", op);
8878 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[5]));
8879 output_asm_insn (".long\t%4-%l5", op);
8880 output_asm_insn (".long\t%3-%l5", op);
8881 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
8882 output_asm_insn ("lr\t%0,%2", op);
8883 output_asm_insn ("a\t%0,0(%2)", op);
8884 output_asm_insn ("a\t%2,4(%2)", op);
8885 output_asm_insn ("basr\t%0,%0", op);
8886 output_asm_insn ("l\t%0,%1", op);
8887 }
8888 }
8889
8890 /* Encode symbol attributes (local vs. global, tls model) of a SYMBOL_REF
8891 into its SYMBOL_REF_FLAGS. */
8892
8893 static void
8894 s390_encode_section_info (tree decl, rtx rtl, int first)
8895 {
8896 default_encode_section_info (decl, rtl, first);
8897
8898 if (TREE_CODE (decl) == VAR_DECL)
8899 {
8900 /* If a variable has a forced alignment to < 2 bytes, mark it
8901 with SYMBOL_FLAG_ALIGN1 to prevent it from being used as a LARL
8902 operand. */
8903 if (DECL_USER_ALIGN (decl) && DECL_ALIGN (decl) < 16)
8904 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_ALIGN1;
8905 if (!DECL_SIZE (decl)
8906 || !DECL_ALIGN (decl)
8907 || !host_integerp (DECL_SIZE (decl), 0)
8908 || (DECL_ALIGN (decl) <= 64
8909 && DECL_ALIGN (decl) != tree_low_cst (DECL_SIZE (decl), 0)))
8910 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
8911 }
8912
8913 /* Literal pool references don't have a decl so they are handled
8914 differently here. We rely on the information in the MEM_ALIGN
8915 entry to decide upon natural alignment. */
8916 if (MEM_P (rtl)
8917 && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
8918 && TREE_CONSTANT_POOL_ADDRESS_P (XEXP (rtl, 0))
8919 && (MEM_ALIGN (rtl) == 0
8920 || MEM_ALIGN (rtl) < GET_MODE_BITSIZE (GET_MODE (rtl))))
8921 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
8922 }
8923
8924 /* Output thunk to FILE that implements a C++ virtual function call (with
8925 multiple inheritance) to FUNCTION. The thunk adjusts the this pointer
8926 by DELTA, and unless VCALL_OFFSET is zero, applies an additional adjustment
8927 stored at VCALL_OFFSET in the vtable whose address is located at offset 0
8928 relative to the resulting this pointer. */
8929
8930 static void
8931 s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
8932 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8933 tree function)
8934 {
8935 rtx op[10];
8936 int nonlocal = 0;
8937
8938 /* Operand 0 is the target function. */
8939 op[0] = XEXP (DECL_RTL (function), 0);
8940 if (flag_pic && !SYMBOL_REF_LOCAL_P (op[0]))
8941 {
8942 nonlocal = 1;
8943 op[0] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[0]),
8944 TARGET_64BIT ? UNSPEC_PLT : UNSPEC_GOT);
8945 op[0] = gen_rtx_CONST (Pmode, op[0]);
8946 }
8947
8948 /* Operand 1 is the 'this' pointer. */
8949 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8950 op[1] = gen_rtx_REG (Pmode, 3);
8951 else
8952 op[1] = gen_rtx_REG (Pmode, 2);
8953
8954 /* Operand 2 is the delta. */
8955 op[2] = GEN_INT (delta);
8956
8957 /* Operand 3 is the vcall_offset. */
8958 op[3] = GEN_INT (vcall_offset);
8959
8960 /* Operand 4 is the temporary register. */
8961 op[4] = gen_rtx_REG (Pmode, 1);
8962
8963 /* Operands 5 to 8 can be used as labels. */
8964 op[5] = NULL_RTX;
8965 op[6] = NULL_RTX;
8966 op[7] = NULL_RTX;
8967 op[8] = NULL_RTX;
8968
8969 /* Operand 9 can be used for temporary register. */
8970 op[9] = NULL_RTX;
8971
8972 /* Generate code. */
8973 if (TARGET_64BIT)
8974 {
8975 /* Setup literal pool pointer if required. */
8976 if ((!DISP_IN_RANGE (delta)
8977 && !CONST_OK_FOR_K (delta)
8978 && !CONST_OK_FOR_Os (delta))
8979 || (!DISP_IN_RANGE (vcall_offset)
8980 && !CONST_OK_FOR_K (vcall_offset)
8981 && !CONST_OK_FOR_Os (vcall_offset)))
8982 {
8983 op[5] = gen_label_rtx ();
8984 output_asm_insn ("larl\t%4,%5", op);
8985 }
8986
8987 /* Add DELTA to this pointer. */
8988 if (delta)
8989 {
8990 if (CONST_OK_FOR_J (delta))
8991 output_asm_insn ("la\t%1,%2(%1)", op);
8992 else if (DISP_IN_RANGE (delta))
8993 output_asm_insn ("lay\t%1,%2(%1)", op);
8994 else if (CONST_OK_FOR_K (delta))
8995 output_asm_insn ("aghi\t%1,%2", op);
8996 else if (CONST_OK_FOR_Os (delta))
8997 output_asm_insn ("agfi\t%1,%2", op);
8998 else
8999 {
9000 op[6] = gen_label_rtx ();
9001 output_asm_insn ("agf\t%1,%6-%5(%4)", op);
9002 }
9003 }
9004
9005 /* Perform vcall adjustment. */
9006 if (vcall_offset)
9007 {
9008 if (DISP_IN_RANGE (vcall_offset))
9009 {
9010 output_asm_insn ("lg\t%4,0(%1)", op);
9011 output_asm_insn ("ag\t%1,%3(%4)", op);
9012 }
9013 else if (CONST_OK_FOR_K (vcall_offset))
9014 {
9015 output_asm_insn ("lghi\t%4,%3", op);
9016 output_asm_insn ("ag\t%4,0(%1)", op);
9017 output_asm_insn ("ag\t%1,0(%4)", op);
9018 }
9019 else if (CONST_OK_FOR_Os (vcall_offset))
9020 {
9021 output_asm_insn ("lgfi\t%4,%3", op);
9022 output_asm_insn ("ag\t%4,0(%1)", op);
9023 output_asm_insn ("ag\t%1,0(%4)", op);
9024 }
9025 else
9026 {
9027 op[7] = gen_label_rtx ();
9028 output_asm_insn ("llgf\t%4,%7-%5(%4)", op);
9029 output_asm_insn ("ag\t%4,0(%1)", op);
9030 output_asm_insn ("ag\t%1,0(%4)", op);
9031 }
9032 }
9033
9034 /* Jump to target. */
9035 output_asm_insn ("jg\t%0", op);
9036
9037 /* Output literal pool if required. */
9038 if (op[5])
9039 {
9040 output_asm_insn (".align\t4", op);
9041 targetm.asm_out.internal_label (file, "L",
9042 CODE_LABEL_NUMBER (op[5]));
9043 }
9044 if (op[6])
9045 {
9046 targetm.asm_out.internal_label (file, "L",
9047 CODE_LABEL_NUMBER (op[6]));
9048 output_asm_insn (".long\t%2", op);
9049 }
9050 if (op[7])
9051 {
9052 targetm.asm_out.internal_label (file, "L",
9053 CODE_LABEL_NUMBER (op[7]));
9054 output_asm_insn (".long\t%3", op);
9055 }
9056 }
9057 else
9058 {
9059 /* Setup base pointer if required. */
9060 if (!vcall_offset
9061 || (!DISP_IN_RANGE (delta)
9062 && !CONST_OK_FOR_K (delta)
9063 && !CONST_OK_FOR_Os (delta))
9064 || (!DISP_IN_RANGE (delta)
9065 && !CONST_OK_FOR_K (vcall_offset)
9066 && !CONST_OK_FOR_Os (vcall_offset)))
9067 {
9068 op[5] = gen_label_rtx ();
9069 output_asm_insn ("basr\t%4,0", op);
9070 targetm.asm_out.internal_label (file, "L",
9071 CODE_LABEL_NUMBER (op[5]));
9072 }
9073
9074 /* Add DELTA to this pointer. */
9075 if (delta)
9076 {
9077 if (CONST_OK_FOR_J (delta))
9078 output_asm_insn ("la\t%1,%2(%1)", op);
9079 else if (DISP_IN_RANGE (delta))
9080 output_asm_insn ("lay\t%1,%2(%1)", op);
9081 else if (CONST_OK_FOR_K (delta))
9082 output_asm_insn ("ahi\t%1,%2", op);
9083 else if (CONST_OK_FOR_Os (delta))
9084 output_asm_insn ("afi\t%1,%2", op);
9085 else
9086 {
9087 op[6] = gen_label_rtx ();
9088 output_asm_insn ("a\t%1,%6-%5(%4)", op);
9089 }
9090 }
9091
9092 /* Perform vcall adjustment. */
9093 if (vcall_offset)
9094 {
9095 if (CONST_OK_FOR_J (vcall_offset))
9096 {
9097 output_asm_insn ("l\t%4,0(%1)", op);
9098 output_asm_insn ("a\t%1,%3(%4)", op);
9099 }
9100 else if (DISP_IN_RANGE (vcall_offset))
9101 {
9102 output_asm_insn ("l\t%4,0(%1)", op);
9103 output_asm_insn ("ay\t%1,%3(%4)", op);
9104 }
9105 else if (CONST_OK_FOR_K (vcall_offset))
9106 {
9107 output_asm_insn ("lhi\t%4,%3", op);
9108 output_asm_insn ("a\t%4,0(%1)", op);
9109 output_asm_insn ("a\t%1,0(%4)", op);
9110 }
9111 else if (CONST_OK_FOR_Os (vcall_offset))
9112 {
9113 output_asm_insn ("iilf\t%4,%3", op);
9114 output_asm_insn ("a\t%4,0(%1)", op);
9115 output_asm_insn ("a\t%1,0(%4)", op);
9116 }
9117 else
9118 {
9119 op[7] = gen_label_rtx ();
9120 output_asm_insn ("l\t%4,%7-%5(%4)", op);
9121 output_asm_insn ("a\t%4,0(%1)", op);
9122 output_asm_insn ("a\t%1,0(%4)", op);
9123 }
9124
9125 /* We had to clobber the base pointer register.
9126 Re-setup the base pointer (with a different base). */
9127 op[5] = gen_label_rtx ();
9128 output_asm_insn ("basr\t%4,0", op);
9129 targetm.asm_out.internal_label (file, "L",
9130 CODE_LABEL_NUMBER (op[5]));
9131 }
9132
9133 /* Jump to target. */
9134 op[8] = gen_label_rtx ();
9135
9136 if (!flag_pic)
9137 output_asm_insn ("l\t%4,%8-%5(%4)", op);
9138 else if (!nonlocal)
9139 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9140 /* We cannot call through .plt, since .plt requires %r12 loaded. */
9141 else if (flag_pic == 1)
9142 {
9143 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9144 output_asm_insn ("l\t%4,%0(%4)", op);
9145 }
9146 else if (flag_pic == 2)
9147 {
9148 op[9] = gen_rtx_REG (Pmode, 0);
9149 output_asm_insn ("l\t%9,%8-4-%5(%4)", op);
9150 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9151 output_asm_insn ("ar\t%4,%9", op);
9152 output_asm_insn ("l\t%4,0(%4)", op);
9153 }
9154
9155 output_asm_insn ("br\t%4", op);
9156
9157 /* Output literal pool. */
9158 output_asm_insn (".align\t4", op);
9159
9160 if (nonlocal && flag_pic == 2)
9161 output_asm_insn (".long\t%0", op);
9162 if (nonlocal)
9163 {
9164 op[0] = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
9165 SYMBOL_REF_FLAGS (op[0]) = SYMBOL_FLAG_LOCAL;
9166 }
9167
9168 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[8]));
9169 if (!flag_pic)
9170 output_asm_insn (".long\t%0", op);
9171 else
9172 output_asm_insn (".long\t%0-%5", op);
9173
9174 if (op[6])
9175 {
9176 targetm.asm_out.internal_label (file, "L",
9177 CODE_LABEL_NUMBER (op[6]));
9178 output_asm_insn (".long\t%2", op);
9179 }
9180 if (op[7])
9181 {
9182 targetm.asm_out.internal_label (file, "L",
9183 CODE_LABEL_NUMBER (op[7]));
9184 output_asm_insn (".long\t%3", op);
9185 }
9186 }
9187 }
9188
9189 static bool
9190 s390_valid_pointer_mode (enum machine_mode mode)
9191 {
9192 return (mode == SImode || (TARGET_64BIT && mode == DImode));
9193 }
9194
9195 /* Checks whether the given CALL_EXPR would use a caller
9196 saved register. This is used to decide whether sibling call
9197 optimization could be performed on the respective function
9198 call. */
9199
9200 static bool
9201 s390_call_saved_register_used (tree call_expr)
9202 {
9203 CUMULATIVE_ARGS cum;
9204 tree parameter;
9205 enum machine_mode mode;
9206 tree type;
9207 rtx parm_rtx;
9208 int reg, i;
9209
9210 INIT_CUMULATIVE_ARGS (cum, NULL, NULL, 0, 0);
9211
9212 for (i = 0; i < call_expr_nargs (call_expr); i++)
9213 {
9214 parameter = CALL_EXPR_ARG (call_expr, i);
9215 gcc_assert (parameter);
9216
9217 /* For an undeclared variable passed as a parameter we will get
9218 an ERROR_MARK node here. */
9219 if (TREE_CODE (parameter) == ERROR_MARK)
9220 return true;
9221
9222 type = TREE_TYPE (parameter);
9223 gcc_assert (type);
9224
9225 mode = TYPE_MODE (type);
9226 gcc_assert (mode);
9227
9228 if (pass_by_reference (&cum, mode, type, true))
9229 {
9230 mode = Pmode;
9231 type = build_pointer_type (type);
9232 }
9233
9234 parm_rtx = s390_function_arg (&cum, mode, type, 0);
9235
9236 s390_function_arg_advance (&cum, mode, type, 0);
9237
9238 if (parm_rtx && REG_P (parm_rtx))
9239 {
9240 for (reg = 0;
9241 reg < HARD_REGNO_NREGS (REGNO (parm_rtx), GET_MODE (parm_rtx));
9242 reg++)
9243 if (! call_used_regs[reg + REGNO (parm_rtx)])
9244 return true;
9245 }
9246 }
9247 return false;
9248 }
9249
9250 /* Return true if the given call expression can be
9251 turned into a sibling call.
9252 DECL holds the declaration of the function to be called whereas
9253 EXP is the call expression itself. */
9254
9255 static bool
9256 s390_function_ok_for_sibcall (tree decl, tree exp)
9257 {
9258 /* The TPF epilogue uses register 1. */
9259 if (TARGET_TPF_PROFILING)
9260 return false;
9261
9262 /* The 31 bit PLT code uses register 12 (GOT pointer - caller saved)
9263 which would have to be restored before the sibcall. */
9264 if (!TARGET_64BIT && flag_pic && decl && !targetm.binds_local_p (decl))
9265 return false;
9266
9267 /* Register 6 on s390 is available as an argument register but is
9268 unfortunately call-saved. This makes functions needing this register
9269 for arguments not suitable for sibcalls. */
9270 return !s390_call_saved_register_used (exp);
9271 }
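/* For example (the prototype is made up for this comment), a tail call to

     int callee (int, int, int, int, int);

   needs %r6 for its fifth argument; since %r6 has to be preserved for
   our own caller, such a call is not turned into a sibcall.  */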
9272
9273 /* Return the fixed registers used for condition codes. */
9274
9275 static bool
9276 s390_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
9277 {
9278 *p1 = CC_REGNUM;
9279 *p2 = INVALID_REGNUM;
9280
9281 return true;
9282 }
9283
9284 /* This function is used by the call expanders of the machine description.
9285 It emits the call insn itself together with the necessary operations
9286 to adjust the target address and returns the emitted insn.
9287 ADDR_LOCATION is the target address rtx
9288 TLS_CALL the location of the thread-local symbol
9289 RESULT_REG the register where the result of the call should be stored
9290 RETADDR_REG the register where the return address should be stored
9291 If this parameter is NULL_RTX the call is considered
9292 to be a sibling call. */
9293
9294 rtx
9295 s390_emit_call (rtx addr_location, rtx tls_call, rtx result_reg,
9296 rtx retaddr_reg)
9297 {
9298 bool plt_call = false;
9299 rtx insn;
9300 rtx call;
9301 rtx clobber;
9302 rtvec vec;
9303
9304 /* Direct function calls need special treatment. */
9305 if (GET_CODE (addr_location) == SYMBOL_REF)
9306 {
9307 /* When calling a global routine in PIC mode, we must
9308 replace the symbol itself with the PLT stub. */
9309 if (flag_pic && !SYMBOL_REF_LOCAL_P (addr_location))
9310 {
9311 addr_location = gen_rtx_UNSPEC (Pmode,
9312 gen_rtvec (1, addr_location),
9313 UNSPEC_PLT);
9314 addr_location = gen_rtx_CONST (Pmode, addr_location);
9315 plt_call = true;
9316 }
9317
9318 /* Unless we can use the bras(l) insn, force the
9319 routine address into a register. */
9320 if (!TARGET_SMALL_EXEC && !TARGET_CPU_ZARCH)
9321 {
9322 if (flag_pic)
9323 addr_location = legitimize_pic_address (addr_location, 0);
9324 else
9325 addr_location = force_reg (Pmode, addr_location);
9326 }
9327 }
9328
9329 /* If it is already an indirect call or the code above moved the
9330 SYMBOL_REF to somewhere else make sure the address can be found in
9331 register 1. */
9332 if (retaddr_reg == NULL_RTX
9333 && GET_CODE (addr_location) != SYMBOL_REF
9334 && !plt_call)
9335 {
9336 emit_move_insn (gen_rtx_REG (Pmode, SIBCALL_REGNUM), addr_location);
9337 addr_location = gen_rtx_REG (Pmode, SIBCALL_REGNUM);
9338 }
9339
9340 addr_location = gen_rtx_MEM (QImode, addr_location);
9341 call = gen_rtx_CALL (VOIDmode, addr_location, const0_rtx);
9342
9343 if (result_reg != NULL_RTX)
9344 call = gen_rtx_SET (VOIDmode, result_reg, call);
9345
9346 if (retaddr_reg != NULL_RTX)
9347 {
9348 clobber = gen_rtx_CLOBBER (VOIDmode, retaddr_reg);
9349
9350 if (tls_call != NULL_RTX)
9351 vec = gen_rtvec (3, call, clobber,
9352 gen_rtx_USE (VOIDmode, tls_call));
9353 else
9354 vec = gen_rtvec (2, call, clobber);
9355
9356 call = gen_rtx_PARALLEL (VOIDmode, vec);
9357 }
9358
9359 insn = emit_call_insn (call);
9360
9361 /* 31-bit PLT stubs and tls calls use the GOT register implicitly. */
9362 if ((!TARGET_64BIT && plt_call) || tls_call != NULL_RTX)
9363 {
9364 /* s390_function_ok_for_sibcall should
9365 have denied sibcalls in this case. */
9366 gcc_assert (retaddr_reg != NULL_RTX);
9367
9368 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
9369 }
9370 return insn;
9371 }
9372
9373 /* Implement CONDITIONAL_REGISTER_USAGE. */
9374
9375 void
9376 s390_conditional_register_usage (void)
9377 {
9378 int i;
9379
9380 if (flag_pic)
9381 {
9382 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
9383 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
9384 }
9385 if (TARGET_CPU_ZARCH)
9386 {
9387 fixed_regs[BASE_REGNUM] = 0;
9388 call_used_regs[BASE_REGNUM] = 0;
9389 fixed_regs[RETURN_REGNUM] = 0;
9390 call_used_regs[RETURN_REGNUM] = 0;
9391 }
9392 if (TARGET_64BIT)
9393 {
9394 for (i = 24; i < 32; i++)
9395 call_used_regs[i] = call_really_used_regs[i] = 0;
9396 }
9397 else
9398 {
9399 for (i = 18; i < 20; i++)
9400 call_used_regs[i] = call_really_used_regs[i] = 0;
9401 }
9402
9403 if (TARGET_SOFT_FLOAT)
9404 {
9405 for (i = 16; i < 32; i++)
9406 call_used_regs[i] = fixed_regs[i] = 1;
9407 }
9408 }
9409
9410 /* Corresponding function to eh_return expander. */
9411
9412 static GTY(()) rtx s390_tpf_eh_return_symbol;
9413 void
9414 s390_emit_tpf_eh_return (rtx target)
9415 {
9416 rtx insn, reg;
9417
9418 if (!s390_tpf_eh_return_symbol)
9419 s390_tpf_eh_return_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tpf_eh_return");
9420
9421 reg = gen_rtx_REG (Pmode, 2);
9422
9423 emit_move_insn (reg, target);
9424 insn = s390_emit_call (s390_tpf_eh_return_symbol, NULL_RTX, reg,
9425 gen_rtx_REG (Pmode, RETURN_REGNUM));
9426 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
9427
9428 emit_move_insn (EH_RETURN_HANDLER_RTX, reg);
9429 }
9430
9431 /* Rework the prologue/epilogue to avoid saving/restoring
9432 registers unnecessarily. */
9433
9434 static void
9435 s390_optimize_prologue (void)
9436 {
9437 rtx insn, new_insn, next_insn;
9438
9439 /* Do a final recompute of the frame-related data. */
9440
9441 s390_update_frame_layout ();
9442
9443 /* If all special registers are in fact used, there's nothing we
9444 can do, so no point in walking the insn list. */
9445
9446 if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
9447 && cfun_frame_layout.last_save_gpr >= BASE_REGNUM
9448 && (TARGET_CPU_ZARCH
9449 || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
9450 && cfun_frame_layout.last_save_gpr >= RETURN_REGNUM)))
9451 return;
9452
9453 /* Search for prologue/epilogue insns and replace them. */
9454
9455 for (insn = get_insns (); insn; insn = next_insn)
9456 {
9457 int first, last, off;
9458 rtx set, base, offset;
9459
9460 next_insn = NEXT_INSN (insn);
9461
9462 if (GET_CODE (insn) != INSN)
9463 continue;
9464
9465 if (GET_CODE (PATTERN (insn)) == PARALLEL
9466 && store_multiple_operation (PATTERN (insn), VOIDmode))
9467 {
9468 set = XVECEXP (PATTERN (insn), 0, 0);
9469 first = REGNO (SET_SRC (set));
9470 last = first + XVECLEN (PATTERN (insn), 0) - 1;
9471 offset = const0_rtx;
9472 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
9473 off = INTVAL (offset);
9474
9475 if (GET_CODE (base) != REG || off < 0)
9476 continue;
9477 if (cfun_frame_layout.first_save_gpr != -1
9478 && (cfun_frame_layout.first_save_gpr < first
9479 || cfun_frame_layout.last_save_gpr > last))
9480 continue;
9481 if (REGNO (base) != STACK_POINTER_REGNUM
9482 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
9483 continue;
9484 if (first > BASE_REGNUM || last < BASE_REGNUM)
9485 continue;
9486
9487 if (cfun_frame_layout.first_save_gpr != -1)
9488 {
9489 new_insn = save_gprs (base,
9490 off + (cfun_frame_layout.first_save_gpr
9491 - first) * UNITS_PER_WORD,
9492 cfun_frame_layout.first_save_gpr,
9493 cfun_frame_layout.last_save_gpr);
9494 new_insn = emit_insn_before (new_insn, insn);
9495 INSN_ADDRESSES_NEW (new_insn, -1);
9496 }
9497
9498 remove_insn (insn);
9499 continue;
9500 }
9501
9502 if (cfun_frame_layout.first_save_gpr == -1
9503 && GET_CODE (PATTERN (insn)) == SET
9504 && GET_CODE (SET_SRC (PATTERN (insn))) == REG
9505 && (REGNO (SET_SRC (PATTERN (insn))) == BASE_REGNUM
9506 || (!TARGET_CPU_ZARCH
9507 && REGNO (SET_SRC (PATTERN (insn))) == RETURN_REGNUM))
9508 && GET_CODE (SET_DEST (PATTERN (insn))) == MEM)
9509 {
9510 set = PATTERN (insn);
9511 first = REGNO (SET_SRC (set));
9512 offset = const0_rtx;
9513 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
9514 off = INTVAL (offset);
9515
9516 if (GET_CODE (base) != REG || off < 0)
9517 continue;
9518 if (REGNO (base) != STACK_POINTER_REGNUM
9519 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
9520 continue;
9521
9522 remove_insn (insn);
9523 continue;
9524 }
9525
9526 if (GET_CODE (PATTERN (insn)) == PARALLEL
9527 && load_multiple_operation (PATTERN (insn), VOIDmode))
9528 {
9529 set = XVECEXP (PATTERN (insn), 0, 0);
9530 first = REGNO (SET_DEST (set));
9531 last = first + XVECLEN (PATTERN (insn), 0) - 1;
9532 offset = const0_rtx;
9533 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
9534 off = INTVAL (offset);
9535
9536 if (GET_CODE (base) != REG || off < 0)
9537 continue;
9538 if (cfun_frame_layout.first_restore_gpr != -1
9539 && (cfun_frame_layout.first_restore_gpr < first
9540 || cfun_frame_layout.last_restore_gpr > last))
9541 continue;
9542 if (REGNO (base) != STACK_POINTER_REGNUM
9543 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
9544 continue;
9545 if (first > BASE_REGNUM || last < BASE_REGNUM)
9546 continue;
9547
9548 if (cfun_frame_layout.first_restore_gpr != -1)
9549 {
9550 new_insn = restore_gprs (base,
9551 off + (cfun_frame_layout.first_restore_gpr
9552 - first) * UNITS_PER_WORD,
9553 cfun_frame_layout.first_restore_gpr,
9554 cfun_frame_layout.last_restore_gpr);
9555 new_insn = emit_insn_before (new_insn, insn);
9556 INSN_ADDRESSES_NEW (new_insn, -1);
9557 }
9558
9559 remove_insn (insn);
9560 continue;
9561 }
9562
9563 if (cfun_frame_layout.first_restore_gpr == -1
9564 && GET_CODE (PATTERN (insn)) == SET
9565 && GET_CODE (SET_DEST (PATTERN (insn))) == REG
9566 && (REGNO (SET_DEST (PATTERN (insn))) == BASE_REGNUM
9567 || (!TARGET_CPU_ZARCH
9568 && REGNO (SET_DEST (PATTERN (insn))) == RETURN_REGNUM))
9569 && GET_CODE (SET_SRC (PATTERN (insn))) == MEM)
9570 {
9571 set = PATTERN (insn);
9572 first = REGNO (SET_DEST (set));
9573 offset = const0_rtx;
9574 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
9575 off = INTVAL (offset);
9576
9577 if (GET_CODE (base) != REG || off < 0)
9578 continue;
9579 if (REGNO (base) != STACK_POINTER_REGNUM
9580 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
9581 continue;
9582
9583 remove_insn (insn);
9584 continue;
9585 }
9586 }
9587 }
9588
9589 /* Returns 1 if INSN reads the value of REG for purposes not related
9590 to addressing of memory, and 0 otherwise. */
9591 static int
9592 s390_non_addr_reg_read_p (rtx reg, rtx insn)
9593 {
9594 return reg_referenced_p (reg, PATTERN (insn))
9595 && !reg_used_in_mem_p (REGNO (reg), PATTERN (insn));
9596 }
9597
9598 /* Starting from INSN find_cond_jump looks downwards in the insn
9599 stream for a single jump insn which is the last user of the
9600 condition code set in INSN. */
9601 static rtx
9602 find_cond_jump (rtx insn)
9603 {
9604 for (; insn; insn = NEXT_INSN (insn))
9605 {
9606 rtx ite, cc;
9607
9608 if (LABEL_P (insn))
9609 break;
9610
9611 if (!JUMP_P (insn))
9612 {
9613 if (reg_mentioned_p (gen_rtx_REG (CCmode, CC_REGNUM), insn))
9614 break;
9615 continue;
9616 }
9617
9618 /* This will be triggered by a return. */
9619 if (GET_CODE (PATTERN (insn)) != SET)
9620 break;
9621
9622 gcc_assert (SET_DEST (PATTERN (insn)) == pc_rtx);
9623 ite = SET_SRC (PATTERN (insn));
9624
9625 if (GET_CODE (ite) != IF_THEN_ELSE)
9626 break;
9627
9628 cc = XEXP (XEXP (ite, 0), 0);
9629 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc)))
9630 break;
9631
9632 if (find_reg_note (insn, REG_DEAD, cc))
9633 return insn;
9634 break;
9635 }
9636
9637 return NULL_RTX;
9638 }
9639
9640 /* Swap the condition in COND and the operands in OP0 and OP1 so that
9641 the semantics does not change. If NULL_RTX is passed as COND the
9642 function tries to find the conditional jump starting with INSN. */
9643 static void
9644 s390_swap_cmp (rtx cond, rtx *op0, rtx *op1, rtx insn)
9645 {
9646 rtx tmp = *op0;
9647
9648 if (cond == NULL_RTX)
9649 {
9650 rtx jump = find_cond_jump (NEXT_INSN (insn));
9651 jump = jump ? single_set (jump) : NULL_RTX;
9652
9653 if (jump == NULL_RTX)
9654 return;
9655
9656 cond = XEXP (XEXP (jump, 1), 0);
9657 }
9658
9659 *op0 = *op1;
9660 *op1 = tmp;
9661 PUT_CODE (cond, swap_condition (GET_CODE (cond)));
9662 }
9663
9664 /* On z10, instructions of the compare-and-branch family have the
9665 property to access the register occurring as second operand with
9666 its bits complemented. If such a compare is grouped with a second
9667 instruction that accesses the same register non-complemented, and
9668 if that register's value is delivered via a bypass, then the
9669 pipeline recycles, thereby causing significant performance decline.
9670 This function locates such situations and exchanges the two
9671 operands of the compare. */
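/* As an illustration (register numbers and condition are made up), in a
   group like

     lr    %r3,%r2            value of %r2 delivered via bypass
     crj   %r1,%r2,...        compare-and-branch reads %r2 complemented

   the compare operands are swapped (and the condition inverted
   accordingly) so that the complemented access no longer hits %r2.  */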
9672 static void
9673 s390_z10_optimize_cmp (void)
9674 {
9675 rtx insn, prev_insn, next_insn;
9676 int added_NOPs = 0;
9677
9678 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9679 {
9680 rtx cond, *op0, *op1;
9681
9682 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
9683 continue;
9684
9685 if (GET_CODE (PATTERN (insn)) == PARALLEL)
9686 {
9687 /* Handle compare and branch and branch on count
9688 instructions. */
9689 rtx pattern = single_set (insn);
9690
9691 if (!pattern
9692 || SET_DEST (pattern) != pc_rtx
9693 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE)
9694 continue;
9695
9696 cond = XEXP (SET_SRC (pattern), 0);
9697 op0 = &XEXP (cond, 0);
9698 op1 = &XEXP (cond, 1);
9699 }
9700 else if (GET_CODE (PATTERN (insn)) == SET)
9701 {
9702 rtx src, dest;
9703
9704 /* Handle normal compare instructions. */
9705 src = SET_SRC (PATTERN (insn));
9706 dest = SET_DEST (PATTERN (insn));
9707
9708 if (!REG_P (dest)
9709 || !CC_REGNO_P (REGNO (dest))
9710 || GET_CODE (src) != COMPARE)
9711 continue;
9712
9713 /* s390_swap_cmp will try to find the conditional
9714 jump when passing NULL_RTX as condition. */
9715 cond = NULL_RTX;
9716 op0 = &XEXP (src, 0);
9717 op1 = &XEXP (src, 1);
9718 }
9719 else
9720 continue;
9721
9722 if (!REG_P (*op0) || !REG_P (*op1))
9723 continue;
9724
9725 /* Swap the COMPARE arguments and its mask if there is a
9726 conflicting access in the previous insn. */
9727 prev_insn = PREV_INSN (insn);
9728 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
9729 && reg_referenced_p (*op1, PATTERN (prev_insn)))
9730 s390_swap_cmp (cond, op0, op1, insn);
9731
9732 /* Check if there is a conflict with the next insn. If there
9733 was no conflict with the previous insn, then swap the
9734 COMPARE arguments and its mask. If we already swapped
9735 the operands, or if swapping them would cause a conflict
9736 with the previous insn, issue a NOP after the COMPARE in
9737 order to separate the two instructions. */
9738 next_insn = NEXT_INSN (insn);
9739 if (next_insn != NULL_RTX && INSN_P (next_insn)
9740 && s390_non_addr_reg_read_p (*op1, next_insn))
9741 {
9742 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
9743 && s390_non_addr_reg_read_p (*op0, prev_insn))
9744 {
9745 if (REGNO (*op1) == 0)
9746 emit_insn_after (gen_nop1 (), insn);
9747 else
9748 emit_insn_after (gen_nop (), insn);
9749 added_NOPs = 1;
9750 }
9751 else
9752 s390_swap_cmp (cond, op0, op1, insn);
9753 }
9754 }
9755
9756 /* Adjust branches if we added new instructions. */
9757 if (added_NOPs)
9758 shorten_branches (get_insns ());
9759 }
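
/* To summarize the decision made above for a compare whose second
   operand conflicts with the following insn: if the first operand
   does not also conflict with the preceding insn, the operands are
   swapped; otherwise a NOP (nop1 if the second operand is register 0)
   is emitted after the compare so that the two conflicting
   instructions are separated.  */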
9760
9761
9762 /* Perform machine-dependent processing. */
9763
9764 static void
9765 s390_reorg (void)
9766 {
9767 bool pool_overflow = false;
9768
9769 /* Make sure all splits have been performed; splits after
9770 machine_dependent_reorg might confuse insn length counts. */
9771 split_all_insns_noflow ();
9772
9773 /* Install the main literal pool and the associated base
9774 register load insns.
9775
9776 In addition, there are two problematic situations we need
9777 to correct:
9778
9779 - the literal pool might be > 4096 bytes in size, so that
9780 some of its elements cannot be directly accessed
9781
9782 - a branch target might be > 64K away from the branch, so that
9783 it is not possible to use a PC-relative instruction.
9784
9785 To fix those, we split the single literal pool into multiple
9786 pool chunks, reloading the pool base register at various
9787 points throughout the function to ensure it always points to
9788 the pool chunk the following code expects, and / or replace
9789 PC-relative branches by absolute branches.
9790
9791 However, the two problems are interdependent: splitting the
9792 literal pool can move a branch further away from its target,
9793 causing the 64K limit to overflow, and on the other hand,
9794 replacing a PC-relative branch by an absolute branch means
9795 we need to put the branch target address into the literal
9796 pool, possibly causing it to overflow.
9797
9798 So, we loop trying to fix up both problems until we manage
9799 to satisfy both conditions at the same time. Note that the
9800 loop is guaranteed to terminate as every pass of the loop
9801 strictly decreases the total number of PC-relative branches
9802 in the function. (This is not completely true as there
9803 might be branch-over-pool insns introduced by chunkify_start.
9804 Those never need to be split however.) */
9805
9806 for (;;)
9807 {
9808 struct constant_pool *pool = NULL;
9809
9810 /* Collect the literal pool. */
9811 if (!pool_overflow)
9812 {
9813 pool = s390_mainpool_start ();
9814 if (!pool)
9815 pool_overflow = true;
9816 }
9817
9818 /* If literal pool overflowed, start to chunkify it. */
9819 if (pool_overflow)
9820 pool = s390_chunkify_start ();
9821
9822 /* Split out-of-range branches. If this has created new
9823 literal pool entries, cancel current chunk list and
9824 recompute it. zSeries machines have large branch
9825 instructions, so we never need to split a branch. */
9826 if (!TARGET_CPU_ZARCH && s390_split_branches ())
9827 {
9828 if (pool_overflow)
9829 s390_chunkify_cancel (pool);
9830 else
9831 s390_mainpool_cancel (pool);
9832
9833 continue;
9834 }
9835
9836 /* If we made it up to here, both conditions are satisfied.
9837 Finish up literal pool related changes. */
9838 if (pool_overflow)
9839 s390_chunkify_finish (pool);
9840 else
9841 s390_mainpool_finish (pool);
9842
9843 /* We're done splitting branches. */
9844 cfun->machine->split_branches_pending_p = false;
9845 break;
9846 }
9847
9848 /* Generate out-of-pool execute target insns. */
9849 if (TARGET_CPU_ZARCH)
9850 {
9851 rtx insn, label, target;
9852
9853 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9854 {
9855 label = s390_execute_label (insn);
9856 if (!label)
9857 continue;
9858
9859 gcc_assert (label != const0_rtx);
9860
9861 target = emit_label (XEXP (label, 0));
9862 INSN_ADDRESSES_NEW (target, -1);
9863
9864 target = emit_insn (s390_execute_target (insn));
9865 INSN_ADDRESSES_NEW (target, -1);
9866 }
9867 }
9868
9869 /* Try to optimize prologue and epilogue further. */
9870 s390_optimize_prologue ();
9871
9872 /* Eliminate z10-specific pipeline recycles related to some compare
9873 instructions. */
9874 if (s390_tune == PROCESSOR_2097_Z10)
9875 s390_z10_optimize_cmp ();
9876 }
9877
9878
9879 /* Initialize GCC target structure. */
9880
9881 #undef TARGET_ASM_ALIGNED_HI_OP
9882 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
9883 #undef TARGET_ASM_ALIGNED_DI_OP
9884 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
9885 #undef TARGET_ASM_INTEGER
9886 #define TARGET_ASM_INTEGER s390_assemble_integer
9887
9888 #undef TARGET_ASM_OPEN_PAREN
9889 #define TARGET_ASM_OPEN_PAREN ""
9890
9891 #undef TARGET_ASM_CLOSE_PAREN
9892 #define TARGET_ASM_CLOSE_PAREN ""
9893
9894 #undef TARGET_DEFAULT_TARGET_FLAGS
9895 #define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | MASK_FUSED_MADD)
9896 #undef TARGET_HANDLE_OPTION
9897 #define TARGET_HANDLE_OPTION s390_handle_option
9898
9899 #undef TARGET_ENCODE_SECTION_INFO
9900 #define TARGET_ENCODE_SECTION_INFO s390_encode_section_info
9901
9902 #ifdef HAVE_AS_TLS
9903 #undef TARGET_HAVE_TLS
9904 #define TARGET_HAVE_TLS true
9905 #endif
9906 #undef TARGET_CANNOT_FORCE_CONST_MEM
9907 #define TARGET_CANNOT_FORCE_CONST_MEM s390_cannot_force_const_mem
9908
9909 #undef TARGET_DELEGITIMIZE_ADDRESS
9910 #define TARGET_DELEGITIMIZE_ADDRESS s390_delegitimize_address
9911
9912 #undef TARGET_RETURN_IN_MEMORY
9913 #define TARGET_RETURN_IN_MEMORY s390_return_in_memory
9914
9915 #undef TARGET_INIT_BUILTINS
9916 #define TARGET_INIT_BUILTINS s390_init_builtins
9917 #undef TARGET_EXPAND_BUILTIN
9918 #define TARGET_EXPAND_BUILTIN s390_expand_builtin
9919
9920 #undef TARGET_ASM_OUTPUT_MI_THUNK
9921 #define TARGET_ASM_OUTPUT_MI_THUNK s390_output_mi_thunk
9922 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
9923 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
9924
9925 #undef TARGET_SCHED_ADJUST_PRIORITY
9926 #define TARGET_SCHED_ADJUST_PRIORITY s390_adjust_priority
9927 #undef TARGET_SCHED_ISSUE_RATE
9928 #define TARGET_SCHED_ISSUE_RATE s390_issue_rate
9929 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
9930 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD s390_first_cycle_multipass_dfa_lookahead
9931
9932 #undef TARGET_CANNOT_COPY_INSN_P
9933 #define TARGET_CANNOT_COPY_INSN_P s390_cannot_copy_insn_p
9934 #undef TARGET_RTX_COSTS
9935 #define TARGET_RTX_COSTS s390_rtx_costs
9936 #undef TARGET_ADDRESS_COST
9937 #define TARGET_ADDRESS_COST s390_address_cost
9938
9939 #undef TARGET_MACHINE_DEPENDENT_REORG
9940 #define TARGET_MACHINE_DEPENDENT_REORG s390_reorg
9941
9942 #undef TARGET_VALID_POINTER_MODE
9943 #define TARGET_VALID_POINTER_MODE s390_valid_pointer_mode
9944
9945 #undef TARGET_BUILD_BUILTIN_VA_LIST
9946 #define TARGET_BUILD_BUILTIN_VA_LIST s390_build_builtin_va_list
9947 #undef TARGET_EXPAND_BUILTIN_VA_START
9948 #define TARGET_EXPAND_BUILTIN_VA_START s390_va_start
9949 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
9950 #define TARGET_GIMPLIFY_VA_ARG_EXPR s390_gimplify_va_arg
9951
9952 #undef TARGET_PROMOTE_FUNCTION_ARGS
9953 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_const_tree_true
9954 #undef TARGET_PROMOTE_FUNCTION_RETURN
9955 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true
9956 #undef TARGET_PASS_BY_REFERENCE
9957 #define TARGET_PASS_BY_REFERENCE s390_pass_by_reference
9958
9959 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
9960 #define TARGET_FUNCTION_OK_FOR_SIBCALL s390_function_ok_for_sibcall
9961
9962 #undef TARGET_FIXED_CONDITION_CODE_REGS
9963 #define TARGET_FIXED_CONDITION_CODE_REGS s390_fixed_condition_code_regs
9964
9965 #undef TARGET_CC_MODES_COMPATIBLE
9966 #define TARGET_CC_MODES_COMPATIBLE s390_cc_modes_compatible
9967
9968 #undef TARGET_INVALID_WITHIN_DOLOOP
9969 #define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_null
9970
9971 #ifdef HAVE_AS_TLS
9972 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
9973 #define TARGET_ASM_OUTPUT_DWARF_DTPREL s390_output_dwarf_dtprel
9974 #endif
9975
9976 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
9977 #undef TARGET_MANGLE_TYPE
9978 #define TARGET_MANGLE_TYPE s390_mangle_type
9979 #endif
9980
9981 #undef TARGET_SCALAR_MODE_SUPPORTED_P
9982 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
9983
9984 #undef TARGET_SECONDARY_RELOAD
9985 #define TARGET_SECONDARY_RELOAD s390_secondary_reload
9986
9987 #undef TARGET_LIBGCC_CMP_RETURN_MODE
9988 #define TARGET_LIBGCC_CMP_RETURN_MODE s390_libgcc_cmp_return_mode
9989
9990 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
9991 #define TARGET_LIBGCC_SHIFT_COUNT_MODE s390_libgcc_shift_count_mode
9992
9993 struct gcc_target targetm = TARGET_INITIALIZER;
9994
9995 #include "gt-s390.h"