s390.c (z10_cost): New cost function for z10.
1 /* Subroutines used for code generation on IBM S/390 and zSeries
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006,
3 2007 Free Software Foundation, Inc.
4 Contributed by Hartmut Penner (hpenner@de.ibm.com) and
5 Ulrich Weigand (uweigand@de.ibm.com).
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
13
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "real.h"
33 #include "insn-config.h"
34 #include "conditions.h"
35 #include "output.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "except.h"
39 #include "function.h"
40 #include "recog.h"
41 #include "expr.h"
42 #include "reload.h"
43 #include "toplev.h"
44 #include "basic-block.h"
45 #include "integrate.h"
46 #include "ggc.h"
47 #include "target.h"
48 #include "target-def.h"
49 #include "debug.h"
50 #include "langhooks.h"
51 #include "optabs.h"
52 #include "tree-gimple.h"
53 #include "df.h"
54
55
56 /* Define the specific costs for a given cpu. */
57
58 struct processor_costs
59 {
60 /* multiplication */
61 const int m; /* cost of an M instruction. */
62 const int mghi; /* cost of an MGHI instruction. */
63 const int mh; /* cost of an MH instruction. */
64 const int mhi; /* cost of an MHI instruction. */
65 const int ml; /* cost of an ML instruction. */
66 const int mr; /* cost of an MR instruction. */
67 const int ms; /* cost of an MS instruction. */
68 const int msg; /* cost of an MSG instruction. */
69 const int msgf; /* cost of an MSGF instruction. */
70 const int msgfr; /* cost of an MSGFR instruction. */
71 const int msgr; /* cost of an MSGR instruction. */
72 const int msr; /* cost of an MSR instruction. */
73 const int mult_df; /* cost of multiplication in DFmode. */
74 const int mxbr;
75 /* square root */
76 const int sqxbr; /* cost of square root in TFmode. */
77 const int sqdbr; /* cost of square root in DFmode. */
78 const int sqebr; /* cost of square root in SFmode. */
79 /* multiply and add */
80 const int madbr; /* cost of multiply and add in DFmode. */
81 const int maebr; /* cost of multiply and add in SFmode. */
82 /* division */
83 const int dxbr;
84 const int ddbr;
85 const int debr;
86 const int dlgr;
87 const int dlr;
88 const int dr;
89 const int dsgfr;
90 const int dsgr;
91 };
92
 93 const struct processor_costs *s390_cost;  /* Cost table in use; selected from s390_tune in override_options below. */
94
95 static const
96 struct processor_costs z900_cost =
97 {
98 COSTS_N_INSNS (5), /* M */
99 COSTS_N_INSNS (10), /* MGHI */
100 COSTS_N_INSNS (5), /* MH */
101 COSTS_N_INSNS (4), /* MHI */
102 COSTS_N_INSNS (5), /* ML */
103 COSTS_N_INSNS (5), /* MR */
104 COSTS_N_INSNS (4), /* MS */
105 COSTS_N_INSNS (15), /* MSG */
106 COSTS_N_INSNS (7), /* MSGF */
107 COSTS_N_INSNS (7), /* MSGFR */
108 COSTS_N_INSNS (10), /* MSGR */
109 COSTS_N_INSNS (4), /* MSR */
110 COSTS_N_INSNS (7), /* multiplication in DFmode */
111 COSTS_N_INSNS (13), /* MXBR */
112 COSTS_N_INSNS (136), /* SQXBR */
113 COSTS_N_INSNS (44), /* SQDBR */
114 COSTS_N_INSNS (35), /* SQEBR */
115 COSTS_N_INSNS (18), /* MADBR */
116 COSTS_N_INSNS (13), /* MAEBR */
117 COSTS_N_INSNS (134), /* DXBR */
118 COSTS_N_INSNS (30), /* DDBR */
119 COSTS_N_INSNS (27), /* DEBR */
120 COSTS_N_INSNS (220), /* DLGR */
121 COSTS_N_INSNS (34), /* DLR */
122 COSTS_N_INSNS (34), /* DR */
123 COSTS_N_INSNS (32), /* DSGFR */
124 COSTS_N_INSNS (32), /* DSGR */
125 };
126
127 static const
128 struct processor_costs z990_cost =
129 {
130 COSTS_N_INSNS (4), /* M */
131 COSTS_N_INSNS (2), /* MGHI */
132 COSTS_N_INSNS (2), /* MH */
133 COSTS_N_INSNS (2), /* MHI */
134 COSTS_N_INSNS (4), /* ML */
135 COSTS_N_INSNS (4), /* MR */
136 COSTS_N_INSNS (5), /* MS */
137 COSTS_N_INSNS (6), /* MSG */
138 COSTS_N_INSNS (4), /* MSGF */
139 COSTS_N_INSNS (4), /* MSGFR */
140 COSTS_N_INSNS (4), /* MSGR */
141 COSTS_N_INSNS (4), /* MSR */
142 COSTS_N_INSNS (1), /* multiplication in DFmode */
143 COSTS_N_INSNS (28), /* MXBR */
144 COSTS_N_INSNS (130), /* SQXBR */
145 COSTS_N_INSNS (66), /* SQDBR */
146 COSTS_N_INSNS (38), /* SQEBR */
147 COSTS_N_INSNS (1), /* MADBR */
148 COSTS_N_INSNS (1), /* MAEBR */
149 COSTS_N_INSNS (60), /* DXBR */
150 COSTS_N_INSNS (40), /* DDBR */
151 COSTS_N_INSNS (26), /* DEBR */
152 COSTS_N_INSNS (176), /* DLGR */
153 COSTS_N_INSNS (31), /* DLR */
154 COSTS_N_INSNS (31), /* DR */
155 COSTS_N_INSNS (31), /* DSGFR */
156 COSTS_N_INSNS (31), /* DSGR */
157 };
158
159 static const
160 struct processor_costs z9_109_cost =
161 {
162 COSTS_N_INSNS (4), /* M */
163 COSTS_N_INSNS (2), /* MGHI */
164 COSTS_N_INSNS (2), /* MH */
165 COSTS_N_INSNS (2), /* MHI */
166 COSTS_N_INSNS (4), /* ML */
167 COSTS_N_INSNS (4), /* MR */
168 COSTS_N_INSNS (5), /* MS */
169 COSTS_N_INSNS (6), /* MSG */
170 COSTS_N_INSNS (4), /* MSGF */
171 COSTS_N_INSNS (4), /* MSGFR */
172 COSTS_N_INSNS (4), /* MSGR */
173 COSTS_N_INSNS (4), /* MSR */
174 COSTS_N_INSNS (1), /* multiplication in DFmode */
175 COSTS_N_INSNS (28), /* MXBR */
176 COSTS_N_INSNS (130), /* SQXBR */
177 COSTS_N_INSNS (66), /* SQDBR */
178 COSTS_N_INSNS (38), /* SQEBR */
179 COSTS_N_INSNS (1), /* MADBR */
180 COSTS_N_INSNS (1), /* MAEBR */
181 COSTS_N_INSNS (60), /* DXBR */
182 COSTS_N_INSNS (40), /* DDBR */
183 COSTS_N_INSNS (26), /* DEBR */
184 COSTS_N_INSNS (30), /* DLGR */
185 COSTS_N_INSNS (23), /* DLR */
186 COSTS_N_INSNS (23), /* DR */
187 COSTS_N_INSNS (24), /* DSGFR */
188 COSTS_N_INSNS (24), /* DSGR */
189 };
190
191 static const
192 struct processor_costs z10_cost =
193 {
194 COSTS_N_INSNS (4), /* M */
195 COSTS_N_INSNS (2), /* MGHI */
196 COSTS_N_INSNS (2), /* MH */
197 COSTS_N_INSNS (2), /* MHI */
198 COSTS_N_INSNS (4), /* ML */
199 COSTS_N_INSNS (4), /* MR */
200 COSTS_N_INSNS (5), /* MS */
201 COSTS_N_INSNS (6), /* MSG */
202 COSTS_N_INSNS (4), /* MSGF */
203 COSTS_N_INSNS (4), /* MSGFR */
204 COSTS_N_INSNS (4), /* MSGR */
205 COSTS_N_INSNS (4), /* MSR */
206 COSTS_N_INSNS (1), /* multiplication in DFmode */
207 COSTS_N_INSNS (28), /* MXBR */
208 COSTS_N_INSNS (130), /* SQXBR */
209 COSTS_N_INSNS (66), /* SQDBR */
210 COSTS_N_INSNS (38), /* SQEBR */
211 COSTS_N_INSNS (1), /* MADBR */
212 COSTS_N_INSNS (1), /* MAEBR */
213 COSTS_N_INSNS (60), /* DXBR */
214 COSTS_N_INSNS (40), /* DDBR */
215 COSTS_N_INSNS (26), /* DEBR */
216 COSTS_N_INSNS (30), /* DLGR */
217 COSTS_N_INSNS (23), /* DLR */
218 COSTS_N_INSNS (23), /* DR */
219 COSTS_N_INSNS (24), /* DSGFR */
220 COSTS_N_INSNS (24), /* DSGR */
221 };
222
223 extern int reload_completed;
224
225 /* Save information from a "cmpxx" operation until the branch or scc is
226 emitted. */
227 rtx s390_compare_op0, s390_compare_op1;
228
229 /* Save the result of a compare_and_swap until the branch or scc is
230 emitted. */
231 rtx s390_compare_emitted = NULL_RTX;
232
233 /* Structure used to hold the components of a S/390 memory
234 address. A legitimate address on S/390 is of the general
235 form
236 base + index + displacement
237 where any of the components is optional.
238
239 base and index are registers of the class ADDR_REGS,
240 displacement is an unsigned 12-bit immediate constant. */
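/* For example, the RX-format load L %r1,8(%r2,%r3) uses exactly this form:
   base %r3, index %r2 and displacement 8, addressing the word at
   %r2 + %r3 + 8. */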
241
242 struct s390_address
243 {
244 rtx base;
245 rtx indx;
246 rtx disp;
247 bool pointer;
248 bool literal_pool;
249 };
250
251 /* Which cpu are we tuning for. */
252 enum processor_type s390_tune = PROCESSOR_max;
253 enum processor_flags s390_tune_flags;
254 /* Which instruction set architecture to use. */
255 enum processor_type s390_arch;
256 enum processor_flags s390_arch_flags;
257
258 HOST_WIDE_INT s390_warn_framesize = 0;
259 HOST_WIDE_INT s390_stack_size = 0;
260 HOST_WIDE_INT s390_stack_guard = 0;
261
262 /* The following structure is embedded in the machine
263 specific part of struct function. */
264
265 struct s390_frame_layout GTY (())
266 {
267 /* Offset within stack frame. */
268 HOST_WIDE_INT gprs_offset;
269 HOST_WIDE_INT f0_offset;
270 HOST_WIDE_INT f4_offset;
271 HOST_WIDE_INT f8_offset;
272 HOST_WIDE_INT backchain_offset;
273
 274 /* Numbers of the first and last gpr for which slots in the register
 275 save area are reserved. */
276 int first_save_gpr_slot;
277 int last_save_gpr_slot;
278
 279 /* Numbers of the first and last gpr to be saved and restored. */
280 int first_save_gpr;
281 int first_restore_gpr;
282 int last_save_gpr;
283 int last_restore_gpr;
284
285 /* Bits standing for floating point registers. Set, if the
286 respective register has to be saved. Starting with reg 16 (f0)
287 at the rightmost bit.
288 Bit 15 - 8 7 6 5 4 3 2 1 0
289 fpr 15 - 8 7 5 3 1 6 4 2 0
290 reg 31 - 24 23 22 21 20 19 18 17 16 */
291 unsigned int fpr_bitmap;
292
293 /* Number of floating point registers f8-f15 which must be saved. */
294 int high_fprs;
295
296 /* Set if return address needs to be saved.
297 This flag is set by s390_return_addr_rtx if it could not use
 298 the initial value of r14 and therefore depends on r14 being saved
 299 to the stack. */
300 bool save_return_addr_p;
301
302 /* Size of stack frame. */
303 HOST_WIDE_INT frame_size;
304 };
305
306 /* Define the structure for the machine field in struct function. */
307
308 struct machine_function GTY(())
309 {
310 struct s390_frame_layout frame_layout;
311
312 /* Literal pool base register. */
313 rtx base_reg;
314
315 /* True if we may need to perform branch splitting. */
316 bool split_branches_pending_p;
317
318 /* True during final stage of literal pool processing. */
319 bool decomposed_literal_pool_addresses_ok_p;
320
321 /* Some local-dynamic TLS symbol name. */
322 const char *some_ld_name;
323
 324 bool has_landing_pad_p;  /* True if the current function has a landing pad; set via s390_set_has_landing_pad_p. */
325 };
326
 327 /* A few accessor macros for struct cfun->machine->s390_frame_layout. */
328
329 #define cfun_frame_layout (cfun->machine->frame_layout)
330 #define cfun_save_high_fprs_p (!!cfun_frame_layout.high_fprs)
331 #define cfun_gprs_save_area_size ((cfun_frame_layout.last_save_gpr_slot - \
332 cfun_frame_layout.first_save_gpr_slot + 1) * UNITS_PER_WORD)
333 #define cfun_set_fpr_bit(BITNUM) (cfun->machine->frame_layout.fpr_bitmap |= \
334 (1 << (BITNUM)))
335 #define cfun_fpr_bit_p(BITNUM) (!!(cfun->machine->frame_layout.fpr_bitmap & \
336 (1 << (BITNUM))))
337
338 /* Number of GPRs and FPRs used for argument passing. */
339 #define GP_ARG_NUM_REG 5
 340 #define FP_ARG_NUM_REG (TARGET_64BIT ? 4 : 2)
341
342 /* A couple of shortcuts. */
343 #define CONST_OK_FOR_J(x) \
344 CONST_OK_FOR_CONSTRAINT_P((x), 'J', "J")
345 #define CONST_OK_FOR_K(x) \
346 CONST_OK_FOR_CONSTRAINT_P((x), 'K', "K")
347 #define CONST_OK_FOR_Os(x) \
348 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
349 #define CONST_OK_FOR_Op(x) \
350 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
351 #define CONST_OK_FOR_On(x) \
352 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")
353
354 #define REGNO_PAIR_OK(REGNO, MODE) \
355 (HARD_REGNO_NREGS ((REGNO), (MODE)) == 1 || !((REGNO) & 1))
356
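/* Return the mode in which libgcc routines return comparison results:
   DImode on 64-bit targets, SImode otherwise. */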
357 static enum machine_mode
358 s390_libgcc_cmp_return_mode (void)
359 {
360 return TARGET_64BIT ? DImode : SImode;
361 }
362
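/* Return the mode to be used for shift-count operands in libgcc routines:
   DImode on 64-bit targets, SImode otherwise. */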
363 static enum machine_mode
364 s390_libgcc_shift_count_mode (void)
365 {
366 return TARGET_64BIT ? DImode : SImode;
367 }
368
369 /* Return true if the back end supports mode MODE. */
370 static bool
371 s390_scalar_mode_supported_p (enum machine_mode mode)
372 {
373 if (DECIMAL_FLOAT_MODE_P (mode))
374 return true;
375 else
376 return default_scalar_mode_supported_p (mode);
377 }
378
379 /* Set the has_landing_pad_p flag in struct machine_function to VALUE. */
380
381 void
382 s390_set_has_landing_pad_p (bool value)
383 {
384 cfun->machine->has_landing_pad_p = value;
385 }
386
387 /* If two condition code modes are compatible, return a condition code
388 mode which is compatible with both. Otherwise, return
389 VOIDmode. */
390
391 static enum machine_mode
392 s390_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
393 {
394 if (m1 == m2)
395 return m1;
396
397 switch (m1)
398 {
399 case CCZmode:
400 if (m2 == CCUmode || m2 == CCTmode || m2 == CCZ1mode
401 || m2 == CCSmode || m2 == CCSRmode || m2 == CCURmode)
402 return m2;
403 return VOIDmode;
404
405 case CCSmode:
406 case CCUmode:
407 case CCTmode:
408 case CCSRmode:
409 case CCURmode:
410 case CCZ1mode:
411 if (m2 == CCZmode)
412 return m1;
413
414 return VOIDmode;
415
416 default:
417 return VOIDmode;
418 }
419 return VOIDmode;
420 }
421
422 /* Return true if SET either doesn't set the CC register, or else
423 the source and destination have matching CC modes and that
424 CC mode is at least as constrained as REQ_MODE. */
425
426 static bool
427 s390_match_ccmode_set (rtx set, enum machine_mode req_mode)
428 {
429 enum machine_mode set_mode;
430
431 gcc_assert (GET_CODE (set) == SET);
432
433 if (GET_CODE (SET_DEST (set)) != REG || !CC_REGNO_P (REGNO (SET_DEST (set))))
434 return 1;
435
436 set_mode = GET_MODE (SET_DEST (set));
437 switch (set_mode)
438 {
439 case CCSmode:
440 case CCSRmode:
441 case CCUmode:
442 case CCURmode:
443 case CCLmode:
444 case CCL1mode:
445 case CCL2mode:
446 case CCL3mode:
447 case CCT1mode:
448 case CCT2mode:
449 case CCT3mode:
450 if (req_mode != set_mode)
451 return 0;
452 break;
453
454 case CCZmode:
455 if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode
456 && req_mode != CCSRmode && req_mode != CCURmode)
457 return 0;
458 break;
459
460 case CCAPmode:
461 case CCANmode:
462 if (req_mode != CCAmode)
463 return 0;
464 break;
465
466 default:
467 gcc_unreachable ();
468 }
469
470 return (GET_MODE (SET_SRC (set)) == set_mode);
471 }
472
473 /* Return true if every SET in INSN that sets the CC register
474 has source and destination with matching CC modes and that
475 CC mode is at least as constrained as REQ_MODE.
476 If REQ_MODE is VOIDmode, always return false. */
477
478 bool
479 s390_match_ccmode (rtx insn, enum machine_mode req_mode)
480 {
481 int i;
482
483 /* s390_tm_ccmode returns VOIDmode to indicate failure. */
484 if (req_mode == VOIDmode)
485 return false;
486
487 if (GET_CODE (PATTERN (insn)) == SET)
488 return s390_match_ccmode_set (PATTERN (insn), req_mode);
489
490 if (GET_CODE (PATTERN (insn)) == PARALLEL)
491 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
492 {
493 rtx set = XVECEXP (PATTERN (insn), 0, i);
494 if (GET_CODE (set) == SET)
495 if (!s390_match_ccmode_set (set, req_mode))
496 return false;
497 }
498
499 return true;
500 }
501
502 /* If a test-under-mask instruction can be used to implement
503 (compare (and ... OP1) OP2), return the CC mode required
504 to do that. Otherwise, return VOIDmode.
505 MIXED is true if the instruction can distinguish between
 506 CC1 and CC2 for mixed selected bits (TMxx); it is false
507 if the instruction cannot (TM). */
508
509 enum machine_mode
510 s390_tm_ccmode (rtx op1, rtx op2, bool mixed)
511 {
512 int bit0, bit1;
513
514 /* ??? Fixme: should work on CONST_DOUBLE as well. */
515 if (GET_CODE (op1) != CONST_INT || GET_CODE (op2) != CONST_INT)
516 return VOIDmode;
517
518 /* Selected bits all zero: CC0.
519 e.g.: int a; if ((a & (16 + 128)) == 0) */
520 if (INTVAL (op2) == 0)
521 return CCTmode;
522
523 /* Selected bits all one: CC3.
524 e.g.: int a; if ((a & (16 + 128)) == 16 + 128) */
525 if (INTVAL (op2) == INTVAL (op1))
526 return CCT3mode;
527
528 /* Exactly two bits selected, mixed zeroes and ones: CC1 or CC2. e.g.:
529 int a;
530 if ((a & (16 + 128)) == 16) -> CCT1
531 if ((a & (16 + 128)) == 128) -> CCT2 */
532 if (mixed)
533 {
534 bit1 = exact_log2 (INTVAL (op2));
535 bit0 = exact_log2 (INTVAL (op1) ^ INTVAL (op2));
536 if (bit0 != -1 && bit1 != -1)
537 return bit0 > bit1 ? CCT1mode : CCT2mode;
538 }
539
540 return VOIDmode;
541 }
542
543 /* Given a comparison code OP (EQ, NE, etc.) and the operands
544 OP0 and OP1 of a COMPARE, return the mode to be used for the
545 comparison. */
546
547 enum machine_mode
548 s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
549 {
550 switch (code)
551 {
552 case EQ:
553 case NE:
554 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
555 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
556 return CCAPmode;
557 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
558 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
559 return CCAPmode;
560 if ((GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
561 || GET_CODE (op1) == NEG)
562 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
563 return CCLmode;
564
565 if (GET_CODE (op0) == AND)
566 {
567 /* Check whether we can potentially do it via TM. */
568 enum machine_mode ccmode;
569 ccmode = s390_tm_ccmode (XEXP (op0, 1), op1, 1);
570 if (ccmode != VOIDmode)
571 {
572 /* Relax CCTmode to CCZmode to allow fall-back to AND
573 if that turns out to be beneficial. */
574 return ccmode == CCTmode ? CCZmode : ccmode;
575 }
576 }
577
578 if (register_operand (op0, HImode)
579 && GET_CODE (op1) == CONST_INT
580 && (INTVAL (op1) == -1 || INTVAL (op1) == 65535))
581 return CCT3mode;
582 if (register_operand (op0, QImode)
583 && GET_CODE (op1) == CONST_INT
584 && (INTVAL (op1) == -1 || INTVAL (op1) == 255))
585 return CCT3mode;
586
587 return CCZmode;
588
589 case LE:
590 case LT:
591 case GE:
592 case GT:
 593 /* The only overflow condition of NEG and ABS happens when the operand
 594 is INT_MIN (i.e. -INT_MAX - 1); the result then stays negative, so we
 595 have an overflow from a (mathematically) positive value to a negative one.
596 Using CCAP mode the resulting cc can be used for comparisons. */
597 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
598 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
599 return CCAPmode;
600
601 /* If constants are involved in an add instruction it is possible to use
602 the resulting cc for comparisons with zero. Knowing the sign of the
 603 constant, the overflow behavior becomes predictable. e.g.:
604 int a, b; if ((b = a + c) > 0)
605 with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP */
606 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
607 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
608 {
609 if (INTVAL (XEXP((op0), 1)) < 0)
610 return CCANmode;
611 else
612 return CCAPmode;
613 }
614 /* Fall through. */
615 case UNORDERED:
616 case ORDERED:
617 case UNEQ:
618 case UNLE:
619 case UNLT:
620 case UNGE:
621 case UNGT:
622 case LTGT:
623 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
624 && GET_CODE (op1) != CONST_INT)
625 return CCSRmode;
626 return CCSmode;
627
628 case LTU:
629 case GEU:
630 if (GET_CODE (op0) == PLUS
631 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
632 return CCL1mode;
633
634 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
635 && GET_CODE (op1) != CONST_INT)
636 return CCURmode;
637 return CCUmode;
638
639 case LEU:
640 case GTU:
641 if (GET_CODE (op0) == MINUS
642 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
643 return CCL2mode;
644
645 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
646 && GET_CODE (op1) != CONST_INT)
647 return CCURmode;
648 return CCUmode;
649
650 default:
651 gcc_unreachable ();
652 }
653 }
654
655 /* Replace the comparison OP0 CODE OP1 by a semantically equivalent one
656 that we can implement more efficiently. */
657
658 void
659 s390_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1)
660 {
661 /* Convert ZERO_EXTRACT back to AND to enable TM patterns. */
662 if ((*code == EQ || *code == NE)
663 && *op1 == const0_rtx
664 && GET_CODE (*op0) == ZERO_EXTRACT
665 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
666 && GET_CODE (XEXP (*op0, 2)) == CONST_INT
667 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
668 {
669 rtx inner = XEXP (*op0, 0);
670 HOST_WIDE_INT modesize = GET_MODE_BITSIZE (GET_MODE (inner));
671 HOST_WIDE_INT len = INTVAL (XEXP (*op0, 1));
672 HOST_WIDE_INT pos = INTVAL (XEXP (*op0, 2));
673
674 if (len > 0 && len < modesize
675 && pos >= 0 && pos + len <= modesize
676 && modesize <= HOST_BITS_PER_WIDE_INT)
677 {
678 unsigned HOST_WIDE_INT block;
679 block = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
680 block <<= modesize - pos - len;
681
682 *op0 = gen_rtx_AND (GET_MODE (inner), inner,
683 gen_int_mode (block, GET_MODE (inner)));
684 }
685 }
686
687 /* Narrow AND of memory against immediate to enable TM. */
688 if ((*code == EQ || *code == NE)
689 && *op1 == const0_rtx
690 && GET_CODE (*op0) == AND
691 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
692 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
693 {
694 rtx inner = XEXP (*op0, 0);
695 rtx mask = XEXP (*op0, 1);
696
697 /* Ignore paradoxical SUBREGs if all extra bits are masked out. */
698 if (GET_CODE (inner) == SUBREG
699 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (inner)))
700 && (GET_MODE_SIZE (GET_MODE (inner))
701 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
702 && ((INTVAL (mask)
703 & GET_MODE_MASK (GET_MODE (inner))
704 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
705 == 0))
706 inner = SUBREG_REG (inner);
707
708 /* Do not change volatile MEMs. */
709 if (MEM_P (inner) && !MEM_VOLATILE_P (inner))
710 {
711 int part = s390_single_part (XEXP (*op0, 1),
712 GET_MODE (inner), QImode, 0);
713 if (part >= 0)
714 {
715 mask = gen_int_mode (s390_extract_part (mask, QImode, 0), QImode);
716 inner = adjust_address_nv (inner, QImode, part);
717 *op0 = gen_rtx_AND (QImode, inner, mask);
718 }
719 }
720 }
721
722 /* Narrow comparisons against 0xffff to HImode if possible. */
723 if ((*code == EQ || *code == NE)
724 && GET_CODE (*op1) == CONST_INT
725 && INTVAL (*op1) == 0xffff
726 && SCALAR_INT_MODE_P (GET_MODE (*op0))
727 && (nonzero_bits (*op0, GET_MODE (*op0))
728 & ~(unsigned HOST_WIDE_INT) 0xffff) == 0)
729 {
730 *op0 = gen_lowpart (HImode, *op0);
731 *op1 = constm1_rtx;
732 }
733
734 /* Remove redundant UNSPEC_CCU_TO_INT conversions if possible. */
735 if (GET_CODE (*op0) == UNSPEC
736 && XINT (*op0, 1) == UNSPEC_CCU_TO_INT
737 && XVECLEN (*op0, 0) == 1
738 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCUmode
739 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
740 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
741 && *op1 == const0_rtx)
742 {
743 enum rtx_code new_code = UNKNOWN;
744 switch (*code)
745 {
746 case EQ: new_code = EQ; break;
747 case NE: new_code = NE; break;
748 case LT: new_code = GTU; break;
749 case GT: new_code = LTU; break;
750 case LE: new_code = GEU; break;
751 case GE: new_code = LEU; break;
752 default: break;
753 }
754
755 if (new_code != UNKNOWN)
756 {
757 *op0 = XVECEXP (*op0, 0, 0);
758 *code = new_code;
759 }
760 }
761
762 /* Remove redundant UNSPEC_CCZ_TO_INT conversions if possible. */
763 if (GET_CODE (*op0) == UNSPEC
764 && XINT (*op0, 1) == UNSPEC_CCZ_TO_INT
765 && XVECLEN (*op0, 0) == 1
766 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCZmode
767 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
768 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
769 && *op1 == const0_rtx)
770 {
771 enum rtx_code new_code = UNKNOWN;
772 switch (*code)
773 {
774 case EQ: new_code = EQ; break;
775 case NE: new_code = NE; break;
776 default: break;
777 }
778
779 if (new_code != UNKNOWN)
780 {
781 *op0 = XVECEXP (*op0, 0, 0);
782 *code = new_code;
783 }
784 }
785
786 /* Simplify cascaded EQ, NE with const0_rtx. */
787 if ((*code == NE || *code == EQ)
788 && (GET_CODE (*op0) == EQ || GET_CODE (*op0) == NE)
789 && GET_MODE (*op0) == SImode
790 && GET_MODE (XEXP (*op0, 0)) == CCZ1mode
791 && REG_P (XEXP (*op0, 0))
792 && XEXP (*op0, 1) == const0_rtx
793 && *op1 == const0_rtx)
794 {
795 if ((*code == EQ && GET_CODE (*op0) == NE)
796 || (*code == NE && GET_CODE (*op0) == EQ))
797 *code = EQ;
798 else
799 *code = NE;
800 *op0 = XEXP (*op0, 0);
801 }
802
803 /* Prefer register over memory as first operand. */
804 if (MEM_P (*op0) && REG_P (*op1))
805 {
806 rtx tem = *op0; *op0 = *op1; *op1 = tem;
807 *code = swap_condition (*code);
808 }
809 }
810
811 /* Emit a compare instruction suitable to implement the comparison
812 OP0 CODE OP1. Return the correct condition RTL to be placed in
813 the IF_THEN_ELSE of the conditional branch testing the result. */
814
815 rtx
816 s390_emit_compare (enum rtx_code code, rtx op0, rtx op1)
817 {
818 enum machine_mode mode = s390_select_ccmode (code, op0, op1);
819 rtx ret = NULL_RTX;
820
821 /* Do not output a redundant compare instruction if a compare_and_swap
822 pattern already computed the result and the machine modes are compatible. */
823 if (s390_compare_emitted
824 && (s390_cc_modes_compatible (GET_MODE (s390_compare_emitted), mode)
825 == GET_MODE (s390_compare_emitted)))
826 ret = gen_rtx_fmt_ee (code, VOIDmode, s390_compare_emitted, const0_rtx);
827 else
828 {
829 rtx cc = gen_rtx_REG (mode, CC_REGNUM);
830
831 emit_insn (gen_rtx_SET (VOIDmode, cc, gen_rtx_COMPARE (mode, op0, op1)));
832 ret = gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
833 }
834 s390_compare_emitted = NULL_RTX;
835 return ret;
836 }
837
838 /* Emit a SImode compare and swap instruction setting MEM to NEW if OLD
839 matches CMP.
840 Return the correct condition RTL to be placed in the IF_THEN_ELSE of the
841 conditional branch testing the result. */
842
843 static rtx
844 s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem, rtx cmp, rtx new)
845 {
846 rtx ret;
847
848 emit_insn (gen_sync_compare_and_swap_ccsi (old, mem, cmp, new));
849 ret = gen_rtx_fmt_ee (code, VOIDmode, s390_compare_emitted, const0_rtx);
850
851 s390_compare_emitted = NULL_RTX;
852
853 return ret;
854 }
855
856 /* Emit a jump instruction to TARGET. If COND is NULL_RTX, emit an
857 unconditional jump, else a conditional jump under condition COND. */
858
859 void
860 s390_emit_jump (rtx target, rtx cond)
861 {
862 rtx insn;
863
864 target = gen_rtx_LABEL_REF (VOIDmode, target);
865 if (cond)
866 target = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, target, pc_rtx);
867
868 insn = gen_rtx_SET (VOIDmode, pc_rtx, target);
869 emit_jump_insn (insn);
870 }
871
872 /* Return branch condition mask to implement a branch
873 specified by CODE. Return -1 for invalid comparisons. */
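/* The mask has one bit per condition code value, CC0 being the most
   significant of the four bits (see the CC0..CC3 constants below).
   For example, a CCZmode EQ test yields CC0 == 8 (binary 1000):
   branch only when the condition code is 0. */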
874
875 int
876 s390_branch_condition_mask (rtx code)
877 {
878 const int CC0 = 1 << 3;
879 const int CC1 = 1 << 2;
880 const int CC2 = 1 << 1;
881 const int CC3 = 1 << 0;
882
883 gcc_assert (GET_CODE (XEXP (code, 0)) == REG);
884 gcc_assert (REGNO (XEXP (code, 0)) == CC_REGNUM);
885 gcc_assert (XEXP (code, 1) == const0_rtx);
886
887 switch (GET_MODE (XEXP (code, 0)))
888 {
889 case CCZmode:
890 case CCZ1mode:
891 switch (GET_CODE (code))
892 {
893 case EQ: return CC0;
894 case NE: return CC1 | CC2 | CC3;
895 default: return -1;
896 }
897 break;
898
899 case CCT1mode:
900 switch (GET_CODE (code))
901 {
902 case EQ: return CC1;
903 case NE: return CC0 | CC2 | CC3;
904 default: return -1;
905 }
906 break;
907
908 case CCT2mode:
909 switch (GET_CODE (code))
910 {
911 case EQ: return CC2;
912 case NE: return CC0 | CC1 | CC3;
913 default: return -1;
914 }
915 break;
916
917 case CCT3mode:
918 switch (GET_CODE (code))
919 {
920 case EQ: return CC3;
921 case NE: return CC0 | CC1 | CC2;
922 default: return -1;
923 }
924 break;
925
926 case CCLmode:
927 switch (GET_CODE (code))
928 {
929 case EQ: return CC0 | CC2;
930 case NE: return CC1 | CC3;
931 default: return -1;
932 }
933 break;
934
935 case CCL1mode:
936 switch (GET_CODE (code))
937 {
938 case LTU: return CC2 | CC3; /* carry */
939 case GEU: return CC0 | CC1; /* no carry */
940 default: return -1;
941 }
942 break;
943
944 case CCL2mode:
945 switch (GET_CODE (code))
946 {
947 case GTU: return CC0 | CC1; /* borrow */
948 case LEU: return CC2 | CC3; /* no borrow */
949 default: return -1;
950 }
951 break;
952
953 case CCL3mode:
954 switch (GET_CODE (code))
955 {
956 case EQ: return CC0 | CC2;
957 case NE: return CC1 | CC3;
958 case LTU: return CC1;
959 case GTU: return CC3;
960 case LEU: return CC1 | CC2;
961 case GEU: return CC2 | CC3;
962 default: return -1;
963 }
964
965 case CCUmode:
966 switch (GET_CODE (code))
967 {
968 case EQ: return CC0;
969 case NE: return CC1 | CC2 | CC3;
970 case LTU: return CC1;
971 case GTU: return CC2;
972 case LEU: return CC0 | CC1;
973 case GEU: return CC0 | CC2;
974 default: return -1;
975 }
976 break;
977
978 case CCURmode:
979 switch (GET_CODE (code))
980 {
981 case EQ: return CC0;
982 case NE: return CC2 | CC1 | CC3;
983 case LTU: return CC2;
984 case GTU: return CC1;
985 case LEU: return CC0 | CC2;
986 case GEU: return CC0 | CC1;
987 default: return -1;
988 }
989 break;
990
991 case CCAPmode:
992 switch (GET_CODE (code))
993 {
994 case EQ: return CC0;
995 case NE: return CC1 | CC2 | CC3;
996 case LT: return CC1 | CC3;
997 case GT: return CC2;
998 case LE: return CC0 | CC1 | CC3;
999 case GE: return CC0 | CC2;
1000 default: return -1;
1001 }
1002 break;
1003
1004 case CCANmode:
1005 switch (GET_CODE (code))
1006 {
1007 case EQ: return CC0;
1008 case NE: return CC1 | CC2 | CC3;
1009 case LT: return CC1;
1010 case GT: return CC2 | CC3;
1011 case LE: return CC0 | CC1;
1012 case GE: return CC0 | CC2 | CC3;
1013 default: return -1;
1014 }
1015 break;
1016
1017 case CCSmode:
1018 switch (GET_CODE (code))
1019 {
1020 case EQ: return CC0;
1021 case NE: return CC1 | CC2 | CC3;
1022 case LT: return CC1;
1023 case GT: return CC2;
1024 case LE: return CC0 | CC1;
1025 case GE: return CC0 | CC2;
1026 case UNORDERED: return CC3;
1027 case ORDERED: return CC0 | CC1 | CC2;
1028 case UNEQ: return CC0 | CC3;
1029 case UNLT: return CC1 | CC3;
1030 case UNGT: return CC2 | CC3;
1031 case UNLE: return CC0 | CC1 | CC3;
1032 case UNGE: return CC0 | CC2 | CC3;
1033 case LTGT: return CC1 | CC2;
1034 default: return -1;
1035 }
1036 break;
1037
1038 case CCSRmode:
1039 switch (GET_CODE (code))
1040 {
1041 case EQ: return CC0;
1042 case NE: return CC2 | CC1 | CC3;
1043 case LT: return CC2;
1044 case GT: return CC1;
1045 case LE: return CC0 | CC2;
1046 case GE: return CC0 | CC1;
1047 case UNORDERED: return CC3;
1048 case ORDERED: return CC0 | CC2 | CC1;
1049 case UNEQ: return CC0 | CC3;
1050 case UNLT: return CC2 | CC3;
1051 case UNGT: return CC1 | CC3;
1052 case UNLE: return CC0 | CC2 | CC3;
1053 case UNGE: return CC0 | CC1 | CC3;
1054 case LTGT: return CC2 | CC1;
1055 default: return -1;
1056 }
1057 break;
1058
1059 default:
1060 return -1;
1061 }
1062 }
1063
1064 /* If INV is false, return assembler mnemonic string to implement
1065 a branch specified by CODE. If INV is true, return mnemonic
1066 for the corresponding inverted branch. */
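/* For example, the mask CC0 == 8 produced for an EQ test maps to the
   mnemonic "e"; the inverted mask (8 ^ 15 == 7) maps to "ne". */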
1067
1068 static const char *
1069 s390_branch_condition_mnemonic (rtx code, int inv)
1070 {
1071 static const char *const mnemonic[16] =
1072 {
1073 NULL, "o", "h", "nle",
1074 "l", "nhe", "lh", "ne",
1075 "e", "nlh", "he", "nl",
1076 "le", "nh", "no", NULL
1077 };
1078
1079 int mask = s390_branch_condition_mask (code);
1080 gcc_assert (mask >= 0);
1081
1082 if (inv)
1083 mask ^= 15;
1084
1085 gcc_assert (mask >= 1 && mask <= 14);
1086
1087 return mnemonic[mask];
1088 }
1089
 1090 /* Return the part of OP which has a value different from DEF.
 1091 The size of the part is determined by MODE.
 1092 Use this function only if you already know that OP really
 1093 contains such a part. */
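/* The parts are scanned starting with the least significant one. For
   example, with a 64-bit HOST_WIDE_INT,
   s390_extract_part (GEN_INT (0x12340000), HImode, 0) returns 0x1234,
   the only halfword of the constant that differs from 0. */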
1094
1095 unsigned HOST_WIDE_INT
1096 s390_extract_part (rtx op, enum machine_mode mode, int def)
1097 {
1098 unsigned HOST_WIDE_INT value = 0;
1099 int max_parts = HOST_BITS_PER_WIDE_INT / GET_MODE_BITSIZE (mode);
1100 int part_bits = GET_MODE_BITSIZE (mode);
1101 unsigned HOST_WIDE_INT part_mask
1102 = ((unsigned HOST_WIDE_INT)1 << part_bits) - 1;
1103 int i;
1104
1105 for (i = 0; i < max_parts; i++)
1106 {
1107 if (i == 0)
1108 value = (unsigned HOST_WIDE_INT) INTVAL (op);
1109 else
1110 value >>= part_bits;
1111
1112 if ((value & part_mask) != (def & part_mask))
1113 return value & part_mask;
1114 }
1115
1116 gcc_unreachable ();
1117 }
1118
1119 /* If OP is an integer constant of mode MODE with exactly one
1120 part of mode PART_MODE unequal to DEF, return the number of that
1121 part. Otherwise, return -1. */
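/* Part 0 is the most significant part of OP. For example,
   s390_single_part (GEN_INT (0xffff0000), SImode, HImode, 0) returns 0,
   since only the high halfword differs from 0, whereas 0xffffffff yields
   -1 because both halfwords differ from DEF. */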
1122
1123 int
1124 s390_single_part (rtx op,
1125 enum machine_mode mode,
1126 enum machine_mode part_mode,
1127 int def)
1128 {
1129 unsigned HOST_WIDE_INT value = 0;
1130 int n_parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (part_mode);
1131 unsigned HOST_WIDE_INT part_mask
1132 = ((unsigned HOST_WIDE_INT)1 << GET_MODE_BITSIZE (part_mode)) - 1;
1133 int i, part = -1;
1134
1135 if (GET_CODE (op) != CONST_INT)
1136 return -1;
1137
1138 for (i = 0; i < n_parts; i++)
1139 {
1140 if (i == 0)
1141 value = (unsigned HOST_WIDE_INT) INTVAL (op);
1142 else
1143 value >>= GET_MODE_BITSIZE (part_mode);
1144
1145 if ((value & part_mask) != (def & part_mask))
1146 {
1147 if (part != -1)
1148 return -1;
1149 else
1150 part = i;
1151 }
1152 }
1153 return part == -1 ? -1 : n_parts - 1 - part;
1154 }
1155
1156 /* Check whether we can (and want to) split a double-word
1157 move in mode MODE from SRC to DST into two single-word
1158 moves, moving the subword FIRST_SUBWORD first. */
1159
1160 bool
1161 s390_split_ok_p (rtx dst, rtx src, enum machine_mode mode, int first_subword)
1162 {
1163 /* Floating point registers cannot be split. */
1164 if (FP_REG_P (src) || FP_REG_P (dst))
1165 return false;
1166
1167 /* We don't need to split if operands are directly accessible. */
1168 if (s_operand (src, mode) || s_operand (dst, mode))
1169 return false;
1170
1171 /* Non-offsettable memory references cannot be split. */
1172 if ((GET_CODE (src) == MEM && !offsettable_memref_p (src))
1173 || (GET_CODE (dst) == MEM && !offsettable_memref_p (dst)))
1174 return false;
1175
1176 /* Moving the first subword must not clobber a register
1177 needed to move the second subword. */
1178 if (register_operand (dst, mode))
1179 {
1180 rtx subreg = operand_subword (dst, first_subword, 0, mode);
1181 if (reg_overlap_mentioned_p (subreg, src))
1182 return false;
1183 }
1184
1185 return true;
1186 }
1187
1188 /* Return true if it can be proven that [MEM1, MEM1 + SIZE]
1189 and [MEM2, MEM2 + SIZE] do overlap and false
1190 otherwise. */
1191
1192 bool
1193 s390_overlap_p (rtx mem1, rtx mem2, HOST_WIDE_INT size)
1194 {
1195 rtx addr1, addr2, addr_delta;
1196 HOST_WIDE_INT delta;
1197
1198 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
1199 return true;
1200
1201 if (size == 0)
1202 return false;
1203
1204 addr1 = XEXP (mem1, 0);
1205 addr2 = XEXP (mem2, 0);
1206
1207 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
1208
1209 /* This overlapping check is used by peepholes merging memory block operations.
1210 Overlapping operations would otherwise be recognized by the S/390 hardware
1211 and would fall back to a slower implementation. Allowing overlapping
1212 operations would lead to slow code but not to wrong code. Therefore we are
1213 somewhat optimistic if we cannot prove that the memory blocks are
1214 overlapping.
1215 That's why we return false here although this may accept operations on
1216 overlapping memory areas. */
1217 if (!addr_delta || GET_CODE (addr_delta) != CONST_INT)
1218 return false;
1219
1220 delta = INTVAL (addr_delta);
1221
1222 if (delta == 0
1223 || (delta > 0 && delta < size)
1224 || (delta < 0 && -delta < size))
1225 return true;
1226
1227 return false;
1228 }
1229
1230 /* Check whether the address of memory reference MEM2 equals exactly
1231 the address of memory reference MEM1 plus DELTA. Return true if
1232 we can prove this to be the case, false otherwise. */
1233
1234 bool
1235 s390_offset_p (rtx mem1, rtx mem2, rtx delta)
1236 {
1237 rtx addr1, addr2, addr_delta;
1238
1239 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
1240 return false;
1241
1242 addr1 = XEXP (mem1, 0);
1243 addr2 = XEXP (mem2, 0);
1244
1245 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
1246 if (!addr_delta || !rtx_equal_p (addr_delta, delta))
1247 return false;
1248
1249 return true;
1250 }
1251
1252 /* Expand logical operator CODE in mode MODE with operands OPERANDS. */
1253
1254 void
1255 s390_expand_logical_operator (enum rtx_code code, enum machine_mode mode,
1256 rtx *operands)
1257 {
1258 enum machine_mode wmode = mode;
1259 rtx dst = operands[0];
1260 rtx src1 = operands[1];
1261 rtx src2 = operands[2];
1262 rtx op, clob, tem;
1263
1264 /* If we cannot handle the operation directly, use a temp register. */
1265 if (!s390_logical_operator_ok_p (operands))
1266 dst = gen_reg_rtx (mode);
1267
1268 /* QImode and HImode patterns make sense only if we have a destination
1269 in memory. Otherwise perform the operation in SImode. */
1270 if ((mode == QImode || mode == HImode) && GET_CODE (dst) != MEM)
1271 wmode = SImode;
1272
1273 /* Widen operands if required. */
1274 if (mode != wmode)
1275 {
1276 if (GET_CODE (dst) == SUBREG
1277 && (tem = simplify_subreg (wmode, dst, mode, 0)) != 0)
1278 dst = tem;
1279 else if (REG_P (dst))
1280 dst = gen_rtx_SUBREG (wmode, dst, 0);
1281 else
1282 dst = gen_reg_rtx (wmode);
1283
1284 if (GET_CODE (src1) == SUBREG
1285 && (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)
1286 src1 = tem;
1287 else if (GET_MODE (src1) != VOIDmode)
1288 src1 = gen_rtx_SUBREG (wmode, force_reg (mode, src1), 0);
1289
1290 if (GET_CODE (src2) == SUBREG
1291 && (tem = simplify_subreg (wmode, src2, mode, 0)) != 0)
1292 src2 = tem;
1293 else if (GET_MODE (src2) != VOIDmode)
1294 src2 = gen_rtx_SUBREG (wmode, force_reg (mode, src2), 0);
1295 }
1296
1297 /* Emit the instruction. */
1298 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, wmode, src1, src2));
1299 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
1300 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
1301
1302 /* Fix up the destination if needed. */
1303 if (dst != operands[0])
1304 emit_move_insn (operands[0], gen_lowpart (mode, dst));
1305 }
1306
1307 /* Check whether OPERANDS are OK for a logical operation (AND, IOR, XOR). */
1308
1309 bool
1310 s390_logical_operator_ok_p (rtx *operands)
1311 {
1312 /* If the destination operand is in memory, it needs to coincide
1313 with one of the source operands. After reload, it has to be
1314 the first source operand. */
1315 if (GET_CODE (operands[0]) == MEM)
1316 return rtx_equal_p (operands[0], operands[1])
1317 || (!reload_completed && rtx_equal_p (operands[0], operands[2]));
1318
1319 return true;
1320 }
1321
1322 /* Narrow logical operation CODE of memory operand MEMOP with immediate
1323 operand IMMOP to switch from SS to SI type instructions. */
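/* SS-type instructions (NC, OC, XC) operate storage-to-storage, while
   SI-type instructions (NI, OI, XI) apply an immediate byte to a single
   byte in storage. For example, an SImode AND of a memory operand with
   the mask 0xffffff00 only affects the byte at offset 3, so MEMOP is
   narrowed to that QImode byte and IMMOP becomes 0x00, which matches a
   single NI. */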
1324
1325 void
1326 s390_narrow_logical_operator (enum rtx_code code, rtx *memop, rtx *immop)
1327 {
1328 int def = code == AND ? -1 : 0;
1329 HOST_WIDE_INT mask;
1330 int part;
1331
1332 gcc_assert (GET_CODE (*memop) == MEM);
1333 gcc_assert (!MEM_VOLATILE_P (*memop));
1334
1335 mask = s390_extract_part (*immop, QImode, def);
1336 part = s390_single_part (*immop, GET_MODE (*memop), QImode, def);
1337 gcc_assert (part >= 0);
1338
1339 *memop = adjust_address (*memop, QImode, part);
1340 *immop = gen_int_mode (mask, QImode);
1341 }
1342
1343
1344 /* How to allocate a 'struct machine_function'. */
1345
1346 static struct machine_function *
1347 s390_init_machine_status (void)
1348 {
1349 return ggc_alloc_cleared (sizeof (struct machine_function));
1350 }
1351
1352 /* Change optimizations to be performed, depending on the
1353 optimization level.
1354
1355 LEVEL is the optimization level specified; 2 if `-O2' is
1356 specified, 1 if `-O' is specified, and 0 if neither is specified.
1357
1358 SIZE is nonzero if `-Os' is specified and zero otherwise. */
1359
1360 void
1361 optimization_options (int level ATTRIBUTE_UNUSED, int size ATTRIBUTE_UNUSED)
1362 {
1363 /* ??? There are apparently still problems with -fcaller-saves. */
1364 flag_caller_saves = 0;
1365
1366 /* By default, always emit DWARF-2 unwind info. This allows debugging
1367 without maintaining a stack frame back-chain. */
1368 flag_asynchronous_unwind_tables = 1;
1369
1370 /* Use MVCLE instructions to decrease code size if requested. */
1371 if (size != 0)
1372 target_flags |= MASK_MVCLE;
1373 }
1374
1375 /* Return true if ARG is the name of a processor. Set *TYPE and *FLAGS
1376 to the associated processor_type and processor_flags if so. */
1377
1378 static bool
1379 s390_handle_arch_option (const char *arg,
1380 enum processor_type *type,
1381 enum processor_flags *flags)
1382 {
1383 static struct pta
1384 {
1385 const char *const name; /* processor name or nickname. */
1386 const enum processor_type processor;
1387 const enum processor_flags flags;
1388 }
1389 const processor_alias_table[] =
1390 {
1391 {"g5", PROCESSOR_9672_G5, PF_IEEE_FLOAT},
1392 {"g6", PROCESSOR_9672_G6, PF_IEEE_FLOAT},
1393 {"z900", PROCESSOR_2064_Z900, PF_IEEE_FLOAT | PF_ZARCH},
1394 {"z990", PROCESSOR_2084_Z990, PF_IEEE_FLOAT | PF_ZARCH
1395 | PF_LONG_DISPLACEMENT},
1396 {"z9-109", PROCESSOR_2094_Z9_109, PF_IEEE_FLOAT | PF_ZARCH
1397 | PF_LONG_DISPLACEMENT | PF_EXTIMM},
1398 {"z9-ec", PROCESSOR_2094_Z9_109, PF_IEEE_FLOAT | PF_ZARCH
1399 | PF_LONG_DISPLACEMENT | PF_EXTIMM | PF_DFP },
1400 {"z10", PROCESSOR_2097_Z10, PF_IEEE_FLOAT | PF_ZARCH
1401 | PF_LONG_DISPLACEMENT | PF_EXTIMM | PF_DFP | PF_Z10},
1402 };
1403 size_t i;
1404
1405 for (i = 0; i < ARRAY_SIZE (processor_alias_table); i++)
1406 if (strcmp (arg, processor_alias_table[i].name) == 0)
1407 {
1408 *type = processor_alias_table[i].processor;
1409 *flags = processor_alias_table[i].flags;
1410 return true;
1411 }
1412 return false;
1413 }
1414
1415 /* Implement TARGET_HANDLE_OPTION. */
1416
1417 static bool
1418 s390_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
1419 {
1420 switch (code)
1421 {
1422 case OPT_march_:
1423 return s390_handle_arch_option (arg, &s390_arch, &s390_arch_flags);
1424
1425 case OPT_mstack_guard_:
1426 if (sscanf (arg, HOST_WIDE_INT_PRINT_DEC, &s390_stack_guard) != 1)
1427 return false;
1428 if (exact_log2 (s390_stack_guard) == -1)
1429 error ("stack guard value must be an exact power of 2");
1430 return true;
1431
1432 case OPT_mstack_size_:
1433 if (sscanf (arg, HOST_WIDE_INT_PRINT_DEC, &s390_stack_size) != 1)
1434 return false;
1435 if (exact_log2 (s390_stack_size) == -1)
1436 error ("stack size must be an exact power of 2");
1437 return true;
1438
1439 case OPT_mtune_:
1440 return s390_handle_arch_option (arg, &s390_tune, &s390_tune_flags);
1441
1442 case OPT_mwarn_framesize_:
1443 return sscanf (arg, HOST_WIDE_INT_PRINT_DEC, &s390_warn_framesize) == 1;
1444
1445 default:
1446 return true;
1447 }
1448 }
1449
1450 void
1451 override_options (void)
1452 {
1453 /* Set up function hooks. */
1454 init_machine_status = s390_init_machine_status;
1455
1456 /* Architecture mode defaults according to ABI. */
1457 if (!(target_flags_explicit & MASK_ZARCH))
1458 {
1459 if (TARGET_64BIT)
1460 target_flags |= MASK_ZARCH;
1461 else
1462 target_flags &= ~MASK_ZARCH;
1463 }
1464
1465 /* Determine processor architectural level. */
1466 if (!s390_arch_string)
1467 {
 1468 s390_arch_string = TARGET_ZARCH ? "z900" : "g5";
1469 s390_handle_arch_option (s390_arch_string, &s390_arch, &s390_arch_flags);
1470 }
1471
1472 /* Determine processor to tune for. */
1473 if (s390_tune == PROCESSOR_max)
1474 {
1475 s390_tune = s390_arch;
1476 s390_tune_flags = s390_arch_flags;
1477 }
1478
1479 /* Sanity checks. */
1480 if (TARGET_ZARCH && !TARGET_CPU_ZARCH)
1481 error ("z/Architecture mode not supported on %s", s390_arch_string);
1482 if (TARGET_64BIT && !TARGET_ZARCH)
1483 error ("64-bit ABI not supported in ESA/390 mode");
1484
1485 if (TARGET_HARD_DFP && !TARGET_DFP)
1486 {
1487 if (target_flags_explicit & MASK_HARD_DFP)
1488 {
1489 if (!TARGET_CPU_DFP)
1490 error ("Hardware decimal floating point instructions"
1491 " not available on %s", s390_arch_string);
1492 if (!TARGET_ZARCH)
1493 error ("Hardware decimal floating point instructions"
1494 " not available in ESA/390 mode");
1495 }
1496 else
1497 target_flags &= ~MASK_HARD_DFP;
1498 }
1499
1500 if ((target_flags_explicit & MASK_SOFT_FLOAT) && TARGET_SOFT_FLOAT)
1501 {
1502 if ((target_flags_explicit & MASK_HARD_DFP) && TARGET_HARD_DFP)
1503 error ("-mhard-dfp can't be used in conjunction with -msoft-float");
1504
1505 target_flags &= ~MASK_HARD_DFP;
1506 }
1507
1508 /* Set processor cost function. */
1509 switch (s390_tune)
1510 {
1511 case PROCESSOR_2084_Z990:
1512 s390_cost = &z990_cost;
1513 break;
1514 case PROCESSOR_2094_Z9_109:
1515 s390_cost = &z9_109_cost;
1516 break;
1517 case PROCESSOR_2097_Z10:
1518 s390_cost = &z10_cost;
1519 break;
1520 default:
1521 s390_cost = &z900_cost;
1522 }
1523
1524 if (TARGET_BACKCHAIN && TARGET_PACKED_STACK && TARGET_HARD_FLOAT)
1525 error ("-mbackchain -mpacked-stack -mhard-float are not supported "
1526 "in combination");
1527
1528 if (s390_stack_size)
1529 {
1530 if (s390_stack_guard >= s390_stack_size)
1531 error ("stack size must be greater than the stack guard value");
1532 else if (s390_stack_size > 1 << 16)
1533 error ("stack size must not be greater than 64k");
1534 }
1535 else if (s390_stack_guard)
1536 error ("-mstack-guard implies use of -mstack-size");
1537
1538 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
1539 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
1540 target_flags |= MASK_LONG_DOUBLE_128;
1541 #endif
1542 }
1543
 1544 /* Map from hard register number to the smallest register class containing that register. */
1545
1546 const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
1547 { GENERAL_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1548 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1549 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1550 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1551 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1552 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1553 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1554 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1555 ADDR_REGS, CC_REGS, ADDR_REGS, ADDR_REGS,
1556 ACCESS_REGS, ACCESS_REGS
1557 };
1558
1559 /* Return attribute type of insn. */
1560
1561 static enum attr_type
1562 s390_safe_attr_type (rtx insn)
1563 {
1564 if (recog_memoized (insn) >= 0)
1565 return get_attr_type (insn);
1566 else
1567 return TYPE_NONE;
1568 }
1569
1570 /* Return true if DISP is a valid short displacement. */
1571
1572 static bool
1573 s390_short_displacement (rtx disp)
1574 {
1575 /* No displacement is OK. */
1576 if (!disp)
1577 return true;
1578
1579 /* Integer displacement in range. */
1580 if (GET_CODE (disp) == CONST_INT)
1581 return INTVAL (disp) >= 0 && INTVAL (disp) < 4096;
1582
1583 /* GOT offset is not OK, the GOT can be large. */
1584 if (GET_CODE (disp) == CONST
1585 && GET_CODE (XEXP (disp, 0)) == UNSPEC
1586 && (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
1587 || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
1588 return false;
1589
1590 /* All other symbolic constants are literal pool references,
1591 which are OK as the literal pool must be small. */
1592 if (GET_CODE (disp) == CONST)
1593 return true;
1594
1595 return false;
1596 }
1597
1598 /* Decompose a RTL expression ADDR for a memory address into
1599 its components, returned in OUT.
1600
1601 Returns false if ADDR is not a valid memory address, true
1602 otherwise. If OUT is NULL, don't return the components,
1603 but check for validity only.
1604
1605 Note: Only addresses in canonical form are recognized.
1606 LEGITIMIZE_ADDRESS should convert non-canonical forms to the
1607 canonical form so that they will be recognized. */
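/* For example, the canonical address
   (plus (plus (reg X) (reg B)) (const_int 8)) decomposes into index X,
   base B and displacement 8; a lone (reg B) is just a base; a
   constant-pool SYMBOL_REF is turned into an UNSPEC_LTREL_OFFSET
   displacement relative to the literal pool base register. */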
1608
1609 static int
1610 s390_decompose_address (rtx addr, struct s390_address *out)
1611 {
1612 HOST_WIDE_INT offset = 0;
1613 rtx base = NULL_RTX;
1614 rtx indx = NULL_RTX;
1615 rtx disp = NULL_RTX;
1616 rtx orig_disp;
1617 bool pointer = false;
1618 bool base_ptr = false;
1619 bool indx_ptr = false;
1620 bool literal_pool = false;
1621
1622 /* We may need to substitute the literal pool base register into the address
1623 below. However, at this point we do not know which register is going to
1624 be used as base, so we substitute the arg pointer register. This is going
1625 to be treated as holding a pointer below -- it shouldn't be used for any
1626 other purpose. */
1627 rtx fake_pool_base = gen_rtx_REG (Pmode, ARG_POINTER_REGNUM);
1628
1629 /* Decompose address into base + index + displacement. */
1630
1631 if (GET_CODE (addr) == REG || GET_CODE (addr) == UNSPEC)
1632 base = addr;
1633
1634 else if (GET_CODE (addr) == PLUS)
1635 {
1636 rtx op0 = XEXP (addr, 0);
1637 rtx op1 = XEXP (addr, 1);
1638 enum rtx_code code0 = GET_CODE (op0);
1639 enum rtx_code code1 = GET_CODE (op1);
1640
1641 if (code0 == REG || code0 == UNSPEC)
1642 {
1643 if (code1 == REG || code1 == UNSPEC)
1644 {
1645 indx = op0; /* index + base */
1646 base = op1;
1647 }
1648
1649 else
1650 {
1651 base = op0; /* base + displacement */
1652 disp = op1;
1653 }
1654 }
1655
1656 else if (code0 == PLUS)
1657 {
1658 indx = XEXP (op0, 0); /* index + base + disp */
1659 base = XEXP (op0, 1);
1660 disp = op1;
1661 }
1662
1663 else
1664 {
1665 return false;
1666 }
1667 }
1668
1669 else
1670 disp = addr; /* displacement */
1671
1672 /* Extract integer part of displacement. */
1673 orig_disp = disp;
1674 if (disp)
1675 {
1676 if (GET_CODE (disp) == CONST_INT)
1677 {
1678 offset = INTVAL (disp);
1679 disp = NULL_RTX;
1680 }
1681 else if (GET_CODE (disp) == CONST
1682 && GET_CODE (XEXP (disp, 0)) == PLUS
1683 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
1684 {
1685 offset = INTVAL (XEXP (XEXP (disp, 0), 1));
1686 disp = XEXP (XEXP (disp, 0), 0);
1687 }
1688 }
1689
1690 /* Strip off CONST here to avoid special case tests later. */
1691 if (disp && GET_CODE (disp) == CONST)
1692 disp = XEXP (disp, 0);
1693
1694 /* We can convert literal pool addresses to
1695 displacements by basing them off the base register. */
1696 if (disp && GET_CODE (disp) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (disp))
1697 {
1698 /* Either base or index must be free to hold the base register. */
1699 if (!base)
1700 base = fake_pool_base, literal_pool = true;
1701 else if (!indx)
1702 indx = fake_pool_base, literal_pool = true;
1703 else
1704 return false;
1705
1706 /* Mark up the displacement. */
1707 disp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, disp),
1708 UNSPEC_LTREL_OFFSET);
1709 }
1710
1711 /* Validate base register. */
1712 if (base)
1713 {
1714 if (GET_CODE (base) == UNSPEC)
1715 switch (XINT (base, 1))
1716 {
1717 case UNSPEC_LTREF:
1718 if (!disp)
1719 disp = gen_rtx_UNSPEC (Pmode,
1720 gen_rtvec (1, XVECEXP (base, 0, 0)),
1721 UNSPEC_LTREL_OFFSET);
1722 else
1723 return false;
1724
1725 base = XVECEXP (base, 0, 1);
1726 break;
1727
1728 case UNSPEC_LTREL_BASE:
1729 if (XVECLEN (base, 0) == 1)
1730 base = fake_pool_base, literal_pool = true;
1731 else
1732 base = XVECEXP (base, 0, 1);
1733 break;
1734
1735 default:
1736 return false;
1737 }
1738
1739 if (!REG_P (base)
1740 || (GET_MODE (base) != SImode
1741 && GET_MODE (base) != Pmode))
1742 return false;
1743
1744 if (REGNO (base) == STACK_POINTER_REGNUM
1745 || REGNO (base) == FRAME_POINTER_REGNUM
1746 || ((reload_completed || reload_in_progress)
1747 && frame_pointer_needed
1748 && REGNO (base) == HARD_FRAME_POINTER_REGNUM)
1749 || REGNO (base) == ARG_POINTER_REGNUM
1750 || (flag_pic
1751 && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
1752 pointer = base_ptr = true;
1753
1754 if ((reload_completed || reload_in_progress)
1755 && base == cfun->machine->base_reg)
1756 pointer = base_ptr = literal_pool = true;
1757 }
1758
1759 /* Validate index register. */
1760 if (indx)
1761 {
1762 if (GET_CODE (indx) == UNSPEC)
1763 switch (XINT (indx, 1))
1764 {
1765 case UNSPEC_LTREF:
1766 if (!disp)
1767 disp = gen_rtx_UNSPEC (Pmode,
1768 gen_rtvec (1, XVECEXP (indx, 0, 0)),
1769 UNSPEC_LTREL_OFFSET);
1770 else
1771 return false;
1772
1773 indx = XVECEXP (indx, 0, 1);
1774 break;
1775
1776 case UNSPEC_LTREL_BASE:
1777 if (XVECLEN (indx, 0) == 1)
1778 indx = fake_pool_base, literal_pool = true;
1779 else
1780 indx = XVECEXP (indx, 0, 1);
1781 break;
1782
1783 default:
1784 return false;
1785 }
1786
1787 if (!REG_P (indx)
1788 || (GET_MODE (indx) != SImode
1789 && GET_MODE (indx) != Pmode))
1790 return false;
1791
1792 if (REGNO (indx) == STACK_POINTER_REGNUM
1793 || REGNO (indx) == FRAME_POINTER_REGNUM
1794 || ((reload_completed || reload_in_progress)
1795 && frame_pointer_needed
1796 && REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
1797 || REGNO (indx) == ARG_POINTER_REGNUM
1798 || (flag_pic
1799 && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
1800 pointer = indx_ptr = true;
1801
1802 if ((reload_completed || reload_in_progress)
1803 && indx == cfun->machine->base_reg)
1804 pointer = indx_ptr = literal_pool = true;
1805 }
1806
1807 /* Prefer to use pointer as base, not index. */
1808 if (base && indx && !base_ptr
1809 && (indx_ptr || (!REG_POINTER (base) && REG_POINTER (indx))))
1810 {
1811 rtx tmp = base;
1812 base = indx;
1813 indx = tmp;
1814 }
1815
1816 /* Validate displacement. */
1817 if (!disp)
1818 {
1819 /* If virtual registers are involved, the displacement will change later
1820 anyway as the virtual registers get eliminated. This could make a
1821 valid displacement invalid, but it is more likely to make an invalid
1822 displacement valid, because we sometimes access the register save area
1823 via negative offsets to one of those registers.
1824 Thus we don't check the displacement for validity here. If after
1825 elimination the displacement turns out to be invalid after all,
1826 this is fixed up by reload in any case. */
1827 if (base != arg_pointer_rtx
1828 && indx != arg_pointer_rtx
1829 && base != return_address_pointer_rtx
1830 && indx != return_address_pointer_rtx
1831 && base != frame_pointer_rtx
1832 && indx != frame_pointer_rtx
1833 && base != virtual_stack_vars_rtx
1834 && indx != virtual_stack_vars_rtx)
1835 if (!DISP_IN_RANGE (offset))
1836 return false;
1837 }
1838 else
1839 {
1840 /* All the special cases are pointers. */
1841 pointer = true;
1842
1843 /* In the small-PIC case, the linker converts @GOT
1844 and @GOTNTPOFF offsets to possible displacements. */
1845 if (GET_CODE (disp) == UNSPEC
1846 && (XINT (disp, 1) == UNSPEC_GOT
1847 || XINT (disp, 1) == UNSPEC_GOTNTPOFF)
1848 && flag_pic == 1)
1849 {
1850 ;
1851 }
1852
1853 /* Accept chunkified literal pool symbol references. */
1854 else if (cfun && cfun->machine
1855 && cfun->machine->decomposed_literal_pool_addresses_ok_p
1856 && GET_CODE (disp) == MINUS
1857 && GET_CODE (XEXP (disp, 0)) == LABEL_REF
1858 && GET_CODE (XEXP (disp, 1)) == LABEL_REF)
1859 {
1860 ;
1861 }
1862
1863 /* Accept literal pool references. */
1864 else if (GET_CODE (disp) == UNSPEC
1865 && XINT (disp, 1) == UNSPEC_LTREL_OFFSET)
1866 {
1867 orig_disp = gen_rtx_CONST (Pmode, disp);
1868 if (offset)
1869 {
1870 /* If we have an offset, make sure it does not
1871 exceed the size of the constant pool entry. */
1872 rtx sym = XVECEXP (disp, 0, 0);
1873 if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))
1874 return false;
1875
1876 orig_disp = plus_constant (orig_disp, offset);
1877 }
1878 }
1879
1880 else
1881 return false;
1882 }
1883
1884 if (!base && !indx)
1885 pointer = true;
1886
1887 if (out)
1888 {
1889 out->base = base;
1890 out->indx = indx;
1891 out->disp = orig_disp;
1892 out->pointer = pointer;
1893 out->literal_pool = literal_pool;
1894 }
1895
1896 return true;
1897 }
1898
1899 /* Decompose a RTL expression OP for a shift count into its components,
1900 and return the base register in BASE and the offset in OFFSET.
1901
1902 Return true if OP is a valid shift count, false if not. */
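/* For example, (plus (reg R) (const_int 7)) yields base R and offset 7,
   a plain (const_int 7) yields a NULL base and offset 7, and a plain
   register yields that register and offset 0. */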
1903
1904 bool
1905 s390_decompose_shift_count (rtx op, rtx *base, HOST_WIDE_INT *offset)
1906 {
1907 HOST_WIDE_INT off = 0;
1908
1909 /* We can have an integer constant, an address register,
1910 or a sum of the two. */
1911 if (GET_CODE (op) == CONST_INT)
1912 {
1913 off = INTVAL (op);
1914 op = NULL_RTX;
1915 }
1916 if (op && GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT)
1917 {
1918 off = INTVAL (XEXP (op, 1));
1919 op = XEXP (op, 0);
1920 }
1921 while (op && GET_CODE (op) == SUBREG)
1922 op = SUBREG_REG (op);
1923
1924 if (op && GET_CODE (op) != REG)
1925 return false;
1926
1927 if (offset)
1928 *offset = off;
1929 if (base)
1930 *base = op;
1931
1932 return true;
1933 }
1934
1935
 1936 /* Return true if the address of memory operand OP is valid and has no index register. */
1937
1938 bool
1939 s390_legitimate_address_without_index_p (rtx op)
1940 {
1941 struct s390_address addr;
1942
1943 if (!s390_decompose_address (XEXP (op, 0), &addr))
1944 return false;
1945 if (addr.indx)
1946 return false;
1947
1948 return true;
1949 }
1950
1951
1952 /* Evaluates constraint strings described by the regular expression
1953 ([A|B](Q|R|S|T))|U|W and returns 1 if OP is a valid operand for the
1954 constraint given in STR, and 0 otherwise. */
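
/* Rough meaning of the letters as handled below (the code itself is
   authoritative):
     Q        memory operand: base + short displacement, no index
     R        memory operand: base + index + short displacement
     S        memory operand: base + long displacement, no index
     T        memory operand: base + index + long displacement
     AQ..AT   like Q..T, but additionally offsettable and non-volatile
     BQ..BT   like Q..T, but excluding literal-pool addresses
     U        address (pointer) operand with short displacement
     W        address (pointer) operand with long displacement
     Y        shift-count operand  */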
1955
1956 int
1957 s390_mem_constraint (const char *str, rtx op)
1958 {
1959 struct s390_address addr;
1960 char c = str[0];
1961
1962 /* Check for offsettable variants of memory constraints. */
1963 if (c == 'A')
1964 {
1965 /* Only accept non-volatile MEMs. */
1966 if (!MEM_P (op) || MEM_VOLATILE_P (op))
1967 return 0;
1968
1969 if ((reload_completed || reload_in_progress)
1970 ? !offsettable_memref_p (op) : !offsettable_nonstrict_memref_p (op))
1971 return 0;
1972
1973 c = str[1];
1974 }
1975
1976 /* Check for non-literal-pool variants of memory constraints. */
1977 else if (c == 'B')
1978 {
1979 if (GET_CODE (op) != MEM)
1980 return 0;
1981 if (!s390_decompose_address (XEXP (op, 0), &addr))
1982 return 0;
1983 if (addr.literal_pool)
1984 return 0;
1985
1986 c = str[1];
1987 }
1988
1989 switch (c)
1990 {
1991 case 'Q':
1992 if (GET_CODE (op) != MEM)
1993 return 0;
1994 if (!s390_decompose_address (XEXP (op, 0), &addr))
1995 return 0;
1996 if (addr.indx)
1997 return 0;
1998
1999 if (TARGET_LONG_DISPLACEMENT)
2000 {
2001 if (!s390_short_displacement (addr.disp))
2002 return 0;
2003 }
2004 break;
2005
2006 case 'R':
2007 if (GET_CODE (op) != MEM)
2008 return 0;
2009
2010 if (TARGET_LONG_DISPLACEMENT)
2011 {
2012 if (!s390_decompose_address (XEXP (op, 0), &addr))
2013 return 0;
2014 if (!s390_short_displacement (addr.disp))
2015 return 0;
2016 }
2017 break;
2018
2019 case 'S':
2020 if (!TARGET_LONG_DISPLACEMENT)
2021 return 0;
2022 if (GET_CODE (op) != MEM)
2023 return 0;
2024 if (!s390_decompose_address (XEXP (op, 0), &addr))
2025 return 0;
2026 if (addr.indx)
2027 return 0;
2028 if (s390_short_displacement (addr.disp))
2029 return 0;
2030 break;
2031
2032 case 'T':
2033 if (!TARGET_LONG_DISPLACEMENT)
2034 return 0;
2035 if (GET_CODE (op) != MEM)
2036 return 0;
2037 /* Any invalid address here will be fixed up by reload,
2038 so accept it for the most generic constraint. */
2039 if (s390_decompose_address (XEXP (op, 0), &addr)
2040 && s390_short_displacement (addr.disp))
2041 return 0;
2042 break;
2043
2044 case 'U':
2045 if (TARGET_LONG_DISPLACEMENT)
2046 {
2047 if (!s390_decompose_address (op, &addr))
2048 return 0;
2049 if (!s390_short_displacement (addr.disp))
2050 return 0;
2051 }
2052 break;
2053
2054 case 'W':
2055 if (!TARGET_LONG_DISPLACEMENT)
2056 return 0;
2057 /* Any invalid address here will be fixed up by reload,
2058 so accept it for the most generic constraint. */
2059 if (s390_decompose_address (op, &addr)
2060 && s390_short_displacement (addr.disp))
2061 return 0;
2062 break;
2063
2064 case 'Y':
2065 /* Simply check for the basic form of a shift count. Reload will
2066 take care of making sure we have a proper base register. */
2067 if (!s390_decompose_shift_count (op, NULL, NULL))
2068 return 0;
2069 break;
2070
2071 default:
2072 return 0;
2073 }
2074
2075 return 1;
2076 }
2077
2078
2079
2080 /* Evaluates constraint strings starting with letter O. Input
2081 parameter C is the letter following the "O" in the constraint
2082 string. Returns 1 if VALUE meets the respective constraint and 0
2083 otherwise. */
2084
2085 int
2086 s390_O_constraint_str (const char c, HOST_WIDE_INT value)
2087 {
2088 if (!TARGET_EXTIMM)
2089 return 0;
2090
2091 switch (c)
2092 {
2093 case 's':
2094 return trunc_int_for_mode (value, SImode) == value;
2095
2096 case 'p':
2097 return value == 0
2098 || s390_single_part (GEN_INT (value), DImode, SImode, 0) == 1;
2099
2100 case 'n':
2101 return s390_single_part (GEN_INT (value - 1), DImode, SImode, -1) == 1;
2102
2103 default:
2104 gcc_unreachable ();
2105 }
2106 }
2107
2108
2109 /* Evaluates constraint strings starting with letter N. Parameter STR
2110 contains the letters following letter "N" in the constraint string.
2111 Returns true if VALUE matches the constraint. */
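
/* Layout of STR as read below (a sketch; the code is authoritative):
     str[0]  requested part number, or 'x' for any part
     str[1]  mode of the part: 'Q' (QImode), 'H' (HImode) or 'S' (SImode)
     str[2]  mode of the whole value: 'H', 'S' or 'D'
     str[3]  required value of the remaining parts: '0' or 'F' (all ones)
   For instance, constraint "NxHD0" asks whether VALUE is a DImode constant
   with exactly one nonzero HImode part.  */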
2112
2113 int
2114 s390_N_constraint_str (const char *str, HOST_WIDE_INT value)
2115 {
2116 enum machine_mode mode, part_mode;
2117 int def;
2118 int part, part_goal;
2119
2120
2121 if (str[0] == 'x')
2122 part_goal = -1;
2123 else
2124 part_goal = str[0] - '0';
2125
2126 switch (str[1])
2127 {
2128 case 'Q':
2129 part_mode = QImode;
2130 break;
2131 case 'H':
2132 part_mode = HImode;
2133 break;
2134 case 'S':
2135 part_mode = SImode;
2136 break;
2137 default:
2138 return 0;
2139 }
2140
2141 switch (str[2])
2142 {
2143 case 'H':
2144 mode = HImode;
2145 break;
2146 case 'S':
2147 mode = SImode;
2148 break;
2149 case 'D':
2150 mode = DImode;
2151 break;
2152 default:
2153 return 0;
2154 }
2155
2156 switch (str[3])
2157 {
2158 case '0':
2159 def = 0;
2160 break;
2161 case 'F':
2162 def = -1;
2163 break;
2164 default:
2165 return 0;
2166 }
2167
2168 if (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (part_mode))
2169 return 0;
2170
2171 part = s390_single_part (GEN_INT (value), mode, part_mode, def);
2172 if (part < 0)
2173 return 0;
2174 if (part_goal != -1 && part_goal != part)
2175 return 0;
2176
2177 return 1;
2178 }
2179
2180
2181 /* Returns true if the input parameter VALUE is a float zero. */
2182
2183 int
2184 s390_float_const_zero_p (rtx value)
2185 {
2186 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
2187 && value == CONST0_RTX (GET_MODE (value)));
2188 }
2189
2190
2191 /* Compute a (partial) cost for rtx X. Return true if the complete
2192 cost has been computed, and false if subexpressions should be
2193 scanned. In either case, *TOTAL contains the cost result.
2194 CODE contains GET_CODE (x), OUTER_CODE contains the code
2195 of the superexpression of x. */
2196
2197 static bool
2198 s390_rtx_costs (rtx x, int code, int outer_code, int *total)
2199 {
2200 switch (code)
2201 {
2202 case CONST:
2203 case CONST_INT:
2204 case LABEL_REF:
2205 case SYMBOL_REF:
2206 case CONST_DOUBLE:
2207 case MEM:
2208 *total = 0;
2209 return true;
2210
2211 case ASHIFT:
2212 case ASHIFTRT:
2213 case LSHIFTRT:
2214 case ROTATE:
2215 case ROTATERT:
2216 case AND:
2217 case IOR:
2218 case XOR:
2219 case NEG:
2220 case NOT:
2221 *total = COSTS_N_INSNS (1);
2222 return false;
2223
2224 case PLUS:
2225 case MINUS:
2226 /* Check for multiply and add. */
2227 if ((GET_MODE (x) == DFmode || GET_MODE (x) == SFmode)
2228 && GET_CODE (XEXP (x, 0)) == MULT
2229 && TARGET_HARD_FLOAT && TARGET_FUSED_MADD)
2230 {
2231 /* This is the multiply and add case. */
2232 if (GET_MODE (x) == DFmode)
2233 *total = s390_cost->madbr;
2234 else
2235 *total = s390_cost->maebr;
2236 *total += rtx_cost (XEXP (XEXP (x, 0), 0), MULT)
2237 + rtx_cost (XEXP (XEXP (x, 0), 1), MULT)
2238 + rtx_cost (XEXP (x, 1), code);
2239 return true; /* Do not do an additional recursive descent. */
2240 }
2241 *total = COSTS_N_INSNS (1);
2242 return false;
2243
2244 case MULT:
2245 switch (GET_MODE (x))
2246 {
2247 case SImode:
2248 {
2249 rtx left = XEXP (x, 0);
2250 rtx right = XEXP (x, 1);
2251 if (GET_CODE (right) == CONST_INT
2252 && CONST_OK_FOR_K (INTVAL (right)))
2253 *total = s390_cost->mhi;
2254 else if (GET_CODE (left) == SIGN_EXTEND)
2255 *total = s390_cost->mh;
2256 else
2257 *total = s390_cost->ms; /* msr, ms, msy */
2258 break;
2259 }
2260 case DImode:
2261 {
2262 rtx left = XEXP (x, 0);
2263 rtx right = XEXP (x, 1);
2264 if (TARGET_64BIT)
2265 {
2266 if (GET_CODE (right) == CONST_INT
2267 && CONST_OK_FOR_K (INTVAL (right)))
2268 *total = s390_cost->mghi;
2269 else if (GET_CODE (left) == SIGN_EXTEND)
2270 *total = s390_cost->msgf;
2271 else
2272 *total = s390_cost->msg; /* msgr, msg */
2273 }
2274 else /* TARGET_31BIT */
2275 {
2276 if (GET_CODE (left) == SIGN_EXTEND
2277 && GET_CODE (right) == SIGN_EXTEND)
2278 /* mulsidi case: mr, m */
2279 *total = s390_cost->m;
2280 else if (GET_CODE (left) == ZERO_EXTEND
2281 && GET_CODE (right) == ZERO_EXTEND
2282 && TARGET_CPU_ZARCH)
2283 /* umulsidi case: ml, mlr */
2284 *total = s390_cost->ml;
2285 else
2286 /* Complex calculation is required. */
2287 *total = COSTS_N_INSNS (40);
2288 }
2289 break;
2290 }
2291 case SFmode:
2292 case DFmode:
2293 *total = s390_cost->mult_df;
2294 break;
2295 case TFmode:
2296 *total = s390_cost->mxbr;
2297 break;
2298 default:
2299 return false;
2300 }
2301 return false;
2302
2303 case UDIV:
2304 case UMOD:
2305 if (GET_MODE (x) == TImode) /* 128 bit division */
2306 *total = s390_cost->dlgr;
2307 else if (GET_MODE (x) == DImode)
2308 {
2309 rtx right = XEXP (x, 1);
2310 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
2311 *total = s390_cost->dlr;
2312 else /* 64 by 64 bit division */
2313 *total = s390_cost->dlgr;
2314 }
2315 else if (GET_MODE (x) == SImode) /* 32 bit division */
2316 *total = s390_cost->dlr;
2317 return false;
2318
2319 case DIV:
2320 case MOD:
2321 if (GET_MODE (x) == DImode)
2322 {
2323 rtx right = XEXP (x, 1);
2324 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
2325 if (TARGET_64BIT)
2326 *total = s390_cost->dsgfr;
2327 else
2328 *total = s390_cost->dr;
2329 else /* 64 by 64 bit division */
2330 *total = s390_cost->dsgr;
2331 }
2332 else if (GET_MODE (x) == SImode) /* 32 bit division */
2333 *total = s390_cost->dlr;
2334 else if (GET_MODE (x) == SFmode)
2335 {
2336 *total = s390_cost->debr;
2337 }
2338 else if (GET_MODE (x) == DFmode)
2339 {
2340 *total = s390_cost->ddbr;
2341 }
2342 else if (GET_MODE (x) == TFmode)
2343 {
2344 *total = s390_cost->dxbr;
2345 }
2346 return false;
2347
2348 case SQRT:
2349 if (GET_MODE (x) == SFmode)
2350 *total = s390_cost->sqebr;
2351 else if (GET_MODE (x) == DFmode)
2352 *total = s390_cost->sqdbr;
2353 else /* TFmode */
2354 *total = s390_cost->sqxbr;
2355 return false;
2356
2357 case SIGN_EXTEND:
2358 case ZERO_EXTEND:
2359 if (outer_code == MULT || outer_code == DIV || outer_code == MOD
2360 || outer_code == PLUS || outer_code == MINUS
2361 || outer_code == COMPARE)
2362 *total = 0;
2363 return false;
2364
2365 case COMPARE:
2366 *total = COSTS_N_INSNS (1);
2367 if (GET_CODE (XEXP (x, 0)) == AND
2368 && GET_CODE (XEXP (x, 1)) == CONST_INT
2369 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
2370 {
2371 rtx op0 = XEXP (XEXP (x, 0), 0);
2372 rtx op1 = XEXP (XEXP (x, 0), 1);
2373 rtx op2 = XEXP (x, 1);
2374
2375 if (memory_operand (op0, GET_MODE (op0))
2376 && s390_tm_ccmode (op1, op2, 0) != VOIDmode)
2377 return true;
2378 if (register_operand (op0, GET_MODE (op0))
2379 && s390_tm_ccmode (op1, op2, 1) != VOIDmode)
2380 return true;
2381 }
2382 return false;
2383
2384 default:
2385 return false;
2386 }
2387 }
2388
2389 /* Return the cost of an address rtx ADDR. */
2390
2391 static int
2392 s390_address_cost (rtx addr)
2393 {
2394 struct s390_address ad;
2395 if (!s390_decompose_address (addr, &ad))
2396 return 1000;
2397
2398 return ad.indx? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (1);
2399 }
2400
2401 /* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
2402 otherwise return 0. */
2403
2404 int
2405 tls_symbolic_operand (rtx op)
2406 {
2407 if (GET_CODE (op) != SYMBOL_REF)
2408 return 0;
2409 return SYMBOL_REF_TLS_MODEL (op);
2410 }
2411 \f
2412 /* Split DImode access register reference REG (on 64-bit) into its constituent
2413 low and high parts, and store them into LO and HI. Note that gen_lowpart/
2414 gen_highpart cannot be used as they assume all registers are word-sized,
2415 while our access registers have only half that size. */
2416
2417 void
2418 s390_split_access_reg (rtx reg, rtx *lo, rtx *hi)
2419 {
2420 gcc_assert (TARGET_64BIT);
2421 gcc_assert (ACCESS_REG_P (reg));
2422 gcc_assert (GET_MODE (reg) == DImode);
2423 gcc_assert (!(REGNO (reg) & 1));
2424
2425 *lo = gen_rtx_REG (SImode, REGNO (reg) + 1);
2426 *hi = gen_rtx_REG (SImode, REGNO (reg));
2427 }
2428
2429 /* Return true if OP contains a symbol reference */
2430
2431 bool
2432 symbolic_reference_mentioned_p (rtx op)
2433 {
2434 const char *fmt;
2435 int i;
2436
2437 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
2438 return 1;
2439
2440 fmt = GET_RTX_FORMAT (GET_CODE (op));
2441 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2442 {
2443 if (fmt[i] == 'E')
2444 {
2445 int j;
2446
2447 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2448 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2449 return 1;
2450 }
2451
2452 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
2453 return 1;
2454 }
2455
2456 return 0;
2457 }
2458
2459 /* Return true if OP contains a reference to a thread-local symbol. */
2460
2461 bool
2462 tls_symbolic_reference_mentioned_p (rtx op)
2463 {
2464 const char *fmt;
2465 int i;
2466
2467 if (GET_CODE (op) == SYMBOL_REF)
2468 return tls_symbolic_operand (op);
2469
2470 fmt = GET_RTX_FORMAT (GET_CODE (op));
2471 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2472 {
2473 if (fmt[i] == 'E')
2474 {
2475 int j;
2476
2477 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2478 if (tls_symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2479 return true;
2480 }
2481
2482 else if (fmt[i] == 'e' && tls_symbolic_reference_mentioned_p (XEXP (op, i)))
2483 return true;
2484 }
2485
2486 return false;
2487 }
2488
2489
2490 /* Return true if OP is a legitimate general operand when
2491 generating PIC code. It is given that flag_pic is on
2492 and that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2493
2494 int
2495 legitimate_pic_operand_p (rtx op)
2496 {
2497 /* Accept all non-symbolic constants. */
2498 if (!SYMBOLIC_CONST (op))
2499 return 1;
2500
2501 /* Reject everything else; must be handled
2502 via emit_symbolic_move. */
2503 return 0;
2504 }
2505
2506 /* Returns true if the constant value OP is a legitimate general operand.
2507 It is given that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2508
2509 int
2510 legitimate_constant_p (rtx op)
2511 {
2512 /* Accept all non-symbolic constants. */
2513 if (!SYMBOLIC_CONST (op))
2514 return 1;
2515
2516 /* Accept immediate LARL operands. */
2517 if (TARGET_CPU_ZARCH && larl_operand (op, VOIDmode))
2518 return 1;
2519
2520 /* Thread-local symbols are never legal constants. This is
2521 so that emit_call knows that computing such addresses
2522 might require a function call. */
2523 if (TLS_SYMBOLIC_CONST (op))
2524 return 0;
2525
2526 /* In the PIC case, symbolic constants must *not* be
2527 forced into the literal pool. We accept them here,
2528 so that they will be handled by emit_symbolic_move. */
2529 if (flag_pic)
2530 return 1;
2531
2532 /* All remaining non-PIC symbolic constants are
2533 forced into the literal pool. */
2534 return 0;
2535 }
2536
2537 /* Determine if it's legal to put X into the constant pool. This
2538 is not possible if X contains the address of a symbol that is
2539 not constant (TLS) or not known at final link time (PIC). */
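
/* For instance, a plain integer or -- in non-PIC code -- a function label
   may be put into the pool, whereas the address of a TLS variable never
   may, and with flag_pic set even an ordinary symbol must not be pooled
   since its address is not known at final link time.  */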
2540
2541 static bool
2542 s390_cannot_force_const_mem (rtx x)
2543 {
2544 switch (GET_CODE (x))
2545 {
2546 case CONST_INT:
2547 case CONST_DOUBLE:
2548 /* Accept all non-symbolic constants. */
2549 return false;
2550
2551 case LABEL_REF:
2552 /* Labels are OK iff we are non-PIC. */
2553 return flag_pic != 0;
2554
2555 case SYMBOL_REF:
2556 /* 'Naked' TLS symbol references are never OK,
2557 non-TLS symbols are OK iff we are non-PIC. */
2558 if (tls_symbolic_operand (x))
2559 return true;
2560 else
2561 return flag_pic != 0;
2562
2563 case CONST:
2564 return s390_cannot_force_const_mem (XEXP (x, 0));
2565 case PLUS:
2566 case MINUS:
2567 return s390_cannot_force_const_mem (XEXP (x, 0))
2568 || s390_cannot_force_const_mem (XEXP (x, 1));
2569
2570 case UNSPEC:
2571 switch (XINT (x, 1))
2572 {
2573 /* Only lt-relative or GOT-relative UNSPECs are OK. */
2574 case UNSPEC_LTREL_OFFSET:
2575 case UNSPEC_GOT:
2576 case UNSPEC_GOTOFF:
2577 case UNSPEC_PLTOFF:
2578 case UNSPEC_TLSGD:
2579 case UNSPEC_TLSLDM:
2580 case UNSPEC_NTPOFF:
2581 case UNSPEC_DTPOFF:
2582 case UNSPEC_GOTNTPOFF:
2583 case UNSPEC_INDNTPOFF:
2584 return false;
2585
2586 /* If the literal pool shares the code section, we put
2587 execute template placeholders into the pool as well. */
2588 case UNSPEC_INSN:
2589 return TARGET_CPU_ZARCH;
2590
2591 default:
2592 return true;
2593 }
2594 break;
2595
2596 default:
2597 gcc_unreachable ();
2598 }
2599 }
2600
2601 /* Returns true if the constant value OP is a legitimate general
2602 operand during and after reload. The difference to
2603 legitimate_constant_p is that this function will not accept
2604 a constant that would need to be forced to the literal pool
2605 before it can be used as operand. */
2606
2607 bool
2608 legitimate_reload_constant_p (rtx op)
2609 {
2610 /* Accept la(y) operands. */
2611 if (GET_CODE (op) == CONST_INT
2612 && DISP_IN_RANGE (INTVAL (op)))
2613 return true;
2614
2615 /* Accept l(g)hi/l(g)fi operands. */
2616 if (GET_CODE (op) == CONST_INT
2617 && (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_Os (INTVAL (op))))
2618 return true;
2619
2620 /* Accept lliXX operands. */
2621 if (TARGET_ZARCH
2622 && GET_CODE (op) == CONST_INT
2623 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2624 && s390_single_part (op, word_mode, HImode, 0) >= 0)
2625 return true;
2626
2627 if (TARGET_EXTIMM
2628 && GET_CODE (op) == CONST_INT
2629 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2630 && s390_single_part (op, word_mode, SImode, 0) >= 0)
2631 return true;
2632
2633 /* Accept larl operands. */
2634 if (TARGET_CPU_ZARCH
2635 && larl_operand (op, VOIDmode))
2636 return true;
2637
2638 /* Accept lzXX operands. */
2639 if (GET_CODE (op) == CONST_DOUBLE
2640 && CONST_DOUBLE_OK_FOR_CONSTRAINT_P (op, 'G', "G"))
2641 return true;
2642
2643 /* Accept double-word operands that can be split. */
2644 if (GET_CODE (op) == CONST_INT
2645 && trunc_int_for_mode (INTVAL (op), word_mode) != INTVAL (op))
2646 {
2647 enum machine_mode dword_mode = word_mode == SImode ? DImode : TImode;
2648 rtx hi = operand_subword (op, 0, 0, dword_mode);
2649 rtx lo = operand_subword (op, 1, 0, dword_mode);
2650 return legitimate_reload_constant_p (hi)
2651 && legitimate_reload_constant_p (lo);
2652 }
2653
2654 /* Everything else cannot be handled without reload. */
2655 return false;
2656 }
2657
2658 /* Given an rtx OP being reloaded into a reg required to be in class CLASS,
2659 return the class of reg to actually use. */
2660
2661 enum reg_class
2662 s390_preferred_reload_class (rtx op, enum reg_class class)
2663 {
2664 switch (GET_CODE (op))
2665 {
2666 /* Constants we cannot reload must be forced into the
2667 literal pool. */
2668
2669 case CONST_DOUBLE:
2670 case CONST_INT:
2671 if (legitimate_reload_constant_p (op))
2672 return class;
2673 else
2674 return NO_REGS;
2675
2676 /* If a symbolic constant or a PLUS is reloaded,
2677 it is most likely being used as an address, so
2678 prefer ADDR_REGS. If 'class' is not a superset
2679 of ADDR_REGS, e.g. FP_REGS, reject this reload. */
2680 case PLUS:
2681 case LABEL_REF:
2682 case SYMBOL_REF:
2683 case CONST:
2684 if (reg_class_subset_p (ADDR_REGS, class))
2685 return ADDR_REGS;
2686 else
2687 return NO_REGS;
2688
2689 default:
2690 break;
2691 }
2692
2693 return class;
2694 }
2695
2696 /* Inform reload about cases where moving X with a mode MODE to a register in
2697 CLASS requires an extra scratch or immediate register. Return the class
2698 needed for the immediate register. */
2699
2700 static enum reg_class
2701 s390_secondary_reload (bool in_p, rtx x, enum reg_class class,
2702 enum machine_mode mode, secondary_reload_info *sri)
2703 {
2704 /* Intermediate register needed. */
2705 if (reg_classes_intersect_p (CC_REGS, class))
2706 return GENERAL_REGS;
2707
2708 /* We need a scratch register when loading a PLUS expression which
2709 is not a legitimate operand of the LOAD ADDRESS instruction. */
2710 if (in_p && s390_plus_operand (x, mode))
2711 sri->icode = (TARGET_64BIT ?
2712 CODE_FOR_reloaddi_plus : CODE_FOR_reloadsi_plus);
2713
2714 /* When performing a multiword move from or to memory, we have to make
2715 sure that the second chunk in memory is addressable without causing a
2716 displacement overflow. If it is not, we calculate the address in
2717 a scratch register. */
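
/* A concrete case (assuming no long-displacement facility): a multiword
   GPR move -- e.g. a DImode value on 31-bit, occupying two registers --
   to a b+i+d address with displacement 4092 would place its last byte at
   displacement 4099, which no longer fits the 12-bit field, so the
   address is computed into a scratch register first.  */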
2718 if (MEM_P (x)
2719 && GET_CODE (XEXP (x, 0)) == PLUS
2720 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2721 && !DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1))
2722 + GET_MODE_SIZE (mode) - 1))
2723 {
2724 /* For GENERAL_REGS a displacement overflow is no problem if occurring
2725 in an s_operand address since we may fall back to lm/stm. So we only
2726 have to care about overflows in the b+i+d case. */
2727 if ((reg_classes_intersect_p (GENERAL_REGS, class)
2728 && s390_class_max_nregs (GENERAL_REGS, mode) > 1
2729 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
2730 /* For FP_REGS no lm/stm is available so this check is triggered
2731 for displacement overflows in b+i+d and b+d like addresses. */
2732 || (reg_classes_intersect_p (FP_REGS, class)
2733 && s390_class_max_nregs (FP_REGS, mode) > 1))
2734 {
2735 if (in_p)
2736 sri->icode = (TARGET_64BIT ?
2737 CODE_FOR_reloaddi_nonoffmem_in :
2738 CODE_FOR_reloadsi_nonoffmem_in);
2739 else
2740 sri->icode = (TARGET_64BIT ?
2741 CODE_FOR_reloaddi_nonoffmem_out :
2742 CODE_FOR_reloadsi_nonoffmem_out);
2743 }
2744 }
2745
2746 /* Either scratch or no register needed. */
2747 return NO_REGS;
2748 }
2749
2750 /* Generate code to load SRC, which is PLUS that is not a
2751 legitimate operand for the LA instruction, into TARGET.
2752 SCRATCH may be used as scratch register. */
2753
2754 void
2755 s390_expand_plus_operand (rtx target, rtx src,
2756 rtx scratch)
2757 {
2758 rtx sum1, sum2;
2759 struct s390_address ad;
2760
2761 /* src must be a PLUS; get its two operands. */
2762 gcc_assert (GET_CODE (src) == PLUS);
2763 gcc_assert (GET_MODE (src) == Pmode);
2764
2765 /* Check if any of the two operands is already scheduled
2766 for replacement by reload. This can happen e.g. when
2767 float registers occur in an address. */
2768 sum1 = find_replacement (&XEXP (src, 0));
2769 sum2 = find_replacement (&XEXP (src, 1));
2770 src = gen_rtx_PLUS (Pmode, sum1, sum2);
2771
2772 /* If the address is already strictly valid, there's nothing to do. */
2773 if (!s390_decompose_address (src, &ad)
2774 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
2775 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
2776 {
2777 /* Otherwise, one of the operands cannot be an address register;
2778 we reload its value into the scratch register. */
2779 if (true_regnum (sum1) < 1 || true_regnum (sum1) > 15)
2780 {
2781 emit_move_insn (scratch, sum1);
2782 sum1 = scratch;
2783 }
2784 if (true_regnum (sum2) < 1 || true_regnum (sum2) > 15)
2785 {
2786 emit_move_insn (scratch, sum2);
2787 sum2 = scratch;
2788 }
2789
2790 /* According to the way these invalid addresses are generated
2791 in reload.c, it should never happen (at least on s390) that
2792 *neither* of the PLUS components, after find_replacements
2793 was applied, is an address register. */
2794 if (sum1 == scratch && sum2 == scratch)
2795 {
2796 debug_rtx (src);
2797 gcc_unreachable ();
2798 }
2799
2800 src = gen_rtx_PLUS (Pmode, sum1, sum2);
2801 }
2802
2803 /* Emit the LOAD ADDRESS pattern. Note that reload of PLUS
2804 is only ever performed on addresses, so we can mark the
2805 sum as legitimate for LA in any case. */
2806 s390_load_address (target, src);
2807 }
2808
2809
2810 /* Return true if ADDR is a valid memory address.
2811 STRICT specifies whether strict register checking applies. */
2812
2813 bool
2814 legitimate_address_p (enum machine_mode mode ATTRIBUTE_UNUSED,
2815 rtx addr, int strict)
2816 {
2817 struct s390_address ad;
2818 if (!s390_decompose_address (addr, &ad))
2819 return false;
2820
2821 if (strict)
2822 {
2823 if (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
2824 return false;
2825
2826 if (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx)))
2827 return false;
2828 }
2829 else
2830 {
2831 if (ad.base
2832 && !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
2833 || REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
2834 return false;
2835
2836 if (ad.indx
2837 && !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
2838 || REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
2839 return false;
2840 }
2841 return true;
2842 }
2843
2844 /* Return true if OP is a valid operand for the LA instruction.
2845 In 31-bit, we need to prove that the result is used as an
2846 address, as LA performs only a 31-bit addition. */
2847
2848 bool
2849 legitimate_la_operand_p (rtx op)
2850 {
2851 struct s390_address addr;
2852 if (!s390_decompose_address (op, &addr))
2853 return false;
2854
2855 return (TARGET_64BIT || addr.pointer);
2856 }
2857
2858 /* Return true if it is valid *and* preferable to use LA to
2859 compute the sum of OP1 and OP2. */
2860
2861 bool
2862 preferred_la_operand_p (rtx op1, rtx op2)
2863 {
2864 struct s390_address addr;
2865
2866 if (op2 != const0_rtx)
2867 op1 = gen_rtx_PLUS (Pmode, op1, op2);
2868
2869 if (!s390_decompose_address (op1, &addr))
2870 return false;
2871 if (addr.base && !REGNO_OK_FOR_BASE_P (REGNO (addr.base)))
2872 return false;
2873 if (addr.indx && !REGNO_OK_FOR_INDEX_P (REGNO (addr.indx)))
2874 return false;
2875
2876 if (!TARGET_64BIT && !addr.pointer)
2877 return false;
2878
2879 if (addr.pointer)
2880 return true;
2881
2882 if ((addr.base && REG_P (addr.base) && REG_POINTER (addr.base))
2883 || (addr.indx && REG_P (addr.indx) && REG_POINTER (addr.indx)))
2884 return true;
2885
2886 return false;
2887 }
2888
2889 /* Emit a forced load-address operation to load SRC into DST.
2890 This will use the LOAD ADDRESS instruction even in situations
2891 where legitimate_la_operand_p (SRC) returns false. */
2892
2893 void
2894 s390_load_address (rtx dst, rtx src)
2895 {
2896 if (TARGET_64BIT)
2897 emit_move_insn (dst, src);
2898 else
2899 emit_insn (gen_force_la_31 (dst, src));
2900 }
2901
2902 /* Return a legitimate reference for ORIG (an address) using the
2903 register REG. If REG is 0, a new pseudo is generated.
2904
2905 There are two types of references that must be handled:
2906
2907 1. Global data references must load the address from the GOT, via
2908 the PIC reg. An insn is emitted to do this load, and the reg is
2909 returned.
2910
2911 2. Static data references, constant pool addresses, and code labels
2912 compute the address as an offset from the GOT, whose base is in
2913 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
2914 differentiate them from global data objects. The returned
2915 address is the PIC reg + an unspec constant.
2916
2917 GO_IF_LEGITIMATE_ADDRESS rejects symbolic references unless the PIC
2918 reg also appears in the address. */
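
/* Sketches of the two shapes (simplified; the intermediate values really
   live in fresh pseudos):

     1. global symbol FOO with flag_pic == 1:
          (mem (plus (reg pic_offset_table)
                     (const (unspec [FOO] UNSPEC_GOT))))

     2. LARL-addressable local symbol on TARGET_CPU_ZARCH:
          the operand is simply returned unchanged.  */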
2919
2920 rtx
2921 legitimize_pic_address (rtx orig, rtx reg)
2922 {
2923 rtx addr = orig;
2924 rtx new = orig;
2925 rtx base;
2926
2927 gcc_assert (!TLS_SYMBOLIC_CONST (addr));
2928
2929 if (GET_CODE (addr) == LABEL_REF
2930 || (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (addr)))
2931 {
2932 /* This is a local symbol. */
2933 if (TARGET_CPU_ZARCH && larl_operand (addr, VOIDmode))
2934 {
2935 /* Access local symbols PC-relative via LARL.
2936 This is the same as in the non-PIC case, so it is
2937 handled automatically ... */
2938 }
2939 else
2940 {
2941 /* Access local symbols relative to the GOT. */
2942
2943 rtx temp = reg? reg : gen_reg_rtx (Pmode);
2944
2945 if (reload_in_progress || reload_completed)
2946 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
2947
2948 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
2949 addr = gen_rtx_CONST (Pmode, addr);
2950 addr = force_const_mem (Pmode, addr);
2951 emit_move_insn (temp, addr);
2952
2953 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
2954 if (reg != 0)
2955 {
2956 s390_load_address (reg, new);
2957 new = reg;
2958 }
2959 }
2960 }
2961 else if (GET_CODE (addr) == SYMBOL_REF)
2962 {
2963 if (reg == 0)
2964 reg = gen_reg_rtx (Pmode);
2965
2966 if (flag_pic == 1)
2967 {
2968 /* Assume GOT offset < 4k. This is handled the same way
2969 in both 31- and 64-bit code (@GOT). */
2970
2971 if (reload_in_progress || reload_completed)
2972 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
2973
2974 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
2975 new = gen_rtx_CONST (Pmode, new);
2976 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
2977 new = gen_const_mem (Pmode, new);
2978 emit_move_insn (reg, new);
2979 new = reg;
2980 }
2981 else if (TARGET_CPU_ZARCH)
2982 {
2983 /* If the GOT offset might be >= 4k, we determine the position
2984 of the GOT entry via a PC-relative LARL (@GOTENT). */
2985
2986 rtx temp = gen_reg_rtx (Pmode);
2987
2988 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
2989 new = gen_rtx_CONST (Pmode, new);
2990 emit_move_insn (temp, new);
2991
2992 new = gen_const_mem (Pmode, temp);
2993 emit_move_insn (reg, new);
2994 new = reg;
2995 }
2996 else
2997 {
2998 /* If the GOT offset might be >= 4k, we have to load it
2999 from the literal pool (@GOT). */
3000
3001 rtx temp = gen_reg_rtx (Pmode);
3002
3003 if (reload_in_progress || reload_completed)
3004 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3005
3006 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3007 addr = gen_rtx_CONST (Pmode, addr);
3008 addr = force_const_mem (Pmode, addr);
3009 emit_move_insn (temp, addr);
3010
3011 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3012 new = gen_const_mem (Pmode, new);
3013 emit_move_insn (reg, new);
3014 new = reg;
3015 }
3016 }
3017 else
3018 {
3019 if (GET_CODE (addr) == CONST)
3020 {
3021 addr = XEXP (addr, 0);
3022 if (GET_CODE (addr) == UNSPEC)
3023 {
3024 gcc_assert (XVECLEN (addr, 0) == 1);
3025 switch (XINT (addr, 1))
3026 {
3027 /* If someone moved a GOT-relative UNSPEC
3028 out of the literal pool, force them back in. */
3029 case UNSPEC_GOTOFF:
3030 case UNSPEC_PLTOFF:
3031 new = force_const_mem (Pmode, orig);
3032 break;
3033
3034 /* @GOT is OK as is if small. */
3035 case UNSPEC_GOT:
3036 if (flag_pic == 2)
3037 new = force_const_mem (Pmode, orig);
3038 break;
3039
3040 /* @GOTENT is OK as is. */
3041 case UNSPEC_GOTENT:
3042 break;
3043
3044 /* @PLT is OK as is on 64-bit, must be converted to
3045 GOT-relative @PLTOFF on 31-bit. */
3046 case UNSPEC_PLT:
3047 if (!TARGET_CPU_ZARCH)
3048 {
3049 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3050
3051 if (reload_in_progress || reload_completed)
3052 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3053
3054 addr = XVECEXP (addr, 0, 0);
3055 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr),
3056 UNSPEC_PLTOFF);
3057 addr = gen_rtx_CONST (Pmode, addr);
3058 addr = force_const_mem (Pmode, addr);
3059 emit_move_insn (temp, addr);
3060
3061 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3062 if (reg != 0)
3063 {
3064 s390_load_address (reg, new);
3065 new = reg;
3066 }
3067 }
3068 break;
3069
3070 /* Everything else cannot happen. */
3071 default:
3072 gcc_unreachable ();
3073 }
3074 }
3075 else
3076 gcc_assert (GET_CODE (addr) == PLUS);
3077 }
3078 if (GET_CODE (addr) == PLUS)
3079 {
3080 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
3081
3082 gcc_assert (!TLS_SYMBOLIC_CONST (op0));
3083 gcc_assert (!TLS_SYMBOLIC_CONST (op1));
3084
3085 /* Check first to see if this is a constant offset
3086 from a local symbol reference. */
3087 if ((GET_CODE (op0) == LABEL_REF
3088 || (GET_CODE (op0) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (op0)))
3089 && GET_CODE (op1) == CONST_INT)
3090 {
3091 if (TARGET_CPU_ZARCH
3092 && larl_operand (op0, VOIDmode)
3093 && INTVAL (op1) < (HOST_WIDE_INT)1 << 31
3094 && INTVAL (op1) >= -((HOST_WIDE_INT)1 << 31))
3095 {
3096 if (INTVAL (op1) & 1)
3097 {
3098 /* LARL can't handle odd offsets, so emit a
3099 pair of LARL and LA. */
3100 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3101
3102 if (!DISP_IN_RANGE (INTVAL (op1)))
3103 {
3104 HOST_WIDE_INT even = INTVAL (op1) - 1;
3105 op0 = gen_rtx_PLUS (Pmode, op0, GEN_INT (even));
3106 op0 = gen_rtx_CONST (Pmode, op0);
3107 op1 = const1_rtx;
3108 }
3109
3110 emit_move_insn (temp, op0);
3111 new = gen_rtx_PLUS (Pmode, temp, op1);
3112
3113 if (reg != 0)
3114 {
3115 s390_load_address (reg, new);
3116 new = reg;
3117 }
3118 }
3119 else
3120 {
3121 /* If the offset is even, we can just use LARL.
3122 This will happen automatically. */
3123 }
3124 }
3125 else
3126 {
3127 /* Access local symbols relative to the GOT. */
3128
3129 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3130
3131 if (reload_in_progress || reload_completed)
3132 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3133
3134 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
3135 UNSPEC_GOTOFF);
3136 addr = gen_rtx_PLUS (Pmode, addr, op1);
3137 addr = gen_rtx_CONST (Pmode, addr);
3138 addr = force_const_mem (Pmode, addr);
3139 emit_move_insn (temp, addr);
3140
3141 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3142 if (reg != 0)
3143 {
3144 s390_load_address (reg, new);
3145 new = reg;
3146 }
3147 }
3148 }
3149
3150 /* Now, check whether it is a GOT relative symbol plus offset
3151 that was pulled out of the literal pool. Force it back in. */
3152
3153 else if (GET_CODE (op0) == UNSPEC
3154 && GET_CODE (op1) == CONST_INT
3155 && XINT (op0, 1) == UNSPEC_GOTOFF)
3156 {
3157 gcc_assert (XVECLEN (op0, 0) == 1);
3158
3159 new = force_const_mem (Pmode, orig);
3160 }
3161
3162 /* Otherwise, compute the sum. */
3163 else
3164 {
3165 base = legitimize_pic_address (XEXP (addr, 0), reg);
3166 new = legitimize_pic_address (XEXP (addr, 1),
3167 base == reg ? NULL_RTX : reg);
3168 if (GET_CODE (new) == CONST_INT)
3169 new = plus_constant (base, INTVAL (new));
3170 else
3171 {
3172 if (GET_CODE (new) == PLUS && CONSTANT_P (XEXP (new, 1)))
3173 {
3174 base = gen_rtx_PLUS (Pmode, base, XEXP (new, 0));
3175 new = XEXP (new, 1);
3176 }
3177 new = gen_rtx_PLUS (Pmode, base, new);
3178 }
3179
3180 if (GET_CODE (new) == CONST)
3181 new = XEXP (new, 0);
3182 new = force_operand (new, 0);
3183 }
3184 }
3185 }
3186 return new;
3187 }
3188
3189 /* Load the thread pointer into a register. */
3190
3191 rtx
3192 s390_get_thread_pointer (void)
3193 {
3194 rtx tp = gen_reg_rtx (Pmode);
3195
3196 emit_move_insn (tp, gen_rtx_REG (Pmode, TP_REGNUM));
3197 mark_reg_pointer (tp, BITS_PER_WORD);
3198
3199 return tp;
3200 }
3201
3202 /* Emit a tls call insn. The call target is the SYMBOL_REF stored
3203 in s390_tls_symbol which always refers to __tls_get_offset.
3204 The returned offset is written to RESULT_REG and a USE rtx is
3205 generated for TLS_CALL. */
3206
3207 static GTY(()) rtx s390_tls_symbol;
3208
3209 static void
3210 s390_emit_tls_call_insn (rtx result_reg, rtx tls_call)
3211 {
3212 rtx insn;
3213
3214 gcc_assert (flag_pic);
3215
3216 if (!s390_tls_symbol)
3217 s390_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_offset");
3218
3219 insn = s390_emit_call (s390_tls_symbol, tls_call, result_reg,
3220 gen_rtx_REG (Pmode, RETURN_REGNUM));
3221
3222 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), result_reg);
3223 RTL_CONST_CALL_P (insn) = 1;
3224 }
3225
3226 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3227 this (thread-local) address. REG may be used as temporary. */
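
/* Rough shape of the four models handled below (the cases themselves are
   authoritative): global-dynamic and local-dynamic call __tls_get_offset
   to obtain the offset, initial-exec loads a precomputed offset from the
   GOT, and local-exec takes it from the literal pool.  In each model the
   final address is that offset added to the thread pointer.  */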
3228
3229 static rtx
3230 legitimize_tls_address (rtx addr, rtx reg)
3231 {
3232 rtx new, tls_call, temp, base, r2, insn;
3233
3234 if (GET_CODE (addr) == SYMBOL_REF)
3235 switch (tls_symbolic_operand (addr))
3236 {
3237 case TLS_MODEL_GLOBAL_DYNAMIC:
3238 start_sequence ();
3239 r2 = gen_rtx_REG (Pmode, 2);
3240 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_TLSGD);
3241 new = gen_rtx_CONST (Pmode, tls_call);
3242 new = force_const_mem (Pmode, new);
3243 emit_move_insn (r2, new);
3244 s390_emit_tls_call_insn (r2, tls_call);
3245 insn = get_insns ();
3246 end_sequence ();
3247
3248 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3249 temp = gen_reg_rtx (Pmode);
3250 emit_libcall_block (insn, temp, r2, new);
3251
3252 new = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3253 if (reg != 0)
3254 {
3255 s390_load_address (reg, new);
3256 new = reg;
3257 }
3258 break;
3259
3260 case TLS_MODEL_LOCAL_DYNAMIC:
3261 start_sequence ();
3262 r2 = gen_rtx_REG (Pmode, 2);
3263 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM);
3264 new = gen_rtx_CONST (Pmode, tls_call);
3265 new = force_const_mem (Pmode, new);
3266 emit_move_insn (r2, new);
3267 s390_emit_tls_call_insn (r2, tls_call);
3268 insn = get_insns ();
3269 end_sequence ();
3270
3271 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM_NTPOFF);
3272 temp = gen_reg_rtx (Pmode);
3273 emit_libcall_block (insn, temp, r2, new);
3274
3275 new = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3276 base = gen_reg_rtx (Pmode);
3277 s390_load_address (base, new);
3278
3279 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_DTPOFF);
3280 new = gen_rtx_CONST (Pmode, new);
3281 new = force_const_mem (Pmode, new);
3282 temp = gen_reg_rtx (Pmode);
3283 emit_move_insn (temp, new);
3284
3285 new = gen_rtx_PLUS (Pmode, base, temp);
3286 if (reg != 0)
3287 {
3288 s390_load_address (reg, new);
3289 new = reg;
3290 }
3291 break;
3292
3293 case TLS_MODEL_INITIAL_EXEC:
3294 if (flag_pic == 1)
3295 {
3296 /* Assume GOT offset < 4k. This is handled the same way
3297 in both 31- and 64-bit code. */
3298
3299 if (reload_in_progress || reload_completed)
3300 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3301
3302 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3303 new = gen_rtx_CONST (Pmode, new);
3304 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
3305 new = gen_const_mem (Pmode, new);
3306 temp = gen_reg_rtx (Pmode);
3307 emit_move_insn (temp, new);
3308 }
3309 else if (TARGET_CPU_ZARCH)
3310 {
3311 /* If the GOT offset might be >= 4k, we determine the position
3312 of the GOT entry via a PC-relative LARL. */
3313
3314 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3315 new = gen_rtx_CONST (Pmode, new);
3316 temp = gen_reg_rtx (Pmode);
3317 emit_move_insn (temp, new);
3318
3319 new = gen_const_mem (Pmode, temp);
3320 temp = gen_reg_rtx (Pmode);
3321 emit_move_insn (temp, new);
3322 }
3323 else if (flag_pic)
3324 {
3325 /* If the GOT offset might be >= 4k, we have to load it
3326 from the literal pool. */
3327
3328 if (reload_in_progress || reload_completed)
3329 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3330
3331 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3332 new = gen_rtx_CONST (Pmode, new);
3333 new = force_const_mem (Pmode, new);
3334 temp = gen_reg_rtx (Pmode);
3335 emit_move_insn (temp, new);
3336
3337 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3338 new = gen_const_mem (Pmode, new);
3339
3340 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new, addr), UNSPEC_TLS_LOAD);
3341 temp = gen_reg_rtx (Pmode);
3342 emit_insn (gen_rtx_SET (Pmode, temp, new));
3343 }
3344 else
3345 {
3346 /* In position-dependent code, load the absolute address of
3347 the GOT entry from the literal pool. */
3348
3349 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3350 new = gen_rtx_CONST (Pmode, new);
3351 new = force_const_mem (Pmode, new);
3352 temp = gen_reg_rtx (Pmode);
3353 emit_move_insn (temp, new);
3354
3355 new = temp;
3356 new = gen_const_mem (Pmode, new);
3357 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new, addr), UNSPEC_TLS_LOAD);
3358 temp = gen_reg_rtx (Pmode);
3359 emit_insn (gen_rtx_SET (Pmode, temp, new));
3360 }
3361
3362 new = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3363 if (reg != 0)
3364 {
3365 s390_load_address (reg, new);
3366 new = reg;
3367 }
3368 break;
3369
3370 case TLS_MODEL_LOCAL_EXEC:
3371 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3372 new = gen_rtx_CONST (Pmode, new);
3373 new = force_const_mem (Pmode, new);
3374 temp = gen_reg_rtx (Pmode);
3375 emit_move_insn (temp, new);
3376
3377 new = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3378 if (reg != 0)
3379 {
3380 s390_load_address (reg, new);
3381 new = reg;
3382 }
3383 break;
3384
3385 default:
3386 gcc_unreachable ();
3387 }
3388
3389 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == UNSPEC)
3390 {
3391 switch (XINT (XEXP (addr, 0), 1))
3392 {
3393 case UNSPEC_INDNTPOFF:
3394 gcc_assert (TARGET_CPU_ZARCH);
3395 new = addr;
3396 break;
3397
3398 default:
3399 gcc_unreachable ();
3400 }
3401 }
3402
3403 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
3404 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
3405 {
3406 new = XEXP (XEXP (addr, 0), 0);
3407 if (GET_CODE (new) != SYMBOL_REF)
3408 new = gen_rtx_CONST (Pmode, new);
3409
3410 new = legitimize_tls_address (new, reg);
3411 new = plus_constant (new, INTVAL (XEXP (XEXP (addr, 0), 1)));
3412 new = force_operand (new, 0);
3413 }
3414
3415 else
3416 gcc_unreachable (); /* for now ... */
3417
3418 return new;
3419 }
3420
3421 /* Emit insns to move operands[1] into operands[0]. */
3422
3423 void
3424 emit_symbolic_move (rtx *operands)
3425 {
3426 rtx temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
3427
3428 if (GET_CODE (operands[0]) == MEM)
3429 operands[1] = force_reg (Pmode, operands[1]);
3430 else if (TLS_SYMBOLIC_CONST (operands[1]))
3431 operands[1] = legitimize_tls_address (operands[1], temp);
3432 else if (flag_pic)
3433 operands[1] = legitimize_pic_address (operands[1], temp);
3434 }
3435
3436 /* Try machine-dependent ways of modifying an illegitimate address X
3437 to be legitimate. If we find one, return the new, valid address.
3438
3439 OLDX is the address as it was before break_out_memory_refs was called.
3440 In some cases it is useful to look at this to decide what needs to be done.
3441
3442 MODE is the mode of the operand pointed to by X.
3443
3444 When -fpic is used, special handling is needed for symbolic references.
3445 See comments by legitimize_pic_address for details. */
3446
3447 rtx
3448 legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
3449 enum machine_mode mode ATTRIBUTE_UNUSED)
3450 {
3451 rtx constant_term = const0_rtx;
3452
3453 if (TLS_SYMBOLIC_CONST (x))
3454 {
3455 x = legitimize_tls_address (x, 0);
3456
3457 if (legitimate_address_p (mode, x, FALSE))
3458 return x;
3459 }
3460 else if (GET_CODE (x) == PLUS
3461 && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
3462 || TLS_SYMBOLIC_CONST (XEXP (x, 1))))
3463 {
3464 return x;
3465 }
3466 else if (flag_pic)
3467 {
3468 if (SYMBOLIC_CONST (x)
3469 || (GET_CODE (x) == PLUS
3470 && (SYMBOLIC_CONST (XEXP (x, 0))
3471 || SYMBOLIC_CONST (XEXP (x, 1)))))
3472 x = legitimize_pic_address (x, 0);
3473
3474 if (legitimate_address_p (mode, x, FALSE))
3475 return x;
3476 }
3477
3478 x = eliminate_constant_term (x, &constant_term);
3479
3480 /* Optimize loading of large displacements by splitting them
3481 into the multiple of 4K and the rest; this allows the
3482 former to be CSE'd if possible.
3483
3484 Don't do this if the displacement is added to a register
3485 pointing into the stack frame, as the offsets will
3486 change later anyway. */
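
/* Worked example: a constant term of 0x12345 is split into
   UPPER = 0x12000, which is loaded into a fresh pseudo and can be CSE'd
   across references, and LOWER = 0x345, which stays directly encodable
   as a displacement.  */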
3487
3488 if (GET_CODE (constant_term) == CONST_INT
3489 && !TARGET_LONG_DISPLACEMENT
3490 && !DISP_IN_RANGE (INTVAL (constant_term))
3491 && !(REG_P (x) && REGNO_PTR_FRAME_P (REGNO (x))))
3492 {
3493 HOST_WIDE_INT lower = INTVAL (constant_term) & 0xfff;
3494 HOST_WIDE_INT upper = INTVAL (constant_term) ^ lower;
3495
3496 rtx temp = gen_reg_rtx (Pmode);
3497 rtx val = force_operand (GEN_INT (upper), temp);
3498 if (val != temp)
3499 emit_move_insn (temp, val);
3500
3501 x = gen_rtx_PLUS (Pmode, x, temp);
3502 constant_term = GEN_INT (lower);
3503 }
3504
3505 if (GET_CODE (x) == PLUS)
3506 {
3507 if (GET_CODE (XEXP (x, 0)) == REG)
3508 {
3509 rtx temp = gen_reg_rtx (Pmode);
3510 rtx val = force_operand (XEXP (x, 1), temp);
3511 if (val != temp)
3512 emit_move_insn (temp, val);
3513
3514 x = gen_rtx_PLUS (Pmode, XEXP (x, 0), temp);
3515 }
3516
3517 else if (GET_CODE (XEXP (x, 1)) == REG)
3518 {
3519 rtx temp = gen_reg_rtx (Pmode);
3520 rtx val = force_operand (XEXP (x, 0), temp);
3521 if (val != temp)
3522 emit_move_insn (temp, val);
3523
3524 x = gen_rtx_PLUS (Pmode, temp, XEXP (x, 1));
3525 }
3526 }
3527
3528 if (constant_term != const0_rtx)
3529 x = gen_rtx_PLUS (Pmode, x, constant_term);
3530
3531 return x;
3532 }
3533
3534 /* Try a machine-dependent way of reloading an illegitimate address AD
3535 operand. If we find one, push the reload and return the new address.
3536
3537 MODE is the mode of the enclosing MEM. OPNUM is the operand number
3538 and TYPE is the reload type of the current reload. */
3539
3540 rtx
3541 legitimize_reload_address (rtx ad, enum machine_mode mode ATTRIBUTE_UNUSED,
3542 int opnum, int type)
3543 {
3544 if (!optimize || TARGET_LONG_DISPLACEMENT)
3545 return NULL_RTX;
3546
3547 if (GET_CODE (ad) == PLUS)
3548 {
3549 rtx tem = simplify_binary_operation (PLUS, Pmode,
3550 XEXP (ad, 0), XEXP (ad, 1));
3551 if (tem)
3552 ad = tem;
3553 }
3554
3555 if (GET_CODE (ad) == PLUS
3556 && GET_CODE (XEXP (ad, 0)) == REG
3557 && GET_CODE (XEXP (ad, 1)) == CONST_INT
3558 && !DISP_IN_RANGE (INTVAL (XEXP (ad, 1))))
3559 {
3560 HOST_WIDE_INT lower = INTVAL (XEXP (ad, 1)) & 0xfff;
3561 HOST_WIDE_INT upper = INTVAL (XEXP (ad, 1)) ^ lower;
3562 rtx cst, tem, new;
3563
3564 cst = GEN_INT (upper);
3565 if (!legitimate_reload_constant_p (cst))
3566 cst = force_const_mem (Pmode, cst);
3567
3568 tem = gen_rtx_PLUS (Pmode, XEXP (ad, 0), cst);
3569 new = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));
3570
3571 push_reload (XEXP (tem, 1), 0, &XEXP (tem, 1), 0,
3572 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
3573 opnum, (enum reload_type) type);
3574 return new;
3575 }
3576
3577 return NULL_RTX;
3578 }
3579
3580 /* Emit code to move LEN bytes from SRC to DST. */
3581
3582 void
3583 s390_expand_movmem (rtx dst, rtx src, rtx len)
3584 {
3585 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
3586 {
3587 if (INTVAL (len) > 0)
3588 emit_insn (gen_movmem_short (dst, src, GEN_INT (INTVAL (len) - 1)));
3589 }
3590
3591 else if (TARGET_MVCLE)
3592 {
3593 emit_insn (gen_movmem_long (dst, src, convert_to_mode (Pmode, len, 1)));
3594 }
3595
3596 else
3597 {
3598 rtx dst_addr, src_addr, count, blocks, temp;
3599 rtx loop_start_label = gen_label_rtx ();
3600 rtx loop_end_label = gen_label_rtx ();
3601 rtx end_label = gen_label_rtx ();
3602 enum machine_mode mode;
3603
3604 mode = GET_MODE (len);
3605 if (mode == VOIDmode)
3606 mode = Pmode;
3607
3608 dst_addr = gen_reg_rtx (Pmode);
3609 src_addr = gen_reg_rtx (Pmode);
3610 count = gen_reg_rtx (mode);
3611 blocks = gen_reg_rtx (mode);
3612
3613 convert_move (count, len, 1);
3614 emit_cmp_and_jump_insns (count, const0_rtx,
3615 EQ, NULL_RTX, mode, 1, end_label);
3616
3617 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
3618 emit_move_insn (src_addr, force_operand (XEXP (src, 0), NULL_RTX));
3619 dst = change_address (dst, VOIDmode, dst_addr);
3620 src = change_address (src, VOIDmode, src_addr);
3621
3622 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1, 0);
3623 if (temp != count)
3624 emit_move_insn (count, temp);
3625
3626 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1, 0);
3627 if (temp != blocks)
3628 emit_move_insn (blocks, temp);
3629
3630 emit_cmp_and_jump_insns (blocks, const0_rtx,
3631 EQ, NULL_RTX, mode, 1, loop_end_label);
3632
3633 emit_label (loop_start_label);
3634
3635 emit_insn (gen_movmem_short (dst, src, GEN_INT (255)));
3636 s390_load_address (dst_addr,
3637 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
3638 s390_load_address (src_addr,
3639 gen_rtx_PLUS (Pmode, src_addr, GEN_INT (256)));
3640
3641 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1, 0);
3642 if (temp != blocks)
3643 emit_move_insn (blocks, temp);
3644
3645 emit_cmp_and_jump_insns (blocks, const0_rtx,
3646 EQ, NULL_RTX, mode, 1, loop_end_label);
3647
3648 emit_jump (loop_start_label);
3649 emit_label (loop_end_label);
3650
3651 emit_insn (gen_movmem_short (dst, src,
3652 convert_to_mode (Pmode, count, 1)));
3653 emit_label (end_label);
3654 }
3655 }
3656
3657 /* Emit code to set LEN bytes at DST to VAL.
3658 Make use of clrmem if VAL is zero. */
3659
3660 void
3661 s390_expand_setmem (rtx dst, rtx len, rtx val)
3662 {
3663 if (GET_CODE (len) == CONST_INT && INTVAL (len) == 0)
3664 return;
3665
3666 gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);
3667
3668 if (GET_CODE (len) == CONST_INT && INTVAL (len) > 0 && INTVAL (len) <= 257)
3669 {
3670 if (val == const0_rtx && INTVAL (len) <= 256)
3671 emit_insn (gen_clrmem_short (dst, GEN_INT (INTVAL (len) - 1)));
3672 else
3673 {
3674 /* Initialize memory by storing the first byte. */
3675 emit_move_insn (adjust_address (dst, QImode, 0), val);
3676
3677 if (INTVAL (len) > 1)
3678 {
3679 /* Initiate 1 byte overlap move.
3680 The first byte of DST is propagated through DSTP1.
3681 Prepare a movmem for: DST+1 = DST (length = LEN - 1).
3682 DST is set to size 1 so the rest of the memory location
3683 does not count as source operand. */
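
/* Illustration: for LEN == 4 and VAL == 0x55 this stores 0x55 at DST[0]
   and then copies 3 bytes from DST to DST + 1; since the copy proceeds
   byte by byte, the value propagates and all four bytes end up as
   0x55.  */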
3684 rtx dstp1 = adjust_address (dst, VOIDmode, 1);
3685 set_mem_size (dst, const1_rtx);
3686
3687 emit_insn (gen_movmem_short (dstp1, dst,
3688 GEN_INT (INTVAL (len) - 2)));
3689 }
3690 }
3691 }
3692
3693 else if (TARGET_MVCLE)
3694 {
3695 val = force_not_mem (convert_modes (Pmode, QImode, val, 1));
3696 emit_insn (gen_setmem_long (dst, convert_to_mode (Pmode, len, 1), val));
3697 }
3698
3699 else
3700 {
3701 rtx dst_addr, src_addr, count, blocks, temp, dstp1 = NULL_RTX;
3702 rtx loop_start_label = gen_label_rtx ();
3703 rtx loop_end_label = gen_label_rtx ();
3704 rtx end_label = gen_label_rtx ();
3705 enum machine_mode mode;
3706
3707 mode = GET_MODE (len);
3708 if (mode == VOIDmode)
3709 mode = Pmode;
3710
3711 dst_addr = gen_reg_rtx (Pmode);
3712 src_addr = gen_reg_rtx (Pmode);
3713 count = gen_reg_rtx (mode);
3714 blocks = gen_reg_rtx (mode);
3715
3716 convert_move (count, len, 1);
3717 emit_cmp_and_jump_insns (count, const0_rtx,
3718 EQ, NULL_RTX, mode, 1, end_label);
3719
3720 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
3721 dst = change_address (dst, VOIDmode, dst_addr);
3722
3723 if (val == const0_rtx)
3724 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1, 0);
3725 else
3726 {
3727 dstp1 = adjust_address (dst, VOIDmode, 1);
3728 set_mem_size (dst, const1_rtx);
3729
3730 /* Initialize memory by storing the first byte. */
3731 emit_move_insn (adjust_address (dst, QImode, 0), val);
3732
3733 /* If count is 1 we are done. */
3734 emit_cmp_and_jump_insns (count, const1_rtx,
3735 EQ, NULL_RTX, mode, 1, end_label);
3736
3737 temp = expand_binop (mode, add_optab, count, GEN_INT (-2), count, 1, 0);
3738 }
3739 if (temp != count)
3740 emit_move_insn (count, temp);
3741
3742 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1, 0);
3743 if (temp != blocks)
3744 emit_move_insn (blocks, temp);
3745
3746 emit_cmp_and_jump_insns (blocks, const0_rtx,
3747 EQ, NULL_RTX, mode, 1, loop_end_label);
3748
3749 emit_label (loop_start_label);
3750
3751 if (val == const0_rtx)
3752 emit_insn (gen_clrmem_short (dst, GEN_INT (255)));
3753 else
3754 emit_insn (gen_movmem_short (dstp1, dst, GEN_INT (255)));
3755 s390_load_address (dst_addr,
3756 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
3757
3758 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1, 0);
3759 if (temp != blocks)
3760 emit_move_insn (blocks, temp);
3761
3762 emit_cmp_and_jump_insns (blocks, const0_rtx,
3763 EQ, NULL_RTX, mode, 1, loop_end_label);
3764
3765 emit_jump (loop_start_label);
3766 emit_label (loop_end_label);
3767
3768 if (val == const0_rtx)
3769 emit_insn (gen_clrmem_short (dst, convert_to_mode (Pmode, count, 1)));
3770 else
3771 emit_insn (gen_movmem_short (dstp1, dst, convert_to_mode (Pmode, count, 1)));
3772 emit_label (end_label);
3773 }
3774 }
3775
3776 /* Emit code to compare LEN bytes at OP0 with those at OP1,
3777 and return the result in TARGET. */
3778
3779 void
3780 s390_expand_cmpmem (rtx target, rtx op0, rtx op1, rtx len)
3781 {
3782 rtx ccreg = gen_rtx_REG (CCUmode, CC_REGNUM);
3783 rtx tmp;
3784
3785 /* As the result of CMPINT is inverted compared to what we need,
3786 we have to swap the operands. */
3787 tmp = op0; op0 = op1; op1 = tmp;
3788
3789 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
3790 {
3791 if (INTVAL (len) > 0)
3792 {
3793 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (INTVAL (len) - 1)));
3794 emit_insn (gen_cmpint (target, ccreg));
3795 }
3796 else
3797 emit_move_insn (target, const0_rtx);
3798 }
3799 else if (TARGET_MVCLE)
3800 {
3801 emit_insn (gen_cmpmem_long (op0, op1, convert_to_mode (Pmode, len, 1)));
3802 emit_insn (gen_cmpint (target, ccreg));
3803 }
3804 else
3805 {
3806 rtx addr0, addr1, count, blocks, temp;
3807 rtx loop_start_label = gen_label_rtx ();
3808 rtx loop_end_label = gen_label_rtx ();
3809 rtx end_label = gen_label_rtx ();
3810 enum machine_mode mode;
3811
3812 mode = GET_MODE (len);
3813 if (mode == VOIDmode)
3814 mode = Pmode;
3815
3816 addr0 = gen_reg_rtx (Pmode);
3817 addr1 = gen_reg_rtx (Pmode);
3818 count = gen_reg_rtx (mode);
3819 blocks = gen_reg_rtx (mode);
3820
3821 convert_move (count, len, 1);
3822 emit_cmp_and_jump_insns (count, const0_rtx,
3823 EQ, NULL_RTX, mode, 1, end_label);
3824
3825 emit_move_insn (addr0, force_operand (XEXP (op0, 0), NULL_RTX));
3826 emit_move_insn (addr1, force_operand (XEXP (op1, 0), NULL_RTX));
3827 op0 = change_address (op0, VOIDmode, addr0);
3828 op1 = change_address (op1, VOIDmode, addr1);
3829
3830 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1, 0);
3831 if (temp != count)
3832 emit_move_insn (count, temp);
3833
3834 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1, 0);
3835 if (temp != blocks)
3836 emit_move_insn (blocks, temp);
3837
3838 emit_cmp_and_jump_insns (blocks, const0_rtx,
3839 EQ, NULL_RTX, mode, 1, loop_end_label);
3840
3841 emit_label (loop_start_label);
3842
3843 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (255)));
3844 temp = gen_rtx_NE (VOIDmode, ccreg, const0_rtx);
3845 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
3846 gen_rtx_LABEL_REF (VOIDmode, end_label), pc_rtx);
3847 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
3848 emit_jump_insn (temp);
3849
3850 s390_load_address (addr0,
3851 gen_rtx_PLUS (Pmode, addr0, GEN_INT (256)));
3852 s390_load_address (addr1,
3853 gen_rtx_PLUS (Pmode, addr1, GEN_INT (256)));
3854
3855 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1, 0);
3856 if (temp != blocks)
3857 emit_move_insn (blocks, temp);
3858
3859 emit_cmp_and_jump_insns (blocks, const0_rtx,
3860 EQ, NULL_RTX, mode, 1, loop_end_label);
3861
3862 emit_jump (loop_start_label);
3863 emit_label (loop_end_label);
3864
3865 emit_insn (gen_cmpmem_short (op0, op1,
3866 convert_to_mode (Pmode, count, 1)));
3867 emit_label (end_label);
3868
3869 emit_insn (gen_cmpint (target, ccreg));
3870 }
3871 }
3872
3873
3874 /* Expand conditional increment or decrement using alc/slb instructions.
3875 Should generate code setting DST to either SRC or SRC + INCREMENT,
3876 depending on the result of the comparison CMP_OP0 CMP_CODE CMP_OP1.
3877 Returns true if successful, false otherwise.
3878
3879 That makes it possible to implement some if-constructs without jumps e.g.:
3880 (borrow = CC0 | CC1 and carry = CC2 | CC3)
3881 unsigned int a, b, c;
3882 if (a < b) c++; -> CCU b > a -> CC2; c += carry;
3883 if (a < b) c--; -> CCL3 a - b -> borrow; c -= borrow;
3884 if (a <= b) c++; -> CCL3 b - a -> borrow; c += carry;
3885 if (a <= b) c--; -> CCU a <= b -> borrow; c -= borrow;
3886
3887 Checks for EQ and NE with a nonzero value need an additional xor e.g.:
3888 if (a == b) c++; -> CCL3 a ^= b; 0 - a -> borrow; c += carry;
3889 if (a == b) c--; -> CCU a ^= b; a <= 0 -> CC0 | CC1; c -= borrow;
3890 if (a != b) c++; -> CCU a ^= b; a > 0 -> CC2; c += carry;
3891 if (a != b) c--; -> CCL3 a ^= b; 0 - a -> borrow; c -= borrow; */
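
/* For the increment case the expansion below boils down to two insns,
   sketched in simplified form (the real pattern carries an extra
   "+ (const_int 0)" term and a CC clobber):

     (set (reg CC) (compare cmp_op0 cmp_op1))
     (set (dst) (plus (gtu (reg CC) (const_int 0)) (src)))

   The comparison leaves the carry in CC and the alc pattern folds it into
   the addition, so no branch is needed.  */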
3892
3893 bool
3894 s390_expand_addcc (enum rtx_code cmp_code, rtx cmp_op0, rtx cmp_op1,
3895 rtx dst, rtx src, rtx increment)
3896 {
3897 enum machine_mode cmp_mode;
3898 enum machine_mode cc_mode;
3899 rtx op_res;
3900 rtx insn;
3901 rtvec p;
3902 int ret;
3903
3904 if ((GET_MODE (cmp_op0) == SImode || GET_MODE (cmp_op0) == VOIDmode)
3905 && (GET_MODE (cmp_op1) == SImode || GET_MODE (cmp_op1) == VOIDmode))
3906 cmp_mode = SImode;
3907 else if ((GET_MODE (cmp_op0) == DImode || GET_MODE (cmp_op0) == VOIDmode)
3908 && (GET_MODE (cmp_op1) == DImode || GET_MODE (cmp_op1) == VOIDmode))
3909 cmp_mode = DImode;
3910 else
3911 return false;
3912
3913 /* Try ADD LOGICAL WITH CARRY. */
3914 if (increment == const1_rtx)
3915 {
3916 /* Determine CC mode to use. */
3917 if (cmp_code == EQ || cmp_code == NE)
3918 {
3919 if (cmp_op1 != const0_rtx)
3920 {
3921 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
3922 NULL_RTX, 0, OPTAB_WIDEN);
3923 cmp_op1 = const0_rtx;
3924 }
3925
3926 cmp_code = cmp_code == EQ ? LEU : GTU;
3927 }
3928
3929 if (cmp_code == LTU || cmp_code == LEU)
3930 {
3931 rtx tem = cmp_op0;
3932 cmp_op0 = cmp_op1;
3933 cmp_op1 = tem;
3934 cmp_code = swap_condition (cmp_code);
3935 }
3936
3937 switch (cmp_code)
3938 {
3939 case GTU:
3940 cc_mode = CCUmode;
3941 break;
3942
3943 case GEU:
3944 cc_mode = CCL3mode;
3945 break;
3946
3947 default:
3948 return false;
3949 }
3950
3951 /* Emit comparison instruction pattern. */
3952 if (!register_operand (cmp_op0, cmp_mode))
3953 cmp_op0 = force_reg (cmp_mode, cmp_op0);
3954
3955 insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
3956 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
3957 /* We use insn_invalid_p here to add clobbers if required. */
3958 ret = insn_invalid_p (emit_insn (insn));
3959 gcc_assert (!ret);
3960
3961 /* Emit ALC instruction pattern. */
3962 op_res = gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
3963 gen_rtx_REG (cc_mode, CC_REGNUM),
3964 const0_rtx);
3965
3966 if (src != const0_rtx)
3967 {
3968 if (!register_operand (src, GET_MODE (dst)))
3969 src = force_reg (GET_MODE (dst), src);
3970
3971 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, src);
3972 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, const0_rtx);
3973 }
3974
3975 p = rtvec_alloc (2);
3976 RTVEC_ELT (p, 0) =
3977 gen_rtx_SET (VOIDmode, dst, op_res);
3978 RTVEC_ELT (p, 1) =
3979 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
3980 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
3981
3982 return true;
3983 }
3984
3985 /* Try SUBTRACT LOGICAL WITH BORROW. */
3986 if (increment == constm1_rtx)
3987 {
3988 /* Determine CC mode to use. */
3989 if (cmp_code == EQ || cmp_code == NE)
3990 {
3991 if (cmp_op1 != const0_rtx)
3992 {
3993 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
3994 NULL_RTX, 0, OPTAB_WIDEN);
3995 cmp_op1 = const0_rtx;
3996 }
3997
3998 cmp_code = cmp_code == EQ ? LEU : GTU;
3999 }
4000
4001 if (cmp_code == GTU || cmp_code == GEU)
4002 {
4003 rtx tem = cmp_op0;
4004 cmp_op0 = cmp_op1;
4005 cmp_op1 = tem;
4006 cmp_code = swap_condition (cmp_code);
4007 }
4008
4009 switch (cmp_code)
4010 {
4011 case LEU:
4012 cc_mode = CCUmode;
4013 break;
4014
4015 case LTU:
4016 cc_mode = CCL3mode;
4017 break;
4018
4019 default:
4020 return false;
4021 }
4022
4023 /* Emit comparison instruction pattern. */
4024 if (!register_operand (cmp_op0, cmp_mode))
4025 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4026
4027 insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
4028 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4029 /* We use insn_invalid_p here to add clobbers if required. */
4030 ret = insn_invalid_p (emit_insn (insn));
4031 gcc_assert (!ret);
4032
4033 /* Emit SLB instruction pattern. */
4034 if (!register_operand (src, GET_MODE (dst)))
4035 src = force_reg (GET_MODE (dst), src);
4036
4037 op_res = gen_rtx_MINUS (GET_MODE (dst),
4038 gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
4039 gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4040 gen_rtx_REG (cc_mode, CC_REGNUM),
4041 const0_rtx));
4042 p = rtvec_alloc (2);
4043 RTVEC_ELT (p, 0) =
4044 gen_rtx_SET (VOIDmode, dst, op_res);
4045 RTVEC_ELT (p, 1) =
4046 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4047 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
4048
4049 return true;
4050 }
4051
4052 return false;
4053 }
4054
4055 /* Expand code for the insv template. Return true if successful, false otherwise. */
4056
4057 bool
4058 s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
4059 {
4060 int bitsize = INTVAL (op1);
4061 int bitpos = INTVAL (op2);
4062
4063 /* We need byte alignment. */
4064 if (bitsize % BITS_PER_UNIT)
4065 return false;
4066
4067 if (bitpos == 0
4068 && memory_operand (dest, VOIDmode)
4069 && (register_operand (src, word_mode)
4070 || const_int_operand (src, VOIDmode)))
4071 {
4072 /* Emit standard pattern if possible. */
4073 enum machine_mode mode = smallest_mode_for_size (bitsize, MODE_INT);
4074 if (GET_MODE_BITSIZE (mode) == bitsize)
4075 emit_move_insn (adjust_address (dest, mode, 0), gen_lowpart (mode, src));
4076
4077 /* (set (ze (mem)) (const_int)). */
4078 else if (const_int_operand (src, VOIDmode))
4079 {
4080 int size = bitsize / BITS_PER_UNIT;
4081 rtx src_mem = adjust_address (force_const_mem (word_mode, src), BLKmode,
4082 GET_MODE_SIZE (word_mode) - size);
4083
4084 dest = adjust_address (dest, BLKmode, 0);
4085 set_mem_size (dest, GEN_INT (size));
4086 s390_expand_movmem (dest, src_mem, GEN_INT (size));
4087 }
4088
4089 /* (set (ze (mem)) (reg)). */
4090 else if (register_operand (src, word_mode))
4091 {
4092 if (bitsize <= GET_MODE_BITSIZE (SImode))
4093 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, op1,
4094 const0_rtx), src);
4095 else
4096 {
4097 /* Emit st,stcmh sequence. */
4098 int stcmh_width = bitsize - GET_MODE_BITSIZE (SImode);
4099 int size = stcmh_width / BITS_PER_UNIT;
4100
4101 emit_move_insn (adjust_address (dest, SImode, size),
4102 gen_lowpart (SImode, src));
4103 set_mem_size (dest, GEN_INT (size));
4104 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, GEN_INT
4105 (stcmh_width), const0_rtx),
4106 gen_rtx_LSHIFTRT (word_mode, src, GEN_INT
4107 (GET_MODE_BITSIZE (SImode))));
4108 }
4109 }
4110 else
4111 return false;
4112
4113 return true;
4114 }
4115
4116 /* (set (ze (reg)) (const_int)). */
4117 if (TARGET_ZARCH
4118 && register_operand (dest, word_mode)
4119 && (bitpos % 16) == 0
4120 && (bitsize % 16) == 0
4121 && const_int_operand (src, VOIDmode))
4122 {
4123 HOST_WIDE_INT val = INTVAL (src);
4124 int regpos = bitpos + bitsize;
4125
4126 while (regpos > bitpos)
4127 {
4128 enum machine_mode putmode;
4129 int putsize;
4130
4131 if (TARGET_EXTIMM && (regpos % 32 == 0) && (regpos >= bitpos + 32))
4132 putmode = SImode;
4133 else
4134 putmode = HImode;
4135
4136 putsize = GET_MODE_BITSIZE (putmode);
4137 regpos -= putsize;
4138 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
4139 GEN_INT (putsize),
4140 GEN_INT (regpos)),
4141 gen_int_mode (val, putmode));
4142 val >>= putsize;
4143 }
4144 gcc_assert (regpos == bitpos);
4145 return true;
4146 }
4147
4148 return false;
4149 }
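/* A hypothetical example of the register case above: inserting a
   constant into a 32-bit wide, 16-bit aligned field of a 64-bit
   register is split into halfword sized (or, with TARGET_EXTIMM and
   suitable alignment, word sized) ZERO_EXTRACT moves, one per chunk
   of the constant.  */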
4150
4151 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic which returns a
4152 register that holds VAL of mode MODE shifted by COUNT bits. */
4153
4154 static inline rtx
4155 s390_expand_mask_and_shift (rtx val, enum machine_mode mode, rtx count)
4156 {
4157 val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
4158 NULL_RTX, 1, OPTAB_DIRECT);
4159 return expand_simple_binop (SImode, ASHIFT, val, count,
4160 NULL_RTX, 1, OPTAB_DIRECT);
4161 }
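/* For example (purely illustrative): for MODE == QImode and a shift
   count of 24 the two expand_simple_binop calls above compute

       result = (val & 0xff) << 24;

   i.e. the byte is masked out and moved into its final position within
   the containing SImode word.  */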
4162
4163 /* Structure to hold the initial parameters for a compare_and_swap operation
4164 in HImode and QImode. */
4165
4166 struct alignment_context
4167 {
4168 rtx memsi; /* SI aligned memory location. */
4169 rtx shift; /* Bit offset with regard to lsb. */
4170 rtx modemask; /* Mask of the HQImode value, shifted by SHIFT bits. */
4171 rtx modemaski; /* ~modemask */
4172 bool aligned; /* True if memory is aligned, false otherwise. */
4173 };
4174
4175 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize
4176 the structure AC for transparent simplification, if the memory alignment is
4177 known to be at least 32 bits. MEM is the memory location for the actual operation
4178 and MODE its mode. */
4179
4180 static void
4181 init_alignment_context (struct alignment_context *ac, rtx mem,
4182 enum machine_mode mode)
4183 {
4184 ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
4185 ac->aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));
4186
4187 if (ac->aligned)
4188 ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned. */
4189 else
4190 {
4191 /* Alignment is unknown. */
4192 rtx byteoffset, addr, align;
4193
4194 /* Force the address into a register. */
4195 addr = force_reg (Pmode, XEXP (mem, 0));
4196
4197 /* Align it to SImode. */
4198 align = expand_simple_binop (Pmode, AND, addr,
4199 GEN_INT (-GET_MODE_SIZE (SImode)),
4200 NULL_RTX, 1, OPTAB_DIRECT);
4201 /* Generate MEM. */
4202 ac->memsi = gen_rtx_MEM (SImode, align);
4203 MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
4204 set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
4205 set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));
4206
4207 /* Calculate shiftcount. */
4208 byteoffset = expand_simple_binop (Pmode, AND, addr,
4209 GEN_INT (GET_MODE_SIZE (SImode) - 1),
4210 NULL_RTX, 1, OPTAB_DIRECT);
4211 /* As we already have some offset, evaluate the remaining distance. */
4212 ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
4213 NULL_RTX, 1, OPTAB_DIRECT);
4214
4215 }
4216 /* Shift is the byte count, but we need the bitcount. */
4217 ac->shift = expand_simple_binop (SImode, MULT, ac->shift, GEN_INT (BITS_PER_UNIT),
4218 NULL_RTX, 1, OPTAB_DIRECT);
4219 /* Calculate masks. */
4220 ac->modemask = expand_simple_binop (SImode, ASHIFT,
4221 GEN_INT (GET_MODE_MASK (mode)), ac->shift,
4222 NULL_RTX, 1, OPTAB_DIRECT);
4223 ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask, NULL_RTX, 1);
4224 }
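/* Hypothetical example (operand values made up for illustration): for
   an aligned HImode access the byte offset within the SImode word is 2,
   so after the conversion to a bit count the fields end up as

       ac->shift     = 16
       ac->modemask  = 0xffff << 16 = 0xffff0000
       ac->modemaski = 0x0000ffff

   i.e. MODEMASK selects the halfword inside the containing word and
   MODEMASKI selects everything outside of it.  */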
4225
4226 /* Expand an atomic compare and swap operation for HImode and QImode. MEM is
4227 the memory location, CMP the old value to compare MEM with and NEW the value
4228 to set if CMP == MEM.
4229 CMP is never in memory for compare_and_swap_cc because
4230 expand_bool_compare_and_swap puts it into a register for later compare. */
4231
4232 void
4233 s390_expand_cs_hqi (enum machine_mode mode, rtx target, rtx mem, rtx cmp, rtx new)
4234 {
4235 struct alignment_context ac;
4236 rtx cmpv, newv, val, resv, cc;
4237 rtx res = gen_reg_rtx (SImode);
4238 rtx csloop = gen_label_rtx ();
4239 rtx csend = gen_label_rtx ();
4240
4241 gcc_assert (register_operand (target, VOIDmode));
4242 gcc_assert (MEM_P (mem));
4243
4244 init_alignment_context (&ac, mem, mode);
4245
4246 /* Shift the values to the correct bit positions. */
4247 if (!(ac.aligned && MEM_P (cmp)))
4248 cmp = s390_expand_mask_and_shift (cmp, mode, ac.shift);
4249 if (!(ac.aligned && MEM_P (new)))
4250 new = s390_expand_mask_and_shift (new, mode, ac.shift);
4251
4252 /* Load full word. Subsequent loads are performed by CS. */
4253 val = expand_simple_binop (SImode, AND, ac.memsi, ac.modemaski,
4254 NULL_RTX, 1, OPTAB_DIRECT);
4255
4256 /* Start CS loop. */
4257 emit_label (csloop);
4258 /* val = "<mem>00..0<mem>"
4259 * cmp = "00..0<cmp>00..0"
4260 * new = "00..0<new>00..0"
4261 */
4262
4263 /* Patch cmp and new with val at correct position. */
4264 if (ac.aligned && MEM_P (cmp))
4265 {
4266 cmpv = force_reg (SImode, val);
4267 store_bit_field (cmpv, GET_MODE_BITSIZE (mode), 0, SImode, cmp);
4268 }
4269 else
4270 cmpv = force_reg (SImode, expand_simple_binop (SImode, IOR, cmp, val,
4271 NULL_RTX, 1, OPTAB_DIRECT));
4272 if (ac.aligned && MEM_P (new))
4273 {
4274 newv = force_reg (SImode, val);
4275 store_bit_field (newv, GET_MODE_BITSIZE (mode), 0, SImode, new);
4276 }
4277 else
4278 newv = force_reg (SImode, expand_simple_binop (SImode, IOR, new, val,
4279 NULL_RTX, 1, OPTAB_DIRECT));
4280
4281 /* Jump to end if we're done (likely?). */
4282 s390_emit_jump (csend, s390_emit_compare_and_swap (EQ, res, ac.memsi,
4283 cmpv, newv));
4284
4285 /* Check for changes outside mode. */
4286 resv = expand_simple_binop (SImode, AND, res, ac.modemaski,
4287 NULL_RTX, 1, OPTAB_DIRECT);
4288 cc = s390_emit_compare (NE, resv, val);
4289 emit_move_insn (val, resv);
4290 /* Loop internally if so. */
4291 s390_emit_jump (csloop, cc);
4292
4293 emit_label (csend);
4294
4295 /* Return the correct part of the bitfield. */
4296 convert_move (target, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
4297 NULL_RTX, 1, OPTAB_DIRECT), 1);
4298 }
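/* A rough, purely illustrative sketch of the loop emitted above:

       val = *memsi & modemaski;          -- word with the field cleared
       for (;;)
         {
           cmpv = val | cmp;              -- expected full word
           newv = val | new;              -- replacement full word
           res  = CS (memsi, cmpv, newv); -- word-sized compare-and-swap
           if (res == cmpv)
             break;                       -- swap succeeded
           if ((res & modemaski) == val)
             break;                       -- only the field differed: give up
           val = res & modemaski;         -- outside bits changed: retry
         }
       target = res >> shift;             -- extract the HImode/QImode part

   The identifiers are the local variables of this function; the sketch
   is not meant to be compilable code.  */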
4299
4300 /* Expand an atomic operation CODE of mode MODE. MEM is the memory location
4301 and VAL the value to play with. If AFTER is true then store the value
4302 MEM holds after the operation, if AFTER is false then store the value MEM
4303 holds before the operation. If TARGET is zero then discard that value, else
4304 store it to TARGET. */
4305
4306 void
4307 s390_expand_atomic (enum machine_mode mode, enum rtx_code code,
4308 rtx target, rtx mem, rtx val, bool after)
4309 {
4310 struct alignment_context ac;
4311 rtx cmp;
4312 rtx new = gen_reg_rtx (SImode);
4313 rtx orig = gen_reg_rtx (SImode);
4314 rtx csloop = gen_label_rtx ();
4315
4316 gcc_assert (!target || register_operand (target, VOIDmode));
4317 gcc_assert (MEM_P (mem));
4318
4319 init_alignment_context (&ac, mem, mode);
4320
4321 /* Shift val to the correct bit positions.
4322 Preserve "icm", but prevent "ex icm". */
4323 if (!(ac.aligned && code == SET && MEM_P (val)))
4324 val = s390_expand_mask_and_shift (val, mode, ac.shift);
4325
4326 /* Further preparation insns. */
4327 if (code == PLUS || code == MINUS)
4328 emit_move_insn (orig, val);
4329 else if (code == MULT || code == AND) /* val = "11..1<val>11..1" */
4330 val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
4331 NULL_RTX, 1, OPTAB_DIRECT);
4332
4333 /* Load full word. Subsequent loads are performed by CS. */
4334 cmp = force_reg (SImode, ac.memsi);
4335
4336 /* Start CS loop. */
4337 emit_label (csloop);
4338 emit_move_insn (new, cmp);
4339
4340 /* Patch new with val at correct position. */
4341 switch (code)
4342 {
4343 case PLUS:
4344 case MINUS:
4345 val = expand_simple_binop (SImode, code, new, orig,
4346 NULL_RTX, 1, OPTAB_DIRECT);
4347 val = expand_simple_binop (SImode, AND, val, ac.modemask,
4348 NULL_RTX, 1, OPTAB_DIRECT);
4349 /* FALLTHRU */
4350 case SET:
4351 if (ac.aligned && MEM_P (val))
4352 store_bit_field (new, GET_MODE_BITSIZE (mode), 0, SImode, val);
4353 else
4354 {
4355 new = expand_simple_binop (SImode, AND, new, ac.modemaski,
4356 NULL_RTX, 1, OPTAB_DIRECT);
4357 new = expand_simple_binop (SImode, IOR, new, val,
4358 NULL_RTX, 1, OPTAB_DIRECT);
4359 }
4360 break;
4361 case AND:
4362 case IOR:
4363 case XOR:
4364 new = expand_simple_binop (SImode, code, new, val,
4365 NULL_RTX, 1, OPTAB_DIRECT);
4366 break;
4367 case MULT: /* NAND */
4368 new = expand_simple_binop (SImode, XOR, new, ac.modemask,
4369 NULL_RTX, 1, OPTAB_DIRECT);
4370 new = expand_simple_binop (SImode, AND, new, val,
4371 NULL_RTX, 1, OPTAB_DIRECT);
4372 break;
4373 default:
4374 gcc_unreachable ();
4375 }
4376
4377 s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, cmp,
4378 ac.memsi, cmp, new));
4379
4380 /* Return the correct part of the bitfield. */
4381 if (target)
4382 convert_move (target, expand_simple_binop (SImode, LSHIFTRT,
4383 after ? new : cmp, ac.shift,
4384 NULL_RTX, 1, OPTAB_DIRECT), 1);
4385 }
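/* Illustrative sketch only (not part of the original sources):

       cmp = *memsi;
       do
         {
           new = cmp;
           apply CODE with VAL to the MODE-sized field inside new,
           leaving the remaining bits of the word unchanged;
         }
       while (!CS (memsi, cmp, new));     -- CS reloads cmp on failure
       if (target)
         target = (after ? new : cmp) >> shift;
 */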
4386
4387 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
4388 We need to emit DTP-relative relocations. */
4389
4390 static void s390_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
4391
4392 static void
4393 s390_output_dwarf_dtprel (FILE *file, int size, rtx x)
4394 {
4395 switch (size)
4396 {
4397 case 4:
4398 fputs ("\t.long\t", file);
4399 break;
4400 case 8:
4401 fputs ("\t.quad\t", file);
4402 break;
4403 default:
4404 gcc_unreachable ();
4405 }
4406 output_addr_const (file, x);
4407 fputs ("@DTPOFF", file);
4408 }
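/* For instance, for a hypothetical TLS symbol "foo" and SIZE == 8 the
   function above emits (illustrative only):

       .quad   foo@DTPOFF
 */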
4409
4410 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
4411 /* Implement TARGET_MANGLE_TYPE. */
4412
4413 static const char *
4414 s390_mangle_type (const_tree type)
4415 {
4416 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
4417 && TARGET_LONG_DOUBLE_128)
4418 return "g";
4419
4420 /* For all other types, use normal C++ mangling. */
4421 return NULL;
4422 }
4423 #endif
4424
4425 /* In the name of slightly smaller debug output, and to cater to
4426 general assembler lossage, recognize various UNSPEC sequences
4427 and turn them back into a direct symbol reference. */
4428
4429 static rtx
4430 s390_delegitimize_address (rtx orig_x)
4431 {
4432 rtx x = orig_x, y;
4433
4434 if (GET_CODE (x) != MEM)
4435 return orig_x;
4436
4437 x = XEXP (x, 0);
4438 if (GET_CODE (x) == PLUS
4439 && GET_CODE (XEXP (x, 1)) == CONST
4440 && GET_CODE (XEXP (x, 0)) == REG
4441 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
4442 {
4443 y = XEXP (XEXP (x, 1), 0);
4444 if (GET_CODE (y) == UNSPEC
4445 && XINT (y, 1) == UNSPEC_GOT)
4446 return XVECEXP (y, 0, 0);
4447 return orig_x;
4448 }
4449
4450 if (GET_CODE (x) == CONST)
4451 {
4452 y = XEXP (x, 0);
4453 if (GET_CODE (y) == UNSPEC
4454 && XINT (y, 1) == UNSPEC_GOTENT)
4455 return XVECEXP (y, 0, 0);
4456 return orig_x;
4457 }
4458
4459 return orig_x;
4460 }
4461
4462 /* Output operand OP to stdio stream FILE.
4463 OP is an address (register + offset) which is not used to address data;
4464 instead the rightmost bits are interpreted as the value. */
4465
4466 static void
4467 print_shift_count_operand (FILE *file, rtx op)
4468 {
4469 HOST_WIDE_INT offset;
4470 rtx base;
4471
4472 /* Extract base register and offset. */
4473 if (!s390_decompose_shift_count (op, &base, &offset))
4474 gcc_unreachable ();
4475
4476 /* Sanity check. */
4477 if (base)
4478 {
4479 gcc_assert (GET_CODE (base) == REG);
4480 gcc_assert (REGNO (base) < FIRST_PSEUDO_REGISTER);
4481 gcc_assert (REGNO_REG_CLASS (REGNO (base)) == ADDR_REGS);
4482 }
4483
4484 /* Offsets are restricted to twelve bits. */
4485 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset & ((1 << 12) - 1));
4486 if (base)
4487 fprintf (file, "(%s)", reg_names[REGNO (base)]);
4488 }
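/* Hypothetical example (operands made up for illustration): for
   OP == (plus (reg 2) (const_int 3)) this prints "3(%r2)", while a
   plain (const_int 3) prints just "3".  Register names are taken from
   reg_names[].  */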
4489
4490 /* See 'get_some_local_dynamic_name'. */
4491
4492 static int
4493 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
4494 {
4495 rtx x = *px;
4496
4497 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
4498 {
4499 x = get_pool_constant (x);
4500 return for_each_rtx (&x, get_some_local_dynamic_name_1, 0);
4501 }
4502
4503 if (GET_CODE (x) == SYMBOL_REF
4504 && tls_symbolic_operand (x) == TLS_MODEL_LOCAL_DYNAMIC)
4505 {
4506 cfun->machine->some_ld_name = XSTR (x, 0);
4507 return 1;
4508 }
4509
4510 return 0;
4511 }
4512
4513 /* Locate some local-dynamic symbol still in use by this function
4514 so that we can print its name in local-dynamic base patterns. */
4515
4516 static const char *
4517 get_some_local_dynamic_name (void)
4518 {
4519 rtx insn;
4520
4521 if (cfun->machine->some_ld_name)
4522 return cfun->machine->some_ld_name;
4523
4524 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
4525 if (INSN_P (insn)
4526 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
4527 return cfun->machine->some_ld_name;
4528
4529 gcc_unreachable ();
4530 }
4531
4532 /* Output machine-dependent UNSPECs occurring in address constant X
4533 in assembler syntax to stdio stream FILE. Returns true if the
4534 constant X could be recognized, false otherwise. */
4535
4536 bool
4537 s390_output_addr_const_extra (FILE *file, rtx x)
4538 {
4539 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
4540 switch (XINT (x, 1))
4541 {
4542 case UNSPEC_GOTENT:
4543 output_addr_const (file, XVECEXP (x, 0, 0));
4544 fprintf (file, "@GOTENT");
4545 return true;
4546 case UNSPEC_GOT:
4547 output_addr_const (file, XVECEXP (x, 0, 0));
4548 fprintf (file, "@GOT");
4549 return true;
4550 case UNSPEC_GOTOFF:
4551 output_addr_const (file, XVECEXP (x, 0, 0));
4552 fprintf (file, "@GOTOFF");
4553 return true;
4554 case UNSPEC_PLT:
4555 output_addr_const (file, XVECEXP (x, 0, 0));
4556 fprintf (file, "@PLT");
4557 return true;
4558 case UNSPEC_PLTOFF:
4559 output_addr_const (file, XVECEXP (x, 0, 0));
4560 fprintf (file, "@PLTOFF");
4561 return true;
4562 case UNSPEC_TLSGD:
4563 output_addr_const (file, XVECEXP (x, 0, 0));
4564 fprintf (file, "@TLSGD");
4565 return true;
4566 case UNSPEC_TLSLDM:
4567 assemble_name (file, get_some_local_dynamic_name ());
4568 fprintf (file, "@TLSLDM");
4569 return true;
4570 case UNSPEC_DTPOFF:
4571 output_addr_const (file, XVECEXP (x, 0, 0));
4572 fprintf (file, "@DTPOFF");
4573 return true;
4574 case UNSPEC_NTPOFF:
4575 output_addr_const (file, XVECEXP (x, 0, 0));
4576 fprintf (file, "@NTPOFF");
4577 return true;
4578 case UNSPEC_GOTNTPOFF:
4579 output_addr_const (file, XVECEXP (x, 0, 0));
4580 fprintf (file, "@GOTNTPOFF");
4581 return true;
4582 case UNSPEC_INDNTPOFF:
4583 output_addr_const (file, XVECEXP (x, 0, 0));
4584 fprintf (file, "@INDNTPOFF");
4585 return true;
4586 }
4587
4588 return false;
4589 }
4590
4591 /* Output address operand ADDR in assembler syntax to
4592 stdio stream FILE. */
4593
4594 void
4595 print_operand_address (FILE *file, rtx addr)
4596 {
4597 struct s390_address ad;
4598
4599 if (!s390_decompose_address (addr, &ad)
4600 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
4601 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
4602 output_operand_lossage ("cannot decompose address");
4603
4604 if (ad.disp)
4605 output_addr_const (file, ad.disp);
4606 else
4607 fprintf (file, "0");
4608
4609 if (ad.base && ad.indx)
4610 fprintf (file, "(%s,%s)", reg_names[REGNO (ad.indx)],
4611 reg_names[REGNO (ad.base)]);
4612 else if (ad.base)
4613 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
4614 }
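/* Hypothetical examples of the output produced above (operand values
   made up for illustration): a base-only address with displacement 8
   prints as "8(%r11)", a base-plus-index address as "8(%r3,%r11)", and
   a displacement-only address as "8".  */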
4615
4616 /* Output operand X in assembler syntax to stdio stream FILE.
4617 CODE specifies the format flag. The following format flags
4618 are recognized:
4619
4620 'C': print opcode suffix for branch condition.
4621 'D': print opcode suffix for inverse branch condition.
4622 'J': print tls_load/tls_gdcall/tls_ldcall suffix
4623 'G': print the size of the operand in bytes.
4624 'O': print only the displacement of a memory reference.
4625 'R': print only the base register of a memory reference.
4626 'S': print S-type memory reference (base+displacement).
4627 'N': print the second word of a DImode operand.
4628 'M': print the second word of a TImode operand.
4629 'Y': print shift count operand.
4630
4631 'b': print integer X as if it's an unsigned byte.
4632 'x': print integer X as if it's an unsigned halfword.
4633 'h': print integer X as if it's a signed halfword.
4634 'i': print the first nonzero HImode part of X.
4635 'j': print the first HImode part unequal to -1 of X.
4636 'k': print the first nonzero SImode part of X.
4637 'm': print the first SImode part unequal to -1 of X.
4638 'o': print integer X as if it's an unsigned 32-bit word. */
4639
4640 void
4641 print_operand (FILE *file, rtx x, int code)
4642 {
4643 switch (code)
4644 {
4645 case 'C':
4646 fprintf (file, s390_branch_condition_mnemonic (x, FALSE));
4647 return;
4648
4649 case 'D':
4650 fprintf (file, s390_branch_condition_mnemonic (x, TRUE));
4651 return;
4652
4653 case 'J':
4654 if (GET_CODE (x) == SYMBOL_REF)
4655 {
4656 fprintf (file, "%s", ":tls_load:");
4657 output_addr_const (file, x);
4658 }
4659 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
4660 {
4661 fprintf (file, "%s", ":tls_gdcall:");
4662 output_addr_const (file, XVECEXP (x, 0, 0));
4663 }
4664 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM)
4665 {
4666 fprintf (file, "%s", ":tls_ldcall:");
4667 assemble_name (file, get_some_local_dynamic_name ());
4668 }
4669 else
4670 gcc_unreachable ();
4671 return;
4672
4673 case 'G':
4674 fprintf (file, "%u", GET_MODE_SIZE (GET_MODE (x)));
4675 return;
4676
4677 case 'O':
4678 {
4679 struct s390_address ad;
4680 int ret;
4681
4682 gcc_assert (GET_CODE (x) == MEM);
4683 ret = s390_decompose_address (XEXP (x, 0), &ad);
4684 gcc_assert (ret);
4685 gcc_assert (!ad.base || REGNO_OK_FOR_BASE_P (REGNO (ad.base)));
4686 gcc_assert (!ad.indx);
4687
4688 if (ad.disp)
4689 output_addr_const (file, ad.disp);
4690 else
4691 fprintf (file, "0");
4692 }
4693 return;
4694
4695 case 'R':
4696 {
4697 struct s390_address ad;
4698 int ret;
4699
4700 gcc_assert (GET_CODE (x) == MEM);
4701 ret = s390_decompose_address (XEXP (x, 0), &ad);
4702 gcc_assert (ret);
4703 gcc_assert (!ad.base || REGNO_OK_FOR_BASE_P (REGNO (ad.base)));
4704 gcc_assert (!ad.indx);
4705
4706 if (ad.base)
4707 fprintf (file, "%s", reg_names[REGNO (ad.base)]);
4708 else
4709 fprintf (file, "0");
4710 }
4711 return;
4712
4713 case 'S':
4714 {
4715 struct s390_address ad;
4716 int ret;
4717
4718 gcc_assert (GET_CODE (x) == MEM);
4719 ret = s390_decompose_address (XEXP (x, 0), &ad);
4720 gcc_assert (ret);
4721 gcc_assert (!ad.base || REGNO_OK_FOR_BASE_P (REGNO (ad.base)));
4722 gcc_assert (!ad.indx);
4723
4724 if (ad.disp)
4725 output_addr_const (file, ad.disp);
4726 else
4727 fprintf (file, "0");
4728
4729 if (ad.base)
4730 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
4731 }
4732 return;
4733
4734 case 'N':
4735 if (GET_CODE (x) == REG)
4736 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
4737 else if (GET_CODE (x) == MEM)
4738 x = change_address (x, VOIDmode, plus_constant (XEXP (x, 0), 4));
4739 else
4740 gcc_unreachable ();
4741 break;
4742
4743 case 'M':
4744 if (GET_CODE (x) == REG)
4745 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
4746 else if (GET_CODE (x) == MEM)
4747 x = change_address (x, VOIDmode, plus_constant (XEXP (x, 0), 8));
4748 else
4749 gcc_unreachable ();
4750 break;
4751
4752 case 'Y':
4753 print_shift_count_operand (file, x);
4754 return;
4755 }
4756
4757 switch (GET_CODE (x))
4758 {
4759 case REG:
4760 fprintf (file, "%s", reg_names[REGNO (x)]);
4761 break;
4762
4763 case MEM:
4764 output_address (XEXP (x, 0));
4765 break;
4766
4767 case CONST:
4768 case CODE_LABEL:
4769 case LABEL_REF:
4770 case SYMBOL_REF:
4771 output_addr_const (file, x);
4772 break;
4773
4774 case CONST_INT:
4775 if (code == 'b')
4776 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xff);
4777 else if (code == 'x')
4778 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xffff);
4779 else if (code == 'h')
4780 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
4781 else if (code == 'i')
4782 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
4783 s390_extract_part (x, HImode, 0));
4784 else if (code == 'j')
4785 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
4786 s390_extract_part (x, HImode, -1));
4787 else if (code == 'k')
4788 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
4789 s390_extract_part (x, SImode, 0));
4790 else if (code == 'm')
4791 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
4792 s390_extract_part (x, SImode, -1));
4793 else if (code == 'o')
4794 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xffffffff);
4795 else
4796 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
4797 break;
4798
4799 case CONST_DOUBLE:
4800 gcc_assert (GET_MODE (x) == VOIDmode);
4801 if (code == 'b')
4802 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xff);
4803 else if (code == 'x')
4804 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xffff);
4805 else if (code == 'h')
4806 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ((CONST_DOUBLE_LOW (x) & 0xffff) ^ 0x8000) - 0x8000);
4807 else
4808 gcc_unreachable ();
4809 break;
4810
4811 default:
4812 fatal_insn ("UNKNOWN in print_operand !?", x);
4813 break;
4814 }
4815 }
4816
4817 /* Target hook for assembling integer objects. We need to define it
4818 here to work around a bug in some versions of GAS, which couldn't
4819 handle values smaller than INT_MIN when printed in decimal. */
4820
4821 static bool
4822 s390_assemble_integer (rtx x, unsigned int size, int aligned_p)
4823 {
4824 if (size == 8 && aligned_p
4825 && GET_CODE (x) == CONST_INT && INTVAL (x) < INT_MIN)
4826 {
4827 fprintf (asm_out_file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n",
4828 INTVAL (x));
4829 return true;
4830 }
4831 return default_assemble_integer (x, size, aligned_p);
4832 }
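/* E.g. for the (hypothetical) 64-bit constant -0x80000001, which is
   smaller than INT_MIN, the workaround above prints the value in hex:

       .quad   0xffffffff7fffffff
 */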
4833
4834 /* Returns true if register REGNO is used for forming
4835 a memory address in expression X. */
4836
4837 static bool
4838 reg_used_in_mem_p (int regno, rtx x)
4839 {
4840 enum rtx_code code = GET_CODE (x);
4841 int i, j;
4842 const char *fmt;
4843
4844 if (code == MEM)
4845 {
4846 if (refers_to_regno_p (regno, regno+1,
4847 XEXP (x, 0), 0))
4848 return true;
4849 }
4850 else if (code == SET
4851 && GET_CODE (SET_DEST (x)) == PC)
4852 {
4853 if (refers_to_regno_p (regno, regno+1,
4854 SET_SRC (x), 0))
4855 return true;
4856 }
4857
4858 fmt = GET_RTX_FORMAT (code);
4859 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4860 {
4861 if (fmt[i] == 'e'
4862 && reg_used_in_mem_p (regno, XEXP (x, i)))
4863 return true;
4864
4865 else if (fmt[i] == 'E')
4866 for (j = 0; j < XVECLEN (x, i); j++)
4867 if (reg_used_in_mem_p (regno, XVECEXP (x, i, j)))
4868 return true;
4869 }
4870 return false;
4871 }
4872
4873 /* Returns true if expression DEP_RTX sets an address register
4874 used by instruction INSN to address memory. */
4875
4876 static bool
4877 addr_generation_dependency_p (rtx dep_rtx, rtx insn)
4878 {
4879 rtx target, pat;
4880
4881 if (GET_CODE (dep_rtx) == INSN)
4882 dep_rtx = PATTERN (dep_rtx);
4883
4884 if (GET_CODE (dep_rtx) == SET)
4885 {
4886 target = SET_DEST (dep_rtx);
4887 if (GET_CODE (target) == STRICT_LOW_PART)
4888 target = XEXP (target, 0);
4889 while (GET_CODE (target) == SUBREG)
4890 target = SUBREG_REG (target);
4891
4892 if (GET_CODE (target) == REG)
4893 {
4894 int regno = REGNO (target);
4895
4896 if (s390_safe_attr_type (insn) == TYPE_LA)
4897 {
4898 pat = PATTERN (insn);
4899 if (GET_CODE (pat) == PARALLEL)
4900 {
4901 gcc_assert (XVECLEN (pat, 0) == 2);
4902 pat = XVECEXP (pat, 0, 0);
4903 }
4904 gcc_assert (GET_CODE (pat) == SET);
4905 return refers_to_regno_p (regno, regno+1, SET_SRC (pat), 0);
4906 }
4907 else if (get_attr_atype (insn) == ATYPE_AGEN)
4908 return reg_used_in_mem_p (regno, PATTERN (insn));
4909 }
4910 }
4911 return false;
4912 }
4913
4914 /* Return 1 if dep_insn sets a register used by insn in the agen unit. */
4915
4916 int
4917 s390_agen_dep_p (rtx dep_insn, rtx insn)
4918 {
4919 rtx dep_rtx = PATTERN (dep_insn);
4920 int i;
4921
4922 if (GET_CODE (dep_rtx) == SET
4923 && addr_generation_dependency_p (dep_rtx, insn))
4924 return 1;
4925 else if (GET_CODE (dep_rtx) == PARALLEL)
4926 {
4927 for (i = 0; i < XVECLEN (dep_rtx, 0); i++)
4928 {
4929 if (addr_generation_dependency_p (XVECEXP (dep_rtx, 0, i), insn))
4930 return 1;
4931 }
4932 }
4933 return 0;
4934 }
4935
4936 /* A C statement (sans semicolon) to update the integer scheduling priority
4937 INSN_PRIORITY (INSN). Increase the priority to execute the INSN earlier,
4938 reduce the priority to execute INSN later. Do not define this macro if
4939 you do not need to adjust the scheduling priorities of insns.
4940
4941 A STD instruction should be scheduled earlier,
4942 in order to use the bypass. */
4943
4944 static int
4945 s390_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
4946 {
4947 if (! INSN_P (insn))
4948 return priority;
4949
4950 if (s390_tune != PROCESSOR_2084_Z990
4951 && s390_tune != PROCESSOR_2094_Z9_109)
4952 return priority;
4953
4954 switch (s390_safe_attr_type (insn))
4955 {
4956 case TYPE_FSTOREDF:
4957 case TYPE_FSTORESF:
4958 priority = priority << 3;
4959 break;
4960 case TYPE_STORE:
4961 case TYPE_STM:
4962 priority = priority << 1;
4963 break;
4964 default:
4965 break;
4966 }
4967 return priority;
4968 }
4969
4970 /* The number of instructions that can be issued per cycle. */
4971
4972 static int
4973 s390_issue_rate (void)
4974 {
4975 switch (s390_tune)
4976 {
4977 case PROCESSOR_2084_Z990:
4978 case PROCESSOR_2094_Z9_109:
4979 return 3;
4980 case PROCESSOR_2097_Z10:
4981 return 2;
4982 default:
4983 return 1;
4984 }
4985 }
4986
4987 static int
4988 s390_first_cycle_multipass_dfa_lookahead (void)
4989 {
4990 return 4;
4991 }
4992
4993
4994 /* Annotate every literal pool reference in X by an UNSPEC_LTREF expression.
4995 Fix up MEMs as required. */
4996
4997 static void
4998 annotate_constant_pool_refs (rtx *x)
4999 {
5000 int i, j;
5001 const char *fmt;
5002
5003 gcc_assert (GET_CODE (*x) != SYMBOL_REF
5004 || !CONSTANT_POOL_ADDRESS_P (*x));
5005
5006 /* Literal pool references can only occur inside a MEM ... */
5007 if (GET_CODE (*x) == MEM)
5008 {
5009 rtx memref = XEXP (*x, 0);
5010
5011 if (GET_CODE (memref) == SYMBOL_REF
5012 && CONSTANT_POOL_ADDRESS_P (memref))
5013 {
5014 rtx base = cfun->machine->base_reg;
5015 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, memref, base),
5016 UNSPEC_LTREF);
5017
5018 *x = replace_equiv_address (*x, addr);
5019 return;
5020 }
5021
5022 if (GET_CODE (memref) == CONST
5023 && GET_CODE (XEXP (memref, 0)) == PLUS
5024 && GET_CODE (XEXP (XEXP (memref, 0), 1)) == CONST_INT
5025 && GET_CODE (XEXP (XEXP (memref, 0), 0)) == SYMBOL_REF
5026 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (memref, 0), 0)))
5027 {
5028 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (memref, 0), 1));
5029 rtx sym = XEXP (XEXP (memref, 0), 0);
5030 rtx base = cfun->machine->base_reg;
5031 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5032 UNSPEC_LTREF);
5033
5034 *x = replace_equiv_address (*x, plus_constant (addr, off));
5035 return;
5036 }
5037 }
5038
5039 /* ... or a load-address type pattern. */
5040 if (GET_CODE (*x) == SET)
5041 {
5042 rtx addrref = SET_SRC (*x);
5043
5044 if (GET_CODE (addrref) == SYMBOL_REF
5045 && CONSTANT_POOL_ADDRESS_P (addrref))
5046 {
5047 rtx base = cfun->machine->base_reg;
5048 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addrref, base),
5049 UNSPEC_LTREF);
5050
5051 SET_SRC (*x) = addr;
5052 return;
5053 }
5054
5055 if (GET_CODE (addrref) == CONST
5056 && GET_CODE (XEXP (addrref, 0)) == PLUS
5057 && GET_CODE (XEXP (XEXP (addrref, 0), 1)) == CONST_INT
5058 && GET_CODE (XEXP (XEXP (addrref, 0), 0)) == SYMBOL_REF
5059 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (addrref, 0), 0)))
5060 {
5061 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (addrref, 0), 1));
5062 rtx sym = XEXP (XEXP (addrref, 0), 0);
5063 rtx base = cfun->machine->base_reg;
5064 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5065 UNSPEC_LTREF);
5066
5067 SET_SRC (*x) = plus_constant (addr, off);
5068 return;
5069 }
5070 }
5071
5072 /* Annotate LTREL_BASE as well. */
5073 if (GET_CODE (*x) == UNSPEC
5074 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
5075 {
5076 rtx base = cfun->machine->base_reg;
5077 *x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XVECEXP (*x, 0, 0), base),
5078 UNSPEC_LTREL_BASE);
5079 return;
5080 }
5081
5082 fmt = GET_RTX_FORMAT (GET_CODE (*x));
5083 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
5084 {
5085 if (fmt[i] == 'e')
5086 {
5087 annotate_constant_pool_refs (&XEXP (*x, i));
5088 }
5089 else if (fmt[i] == 'E')
5090 {
5091 for (j = 0; j < XVECLEN (*x, i); j++)
5092 annotate_constant_pool_refs (&XVECEXP (*x, i, j));
5093 }
5094 }
5095 }
5096
5097 /* Split all branches that exceed the maximum distance.
5098 Returns true if this created a new literal pool entry. */
5099
5100 static int
5101 s390_split_branches (void)
5102 {
5103 rtx temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
5104 int new_literal = 0, ret;
5105 rtx insn, pat, tmp, target;
5106 rtx *label;
5107
5108 /* We need correct insn addresses. */
5109
5110 shorten_branches (get_insns ());
5111
5112 /* Find all branches that exceed 64KB, and split them. */
5113
5114 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5115 {
5116 if (GET_CODE (insn) != JUMP_INSN)
5117 continue;
5118
5119 pat = PATTERN (insn);
5120 if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 2)
5121 pat = XVECEXP (pat, 0, 0);
5122 if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
5123 continue;
5124
5125 if (GET_CODE (SET_SRC (pat)) == LABEL_REF)
5126 {
5127 label = &SET_SRC (pat);
5128 }
5129 else if (GET_CODE (SET_SRC (pat)) == IF_THEN_ELSE)
5130 {
5131 if (GET_CODE (XEXP (SET_SRC (pat), 1)) == LABEL_REF)
5132 label = &XEXP (SET_SRC (pat), 1);
5133 else if (GET_CODE (XEXP (SET_SRC (pat), 2)) == LABEL_REF)
5134 label = &XEXP (SET_SRC (pat), 2);
5135 else
5136 continue;
5137 }
5138 else
5139 continue;
5140
5141 if (get_attr_length (insn) <= 4)
5142 continue;
5143
5144 /* We are going to use the return register as a scratch register,
5145 so make sure it will be saved/restored by the prologue/epilogue. */
5146 cfun_frame_layout.save_return_addr_p = 1;
5147
5148 if (!flag_pic)
5149 {
5150 new_literal = 1;
5151 tmp = force_const_mem (Pmode, *label);
5152 tmp = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, tmp), insn);
5153 INSN_ADDRESSES_NEW (tmp, -1);
5154 annotate_constant_pool_refs (&PATTERN (tmp));
5155
5156 target = temp_reg;
5157 }
5158 else
5159 {
5160 new_literal = 1;
5161 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, *label),
5162 UNSPEC_LTREL_OFFSET);
5163 target = gen_rtx_CONST (Pmode, target);
5164 target = force_const_mem (Pmode, target);
5165 tmp = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, target), insn);
5166 INSN_ADDRESSES_NEW (tmp, -1);
5167 annotate_constant_pool_refs (&PATTERN (tmp));
5168
5169 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XEXP (target, 0),
5170 cfun->machine->base_reg),
5171 UNSPEC_LTREL_BASE);
5172 target = gen_rtx_PLUS (Pmode, temp_reg, target);
5173 }
5174
5175 ret = validate_change (insn, label, target, 0);
5176 gcc_assert (ret);
5177 }
5178
5179 return new_literal;
5180 }
5181
5182
5183 /* Find an annotated literal pool symbol referenced in RTX X,
5184 and store it at REF. Will abort if X contains references to
5185 more than one such pool symbol; multiple references to the same
5186 symbol are allowed, however.
5187
5188 The rtx pointed to by REF must be initialized to NULL_RTX
5189 by the caller before calling this routine. */
5190
5191 static void
5192 find_constant_pool_ref (rtx x, rtx *ref)
5193 {
5194 int i, j;
5195 const char *fmt;
5196
5197 /* Ignore LTREL_BASE references. */
5198 if (GET_CODE (x) == UNSPEC
5199 && XINT (x, 1) == UNSPEC_LTREL_BASE)
5200 return;
5201 /* Likewise POOL_ENTRY insns. */
5202 if (GET_CODE (x) == UNSPEC_VOLATILE
5203 && XINT (x, 1) == UNSPECV_POOL_ENTRY)
5204 return;
5205
5206 gcc_assert (GET_CODE (x) != SYMBOL_REF
5207 || !CONSTANT_POOL_ADDRESS_P (x));
5208
5209 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_LTREF)
5210 {
5211 rtx sym = XVECEXP (x, 0, 0);
5212 gcc_assert (GET_CODE (sym) == SYMBOL_REF
5213 && CONSTANT_POOL_ADDRESS_P (sym));
5214
5215 if (*ref == NULL_RTX)
5216 *ref = sym;
5217 else
5218 gcc_assert (*ref == sym);
5219
5220 return;
5221 }
5222
5223 fmt = GET_RTX_FORMAT (GET_CODE (x));
5224 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5225 {
5226 if (fmt[i] == 'e')
5227 {
5228 find_constant_pool_ref (XEXP (x, i), ref);
5229 }
5230 else if (fmt[i] == 'E')
5231 {
5232 for (j = 0; j < XVECLEN (x, i); j++)
5233 find_constant_pool_ref (XVECEXP (x, i, j), ref);
5234 }
5235 }
5236 }
5237
5238 /* Replace every reference to the annotated literal pool
5239 symbol REF in X by its base plus OFFSET. */
5240
5241 static void
5242 replace_constant_pool_ref (rtx *x, rtx ref, rtx offset)
5243 {
5244 int i, j;
5245 const char *fmt;
5246
5247 gcc_assert (*x != ref);
5248
5249 if (GET_CODE (*x) == UNSPEC
5250 && XINT (*x, 1) == UNSPEC_LTREF
5251 && XVECEXP (*x, 0, 0) == ref)
5252 {
5253 *x = gen_rtx_PLUS (Pmode, XVECEXP (*x, 0, 1), offset);
5254 return;
5255 }
5256
5257 if (GET_CODE (*x) == PLUS
5258 && GET_CODE (XEXP (*x, 1)) == CONST_INT
5259 && GET_CODE (XEXP (*x, 0)) == UNSPEC
5260 && XINT (XEXP (*x, 0), 1) == UNSPEC_LTREF
5261 && XVECEXP (XEXP (*x, 0), 0, 0) == ref)
5262 {
5263 rtx addr = gen_rtx_PLUS (Pmode, XVECEXP (XEXP (*x, 0), 0, 1), offset);
5264 *x = plus_constant (addr, INTVAL (XEXP (*x, 1)));
5265 return;
5266 }
5267
5268 fmt = GET_RTX_FORMAT (GET_CODE (*x));
5269 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
5270 {
5271 if (fmt[i] == 'e')
5272 {
5273 replace_constant_pool_ref (&XEXP (*x, i), ref, offset);
5274 }
5275 else if (fmt[i] == 'E')
5276 {
5277 for (j = 0; j < XVECLEN (*x, i); j++)
5278 replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, offset);
5279 }
5280 }
5281 }
5282
5283 /* Check whether X contains an UNSPEC_LTREL_BASE.
5284 Return its constant pool symbol if found, NULL_RTX otherwise. */
5285
5286 static rtx
5287 find_ltrel_base (rtx x)
5288 {
5289 int i, j;
5290 const char *fmt;
5291
5292 if (GET_CODE (x) == UNSPEC
5293 && XINT (x, 1) == UNSPEC_LTREL_BASE)
5294 return XVECEXP (x, 0, 0);
5295
5296 fmt = GET_RTX_FORMAT (GET_CODE (x));
5297 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5298 {
5299 if (fmt[i] == 'e')
5300 {
5301 rtx fnd = find_ltrel_base (XEXP (x, i));
5302 if (fnd)
5303 return fnd;
5304 }
5305 else if (fmt[i] == 'E')
5306 {
5307 for (j = 0; j < XVECLEN (x, i); j++)
5308 {
5309 rtx fnd = find_ltrel_base (XVECEXP (x, i, j));
5310 if (fnd)
5311 return fnd;
5312 }
5313 }
5314 }
5315
5316 return NULL_RTX;
5317 }
5318
5319 /* Replace any occurrence of UNSPEC_LTREL_BASE in X with its base. */
5320
5321 static void
5322 replace_ltrel_base (rtx *x)
5323 {
5324 int i, j;
5325 const char *fmt;
5326
5327 if (GET_CODE (*x) == UNSPEC
5328 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
5329 {
5330 *x = XVECEXP (*x, 0, 1);
5331 return;
5332 }
5333
5334 fmt = GET_RTX_FORMAT (GET_CODE (*x));
5335 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
5336 {
5337 if (fmt[i] == 'e')
5338 {
5339 replace_ltrel_base (&XEXP (*x, i));
5340 }
5341 else if (fmt[i] == 'E')
5342 {
5343 for (j = 0; j < XVECLEN (*x, i); j++)
5344 replace_ltrel_base (&XVECEXP (*x, i, j));
5345 }
5346 }
5347 }
5348
5349
5350 /* We keep a list of constants which we have to add to internal
5351 constant tables in the middle of large functions. */
5352
5353 #define NR_C_MODES 11
5354 enum machine_mode constant_modes[NR_C_MODES] =
5355 {
5356 TFmode, TImode, TDmode,
5357 DFmode, DImode, DDmode,
5358 SFmode, SImode, SDmode,
5359 HImode,
5360 QImode
5361 };
5362
5363 struct constant
5364 {
5365 struct constant *next;
5366 rtx value;
5367 rtx label;
5368 };
5369
5370 struct constant_pool
5371 {
5372 struct constant_pool *next;
5373 rtx first_insn;
5374 rtx pool_insn;
5375 bitmap insns;
5376 rtx emit_pool_after;
5377
5378 struct constant *constants[NR_C_MODES];
5379 struct constant *execute;
5380 rtx label;
5381 int size;
5382 };
5383
5384 /* Allocate new constant_pool structure. */
5385
5386 static struct constant_pool *
5387 s390_alloc_pool (void)
5388 {
5389 struct constant_pool *pool;
5390 int i;
5391
5392 pool = (struct constant_pool *) xmalloc (sizeof *pool);
5393 pool->next = NULL;
5394 for (i = 0; i < NR_C_MODES; i++)
5395 pool->constants[i] = NULL;
5396
5397 pool->execute = NULL;
5398 pool->label = gen_label_rtx ();
5399 pool->first_insn = NULL_RTX;
5400 pool->pool_insn = NULL_RTX;
5401 pool->insns = BITMAP_ALLOC (NULL);
5402 pool->size = 0;
5403 pool->emit_pool_after = NULL_RTX;
5404
5405 return pool;
5406 }
5407
5408 /* Create new constant pool covering instructions starting at INSN
5409 and chain it to the end of POOL_LIST. */
5410
5411 static struct constant_pool *
5412 s390_start_pool (struct constant_pool **pool_list, rtx insn)
5413 {
5414 struct constant_pool *pool, **prev;
5415
5416 pool = s390_alloc_pool ();
5417 pool->first_insn = insn;
5418
5419 for (prev = pool_list; *prev; prev = &(*prev)->next)
5420 ;
5421 *prev = pool;
5422
5423 return pool;
5424 }
5425
5426 /* End range of instructions covered by POOL at INSN and emit
5427 placeholder insn representing the pool. */
5428
5429 static void
5430 s390_end_pool (struct constant_pool *pool, rtx insn)
5431 {
5432 rtx pool_size = GEN_INT (pool->size + 8 /* alignment slop */);
5433
5434 if (!insn)
5435 insn = get_last_insn ();
5436
5437 pool->pool_insn = emit_insn_after (gen_pool (pool_size), insn);
5438 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
5439 }
5440
5441 /* Add INSN to the list of insns covered by POOL. */
5442
5443 static void
5444 s390_add_pool_insn (struct constant_pool *pool, rtx insn)
5445 {
5446 bitmap_set_bit (pool->insns, INSN_UID (insn));
5447 }
5448
5449 /* Return pool out of POOL_LIST that covers INSN. */
5450
5451 static struct constant_pool *
5452 s390_find_pool (struct constant_pool *pool_list, rtx insn)
5453 {
5454 struct constant_pool *pool;
5455
5456 for (pool = pool_list; pool; pool = pool->next)
5457 if (bitmap_bit_p (pool->insns, INSN_UID (insn)))
5458 break;
5459
5460 return pool;
5461 }
5462
5463 /* Add constant VAL of mode MODE to the constant pool POOL. */
5464
5465 static void
5466 s390_add_constant (struct constant_pool *pool, rtx val, enum machine_mode mode)
5467 {
5468 struct constant *c;
5469 int i;
5470
5471 for (i = 0; i < NR_C_MODES; i++)
5472 if (constant_modes[i] == mode)
5473 break;
5474 gcc_assert (i != NR_C_MODES);
5475
5476 for (c = pool->constants[i]; c != NULL; c = c->next)
5477 if (rtx_equal_p (val, c->value))
5478 break;
5479
5480 if (c == NULL)
5481 {
5482 c = (struct constant *) xmalloc (sizeof *c);
5483 c->value = val;
5484 c->label = gen_label_rtx ();
5485 c->next = pool->constants[i];
5486 pool->constants[i] = c;
5487 pool->size += GET_MODE_SIZE (mode);
5488 }
5489 }
5490
5491 /* Find constant VAL of mode MODE in the constant pool POOL.
5492 Return an RTX describing the distance from the start of
5493 the pool to the location of the new constant. */
5494
5495 static rtx
5496 s390_find_constant (struct constant_pool *pool, rtx val,
5497 enum machine_mode mode)
5498 {
5499 struct constant *c;
5500 rtx offset;
5501 int i;
5502
5503 for (i = 0; i < NR_C_MODES; i++)
5504 if (constant_modes[i] == mode)
5505 break;
5506 gcc_assert (i != NR_C_MODES);
5507
5508 for (c = pool->constants[i]; c != NULL; c = c->next)
5509 if (rtx_equal_p (val, c->value))
5510 break;
5511
5512 gcc_assert (c);
5513
5514 offset = gen_rtx_MINUS (Pmode, gen_rtx_LABEL_REF (Pmode, c->label),
5515 gen_rtx_LABEL_REF (Pmode, pool->label));
5516 offset = gen_rtx_CONST (Pmode, offset);
5517 return offset;
5518 }
5519
5520 /* Check whether INSN is an execute. Return the label_ref to its
5521 execute target template if so, NULL_RTX otherwise. */
5522
5523 static rtx
5524 s390_execute_label (rtx insn)
5525 {
5526 if (GET_CODE (insn) == INSN
5527 && GET_CODE (PATTERN (insn)) == PARALLEL
5528 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == UNSPEC
5529 && XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_EXECUTE)
5530 return XVECEXP (XVECEXP (PATTERN (insn), 0, 0), 0, 2);
5531
5532 return NULL_RTX;
5533 }
5534
5535 /* Add execute target for INSN to the constant pool POOL. */
5536
5537 static void
5538 s390_add_execute (struct constant_pool *pool, rtx insn)
5539 {
5540 struct constant *c;
5541
5542 for (c = pool->execute; c != NULL; c = c->next)
5543 if (INSN_UID (insn) == INSN_UID (c->value))
5544 break;
5545
5546 if (c == NULL)
5547 {
5548 c = (struct constant *) xmalloc (sizeof *c);
5549 c->value = insn;
5550 c->label = gen_label_rtx ();
5551 c->next = pool->execute;
5552 pool->execute = c;
5553 pool->size += 6;
5554 }
5555 }
5556
5557 /* Find execute target for INSN in the constant pool POOL.
5558 Return an RTX describing the distance from the start of
5559 the pool to the location of the execute target. */
5560
5561 static rtx
5562 s390_find_execute (struct constant_pool *pool, rtx insn)
5563 {
5564 struct constant *c;
5565 rtx offset;
5566
5567 for (c = pool->execute; c != NULL; c = c->next)
5568 if (INSN_UID (insn) == INSN_UID (c->value))
5569 break;
5570
5571 gcc_assert (c);
5572
5573 offset = gen_rtx_MINUS (Pmode, gen_rtx_LABEL_REF (Pmode, c->label),
5574 gen_rtx_LABEL_REF (Pmode, pool->label));
5575 offset = gen_rtx_CONST (Pmode, offset);
5576 return offset;
5577 }
5578
5579 /* For an execute INSN, extract the execute target template. */
5580
5581 static rtx
5582 s390_execute_target (rtx insn)
5583 {
5584 rtx pattern = PATTERN (insn);
5585 gcc_assert (s390_execute_label (insn));
5586
5587 if (XVECLEN (pattern, 0) == 2)
5588 {
5589 pattern = copy_rtx (XVECEXP (pattern, 0, 1));
5590 }
5591 else
5592 {
5593 rtvec vec = rtvec_alloc (XVECLEN (pattern, 0) - 1);
5594 int i;
5595
5596 for (i = 0; i < XVECLEN (pattern, 0) - 1; i++)
5597 RTVEC_ELT (vec, i) = copy_rtx (XVECEXP (pattern, 0, i + 1));
5598
5599 pattern = gen_rtx_PARALLEL (VOIDmode, vec);
5600 }
5601
5602 return pattern;
5603 }
5604
5605 /* Indicate that INSN cannot be duplicated. This is the case for
5606 execute insns that carry a unique label. */
5607
5608 static bool
5609 s390_cannot_copy_insn_p (rtx insn)
5610 {
5611 rtx label = s390_execute_label (insn);
5612 return label && label != const0_rtx;
5613 }
5614
5615 /* Dump out the constants in POOL. If REMOTE_LABEL is true,
5616 do not emit the pool base label. */
5617
5618 static void
5619 s390_dump_pool (struct constant_pool *pool, bool remote_label)
5620 {
5621 struct constant *c;
5622 rtx insn = pool->pool_insn;
5623 int i;
5624
5625 /* Switch to rodata section. */
5626 if (TARGET_CPU_ZARCH)
5627 {
5628 insn = emit_insn_after (gen_pool_section_start (), insn);
5629 INSN_ADDRESSES_NEW (insn, -1);
5630 }
5631
5632 /* Ensure minimum pool alignment. */
5633 if (TARGET_CPU_ZARCH)
5634 insn = emit_insn_after (gen_pool_align (GEN_INT (8)), insn);
5635 else
5636 insn = emit_insn_after (gen_pool_align (GEN_INT (4)), insn);
5637 INSN_ADDRESSES_NEW (insn, -1);
5638
5639 /* Emit pool base label. */
5640 if (!remote_label)
5641 {
5642 insn = emit_label_after (pool->label, insn);
5643 INSN_ADDRESSES_NEW (insn, -1);
5644 }
5645
5646 /* Dump constants in descending alignment requirement order,
5647 ensuring proper alignment for every constant. */
5648 for (i = 0; i < NR_C_MODES; i++)
5649 for (c = pool->constants[i]; c; c = c->next)
5650 {
5651 /* Convert UNSPEC_LTREL_OFFSET unspecs to pool-relative references. */
5652 rtx value = copy_rtx (c->value);
5653 if (GET_CODE (value) == CONST
5654 && GET_CODE (XEXP (value, 0)) == UNSPEC
5655 && XINT (XEXP (value, 0), 1) == UNSPEC_LTREL_OFFSET
5656 && XVECLEN (XEXP (value, 0), 0) == 1)
5657 {
5658 value = gen_rtx_MINUS (Pmode, XVECEXP (XEXP (value, 0), 0, 0),
5659 gen_rtx_LABEL_REF (VOIDmode, pool->label));
5660 value = gen_rtx_CONST (VOIDmode, value);
5661 }
5662
5663 insn = emit_label_after (c->label, insn);
5664 INSN_ADDRESSES_NEW (insn, -1);
5665
5666 value = gen_rtx_UNSPEC_VOLATILE (constant_modes[i],
5667 gen_rtvec (1, value),
5668 UNSPECV_POOL_ENTRY);
5669 insn = emit_insn_after (value, insn);
5670 INSN_ADDRESSES_NEW (insn, -1);
5671 }
5672
5673 /* Ensure minimum alignment for instructions. */
5674 insn = emit_insn_after (gen_pool_align (GEN_INT (2)), insn);
5675 INSN_ADDRESSES_NEW (insn, -1);
5676
5677 /* Output in-pool execute template insns. */
5678 for (c = pool->execute; c; c = c->next)
5679 {
5680 insn = emit_label_after (c->label, insn);
5681 INSN_ADDRESSES_NEW (insn, -1);
5682
5683 insn = emit_insn_after (s390_execute_target (c->value), insn);
5684 INSN_ADDRESSES_NEW (insn, -1);
5685 }
5686
5687 /* Switch back to previous section. */
5688 if (TARGET_CPU_ZARCH)
5689 {
5690 insn = emit_insn_after (gen_pool_section_end (), insn);
5691 INSN_ADDRESSES_NEW (insn, -1);
5692 }
5693
5694 insn = emit_barrier_after (insn);
5695 INSN_ADDRESSES_NEW (insn, -1);
5696
5697 /* Remove placeholder insn. */
5698 remove_insn (pool->pool_insn);
5699 }
5700
5701 /* Free all memory used by POOL. */
5702
5703 static void
5704 s390_free_pool (struct constant_pool *pool)
5705 {
5706 struct constant *c, *next;
5707 int i;
5708
5709 for (i = 0; i < NR_C_MODES; i++)
5710 for (c = pool->constants[i]; c; c = next)
5711 {
5712 next = c->next;
5713 free (c);
5714 }
5715
5716 for (c = pool->execute; c; c = next)
5717 {
5718 next = c->next;
5719 free (c);
5720 }
5721
5722 BITMAP_FREE (pool->insns);
5723 free (pool);
5724 }
5725
5726
5727 /* Collect main literal pool. Return NULL on overflow. */
5728
5729 static struct constant_pool *
5730 s390_mainpool_start (void)
5731 {
5732 struct constant_pool *pool;
5733 rtx insn;
5734
5735 pool = s390_alloc_pool ();
5736
5737 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5738 {
5739 if (GET_CODE (insn) == INSN
5740 && GET_CODE (PATTERN (insn)) == SET
5741 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC_VOLATILE
5742 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPECV_MAIN_POOL)
5743 {
5744 gcc_assert (!pool->pool_insn);
5745 pool->pool_insn = insn;
5746 }
5747
5748 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
5749 {
5750 s390_add_execute (pool, insn);
5751 }
5752 else if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
5753 {
5754 rtx pool_ref = NULL_RTX;
5755 find_constant_pool_ref (PATTERN (insn), &pool_ref);
5756 if (pool_ref)
5757 {
5758 rtx constant = get_pool_constant (pool_ref);
5759 enum machine_mode mode = get_pool_mode (pool_ref);
5760 s390_add_constant (pool, constant, mode);
5761 }
5762 }
5763
5764 /* If hot/cold partitioning is enabled, we have to make sure that
5765 the literal pool is emitted in the same section where the
5766 initialization of the literal pool base pointer takes place.
5767 emit_pool_after is only used in the non-overflow case on
5768 non-Z CPUs, where we can emit the literal pool at the end of the
5769 function body within the text section. */
5770 if (NOTE_P (insn)
5771 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS
5772 && !pool->emit_pool_after)
5773 pool->emit_pool_after = PREV_INSN (insn);
5774 }
5775
5776 gcc_assert (pool->pool_insn || pool->size == 0);
5777
5778 if (pool->size >= 4096)
5779 {
5780 /* We're going to chunkify the pool, so remove the main
5781 pool placeholder insn. */
5782 remove_insn (pool->pool_insn);
5783
5784 s390_free_pool (pool);
5785 pool = NULL;
5786 }
5787
5788 /* If the function ends with the section where the literal pool
5789 should be emitted, set the marker to its end. */
5790 if (pool && !pool->emit_pool_after)
5791 pool->emit_pool_after = get_last_insn ();
5792
5793 return pool;
5794 }
5795
5796 /* POOL holds the main literal pool as collected by s390_mainpool_start.
5797 Modify the current function to output the pool constants as well as
5798 the pool register setup instruction. */
5799
5800 static void
5801 s390_mainpool_finish (struct constant_pool *pool)
5802 {
5803 rtx base_reg = cfun->machine->base_reg;
5804 rtx insn;
5805
5806 /* If the pool is empty, we're done. */
5807 if (pool->size == 0)
5808 {
5809 /* We don't actually need a base register after all. */
5810 cfun->machine->base_reg = NULL_RTX;
5811
5812 if (pool->pool_insn)
5813 remove_insn (pool->pool_insn);
5814 s390_free_pool (pool);
5815 return;
5816 }
5817
5818 /* We need correct insn addresses. */
5819 shorten_branches (get_insns ());
5820
5821 /* On zSeries, we use a LARL to load the pool register. The pool is
5822 located in the .rodata section, so we emit it after the function. */
5823 if (TARGET_CPU_ZARCH)
5824 {
5825 insn = gen_main_base_64 (base_reg, pool->label);
5826 insn = emit_insn_after (insn, pool->pool_insn);
5827 INSN_ADDRESSES_NEW (insn, -1);
5828 remove_insn (pool->pool_insn);
5829
5830 insn = get_last_insn ();
5831 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
5832 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
5833
5834 s390_dump_pool (pool, 0);
5835 }
5836
5837 /* On S/390, if the total size of the function's code plus literal pool
5838 does not exceed 4096 bytes, we use BASR to set up a function base
5839 pointer, and emit the literal pool at the end of the function. */
5840 else if (INSN_ADDRESSES (INSN_UID (pool->emit_pool_after))
5841 + pool->size + 8 /* alignment slop */ < 4096)
5842 {
5843 insn = gen_main_base_31_small (base_reg, pool->label);
5844 insn = emit_insn_after (insn, pool->pool_insn);
5845 INSN_ADDRESSES_NEW (insn, -1);
5846 remove_insn (pool->pool_insn);
5847
5848 insn = emit_label_after (pool->label, insn);
5849 INSN_ADDRESSES_NEW (insn, -1);
5850
5851 /* emit_pool_after will be set by s390_mainpool_start to the
5852 last insn of the section where the literal pool should be
5853 emitted. */
5854 insn = pool->emit_pool_after;
5855
5856 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
5857 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
5858
5859 s390_dump_pool (pool, 1);
5860 }
5861
5862 /* Otherwise, we emit an inline literal pool and use BASR to branch
5863 over it, setting up the pool register at the same time. */
5864 else
5865 {
5866 rtx pool_end = gen_label_rtx ();
5867
5868 insn = gen_main_base_31_large (base_reg, pool->label, pool_end);
5869 insn = emit_insn_after (insn, pool->pool_insn);
5870 INSN_ADDRESSES_NEW (insn, -1);
5871 remove_insn (pool->pool_insn);
5872
5873 insn = emit_label_after (pool->label, insn);
5874 INSN_ADDRESSES_NEW (insn, -1);
5875
5876 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
5877 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
5878
5879 insn = emit_label_after (pool_end, pool->pool_insn);
5880 INSN_ADDRESSES_NEW (insn, -1);
5881
5882 s390_dump_pool (pool, 1);
5883 }
5884
5885
5886 /* Replace all literal pool references. */
5887
5888 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5889 {
5890 if (INSN_P (insn))
5891 replace_ltrel_base (&PATTERN (insn));
5892
5893 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
5894 {
5895 rtx addr, pool_ref = NULL_RTX;
5896 find_constant_pool_ref (PATTERN (insn), &pool_ref);
5897 if (pool_ref)
5898 {
5899 if (s390_execute_label (insn))
5900 addr = s390_find_execute (pool, insn);
5901 else
5902 addr = s390_find_constant (pool, get_pool_constant (pool_ref),
5903 get_pool_mode (pool_ref));
5904
5905 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
5906 INSN_CODE (insn) = -1;
5907 }
5908 }
5909 }
5910
5911
5912 /* Free the pool. */
5913 s390_free_pool (pool);
5914 }
5915
5916 /* POOL holds the main literal pool as collected by s390_mainpool_start.
5917 We have decided we cannot use this pool, so revert all changes
5918 to the current function that were done by s390_mainpool_start. */
5919 static void
5920 s390_mainpool_cancel (struct constant_pool *pool)
5921 {
5922 /* We didn't actually change the instruction stream, so simply
5923 free the pool memory. */
5924 s390_free_pool (pool);
5925 }
5926
5927
5928 /* Chunkify the literal pool. */
5929
5930 #define S390_POOL_CHUNK_MIN 0xc00
5931 #define S390_POOL_CHUNK_MAX 0xe00
5932
5933 static struct constant_pool *
5934 s390_chunkify_start (void)
5935 {
5936 struct constant_pool *curr_pool = NULL, *pool_list = NULL;
5937 int extra_size = 0;
5938 bitmap far_labels;
5939 rtx pending_ltrel = NULL_RTX;
5940 rtx insn;
5941
5942 rtx (*gen_reload_base) (rtx, rtx) =
5943 TARGET_CPU_ZARCH ? gen_reload_base_64 : gen_reload_base_31;
5944
5945
5946 /* We need correct insn addresses. */
5947
5948 shorten_branches (get_insns ());
5949
5950 /* Scan all insns and move literals to pool chunks. */
5951
5952 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5953 {
5954 bool section_switch_p = false;
5955
5956 /* Check for pending LTREL_BASE. */
5957 if (INSN_P (insn))
5958 {
5959 rtx ltrel_base = find_ltrel_base (PATTERN (insn));
5960 if (ltrel_base)
5961 {
5962 gcc_assert (ltrel_base == pending_ltrel);
5963 pending_ltrel = NULL_RTX;
5964 }
5965 }
5966
5967 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
5968 {
5969 if (!curr_pool)
5970 curr_pool = s390_start_pool (&pool_list, insn);
5971
5972 s390_add_execute (curr_pool, insn);
5973 s390_add_pool_insn (curr_pool, insn);
5974 }
5975 else if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
5976 {
5977 rtx pool_ref = NULL_RTX;
5978 find_constant_pool_ref (PATTERN (insn), &pool_ref);
5979 if (pool_ref)
5980 {
5981 rtx constant = get_pool_constant (pool_ref);
5982 enum machine_mode mode = get_pool_mode (pool_ref);
5983
5984 if (!curr_pool)
5985 curr_pool = s390_start_pool (&pool_list, insn);
5986
5987 s390_add_constant (curr_pool, constant, mode);
5988 s390_add_pool_insn (curr_pool, insn);
5989
5990 /* Don't split the pool chunk between a LTREL_OFFSET load
5991 and the corresponding LTREL_BASE. */
5992 if (GET_CODE (constant) == CONST
5993 && GET_CODE (XEXP (constant, 0)) == UNSPEC
5994 && XINT (XEXP (constant, 0), 1) == UNSPEC_LTREL_OFFSET)
5995 {
5996 gcc_assert (!pending_ltrel);
5997 pending_ltrel = pool_ref;
5998 }
5999 }
6000 }
6001
6002 if (GET_CODE (insn) == JUMP_INSN || GET_CODE (insn) == CODE_LABEL)
6003 {
6004 if (curr_pool)
6005 s390_add_pool_insn (curr_pool, insn);
6006 /* An LTREL_BASE must follow within the same basic block. */
6007 gcc_assert (!pending_ltrel);
6008 }
6009
6010 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
6011 section_switch_p = true;
6012
6013 if (!curr_pool
6014 || INSN_ADDRESSES_SIZE () <= (size_t) INSN_UID (insn)
6015 || INSN_ADDRESSES (INSN_UID (insn)) == -1)
6016 continue;
6017
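/* Decide whether the current pool chunk has to be closed at this insn. */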
6018 if (TARGET_CPU_ZARCH)
6019 {
6020 if (curr_pool->size < S390_POOL_CHUNK_MAX)
6021 continue;
6022
6023 s390_end_pool (curr_pool, NULL_RTX);
6024 curr_pool = NULL;
6025 }
6026 else
6027 {
6028 int chunk_size = INSN_ADDRESSES (INSN_UID (insn))
6029 - INSN_ADDRESSES (INSN_UID (curr_pool->first_insn))
6030 + extra_size;
6031
6032 /* We will later have to insert base register reload insns.
6033 Those will have an effect on code size, which we need to
6034 consider here. This calculation makes rather pessimistic
6035 worst-case assumptions. */
6036 if (GET_CODE (insn) == CODE_LABEL)
6037 extra_size += 6;
6038
6039 if (chunk_size < S390_POOL_CHUNK_MIN
6040 && curr_pool->size < S390_POOL_CHUNK_MIN
6041 && !section_switch_p)
6042 continue;
6043
6044 /* Pool chunks can only be inserted after BARRIERs ... */
6045 if (GET_CODE (insn) == BARRIER)
6046 {
6047 s390_end_pool (curr_pool, insn);
6048 curr_pool = NULL;
6049 extra_size = 0;
6050 }
6051
6052 /* ... so if we don't find one in time, create one. */
6053 else if (chunk_size > S390_POOL_CHUNK_MAX
6054 || curr_pool->size > S390_POOL_CHUNK_MAX
6055 || section_switch_p)
6056 {
6057 rtx label, jump, barrier;
6058
6059 if (!section_switch_p)
6060 {
6061 /* We can insert the barrier only after a 'real' insn. */
6062 if (GET_CODE (insn) != INSN && GET_CODE (insn) != CALL_INSN)
6063 continue;
6064 if (get_attr_length (insn) == 0)
6065 continue;
6066 /* Don't separate LTREL_BASE from the corresponding
6067 LTREL_OFFSET load. */
6068 if (pending_ltrel)
6069 continue;
6070 }
6071 else
6072 {
6073 gcc_assert (!pending_ltrel);
6074
6075 /* The old pool has to end before the section switch
6076 note in order to make it part of the current
6077 section. */
6078 insn = PREV_INSN (insn);
6079 }
6080
6081 label = gen_label_rtx ();
6082 jump = emit_jump_insn_after (gen_jump (label), insn);
6083 barrier = emit_barrier_after (jump);
6084 insn = emit_label_after (label, barrier);
6085 JUMP_LABEL (jump) = label;
6086 LABEL_NUSES (label) = 1;
6087
6088 INSN_ADDRESSES_NEW (jump, -1);
6089 INSN_ADDRESSES_NEW (barrier, -1);
6090 INSN_ADDRESSES_NEW (insn, -1);
6091
6092 s390_end_pool (curr_pool, barrier);
6093 curr_pool = NULL;
6094 extra_size = 0;
6095 }
6096 }
6097 }
6098
6099 if (curr_pool)
6100 s390_end_pool (curr_pool, NULL_RTX);
6101 gcc_assert (!pending_ltrel);
6102
6103 /* Find all labels that are branched into
6104 from an insn belonging to a different chunk. */
6105
6106 far_labels = BITMAP_ALLOC (NULL);
6107
6108 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6109 {
6110 /* Labels marked with LABEL_PRESERVE_P can be the target
6111 of non-local jumps, so we have to mark them.
6112 The same holds for named labels.
6113
6114 Don't do that, however, if it is the label before
6115 a jump table. */
6116
6117 if (GET_CODE (insn) == CODE_LABEL
6118 && (LABEL_PRESERVE_P (insn) || LABEL_NAME (insn)))
6119 {
6120 rtx vec_insn = next_real_insn (insn);
6121 rtx vec_pat = vec_insn && GET_CODE (vec_insn) == JUMP_INSN ?
6122 PATTERN (vec_insn) : NULL_RTX;
6123 if (!vec_pat
6124 || !(GET_CODE (vec_pat) == ADDR_VEC
6125 || GET_CODE (vec_pat) == ADDR_DIFF_VEC))
6126 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (insn));
6127 }
6128
6129 /* If we have a direct jump (conditional or unconditional)
6130 or a casesi jump, check all potential targets. */
6131 else if (GET_CODE (insn) == JUMP_INSN)
6132 {
6133 rtx pat = PATTERN (insn);
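/* For jumps wrapped in a PARALLEL with more than two elements (e.g. a branch
combined with additional sets or clobbers), the branch SET is presumably the
first element; examine that one. */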
6134 if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 2)
6135 pat = XVECEXP (pat, 0, 0);
6136
6137 if (GET_CODE (pat) == SET)
6138 {
6139 rtx label = JUMP_LABEL (insn);
6140 if (label)
6141 {
6142 if (s390_find_pool (pool_list, label)
6143 != s390_find_pool (pool_list, insn))
6144 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
6145 }
6146 }
6147 else if (GET_CODE (pat) == PARALLEL
6148 && XVECLEN (pat, 0) == 2
6149 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
6150 && GET_CODE (XVECEXP (pat, 0, 1)) == USE
6151 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == LABEL_REF)
6152 {
6153 /* Find the jump table used by this casesi jump. */
6154 rtx vec_label = XEXP (XEXP (XVECEXP (pat, 0, 1), 0), 0);
6155 rtx vec_insn = next_real_insn (vec_label);
6156 rtx vec_pat = vec_insn && GET_CODE (vec_insn) == JUMP_INSN ?
6157 PATTERN (vec_insn) : NULL_RTX;
6158 if (vec_pat
6159 && (GET_CODE (vec_pat) == ADDR_VEC
6160 || GET_CODE (vec_pat) == ADDR_DIFF_VEC))
6161 {
6162 int i, diff_p = GET_CODE (vec_pat) == ADDR_DIFF_VEC;
6163
6164 for (i = 0; i < XVECLEN (vec_pat, diff_p); i++)
6165 {
6166 rtx label = XEXP (XVECEXP (vec_pat, diff_p, i), 0);
6167
6168 if (s390_find_pool (pool_list, label)
6169 != s390_find_pool (pool_list, insn))
6170 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
6171 }
6172 }
6173 }
6174 }
6175 }
6176
6177 /* Insert base register reload insns before every pool. */
6178
6179 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
6180 {
6181 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
6182 curr_pool->label);
6183 rtx insn = curr_pool->first_insn;
6184 INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
6185 }
6186
6187 /* Insert base register reload insns at every far label. */
6188
6189 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6190 if (GET_CODE (insn) == CODE_LABEL
6191 && bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
6192 {
6193 struct constant_pool *pool = s390_find_pool (pool_list, insn);
6194 if (pool)
6195 {
6196 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
6197 pool->label);
6198 INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
6199 }
6200 }
6201
6202
6203 BITMAP_FREE (far_labels);
6204
6205
6206 /* Recompute insn addresses. */
6207
6208 init_insn_lengths ();
6209 shorten_branches (get_insns ());
6210
6211 return pool_list;
6212 }
6213
6214 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
6215 After we have decided to use this list, finish implementing
6216 all changes to the current function as required. */
6217
6218 static void
6219 s390_chunkify_finish (struct constant_pool *pool_list)
6220 {
6221 struct constant_pool *curr_pool = NULL;
6222 rtx insn;
6223
6224
6225 /* Replace all literal pool references. */
6226
6227 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6228 {
6229 if (INSN_P (insn))
6230 replace_ltrel_base (&PATTERN (insn));
6231
6232 curr_pool = s390_find_pool (pool_list, insn);
6233 if (!curr_pool)
6234 continue;
6235
6236 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6237 {
6238 rtx addr, pool_ref = NULL_RTX;
6239 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6240 if (pool_ref)
6241 {
6242 if (s390_execute_label (insn))
6243 addr = s390_find_execute (curr_pool, insn);
6244 else
6245 addr = s390_find_constant (curr_pool,
6246 get_pool_constant (pool_ref),
6247 get_pool_mode (pool_ref));
6248
6249 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
6250 INSN_CODE (insn) = -1;
6251 }
6252 }
6253 }
6254
6255 /* Dump out all literal pools. */
6256
6257 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
6258 s390_dump_pool (curr_pool, 0);
6259
6260 /* Free pool list. */
6261
6262 while (pool_list)
6263 {
6264 struct constant_pool *next = pool_list->next;
6265 s390_free_pool (pool_list);
6266 pool_list = next;
6267 }
6268 }
6269
6270 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
6271 We have decided we cannot use this list, so revert all changes
6272 to the current function that were done by s390_chunkify_start. */
6273
6274 static void
6275 s390_chunkify_cancel (struct constant_pool *pool_list)
6276 {
6277 struct constant_pool *curr_pool = NULL;
6278 rtx insn;
6279
6280 /* Remove all pool placeholder insns. */
6281
6282 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
6283 {
6284 /* Did we insert an extra barrier? Remove it. */
6285 rtx barrier = PREV_INSN (curr_pool->pool_insn);
6286 rtx jump = barrier ? PREV_INSN (barrier) : NULL_RTX;
6287 rtx label = NEXT_INSN (curr_pool->pool_insn);
6288
6289 if (jump && GET_CODE (jump) == JUMP_INSN
6290 && barrier && GET_CODE (barrier) == BARRIER
6291 && label && GET_CODE (label) == CODE_LABEL
6292 && GET_CODE (PATTERN (jump)) == SET
6293 && SET_DEST (PATTERN (jump)) == pc_rtx
6294 && GET_CODE (SET_SRC (PATTERN (jump))) == LABEL_REF
6295 && XEXP (SET_SRC (PATTERN (jump)), 0) == label)
6296 {
6297 remove_insn (jump);
6298 remove_insn (barrier);
6299 remove_insn (label);
6300 }
6301
6302 remove_insn (curr_pool->pool_insn);
6303 }
6304
6305 /* Remove all base register reload insns. */
6306
6307 for (insn = get_insns (); insn; )
6308 {
6309 rtx next_insn = NEXT_INSN (insn);
6310
6311 if (GET_CODE (insn) == INSN
6312 && GET_CODE (PATTERN (insn)) == SET
6313 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
6314 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_RELOAD_BASE)
6315 remove_insn (insn);
6316
6317 insn = next_insn;
6318 }
6319
6320 /* Free pool list. */
6321
6322 while (pool_list)
6323 {
6324 struct constant_pool *next = pool_list->next;
6325 s390_free_pool (pool_list);
6326 pool_list = next;
6327 }
6328 }
6329
6330
6331 /* Output the constant pool entry EXP in mode MODE with alignment ALIGN. */
6332
6333 void
6334 s390_output_pool_entry (rtx exp, enum machine_mode mode, unsigned int align)
6335 {
6336 REAL_VALUE_TYPE r;
6337
6338 switch (GET_MODE_CLASS (mode))
6339 {
6340 case MODE_FLOAT:
6341 case MODE_DECIMAL_FLOAT:
6342 gcc_assert (GET_CODE (exp) == CONST_DOUBLE);
6343
6344 REAL_VALUE_FROM_CONST_DOUBLE (r, exp);
6345 assemble_real (r, mode, align);
6346 break;
6347
6348 case MODE_INT:
6349 assemble_integer (exp, GET_MODE_SIZE (mode), align, 1);
6350 break;
6351
6352 default:
6353 gcc_unreachable ();
6354 }
6355 }
6356
6357
6358 /* Return an RTL expression representing the value of the return address
6359 for the frame COUNT steps up from the current frame. FRAME is the
6360 frame pointer of that frame. */
6361
6362 rtx
6363 s390_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
6364 {
6365 int offset;
6366 rtx addr;
6367
6368 /* Without backchain, we fail for all but the current frame. */
6369
6370 if (!TARGET_BACKCHAIN && count > 0)
6371 return NULL_RTX;
6372
6373 /* For the current frame, we need to make sure the initial
6374 value of RETURN_REGNUM is actually saved. */
6375
6376 if (count == 0)
6377 {
6378 /* On non-z architectures branch splitting could overwrite r14. */
6379 if (TARGET_CPU_ZARCH)
6380 return get_hard_reg_initial_val (Pmode, RETURN_REGNUM);
6381 else
6382 {
6383 cfun_frame_layout.save_return_addr_p = true;
6384 return gen_rtx_MEM (Pmode, return_address_pointer_rtx);
6385 }
6386 }
6387
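/* Compute the location of the return address save slot within the frame given
by FRAME: two words below the frame address with the packed stack layout,
RETURN_REGNUM * UNITS_PER_WORD above it otherwise. */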
6388 if (TARGET_PACKED_STACK)
6389 offset = -2 * UNITS_PER_WORD;
6390 else
6391 offset = RETURN_REGNUM * UNITS_PER_WORD;
6392
6393 addr = plus_constant (frame, offset);
6394 addr = memory_address (Pmode, addr);
6395 return gen_rtx_MEM (Pmode, addr);
6396 }
6397
6398 /* Return an RTL expression representing the back chain stored in
6399 the current stack frame. */
6400
6401 rtx
6402 s390_back_chain_rtx (void)
6403 {
6404 rtx chain;
6405
6406 gcc_assert (TARGET_BACKCHAIN);
6407
6408 if (TARGET_PACKED_STACK)
6409 chain = plus_constant (stack_pointer_rtx,
6410 STACK_POINTER_OFFSET - UNITS_PER_WORD);
6411 else
6412 chain = stack_pointer_rtx;
6413
6414 chain = gen_rtx_MEM (Pmode, chain);
6415 return chain;
6416 }
6417
6418 /* Find first call clobbered register unused in a function.
6419 This could be used as base register in a leaf function
6420 or for holding the return address before epilogue. */
6421
6422 static int
6423 find_unused_clobbered_reg (void)
6424 {
6425 int i;
6426 for (i = 0; i < 6; i++)
6427 if (!df_regs_ever_live_p (i))
6428 return i;
6429 return 0;
6430 }
6431
6432
6433 /* Helper function for s390_regs_ever_clobbered. Sets the fields in DATA for all
6434 clobbered hard regs in SETREG. */
6435
6436 static void
6437 s390_reg_clobbered_rtx (rtx setreg, const_rtx set_insn ATTRIBUTE_UNUSED, void *data)
6438 {
6439 int *regs_ever_clobbered = (int *)data;
6440 unsigned int i, regno;
6441 enum machine_mode mode = GET_MODE (setreg);
6442
6443 if (GET_CODE (setreg) == SUBREG)
6444 {
6445 rtx inner = SUBREG_REG (setreg);
6446 if (!GENERAL_REG_P (inner))
6447 return;
6448 regno = subreg_regno (setreg);
6449 }
6450 else if (GENERAL_REG_P (setreg))
6451 regno = REGNO (setreg);
6452 else
6453 return;
6454
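/* Mark every hard register occupied by the (possibly multi-word) value in SETREG. */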
6455 for (i = regno;
6456 i < regno + HARD_REGNO_NREGS (regno, mode);
6457 i++)
6458 regs_ever_clobbered[i] = 1;
6459 }
6460
6461 /* Walks through all basic blocks of the current function looking
6462 for clobbered hard regs using s390_reg_clobbered_rtx. The fields
6463 of the passed integer array REGS_EVER_CLOBBERED are set to one for
6464 each of those regs. */
6465
6466 static void
6467 s390_regs_ever_clobbered (int *regs_ever_clobbered)
6468 {
6469 basic_block cur_bb;
6470 rtx cur_insn;
6471 unsigned int i;
6472
6473 memset (regs_ever_clobbered, 0, 16 * sizeof (int));
6474
6475 /* For non-leaf functions we have to consider all call clobbered regs to be
6476 clobbered. */
6477 if (!current_function_is_leaf)
6478 {
6479 for (i = 0; i < 16; i++)
6480 regs_ever_clobbered[i] = call_really_used_regs[i];
6481 }
6482
6483 /* Make the "magic" eh_return registers live if necessary. For regs_ever_live
6484 this work is done by liveness analysis (mark_regs_live_at_end).
6485 Special care is needed for functions containing landing pads. Landing pads
6486 may use the eh registers, but the code which sets these registers is not
6487 contained in that function. Hence s390_regs_ever_clobbered is not able to
6488 deal with this automatically. */
6489 if (crtl->calls_eh_return || cfun->machine->has_landing_pad_p)
6490 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM ; i++)
6491 if (crtl->calls_eh_return
6492 || (cfun->machine->has_landing_pad_p
6493 && df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
6494 regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;
6495
6496 /* For nonlocal gotos all call-saved registers have to be saved.
6497 This flag is also set for the unwinding code in libgcc.
6498 See expand_builtin_unwind_init. For regs_ever_live this is done by
6499 reload. */
6500 if (cfun->has_nonlocal_label)
6501 for (i = 0; i < 16; i++)
6502 if (!call_really_used_regs[i])
6503 regs_ever_clobbered[i] = 1;
6504
6505 FOR_EACH_BB (cur_bb)
6506 {
6507 FOR_BB_INSNS (cur_bb, cur_insn)
6508 {
6509 if (INSN_P (cur_insn))
6510 note_stores (PATTERN (cur_insn),
6511 s390_reg_clobbered_rtx,
6512 regs_ever_clobbered);
6513 }
6514 }
6515 }
6516
6517 /* Determine the frame area which actually has to be accessed
6518 in the function epilogue. The values are stored at the
6519 given pointers AREA_BOTTOM (address of the lowest used stack
6520 address) and AREA_TOP (address of the first item which does
6521 not belong to the stack frame). */
6522
6523 static void
6524 s390_frame_area (int *area_bottom, int *area_top)
6525 {
6526 int b, t;
6527 int i;
6528
6529 b = INT_MAX;
6530 t = INT_MIN;
6531
6532 if (cfun_frame_layout.first_restore_gpr != -1)
6533 {
6534 b = (cfun_frame_layout.gprs_offset
6535 + cfun_frame_layout.first_restore_gpr * UNITS_PER_WORD);
6536 t = b + (cfun_frame_layout.last_restore_gpr
6537 - cfun_frame_layout.first_restore_gpr + 1) * UNITS_PER_WORD;
6538 }
6539
6540 if (TARGET_64BIT && cfun_save_high_fprs_p)
6541 {
6542 b = MIN (b, cfun_frame_layout.f8_offset);
6543 t = MAX (t, (cfun_frame_layout.f8_offset
6544 + cfun_frame_layout.high_fprs * 8));
6545 }
6546
6547 if (!TARGET_64BIT)
6548 for (i = 2; i < 4; i++)
6549 if (cfun_fpr_bit_p (i))
6550 {
6551 b = MIN (b, cfun_frame_layout.f4_offset + (i - 2) * 8);
6552 t = MAX (t, cfun_frame_layout.f4_offset + (i - 1) * 8);
6553 }
6554
6555 *area_bottom = b;
6556 *area_top = t;
6557 }
6558
6559 /* Fill cfun->machine with info about register usage of current function.
6560 Return in CLOBBERED_REGS which GPRs are currently considered set. */
6561
6562 static void
6563 s390_register_info (int clobbered_regs[])
6564 {
6565 int i, j;
6566
6567 /* FPRs 8-15 are call saved in the 64-bit ABI. */
6568 cfun_frame_layout.fpr_bitmap = 0;
6569 cfun_frame_layout.high_fprs = 0;
6570 if (TARGET_64BIT)
6571 for (i = 24; i < 32; i++)
6572 if (df_regs_ever_live_p (i) && !global_regs[i])
6573 {
6574 cfun_set_fpr_bit (i - 16);
6575 cfun_frame_layout.high_fprs++;
6576 }
6577
6578 /* Find first and last gpr to be saved. We trust regs_ever_live
6579 data, except that we don't save and restore global registers.
6580
6581 Also, all registers with a special meaning to the compiler need
6582 to be handled separately. */
6583
6584 s390_regs_ever_clobbered (clobbered_regs);
6585
6586 for (i = 0; i < 16; i++)
6587 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i] && !fixed_regs[i];
6588
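/* In addition, registers with a special role (frame pointer, PIC register,
literal pool base, return address, stack pointer) are marked as clobbered
whenever the corresponding feature is in use. */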
6589 if (frame_pointer_needed)
6590 clobbered_regs[HARD_FRAME_POINTER_REGNUM] = 1;
6591
6592 if (flag_pic)
6593 clobbered_regs[PIC_OFFSET_TABLE_REGNUM]
6594 |= df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM);
6595
6596 clobbered_regs[BASE_REGNUM]
6597 |= (cfun->machine->base_reg
6598 && REGNO (cfun->machine->base_reg) == BASE_REGNUM);
6599
6600 clobbered_regs[RETURN_REGNUM]
6601 |= (!current_function_is_leaf
6602 || TARGET_TPF_PROFILING
6603 || cfun->machine->split_branches_pending_p
6604 || cfun_frame_layout.save_return_addr_p
6605 || crtl->calls_eh_return
6606 || cfun->stdarg);
6607
6608 clobbered_regs[STACK_POINTER_REGNUM]
6609 |= (!current_function_is_leaf
6610 || TARGET_TPF_PROFILING
6611 || cfun_save_high_fprs_p
6612 || get_frame_size () > 0
6613 || cfun->calls_alloca
6614 || cfun->stdarg);
6615
6616 for (i = 6; i < 16; i++)
6617 if (df_regs_ever_live_p (i) || clobbered_regs[i])
6618 break;
6619 for (j = 15; j > i; j--)
6620 if (df_regs_ever_live_p (j) || clobbered_regs[j])
6621 break;
6622
6623 if (i == 16)
6624 {
6625 /* Nothing to save/restore. */
6626 cfun_frame_layout.first_save_gpr_slot = -1;
6627 cfun_frame_layout.last_save_gpr_slot = -1;
6628 cfun_frame_layout.first_save_gpr = -1;
6629 cfun_frame_layout.first_restore_gpr = -1;
6630 cfun_frame_layout.last_save_gpr = -1;
6631 cfun_frame_layout.last_restore_gpr = -1;
6632 }
6633 else
6634 {
6635 /* Save slots for gprs from i to j. */
6636 cfun_frame_layout.first_save_gpr_slot = i;
6637 cfun_frame_layout.last_save_gpr_slot = j;
6638
6639 for (i = cfun_frame_layout.first_save_gpr_slot;
6640 i < cfun_frame_layout.last_save_gpr_slot + 1;
6641 i++)
6642 if (clobbered_regs[i])
6643 break;
6644
6645 for (j = cfun_frame_layout.last_save_gpr_slot; j > i; j--)
6646 if (clobbered_regs[j])
6647 break;
6648
6649 if (i == cfun_frame_layout.last_save_gpr_slot + 1)
6650 {
6651 /* Nothing to save/restore. */
6652 cfun_frame_layout.first_save_gpr = -1;
6653 cfun_frame_layout.first_restore_gpr = -1;
6654 cfun_frame_layout.last_save_gpr = -1;
6655 cfun_frame_layout.last_restore_gpr = -1;
6656 }
6657 else
6658 {
6659 /* Save / Restore from gpr i to j. */
6660 cfun_frame_layout.first_save_gpr = i;
6661 cfun_frame_layout.first_restore_gpr = i;
6662 cfun_frame_layout.last_save_gpr = j;
6663 cfun_frame_layout.last_restore_gpr = j;
6664 }
6665 }
6666
6667 if (cfun->stdarg)
6668 {
6669 /* Varargs functions need to save gprs 2 to 6. */
6670 if (cfun->va_list_gpr_size
6671 && crtl->args.info.gprs < GP_ARG_NUM_REG)
6672 {
6673 int min_gpr = crtl->args.info.gprs;
6674 int max_gpr = min_gpr + cfun->va_list_gpr_size;
6675 if (max_gpr > GP_ARG_NUM_REG)
6676 max_gpr = GP_ARG_NUM_REG;
6677
6678 if (cfun_frame_layout.first_save_gpr == -1
6679 || cfun_frame_layout.first_save_gpr > 2 + min_gpr)
6680 {
6681 cfun_frame_layout.first_save_gpr = 2 + min_gpr;
6682 cfun_frame_layout.first_save_gpr_slot = 2 + min_gpr;
6683 }
6684
6685 if (cfun_frame_layout.last_save_gpr == -1
6686 || cfun_frame_layout.last_save_gpr < 2 + max_gpr - 1)
6687 {
6688 cfun_frame_layout.last_save_gpr = 2 + max_gpr - 1;
6689 cfun_frame_layout.last_save_gpr_slot = 2 + max_gpr - 1;
6690 }
6691 }
6692
6693 /* Mark f0, f2 for 31 bit and f0-f4 for 64 bit to be saved. */
6694 if (TARGET_HARD_FLOAT && cfun->va_list_fpr_size
6695 && crtl->args.info.fprs < FP_ARG_NUM_REG)
6696 {
6697 int min_fpr = crtl->args.info.fprs;
6698 int max_fpr = min_fpr + cfun->va_list_fpr_size;
6699 if (max_fpr > FP_ARG_NUM_REG)
6700 max_fpr = FP_ARG_NUM_REG;
6701
6702 /* ??? This is currently required to ensure proper location
6703 of the fpr save slots within the va_list save area. */
6704 if (TARGET_PACKED_STACK)
6705 min_fpr = 0;
6706
6707 for (i = min_fpr; i < max_fpr; i++)
6708 cfun_set_fpr_bit (i);
6709 }
6710 }
6711
6712 if (!TARGET_64BIT)
6713 for (i = 2; i < 4; i++)
6714 if (df_regs_ever_live_p (i + 16) && !global_regs[i + 16])
6715 cfun_set_fpr_bit (i);
6716 }
6717
6718 /* Fill cfun->machine with info about frame of current function. */
6719
6720 static void
6721 s390_frame_info (void)
6722 {
6723 int i;
6724
6725 cfun_frame_layout.frame_size = get_frame_size ();
6726 if (!TARGET_64BIT && cfun_frame_layout.frame_size > 0x7fff0000)
6727 fatal_error ("total size of local variables exceeds architecture limit");
6728
6729 if (!TARGET_PACKED_STACK)
6730 {
6731 cfun_frame_layout.backchain_offset = 0;
6732 cfun_frame_layout.f0_offset = 16 * UNITS_PER_WORD;
6733 cfun_frame_layout.f4_offset = cfun_frame_layout.f0_offset + 2 * 8;
6734 cfun_frame_layout.f8_offset = -cfun_frame_layout.high_fprs * 8;
6735 cfun_frame_layout.gprs_offset = (cfun_frame_layout.first_save_gpr_slot
6736 * UNITS_PER_WORD);
6737 }
6738 else if (TARGET_BACKCHAIN) /* kernel stack layout */
6739 {
6740 cfun_frame_layout.backchain_offset = (STACK_POINTER_OFFSET
6741 - UNITS_PER_WORD);
6742 cfun_frame_layout.gprs_offset
6743 = (cfun_frame_layout.backchain_offset
6744 - (STACK_POINTER_REGNUM - cfun_frame_layout.first_save_gpr_slot + 1)
6745 * UNITS_PER_WORD);
6746
6747 if (TARGET_64BIT)
6748 {
6749 cfun_frame_layout.f4_offset
6750 = (cfun_frame_layout.gprs_offset
6751 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
6752
6753 cfun_frame_layout.f0_offset
6754 = (cfun_frame_layout.f4_offset
6755 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
6756 }
6757 else
6758 {
6759 /* On 31 bit we have to take care of the alignment of the
6760 floating point regs to provide the fastest access. */
6761 cfun_frame_layout.f0_offset
6762 = ((cfun_frame_layout.gprs_offset
6763 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1))
6764 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
6765
6766 cfun_frame_layout.f4_offset
6767 = (cfun_frame_layout.f0_offset
6768 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
6769 }
6770 }
6771 else /* no backchain */
6772 {
6773 cfun_frame_layout.f4_offset
6774 = (STACK_POINTER_OFFSET
6775 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
6776
6777 cfun_frame_layout.f0_offset
6778 = (cfun_frame_layout.f4_offset
6779 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
6780
6781 cfun_frame_layout.gprs_offset
6782 = cfun_frame_layout.f0_offset - cfun_gprs_save_area_size;
6783 }
6784
6785 if (current_function_is_leaf
6786 && !TARGET_TPF_PROFILING
6787 && cfun_frame_layout.frame_size == 0
6788 && !cfun_save_high_fprs_p
6789 && !cfun->calls_alloca
6790 && !cfun->stdarg)
6791 return;
6792
6793 if (!TARGET_PACKED_STACK)
6794 cfun_frame_layout.frame_size += (STACK_POINTER_OFFSET
6795 + crtl->outgoing_args_size
6796 + cfun_frame_layout.high_fprs * 8);
6797 else
6798 {
6799 if (TARGET_BACKCHAIN)
6800 cfun_frame_layout.frame_size += UNITS_PER_WORD;
6801
6802 /* No alignment trouble here because f8-f15 are only saved under
6803 64 bit. */
6804 cfun_frame_layout.f8_offset = (MIN (MIN (cfun_frame_layout.f0_offset,
6805 cfun_frame_layout.f4_offset),
6806 cfun_frame_layout.gprs_offset)
6807 - cfun_frame_layout.high_fprs * 8);
6808
6809 cfun_frame_layout.frame_size += cfun_frame_layout.high_fprs * 8;
6810
6811 for (i = 0; i < 8; i++)
6812 if (cfun_fpr_bit_p (i))
6813 cfun_frame_layout.frame_size += 8;
6814
6815 cfun_frame_layout.frame_size += cfun_gprs_save_area_size;
6816
6817 /* On 31 bit, if an odd number of GPRs has to be saved, we have to adjust
6818 the frame size to maintain the 8-byte alignment of stack frames. */
6819 cfun_frame_layout.frame_size = ((cfun_frame_layout.frame_size +
6820 STACK_BOUNDARY / BITS_PER_UNIT - 1)
6821 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1));
6822
6823 cfun_frame_layout.frame_size += crtl->outgoing_args_size;
6824 }
6825 }
6826
6827 /* Generate frame layout. Fills in register and frame data for the current
6828 function in cfun->machine. This routine can be called multiple times;
6829 it will re-do the complete frame layout every time. */
6830
6831 static void
6832 s390_init_frame_layout (void)
6833 {
6834 HOST_WIDE_INT frame_size;
6835 int base_used;
6836 int clobbered_regs[16];
6837
6838 /* On S/390 machines, we may need to perform branch splitting, which
6839 will require both base and return address register. We have no
6840 choice but to assume we're going to need them until right at the
6841 end of the machine dependent reorg phase. */
6842 if (!TARGET_CPU_ZARCH)
6843 cfun->machine->split_branches_pending_p = true;
6844
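/* The frame size and the need for a literal pool base register depend on each
other: whether the base register is required is decided from the frame size,
and reserving it changes the register and frame layout. Iterate until the
computed frame size no longer changes. */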
6845 do
6846 {
6847 frame_size = cfun_frame_layout.frame_size;
6848
6849 /* Try to predict whether we'll need the base register. */
6850 base_used = cfun->machine->split_branches_pending_p
6851 || crtl->uses_const_pool
6852 || (!DISP_IN_RANGE (frame_size)
6853 && !CONST_OK_FOR_K (frame_size));
6854
6855 /* Decide which register to use as literal pool base. In small
6856 leaf functions, try to use an unused call-clobbered register
6857 as base register to avoid save/restore overhead. */
6858 if (!base_used)
6859 cfun->machine->base_reg = NULL_RTX;
6860 else if (current_function_is_leaf && !df_regs_ever_live_p (5))
6861 cfun->machine->base_reg = gen_rtx_REG (Pmode, 5);
6862 else
6863 cfun->machine->base_reg = gen_rtx_REG (Pmode, BASE_REGNUM);
6864
6865 s390_register_info (clobbered_regs);
6866 s390_frame_info ();
6867 }
6868 while (frame_size != cfun_frame_layout.frame_size);
6869 }
6870
6871 /* Update frame layout. Recompute actual register save data based on
6872 current info and update regs_ever_live for the special registers.
6873 May be called multiple times, but may never cause *more* registers
6874 to be saved than s390_init_frame_layout allocated room for. */
6875
6876 static void
6877 s390_update_frame_layout (void)
6878 {
6879 int clobbered_regs[16];
6880
6881 s390_register_info (clobbered_regs);
6882
6883 df_set_regs_ever_live (BASE_REGNUM,
6884 clobbered_regs[BASE_REGNUM] ? true : false);
6885 df_set_regs_ever_live (RETURN_REGNUM,
6886 clobbered_regs[RETURN_REGNUM] ? true : false);
6887 df_set_regs_ever_live (STACK_POINTER_REGNUM,
6888 clobbered_regs[STACK_POINTER_REGNUM] ? true : false);
6889
6890 if (cfun->machine->base_reg)
6891 df_set_regs_ever_live (REGNO (cfun->machine->base_reg), true);
6892 }
6893
6894 /* Return true if it is legal to put a value with MODE into REGNO. */
6895
6896 bool
6897 s390_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
6898 {
6899 switch (REGNO_REG_CLASS (regno))
6900 {
6901 case FP_REGS:
6902 if (REGNO_PAIR_OK (regno, mode))
6903 {
6904 if (mode == SImode || mode == DImode)
6905 return true;
6906
6907 if (FLOAT_MODE_P (mode) && GET_MODE_CLASS (mode) != MODE_VECTOR_FLOAT)
6908 return true;
6909 }
6910 break;
6911 case ADDR_REGS:
6912 if (FRAME_REGNO_P (regno) && mode == Pmode)
6913 return true;
6914
6915 /* fallthrough */
6916 case GENERAL_REGS:
6917 if (REGNO_PAIR_OK (regno, mode))
6918 {
6919 if (TARGET_64BIT
6920 || (mode != TFmode && mode != TCmode && mode != TDmode))
6921 return true;
6922 }
6923 break;
6924 case CC_REGS:
6925 if (GET_MODE_CLASS (mode) == MODE_CC)
6926 return true;
6927 break;
6928 case ACCESS_REGS:
6929 if (REGNO_PAIR_OK (regno, mode))
6930 {
6931 if (mode == SImode || mode == Pmode)
6932 return true;
6933 }
6934 break;
6935 default:
6936 return false;
6937 }
6938
6939 return false;
6940 }
6941
6942 /* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */
6943
6944 bool
6945 s390_hard_regno_rename_ok (unsigned int old_reg, unsigned int new_reg)
6946 {
6947 /* Once we've decided upon a register to use as base register, it must
6948 no longer be used for any other purpose. */
6949 if (cfun->machine->base_reg)
6950 if (REGNO (cfun->machine->base_reg) == old_reg
6951 || REGNO (cfun->machine->base_reg) == new_reg)
6952 return false;
6953
6954 return true;
6955 }
6956
6957 /* Maximum number of registers to represent a value of mode MODE
6958 in a register of class CLASS. */
6959
6960 bool
6961 s390_class_max_nregs (enum reg_class class, enum machine_mode mode)
6962 {
6963 switch (class)
6964 {
6965 case FP_REGS:
6966 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
6967 return 2 * ((GET_MODE_SIZE (mode) / 2 + 8 - 1) / 8);
6968 else
6969 return (GET_MODE_SIZE (mode) + 8 - 1) / 8;
6970 case ACCESS_REGS:
6971 return (GET_MODE_SIZE (mode) + 4 - 1) / 4;
6972 default:
6973 break;
6974 }
6975 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
6976 }
6977
6978 /* Return true if register FROM can be eliminated via register TO. */
6979
6980 bool
6981 s390_can_eliminate (int from, int to)
6982 {
6983 /* On zSeries machines, we have not marked the base register as fixed.
6984 Instead, we have an elimination rule BASE_REGNUM -> BASE_REGNUM.
6985 If a function requires the base register, we say here that this
6986 elimination cannot be performed. This will cause reload to free
6987 up the base register (as if it were fixed). On the other hand,
6988 if the current function does *not* require the base register, we
6989 say here the elimination succeeds, which in turn allows reload
6990 to allocate the base register for any other purpose. */
6991 if (from == BASE_REGNUM && to == BASE_REGNUM)
6992 {
6993 if (TARGET_CPU_ZARCH)
6994 {
6995 s390_init_frame_layout ();
6996 return cfun->machine->base_reg == NULL_RTX;
6997 }
6998
6999 return false;
7000 }
7001
7002 /* Everything else must point into the stack frame. */
7003 gcc_assert (to == STACK_POINTER_REGNUM
7004 || to == HARD_FRAME_POINTER_REGNUM);
7005
7006 gcc_assert (from == FRAME_POINTER_REGNUM
7007 || from == ARG_POINTER_REGNUM
7008 || from == RETURN_ADDRESS_POINTER_REGNUM);
7009
7010 /* Make sure we actually saved the return address. */
7011 if (from == RETURN_ADDRESS_POINTER_REGNUM)
7012 if (!crtl->calls_eh_return
7013 && !cfun->stdarg
7014 && !cfun_frame_layout.save_return_addr_p)
7015 return false;
7016
7017 return true;
7018 }
7019
7020 /* Return the offset between registers FROM and TO immediately after the prologue. */
7021
7022 HOST_WIDE_INT
7023 s390_initial_elimination_offset (int from, int to)
7024 {
7025 HOST_WIDE_INT offset;
7026 int index;
7027
7028 /* ??? Why are we called for non-eliminable pairs? */
7029 if (!s390_can_eliminate (from, to))
7030 return 0;
7031
7032 switch (from)
7033 {
7034 case FRAME_POINTER_REGNUM:
7035 offset = (get_frame_size()
7036 + STACK_POINTER_OFFSET
7037 + crtl->outgoing_args_size);
7038 break;
7039
7040 case ARG_POINTER_REGNUM:
7041 s390_init_frame_layout ();
7042 offset = cfun_frame_layout.frame_size + STACK_POINTER_OFFSET;
7043 break;
7044
7045 case RETURN_ADDRESS_POINTER_REGNUM:
7046 s390_init_frame_layout ();
7047 index = RETURN_REGNUM - cfun_frame_layout.first_save_gpr_slot;
7048 gcc_assert (index >= 0);
7049 offset = cfun_frame_layout.frame_size + cfun_frame_layout.gprs_offset;
7050 offset += index * UNITS_PER_WORD;
7051 break;
7052
7053 case BASE_REGNUM:
7054 offset = 0;
7055 break;
7056
7057 default:
7058 gcc_unreachable ();
7059 }
7060
7061 return offset;
7062 }
7063
7064 /* Emit insn to save fpr REGNUM at offset OFFSET relative
7065 to register BASE. Return generated insn. */
7066
7067 static rtx
7068 save_fpr (rtx base, int offset, int regnum)
7069 {
7070 rtx addr;
7071 addr = gen_rtx_MEM (DFmode, plus_constant (base, offset));
7072
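/* Save slots of the FPR argument registers may belong to the varargs save
area, so give them the varargs alias set; all other FPR slots get the
frame alias set. */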
7073 if (regnum >= 16 && regnum <= (16 + FP_ARG_NUM_REG))
7074 set_mem_alias_set (addr, get_varargs_alias_set ());
7075 else
7076 set_mem_alias_set (addr, get_frame_alias_set ());
7077
7078 return emit_move_insn (addr, gen_rtx_REG (DFmode, regnum));
7079 }
7080
7081 /* Emit insn to restore fpr REGNUM from offset OFFSET relative
7082 to register BASE. Return generated insn. */
7083
7084 static rtx
7085 restore_fpr (rtx base, int offset, int regnum)
7086 {
7087 rtx addr;
7088 addr = gen_rtx_MEM (DFmode, plus_constant (base, offset));
7089 set_mem_alias_set (addr, get_frame_alias_set ());
7090
7091 return emit_move_insn (gen_rtx_REG (DFmode, regnum), addr);
7092 }
7093
7094 /* Generate insn to save registers FIRST to LAST into
7095 the register save area located at offset OFFSET
7096 relative to register BASE. */
7097
7098 static rtx
7099 save_gprs (rtx base, int offset, int first, int last)
7100 {
7101 rtx addr, insn, note;
7102 int i;
7103
7104 addr = plus_constant (base, offset);
7105 addr = gen_rtx_MEM (Pmode, addr);
7106
7107 set_mem_alias_set (addr, get_frame_alias_set ());
7108
7109 /* Special-case single register. */
7110 if (first == last)
7111 {
7112 if (TARGET_64BIT)
7113 insn = gen_movdi (addr, gen_rtx_REG (Pmode, first));
7114 else
7115 insn = gen_movsi (addr, gen_rtx_REG (Pmode, first));
7116
7117 RTX_FRAME_RELATED_P (insn) = 1;
7118 return insn;
7119 }
7120
7121
7122 insn = gen_store_multiple (addr,
7123 gen_rtx_REG (Pmode, first),
7124 GEN_INT (last - first + 1));
7125
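/* In a varargs function the slots of the argument registers r2..r6 may later
be accessed through va_arg; give those MEMs the varargs alias set. */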
7126 if (first <= 6 && cfun->stdarg)
7127 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
7128 {
7129 rtx mem = XEXP (XVECEXP (PATTERN (insn), 0, i), 0);
7130
7131 if (first + i <= 6)
7132 set_mem_alias_set (mem, get_varargs_alias_set ());
7133 }
7134
7135 /* We need to set the FRAME_RELATED flag on all SETs
7136 inside the store-multiple pattern.
7137
7138 However, we must not emit DWARF records for registers 2..5
7139 if they are stored for use by variable arguments ...
7140
7141 ??? Unfortunately, it is not enough to simply not set the
7142 FRAME_RELATED flags for those SETs, because the first SET
7143 of the PARALLEL is always treated as if it had the flag
7144 set, even if it does not. Therefore we emit a new pattern
7145 without those registers as a REG_FRAME_RELATED_EXPR note. */
7146
7147 if (first >= 6)
7148 {
7149 rtx pat = PATTERN (insn);
7150
7151 for (i = 0; i < XVECLEN (pat, 0); i++)
7152 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
7153 RTX_FRAME_RELATED_P (XVECEXP (pat, 0, i)) = 1;
7154
7155 RTX_FRAME_RELATED_P (insn) = 1;
7156 }
7157 else if (last >= 6)
7158 {
7159 addr = plus_constant (base, offset + (6 - first) * UNITS_PER_WORD);
7160 note = gen_store_multiple (gen_rtx_MEM (Pmode, addr),
7161 gen_rtx_REG (Pmode, 6),
7162 GEN_INT (last - 6 + 1));
7163 note = PATTERN (note);
7164
7165 REG_NOTES (insn) =
7166 gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7167 note, REG_NOTES (insn));
7168
7169 for (i = 0; i < XVECLEN (note, 0); i++)
7170 if (GET_CODE (XVECEXP (note, 0, i)) == SET)
7171 RTX_FRAME_RELATED_P (XVECEXP (note, 0, i)) = 1;
7172
7173 RTX_FRAME_RELATED_P (insn) = 1;
7174 }
7175
7176 return insn;
7177 }
7178
7179 /* Generate insn to restore registers FIRST to LAST from
7180 the register save area located at offset OFFSET
7181 relative to register BASE. */
7182
7183 static rtx
7184 restore_gprs (rtx base, int offset, int first, int last)
7185 {
7186 rtx addr, insn;
7187
7188 addr = plus_constant (base, offset);
7189 addr = gen_rtx_MEM (Pmode, addr);
7190 set_mem_alias_set (addr, get_frame_alias_set ());
7191
7192 /* Special-case single register. */
7193 if (first == last)
7194 {
7195 if (TARGET_64BIT)
7196 insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
7197 else
7198 insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);
7199
7200 return insn;
7201 }
7202
7203 insn = gen_load_multiple (gen_rtx_REG (Pmode, first),
7204 addr,
7205 GEN_INT (last - first + 1));
7206 return insn;
7207 }
7208
7209 /* Return insn sequence to load the GOT register. */
7210
7211 static GTY(()) rtx got_symbol;
7212 rtx
7213 s390_load_got (void)
7214 {
7215 rtx insns;
7216
7217 if (!got_symbol)
7218 {
7219 got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
7220 SYMBOL_REF_FLAGS (got_symbol) = SYMBOL_FLAG_LOCAL;
7221 }
7222
7223 start_sequence ();
7224
7225 if (TARGET_CPU_ZARCH)
7226 {
7227 emit_move_insn (pic_offset_table_rtx, got_symbol);
7228 }
7229 else
7230 {
7231 rtx offset;
7232
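/* Without LARL we load the GOT offset as a literal pool constant
(UNSPEC_LTREL_OFFSET) and then add the literal pool base
(UNSPEC_LTREL_BASE) to obtain the GOT address. */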
7233 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got_symbol),
7234 UNSPEC_LTREL_OFFSET);
7235 offset = gen_rtx_CONST (Pmode, offset);
7236 offset = force_const_mem (Pmode, offset);
7237
7238 emit_move_insn (pic_offset_table_rtx, offset);
7239
7240 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (offset, 0)),
7241 UNSPEC_LTREL_BASE);
7242 offset = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, offset);
7243
7244 emit_move_insn (pic_offset_table_rtx, offset);
7245 }
7246
7247 insns = get_insns ();
7248 end_sequence ();
7249 return insns;
7250 }
7251
7252 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
7253 and the change to the stack pointer. */
7254
7255 static void
7256 s390_emit_stack_tie (void)
7257 {
7258 rtx mem = gen_frame_mem (BLKmode,
7259 gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
7260
7261 emit_insn (gen_stack_tie (mem));
7262 }
7263
7264 /* Expand the prologue into a bunch of separate insns. */
7265
7266 void
7267 s390_emit_prologue (void)
7268 {
7269 rtx insn, addr;
7270 rtx temp_reg;
7271 int i;
7272 int offset;
7273 int next_fpr = 0;
7274
7275 /* Complete frame layout. */
7276
7277 s390_update_frame_layout ();
7278
7279 /* Annotate all constant pool references to let the scheduler know
7280 they implicitly use the base register. */
7281
7282 push_topmost_sequence ();
7283
7284 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7285 if (INSN_P (insn))
7286 {
7287 annotate_constant_pool_refs (&PATTERN (insn));
7288 df_insn_rescan (insn);
7289 }
7290
7291 pop_topmost_sequence ();
7292
7293 /* Choose the best register for temporary use within the prologue.
7294 See below for why TPF must use register 1. */
7295
7296 if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
7297 && !current_function_is_leaf
7298 && !TARGET_TPF_PROFILING)
7299 temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
7300 else
7301 temp_reg = gen_rtx_REG (Pmode, 1);
7302
7303 /* Save call saved gprs. */
7304 if (cfun_frame_layout.first_save_gpr != -1)
7305 {
7306 insn = save_gprs (stack_pointer_rtx,
7307 cfun_frame_layout.gprs_offset +
7308 UNITS_PER_WORD * (cfun_frame_layout.first_save_gpr
7309 - cfun_frame_layout.first_save_gpr_slot),
7310 cfun_frame_layout.first_save_gpr,
7311 cfun_frame_layout.last_save_gpr);
7312 emit_insn (insn);
7313 }
7314
7315 /* Dummy insn to mark literal pool slot. */
7316
7317 if (cfun->machine->base_reg)
7318 emit_insn (gen_main_pool (cfun->machine->base_reg));
7319
7320 offset = cfun_frame_layout.f0_offset;
7321
7322 /* Save f0 and f2. */
7323 for (i = 0; i < 2; i++)
7324 {
7325 if (cfun_fpr_bit_p (i))
7326 {
7327 save_fpr (stack_pointer_rtx, offset, i + 16);
7328 offset += 8;
7329 }
7330 else if (!TARGET_PACKED_STACK)
7331 offset += 8;
7332 }
7333
7334 /* Save f4 and f6. */
7335 offset = cfun_frame_layout.f4_offset;
7336 for (i = 2; i < 4; i++)
7337 {
7338 if (cfun_fpr_bit_p (i))
7339 {
7340 insn = save_fpr (stack_pointer_rtx, offset, i + 16);
7341 offset += 8;
7342
7343 /* If f4 and f6 are call clobbered, they are saved only because of stdarg
7344 and are therefore not frame related. */
7345 if (!call_really_used_regs[i + 16])
7346 RTX_FRAME_RELATED_P (insn) = 1;
7347 }
7348 else if (!TARGET_PACKED_STACK)
7349 offset += 8;
7350 }
7351
7352 if (TARGET_PACKED_STACK
7353 && cfun_save_high_fprs_p
7354 && cfun_frame_layout.f8_offset + cfun_frame_layout.high_fprs * 8 > 0)
7355 {
7356 offset = (cfun_frame_layout.f8_offset
7357 + (cfun_frame_layout.high_fprs - 1) * 8);
7358
7359 for (i = 15; i > 7 && offset >= 0; i--)
7360 if (cfun_fpr_bit_p (i))
7361 {
7362 insn = save_fpr (stack_pointer_rtx, offset, i + 16);
7363
7364 RTX_FRAME_RELATED_P (insn) = 1;
7365 offset -= 8;
7366 }
7367 if (offset >= cfun_frame_layout.f8_offset)
7368 next_fpr = i + 16;
7369 }
7370
7371 if (!TARGET_PACKED_STACK)
7372 next_fpr = cfun_save_high_fprs_p ? 31 : 0;
7373
7374 /* Decrement stack pointer. */
7375
7376 if (cfun_frame_layout.frame_size > 0)
7377 {
7378 rtx frame_off = GEN_INT (-cfun_frame_layout.frame_size);
7379
7380 if (s390_stack_size)
7381 {
7382 HOST_WIDE_INT stack_guard;
7383
7384 if (s390_stack_guard)
7385 stack_guard = s390_stack_guard;
7386 else
7387 {
7388 /* If no value for the stack guard is provided, the smallest power of 2
7389 larger than the current frame size is chosen. */
7390 stack_guard = 1;
7391 while (stack_guard < cfun_frame_layout.frame_size)
7392 stack_guard <<= 1;
7393 }
7394
7395 if (cfun_frame_layout.frame_size >= s390_stack_size)
7396 {
7397 warning (0, "frame size of function %qs is "
7398 HOST_WIDE_INT_PRINT_DEC
7399 " bytes exceeding user provided stack limit of "
7400 HOST_WIDE_INT_PRINT_DEC " bytes. "
7401 "An unconditional trap is added.",
7402 current_function_name(), cfun_frame_layout.frame_size,
7403 s390_stack_size);
7404 emit_insn (gen_trap ());
7405 }
7406 else
7407 {
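/* Presumably s390_stack_size and the stack guard are powers of two here:
when the stack pointer bits between the guard size and the stack size are
all zero, the stack pointer has entered the guard area at the bottom of
the allowed stack, so a conditional trap is emitted for that case. */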
7408 HOST_WIDE_INT stack_check_mask = ((s390_stack_size - 1)
7409 & ~(stack_guard - 1));
7410 rtx t = gen_rtx_AND (Pmode, stack_pointer_rtx,
7411 GEN_INT (stack_check_mask));
7412 if (TARGET_64BIT)
7413 gen_cmpdi (t, const0_rtx);
7414 else
7415 gen_cmpsi (t, const0_rtx);
7416
7417 emit_insn (gen_conditional_trap (gen_rtx_EQ (CCmode,
7418 gen_rtx_REG (CCmode,
7419 CC_REGNUM),
7420 const0_rtx),
7421 const0_rtx));
7422 }
7423 }
7424
7425 if (s390_warn_framesize > 0
7426 && cfun_frame_layout.frame_size >= s390_warn_framesize)
7427 warning (0, "frame size of %qs is " HOST_WIDE_INT_PRINT_DEC " bytes",
7428 current_function_name (), cfun_frame_layout.frame_size);
7429
7430 if (s390_warn_dynamicstack_p && cfun->calls_alloca)
7431 warning (0, "%qs uses dynamic stack allocation", current_function_name ());
7432
7433 /* Save incoming stack pointer into temp reg. */
7434 if (TARGET_BACKCHAIN || next_fpr)
7435 insn = emit_insn (gen_move_insn (temp_reg, stack_pointer_rtx));
7436
7437 /* Subtract frame size from stack pointer. */
7438
7439 if (DISP_IN_RANGE (INTVAL (frame_off)))
7440 {
7441 insn = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7442 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
7443 frame_off));
7444 insn = emit_insn (insn);
7445 }
7446 else
7447 {
7448 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
7449 frame_off = force_const_mem (Pmode, frame_off);
7450
7451 insn = emit_insn (gen_add2_insn (stack_pointer_rtx, frame_off));
7452 annotate_constant_pool_refs (&PATTERN (insn));
7453 }
7454
7455 RTX_FRAME_RELATED_P (insn) = 1;
7456 REG_NOTES (insn) =
7457 gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7458 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7459 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
7460 GEN_INT (-cfun_frame_layout.frame_size))),
7461 REG_NOTES (insn));
7462
7463 /* Set backchain. */
7464
7465 if (TARGET_BACKCHAIN)
7466 {
7467 if (cfun_frame_layout.backchain_offset)
7468 addr = gen_rtx_MEM (Pmode,
7469 plus_constant (stack_pointer_rtx,
7470 cfun_frame_layout.backchain_offset));
7471 else
7472 addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
7473 set_mem_alias_set (addr, get_frame_alias_set ());
7474 insn = emit_insn (gen_move_insn (addr, temp_reg));
7475 }
7476
7477 /* If we support asynchronous exceptions (e.g. for Java),
7478 we need to make sure the backchain pointer is set up
7479 before any possibly trapping memory access. */
7480
7481 if (TARGET_BACKCHAIN && flag_non_call_exceptions)
7482 {
7483 addr = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode));
7484 emit_insn (gen_rtx_CLOBBER (VOIDmode, addr));
7485 }
7486 }
7487
7488 /* Save fprs 8 - 15 (64 bit ABI). */
7489
7490 if (cfun_save_high_fprs_p && next_fpr)
7491 {
7492 /* If the stack might be accessed through a different register
7493 we have to make sure that the stack pointer decrement is not
7494 moved below the use of the stack slots. */
7495 s390_emit_stack_tie ();
7496
7497 insn = emit_insn (gen_add2_insn (temp_reg,
7498 GEN_INT (cfun_frame_layout.f8_offset)));
7499
7500 offset = 0;
7501
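/* temp_reg now points to the f8-f15 save area (the incoming stack pointer
plus f8_offset). Save the selected FPRs there and attach frame-related
notes that express the store addresses relative to the new stack pointer. */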
7502 for (i = 24; i <= next_fpr; i++)
7503 if (cfun_fpr_bit_p (i - 16))
7504 {
7505 rtx addr = plus_constant (stack_pointer_rtx,
7506 cfun_frame_layout.frame_size
7507 + cfun_frame_layout.f8_offset
7508 + offset);
7509
7510 insn = save_fpr (temp_reg, offset, i);
7511 offset += 8;
7512 RTX_FRAME_RELATED_P (insn) = 1;
7513 REG_NOTES (insn) =
7514 gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7515 gen_rtx_SET (VOIDmode,
7516 gen_rtx_MEM (DFmode, addr),
7517 gen_rtx_REG (DFmode, i)),
7518 REG_NOTES (insn));
7519 }
7520 }
7521
7522 /* Set frame pointer, if needed. */
7523
7524 if (frame_pointer_needed)
7525 {
7526 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
7527 RTX_FRAME_RELATED_P (insn) = 1;
7528 }
7529
7530 /* Set up got pointer, if needed. */
7531
7532 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
7533 {
7534 rtx insns = s390_load_got ();
7535
7536 for (insn = insns; insn; insn = NEXT_INSN (insn))
7537 annotate_constant_pool_refs (&PATTERN (insn));
7538
7539 emit_insn (insns);
7540 }
7541
7542 if (TARGET_TPF_PROFILING)
7543 {
7544 /* Generate a BAS instruction to serve as a function
7545 entry intercept to facilitate the use of tracing
7546 algorithms located at the branch target. */
7547 emit_insn (gen_prologue_tpf ());
7548
7549 /* Emit a blockage here so that all code
7550 lies between the profiling mechanisms. */
7551 emit_insn (gen_blockage ());
7552 }
7553 }
7554
7555 /* Expand the epilogue into a bunch of separate insns. */
7556
7557 void
7558 s390_emit_epilogue (bool sibcall)
7559 {
7560 rtx frame_pointer, return_reg;
7561 int area_bottom, area_top, offset = 0;
7562 int next_offset;
7563 rtvec p;
7564 int i;
7565
7566 if (TARGET_TPF_PROFILING)
7567 {
7568
7569 /* Generate a BAS instruction to serve as a function
7570 entry intercept to facilitate the use of tracing
7571 algorithms located at the branch target. */
7572
7573 /* Emit a blockage here so that all code
7574 lies between the profiling mechanisms. */
7575 emit_insn (gen_blockage ());
7576
7577 emit_insn (gen_epilogue_tpf ());
7578 }
7579
7580 /* Check whether to use frame or stack pointer for restore. */
7581
7582 frame_pointer = (frame_pointer_needed
7583 ? hard_frame_pointer_rtx : stack_pointer_rtx);
7584
7585 s390_frame_area (&area_bottom, &area_top);
7586
7587 /* Check whether we can access the register save area.
7588 If not, increment the frame pointer as required. */
7589
7590 if (area_top <= area_bottom)
7591 {
7592 /* Nothing to restore. */
7593 }
7594 else if (DISP_IN_RANGE (cfun_frame_layout.frame_size + area_bottom)
7595 && DISP_IN_RANGE (cfun_frame_layout.frame_size + area_top - 1))
7596 {
7597 /* Area is in range. */
7598 offset = cfun_frame_layout.frame_size;
7599 }
7600 else
7601 {
7602 rtx insn, frame_off;
7603
7604 offset = area_bottom < 0 ? -area_bottom : 0;
7605 frame_off = GEN_INT (cfun_frame_layout.frame_size - offset);
7606
7607 if (DISP_IN_RANGE (INTVAL (frame_off)))
7608 {
7609 insn = gen_rtx_SET (VOIDmode, frame_pointer,
7610 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
7611 insn = emit_insn (insn);
7612 }
7613 else
7614 {
7615 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
7616 frame_off = force_const_mem (Pmode, frame_off);
7617
7618 insn = emit_insn (gen_add2_insn (frame_pointer, frame_off));
7619 annotate_constant_pool_refs (&PATTERN (insn));
7620 }
7621 }
7622
7623 /* Restore call saved fprs. */
7624
7625 if (TARGET_64BIT)
7626 {
7627 if (cfun_save_high_fprs_p)
7628 {
7629 next_offset = cfun_frame_layout.f8_offset;
7630 for (i = 24; i < 32; i++)
7631 {
7632 if (cfun_fpr_bit_p (i - 16))
7633 {
7634 restore_fpr (frame_pointer,
7635 offset + next_offset, i);
7636 next_offset += 8;
7637 }
7638 }
7639 }
7640
7641 }
7642 else
7643 {
7644 next_offset = cfun_frame_layout.f4_offset;
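/* In the 31-bit ABI only f4 and f6 (regnos 18 and 19) are call saved. */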
7645 for (i = 18; i < 20; i++)
7646 {
7647 if (cfun_fpr_bit_p (i - 16))
7648 {
7649 restore_fpr (frame_pointer,
7650 offset + next_offset, i);
7651 next_offset += 8;
7652 }
7653 else if (!TARGET_PACKED_STACK)
7654 next_offset += 8;
7655 }
7656
7657 }
7658
7659 /* Return register. */
7660
7661 return_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
7662
7663 /* Restore call saved gprs. */
7664
7665 if (cfun_frame_layout.first_restore_gpr != -1)
7666 {
7667 rtx insn, addr;
7668 int i;
7669
7670 /* Check for global registers and save them
7671 to the stack locations from which they will be restored. */
7672
7673 for (i = cfun_frame_layout.first_restore_gpr;
7674 i <= cfun_frame_layout.last_restore_gpr;
7675 i++)
7676 {
7677 /* These registers are special and need to be
7678 restored in any case. */
7679 if (i == STACK_POINTER_REGNUM
7680 || i == RETURN_REGNUM
7681 || i == BASE_REGNUM
7682 || (flag_pic && i == (int)PIC_OFFSET_TABLE_REGNUM))
7683 continue;
7684
7685 if (global_regs[i])
7686 {
7687 addr = plus_constant (frame_pointer,
7688 offset + cfun_frame_layout.gprs_offset
7689 + (i - cfun_frame_layout.first_save_gpr_slot)
7690 * UNITS_PER_WORD);
7691 addr = gen_rtx_MEM (Pmode, addr);
7692 set_mem_alias_set (addr, get_frame_alias_set ());
7693 emit_move_insn (addr, gen_rtx_REG (Pmode, i));
7694 }
7695 }
7696
7697 if (! sibcall)
7698 {
7699 /* Fetch the return address from the stack before the load multiple;
7700 this is beneficial for scheduling. */
7701
7702 if (cfun_frame_layout.save_return_addr_p
7703 || (cfun_frame_layout.first_restore_gpr < BASE_REGNUM
7704 && cfun_frame_layout.last_restore_gpr > RETURN_REGNUM))
7705 {
7706 int return_regnum = find_unused_clobbered_reg();
7707 if (!return_regnum)
7708 return_regnum = 4;
7709 return_reg = gen_rtx_REG (Pmode, return_regnum);
7710
7711 addr = plus_constant (frame_pointer,
7712 offset + cfun_frame_layout.gprs_offset
7713 + (RETURN_REGNUM
7714 - cfun_frame_layout.first_save_gpr_slot)
7715 * UNITS_PER_WORD);
7716 addr = gen_rtx_MEM (Pmode, addr);
7717 set_mem_alias_set (addr, get_frame_alias_set ());
7718 emit_move_insn (return_reg, addr);
7719 }
7720 }
7721
7722 insn = restore_gprs (frame_pointer,
7723 offset + cfun_frame_layout.gprs_offset
7724 + (cfun_frame_layout.first_restore_gpr
7725 - cfun_frame_layout.first_save_gpr_slot)
7726 * UNITS_PER_WORD,
7727 cfun_frame_layout.first_restore_gpr,
7728 cfun_frame_layout.last_restore_gpr);
7729 emit_insn (insn);
7730 }
7731
7732 if (! sibcall)
7733 {
7734
7735 /* Return to caller. */
7736
7737 p = rtvec_alloc (2);
7738
7739 RTVEC_ELT (p, 0) = gen_rtx_RETURN (VOIDmode);
7740 RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode, return_reg);
7741 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
7742 }
7743 }
7744
7745
7746 /* Return the size in bytes of a function argument of
7747 type TYPE and/or mode MODE. At least one of TYPE or
7748 MODE must be specified. */
7749
7750 static int
7751 s390_function_arg_size (enum machine_mode mode, const_tree type)
7752 {
7753 if (type)
7754 return int_size_in_bytes (type);
7755
7756 /* No type info available for some library calls ... */
7757 if (mode != BLKmode)
7758 return GET_MODE_SIZE (mode);
7759
7760 /* If we have neither type nor mode, abort. */
7761 gcc_unreachable ();
7762 }
7763
7764 /* Return true if a function argument of type TYPE and mode MODE
7765 is to be passed in a floating-point register, if available. */
7766
7767 static bool
7768 s390_function_arg_float (enum machine_mode mode, tree type)
7769 {
7770 int size = s390_function_arg_size (mode, type);
7771 if (size > 8)
7772 return false;
7773
7774 /* Soft-float changes the ABI: no floating-point registers are used. */
7775 if (TARGET_SOFT_FLOAT)
7776 return false;
7777
7778 /* No type info available for some library calls ... */
7779 if (!type)
7780 return mode == SFmode || mode == DFmode || mode == SDmode || mode == DDmode;
7781
7782 /* The ABI says that record types with a single member are treated
7783 just like that member would be. */
7784 while (TREE_CODE (type) == RECORD_TYPE)
7785 {
7786 tree field, single = NULL_TREE;
7787
7788 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
7789 {
7790 if (TREE_CODE (field) != FIELD_DECL)
7791 continue;
7792
7793 if (single == NULL_TREE)
7794 single = TREE_TYPE (field);
7795 else
7796 return false;
7797 }
7798
7799 if (single == NULL_TREE)
7800 return false;
7801 else
7802 type = single;
7803 }
7804
7805 return TREE_CODE (type) == REAL_TYPE;
7806 }
7807
7808 /* Return true if a function argument of type TYPE and mode MODE
7809 is to be passed in an integer register, or a pair of integer
7810 registers, if available. */
7811
7812 static bool
7813 s390_function_arg_integer (enum machine_mode mode, tree type)
7814 {
7815 int size = s390_function_arg_size (mode, type);
7816 if (size > 8)
7817 return false;
7818
7819 /* No type info available for some library calls ... */
7820 if (!type)
7821 return GET_MODE_CLASS (mode) == MODE_INT
7822 || (TARGET_SOFT_FLOAT && SCALAR_FLOAT_MODE_P (mode));
7823
7824 /* We accept small integral (and similar) types. */
7825 if (INTEGRAL_TYPE_P (type)
7826 || POINTER_TYPE_P (type)
7827 || TREE_CODE (type) == OFFSET_TYPE
7828 || (TARGET_SOFT_FLOAT && TREE_CODE (type) == REAL_TYPE))
7829 return true;
7830
7831 /* We also accept structs of size 1, 2, 4, 8 that are not
7832 passed in floating-point registers. */
7833 if (AGGREGATE_TYPE_P (type)
7834 && exact_log2 (size) >= 0
7835 && !s390_function_arg_float (mode, type))
7836 return true;
7837
7838 return false;
7839 }
7840
7841 /* Return 1 if a function argument of type TYPE and mode MODE
7842 is to be passed by reference. The ABI specifies that only
7843 structures of size 1, 2, 4, or 8 bytes are passed by value,
7844 all other structures (and complex numbers) are passed by
7845 reference. */
7846
7847 static bool
7848 s390_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
7849 enum machine_mode mode, const_tree type,
7850 bool named ATTRIBUTE_UNUSED)
7851 {
7852 int size = s390_function_arg_size (mode, type);
7853 if (size > 8)
7854 return true;
7855
7856 if (type)
7857 {
7858 if (AGGREGATE_TYPE_P (type) && exact_log2 (size) < 0)
7859 return true;
7860
7861 if (TREE_CODE (type) == COMPLEX_TYPE
7862 || TREE_CODE (type) == VECTOR_TYPE)
7863 return true;
7864 }
7865
7866 return false;
7867 }
7868
7869 /* Update the data in CUM to advance over an argument of mode MODE and
7870 data type TYPE. (TYPE is null for libcalls where that information
7871 may not be available.)  The boolean NAMED specifies whether the
7872 argument is a named argument (as opposed to an unnamed argument
7873 matching an ellipsis). */
7874
7875 void
7876 s390_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
7877 tree type, int named ATTRIBUTE_UNUSED)
7878 {
7879 if (s390_function_arg_float (mode, type))
7880 {
7881 cum->fprs += 1;
7882 }
7883 else if (s390_function_arg_integer (mode, type))
7884 {
7885 int size = s390_function_arg_size (mode, type);
7886 cum->gprs += ((size + UNITS_PER_WORD-1) / UNITS_PER_WORD);
7887 }
7888 else
7889 gcc_unreachable ();
7890 }
7891
7892 /* Define where to put the arguments to a function.
7893 Value is zero to push the argument on the stack,
7894 or a hard register in which to store the argument.
7895
7896 MODE is the argument's machine mode.
7897 TYPE is the data type of the argument (as a tree).
7898 This is null for libcalls where that information may
7899 not be available.
7900 CUM is a variable of type CUMULATIVE_ARGS which gives info about
7901 the preceding args and about the function being called.
7902 NAMED is nonzero if this argument is a named parameter
7903 (otherwise it is an extra parameter matching an ellipsis).
7904
7905 On S/390, we use general purpose registers 2 through 6 to
7906 pass integer, pointer, and certain structure arguments, and
7907 floating point registers 0 and 2 (0, 2, 4, and 6 on 64-bit)
7908 to pass floating point arguments. All remaining arguments
7909 are pushed to the stack. */
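/* A hypothetical 31-bit example of these rules (not derived from any
   particular test case):

     void f (int a, double b, long long c, int d);

   passes A in %r2, B in %f0, C in the register pair %r3/%r4, and D in
   %r5; one further integer argument would still fit in %r6, and anything
   beyond that goes on the stack.  */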
7910
7911 rtx
7912 s390_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, tree type,
7913 int named ATTRIBUTE_UNUSED)
7914 {
7915 if (s390_function_arg_float (mode, type))
7916 {
7917 if (cum->fprs + 1 > FP_ARG_NUM_REG)
7918 return 0;
7919 else
7920 return gen_rtx_REG (mode, cum->fprs + 16);
7921 }
7922 else if (s390_function_arg_integer (mode, type))
7923 {
7924 int size = s390_function_arg_size (mode, type);
7925 int n_gprs = (size + UNITS_PER_WORD-1) / UNITS_PER_WORD;
7926
7927 if (cum->gprs + n_gprs > GP_ARG_NUM_REG)
7928 return 0;
7929 else
7930 return gen_rtx_REG (mode, cum->gprs + 2);
7931 }
7932
7933 /* After the real arguments, expand_call calls us once again
7934 with a void_type_node type. Whatever we return here is
7935 passed as operand 2 to the call expanders.
7936
7937 We don't need this feature ... */
7938 else if (type == void_type_node)
7939 return const0_rtx;
7940
7941 gcc_unreachable ();
7942 }
7943
7944 /* Return true if return values of type TYPE should be returned
7945 in a memory buffer whose address is passed by the caller as
7946 hidden first argument. */
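/* Illustrative examples: a 'long long' (8 bytes) is small enough to be
   returned in a register, whereas 'struct { int i; }', although only
   4 bytes, is an aggregate and is therefore always returned through the
   hidden memory buffer.  */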
7947
7948 static bool
7949 s390_return_in_memory (const_tree type, const_tree fundecl ATTRIBUTE_UNUSED)
7950 {
7951 /* We accept small integral (and similar) types. */
7952 if (INTEGRAL_TYPE_P (type)
7953 || POINTER_TYPE_P (type)
7954 || TREE_CODE (type) == OFFSET_TYPE
7955 || TREE_CODE (type) == REAL_TYPE)
7956 return int_size_in_bytes (type) > 8;
7957
7958 /* Aggregates and similar constructs are always returned
7959 in memory. */
7960 if (AGGREGATE_TYPE_P (type)
7961 || TREE_CODE (type) == COMPLEX_TYPE
7962 || TREE_CODE (type) == VECTOR_TYPE)
7963 return true;
7964
7965 /* ??? We get called on all sorts of random stuff from
7966 aggregate_value_p. We can't abort, but it's not clear
7967 what's safe to return. Pretend it's a struct I guess. */
7968 return true;
7969 }
7970
7971 /* Define where to return a (scalar) value of type TYPE.
7972 If TYPE is null, define where to return a (scalar)
7973 value of mode MODE from a libcall. */
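/* For instance (a sketch): with hard float a 'double' result comes back
   in %f0 (hard register 16), while an integral or pointer result comes
   back in %r2, possibly promoted to word mode first.  */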
7974
7975 rtx
7976 s390_function_value (const_tree type, enum machine_mode mode)
7977 {
7978 if (type)
7979 {
7980 int unsignedp = TYPE_UNSIGNED (type);
7981 mode = promote_mode (type, TYPE_MODE (type), &unsignedp, 1);
7982 }
7983
7984 gcc_assert (GET_MODE_CLASS (mode) == MODE_INT || SCALAR_FLOAT_MODE_P (mode));
7985 gcc_assert (GET_MODE_SIZE (mode) <= 8);
7986
7987 if (TARGET_HARD_FLOAT && SCALAR_FLOAT_MODE_P (mode))
7988 return gen_rtx_REG (mode, 16);
7989 else
7990 return gen_rtx_REG (mode, 2);
7991 }
7992
7993
7994 /* Create and return the va_list datatype.
7995
7996 On S/390, va_list is an array type equivalent to
7997
7998 typedef struct __va_list_tag
7999 {
8000 long __gpr;
8001 long __fpr;
8002 void *__overflow_arg_area;
8003 void *__reg_save_area;
8004 } va_list[1];
8005
8006 where __gpr and __fpr hold the number of general purpose
8007 or floating point arguments used up to now, respectively,
8008 __overflow_arg_area points to the stack location of the
8009 next argument passed on the stack, and __reg_save_area
8010 always points to the start of the register area in the
8011 call frame of the current function. The function prologue
8012 saves all registers used for argument passing into this
8013 area if the function uses variable arguments. */
8014
8015 static tree
8016 s390_build_builtin_va_list (void)
8017 {
8018 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
8019
8020 record = lang_hooks.types.make_type (RECORD_TYPE);
8021
8022 type_decl =
8023 build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
8024
8025 f_gpr = build_decl (FIELD_DECL, get_identifier ("__gpr"),
8026 long_integer_type_node);
8027 f_fpr = build_decl (FIELD_DECL, get_identifier ("__fpr"),
8028 long_integer_type_node);
8029 f_ovf = build_decl (FIELD_DECL, get_identifier ("__overflow_arg_area"),
8030 ptr_type_node);
8031 f_sav = build_decl (FIELD_DECL, get_identifier ("__reg_save_area"),
8032 ptr_type_node);
8033
8034 va_list_gpr_counter_field = f_gpr;
8035 va_list_fpr_counter_field = f_fpr;
8036
8037 DECL_FIELD_CONTEXT (f_gpr) = record;
8038 DECL_FIELD_CONTEXT (f_fpr) = record;
8039 DECL_FIELD_CONTEXT (f_ovf) = record;
8040 DECL_FIELD_CONTEXT (f_sav) = record;
8041
8042 TREE_CHAIN (record) = type_decl;
8043 TYPE_NAME (record) = type_decl;
8044 TYPE_FIELDS (record) = f_gpr;
8045 TREE_CHAIN (f_gpr) = f_fpr;
8046 TREE_CHAIN (f_fpr) = f_ovf;
8047 TREE_CHAIN (f_ovf) = f_sav;
8048
8049 layout_type (record);
8050
8051 /* The correct type is an array type of one element. */
8052 return build_array_type (record, build_index_type (size_zero_node));
8053 }
8054
8055 /* Implement va_start by filling the va_list structure VALIST.
8056 STDARG_P is always true, and ignored.
8057 NEXTARG points to the first anonymous stack argument.
8058
8059 The following global variables are used to initialize
8060 the va_list structure:
8061
8062 crtl->args.info:
8063 holds number of gprs and fprs used for named arguments.
8064 crtl->args.arg_offset_rtx:
8065 holds the offset of the first anonymous stack argument
8066 (relative to the virtual arg pointer). */
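/* As an illustrative sketch (hypothetical prototype): for

     int sum (int n, ...);

   __gpr is set to 1 and __fpr to 0, since only the named parameter N has
   consumed a general purpose register.  __overflow_arg_area is only
   initialized when anonymous arguments may spill to the stack, and
   __reg_save_area only when some of them may still live in registers.  */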
8067
8068 static void
8069 s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
8070 {
8071 HOST_WIDE_INT n_gpr, n_fpr;
8072 int off;
8073 tree f_gpr, f_fpr, f_ovf, f_sav;
8074 tree gpr, fpr, ovf, sav, t;
8075
8076 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
8077 f_fpr = TREE_CHAIN (f_gpr);
8078 f_ovf = TREE_CHAIN (f_fpr);
8079 f_sav = TREE_CHAIN (f_ovf);
8080
8081 valist = build_va_arg_indirect_ref (valist);
8082 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
8083 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
8084 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
8085 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
8086
8087 /* Count number of gp and fp argument registers used. */
8088
8089 n_gpr = crtl->args.info.gprs;
8090 n_fpr = crtl->args.info.fprs;
8091
8092 if (cfun->va_list_gpr_size)
8093 {
8094 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (gpr), gpr,
8095 build_int_cst (NULL_TREE, n_gpr));
8096 TREE_SIDE_EFFECTS (t) = 1;
8097 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8098 }
8099
8100 if (cfun->va_list_fpr_size)
8101 {
8102 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (fpr), fpr,
8103 build_int_cst (NULL_TREE, n_fpr));
8104 TREE_SIDE_EFFECTS (t) = 1;
8105 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8106 }
8107
8108 /* Find the overflow area. */
8109 if (n_gpr + cfun->va_list_gpr_size > GP_ARG_NUM_REG
8110 || n_fpr + cfun->va_list_fpr_size > FP_ARG_NUM_REG)
8111 {
8112 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
8113
8114 off = INTVAL (crtl->args.arg_offset_rtx);
8115 off = off < 0 ? 0 : off;
8116 if (TARGET_DEBUG_ARG)
8117 fprintf (stderr, "va_start: n_gpr = %d, n_fpr = %d off %d\n",
8118 (int)n_gpr, (int)n_fpr, off);
8119
8120 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovf), t, size_int (off));
8121
8122 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovf), ovf, t);
8123 TREE_SIDE_EFFECTS (t) = 1;
8124 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8125 }
8126
8127 /* Find the register save area. */
8128 if ((cfun->va_list_gpr_size && n_gpr < GP_ARG_NUM_REG)
8129 || (cfun->va_list_fpr_size && n_fpr < FP_ARG_NUM_REG))
8130 {
8131 t = make_tree (TREE_TYPE (sav), return_address_pointer_rtx);
8132 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (sav), t,
8133 size_int (-RETURN_REGNUM * UNITS_PER_WORD));
8134
8135 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (sav), sav, t);
8136 TREE_SIDE_EFFECTS (t) = 1;
8137 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8138 }
8139 }
8140
8141 /* Implement va_arg by updating the va_list structure
8142 VALIST as required to retrieve an argument of type
8143 TYPE, and returning that argument.
8144
8145 Generates code equivalent to:
8146
8147 if (integral value) {
8148 if ((size <= 4 && args.gpr < 5) ||
8149 (size > 4 && args.gpr < 4))
8150 ret = args.reg_save_area[args.gpr+8]
8151 else
8152 ret = *args.overflow_arg_area++;
8153 } else if (float value) {
8154 if (args.fgpr < 2)
8155 ret = args.reg_save_area[args.fpr+64]
8156 else
8157 ret = *args.overflow_arg_area++;
8158 } else if (aggregate value) {
8159 if (args.gpr < 5)
8160 ret = *args.reg_save_area[args.gpr]
8161 else
8162 ret = **args.overflow_arg_area++;
8163 } */
8164
8165 static tree
8166 s390_gimplify_va_arg (tree valist, tree type, tree *pre_p,
8167 tree *post_p ATTRIBUTE_UNUSED)
8168 {
8169 tree f_gpr, f_fpr, f_ovf, f_sav;
8170 tree gpr, fpr, ovf, sav, reg, t, u;
8171 int indirect_p, size, n_reg, sav_ofs, sav_scale, max_reg;
8172 tree lab_false, lab_over, addr;
8173
8174 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
8175 f_fpr = TREE_CHAIN (f_gpr);
8176 f_ovf = TREE_CHAIN (f_fpr);
8177 f_sav = TREE_CHAIN (f_ovf);
8178
8179 valist = build_va_arg_indirect_ref (valist);
8180 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
8181 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
8182 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
8183 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
8184
8185 size = int_size_in_bytes (type);
8186
8187 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
8188 {
8189 if (TARGET_DEBUG_ARG)
8190 {
8191 fprintf (stderr, "va_arg: aggregate type");
8192 debug_tree (type);
8193 }
8194
8195 /* Aggregates are passed by reference. */
8196 indirect_p = 1;
8197 reg = gpr;
8198 n_reg = 1;
8199
8200 /* Kernel stack layout on 31 bit: it is assumed here that no padding
8201 will be added by s390_frame_info because for va_args an even number
8202 of GPRs always has to be saved (r15-r2 = 14 regs). */
8203 sav_ofs = 2 * UNITS_PER_WORD;
8204 sav_scale = UNITS_PER_WORD;
8205 size = UNITS_PER_WORD;
8206 max_reg = GP_ARG_NUM_REG - n_reg;
8207 }
8208 else if (s390_function_arg_float (TYPE_MODE (type), type))
8209 {
8210 if (TARGET_DEBUG_ARG)
8211 {
8212 fprintf (stderr, "va_arg: float type");
8213 debug_tree (type);
8214 }
8215
8216 /* FP args go in FP registers, if present. */
8217 indirect_p = 0;
8218 reg = fpr;
8219 n_reg = 1;
8220 sav_ofs = 16 * UNITS_PER_WORD;
8221 sav_scale = 8;
8222 max_reg = FP_ARG_NUM_REG - n_reg;
8223 }
8224 else
8225 {
8226 if (TARGET_DEBUG_ARG)
8227 {
8228 fprintf (stderr, "va_arg: other type");
8229 debug_tree (type);
8230 }
8231
8232 /* Otherwise into GP registers. */
8233 indirect_p = 0;
8234 reg = gpr;
8235 n_reg = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
8236
8237 /* Kernel stack layout on 31 bit: it is assumed here that no padding
8238 will be added by s390_frame_info because for va_args an even number
8239 of GPRs always has to be saved (r15-r2 = 14 regs). */
8240 sav_ofs = 2 * UNITS_PER_WORD;
8241
8242 if (size < UNITS_PER_WORD)
8243 sav_ofs += UNITS_PER_WORD - size;
8244
8245 sav_scale = UNITS_PER_WORD;
8246 max_reg = GP_ARG_NUM_REG - n_reg;
8247 }
8248
8249 /* Pull the value out of the saved registers ... */
8250
8251 lab_false = create_artificial_label ();
8252 lab_over = create_artificial_label ();
8253 addr = create_tmp_var (ptr_type_node, "addr");
8254 DECL_POINTER_ALIAS_SET (addr) = get_varargs_alias_set ();
8255
8256 t = fold_convert (TREE_TYPE (reg), size_int (max_reg));
8257 t = build2 (GT_EXPR, boolean_type_node, reg, t);
8258 u = build1 (GOTO_EXPR, void_type_node, lab_false);
8259 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
8260 gimplify_and_add (t, pre_p);
8261
8262 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav,
8263 size_int (sav_ofs));
8264 u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
8265 fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
8266 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t, fold_convert (sizetype, u));
8267
8268 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, t);
8269 gimplify_and_add (t, pre_p);
8270
8271 t = build1 (GOTO_EXPR, void_type_node, lab_over);
8272 gimplify_and_add (t, pre_p);
8273
8274 t = build1 (LABEL_EXPR, void_type_node, lab_false);
8275 append_to_statement_list (t, pre_p);
8276
8277
8278 /* ... Otherwise out of the overflow area. */
8279
8280 t = ovf;
8281 if (size < UNITS_PER_WORD)
8282 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
8283 size_int (UNITS_PER_WORD - size));
8284
8285 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
8286
8287 u = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, t);
8288 gimplify_and_add (u, pre_p);
8289
8290 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
8291 size_int (size));
8292 t = build2 (GIMPLE_MODIFY_STMT, ptr_type_node, ovf, t);
8293 gimplify_and_add (t, pre_p);
8294
8295 t = build1 (LABEL_EXPR, void_type_node, lab_over);
8296 append_to_statement_list (t, pre_p);
8297
8298
8299 /* Increment register save count. */
8300
8301 u = build2 (PREINCREMENT_EXPR, TREE_TYPE (reg), reg,
8302 fold_convert (TREE_TYPE (reg), size_int (n_reg)));
8303 gimplify_and_add (u, pre_p);
8304
8305 if (indirect_p)
8306 {
8307 t = build_pointer_type (build_pointer_type (type));
8308 addr = fold_convert (t, addr);
8309 addr = build_va_arg_indirect_ref (addr);
8310 }
8311 else
8312 {
8313 t = build_pointer_type (type);
8314 addr = fold_convert (t, addr);
8315 }
8316
8317 return build_va_arg_indirect_ref (addr);
8318 }
8319
8320
8321 /* Builtins. */
8322
8323 enum s390_builtin
8324 {
8325 S390_BUILTIN_THREAD_POINTER,
8326 S390_BUILTIN_SET_THREAD_POINTER,
8327
8328 S390_BUILTIN_max
8329 };
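/* A minimal usage sketch of these built-ins from user code (illustrative
   only):

     void *tp = __builtin_thread_pointer ();
     __builtin_set_thread_pointer (tp);

   The former returns the current thread pointer, the latter installs a
   new one; both expand to the get_tp/set_tp patterns selected below.  */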
8330
8331 static unsigned int const code_for_builtin_64[S390_BUILTIN_max] = {
8332 CODE_FOR_get_tp_64,
8333 CODE_FOR_set_tp_64
8334 };
8335
8336 static unsigned int const code_for_builtin_31[S390_BUILTIN_max] = {
8337 CODE_FOR_get_tp_31,
8338 CODE_FOR_set_tp_31
8339 };
8340
8341 static void
8342 s390_init_builtins (void)
8343 {
8344 tree ftype;
8345
8346 ftype = build_function_type (ptr_type_node, void_list_node);
8347 add_builtin_function ("__builtin_thread_pointer", ftype,
8348 S390_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
8349 NULL, NULL_TREE);
8350
8351 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
8352 add_builtin_function ("__builtin_set_thread_pointer", ftype,
8353 S390_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
8354 NULL, NULL_TREE);
8355 }
8356
8357 /* Expand an expression EXP that calls a built-in function,
8358 with result going to TARGET if that's convenient
8359 (and in mode MODE if that's convenient).
8360 SUBTARGET may be used as the target for computing one of EXP's operands.
8361 IGNORE is nonzero if the value is to be ignored. */
8362
8363 static rtx
8364 s390_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
8365 enum machine_mode mode ATTRIBUTE_UNUSED,
8366 int ignore ATTRIBUTE_UNUSED)
8367 {
8368 #define MAX_ARGS 2
8369
8370 unsigned int const *code_for_builtin =
8371 TARGET_64BIT ? code_for_builtin_64 : code_for_builtin_31;
8372
8373 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
8374 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
8375 enum insn_code icode;
8376 rtx op[MAX_ARGS], pat;
8377 int arity;
8378 bool nonvoid;
8379 tree arg;
8380 call_expr_arg_iterator iter;
8381
8382 if (fcode >= S390_BUILTIN_max)
8383 internal_error ("bad builtin fcode");
8384 icode = code_for_builtin[fcode];
8385 if (icode == 0)
8386 internal_error ("bad builtin fcode");
8387
8388 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
8389
8390 arity = 0;
8391 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
8392 {
8393 const struct insn_operand_data *insn_op;
8394
8395 if (arg == error_mark_node)
8396 return NULL_RTX;
8397 if (arity > MAX_ARGS)
8398 return NULL_RTX;
8399
8400 insn_op = &insn_data[icode].operand[arity + nonvoid];
8401
8402 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, 0);
8403
8404 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
8405 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
8406 arity++;
8407 }
8408
8409 if (nonvoid)
8410 {
8411 enum machine_mode tmode = insn_data[icode].operand[0].mode;
8412 if (!target
8413 || GET_MODE (target) != tmode
8414 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
8415 target = gen_reg_rtx (tmode);
8416 }
8417
8418 switch (arity)
8419 {
8420 case 0:
8421 pat = GEN_FCN (icode) (target);
8422 break;
8423 case 1:
8424 if (nonvoid)
8425 pat = GEN_FCN (icode) (target, op[0]);
8426 else
8427 pat = GEN_FCN (icode) (op[0]);
8428 break;
8429 case 2:
8430 pat = GEN_FCN (icode) (target, op[0], op[1]);
8431 break;
8432 default:
8433 gcc_unreachable ();
8434 }
8435 if (!pat)
8436 return NULL_RTX;
8437 emit_insn (pat);
8438
8439 if (nonvoid)
8440 return target;
8441 else
8442 return const0_rtx;
8443 }
8444
8445
8446 /* Output assembly code for the trampoline template to
8447 stdio stream FILE.
8448
8449 On S/390, we use gpr 1 internally in the trampoline code;
8450 gpr 0 is used to hold the static chain. */
8451
8452 void
8453 s390_trampoline_template (FILE *file)
8454 {
8455 rtx op[2];
8456 op[0] = gen_rtx_REG (Pmode, 0);
8457 op[1] = gen_rtx_REG (Pmode, 1);
8458
8459 if (TARGET_64BIT)
8460 {
8461 output_asm_insn ("basr\t%1,0", op);
8462 output_asm_insn ("lmg\t%0,%1,14(%1)", op);
8463 output_asm_insn ("br\t%1", op);
8464 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 10));
8465 }
8466 else
8467 {
8468 output_asm_insn ("basr\t%1,0", op);
8469 output_asm_insn ("lm\t%0,%1,6(%1)", op);
8470 output_asm_insn ("br\t%1", op);
8471 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 8));
8472 }
8473 }
8474
8475 /* Emit RTL insns to initialize the variable parts of a trampoline.
8476 FNADDR is an RTX for the address of the function's pure code.
8477 CXT is an RTX for the static chain value for the function. */
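/* Sketch of the resulting trampoline layout, assuming the template emitted
   by s390_trampoline_template was copied unmodified to ADDR (64-bit; byte
   offsets):

     ADDR+0:   basr  %r1,0
     ADDR+2:   lmg   %r0,%r1,14(%r1)    loads the two words below
     ADDR+8:   br    %r1
     ADDR+16:  static chain (CXT)
     ADDR+24:  function address (FNADDR)

   On 31 bit the data words are stored at ADDR+8 and ADDR+12 instead.  */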
8478
8479 void
8480 s390_initialize_trampoline (rtx addr, rtx fnaddr, rtx cxt)
8481 {
8482 emit_move_insn (gen_rtx_MEM (Pmode,
8483 memory_address (Pmode,
8484 plus_constant (addr, (TARGET_64BIT ? 16 : 8)))), cxt);
8485 emit_move_insn (gen_rtx_MEM (Pmode,
8486 memory_address (Pmode,
8487 plus_constant (addr, (TARGET_64BIT ? 24 : 12)))), fnaddr);
8488 }
8489
8490 /* Output assembler code to FILE to increment profiler label # LABELNO
8491 for profiling a function entry. */
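/* Illustrative rendering of the sequence emitted below for a 64-bit,
   non-PIC compile (label number hypothetical):

     stg   %r14,8(%r15)
     larl  %r1,.LP0
     brasl %r14,_mcount
     lg    %r14,8(%r15)

   (For PIC the _mcount reference goes through the PLT instead.)  */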
8492
8493 void
8494 s390_function_profiler (FILE *file, int labelno)
8495 {
8496 rtx op[7];
8497
8498 char label[128];
8499 ASM_GENERATE_INTERNAL_LABEL (label, "LP", labelno);
8500
8501 fprintf (file, "# function profiler \n");
8502
8503 op[0] = gen_rtx_REG (Pmode, RETURN_REGNUM);
8504 op[1] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
8505 op[1] = gen_rtx_MEM (Pmode, plus_constant (op[1], UNITS_PER_WORD));
8506
8507 op[2] = gen_rtx_REG (Pmode, 1);
8508 op[3] = gen_rtx_SYMBOL_REF (Pmode, label);
8509 SYMBOL_REF_FLAGS (op[3]) = SYMBOL_FLAG_LOCAL;
8510
8511 op[4] = gen_rtx_SYMBOL_REF (Pmode, "_mcount");
8512 if (flag_pic)
8513 {
8514 op[4] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[4]), UNSPEC_PLT);
8515 op[4] = gen_rtx_CONST (Pmode, op[4]);
8516 }
8517
8518 if (TARGET_64BIT)
8519 {
8520 output_asm_insn ("stg\t%0,%1", op);
8521 output_asm_insn ("larl\t%2,%3", op);
8522 output_asm_insn ("brasl\t%0,%4", op);
8523 output_asm_insn ("lg\t%0,%1", op);
8524 }
8525 else if (!flag_pic)
8526 {
8527 op[6] = gen_label_rtx ();
8528
8529 output_asm_insn ("st\t%0,%1", op);
8530 output_asm_insn ("bras\t%2,%l6", op);
8531 output_asm_insn (".long\t%4", op);
8532 output_asm_insn (".long\t%3", op);
8533 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
8534 output_asm_insn ("l\t%0,0(%2)", op);
8535 output_asm_insn ("l\t%2,4(%2)", op);
8536 output_asm_insn ("basr\t%0,%0", op);
8537 output_asm_insn ("l\t%0,%1", op);
8538 }
8539 else
8540 {
8541 op[5] = gen_label_rtx ();
8542 op[6] = gen_label_rtx ();
8543
8544 output_asm_insn ("st\t%0,%1", op);
8545 output_asm_insn ("bras\t%2,%l6", op);
8546 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[5]));
8547 output_asm_insn (".long\t%4-%l5", op);
8548 output_asm_insn (".long\t%3-%l5", op);
8549 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
8550 output_asm_insn ("lr\t%0,%2", op);
8551 output_asm_insn ("a\t%0,0(%2)", op);
8552 output_asm_insn ("a\t%2,4(%2)", op);
8553 output_asm_insn ("basr\t%0,%0", op);
8554 output_asm_insn ("l\t%0,%1", op);
8555 }
8556 }
8557
8558 /* Encode symbol attributes (local vs. global, tls model) of a SYMBOL_REF
8559 into its SYMBOL_REF_FLAGS. */
8560
8561 static void
8562 s390_encode_section_info (tree decl, rtx rtl, int first)
8563 {
8564 default_encode_section_info (decl, rtl, first);
8565
8566 /* If a variable has a forced alignment to < 2 bytes, mark it with
8567 SYMBOL_FLAG_ALIGN1 to prevent it from being used as LARL operand. */
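/* For example (illustrative): 'char x __attribute__ ((aligned (1)));' has a
   user-forced alignment of one byte and would be marked here, since LARL
   can only address even (2-byte aligned) locations.  */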
8568 if (TREE_CODE (decl) == VAR_DECL
8569 && DECL_USER_ALIGN (decl) && DECL_ALIGN (decl) < 16)
8570 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_ALIGN1;
8571 }
8572
8573 /* Output thunk to FILE that implements a C++ virtual function call (with
8574 multiple inheritance) to FUNCTION. The thunk adjusts the this pointer
8575 by DELTA, and unless VCALL_OFFSET is zero, applies an additional adjustment
8576 stored at VCALL_OFFSET in the vtable whose address is located at offset 0
8577 relative to the resulting this pointer. */
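/* In the common 64-bit case of a small non-negative DELTA, a zero
   VCALL_OFFSET, and a local or non-PIC FUNCTION, the code below reduces
   to a two-instruction thunk (sketch):

     la %r2,DELTA(%r2)
     jg FUNCTION
*/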
8578
8579 static void
8580 s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
8581 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8582 tree function)
8583 {
8584 rtx op[10];
8585 int nonlocal = 0;
8586
8587 /* Operand 0 is the target function. */
8588 op[0] = XEXP (DECL_RTL (function), 0);
8589 if (flag_pic && !SYMBOL_REF_LOCAL_P (op[0]))
8590 {
8591 nonlocal = 1;
8592 op[0] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[0]),
8593 TARGET_64BIT ? UNSPEC_PLT : UNSPEC_GOT);
8594 op[0] = gen_rtx_CONST (Pmode, op[0]);
8595 }
8596
8597 /* Operand 1 is the 'this' pointer. */
8598 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8599 op[1] = gen_rtx_REG (Pmode, 3);
8600 else
8601 op[1] = gen_rtx_REG (Pmode, 2);
8602
8603 /* Operand 2 is the delta. */
8604 op[2] = GEN_INT (delta);
8605
8606 /* Operand 3 is the vcall_offset. */
8607 op[3] = GEN_INT (vcall_offset);
8608
8609 /* Operand 4 is the temporary register. */
8610 op[4] = gen_rtx_REG (Pmode, 1);
8611
8612 /* Operands 5 to 8 can be used as labels. */
8613 op[5] = NULL_RTX;
8614 op[6] = NULL_RTX;
8615 op[7] = NULL_RTX;
8616 op[8] = NULL_RTX;
8617
8618 /* Operand 9 can be used for temporary register. */
8619 op[9] = NULL_RTX;
8620
8621 /* Generate code. */
8622 if (TARGET_64BIT)
8623 {
8624 /* Setup literal pool pointer if required. */
8625 if ((!DISP_IN_RANGE (delta)
8626 && !CONST_OK_FOR_K (delta)
8627 && !CONST_OK_FOR_Os (delta))
8628 || (!DISP_IN_RANGE (vcall_offset)
8629 && !CONST_OK_FOR_K (vcall_offset)
8630 && !CONST_OK_FOR_Os (vcall_offset)))
8631 {
8632 op[5] = gen_label_rtx ();
8633 output_asm_insn ("larl\t%4,%5", op);
8634 }
8635
8636 /* Add DELTA to this pointer. */
8637 if (delta)
8638 {
8639 if (CONST_OK_FOR_J (delta))
8640 output_asm_insn ("la\t%1,%2(%1)", op);
8641 else if (DISP_IN_RANGE (delta))
8642 output_asm_insn ("lay\t%1,%2(%1)", op);
8643 else if (CONST_OK_FOR_K (delta))
8644 output_asm_insn ("aghi\t%1,%2", op);
8645 else if (CONST_OK_FOR_Os (delta))
8646 output_asm_insn ("agfi\t%1,%2", op);
8647 else
8648 {
8649 op[6] = gen_label_rtx ();
8650 output_asm_insn ("agf\t%1,%6-%5(%4)", op);
8651 }
8652 }
8653
8654 /* Perform vcall adjustment. */
8655 if (vcall_offset)
8656 {
8657 if (DISP_IN_RANGE (vcall_offset))
8658 {
8659 output_asm_insn ("lg\t%4,0(%1)", op);
8660 output_asm_insn ("ag\t%1,%3(%4)", op);
8661 }
8662 else if (CONST_OK_FOR_K (vcall_offset))
8663 {
8664 output_asm_insn ("lghi\t%4,%3", op);
8665 output_asm_insn ("ag\t%4,0(%1)", op);
8666 output_asm_insn ("ag\t%1,0(%4)", op);
8667 }
8668 else if (CONST_OK_FOR_Os (vcall_offset))
8669 {
8670 output_asm_insn ("lgfi\t%4,%3", op);
8671 output_asm_insn ("ag\t%4,0(%1)", op);
8672 output_asm_insn ("ag\t%1,0(%4)", op);
8673 }
8674 else
8675 {
8676 op[7] = gen_label_rtx ();
8677 output_asm_insn ("llgf\t%4,%7-%5(%4)", op);
8678 output_asm_insn ("ag\t%4,0(%1)", op);
8679 output_asm_insn ("ag\t%1,0(%4)", op);
8680 }
8681 }
8682
8683 /* Jump to target. */
8684 output_asm_insn ("jg\t%0", op);
8685
8686 /* Output literal pool if required. */
8687 if (op[5])
8688 {
8689 output_asm_insn (".align\t4", op);
8690 targetm.asm_out.internal_label (file, "L",
8691 CODE_LABEL_NUMBER (op[5]));
8692 }
8693 if (op[6])
8694 {
8695 targetm.asm_out.internal_label (file, "L",
8696 CODE_LABEL_NUMBER (op[6]));
8697 output_asm_insn (".long\t%2", op);
8698 }
8699 if (op[7])
8700 {
8701 targetm.asm_out.internal_label (file, "L",
8702 CODE_LABEL_NUMBER (op[7]));
8703 output_asm_insn (".long\t%3", op);
8704 }
8705 }
8706 else
8707 {
8708 /* Setup base pointer if required. */
8709 if (!vcall_offset
8710 || (!DISP_IN_RANGE (delta)
8711 && !CONST_OK_FOR_K (delta)
8712 && !CONST_OK_FOR_Os (delta))
8713 || (!DISP_IN_RANGE (vcall_offset)
8714 && !CONST_OK_FOR_K (vcall_offset)
8715 && !CONST_OK_FOR_Os (vcall_offset)))
8716 {
8717 op[5] = gen_label_rtx ();
8718 output_asm_insn ("basr\t%4,0", op);
8719 targetm.asm_out.internal_label (file, "L",
8720 CODE_LABEL_NUMBER (op[5]));
8721 }
8722
8723 /* Add DELTA to this pointer. */
8724 if (delta)
8725 {
8726 if (CONST_OK_FOR_J (delta))
8727 output_asm_insn ("la\t%1,%2(%1)", op);
8728 else if (DISP_IN_RANGE (delta))
8729 output_asm_insn ("lay\t%1,%2(%1)", op);
8730 else if (CONST_OK_FOR_K (delta))
8731 output_asm_insn ("ahi\t%1,%2", op);
8732 else if (CONST_OK_FOR_Os (delta))
8733 output_asm_insn ("afi\t%1,%2", op);
8734 else
8735 {
8736 op[6] = gen_label_rtx ();
8737 output_asm_insn ("a\t%1,%6-%5(%4)", op);
8738 }
8739 }
8740
8741 /* Perform vcall adjustment. */
8742 if (vcall_offset)
8743 {
8744 if (CONST_OK_FOR_J (vcall_offset))
8745 {
8746 output_asm_insn ("l\t%4,0(%1)", op);
8747 output_asm_insn ("a\t%1,%3(%4)", op);
8748 }
8749 else if (DISP_IN_RANGE (vcall_offset))
8750 {
8751 output_asm_insn ("l\t%4,0(%1)", op);
8752 output_asm_insn ("ay\t%1,%3(%4)", op);
8753 }
8754 else if (CONST_OK_FOR_K (vcall_offset))
8755 {
8756 output_asm_insn ("lhi\t%4,%3", op);
8757 output_asm_insn ("a\t%4,0(%1)", op);
8758 output_asm_insn ("a\t%1,0(%4)", op);
8759 }
8760 else if (CONST_OK_FOR_Os (vcall_offset))
8761 {
8762 output_asm_insn ("iilf\t%4,%3", op);
8763 output_asm_insn ("a\t%4,0(%1)", op);
8764 output_asm_insn ("a\t%1,0(%4)", op);
8765 }
8766 else
8767 {
8768 op[7] = gen_label_rtx ();
8769 output_asm_insn ("l\t%4,%7-%5(%4)", op);
8770 output_asm_insn ("a\t%4,0(%1)", op);
8771 output_asm_insn ("a\t%1,0(%4)", op);
8772 }
8773
8774 /* We had to clobber the base pointer register.
8775 Re-setup the base pointer (with a different base). */
8776 op[5] = gen_label_rtx ();
8777 output_asm_insn ("basr\t%4,0", op);
8778 targetm.asm_out.internal_label (file, "L",
8779 CODE_LABEL_NUMBER (op[5]));
8780 }
8781
8782 /* Jump to target. */
8783 op[8] = gen_label_rtx ();
8784
8785 if (!flag_pic)
8786 output_asm_insn ("l\t%4,%8-%5(%4)", op);
8787 else if (!nonlocal)
8788 output_asm_insn ("a\t%4,%8-%5(%4)", op);
8789 /* We cannot call through .plt, since .plt requires %r12 loaded. */
8790 else if (flag_pic == 1)
8791 {
8792 output_asm_insn ("a\t%4,%8-%5(%4)", op);
8793 output_asm_insn ("l\t%4,%0(%4)", op);
8794 }
8795 else if (flag_pic == 2)
8796 {
8797 op[9] = gen_rtx_REG (Pmode, 0);
8798 output_asm_insn ("l\t%9,%8-4-%5(%4)", op);
8799 output_asm_insn ("a\t%4,%8-%5(%4)", op);
8800 output_asm_insn ("ar\t%4,%9", op);
8801 output_asm_insn ("l\t%4,0(%4)", op);
8802 }
8803
8804 output_asm_insn ("br\t%4", op);
8805
8806 /* Output literal pool. */
8807 output_asm_insn (".align\t4", op);
8808
8809 if (nonlocal && flag_pic == 2)
8810 output_asm_insn (".long\t%0", op);
8811 if (nonlocal)
8812 {
8813 op[0] = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
8814 SYMBOL_REF_FLAGS (op[0]) = SYMBOL_FLAG_LOCAL;
8815 }
8816
8817 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[8]));
8818 if (!flag_pic)
8819 output_asm_insn (".long\t%0", op);
8820 else
8821 output_asm_insn (".long\t%0-%5", op);
8822
8823 if (op[6])
8824 {
8825 targetm.asm_out.internal_label (file, "L",
8826 CODE_LABEL_NUMBER (op[6]));
8827 output_asm_insn (".long\t%2", op);
8828 }
8829 if (op[7])
8830 {
8831 targetm.asm_out.internal_label (file, "L",
8832 CODE_LABEL_NUMBER (op[7]));
8833 output_asm_insn (".long\t%3", op);
8834 }
8835 }
8836 }
8837
8838 static bool
8839 s390_valid_pointer_mode (enum machine_mode mode)
8840 {
8841 return (mode == SImode || (TARGET_64BIT && mode == DImode));
8842 }
8843
8844 /* Checks whether the given CALL_EXPR would use a call-saved
8845 register. This is used to decide whether sibling call
8846 optimization could be performed on the respective function
8847 call. */
8848
8849 static bool
8850 s390_call_saved_register_used (tree call_expr)
8851 {
8852 CUMULATIVE_ARGS cum;
8853 tree parameter;
8854 enum machine_mode mode;
8855 tree type;
8856 rtx parm_rtx;
8857 int reg, i;
8858
8859 INIT_CUMULATIVE_ARGS (cum, NULL, NULL, 0, 0);
8860
8861 for (i = 0; i < call_expr_nargs (call_expr); i++)
8862 {
8863 parameter = CALL_EXPR_ARG (call_expr, i);
8864 gcc_assert (parameter);
8865
8866 /* For an undeclared variable passed as parameter we will get
8867 an ERROR_MARK node here. */
8868 if (TREE_CODE (parameter) == ERROR_MARK)
8869 return true;
8870
8871 type = TREE_TYPE (parameter);
8872 gcc_assert (type);
8873
8874 mode = TYPE_MODE (type);
8875 gcc_assert (mode);
8876
8877 if (pass_by_reference (&cum, mode, type, true))
8878 {
8879 mode = Pmode;
8880 type = build_pointer_type (type);
8881 }
8882
8883 parm_rtx = s390_function_arg (&cum, mode, type, 0);
8884
8885 s390_function_arg_advance (&cum, mode, type, 0);
8886
8887 if (parm_rtx && REG_P (parm_rtx))
8888 {
8889 for (reg = 0;
8890 reg < HARD_REGNO_NREGS (REGNO (parm_rtx), GET_MODE (parm_rtx));
8891 reg++)
8892 if (! call_used_regs[reg + REGNO (parm_rtx)])
8893 return true;
8894 }
8895 }
8896 return false;
8897 }
8898
8899 /* Return true if the given call expression can be
8900 turned into a sibling call.
8901 DECL holds the declaration of the function to be called whereas
8902 EXP is the call expression itself. */
8903
8904 static bool
8905 s390_function_ok_for_sibcall (tree decl, tree exp)
8906 {
8907 /* The TPF epilogue uses register 1. */
8908 if (TARGET_TPF_PROFILING)
8909 return false;
8910
8911 /* The 31 bit PLT code uses register 12 (GOT pointer - caller saved)
8912 which would have to be restored before the sibcall. */
8913 if (!TARGET_64BIT && flag_pic && decl && !targetm.binds_local_p (decl))
8914 return false;
8915
8916 /* Register 6 on s390 is available as an argument register but is
8917 unfortunately "call saved" (preserved across calls). This makes functions
8918 needing this register for arguments not suitable for sibcalls. */
8919 return !s390_call_saved_register_used (exp);
8920 }
8921
8922 /* Return the fixed registers used for condition codes. */
8923
8924 static bool
8925 s390_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
8926 {
8927 *p1 = CC_REGNUM;
8928 *p2 = INVALID_REGNUM;
8929
8930 return true;
8931 }
8932
8933 /* This function is used by the call expanders of the machine description.
8934 It emits the call insn itself together with the necessary operations
8935 to adjust the target address and returns the emitted insn.
8936 ADDR_LOCATION is the target address rtx
8937 TLS_CALL the location of the thread-local symbol
8938 RESULT_REG the register where the result of the call should be stored
8939 RETADDR_REG the register where the return address should be stored
8940 If this parameter is NULL_RTX the call is considered
8941 to be a sibling call. */
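/* Sketch of the RTL emitted for an ordinary (non-sibling, non-TLS) call
   that returns a value:

     (parallel [(set RESULT_REG (call (mem:QI ADDR_LOCATION) (const_int 0)))
                (clobber RETADDR_REG)])
*/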
8942
8943 rtx
8944 s390_emit_call (rtx addr_location, rtx tls_call, rtx result_reg,
8945 rtx retaddr_reg)
8946 {
8947 bool plt_call = false;
8948 rtx insn;
8949 rtx call;
8950 rtx clobber;
8951 rtvec vec;
8952
8953 /* Direct function calls need special treatment. */
8954 if (GET_CODE (addr_location) == SYMBOL_REF)
8955 {
8956 /* When calling a global routine in PIC mode, we must
8957 replace the symbol itself with the PLT stub. */
8958 if (flag_pic && !SYMBOL_REF_LOCAL_P (addr_location))
8959 {
8960 addr_location = gen_rtx_UNSPEC (Pmode,
8961 gen_rtvec (1, addr_location),
8962 UNSPEC_PLT);
8963 addr_location = gen_rtx_CONST (Pmode, addr_location);
8964 plt_call = true;
8965 }
8966
8967 /* Unless we can use the bras(l) insn, force the
8968 routine address into a register. */
8969 if (!TARGET_SMALL_EXEC && !TARGET_CPU_ZARCH)
8970 {
8971 if (flag_pic)
8972 addr_location = legitimize_pic_address (addr_location, 0);
8973 else
8974 addr_location = force_reg (Pmode, addr_location);
8975 }
8976 }
8977
8978 /* If it is already an indirect call or the code above moved the
8979 SYMBOL_REF to somewhere else, make sure the address can be found in
8980 register 1. */
8981 if (retaddr_reg == NULL_RTX
8982 && GET_CODE (addr_location) != SYMBOL_REF
8983 && !plt_call)
8984 {
8985 emit_move_insn (gen_rtx_REG (Pmode, SIBCALL_REGNUM), addr_location);
8986 addr_location = gen_rtx_REG (Pmode, SIBCALL_REGNUM);
8987 }
8988
8989 addr_location = gen_rtx_MEM (QImode, addr_location);
8990 call = gen_rtx_CALL (VOIDmode, addr_location, const0_rtx);
8991
8992 if (result_reg != NULL_RTX)
8993 call = gen_rtx_SET (VOIDmode, result_reg, call);
8994
8995 if (retaddr_reg != NULL_RTX)
8996 {
8997 clobber = gen_rtx_CLOBBER (VOIDmode, retaddr_reg);
8998
8999 if (tls_call != NULL_RTX)
9000 vec = gen_rtvec (3, call, clobber,
9001 gen_rtx_USE (VOIDmode, tls_call));
9002 else
9003 vec = gen_rtvec (2, call, clobber);
9004
9005 call = gen_rtx_PARALLEL (VOIDmode, vec);
9006 }
9007
9008 insn = emit_call_insn (call);
9009
9010 /* 31-bit PLT stubs and tls calls use the GOT register implicitly. */
9011 if ((!TARGET_64BIT && plt_call) || tls_call != NULL_RTX)
9012 {
9013 /* s390_function_ok_for_sibcall should
9014 have denied sibcalls in this case. */
9015 gcc_assert (retaddr_reg != NULL_RTX);
9016
9017 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
9018 }
9019 return insn;
9020 }
9021
9022 /* Implement CONDITIONAL_REGISTER_USAGE. */
9023
9024 void
9025 s390_conditional_register_usage (void)
9026 {
9027 int i;
9028
9029 if (flag_pic)
9030 {
9031 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
9032 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
9033 }
9034 if (TARGET_CPU_ZARCH)
9035 {
9036 fixed_regs[BASE_REGNUM] = 0;
9037 call_used_regs[BASE_REGNUM] = 0;
9038 fixed_regs[RETURN_REGNUM] = 0;
9039 call_used_regs[RETURN_REGNUM] = 0;
9040 }
9041 if (TARGET_64BIT)
9042 {
9043 for (i = 24; i < 32; i++)
9044 call_used_regs[i] = call_really_used_regs[i] = 0;
9045 }
9046 else
9047 {
9048 for (i = 18; i < 20; i++)
9049 call_used_regs[i] = call_really_used_regs[i] = 0;
9050 }
9051
9052 if (TARGET_SOFT_FLOAT)
9053 {
9054 for (i = 16; i < 32; i++)
9055 call_used_regs[i] = fixed_regs[i] = 1;
9056 }
9057 }
9058
9059 /* Corresponding function to eh_return expander. */
9060
9061 static GTY(()) rtx s390_tpf_eh_return_symbol;
9062 void
9063 s390_emit_tpf_eh_return (rtx target)
9064 {
9065 rtx insn, reg;
9066
9067 if (!s390_tpf_eh_return_symbol)
9068 s390_tpf_eh_return_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tpf_eh_return");
9069
9070 reg = gen_rtx_REG (Pmode, 2);
9071
9072 emit_move_insn (reg, target);
9073 insn = s390_emit_call (s390_tpf_eh_return_symbol, NULL_RTX, reg,
9074 gen_rtx_REG (Pmode, RETURN_REGNUM));
9075 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
9076
9077 emit_move_insn (EH_RETURN_HANDLER_RTX, reg);
9078 }
9079
9080 /* Rework the prologue/epilogue to avoid saving/restoring
9081 registers unnecessarily. */
9082
9083 static void
9084 s390_optimize_prologue (void)
9085 {
9086 rtx insn, new_insn, next_insn;
9087
9088 /* Do a final recompute of the frame-related data. */
9089
9090 s390_update_frame_layout ();
9091
9092 /* If all special registers are in fact used, there's nothing we
9093 can do, so no point in walking the insn list. */
9094
9095 if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
9096 && cfun_frame_layout.last_save_gpr >= BASE_REGNUM
9097 && (TARGET_CPU_ZARCH
9098 || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
9099 && cfun_frame_layout.last_save_gpr >= RETURN_REGNUM)))
9100 return;
9101
9102 /* Search for prologue/epilogue insns and replace them. */
9103
9104 for (insn = get_insns (); insn; insn = next_insn)
9105 {
9106 int first, last, off;
9107 rtx set, base, offset;
9108
9109 next_insn = NEXT_INSN (insn);
9110
9111 if (GET_CODE (insn) != INSN)
9112 continue;
9113
9114 if (GET_CODE (PATTERN (insn)) == PARALLEL
9115 && store_multiple_operation (PATTERN (insn), VOIDmode))
9116 {
9117 set = XVECEXP (PATTERN (insn), 0, 0);
9118 first = REGNO (SET_SRC (set));
9119 last = first + XVECLEN (PATTERN (insn), 0) - 1;
9120 offset = const0_rtx;
9121 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
9122 off = INTVAL (offset);
9123
9124 if (GET_CODE (base) != REG || off < 0)
9125 continue;
9126 if (cfun_frame_layout.first_save_gpr != -1
9127 && (cfun_frame_layout.first_save_gpr < first
9128 || cfun_frame_layout.last_save_gpr > last))
9129 continue;
9130 if (REGNO (base) != STACK_POINTER_REGNUM
9131 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
9132 continue;
9133 if (first > BASE_REGNUM || last < BASE_REGNUM)
9134 continue;
9135
9136 if (cfun_frame_layout.first_save_gpr != -1)
9137 {
9138 new_insn = save_gprs (base,
9139 off + (cfun_frame_layout.first_save_gpr
9140 - first) * UNITS_PER_WORD,
9141 cfun_frame_layout.first_save_gpr,
9142 cfun_frame_layout.last_save_gpr);
9143 new_insn = emit_insn_before (new_insn, insn);
9144 INSN_ADDRESSES_NEW (new_insn, -1);
9145 }
9146
9147 remove_insn (insn);
9148 continue;
9149 }
9150
9151 if (cfun_frame_layout.first_save_gpr == -1
9152 && GET_CODE (PATTERN (insn)) == SET
9153 && GET_CODE (SET_SRC (PATTERN (insn))) == REG
9154 && (REGNO (SET_SRC (PATTERN (insn))) == BASE_REGNUM
9155 || (!TARGET_CPU_ZARCH
9156 && REGNO (SET_SRC (PATTERN (insn))) == RETURN_REGNUM))
9157 && GET_CODE (SET_DEST (PATTERN (insn))) == MEM)
9158 {
9159 set = PATTERN (insn);
9160 first = REGNO (SET_SRC (set));
9161 offset = const0_rtx;
9162 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
9163 off = INTVAL (offset);
9164
9165 if (GET_CODE (base) != REG || off < 0)
9166 continue;
9167 if (REGNO (base) != STACK_POINTER_REGNUM
9168 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
9169 continue;
9170
9171 remove_insn (insn);
9172 continue;
9173 }
9174
9175 if (GET_CODE (PATTERN (insn)) == PARALLEL
9176 && load_multiple_operation (PATTERN (insn), VOIDmode))
9177 {
9178 set = XVECEXP (PATTERN (insn), 0, 0);
9179 first = REGNO (SET_DEST (set));
9180 last = first + XVECLEN (PATTERN (insn), 0) - 1;
9181 offset = const0_rtx;
9182 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
9183 off = INTVAL (offset);
9184
9185 if (GET_CODE (base) != REG || off < 0)
9186 continue;
9187 if (cfun_frame_layout.first_restore_gpr != -1
9188 && (cfun_frame_layout.first_restore_gpr < first
9189 || cfun_frame_layout.last_restore_gpr > last))
9190 continue;
9191 if (REGNO (base) != STACK_POINTER_REGNUM
9192 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
9193 continue;
9194 if (first > BASE_REGNUM || last < BASE_REGNUM)
9195 continue;
9196
9197 if (cfun_frame_layout.first_restore_gpr != -1)
9198 {
9199 new_insn = restore_gprs (base,
9200 off + (cfun_frame_layout.first_restore_gpr
9201 - first) * UNITS_PER_WORD,
9202 cfun_frame_layout.first_restore_gpr,
9203 cfun_frame_layout.last_restore_gpr);
9204 new_insn = emit_insn_before (new_insn, insn);
9205 INSN_ADDRESSES_NEW (new_insn, -1);
9206 }
9207
9208 remove_insn (insn);
9209 continue;
9210 }
9211
9212 if (cfun_frame_layout.first_restore_gpr == -1
9213 && GET_CODE (PATTERN (insn)) == SET
9214 && GET_CODE (SET_DEST (PATTERN (insn))) == REG
9215 && (REGNO (SET_DEST (PATTERN (insn))) == BASE_REGNUM
9216 || (!TARGET_CPU_ZARCH
9217 && REGNO (SET_DEST (PATTERN (insn))) == RETURN_REGNUM))
9218 && GET_CODE (SET_SRC (PATTERN (insn))) == MEM)
9219 {
9220 set = PATTERN (insn);
9221 first = REGNO (SET_DEST (set));
9222 offset = const0_rtx;
9223 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
9224 off = INTVAL (offset);
9225
9226 if (GET_CODE (base) != REG || off < 0)
9227 continue;
9228 if (REGNO (base) != STACK_POINTER_REGNUM
9229 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
9230 continue;
9231
9232 remove_insn (insn);
9233 continue;
9234 }
9235 }
9236 }
9237
9238 /* Perform machine-dependent processing. */
9239
9240 static void
9241 s390_reorg (void)
9242 {
9243 bool pool_overflow = false;
9244
9245 /* Make sure all splits have been performed; splits after
9246 machine_dependent_reorg might confuse insn length counts. */
9247 split_all_insns_noflow ();
9248
9249 /* From here on decomposed literal pool addresses must be accepted. */
9250 cfun->machine->decomposed_literal_pool_addresses_ok_p = true;
9251
9252 /* Install the main literal pool and the associated base
9253 register load insns.
9254
9255 In addition, there are two problematic situations we need
9256 to correct:
9257
9258 - the literal pool might be > 4096 bytes in size, so that
9259 some of its elements cannot be directly accessed
9260
9261 - a branch target might be > 64K away from the branch, so that
9262 it is not possible to use a PC-relative instruction.
9263
9264 To fix those, we split the single literal pool into multiple
9265 pool chunks, reloading the pool base register at various
9266 points throughout the function to ensure it always points to
9267 the pool chunk the following code expects, and / or replace
9268 PC-relative branches by absolute branches.
9269
9270 However, the two problems are interdependent: splitting the
9271 literal pool can move a branch further away from its target,
9272 causing the 64K limit to overflow, and on the other hand,
9273 replacing a PC-relative branch by an absolute branch means
9274 we need to put the branch target address into the literal
9275 pool, possibly causing it to overflow.
9276
9277 So, we loop trying to fix up both problems until we manage
9278 to satisfy both conditions at the same time. Note that the
9279 loop is guaranteed to terminate as every pass of the loop
9280 strictly decreases the total number of PC-relative branches
9281 in the function. (This is not completely true as there
9282 might be branch-over-pool insns introduced by s390_chunkify_start.
9283 Those, however, never need to be split.)  */
9284
9285 for (;;)
9286 {
9287 struct constant_pool *pool = NULL;
9288
9289 /* Collect the literal pool. */
9290 if (!pool_overflow)
9291 {
9292 pool = s390_mainpool_start ();
9293 if (!pool)
9294 pool_overflow = true;
9295 }
9296
9297 /* If literal pool overflowed, start to chunkify it. */
9298 if (pool_overflow)
9299 pool = s390_chunkify_start ();
9300
9301 /* Split out-of-range branches. If this has created new
9302 literal pool entries, cancel current chunk list and
9303 recompute it. zSeries machines have large branch
9304 instructions, so we never need to split a branch. */
9305 if (!TARGET_CPU_ZARCH && s390_split_branches ())
9306 {
9307 if (pool_overflow)
9308 s390_chunkify_cancel (pool);
9309 else
9310 s390_mainpool_cancel (pool);
9311
9312 continue;
9313 }
9314
9315 /* If we made it up to here, both conditions are satisfied.
9316 Finish up literal pool related changes. */
9317 if (pool_overflow)
9318 s390_chunkify_finish (pool);
9319 else
9320 s390_mainpool_finish (pool);
9321
9322 /* We're done splitting branches. */
9323 cfun->machine->split_branches_pending_p = false;
9324 break;
9325 }
9326
9327 /* Generate out-of-pool execute target insns. */
9328 if (TARGET_CPU_ZARCH)
9329 {
9330 rtx insn, label, target;
9331
9332 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9333 {
9334 label = s390_execute_label (insn);
9335 if (!label)
9336 continue;
9337
9338 gcc_assert (label != const0_rtx);
9339
9340 target = emit_label (XEXP (label, 0));
9341 INSN_ADDRESSES_NEW (target, -1);
9342
9343 target = emit_insn (s390_execute_target (insn));
9344 INSN_ADDRESSES_NEW (target, -1);
9345 }
9346 }
9347
9348 /* Try to optimize prologue and epilogue further. */
9349 s390_optimize_prologue ();
9350 }
9351
9352
9353 /* Initialize GCC target structure. */
9354
9355 #undef TARGET_ASM_ALIGNED_HI_OP
9356 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
9357 #undef TARGET_ASM_ALIGNED_DI_OP
9358 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
9359 #undef TARGET_ASM_INTEGER
9360 #define TARGET_ASM_INTEGER s390_assemble_integer
9361
9362 #undef TARGET_ASM_OPEN_PAREN
9363 #define TARGET_ASM_OPEN_PAREN ""
9364
9365 #undef TARGET_ASM_CLOSE_PAREN
9366 #define TARGET_ASM_CLOSE_PAREN ""
9367
9368 #undef TARGET_DEFAULT_TARGET_FLAGS
9369 #define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | MASK_FUSED_MADD)
9370 #undef TARGET_HANDLE_OPTION
9371 #define TARGET_HANDLE_OPTION s390_handle_option
9372
9373 #undef TARGET_ENCODE_SECTION_INFO
9374 #define TARGET_ENCODE_SECTION_INFO s390_encode_section_info
9375
9376 #ifdef HAVE_AS_TLS
9377 #undef TARGET_HAVE_TLS
9378 #define TARGET_HAVE_TLS true
9379 #endif
9380 #undef TARGET_CANNOT_FORCE_CONST_MEM
9381 #define TARGET_CANNOT_FORCE_CONST_MEM s390_cannot_force_const_mem
9382
9383 #undef TARGET_DELEGITIMIZE_ADDRESS
9384 #define TARGET_DELEGITIMIZE_ADDRESS s390_delegitimize_address
9385
9386 #undef TARGET_RETURN_IN_MEMORY
9387 #define TARGET_RETURN_IN_MEMORY s390_return_in_memory
9388
9389 #undef TARGET_INIT_BUILTINS
9390 #define TARGET_INIT_BUILTINS s390_init_builtins
9391 #undef TARGET_EXPAND_BUILTIN
9392 #define TARGET_EXPAND_BUILTIN s390_expand_builtin
9393
9394 #undef TARGET_ASM_OUTPUT_MI_THUNK
9395 #define TARGET_ASM_OUTPUT_MI_THUNK s390_output_mi_thunk
9396 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
9397 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
9398
9399 #undef TARGET_SCHED_ADJUST_PRIORITY
9400 #define TARGET_SCHED_ADJUST_PRIORITY s390_adjust_priority
9401 #undef TARGET_SCHED_ISSUE_RATE
9402 #define TARGET_SCHED_ISSUE_RATE s390_issue_rate
9403 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
9404 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD s390_first_cycle_multipass_dfa_lookahead
9405
9406 #undef TARGET_CANNOT_COPY_INSN_P
9407 #define TARGET_CANNOT_COPY_INSN_P s390_cannot_copy_insn_p
9408 #undef TARGET_RTX_COSTS
9409 #define TARGET_RTX_COSTS s390_rtx_costs
9410 #undef TARGET_ADDRESS_COST
9411 #define TARGET_ADDRESS_COST s390_address_cost
9412
9413 #undef TARGET_MACHINE_DEPENDENT_REORG
9414 #define TARGET_MACHINE_DEPENDENT_REORG s390_reorg
9415
9416 #undef TARGET_VALID_POINTER_MODE
9417 #define TARGET_VALID_POINTER_MODE s390_valid_pointer_mode
9418
9419 #undef TARGET_BUILD_BUILTIN_VA_LIST
9420 #define TARGET_BUILD_BUILTIN_VA_LIST s390_build_builtin_va_list
9421 #undef TARGET_EXPAND_BUILTIN_VA_START
9422 #define TARGET_EXPAND_BUILTIN_VA_START s390_va_start
9423 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
9424 #define TARGET_GIMPLIFY_VA_ARG_EXPR s390_gimplify_va_arg
9425
9426 #undef TARGET_PROMOTE_FUNCTION_ARGS
9427 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_const_tree_true
9428 #undef TARGET_PROMOTE_FUNCTION_RETURN
9429 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true
9430 #undef TARGET_PASS_BY_REFERENCE
9431 #define TARGET_PASS_BY_REFERENCE s390_pass_by_reference
9432
9433 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
9434 #define TARGET_FUNCTION_OK_FOR_SIBCALL s390_function_ok_for_sibcall
9435
9436 #undef TARGET_FIXED_CONDITION_CODE_REGS
9437 #define TARGET_FIXED_CONDITION_CODE_REGS s390_fixed_condition_code_regs
9438
9439 #undef TARGET_CC_MODES_COMPATIBLE
9440 #define TARGET_CC_MODES_COMPATIBLE s390_cc_modes_compatible
9441
9442 #undef TARGET_INVALID_WITHIN_DOLOOP
9443 #define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_null
9444
9445 #ifdef HAVE_AS_TLS
9446 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
9447 #define TARGET_ASM_OUTPUT_DWARF_DTPREL s390_output_dwarf_dtprel
9448 #endif
9449
9450 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
9451 #undef TARGET_MANGLE_TYPE
9452 #define TARGET_MANGLE_TYPE s390_mangle_type
9453 #endif
9454
9455 #undef TARGET_SCALAR_MODE_SUPPORTED_P
9456 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
9457
9458 #undef TARGET_SECONDARY_RELOAD
9459 #define TARGET_SECONDARY_RELOAD s390_secondary_reload
9460
9461 #undef TARGET_LIBGCC_CMP_RETURN_MODE
9462 #define TARGET_LIBGCC_CMP_RETURN_MODE s390_libgcc_cmp_return_mode
9463
9464 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
9465 #define TARGET_LIBGCC_SHIFT_COUNT_MODE s390_libgcc_shift_count_mode
9466
9467 struct gcc_target targetm = TARGET_INITIALIZER;
9468
9469 #include "gt-s390.h"