builtins.c (expand_builtin_atomic_compare_exchange): Pass old value operand as MEM...
[gcc.git] / gcc / config / s390 / s390.c
1 /* Subroutines used for code generation on IBM S/390 and zSeries
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006,
3 2007, 2008, 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
4 Contributed by Hartmut Penner (hpenner@de.ibm.com) and
5 Ulrich Weigand (uweigand@de.ibm.com) and
6 Andreas Krebbel (Andreas.Krebbel@de.ibm.com).
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "tm_p.h"
31 #include "regs.h"
32 #include "hard-reg-set.h"
33 #include "insn-config.h"
34 #include "conditions.h"
35 #include "output.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "except.h"
39 #include "function.h"
40 #include "recog.h"
41 #include "expr.h"
42 #include "reload.h"
43 #include "diagnostic-core.h"
44 #include "basic-block.h"
45 #include "ggc.h"
46 #include "target.h"
47 #include "target-def.h"
48 #include "debug.h"
49 #include "langhooks.h"
50 #include "optabs.h"
51 #include "gimple.h"
52 #include "df.h"
53 #include "params.h"
54 #include "cfgloop.h"
55 #include "opts.h"
56
57 /* Define the specific costs for a given cpu. */
58
59 struct processor_costs
60 {
61 /* multiplication */
62 const int m; /* cost of an M instruction. */
63 const int mghi; /* cost of an MGHI instruction. */
64 const int mh; /* cost of an MH instruction. */
65 const int mhi; /* cost of an MHI instruction. */
66 const int ml; /* cost of an ML instruction. */
67 const int mr; /* cost of an MR instruction. */
68 const int ms; /* cost of an MS instruction. */
69 const int msg; /* cost of an MSG instruction. */
70 const int msgf; /* cost of an MSGF instruction. */
71 const int msgfr; /* cost of an MSGFR instruction. */
72 const int msgr; /* cost of an MSGR instruction. */
73 const int msr; /* cost of an MSR instruction. */
74 const int mult_df; /* cost of multiplication in DFmode. */
75 const int mxbr;
76 /* square root */
77 const int sqxbr; /* cost of square root in TFmode. */
78 const int sqdbr; /* cost of square root in DFmode. */
79 const int sqebr; /* cost of square root in SFmode. */
80 /* multiply and add */
81 const int madbr; /* cost of multiply and add in DFmode. */
82 const int maebr; /* cost of multiply and add in SFmode. */
83 /* division */
84 const int dxbr;
85 const int ddbr;
86 const int debr;
87 const int dlgr;
88 const int dlr;
89 const int dr;
90 const int dsgfr;
91 const int dsgr;
92 };
93
94 const struct processor_costs *s390_cost;
95
96 static const
97 struct processor_costs z900_cost =
98 {
99 COSTS_N_INSNS (5), /* M */
100 COSTS_N_INSNS (10), /* MGHI */
101 COSTS_N_INSNS (5), /* MH */
102 COSTS_N_INSNS (4), /* MHI */
103 COSTS_N_INSNS (5), /* ML */
104 COSTS_N_INSNS (5), /* MR */
105 COSTS_N_INSNS (4), /* MS */
106 COSTS_N_INSNS (15), /* MSG */
107 COSTS_N_INSNS (7), /* MSGF */
108 COSTS_N_INSNS (7), /* MSGFR */
109 COSTS_N_INSNS (10), /* MSGR */
110 COSTS_N_INSNS (4), /* MSR */
111 COSTS_N_INSNS (7), /* multiplication in DFmode */
112 COSTS_N_INSNS (13), /* MXBR */
113 COSTS_N_INSNS (136), /* SQXBR */
114 COSTS_N_INSNS (44), /* SQDBR */
115 COSTS_N_INSNS (35), /* SQEBR */
116 COSTS_N_INSNS (18), /* MADBR */
117 COSTS_N_INSNS (13), /* MAEBR */
118 COSTS_N_INSNS (134), /* DXBR */
119 COSTS_N_INSNS (30), /* DDBR */
120 COSTS_N_INSNS (27), /* DEBR */
121 COSTS_N_INSNS (220), /* DLGR */
122 COSTS_N_INSNS (34), /* DLR */
123 COSTS_N_INSNS (34), /* DR */
124 COSTS_N_INSNS (32), /* DSGFR */
125 COSTS_N_INSNS (32), /* DSGR */
126 };
127
128 static const
129 struct processor_costs z990_cost =
130 {
131 COSTS_N_INSNS (4), /* M */
132 COSTS_N_INSNS (2), /* MGHI */
133 COSTS_N_INSNS (2), /* MH */
134 COSTS_N_INSNS (2), /* MHI */
135 COSTS_N_INSNS (4), /* ML */
136 COSTS_N_INSNS (4), /* MR */
137 COSTS_N_INSNS (5), /* MS */
138 COSTS_N_INSNS (6), /* MSG */
139 COSTS_N_INSNS (4), /* MSGF */
140 COSTS_N_INSNS (4), /* MSGFR */
141 COSTS_N_INSNS (4), /* MSGR */
142 COSTS_N_INSNS (4), /* MSR */
143 COSTS_N_INSNS (1), /* multiplication in DFmode */
144 COSTS_N_INSNS (28), /* MXBR */
145 COSTS_N_INSNS (130), /* SQXBR */
146 COSTS_N_INSNS (66), /* SQDBR */
147 COSTS_N_INSNS (38), /* SQEBR */
148 COSTS_N_INSNS (1), /* MADBR */
149 COSTS_N_INSNS (1), /* MAEBR */
150 COSTS_N_INSNS (60), /* DXBR */
151 COSTS_N_INSNS (40), /* DDBR */
152 COSTS_N_INSNS (26), /* DEBR */
153 COSTS_N_INSNS (176), /* DLGR */
154 COSTS_N_INSNS (31), /* DLR */
155 COSTS_N_INSNS (31), /* DR */
156 COSTS_N_INSNS (31), /* DSGFR */
157 COSTS_N_INSNS (31), /* DSGR */
158 };
159
160 static const
161 struct processor_costs z9_109_cost =
162 {
163 COSTS_N_INSNS (4), /* M */
164 COSTS_N_INSNS (2), /* MGHI */
165 COSTS_N_INSNS (2), /* MH */
166 COSTS_N_INSNS (2), /* MHI */
167 COSTS_N_INSNS (4), /* ML */
168 COSTS_N_INSNS (4), /* MR */
169 COSTS_N_INSNS (5), /* MS */
170 COSTS_N_INSNS (6), /* MSG */
171 COSTS_N_INSNS (4), /* MSGF */
172 COSTS_N_INSNS (4), /* MSGFR */
173 COSTS_N_INSNS (4), /* MSGR */
174 COSTS_N_INSNS (4), /* MSR */
175 COSTS_N_INSNS (1), /* multiplication in DFmode */
176 COSTS_N_INSNS (28), /* MXBR */
177 COSTS_N_INSNS (130), /* SQXBR */
178 COSTS_N_INSNS (66), /* SQDBR */
179 COSTS_N_INSNS (38), /* SQEBR */
180 COSTS_N_INSNS (1), /* MADBR */
181 COSTS_N_INSNS (1), /* MAEBR */
182 COSTS_N_INSNS (60), /* DXBR */
183 COSTS_N_INSNS (40), /* DDBR */
184 COSTS_N_INSNS (26), /* DEBR */
185 COSTS_N_INSNS (30), /* DLGR */
186 COSTS_N_INSNS (23), /* DLR */
187 COSTS_N_INSNS (23), /* DR */
188 COSTS_N_INSNS (24), /* DSGFR */
189 COSTS_N_INSNS (24), /* DSGR */
190 };
191
192 static const
193 struct processor_costs z10_cost =
194 {
195 COSTS_N_INSNS (10), /* M */
196 COSTS_N_INSNS (10), /* MGHI */
197 COSTS_N_INSNS (10), /* MH */
198 COSTS_N_INSNS (10), /* MHI */
199 COSTS_N_INSNS (10), /* ML */
200 COSTS_N_INSNS (10), /* MR */
201 COSTS_N_INSNS (10), /* MS */
202 COSTS_N_INSNS (10), /* MSG */
203 COSTS_N_INSNS (10), /* MSGF */
204 COSTS_N_INSNS (10), /* MSGFR */
205 COSTS_N_INSNS (10), /* MSGR */
206 COSTS_N_INSNS (10), /* MSR */
207 COSTS_N_INSNS (1) , /* multiplication in DFmode */
208 COSTS_N_INSNS (50), /* MXBR */
209 COSTS_N_INSNS (120), /* SQXBR */
210 COSTS_N_INSNS (52), /* SQDBR */
211 COSTS_N_INSNS (38), /* SQEBR */
212 COSTS_N_INSNS (1), /* MADBR */
213 COSTS_N_INSNS (1), /* MAEBR */
214 COSTS_N_INSNS (111), /* DXBR */
215 COSTS_N_INSNS (39), /* DDBR */
216 COSTS_N_INSNS (32), /* DEBR */
217 COSTS_N_INSNS (160), /* DLGR */
218 COSTS_N_INSNS (71), /* DLR */
219 COSTS_N_INSNS (71), /* DR */
220 COSTS_N_INSNS (71), /* DSGFR */
221 COSTS_N_INSNS (71), /* DSGR */
222 };
223
224 static const
225 struct processor_costs z196_cost =
226 {
227 COSTS_N_INSNS (7), /* M */
228 COSTS_N_INSNS (5), /* MGHI */
229 COSTS_N_INSNS (5), /* MH */
230 COSTS_N_INSNS (5), /* MHI */
231 COSTS_N_INSNS (7), /* ML */
232 COSTS_N_INSNS (7), /* MR */
233 COSTS_N_INSNS (6), /* MS */
234 COSTS_N_INSNS (8), /* MSG */
235 COSTS_N_INSNS (6), /* MSGF */
236 COSTS_N_INSNS (6), /* MSGFR */
237 COSTS_N_INSNS (8), /* MSGR */
238 COSTS_N_INSNS (6), /* MSR */
239 COSTS_N_INSNS (1) , /* multiplication in DFmode */
240 COSTS_N_INSNS (40), /* MXBR B+40 */
241 COSTS_N_INSNS (100), /* SQXBR B+100 */
242 COSTS_N_INSNS (42), /* SQDBR B+42 */
243 COSTS_N_INSNS (28), /* SQEBR B+28 */
244 COSTS_N_INSNS (1), /* MADBR B */
245 COSTS_N_INSNS (1), /* MAEBR B */
246 COSTS_N_INSNS (101), /* DXBR B+101 */
247 COSTS_N_INSNS (29), /* DDBR */
248 COSTS_N_INSNS (22), /* DEBR */
249 COSTS_N_INSNS (160), /* DLGR cracked */
250 COSTS_N_INSNS (160), /* DLR cracked */
251 COSTS_N_INSNS (160), /* DR expanded */
252 COSTS_N_INSNS (160), /* DSGFR cracked */
253 COSTS_N_INSNS (160), /* DSGR cracked */
254 };
255
256 extern int reload_completed;
257
258 /* Kept up to date using the SCHED_VARIABLE_ISSUE hook. */
259 static rtx last_scheduled_insn;
260
261 /* Structure used to hold the components of a S/390 memory
262 address. A legitimate address on S/390 is of the general
263 form
264 base + index + displacement
265 where any of the components is optional.
266
267 base and index are registers of the class ADDR_REGS,
268 displacement is an unsigned 12-bit immediate constant. */
269
270 struct s390_address
271 {
272 rtx base;
273 rtx indx;
274 rtx disp;
275 bool pointer;
276 bool literal_pool;
277 };
278
279 /* The following structure is embedded in the machine
280 specific part of struct function. */
281
282 struct GTY (()) s390_frame_layout
283 {
284 /* Offset within stack frame. */
285 HOST_WIDE_INT gprs_offset;
286 HOST_WIDE_INT f0_offset;
287 HOST_WIDE_INT f4_offset;
288 HOST_WIDE_INT f8_offset;
289 HOST_WIDE_INT backchain_offset;
290
291 /* Number of first and last gpr where slots in the register
292 save area are reserved for. */
293 int first_save_gpr_slot;
294 int last_save_gpr_slot;
295
296 /* Number of first and last gpr to be saved, restored. */
297 int first_save_gpr;
298 int first_restore_gpr;
299 int last_save_gpr;
300 int last_restore_gpr;
301
302 /* Bits standing for floating point registers. Set, if the
303 respective register has to be saved. Starting with reg 16 (f0)
304 at the rightmost bit.
305 Bit 15 - 8 7 6 5 4 3 2 1 0
306 fpr 15 - 8 7 5 3 1 6 4 2 0
307 reg 31 - 24 23 22 21 20 19 18 17 16 */
308 unsigned int fpr_bitmap;
309
310 /* Number of floating point registers f8-f15 which must be saved. */
311 int high_fprs;
312
313 /* Set if return address needs to be saved.
314 This flag is set by s390_return_addr_rtx if it could not use
315 the initial value of r14 and therefore depends on r14 saved
316 to the stack. */
317 bool save_return_addr_p;
318
319 /* Size of stack frame. */
320 HOST_WIDE_INT frame_size;
321 };
322
323 /* Define the structure for the machine field in struct function. */
324
325 struct GTY(()) machine_function
326 {
327 struct s390_frame_layout frame_layout;
328
329 /* Literal pool base register. */
330 rtx base_reg;
331
332 /* True if we may need to perform branch splitting. */
333 bool split_branches_pending_p;
334
335 /* Some local-dynamic TLS symbol name. */
336 const char *some_ld_name;
337
338 bool has_landing_pad_p;
339 };
340
341 /* Few accessor macros for struct cfun->machine->s390_frame_layout. */
342
343 #define cfun_frame_layout (cfun->machine->frame_layout)
344 #define cfun_save_high_fprs_p (!!cfun_frame_layout.high_fprs)
345 #define cfun_gprs_save_area_size ((cfun_frame_layout.last_save_gpr_slot - \
346 cfun_frame_layout.first_save_gpr_slot + 1) * UNITS_PER_LONG)
347 #define cfun_set_fpr_bit(BITNUM) (cfun->machine->frame_layout.fpr_bitmap |= \
348 (1 << (BITNUM)))
349 #define cfun_fpr_bit_p(BITNUM) (!!(cfun->machine->frame_layout.fpr_bitmap & \
350 (1 << (BITNUM))))
351
352 /* Number of GPRs and FPRs used for argument passing. */
353 #define GP_ARG_NUM_REG 5
354 #define FP_ARG_NUM_REG (TARGET_64BIT? 4 : 2)
355
356 /* A couple of shortcuts. */
357 #define CONST_OK_FOR_J(x) \
358 CONST_OK_FOR_CONSTRAINT_P((x), 'J', "J")
359 #define CONST_OK_FOR_K(x) \
360 CONST_OK_FOR_CONSTRAINT_P((x), 'K', "K")
361 #define CONST_OK_FOR_Os(x) \
362 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
363 #define CONST_OK_FOR_Op(x) \
364 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
365 #define CONST_OK_FOR_On(x) \
366 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")
367
368 #define REGNO_PAIR_OK(REGNO, MODE) \
369 (HARD_REGNO_NREGS ((REGNO), (MODE)) == 1 || !((REGNO) & 1))
370
371 /* That's the read ahead of the dynamic branch prediction unit in
372 bytes on a z10 (or higher) CPU. */
373 #define PREDICT_DISTANCE (TARGET_Z10 ? 384 : 2048)
374
375 /* Return the alignment for LABEL. We default to the -falign-labels
376 value except for the literal pool base label. */
377 int
378 s390_label_align (rtx label)
379 {
380 rtx prev_insn = prev_active_insn (label);
381
382 if (prev_insn == NULL_RTX)
383 goto old;
384
385 prev_insn = single_set (prev_insn);
386
387 if (prev_insn == NULL_RTX)
388 goto old;
389
390 prev_insn = SET_SRC (prev_insn);
391
392 /* Don't align literal pool base labels. */
393 if (GET_CODE (prev_insn) == UNSPEC
394 && XINT (prev_insn, 1) == UNSPEC_MAIN_BASE)
395 return 0;
396
397 old:
398 return align_labels_log;
399 }
400
401 static enum machine_mode
402 s390_libgcc_cmp_return_mode (void)
403 {
404 return TARGET_64BIT ? DImode : SImode;
405 }
406
407 static enum machine_mode
408 s390_libgcc_shift_count_mode (void)
409 {
410 return TARGET_64BIT ? DImode : SImode;
411 }
412
413 static enum machine_mode
414 s390_unwind_word_mode (void)
415 {
416 return TARGET_64BIT ? DImode : SImode;
417 }
418
419 /* Return true if the back end supports mode MODE. */
420 static bool
421 s390_scalar_mode_supported_p (enum machine_mode mode)
422 {
423 /* In contrast to the default implementation reject TImode constants on 31bit
424 TARGET_ZARCH for ABI compliance. */
425 if (!TARGET_64BIT && TARGET_ZARCH && mode == TImode)
426 return false;
427
428 if (DECIMAL_FLOAT_MODE_P (mode))
429 return default_decimal_float_supported_p ();
430
431 return default_scalar_mode_supported_p (mode);
432 }
433
434 /* Set the has_landing_pad_p flag in struct machine_function to VALUE. */
435
436 void
437 s390_set_has_landing_pad_p (bool value)
438 {
439 cfun->machine->has_landing_pad_p = value;
440 }
441
442 /* If two condition code modes are compatible, return a condition code
443 mode which is compatible with both. Otherwise, return
444 VOIDmode. */
445
446 static enum machine_mode
447 s390_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
448 {
449 if (m1 == m2)
450 return m1;
451
452 switch (m1)
453 {
454 case CCZmode:
455 if (m2 == CCUmode || m2 == CCTmode || m2 == CCZ1mode
456 || m2 == CCSmode || m2 == CCSRmode || m2 == CCURmode)
457 return m2;
458 return VOIDmode;
459
460 case CCSmode:
461 case CCUmode:
462 case CCTmode:
463 case CCSRmode:
464 case CCURmode:
465 case CCZ1mode:
466 if (m2 == CCZmode)
467 return m1;
468
469 return VOIDmode;
470
471 default:
472 return VOIDmode;
473 }
474 return VOIDmode;
475 }
476
477 /* Return true if SET either doesn't set the CC register, or else
478 the source and destination have matching CC modes and that
479 CC mode is at least as constrained as REQ_MODE. */
480
481 static bool
482 s390_match_ccmode_set (rtx set, enum machine_mode req_mode)
483 {
484 enum machine_mode set_mode;
485
486 gcc_assert (GET_CODE (set) == SET);
487
488 if (GET_CODE (SET_DEST (set)) != REG || !CC_REGNO_P (REGNO (SET_DEST (set))))
489 return 1;
490
491 set_mode = GET_MODE (SET_DEST (set));
492 switch (set_mode)
493 {
494 case CCSmode:
495 case CCSRmode:
496 case CCUmode:
497 case CCURmode:
498 case CCLmode:
499 case CCL1mode:
500 case CCL2mode:
501 case CCL3mode:
502 case CCT1mode:
503 case CCT2mode:
504 case CCT3mode:
505 if (req_mode != set_mode)
506 return 0;
507 break;
508
509 case CCZmode:
510 if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode
511 && req_mode != CCSRmode && req_mode != CCURmode)
512 return 0;
513 break;
514
515 case CCAPmode:
516 case CCANmode:
517 if (req_mode != CCAmode)
518 return 0;
519 break;
520
521 default:
522 gcc_unreachable ();
523 }
524
525 return (GET_MODE (SET_SRC (set)) == set_mode);
526 }
527
528 /* Return true if every SET in INSN that sets the CC register
529 has source and destination with matching CC modes and that
530 CC mode is at least as constrained as REQ_MODE.
531 If REQ_MODE is VOIDmode, always return false. */
532
533 bool
534 s390_match_ccmode (rtx insn, enum machine_mode req_mode)
535 {
536 int i;
537
538 /* s390_tm_ccmode returns VOIDmode to indicate failure. */
539 if (req_mode == VOIDmode)
540 return false;
541
542 if (GET_CODE (PATTERN (insn)) == SET)
543 return s390_match_ccmode_set (PATTERN (insn), req_mode);
544
545 if (GET_CODE (PATTERN (insn)) == PARALLEL)
546 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
547 {
548 rtx set = XVECEXP (PATTERN (insn), 0, i);
549 if (GET_CODE (set) == SET)
550 if (!s390_match_ccmode_set (set, req_mode))
551 return false;
552 }
553
554 return true;
555 }
556
557 /* If a test-under-mask instruction can be used to implement
558 (compare (and ... OP1) OP2), return the CC mode required
559 to do that. Otherwise, return VOIDmode.
560 MIXED is true if the instruction can distinguish between
561 CC1 and CC2 for mixed selected bits (TMxx), it is false
562 if the instruction cannot (TM). */
563
564 enum machine_mode
565 s390_tm_ccmode (rtx op1, rtx op2, bool mixed)
566 {
567 int bit0, bit1;
568
569 /* ??? Fixme: should work on CONST_DOUBLE as well. */
570 if (GET_CODE (op1) != CONST_INT || GET_CODE (op2) != CONST_INT)
571 return VOIDmode;
572
573 /* Selected bits all zero: CC0.
574 e.g.: int a; if ((a & (16 + 128)) == 0) */
575 if (INTVAL (op2) == 0)
576 return CCTmode;
577
578 /* Selected bits all one: CC3.
579 e.g.: int a; if ((a & (16 + 128)) == 16 + 128) */
580 if (INTVAL (op2) == INTVAL (op1))
581 return CCT3mode;
582
583 /* Exactly two bits selected, mixed zeroes and ones: CC1 or CC2. e.g.:
584 int a;
585 if ((a & (16 + 128)) == 16) -> CCT1
586 if ((a & (16 + 128)) == 128) -> CCT2 */
587 if (mixed)
588 {
589 bit1 = exact_log2 (INTVAL (op2));
590 bit0 = exact_log2 (INTVAL (op1) ^ INTVAL (op2));
591 if (bit0 != -1 && bit1 != -1)
592 return bit0 > bit1 ? CCT1mode : CCT2mode;
593 }
594
595 return VOIDmode;
596 }
597
598 /* Given a comparison code OP (EQ, NE, etc.) and the operands
599 OP0 and OP1 of a COMPARE, return the mode to be used for the
600 comparison. */
601
602 enum machine_mode
603 s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
604 {
605 switch (code)
606 {
607 case EQ:
608 case NE:
609 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
610 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
611 return CCAPmode;
612 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
613 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
614 return CCAPmode;
615 if ((GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
616 || GET_CODE (op1) == NEG)
617 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
618 return CCLmode;
619
620 if (GET_CODE (op0) == AND)
621 {
622 /* Check whether we can potentially do it via TM. */
623 enum machine_mode ccmode;
624 ccmode = s390_tm_ccmode (XEXP (op0, 1), op1, 1);
625 if (ccmode != VOIDmode)
626 {
627 /* Relax CCTmode to CCZmode to allow fall-back to AND
628 if that turns out to be beneficial. */
629 return ccmode == CCTmode ? CCZmode : ccmode;
630 }
631 }
632
633 if (register_operand (op0, HImode)
634 && GET_CODE (op1) == CONST_INT
635 && (INTVAL (op1) == -1 || INTVAL (op1) == 65535))
636 return CCT3mode;
637 if (register_operand (op0, QImode)
638 && GET_CODE (op1) == CONST_INT
639 && (INTVAL (op1) == -1 || INTVAL (op1) == 255))
640 return CCT3mode;
641
642 return CCZmode;
643
644 case LE:
645 case LT:
646 case GE:
647 case GT:
648 /* The only overflow condition of NEG and ABS happens when
649 -INT_MAX is used as parameter, which stays negative. So
650 we have an overflow from a positive value to a negative.
651 Using CCAP mode the resulting cc can be used for comparisons. */
652 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
653 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
654 return CCAPmode;
655
656 /* If constants are involved in an add instruction it is possible to use
657 the resulting cc for comparisons with zero. Knowing the sign of the
658 constant the overflow behavior gets predictable. e.g.:
659 int a, b; if ((b = a + c) > 0)
660 with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP */
661 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
662 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
663 {
664 if (INTVAL (XEXP((op0), 1)) < 0)
665 return CCANmode;
666 else
667 return CCAPmode;
668 }
669 /* Fall through. */
670 case UNORDERED:
671 case ORDERED:
672 case UNEQ:
673 case UNLE:
674 case UNLT:
675 case UNGE:
676 case UNGT:
677 case LTGT:
678 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
679 && GET_CODE (op1) != CONST_INT)
680 return CCSRmode;
681 return CCSmode;
682
683 case LTU:
684 case GEU:
685 if (GET_CODE (op0) == PLUS
686 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
687 return CCL1mode;
688
689 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
690 && GET_CODE (op1) != CONST_INT)
691 return CCURmode;
692 return CCUmode;
693
694 case LEU:
695 case GTU:
696 if (GET_CODE (op0) == MINUS
697 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
698 return CCL2mode;
699
700 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
701 && GET_CODE (op1) != CONST_INT)
702 return CCURmode;
703 return CCUmode;
704
705 default:
706 gcc_unreachable ();
707 }
708 }
709
710 /* Replace the comparison OP0 CODE OP1 by a semantically equivalent one
711 that we can implement more efficiently. */
712
713 void
714 s390_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1)
715 {
716 /* Convert ZERO_EXTRACT back to AND to enable TM patterns. */
717 if ((*code == EQ || *code == NE)
718 && *op1 == const0_rtx
719 && GET_CODE (*op0) == ZERO_EXTRACT
720 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
721 && GET_CODE (XEXP (*op0, 2)) == CONST_INT
722 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
723 {
724 rtx inner = XEXP (*op0, 0);
725 HOST_WIDE_INT modesize = GET_MODE_BITSIZE (GET_MODE (inner));
726 HOST_WIDE_INT len = INTVAL (XEXP (*op0, 1));
727 HOST_WIDE_INT pos = INTVAL (XEXP (*op0, 2));
728
729 if (len > 0 && len < modesize
730 && pos >= 0 && pos + len <= modesize
731 && modesize <= HOST_BITS_PER_WIDE_INT)
732 {
733 unsigned HOST_WIDE_INT block;
734 block = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
735 block <<= modesize - pos - len;
736
737 *op0 = gen_rtx_AND (GET_MODE (inner), inner,
738 gen_int_mode (block, GET_MODE (inner)));
739 }
740 }
741
742 /* Narrow AND of memory against immediate to enable TM. */
743 if ((*code == EQ || *code == NE)
744 && *op1 == const0_rtx
745 && GET_CODE (*op0) == AND
746 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
747 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
748 {
749 rtx inner = XEXP (*op0, 0);
750 rtx mask = XEXP (*op0, 1);
751
752 /* Ignore paradoxical SUBREGs if all extra bits are masked out. */
753 if (GET_CODE (inner) == SUBREG
754 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (inner)))
755 && (GET_MODE_SIZE (GET_MODE (inner))
756 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
757 && ((INTVAL (mask)
758 & GET_MODE_MASK (GET_MODE (inner))
759 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
760 == 0))
761 inner = SUBREG_REG (inner);
762
763 /* Do not change volatile MEMs. */
764 if (MEM_P (inner) && !MEM_VOLATILE_P (inner))
765 {
766 int part = s390_single_part (XEXP (*op0, 1),
767 GET_MODE (inner), QImode, 0);
768 if (part >= 0)
769 {
770 mask = gen_int_mode (s390_extract_part (mask, QImode, 0), QImode);
771 inner = adjust_address_nv (inner, QImode, part);
772 *op0 = gen_rtx_AND (QImode, inner, mask);
773 }
774 }
775 }
776
777 /* Narrow comparisons against 0xffff to HImode if possible. */
778 if ((*code == EQ || *code == NE)
779 && GET_CODE (*op1) == CONST_INT
780 && INTVAL (*op1) == 0xffff
781 && SCALAR_INT_MODE_P (GET_MODE (*op0))
782 && (nonzero_bits (*op0, GET_MODE (*op0))
783 & ~(unsigned HOST_WIDE_INT) 0xffff) == 0)
784 {
785 *op0 = gen_lowpart (HImode, *op0);
786 *op1 = constm1_rtx;
787 }
788
789 /* Remove redundant UNSPEC_CCU_TO_INT conversions if possible. */
790 if (GET_CODE (*op0) == UNSPEC
791 && XINT (*op0, 1) == UNSPEC_CCU_TO_INT
792 && XVECLEN (*op0, 0) == 1
793 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCUmode
794 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
795 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
796 && *op1 == const0_rtx)
797 {
798 enum rtx_code new_code = UNKNOWN;
799 switch (*code)
800 {
801 case EQ: new_code = EQ; break;
802 case NE: new_code = NE; break;
803 case LT: new_code = GTU; break;
804 case GT: new_code = LTU; break;
805 case LE: new_code = GEU; break;
806 case GE: new_code = LEU; break;
807 default: break;
808 }
809
810 if (new_code != UNKNOWN)
811 {
812 *op0 = XVECEXP (*op0, 0, 0);
813 *code = new_code;
814 }
815 }
816
817 /* Remove redundant UNSPEC_CCZ_TO_INT conversions if possible. */
818 if (GET_CODE (*op0) == UNSPEC
819 && XINT (*op0, 1) == UNSPEC_CCZ_TO_INT
820 && XVECLEN (*op0, 0) == 1
821 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCZmode
822 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
823 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
824 && *op1 == const0_rtx)
825 {
826 enum rtx_code new_code = UNKNOWN;
827 switch (*code)
828 {
829 case EQ: new_code = EQ; break;
830 case NE: new_code = NE; break;
831 default: break;
832 }
833
834 if (new_code != UNKNOWN)
835 {
836 *op0 = XVECEXP (*op0, 0, 0);
837 *code = new_code;
838 }
839 }
840
841 /* Simplify cascaded EQ, NE with const0_rtx. */
842 if ((*code == NE || *code == EQ)
843 && (GET_CODE (*op0) == EQ || GET_CODE (*op0) == NE)
844 && GET_MODE (*op0) == SImode
845 && GET_MODE (XEXP (*op0, 0)) == CCZ1mode
846 && REG_P (XEXP (*op0, 0))
847 && XEXP (*op0, 1) == const0_rtx
848 && *op1 == const0_rtx)
849 {
850 if ((*code == EQ && GET_CODE (*op0) == NE)
851 || (*code == NE && GET_CODE (*op0) == EQ))
852 *code = EQ;
853 else
854 *code = NE;
855 *op0 = XEXP (*op0, 0);
856 }
857
858 /* Prefer register over memory as first operand. */
859 if (MEM_P (*op0) && REG_P (*op1))
860 {
861 rtx tem = *op0; *op0 = *op1; *op1 = tem;
862 *code = swap_condition (*code);
863 }
864 }
865
866 /* Emit a compare instruction suitable to implement the comparison
867 OP0 CODE OP1. Return the correct condition RTL to be placed in
868 the IF_THEN_ELSE of the conditional branch testing the result. */
869
870 rtx
871 s390_emit_compare (enum rtx_code code, rtx op0, rtx op1)
872 {
873 enum machine_mode mode = s390_select_ccmode (code, op0, op1);
874 rtx cc;
875
876 /* Do not output a redundant compare instruction if a compare_and_swap
877 pattern already computed the result and the machine modes are compatible. */
878 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
879 {
880 gcc_assert (s390_cc_modes_compatible (GET_MODE (op0), mode)
881 == GET_MODE (op0));
882 cc = op0;
883 }
884 else
885 {
886 cc = gen_rtx_REG (mode, CC_REGNUM);
887 emit_insn (gen_rtx_SET (VOIDmode, cc, gen_rtx_COMPARE (mode, op0, op1)));
888 }
889
890 return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
891 }
892
893 /* Emit a SImode compare and swap instruction setting MEM to NEW_RTX if OLD
894 matches CMP.
895 Return the correct condition RTL to be placed in the IF_THEN_ELSE of the
896 conditional branch testing the result. */
897
898 static rtx
899 s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem,
900 rtx cmp, rtx new_rtx)
901 {
902 emit_insn (gen_atomic_compare_and_swapsi_internal (old, mem, cmp, new_rtx));
903 return s390_emit_compare (code, gen_rtx_REG (CCZ1mode, CC_REGNUM),
904 const0_rtx);
905 }
906
907 /* Emit a jump instruction to TARGET. If COND is NULL_RTX, emit an
908 unconditional jump, else a conditional jump under condition COND. */
909
910 void
911 s390_emit_jump (rtx target, rtx cond)
912 {
913 rtx insn;
914
915 target = gen_rtx_LABEL_REF (VOIDmode, target);
916 if (cond)
917 target = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, target, pc_rtx);
918
919 insn = gen_rtx_SET (VOIDmode, pc_rtx, target);
920 emit_jump_insn (insn);
921 }
922
923 /* Return branch condition mask to implement a branch
924 specified by CODE. Return -1 for invalid comparisons. */
925
926 int
927 s390_branch_condition_mask (rtx code)
928 {
929 const int CC0 = 1 << 3;
930 const int CC1 = 1 << 2;
931 const int CC2 = 1 << 1;
932 const int CC3 = 1 << 0;
933
934 gcc_assert (GET_CODE (XEXP (code, 0)) == REG);
935 gcc_assert (REGNO (XEXP (code, 0)) == CC_REGNUM);
936 gcc_assert (XEXP (code, 1) == const0_rtx);
937
938 switch (GET_MODE (XEXP (code, 0)))
939 {
940 case CCZmode:
941 case CCZ1mode:
942 switch (GET_CODE (code))
943 {
944 case EQ: return CC0;
945 case NE: return CC1 | CC2 | CC3;
946 default: return -1;
947 }
948 break;
949
950 case CCT1mode:
951 switch (GET_CODE (code))
952 {
953 case EQ: return CC1;
954 case NE: return CC0 | CC2 | CC3;
955 default: return -1;
956 }
957 break;
958
959 case CCT2mode:
960 switch (GET_CODE (code))
961 {
962 case EQ: return CC2;
963 case NE: return CC0 | CC1 | CC3;
964 default: return -1;
965 }
966 break;
967
968 case CCT3mode:
969 switch (GET_CODE (code))
970 {
971 case EQ: return CC3;
972 case NE: return CC0 | CC1 | CC2;
973 default: return -1;
974 }
975 break;
976
977 case CCLmode:
978 switch (GET_CODE (code))
979 {
980 case EQ: return CC0 | CC2;
981 case NE: return CC1 | CC3;
982 default: return -1;
983 }
984 break;
985
986 case CCL1mode:
987 switch (GET_CODE (code))
988 {
989 case LTU: return CC2 | CC3; /* carry */
990 case GEU: return CC0 | CC1; /* no carry */
991 default: return -1;
992 }
993 break;
994
995 case CCL2mode:
996 switch (GET_CODE (code))
997 {
998 case GTU: return CC0 | CC1; /* borrow */
999 case LEU: return CC2 | CC3; /* no borrow */
1000 default: return -1;
1001 }
1002 break;
1003
1004 case CCL3mode:
1005 switch (GET_CODE (code))
1006 {
1007 case EQ: return CC0 | CC2;
1008 case NE: return CC1 | CC3;
1009 case LTU: return CC1;
1010 case GTU: return CC3;
1011 case LEU: return CC1 | CC2;
1012 case GEU: return CC2 | CC3;
1013 default: return -1;
1014 }
1015
1016 case CCUmode:
1017 switch (GET_CODE (code))
1018 {
1019 case EQ: return CC0;
1020 case NE: return CC1 | CC2 | CC3;
1021 case LTU: return CC1;
1022 case GTU: return CC2;
1023 case LEU: return CC0 | CC1;
1024 case GEU: return CC0 | CC2;
1025 default: return -1;
1026 }
1027 break;
1028
1029 case CCURmode:
1030 switch (GET_CODE (code))
1031 {
1032 case EQ: return CC0;
1033 case NE: return CC2 | CC1 | CC3;
1034 case LTU: return CC2;
1035 case GTU: return CC1;
1036 case LEU: return CC0 | CC2;
1037 case GEU: return CC0 | CC1;
1038 default: return -1;
1039 }
1040 break;
1041
1042 case CCAPmode:
1043 switch (GET_CODE (code))
1044 {
1045 case EQ: return CC0;
1046 case NE: return CC1 | CC2 | CC3;
1047 case LT: return CC1 | CC3;
1048 case GT: return CC2;
1049 case LE: return CC0 | CC1 | CC3;
1050 case GE: return CC0 | CC2;
1051 default: return -1;
1052 }
1053 break;
1054
1055 case CCANmode:
1056 switch (GET_CODE (code))
1057 {
1058 case EQ: return CC0;
1059 case NE: return CC1 | CC2 | CC3;
1060 case LT: return CC1;
1061 case GT: return CC2 | CC3;
1062 case LE: return CC0 | CC1;
1063 case GE: return CC0 | CC2 | CC3;
1064 default: return -1;
1065 }
1066 break;
1067
1068 case CCSmode:
1069 switch (GET_CODE (code))
1070 {
1071 case EQ: return CC0;
1072 case NE: return CC1 | CC2 | CC3;
1073 case LT: return CC1;
1074 case GT: return CC2;
1075 case LE: return CC0 | CC1;
1076 case GE: return CC0 | CC2;
1077 case UNORDERED: return CC3;
1078 case ORDERED: return CC0 | CC1 | CC2;
1079 case UNEQ: return CC0 | CC3;
1080 case UNLT: return CC1 | CC3;
1081 case UNGT: return CC2 | CC3;
1082 case UNLE: return CC0 | CC1 | CC3;
1083 case UNGE: return CC0 | CC2 | CC3;
1084 case LTGT: return CC1 | CC2;
1085 default: return -1;
1086 }
1087 break;
1088
1089 case CCSRmode:
1090 switch (GET_CODE (code))
1091 {
1092 case EQ: return CC0;
1093 case NE: return CC2 | CC1 | CC3;
1094 case LT: return CC2;
1095 case GT: return CC1;
1096 case LE: return CC0 | CC2;
1097 case GE: return CC0 | CC1;
1098 case UNORDERED: return CC3;
1099 case ORDERED: return CC0 | CC2 | CC1;
1100 case UNEQ: return CC0 | CC3;
1101 case UNLT: return CC2 | CC3;
1102 case UNGT: return CC1 | CC3;
1103 case UNLE: return CC0 | CC2 | CC3;
1104 case UNGE: return CC0 | CC1 | CC3;
1105 case LTGT: return CC2 | CC1;
1106 default: return -1;
1107 }
1108 break;
1109
1110 default:
1111 return -1;
1112 }
1113 }
1114
1115
1116 /* Return branch condition mask to implement a compare and branch
1117 specified by CODE. Return -1 for invalid comparisons. */
1118
1119 int
1120 s390_compare_and_branch_condition_mask (rtx code)
1121 {
1122 const int CC0 = 1 << 3;
1123 const int CC1 = 1 << 2;
1124 const int CC2 = 1 << 1;
1125
1126 switch (GET_CODE (code))
1127 {
1128 case EQ:
1129 return CC0;
1130 case NE:
1131 return CC1 | CC2;
1132 case LT:
1133 case LTU:
1134 return CC1;
1135 case GT:
1136 case GTU:
1137 return CC2;
1138 case LE:
1139 case LEU:
1140 return CC0 | CC1;
1141 case GE:
1142 case GEU:
1143 return CC0 | CC2;
1144 default:
1145 gcc_unreachable ();
1146 }
1147 return -1;
1148 }
1149
1150 /* If INV is false, return assembler mnemonic string to implement
1151 a branch specified by CODE. If INV is true, return mnemonic
1152 for the corresponding inverted branch. */
1153
1154 static const char *
1155 s390_branch_condition_mnemonic (rtx code, int inv)
1156 {
1157 int mask;
1158
1159 static const char *const mnemonic[16] =
1160 {
1161 NULL, "o", "h", "nle",
1162 "l", "nhe", "lh", "ne",
1163 "e", "nlh", "he", "nl",
1164 "le", "nh", "no", NULL
1165 };
1166
1167 if (GET_CODE (XEXP (code, 0)) == REG
1168 && REGNO (XEXP (code, 0)) == CC_REGNUM
1169 && XEXP (code, 1) == const0_rtx)
1170 mask = s390_branch_condition_mask (code);
1171 else
1172 mask = s390_compare_and_branch_condition_mask (code);
1173
1174 gcc_assert (mask >= 0);
1175
1176 if (inv)
1177 mask ^= 15;
1178
1179 gcc_assert (mask >= 1 && mask <= 14);
1180
1181 return mnemonic[mask];
1182 }
1183
1184 /* Return the part of op which has a value different from def.
1185 The size of the part is determined by mode.
1186 Use this function only if you already know that op really
1187 contains such a part. */
1188
1189 unsigned HOST_WIDE_INT
1190 s390_extract_part (rtx op, enum machine_mode mode, int def)
1191 {
1192 unsigned HOST_WIDE_INT value = 0;
1193 int max_parts = HOST_BITS_PER_WIDE_INT / GET_MODE_BITSIZE (mode);
1194 int part_bits = GET_MODE_BITSIZE (mode);
1195 unsigned HOST_WIDE_INT part_mask
1196 = ((unsigned HOST_WIDE_INT)1 << part_bits) - 1;
1197 int i;
1198
1199 for (i = 0; i < max_parts; i++)
1200 {
1201 if (i == 0)
1202 value = (unsigned HOST_WIDE_INT) INTVAL (op);
1203 else
1204 value >>= part_bits;
1205
1206 if ((value & part_mask) != (def & part_mask))
1207 return value & part_mask;
1208 }
1209
1210 gcc_unreachable ();
1211 }
1212
1213 /* If OP is an integer constant of mode MODE with exactly one
1214 part of mode PART_MODE unequal to DEF, return the number of that
1215 part. Otherwise, return -1. */
1216
1217 int
1218 s390_single_part (rtx op,
1219 enum machine_mode mode,
1220 enum machine_mode part_mode,
1221 int def)
1222 {
1223 unsigned HOST_WIDE_INT value = 0;
1224 int n_parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (part_mode);
1225 unsigned HOST_WIDE_INT part_mask
1226 = ((unsigned HOST_WIDE_INT)1 << GET_MODE_BITSIZE (part_mode)) - 1;
1227 int i, part = -1;
1228
1229 if (GET_CODE (op) != CONST_INT)
1230 return -1;
1231
1232 for (i = 0; i < n_parts; i++)
1233 {
1234 if (i == 0)
1235 value = (unsigned HOST_WIDE_INT) INTVAL (op);
1236 else
1237 value >>= GET_MODE_BITSIZE (part_mode);
1238
1239 if ((value & part_mask) != (def & part_mask))
1240 {
1241 if (part != -1)
1242 return -1;
1243 else
1244 part = i;
1245 }
1246 }
1247 return part == -1 ? -1 : n_parts - 1 - part;
1248 }
1249
1250 /* Return true if IN contains a contiguous bitfield in the lower SIZE
1251 bits and no other bits are set in IN. POS and LENGTH can be used
1252 to obtain the start position and the length of the bitfield.
1253
1254 POS gives the position of the first bit of the bitfield counting
1255 from the lowest order bit starting with zero. In order to use this
1256 value for S/390 instructions this has to be converted to "bits big
1257 endian" style. */
1258
1259 bool
1260 s390_contiguous_bitmask_p (unsigned HOST_WIDE_INT in, int size,
1261 int *pos, int *length)
1262 {
1263 int tmp_pos = 0;
1264 int tmp_length = 0;
1265 int i;
1266 unsigned HOST_WIDE_INT mask = 1ULL;
1267 bool contiguous = false;
1268
1269 for (i = 0; i < size; mask <<= 1, i++)
1270 {
1271 if (contiguous)
1272 {
1273 if (mask & in)
1274 tmp_length++;
1275 else
1276 break;
1277 }
1278 else
1279 {
1280 if (mask & in)
1281 {
1282 contiguous = true;
1283 tmp_length++;
1284 }
1285 else
1286 tmp_pos++;
1287 }
1288 }
1289
1290 if (!tmp_length)
1291 return false;
1292
1293 /* Calculate a mask for all bits beyond the contiguous bits. */
1294 mask = (-1LL & ~(((1ULL << (tmp_length + tmp_pos - 1)) << 1) - 1));
1295
1296 if (mask & in)
1297 return false;
1298
1299 if (tmp_length + tmp_pos - 1 > size)
1300 return false;
1301
1302 if (length)
1303 *length = tmp_length;
1304
1305 if (pos)
1306 *pos = tmp_pos;
1307
1308 return true;
1309 }
1310
1311 /* Check whether we can (and want to) split a double-word
1312 move in mode MODE from SRC to DST into two single-word
1313 moves, moving the subword FIRST_SUBWORD first. */
1314
1315 bool
1316 s390_split_ok_p (rtx dst, rtx src, enum machine_mode mode, int first_subword)
1317 {
1318 /* Floating point registers cannot be split. */
1319 if (FP_REG_P (src) || FP_REG_P (dst))
1320 return false;
1321
1322 /* We don't need to split if operands are directly accessible. */
1323 if (s_operand (src, mode) || s_operand (dst, mode))
1324 return false;
1325
1326 /* Non-offsettable memory references cannot be split. */
1327 if ((GET_CODE (src) == MEM && !offsettable_memref_p (src))
1328 || (GET_CODE (dst) == MEM && !offsettable_memref_p (dst)))
1329 return false;
1330
1331 /* Moving the first subword must not clobber a register
1332 needed to move the second subword. */
1333 if (register_operand (dst, mode))
1334 {
1335 rtx subreg = operand_subword (dst, first_subword, 0, mode);
1336 if (reg_overlap_mentioned_p (subreg, src))
1337 return false;
1338 }
1339
1340 return true;
1341 }
1342
1343 /* Return true if it can be proven that [MEM1, MEM1 + SIZE]
1344 and [MEM2, MEM2 + SIZE] do overlap and false
1345 otherwise. */
1346
1347 bool
1348 s390_overlap_p (rtx mem1, rtx mem2, HOST_WIDE_INT size)
1349 {
1350 rtx addr1, addr2, addr_delta;
1351 HOST_WIDE_INT delta;
1352
1353 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
1354 return true;
1355
1356 if (size == 0)
1357 return false;
1358
1359 addr1 = XEXP (mem1, 0);
1360 addr2 = XEXP (mem2, 0);
1361
1362 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
1363
1364 /* This overlapping check is used by peepholes merging memory block operations.
1365 Overlapping operations would otherwise be recognized by the S/390 hardware
1366 and would fall back to a slower implementation. Allowing overlapping
1367 operations would lead to slow code but not to wrong code. Therefore we are
1368 somewhat optimistic if we cannot prove that the memory blocks are
1369 overlapping.
1370 That's why we return false here although this may accept operations on
1371 overlapping memory areas. */
1372 if (!addr_delta || GET_CODE (addr_delta) != CONST_INT)
1373 return false;
1374
1375 delta = INTVAL (addr_delta);
1376
1377 if (delta == 0
1378 || (delta > 0 && delta < size)
1379 || (delta < 0 && -delta < size))
1380 return true;
1381
1382 return false;
1383 }
1384
1385 /* Check whether the address of memory reference MEM2 equals exactly
1386 the address of memory reference MEM1 plus DELTA. Return true if
1387 we can prove this to be the case, false otherwise. */
1388
1389 bool
1390 s390_offset_p (rtx mem1, rtx mem2, rtx delta)
1391 {
1392 rtx addr1, addr2, addr_delta;
1393
1394 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
1395 return false;
1396
1397 addr1 = XEXP (mem1, 0);
1398 addr2 = XEXP (mem2, 0);
1399
1400 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
1401 if (!addr_delta || !rtx_equal_p (addr_delta, delta))
1402 return false;
1403
1404 return true;
1405 }
1406
1407 /* Expand logical operator CODE in mode MODE with operands OPERANDS. */
1408
1409 void
1410 s390_expand_logical_operator (enum rtx_code code, enum machine_mode mode,
1411 rtx *operands)
1412 {
1413 enum machine_mode wmode = mode;
1414 rtx dst = operands[0];
1415 rtx src1 = operands[1];
1416 rtx src2 = operands[2];
1417 rtx op, clob, tem;
1418
1419 /* If we cannot handle the operation directly, use a temp register. */
1420 if (!s390_logical_operator_ok_p (operands))
1421 dst = gen_reg_rtx (mode);
1422
1423 /* QImode and HImode patterns make sense only if we have a destination
1424 in memory. Otherwise perform the operation in SImode. */
1425 if ((mode == QImode || mode == HImode) && GET_CODE (dst) != MEM)
1426 wmode = SImode;
1427
1428 /* Widen operands if required. */
1429 if (mode != wmode)
1430 {
1431 if (GET_CODE (dst) == SUBREG
1432 && (tem = simplify_subreg (wmode, dst, mode, 0)) != 0)
1433 dst = tem;
1434 else if (REG_P (dst))
1435 dst = gen_rtx_SUBREG (wmode, dst, 0);
1436 else
1437 dst = gen_reg_rtx (wmode);
1438
1439 if (GET_CODE (src1) == SUBREG
1440 && (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)
1441 src1 = tem;
1442 else if (GET_MODE (src1) != VOIDmode)
1443 src1 = gen_rtx_SUBREG (wmode, force_reg (mode, src1), 0);
1444
1445 if (GET_CODE (src2) == SUBREG
1446 && (tem = simplify_subreg (wmode, src2, mode, 0)) != 0)
1447 src2 = tem;
1448 else if (GET_MODE (src2) != VOIDmode)
1449 src2 = gen_rtx_SUBREG (wmode, force_reg (mode, src2), 0);
1450 }
1451
1452 /* Emit the instruction. */
1453 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, wmode, src1, src2));
1454 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
1455 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
1456
1457 /* Fix up the destination if needed. */
1458 if (dst != operands[0])
1459 emit_move_insn (operands[0], gen_lowpart (mode, dst));
1460 }
1461
1462 /* Check whether OPERANDS are OK for a logical operation (AND, IOR, XOR). */
1463
1464 bool
1465 s390_logical_operator_ok_p (rtx *operands)
1466 {
1467 /* If the destination operand is in memory, it needs to coincide
1468 with one of the source operands. After reload, it has to be
1469 the first source operand. */
1470 if (GET_CODE (operands[0]) == MEM)
1471 return rtx_equal_p (operands[0], operands[1])
1472 || (!reload_completed && rtx_equal_p (operands[0], operands[2]));
1473
1474 return true;
1475 }
1476
1477 /* Narrow logical operation CODE of memory operand MEMOP with immediate
1478 operand IMMOP to switch from SS to SI type instructions. */
1479
1480 void
1481 s390_narrow_logical_operator (enum rtx_code code, rtx *memop, rtx *immop)
1482 {
1483 int def = code == AND ? -1 : 0;
1484 HOST_WIDE_INT mask;
1485 int part;
1486
1487 gcc_assert (GET_CODE (*memop) == MEM);
1488 gcc_assert (!MEM_VOLATILE_P (*memop));
1489
1490 mask = s390_extract_part (*immop, QImode, def);
1491 part = s390_single_part (*immop, GET_MODE (*memop), QImode, def);
1492 gcc_assert (part >= 0);
1493
1494 *memop = adjust_address (*memop, QImode, part);
1495 *immop = gen_int_mode (mask, QImode);
1496 }
1497
1498
1499 /* How to allocate a 'struct machine_function'. */
1500
1501 static struct machine_function *
1502 s390_init_machine_status (void)
1503 {
1504 return ggc_alloc_cleared_machine_function ();
1505 }
1506
1507 static void
1508 s390_option_override (void)
1509 {
1510 /* Set up function hooks. */
1511 init_machine_status = s390_init_machine_status;
1512
1513 /* Architecture mode defaults according to ABI. */
1514 if (!(target_flags_explicit & MASK_ZARCH))
1515 {
1516 if (TARGET_64BIT)
1517 target_flags |= MASK_ZARCH;
1518 else
1519 target_flags &= ~MASK_ZARCH;
1520 }
1521
1522 /* Set the march default in case it hasn't been specified on
1523 cmdline. */
1524 if (s390_arch == PROCESSOR_max)
1525 {
1526 s390_arch_string = TARGET_ZARCH? "z900" : "g5";
1527 s390_arch = TARGET_ZARCH ? PROCESSOR_2064_Z900 : PROCESSOR_9672_G5;
1528 s390_arch_flags = processor_flags_table[(int)s390_arch];
1529 }
1530
1531 /* Determine processor to tune for. */
1532 if (s390_tune == PROCESSOR_max)
1533 {
1534 s390_tune = s390_arch;
1535 s390_tune_flags = s390_arch_flags;
1536 }
1537
1538 /* Sanity checks. */
1539 if (TARGET_ZARCH && !TARGET_CPU_ZARCH)
1540 error ("z/Architecture mode not supported on %s", s390_arch_string);
1541 if (TARGET_64BIT && !TARGET_ZARCH)
1542 error ("64-bit ABI not supported in ESA/390 mode");
1543
1544 /* Use hardware DFP if available and not explicitly disabled by
1545 user. E.g. with -m31 -march=z10 -mzarch */
1546 if (!(target_flags_explicit & MASK_HARD_DFP) && TARGET_DFP)
1547 target_flags |= MASK_HARD_DFP;
1548
1549 if (TARGET_HARD_DFP && !TARGET_DFP)
1550 {
1551 if (target_flags_explicit & MASK_HARD_DFP)
1552 {
1553 if (!TARGET_CPU_DFP)
1554 error ("hardware decimal floating point instructions"
1555 " not available on %s", s390_arch_string);
1556 if (!TARGET_ZARCH)
1557 error ("hardware decimal floating point instructions"
1558 " not available in ESA/390 mode");
1559 }
1560 else
1561 target_flags &= ~MASK_HARD_DFP;
1562 }
1563
1564 if ((target_flags_explicit & MASK_SOFT_FLOAT) && TARGET_SOFT_FLOAT)
1565 {
1566 if ((target_flags_explicit & MASK_HARD_DFP) && TARGET_HARD_DFP)
1567 error ("-mhard-dfp can%'t be used in conjunction with -msoft-float");
1568
1569 target_flags &= ~MASK_HARD_DFP;
1570 }
1571
1572 /* Set processor cost function. */
1573 switch (s390_tune)
1574 {
1575 case PROCESSOR_2084_Z990:
1576 s390_cost = &z990_cost;
1577 break;
1578 case PROCESSOR_2094_Z9_109:
1579 s390_cost = &z9_109_cost;
1580 break;
1581 case PROCESSOR_2097_Z10:
1582 s390_cost = &z10_cost;
1583 case PROCESSOR_2817_Z196:
1584 s390_cost = &z196_cost;
1585 break;
1586 default:
1587 s390_cost = &z900_cost;
1588 }
1589
1590 if (TARGET_BACKCHAIN && TARGET_PACKED_STACK && TARGET_HARD_FLOAT)
1591 error ("-mbackchain -mpacked-stack -mhard-float are not supported "
1592 "in combination");
1593
1594 if (s390_stack_size)
1595 {
1596 if (s390_stack_guard >= s390_stack_size)
1597 error ("stack size must be greater than the stack guard value");
1598 else if (s390_stack_size > 1 << 16)
1599 error ("stack size must not be greater than 64k");
1600 }
1601 else if (s390_stack_guard)
1602 error ("-mstack-guard implies use of -mstack-size");
1603
1604 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
1605 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
1606 target_flags |= MASK_LONG_DOUBLE_128;
1607 #endif
1608
1609 if (s390_tune == PROCESSOR_2097_Z10
1610 || s390_tune == PROCESSOR_2817_Z196)
1611 {
1612 maybe_set_param_value (PARAM_MAX_UNROLLED_INSNS, 100,
1613 global_options.x_param_values,
1614 global_options_set.x_param_values);
1615 maybe_set_param_value (PARAM_MAX_UNROLL_TIMES, 32,
1616 global_options.x_param_values,
1617 global_options_set.x_param_values);
1618 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 2000,
1619 global_options.x_param_values,
1620 global_options_set.x_param_values);
1621 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEEL_TIMES, 64,
1622 global_options.x_param_values,
1623 global_options_set.x_param_values);
1624 }
1625
1626 maybe_set_param_value (PARAM_MAX_PENDING_LIST_LENGTH, 256,
1627 global_options.x_param_values,
1628 global_options_set.x_param_values);
1629 /* values for loop prefetching */
1630 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, 256,
1631 global_options.x_param_values,
1632 global_options_set.x_param_values);
1633 maybe_set_param_value (PARAM_L1_CACHE_SIZE, 128,
1634 global_options.x_param_values,
1635 global_options_set.x_param_values);
1636 /* s390 has more than 2 levels and the size is much larger. Since
1637 we are always running virtualized assume that we only get a small
1638 part of the caches above l1. */
1639 maybe_set_param_value (PARAM_L2_CACHE_SIZE, 1500,
1640 global_options.x_param_values,
1641 global_options_set.x_param_values);
1642 maybe_set_param_value (PARAM_PREFETCH_MIN_INSN_TO_MEM_RATIO, 2,
1643 global_options.x_param_values,
1644 global_options_set.x_param_values);
1645 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6,
1646 global_options.x_param_values,
1647 global_options_set.x_param_values);
1648
1649 /* This cannot reside in s390_option_optimization_table since HAVE_prefetch
1650 requires the arch flags to be evaluated already. Since prefetching
1651 is beneficial on s390, we enable it if available. */
1652 if (flag_prefetch_loop_arrays < 0 && HAVE_prefetch && optimize >= 3)
1653 flag_prefetch_loop_arrays = 1;
1654
1655 /* Use the alternative scheduling-pressure algorithm by default. */
1656 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM, 2,
1657 global_options.x_param_values,
1658 global_options_set.x_param_values);
1659
1660 if (TARGET_TPF)
1661 {
1662 /* Don't emit DWARF3/4 unless specifically selected. The TPF
1663 debuggers do not yet support DWARF 3/4. */
1664 if (!global_options_set.x_dwarf_strict)
1665 dwarf_strict = 1;
1666 if (!global_options_set.x_dwarf_version)
1667 dwarf_version = 2;
1668 }
1669 }
1670
1671 /* Map for smallest class containing reg regno. */
1672
1673 const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
1674 { GENERAL_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1675 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1676 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1677 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1678 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1679 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1680 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1681 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1682 ADDR_REGS, CC_REGS, ADDR_REGS, ADDR_REGS,
1683 ACCESS_REGS, ACCESS_REGS
1684 };
1685
1686 /* Return attribute type of insn. */
1687
1688 static enum attr_type
1689 s390_safe_attr_type (rtx insn)
1690 {
1691 if (recog_memoized (insn) >= 0)
1692 return get_attr_type (insn);
1693 else
1694 return TYPE_NONE;
1695 }
1696
1697 /* Return true if DISP is a valid short displacement. */
1698
1699 static bool
1700 s390_short_displacement (rtx disp)
1701 {
1702 /* No displacement is OK. */
1703 if (!disp)
1704 return true;
1705
1706 /* Without the long displacement facility we don't need to
1707 distingiush between long and short displacement. */
1708 if (!TARGET_LONG_DISPLACEMENT)
1709 return true;
1710
1711 /* Integer displacement in range. */
1712 if (GET_CODE (disp) == CONST_INT)
1713 return INTVAL (disp) >= 0 && INTVAL (disp) < 4096;
1714
1715 /* GOT offset is not OK, the GOT can be large. */
1716 if (GET_CODE (disp) == CONST
1717 && GET_CODE (XEXP (disp, 0)) == UNSPEC
1718 && (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
1719 || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
1720 return false;
1721
1722 /* All other symbolic constants are literal pool references,
1723 which are OK as the literal pool must be small. */
1724 if (GET_CODE (disp) == CONST)
1725 return true;
1726
1727 return false;
1728 }
1729
1730 /* Decompose a RTL expression ADDR for a memory address into
1731 its components, returned in OUT.
1732
1733 Returns false if ADDR is not a valid memory address, true
1734 otherwise. If OUT is NULL, don't return the components,
1735 but check for validity only.
1736
1737 Note: Only addresses in canonical form are recognized.
1738 LEGITIMIZE_ADDRESS should convert non-canonical forms to the
1739 canonical form so that they will be recognized. */
1740
1741 static int
1742 s390_decompose_address (rtx addr, struct s390_address *out)
1743 {
1744 HOST_WIDE_INT offset = 0;
1745 rtx base = NULL_RTX;
1746 rtx indx = NULL_RTX;
1747 rtx disp = NULL_RTX;
1748 rtx orig_disp;
1749 bool pointer = false;
1750 bool base_ptr = false;
1751 bool indx_ptr = false;
1752 bool literal_pool = false;
1753
1754 /* We may need to substitute the literal pool base register into the address
1755 below. However, at this point we do not know which register is going to
1756 be used as base, so we substitute the arg pointer register. This is going
1757 to be treated as holding a pointer below -- it shouldn't be used for any
1758 other purpose. */
1759 rtx fake_pool_base = gen_rtx_REG (Pmode, ARG_POINTER_REGNUM);
1760
1761 /* Decompose address into base + index + displacement. */
1762
1763 if (GET_CODE (addr) == REG || GET_CODE (addr) == UNSPEC)
1764 base = addr;
1765
1766 else if (GET_CODE (addr) == PLUS)
1767 {
1768 rtx op0 = XEXP (addr, 0);
1769 rtx op1 = XEXP (addr, 1);
1770 enum rtx_code code0 = GET_CODE (op0);
1771 enum rtx_code code1 = GET_CODE (op1);
1772
1773 if (code0 == REG || code0 == UNSPEC)
1774 {
1775 if (code1 == REG || code1 == UNSPEC)
1776 {
1777 indx = op0; /* index + base */
1778 base = op1;
1779 }
1780
1781 else
1782 {
1783 base = op0; /* base + displacement */
1784 disp = op1;
1785 }
1786 }
1787
1788 else if (code0 == PLUS)
1789 {
1790 indx = XEXP (op0, 0); /* index + base + disp */
1791 base = XEXP (op0, 1);
1792 disp = op1;
1793 }
1794
1795 else
1796 {
1797 return false;
1798 }
1799 }
1800
1801 else
1802 disp = addr; /* displacement */
1803
1804 /* Extract integer part of displacement. */
1805 orig_disp = disp;
1806 if (disp)
1807 {
1808 if (GET_CODE (disp) == CONST_INT)
1809 {
1810 offset = INTVAL (disp);
1811 disp = NULL_RTX;
1812 }
1813 else if (GET_CODE (disp) == CONST
1814 && GET_CODE (XEXP (disp, 0)) == PLUS
1815 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
1816 {
1817 offset = INTVAL (XEXP (XEXP (disp, 0), 1));
1818 disp = XEXP (XEXP (disp, 0), 0);
1819 }
1820 }
1821
1822 /* Strip off CONST here to avoid special case tests later. */
1823 if (disp && GET_CODE (disp) == CONST)
1824 disp = XEXP (disp, 0);
1825
1826 /* We can convert literal pool addresses to
1827 displacements by basing them off the base register. */
1828 if (disp && GET_CODE (disp) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (disp))
1829 {
1830 /* Either base or index must be free to hold the base register. */
1831 if (!base)
1832 base = fake_pool_base, literal_pool = true;
1833 else if (!indx)
1834 indx = fake_pool_base, literal_pool = true;
1835 else
1836 return false;
1837
1838 /* Mark up the displacement. */
1839 disp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, disp),
1840 UNSPEC_LTREL_OFFSET);
1841 }
1842
1843 /* Validate base register. */
1844 if (base)
1845 {
1846 if (GET_CODE (base) == UNSPEC)
1847 switch (XINT (base, 1))
1848 {
1849 case UNSPEC_LTREF:
1850 if (!disp)
1851 disp = gen_rtx_UNSPEC (Pmode,
1852 gen_rtvec (1, XVECEXP (base, 0, 0)),
1853 UNSPEC_LTREL_OFFSET);
1854 else
1855 return false;
1856
1857 base = XVECEXP (base, 0, 1);
1858 break;
1859
1860 case UNSPEC_LTREL_BASE:
1861 if (XVECLEN (base, 0) == 1)
1862 base = fake_pool_base, literal_pool = true;
1863 else
1864 base = XVECEXP (base, 0, 1);
1865 break;
1866
1867 default:
1868 return false;
1869 }
1870
1871 if (!REG_P (base)
1872 || (GET_MODE (base) != SImode
1873 && GET_MODE (base) != Pmode))
1874 return false;
1875
1876 if (REGNO (base) == STACK_POINTER_REGNUM
1877 || REGNO (base) == FRAME_POINTER_REGNUM
1878 || ((reload_completed || reload_in_progress)
1879 && frame_pointer_needed
1880 && REGNO (base) == HARD_FRAME_POINTER_REGNUM)
1881 || REGNO (base) == ARG_POINTER_REGNUM
1882 || (flag_pic
1883 && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
1884 pointer = base_ptr = true;
1885
1886 if ((reload_completed || reload_in_progress)
1887 && base == cfun->machine->base_reg)
1888 pointer = base_ptr = literal_pool = true;
1889 }
1890
1891 /* Validate index register. */
1892 if (indx)
1893 {
1894 if (GET_CODE (indx) == UNSPEC)
1895 switch (XINT (indx, 1))
1896 {
1897 case UNSPEC_LTREF:
1898 if (!disp)
1899 disp = gen_rtx_UNSPEC (Pmode,
1900 gen_rtvec (1, XVECEXP (indx, 0, 0)),
1901 UNSPEC_LTREL_OFFSET);
1902 else
1903 return false;
1904
1905 indx = XVECEXP (indx, 0, 1);
1906 break;
1907
1908 case UNSPEC_LTREL_BASE:
1909 if (XVECLEN (indx, 0) == 1)
1910 indx = fake_pool_base, literal_pool = true;
1911 else
1912 indx = XVECEXP (indx, 0, 1);
1913 break;
1914
1915 default:
1916 return false;
1917 }
1918
1919 if (!REG_P (indx)
1920 || (GET_MODE (indx) != SImode
1921 && GET_MODE (indx) != Pmode))
1922 return false;
1923
1924 if (REGNO (indx) == STACK_POINTER_REGNUM
1925 || REGNO (indx) == FRAME_POINTER_REGNUM
1926 || ((reload_completed || reload_in_progress)
1927 && frame_pointer_needed
1928 && REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
1929 || REGNO (indx) == ARG_POINTER_REGNUM
1930 || (flag_pic
1931 && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
1932 pointer = indx_ptr = true;
1933
1934 if ((reload_completed || reload_in_progress)
1935 && indx == cfun->machine->base_reg)
1936 pointer = indx_ptr = literal_pool = true;
1937 }
1938
1939 /* Prefer to use pointer as base, not index. */
1940 if (base && indx && !base_ptr
1941 && (indx_ptr || (!REG_POINTER (base) && REG_POINTER (indx))))
1942 {
1943 rtx tmp = base;
1944 base = indx;
1945 indx = tmp;
1946 }
1947
1948 /* Validate displacement. */
1949 if (!disp)
1950 {
1951 /* If virtual registers are involved, the displacement will change later
1952 anyway as the virtual registers get eliminated. This could make a
1953 valid displacement invalid, but it is more likely to make an invalid
1954 displacement valid, because we sometimes access the register save area
1955 via negative offsets to one of those registers.
1956 Thus we don't check the displacement for validity here. If after
1957 elimination the displacement turns out to be invalid after all,
1958 this is fixed up by reload in any case. */
1959 if (base != arg_pointer_rtx
1960 && indx != arg_pointer_rtx
1961 && base != return_address_pointer_rtx
1962 && indx != return_address_pointer_rtx
1963 && base != frame_pointer_rtx
1964 && indx != frame_pointer_rtx
1965 && base != virtual_stack_vars_rtx
1966 && indx != virtual_stack_vars_rtx)
1967 if (!DISP_IN_RANGE (offset))
1968 return false;
1969 }
1970 else
1971 {
1972 /* All the special cases are pointers. */
1973 pointer = true;
1974
1975 /* In the small-PIC case, the linker converts @GOT
1976 and @GOTNTPOFF offsets to possible displacements. */
1977 if (GET_CODE (disp) == UNSPEC
1978 && (XINT (disp, 1) == UNSPEC_GOT
1979 || XINT (disp, 1) == UNSPEC_GOTNTPOFF)
1980 && flag_pic == 1)
1981 {
1982 ;
1983 }
1984
1985 /* Accept pool label offsets. */
1986 else if (GET_CODE (disp) == UNSPEC
1987 && XINT (disp, 1) == UNSPEC_POOL_OFFSET)
1988 ;
1989
1990 /* Accept literal pool references. */
1991 else if (GET_CODE (disp) == UNSPEC
1992 && XINT (disp, 1) == UNSPEC_LTREL_OFFSET)
1993 {
1994 /* In case CSE pulled a non literal pool reference out of
1995 the pool we have to reject the address. This is
1996 especially important when loading the GOT pointer on non
1997 zarch CPUs. In this case the literal pool contains an lt
1998 relative offset to the _GLOBAL_OFFSET_TABLE_ label which
1999 will most likely exceed the displacement. */
2000 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
2001 || !CONSTANT_POOL_ADDRESS_P (XVECEXP (disp, 0, 0)))
2002 return false;
2003
2004 orig_disp = gen_rtx_CONST (Pmode, disp);
2005 if (offset)
2006 {
2007 /* If we have an offset, make sure it does not
2008 exceed the size of the constant pool entry. */
2009 rtx sym = XVECEXP (disp, 0, 0);
2010 if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))
2011 return false;
2012
2013 orig_disp = plus_constant (Pmode, orig_disp, offset);
2014 }
2015 }
2016
2017 else
2018 return false;
2019 }
2020
2021 if (!base && !indx)
2022 pointer = true;
2023
2024 if (out)
2025 {
2026 out->base = base;
2027 out->indx = indx;
2028 out->disp = orig_disp;
2029 out->pointer = pointer;
2030 out->literal_pool = literal_pool;
2031 }
2032
2033 return true;
2034 }
2035
2036 /* Decompose a RTL expression OP for a shift count into its components,
2037 and return the base register in BASE and the offset in OFFSET.
2038
2039 Return true if OP is a valid shift count, false if not. */
2040
2041 bool
2042 s390_decompose_shift_count (rtx op, rtx *base, HOST_WIDE_INT *offset)
2043 {
2044 HOST_WIDE_INT off = 0;
2045
2046 /* We can have an integer constant, an address register,
2047 or a sum of the two. */
2048 if (GET_CODE (op) == CONST_INT)
2049 {
2050 off = INTVAL (op);
2051 op = NULL_RTX;
2052 }
2053 if (op && GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT)
2054 {
2055 off = INTVAL (XEXP (op, 1));
2056 op = XEXP (op, 0);
2057 }
2058 while (op && GET_CODE (op) == SUBREG)
2059 op = SUBREG_REG (op);
2060
2061 if (op && GET_CODE (op) != REG)
2062 return false;
2063
2064 if (offset)
2065 *offset = off;
2066 if (base)
2067 *base = op;
2068
2069 return true;
2070 }
2071
2072
2073 /* Return true if CODE is a valid address without index. */
2074
2075 bool
2076 s390_legitimate_address_without_index_p (rtx op)
2077 {
2078 struct s390_address addr;
2079
2080 if (!s390_decompose_address (XEXP (op, 0), &addr))
2081 return false;
2082 if (addr.indx)
2083 return false;
2084
2085 return true;
2086 }
2087
2088
2089 /* Return true if ADDR is of kind symbol_ref or symbol_ref + const_int
2090 and return these parts in SYMREF and ADDEND. You can pass NULL in
2091 SYMREF and/or ADDEND if you are not interested in these values.
2092 Literal pool references are *not* considered symbol references. */
2093
2094 static bool
2095 s390_symref_operand_p (rtx addr, rtx *symref, HOST_WIDE_INT *addend)
2096 {
2097 HOST_WIDE_INT tmpaddend = 0;
2098
2099 if (GET_CODE (addr) == CONST)
2100 addr = XEXP (addr, 0);
2101
2102 if (GET_CODE (addr) == PLUS)
2103 {
2104 if (GET_CODE (XEXP (addr, 0)) == SYMBOL_REF
2105 && !CONSTANT_POOL_ADDRESS_P (XEXP (addr, 0))
2106 && CONST_INT_P (XEXP (addr, 1)))
2107 {
2108 tmpaddend = INTVAL (XEXP (addr, 1));
2109 addr = XEXP (addr, 0);
2110 }
2111 else
2112 return false;
2113 }
2114 else
2115 if (GET_CODE (addr) != SYMBOL_REF || CONSTANT_POOL_ADDRESS_P (addr))
2116 return false;
2117
2118 if (symref)
2119 *symref = addr;
2120 if (addend)
2121 *addend = tmpaddend;
2122
2123 return true;
2124 }
2125
2126
2127 /* Return true if the address in OP is valid for constraint letter C
2128 if wrapped in a MEM rtx. Set LIT_POOL_OK to true if it literal
2129 pool MEMs should be accepted. Only the Q, R, S, T constraint
2130 letters are allowed for C. */
2131
2132 static int
2133 s390_check_qrst_address (char c, rtx op, bool lit_pool_ok)
2134 {
2135 struct s390_address addr;
2136 bool decomposed = false;
2137
2138 /* This check makes sure that no symbolic address (except literal
2139 pool references) are accepted by the R or T constraints. */
2140 if (s390_symref_operand_p (op, NULL, NULL))
2141 return 0;
2142
2143 /* Ensure literal pool references are only accepted if LIT_POOL_OK. */
2144 if (!lit_pool_ok)
2145 {
2146 if (!s390_decompose_address (op, &addr))
2147 return 0;
2148 if (addr.literal_pool)
2149 return 0;
2150 decomposed = true;
2151 }
2152
2153 switch (c)
2154 {
2155 case 'Q': /* no index short displacement */
2156 if (!decomposed && !s390_decompose_address (op, &addr))
2157 return 0;
2158 if (addr.indx)
2159 return 0;
2160 if (!s390_short_displacement (addr.disp))
2161 return 0;
2162 break;
2163
2164 case 'R': /* with index short displacement */
2165 if (TARGET_LONG_DISPLACEMENT)
2166 {
2167 if (!decomposed && !s390_decompose_address (op, &addr))
2168 return 0;
2169 if (!s390_short_displacement (addr.disp))
2170 return 0;
2171 }
2172 /* Any invalid address here will be fixed up by reload,
2173 so accept it for the most generic constraint. */
2174 break;
2175
2176 case 'S': /* no index long displacement */
2177 if (!TARGET_LONG_DISPLACEMENT)
2178 return 0;
2179 if (!decomposed && !s390_decompose_address (op, &addr))
2180 return 0;
2181 if (addr.indx)
2182 return 0;
2183 if (s390_short_displacement (addr.disp))
2184 return 0;
2185 break;
2186
2187 case 'T': /* with index long displacement */
2188 if (!TARGET_LONG_DISPLACEMENT)
2189 return 0;
2190 /* Any invalid address here will be fixed up by reload,
2191 so accept it for the most generic constraint. */
2192 if ((decomposed || s390_decompose_address (op, &addr))
2193 && s390_short_displacement (addr.disp))
2194 return 0;
2195 break;
2196 default:
2197 return 0;
2198 }
2199 return 1;
2200 }
2201
2202
2203 /* Evaluates constraint strings described by the regular expression
2204 ([A|B|Z](Q|R|S|T))|U|W|Y and returns 1 if OP is a valid operand for
2205 the constraint given in STR, or 0 else. */
2206
2207 int
2208 s390_mem_constraint (const char *str, rtx op)
2209 {
2210 char c = str[0];
2211
2212 switch (c)
2213 {
2214 case 'A':
2215 /* Check for offsettable variants of memory constraints. */
2216 if (!MEM_P (op) || MEM_VOLATILE_P (op))
2217 return 0;
2218 if ((reload_completed || reload_in_progress)
2219 ? !offsettable_memref_p (op) : !offsettable_nonstrict_memref_p (op))
2220 return 0;
2221 return s390_check_qrst_address (str[1], XEXP (op, 0), true);
2222 case 'B':
2223 /* Check for non-literal-pool variants of memory constraints. */
2224 if (!MEM_P (op))
2225 return 0;
2226 return s390_check_qrst_address (str[1], XEXP (op, 0), false);
2227 case 'Q':
2228 case 'R':
2229 case 'S':
2230 case 'T':
2231 if (GET_CODE (op) != MEM)
2232 return 0;
2233 return s390_check_qrst_address (c, XEXP (op, 0), true);
2234 case 'U':
2235 return (s390_check_qrst_address ('Q', op, true)
2236 || s390_check_qrst_address ('R', op, true));
2237 case 'W':
2238 return (s390_check_qrst_address ('S', op, true)
2239 || s390_check_qrst_address ('T', op, true));
2240 case 'Y':
2241 /* Simply check for the basic form of a shift count. Reload will
2242 take care of making sure we have a proper base register. */
2243 if (!s390_decompose_shift_count (op, NULL, NULL))
2244 return 0;
2245 break;
2246 case 'Z':
2247 return s390_check_qrst_address (str[1], op, true);
2248 default:
2249 return 0;
2250 }
2251 return 1;
2252 }
2253
2254
2255 /* Evaluates constraint strings starting with letter O. Input
2256 parameter C is the second letter following the "O" in the constraint
2257 string. Returns 1 if VALUE meets the respective constraint and 0
2258 otherwise. */
2259
2260 int
2261 s390_O_constraint_str (const char c, HOST_WIDE_INT value)
2262 {
2263 if (!TARGET_EXTIMM)
2264 return 0;
2265
2266 switch (c)
2267 {
2268 case 's':
2269 return trunc_int_for_mode (value, SImode) == value;
2270
2271 case 'p':
2272 return value == 0
2273 || s390_single_part (GEN_INT (value), DImode, SImode, 0) == 1;
2274
2275 case 'n':
2276 return s390_single_part (GEN_INT (value - 1), DImode, SImode, -1) == 1;
2277
2278 default:
2279 gcc_unreachable ();
2280 }
2281 }
2282
2283
2284 /* Evaluates constraint strings starting with letter N. Parameter STR
2285 contains the letters following letter "N" in the constraint string.
2286 Returns true if VALUE matches the constraint. */
2287
2288 int
2289 s390_N_constraint_str (const char *str, HOST_WIDE_INT value)
2290 {
2291 enum machine_mode mode, part_mode;
2292 int def;
2293 int part, part_goal;
2294
2295
2296 if (str[0] == 'x')
2297 part_goal = -1;
2298 else
2299 part_goal = str[0] - '0';
2300
2301 switch (str[1])
2302 {
2303 case 'Q':
2304 part_mode = QImode;
2305 break;
2306 case 'H':
2307 part_mode = HImode;
2308 break;
2309 case 'S':
2310 part_mode = SImode;
2311 break;
2312 default:
2313 return 0;
2314 }
2315
2316 switch (str[2])
2317 {
2318 case 'H':
2319 mode = HImode;
2320 break;
2321 case 'S':
2322 mode = SImode;
2323 break;
2324 case 'D':
2325 mode = DImode;
2326 break;
2327 default:
2328 return 0;
2329 }
2330
2331 switch (str[3])
2332 {
2333 case '0':
2334 def = 0;
2335 break;
2336 case 'F':
2337 def = -1;
2338 break;
2339 default:
2340 return 0;
2341 }
2342
2343 if (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (part_mode))
2344 return 0;
2345
2346 part = s390_single_part (GEN_INT (value), mode, part_mode, def);
2347 if (part < 0)
2348 return 0;
2349 if (part_goal != -1 && part_goal != part)
2350 return 0;
2351
2352 return 1;
2353 }
2354
2355
2356 /* Returns true if the input parameter VALUE is a float zero. */
2357
2358 int
2359 s390_float_const_zero_p (rtx value)
2360 {
2361 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
2362 && value == CONST0_RTX (GET_MODE (value)));
2363 }
2364
2365 /* Implement TARGET_REGISTER_MOVE_COST. */
2366
2367 static int
2368 s390_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2369 reg_class_t from, reg_class_t to)
2370 {
2371 /* On s390, copy between fprs and gprs is expensive. */
2372 if ((reg_classes_intersect_p (from, GENERAL_REGS)
2373 && reg_classes_intersect_p (to, FP_REGS))
2374 || (reg_classes_intersect_p (from, FP_REGS)
2375 && reg_classes_intersect_p (to, GENERAL_REGS)))
2376 return 10;
2377
2378 return 1;
2379 }
2380
2381 /* Implement TARGET_MEMORY_MOVE_COST. */
2382
2383 static int
2384 s390_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2385 reg_class_t rclass ATTRIBUTE_UNUSED,
2386 bool in ATTRIBUTE_UNUSED)
2387 {
2388 return 1;
2389 }
2390
2391 /* Compute a (partial) cost for rtx X. Return true if the complete
2392 cost has been computed, and false if subexpressions should be
2393 scanned. In either case, *TOTAL contains the cost result.
2394 CODE contains GET_CODE (x), OUTER_CODE contains the code
2395 of the superexpression of x. */
2396
2397 static bool
2398 s390_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
2399 int *total, bool speed ATTRIBUTE_UNUSED)
2400 {
2401 switch (code)
2402 {
2403 case CONST:
2404 case CONST_INT:
2405 case LABEL_REF:
2406 case SYMBOL_REF:
2407 case CONST_DOUBLE:
2408 case MEM:
2409 *total = 0;
2410 return true;
2411
2412 case ASHIFT:
2413 case ASHIFTRT:
2414 case LSHIFTRT:
2415 case ROTATE:
2416 case ROTATERT:
2417 case AND:
2418 case IOR:
2419 case XOR:
2420 case NEG:
2421 case NOT:
2422 *total = COSTS_N_INSNS (1);
2423 return false;
2424
2425 case PLUS:
2426 case MINUS:
2427 *total = COSTS_N_INSNS (1);
2428 return false;
2429
2430 case MULT:
2431 switch (GET_MODE (x))
2432 {
2433 case SImode:
2434 {
2435 rtx left = XEXP (x, 0);
2436 rtx right = XEXP (x, 1);
2437 if (GET_CODE (right) == CONST_INT
2438 && CONST_OK_FOR_K (INTVAL (right)))
2439 *total = s390_cost->mhi;
2440 else if (GET_CODE (left) == SIGN_EXTEND)
2441 *total = s390_cost->mh;
2442 else
2443 *total = s390_cost->ms; /* msr, ms, msy */
2444 break;
2445 }
2446 case DImode:
2447 {
2448 rtx left = XEXP (x, 0);
2449 rtx right = XEXP (x, 1);
2450 if (TARGET_ZARCH)
2451 {
2452 if (GET_CODE (right) == CONST_INT
2453 && CONST_OK_FOR_K (INTVAL (right)))
2454 *total = s390_cost->mghi;
2455 else if (GET_CODE (left) == SIGN_EXTEND)
2456 *total = s390_cost->msgf;
2457 else
2458 *total = s390_cost->msg; /* msgr, msg */
2459 }
2460 else /* TARGET_31BIT */
2461 {
2462 if (GET_CODE (left) == SIGN_EXTEND
2463 && GET_CODE (right) == SIGN_EXTEND)
2464 /* mulsidi case: mr, m */
2465 *total = s390_cost->m;
2466 else if (GET_CODE (left) == ZERO_EXTEND
2467 && GET_CODE (right) == ZERO_EXTEND
2468 && TARGET_CPU_ZARCH)
2469 /* umulsidi case: ml, mlr */
2470 *total = s390_cost->ml;
2471 else
2472 /* Complex calculation is required. */
2473 *total = COSTS_N_INSNS (40);
2474 }
2475 break;
2476 }
2477 case SFmode:
2478 case DFmode:
2479 *total = s390_cost->mult_df;
2480 break;
2481 case TFmode:
2482 *total = s390_cost->mxbr;
2483 break;
2484 default:
2485 return false;
2486 }
2487 return false;
2488
2489 case FMA:
2490 switch (GET_MODE (x))
2491 {
2492 case DFmode:
2493 *total = s390_cost->madbr;
2494 break;
2495 case SFmode:
2496 *total = s390_cost->maebr;
2497 break;
2498 default:
2499 return false;
2500 }
2501 /* Negate in the third argument is free: FMSUB. */
2502 if (GET_CODE (XEXP (x, 2)) == NEG)
2503 {
2504 *total += (rtx_cost (XEXP (x, 0), FMA, 0, speed)
2505 + rtx_cost (XEXP (x, 1), FMA, 1, speed)
2506 + rtx_cost (XEXP (XEXP (x, 2), 0), FMA, 2, speed));
2507 return true;
2508 }
2509 return false;
2510
2511 case UDIV:
2512 case UMOD:
2513 if (GET_MODE (x) == TImode) /* 128 bit division */
2514 *total = s390_cost->dlgr;
2515 else if (GET_MODE (x) == DImode)
2516 {
2517 rtx right = XEXP (x, 1);
2518 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
2519 *total = s390_cost->dlr;
2520 else /* 64 by 64 bit division */
2521 *total = s390_cost->dlgr;
2522 }
2523 else if (GET_MODE (x) == SImode) /* 32 bit division */
2524 *total = s390_cost->dlr;
2525 return false;
2526
2527 case DIV:
2528 case MOD:
2529 if (GET_MODE (x) == DImode)
2530 {
2531 rtx right = XEXP (x, 1);
2532 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
2533 if (TARGET_ZARCH)
2534 *total = s390_cost->dsgfr;
2535 else
2536 *total = s390_cost->dr;
2537 else /* 64 by 64 bit division */
2538 *total = s390_cost->dsgr;
2539 }
2540 else if (GET_MODE (x) == SImode) /* 32 bit division */
2541 *total = s390_cost->dlr;
2542 else if (GET_MODE (x) == SFmode)
2543 {
2544 *total = s390_cost->debr;
2545 }
2546 else if (GET_MODE (x) == DFmode)
2547 {
2548 *total = s390_cost->ddbr;
2549 }
2550 else if (GET_MODE (x) == TFmode)
2551 {
2552 *total = s390_cost->dxbr;
2553 }
2554 return false;
2555
2556 case SQRT:
2557 if (GET_MODE (x) == SFmode)
2558 *total = s390_cost->sqebr;
2559 else if (GET_MODE (x) == DFmode)
2560 *total = s390_cost->sqdbr;
2561 else /* TFmode */
2562 *total = s390_cost->sqxbr;
2563 return false;
2564
2565 case SIGN_EXTEND:
2566 case ZERO_EXTEND:
2567 if (outer_code == MULT || outer_code == DIV || outer_code == MOD
2568 || outer_code == PLUS || outer_code == MINUS
2569 || outer_code == COMPARE)
2570 *total = 0;
2571 return false;
2572
2573 case COMPARE:
2574 *total = COSTS_N_INSNS (1);
2575 if (GET_CODE (XEXP (x, 0)) == AND
2576 && GET_CODE (XEXP (x, 1)) == CONST_INT
2577 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
2578 {
2579 rtx op0 = XEXP (XEXP (x, 0), 0);
2580 rtx op1 = XEXP (XEXP (x, 0), 1);
2581 rtx op2 = XEXP (x, 1);
2582
2583 if (memory_operand (op0, GET_MODE (op0))
2584 && s390_tm_ccmode (op1, op2, 0) != VOIDmode)
2585 return true;
2586 if (register_operand (op0, GET_MODE (op0))
2587 && s390_tm_ccmode (op1, op2, 1) != VOIDmode)
2588 return true;
2589 }
2590 return false;
2591
2592 default:
2593 return false;
2594 }
2595 }
2596
2597 /* Return the cost of an address rtx ADDR. */
2598
2599 static int
2600 s390_address_cost (rtx addr, bool speed ATTRIBUTE_UNUSED)
2601 {
2602 struct s390_address ad;
2603 if (!s390_decompose_address (addr, &ad))
2604 return 1000;
2605
2606 return ad.indx? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (1);
2607 }
2608
2609 /* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
2610 otherwise return 0. */
2611
2612 int
2613 tls_symbolic_operand (rtx op)
2614 {
2615 if (GET_CODE (op) != SYMBOL_REF)
2616 return 0;
2617 return SYMBOL_REF_TLS_MODEL (op);
2618 }
2619 \f
2620 /* Split DImode access register reference REG (on 64-bit) into its constituent
2621 low and high parts, and store them into LO and HI. Note that gen_lowpart/
2622 gen_highpart cannot be used as they assume all registers are word-sized,
2623 while our access registers have only half that size. */
2624
2625 void
2626 s390_split_access_reg (rtx reg, rtx *lo, rtx *hi)
2627 {
2628 gcc_assert (TARGET_64BIT);
2629 gcc_assert (ACCESS_REG_P (reg));
2630 gcc_assert (GET_MODE (reg) == DImode);
2631 gcc_assert (!(REGNO (reg) & 1));
2632
2633 *lo = gen_rtx_REG (SImode, REGNO (reg) + 1);
2634 *hi = gen_rtx_REG (SImode, REGNO (reg));
2635 }
2636
2637 /* Return true if OP contains a symbol reference */
2638
2639 bool
2640 symbolic_reference_mentioned_p (rtx op)
2641 {
2642 const char *fmt;
2643 int i;
2644
2645 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
2646 return 1;
2647
2648 fmt = GET_RTX_FORMAT (GET_CODE (op));
2649 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2650 {
2651 if (fmt[i] == 'E')
2652 {
2653 int j;
2654
2655 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2656 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2657 return 1;
2658 }
2659
2660 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
2661 return 1;
2662 }
2663
2664 return 0;
2665 }
2666
2667 /* Return true if OP contains a reference to a thread-local symbol. */
2668
2669 bool
2670 tls_symbolic_reference_mentioned_p (rtx op)
2671 {
2672 const char *fmt;
2673 int i;
2674
2675 if (GET_CODE (op) == SYMBOL_REF)
2676 return tls_symbolic_operand (op);
2677
2678 fmt = GET_RTX_FORMAT (GET_CODE (op));
2679 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2680 {
2681 if (fmt[i] == 'E')
2682 {
2683 int j;
2684
2685 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2686 if (tls_symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2687 return true;
2688 }
2689
2690 else if (fmt[i] == 'e' && tls_symbolic_reference_mentioned_p (XEXP (op, i)))
2691 return true;
2692 }
2693
2694 return false;
2695 }
2696
2697
2698 /* Return true if OP is a legitimate general operand when
2699 generating PIC code. It is given that flag_pic is on
2700 and that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2701
2702 int
2703 legitimate_pic_operand_p (rtx op)
2704 {
2705 /* Accept all non-symbolic constants. */
2706 if (!SYMBOLIC_CONST (op))
2707 return 1;
2708
2709 /* Reject everything else; must be handled
2710 via emit_symbolic_move. */
2711 return 0;
2712 }
2713
2714 /* Returns true if the constant value OP is a legitimate general operand.
2715 It is given that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2716
2717 static bool
2718 s390_legitimate_constant_p (enum machine_mode mode, rtx op)
2719 {
2720 /* Accept all non-symbolic constants. */
2721 if (!SYMBOLIC_CONST (op))
2722 return 1;
2723
2724 /* Accept immediate LARL operands. */
2725 if (TARGET_CPU_ZARCH && larl_operand (op, mode))
2726 return 1;
2727
2728 /* Thread-local symbols are never legal constants. This is
2729 so that emit_call knows that computing such addresses
2730 might require a function call. */
2731 if (TLS_SYMBOLIC_CONST (op))
2732 return 0;
2733
2734 /* In the PIC case, symbolic constants must *not* be
2735 forced into the literal pool. We accept them here,
2736 so that they will be handled by emit_symbolic_move. */
2737 if (flag_pic)
2738 return 1;
2739
2740 /* All remaining non-PIC symbolic constants are
2741 forced into the literal pool. */
2742 return 0;
2743 }
2744
2745 /* Determine if it's legal to put X into the constant pool. This
2746 is not possible if X contains the address of a symbol that is
2747 not constant (TLS) or not known at final link time (PIC). */
2748
2749 static bool
2750 s390_cannot_force_const_mem (enum machine_mode mode, rtx x)
2751 {
2752 switch (GET_CODE (x))
2753 {
2754 case CONST_INT:
2755 case CONST_DOUBLE:
2756 /* Accept all non-symbolic constants. */
2757 return false;
2758
2759 case LABEL_REF:
2760 /* Labels are OK iff we are non-PIC. */
2761 return flag_pic != 0;
2762
2763 case SYMBOL_REF:
2764 /* 'Naked' TLS symbol references are never OK,
2765 non-TLS symbols are OK iff we are non-PIC. */
2766 if (tls_symbolic_operand (x))
2767 return true;
2768 else
2769 return flag_pic != 0;
2770
2771 case CONST:
2772 return s390_cannot_force_const_mem (mode, XEXP (x, 0));
2773 case PLUS:
2774 case MINUS:
2775 return s390_cannot_force_const_mem (mode, XEXP (x, 0))
2776 || s390_cannot_force_const_mem (mode, XEXP (x, 1));
2777
2778 case UNSPEC:
2779 switch (XINT (x, 1))
2780 {
2781 /* Only lt-relative or GOT-relative UNSPECs are OK. */
2782 case UNSPEC_LTREL_OFFSET:
2783 case UNSPEC_GOT:
2784 case UNSPEC_GOTOFF:
2785 case UNSPEC_PLTOFF:
2786 case UNSPEC_TLSGD:
2787 case UNSPEC_TLSLDM:
2788 case UNSPEC_NTPOFF:
2789 case UNSPEC_DTPOFF:
2790 case UNSPEC_GOTNTPOFF:
2791 case UNSPEC_INDNTPOFF:
2792 return false;
2793
2794 /* If the literal pool shares the code section, be put
2795 execute template placeholders into the pool as well. */
2796 case UNSPEC_INSN:
2797 return TARGET_CPU_ZARCH;
2798
2799 default:
2800 return true;
2801 }
2802 break;
2803
2804 default:
2805 gcc_unreachable ();
2806 }
2807 }
2808
2809 /* Returns true if the constant value OP is a legitimate general
2810 operand during and after reload. The difference to
2811 legitimate_constant_p is that this function will not accept
2812 a constant that would need to be forced to the literal pool
2813 before it can be used as operand.
2814 This function accepts all constants which can be loaded directly
2815 into a GPR. */
2816
2817 bool
2818 legitimate_reload_constant_p (rtx op)
2819 {
2820 /* Accept la(y) operands. */
2821 if (GET_CODE (op) == CONST_INT
2822 && DISP_IN_RANGE (INTVAL (op)))
2823 return true;
2824
2825 /* Accept l(g)hi/l(g)fi operands. */
2826 if (GET_CODE (op) == CONST_INT
2827 && (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_Os (INTVAL (op))))
2828 return true;
2829
2830 /* Accept lliXX operands. */
2831 if (TARGET_ZARCH
2832 && GET_CODE (op) == CONST_INT
2833 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2834 && s390_single_part (op, word_mode, HImode, 0) >= 0)
2835 return true;
2836
2837 if (TARGET_EXTIMM
2838 && GET_CODE (op) == CONST_INT
2839 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2840 && s390_single_part (op, word_mode, SImode, 0) >= 0)
2841 return true;
2842
2843 /* Accept larl operands. */
2844 if (TARGET_CPU_ZARCH
2845 && larl_operand (op, VOIDmode))
2846 return true;
2847
2848 /* Accept floating-point zero operands that fit into a single GPR. */
2849 if (GET_CODE (op) == CONST_DOUBLE
2850 && s390_float_const_zero_p (op)
2851 && GET_MODE_SIZE (GET_MODE (op)) <= UNITS_PER_WORD)
2852 return true;
2853
2854 /* Accept double-word operands that can be split. */
2855 if (GET_CODE (op) == CONST_INT
2856 && trunc_int_for_mode (INTVAL (op), word_mode) != INTVAL (op))
2857 {
2858 enum machine_mode dword_mode = word_mode == SImode ? DImode : TImode;
2859 rtx hi = operand_subword (op, 0, 0, dword_mode);
2860 rtx lo = operand_subword (op, 1, 0, dword_mode);
2861 return legitimate_reload_constant_p (hi)
2862 && legitimate_reload_constant_p (lo);
2863 }
2864
2865 /* Everything else cannot be handled without reload. */
2866 return false;
2867 }
2868
2869 /* Returns true if the constant value OP is a legitimate fp operand
2870 during and after reload.
2871 This function accepts all constants which can be loaded directly
2872 into an FPR. */
2873
2874 static bool
2875 legitimate_reload_fp_constant_p (rtx op)
2876 {
2877 /* Accept floating-point zero operands if the load zero instruction
2878 can be used. */
2879 if (TARGET_Z196
2880 && GET_CODE (op) == CONST_DOUBLE
2881 && s390_float_const_zero_p (op))
2882 return true;
2883
2884 return false;
2885 }
2886
2887 /* Given an rtx OP being reloaded into a reg required to be in class RCLASS,
2888 return the class of reg to actually use. */
2889
2890 static reg_class_t
2891 s390_preferred_reload_class (rtx op, reg_class_t rclass)
2892 {
2893 switch (GET_CODE (op))
2894 {
2895 /* Constants we cannot reload into general registers
2896 must be forced into the literal pool. */
2897 case CONST_DOUBLE:
2898 case CONST_INT:
2899 if (reg_class_subset_p (GENERAL_REGS, rclass)
2900 && legitimate_reload_constant_p (op))
2901 return GENERAL_REGS;
2902 else if (reg_class_subset_p (ADDR_REGS, rclass)
2903 && legitimate_reload_constant_p (op))
2904 return ADDR_REGS;
2905 else if (reg_class_subset_p (FP_REGS, rclass)
2906 && legitimate_reload_fp_constant_p (op))
2907 return FP_REGS;
2908 return NO_REGS;
2909
2910 /* If a symbolic constant or a PLUS is reloaded,
2911 it is most likely being used as an address, so
2912 prefer ADDR_REGS. If 'class' is not a superset
2913 of ADDR_REGS, e.g. FP_REGS, reject this reload. */
2914 case LABEL_REF:
2915 case SYMBOL_REF:
2916 case CONST:
2917 if (!legitimate_reload_constant_p (op))
2918 return NO_REGS;
2919 /* fallthrough */
2920 case PLUS:
2921 /* load address will be used. */
2922 if (reg_class_subset_p (ADDR_REGS, rclass))
2923 return ADDR_REGS;
2924 else
2925 return NO_REGS;
2926
2927 default:
2928 break;
2929 }
2930
2931 return rclass;
2932 }
2933
2934 /* Return true if ADDR is SYMBOL_REF + addend with addend being a
2935 multiple of ALIGNMENT and the SYMBOL_REF being naturally
2936 aligned. */
2937
2938 bool
2939 s390_check_symref_alignment (rtx addr, HOST_WIDE_INT alignment)
2940 {
2941 HOST_WIDE_INT addend;
2942 rtx symref;
2943
2944 if (!s390_symref_operand_p (addr, &symref, &addend))
2945 return false;
2946
2947 return (!SYMBOL_REF_NOT_NATURALLY_ALIGNED_P (symref)
2948 && !(addend & (alignment - 1)));
2949 }
2950
2951 /* ADDR is moved into REG using larl. If ADDR isn't a valid larl
2952 operand SCRATCH is used to reload the even part of the address and
2953 adding one. */
2954
2955 void
2956 s390_reload_larl_operand (rtx reg, rtx addr, rtx scratch)
2957 {
2958 HOST_WIDE_INT addend;
2959 rtx symref;
2960
2961 if (!s390_symref_operand_p (addr, &symref, &addend))
2962 gcc_unreachable ();
2963
2964 if (!(addend & 1))
2965 /* Easy case. The addend is even so larl will do fine. */
2966 emit_move_insn (reg, addr);
2967 else
2968 {
2969 /* We can leave the scratch register untouched if the target
2970 register is a valid base register. */
2971 if (REGNO (reg) < FIRST_PSEUDO_REGISTER
2972 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS)
2973 scratch = reg;
2974
2975 gcc_assert (REGNO (scratch) < FIRST_PSEUDO_REGISTER);
2976 gcc_assert (REGNO_REG_CLASS (REGNO (scratch)) == ADDR_REGS);
2977
2978 if (addend != 1)
2979 emit_move_insn (scratch,
2980 gen_rtx_CONST (Pmode,
2981 gen_rtx_PLUS (Pmode, symref,
2982 GEN_INT (addend - 1))));
2983 else
2984 emit_move_insn (scratch, symref);
2985
2986 /* Increment the address using la in order to avoid clobbering cc. */
2987 emit_move_insn (reg, gen_rtx_PLUS (Pmode, scratch, const1_rtx));
2988 }
2989 }
2990
2991 /* Generate what is necessary to move between REG and MEM using
2992 SCRATCH. The direction is given by TOMEM. */
2993
2994 void
2995 s390_reload_symref_address (rtx reg, rtx mem, rtx scratch, bool tomem)
2996 {
2997 /* Reload might have pulled a constant out of the literal pool.
2998 Force it back in. */
2999 if (CONST_INT_P (mem) || GET_CODE (mem) == CONST_DOUBLE
3000 || GET_CODE (mem) == CONST)
3001 mem = force_const_mem (GET_MODE (reg), mem);
3002
3003 gcc_assert (MEM_P (mem));
3004
3005 /* For a load from memory we can leave the scratch register
3006 untouched if the target register is a valid base register. */
3007 if (!tomem
3008 && REGNO (reg) < FIRST_PSEUDO_REGISTER
3009 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS
3010 && GET_MODE (reg) == GET_MODE (scratch))
3011 scratch = reg;
3012
3013 /* Load address into scratch register. Since we can't have a
3014 secondary reload for a secondary reload we have to cover the case
3015 where larl would need a secondary reload here as well. */
3016 s390_reload_larl_operand (scratch, XEXP (mem, 0), scratch);
3017
3018 /* Now we can use a standard load/store to do the move. */
3019 if (tomem)
3020 emit_move_insn (replace_equiv_address (mem, scratch), reg);
3021 else
3022 emit_move_insn (reg, replace_equiv_address (mem, scratch));
3023 }
3024
3025 /* Inform reload about cases where moving X with a mode MODE to a register in
3026 RCLASS requires an extra scratch or immediate register. Return the class
3027 needed for the immediate register. */
3028
3029 static reg_class_t
3030 s390_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
3031 enum machine_mode mode, secondary_reload_info *sri)
3032 {
3033 enum reg_class rclass = (enum reg_class) rclass_i;
3034
3035 /* Intermediate register needed. */
3036 if (reg_classes_intersect_p (CC_REGS, rclass))
3037 return GENERAL_REGS;
3038
3039 if (TARGET_Z10)
3040 {
3041 HOST_WIDE_INT offset;
3042 rtx symref;
3043
3044 /* On z10 several optimizer steps may generate larl operands with
3045 an odd addend. */
3046 if (in_p
3047 && s390_symref_operand_p (x, &symref, &offset)
3048 && mode == Pmode
3049 && !SYMBOL_REF_ALIGN1_P (symref)
3050 && (offset & 1) == 1)
3051 sri->icode = ((mode == DImode) ? CODE_FOR_reloaddi_larl_odd_addend_z10
3052 : CODE_FOR_reloadsi_larl_odd_addend_z10);
3053
3054 /* On z10 we need a scratch register when moving QI, TI or floating
3055 point mode values from or to a memory location with a SYMBOL_REF
3056 or if the symref addend of a SI or DI move is not aligned to the
3057 width of the access. */
3058 if (MEM_P (x)
3059 && s390_symref_operand_p (XEXP (x, 0), NULL, NULL)
3060 && (mode == QImode || mode == TImode || FLOAT_MODE_P (mode)
3061 || (!TARGET_ZARCH && mode == DImode)
3062 || ((mode == HImode || mode == SImode || mode == DImode)
3063 && (!s390_check_symref_alignment (XEXP (x, 0),
3064 GET_MODE_SIZE (mode))))))
3065 {
3066 #define __SECONDARY_RELOAD_CASE(M,m) \
3067 case M##mode: \
3068 if (TARGET_64BIT) \
3069 sri->icode = in_p ? CODE_FOR_reload##m##di_toreg_z10 : \
3070 CODE_FOR_reload##m##di_tomem_z10; \
3071 else \
3072 sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 : \
3073 CODE_FOR_reload##m##si_tomem_z10; \
3074 break;
3075
3076 switch (GET_MODE (x))
3077 {
3078 __SECONDARY_RELOAD_CASE (QI, qi);
3079 __SECONDARY_RELOAD_CASE (HI, hi);
3080 __SECONDARY_RELOAD_CASE (SI, si);
3081 __SECONDARY_RELOAD_CASE (DI, di);
3082 __SECONDARY_RELOAD_CASE (TI, ti);
3083 __SECONDARY_RELOAD_CASE (SF, sf);
3084 __SECONDARY_RELOAD_CASE (DF, df);
3085 __SECONDARY_RELOAD_CASE (TF, tf);
3086 __SECONDARY_RELOAD_CASE (SD, sd);
3087 __SECONDARY_RELOAD_CASE (DD, dd);
3088 __SECONDARY_RELOAD_CASE (TD, td);
3089
3090 default:
3091 gcc_unreachable ();
3092 }
3093 #undef __SECONDARY_RELOAD_CASE
3094 }
3095 }
3096
3097 /* We need a scratch register when loading a PLUS expression which
3098 is not a legitimate operand of the LOAD ADDRESS instruction. */
3099 if (in_p && s390_plus_operand (x, mode))
3100 sri->icode = (TARGET_64BIT ?
3101 CODE_FOR_reloaddi_plus : CODE_FOR_reloadsi_plus);
3102
3103 /* Performing a multiword move from or to memory we have to make sure the
3104 second chunk in memory is addressable without causing a displacement
3105 overflow. If that would be the case we calculate the address in
3106 a scratch register. */
3107 if (MEM_P (x)
3108 && GET_CODE (XEXP (x, 0)) == PLUS
3109 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3110 && !DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1))
3111 + GET_MODE_SIZE (mode) - 1))
3112 {
3113 /* For GENERAL_REGS a displacement overflow is no problem if occurring
3114 in a s_operand address since we may fallback to lm/stm. So we only
3115 have to care about overflows in the b+i+d case. */
3116 if ((reg_classes_intersect_p (GENERAL_REGS, rclass)
3117 && s390_class_max_nregs (GENERAL_REGS, mode) > 1
3118 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
3119 /* For FP_REGS no lm/stm is available so this check is triggered
3120 for displacement overflows in b+i+d and b+d like addresses. */
3121 || (reg_classes_intersect_p (FP_REGS, rclass)
3122 && s390_class_max_nregs (FP_REGS, mode) > 1))
3123 {
3124 if (in_p)
3125 sri->icode = (TARGET_64BIT ?
3126 CODE_FOR_reloaddi_nonoffmem_in :
3127 CODE_FOR_reloadsi_nonoffmem_in);
3128 else
3129 sri->icode = (TARGET_64BIT ?
3130 CODE_FOR_reloaddi_nonoffmem_out :
3131 CODE_FOR_reloadsi_nonoffmem_out);
3132 }
3133 }
3134
3135 /* A scratch address register is needed when a symbolic constant is
3136 copied to r0 compiling with -fPIC. In other cases the target
3137 register might be used as temporary (see legitimize_pic_address). */
3138 if (in_p && SYMBOLIC_CONST (x) && flag_pic == 2 && rclass != ADDR_REGS)
3139 sri->icode = (TARGET_64BIT ?
3140 CODE_FOR_reloaddi_PIC_addr :
3141 CODE_FOR_reloadsi_PIC_addr);
3142
3143 /* Either scratch or no register needed. */
3144 return NO_REGS;
3145 }
3146
3147 /* Generate code to load SRC, which is PLUS that is not a
3148 legitimate operand for the LA instruction, into TARGET.
3149 SCRATCH may be used as scratch register. */
3150
3151 void
3152 s390_expand_plus_operand (rtx target, rtx src,
3153 rtx scratch)
3154 {
3155 rtx sum1, sum2;
3156 struct s390_address ad;
3157
3158 /* src must be a PLUS; get its two operands. */
3159 gcc_assert (GET_CODE (src) == PLUS);
3160 gcc_assert (GET_MODE (src) == Pmode);
3161
3162 /* Check if any of the two operands is already scheduled
3163 for replacement by reload. This can happen e.g. when
3164 float registers occur in an address. */
3165 sum1 = find_replacement (&XEXP (src, 0));
3166 sum2 = find_replacement (&XEXP (src, 1));
3167 src = gen_rtx_PLUS (Pmode, sum1, sum2);
3168
3169 /* If the address is already strictly valid, there's nothing to do. */
3170 if (!s390_decompose_address (src, &ad)
3171 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
3172 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
3173 {
3174 /* Otherwise, one of the operands cannot be an address register;
3175 we reload its value into the scratch register. */
3176 if (true_regnum (sum1) < 1 || true_regnum (sum1) > 15)
3177 {
3178 emit_move_insn (scratch, sum1);
3179 sum1 = scratch;
3180 }
3181 if (true_regnum (sum2) < 1 || true_regnum (sum2) > 15)
3182 {
3183 emit_move_insn (scratch, sum2);
3184 sum2 = scratch;
3185 }
3186
3187 /* According to the way these invalid addresses are generated
3188 in reload.c, it should never happen (at least on s390) that
3189 *neither* of the PLUS components, after find_replacements
3190 was applied, is an address register. */
3191 if (sum1 == scratch && sum2 == scratch)
3192 {
3193 debug_rtx (src);
3194 gcc_unreachable ();
3195 }
3196
3197 src = gen_rtx_PLUS (Pmode, sum1, sum2);
3198 }
3199
3200 /* Emit the LOAD ADDRESS pattern. Note that reload of PLUS
3201 is only ever performed on addresses, so we can mark the
3202 sum as legitimate for LA in any case. */
3203 s390_load_address (target, src);
3204 }
3205
3206
3207 /* Return true if ADDR is a valid memory address.
3208 STRICT specifies whether strict register checking applies. */
3209
3210 static bool
3211 s390_legitimate_address_p (enum machine_mode mode, rtx addr, bool strict)
3212 {
3213 struct s390_address ad;
3214
3215 if (TARGET_Z10
3216 && larl_operand (addr, VOIDmode)
3217 && (mode == VOIDmode
3218 || s390_check_symref_alignment (addr, GET_MODE_SIZE (mode))))
3219 return true;
3220
3221 if (!s390_decompose_address (addr, &ad))
3222 return false;
3223
3224 if (strict)
3225 {
3226 if (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
3227 return false;
3228
3229 if (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx)))
3230 return false;
3231 }
3232 else
3233 {
3234 if (ad.base
3235 && !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
3236 || REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
3237 return false;
3238
3239 if (ad.indx
3240 && !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
3241 || REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
3242 return false;
3243 }
3244 return true;
3245 }
3246
3247 /* Return true if OP is a valid operand for the LA instruction.
3248 In 31-bit, we need to prove that the result is used as an
3249 address, as LA performs only a 31-bit addition. */
3250
3251 bool
3252 legitimate_la_operand_p (rtx op)
3253 {
3254 struct s390_address addr;
3255 if (!s390_decompose_address (op, &addr))
3256 return false;
3257
3258 return (TARGET_64BIT || addr.pointer);
3259 }
3260
3261 /* Return true if it is valid *and* preferable to use LA to
3262 compute the sum of OP1 and OP2. */
3263
3264 bool
3265 preferred_la_operand_p (rtx op1, rtx op2)
3266 {
3267 struct s390_address addr;
3268
3269 if (op2 != const0_rtx)
3270 op1 = gen_rtx_PLUS (Pmode, op1, op2);
3271
3272 if (!s390_decompose_address (op1, &addr))
3273 return false;
3274 if (addr.base && !REGNO_OK_FOR_BASE_P (REGNO (addr.base)))
3275 return false;
3276 if (addr.indx && !REGNO_OK_FOR_INDEX_P (REGNO (addr.indx)))
3277 return false;
3278
3279 /* Avoid LA instructions with index register on z196; it is
3280 preferable to use regular add instructions when possible. */
3281 if (addr.indx && s390_tune == PROCESSOR_2817_Z196)
3282 return false;
3283
3284 if (!TARGET_64BIT && !addr.pointer)
3285 return false;
3286
3287 if (addr.pointer)
3288 return true;
3289
3290 if ((addr.base && REG_P (addr.base) && REG_POINTER (addr.base))
3291 || (addr.indx && REG_P (addr.indx) && REG_POINTER (addr.indx)))
3292 return true;
3293
3294 return false;
3295 }
3296
3297 /* Emit a forced load-address operation to load SRC into DST.
3298 This will use the LOAD ADDRESS instruction even in situations
3299 where legitimate_la_operand_p (SRC) returns false. */
3300
3301 void
3302 s390_load_address (rtx dst, rtx src)
3303 {
3304 if (TARGET_64BIT)
3305 emit_move_insn (dst, src);
3306 else
3307 emit_insn (gen_force_la_31 (dst, src));
3308 }
3309
3310 /* Return a legitimate reference for ORIG (an address) using the
3311 register REG. If REG is 0, a new pseudo is generated.
3312
3313 There are two types of references that must be handled:
3314
3315 1. Global data references must load the address from the GOT, via
3316 the PIC reg. An insn is emitted to do this load, and the reg is
3317 returned.
3318
3319 2. Static data references, constant pool addresses, and code labels
3320 compute the address as an offset from the GOT, whose base is in
3321 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
3322 differentiate them from global data objects. The returned
3323 address is the PIC reg + an unspec constant.
3324
3325 TARGET_LEGITIMIZE_ADDRESS_P rejects symbolic references unless the PIC
3326 reg also appears in the address. */
3327
3328 rtx
3329 legitimize_pic_address (rtx orig, rtx reg)
3330 {
3331 rtx addr = orig;
3332 rtx new_rtx = orig;
3333 rtx base;
3334
3335 gcc_assert (!TLS_SYMBOLIC_CONST (addr));
3336
3337 if (GET_CODE (addr) == LABEL_REF
3338 || (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (addr)))
3339 {
3340 /* This is a local symbol. */
3341 if (TARGET_CPU_ZARCH && larl_operand (addr, VOIDmode))
3342 {
3343 /* Access local symbols PC-relative via LARL.
3344 This is the same as in the non-PIC case, so it is
3345 handled automatically ... */
3346 }
3347 else
3348 {
3349 /* Access local symbols relative to the GOT. */
3350
3351 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3352
3353 if (reload_in_progress || reload_completed)
3354 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3355
3356 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
3357 addr = gen_rtx_CONST (Pmode, addr);
3358 addr = force_const_mem (Pmode, addr);
3359 emit_move_insn (temp, addr);
3360
3361 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3362 if (reg != 0)
3363 {
3364 s390_load_address (reg, new_rtx);
3365 new_rtx = reg;
3366 }
3367 }
3368 }
3369 else if (GET_CODE (addr) == SYMBOL_REF)
3370 {
3371 if (reg == 0)
3372 reg = gen_reg_rtx (Pmode);
3373
3374 if (flag_pic == 1)
3375 {
3376 /* Assume GOT offset < 4k. This is handled the same way
3377 in both 31- and 64-bit code (@GOT). */
3378
3379 if (reload_in_progress || reload_completed)
3380 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3381
3382 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3383 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3384 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
3385 new_rtx = gen_const_mem (Pmode, new_rtx);
3386 emit_move_insn (reg, new_rtx);
3387 new_rtx = reg;
3388 }
3389 else if (TARGET_CPU_ZARCH)
3390 {
3391 /* If the GOT offset might be >= 4k, we determine the position
3392 of the GOT entry via a PC-relative LARL (@GOTENT). */
3393
3394 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3395
3396 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
3397 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
3398
3399 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
3400 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3401 emit_move_insn (temp, new_rtx);
3402
3403 new_rtx = gen_const_mem (Pmode, temp);
3404 emit_move_insn (reg, new_rtx);
3405 new_rtx = reg;
3406 }
3407 else
3408 {
3409 /* If the GOT offset might be >= 4k, we have to load it
3410 from the literal pool (@GOT). */
3411
3412 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3413
3414 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
3415 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
3416
3417 if (reload_in_progress || reload_completed)
3418 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3419
3420 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3421 addr = gen_rtx_CONST (Pmode, addr);
3422 addr = force_const_mem (Pmode, addr);
3423 emit_move_insn (temp, addr);
3424
3425 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3426 new_rtx = gen_const_mem (Pmode, new_rtx);
3427 emit_move_insn (reg, new_rtx);
3428 new_rtx = reg;
3429 }
3430 }
3431 else
3432 {
3433 if (GET_CODE (addr) == CONST)
3434 {
3435 addr = XEXP (addr, 0);
3436 if (GET_CODE (addr) == UNSPEC)
3437 {
3438 gcc_assert (XVECLEN (addr, 0) == 1);
3439 switch (XINT (addr, 1))
3440 {
3441 /* If someone moved a GOT-relative UNSPEC
3442 out of the literal pool, force them back in. */
3443 case UNSPEC_GOTOFF:
3444 case UNSPEC_PLTOFF:
3445 new_rtx = force_const_mem (Pmode, orig);
3446 break;
3447
3448 /* @GOT is OK as is if small. */
3449 case UNSPEC_GOT:
3450 if (flag_pic == 2)
3451 new_rtx = force_const_mem (Pmode, orig);
3452 break;
3453
3454 /* @GOTENT is OK as is. */
3455 case UNSPEC_GOTENT:
3456 break;
3457
3458 /* @PLT is OK as is on 64-bit, must be converted to
3459 GOT-relative @PLTOFF on 31-bit. */
3460 case UNSPEC_PLT:
3461 if (!TARGET_CPU_ZARCH)
3462 {
3463 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3464
3465 if (reload_in_progress || reload_completed)
3466 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3467
3468 addr = XVECEXP (addr, 0, 0);
3469 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr),
3470 UNSPEC_PLTOFF);
3471 addr = gen_rtx_CONST (Pmode, addr);
3472 addr = force_const_mem (Pmode, addr);
3473 emit_move_insn (temp, addr);
3474
3475 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3476 if (reg != 0)
3477 {
3478 s390_load_address (reg, new_rtx);
3479 new_rtx = reg;
3480 }
3481 }
3482 break;
3483
3484 /* Everything else cannot happen. */
3485 default:
3486 gcc_unreachable ();
3487 }
3488 }
3489 else
3490 gcc_assert (GET_CODE (addr) == PLUS);
3491 }
3492 if (GET_CODE (addr) == PLUS)
3493 {
3494 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
3495
3496 gcc_assert (!TLS_SYMBOLIC_CONST (op0));
3497 gcc_assert (!TLS_SYMBOLIC_CONST (op1));
3498
3499 /* Check first to see if this is a constant offset
3500 from a local symbol reference. */
3501 if ((GET_CODE (op0) == LABEL_REF
3502 || (GET_CODE (op0) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (op0)))
3503 && GET_CODE (op1) == CONST_INT)
3504 {
3505 if (TARGET_CPU_ZARCH
3506 && larl_operand (op0, VOIDmode)
3507 && INTVAL (op1) < (HOST_WIDE_INT)1 << 31
3508 && INTVAL (op1) >= -((HOST_WIDE_INT)1 << 31))
3509 {
3510 if (INTVAL (op1) & 1)
3511 {
3512 /* LARL can't handle odd offsets, so emit a
3513 pair of LARL and LA. */
3514 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3515
3516 if (!DISP_IN_RANGE (INTVAL (op1)))
3517 {
3518 HOST_WIDE_INT even = INTVAL (op1) - 1;
3519 op0 = gen_rtx_PLUS (Pmode, op0, GEN_INT (even));
3520 op0 = gen_rtx_CONST (Pmode, op0);
3521 op1 = const1_rtx;
3522 }
3523
3524 emit_move_insn (temp, op0);
3525 new_rtx = gen_rtx_PLUS (Pmode, temp, op1);
3526
3527 if (reg != 0)
3528 {
3529 s390_load_address (reg, new_rtx);
3530 new_rtx = reg;
3531 }
3532 }
3533 else
3534 {
3535 /* If the offset is even, we can just use LARL.
3536 This will happen automatically. */
3537 }
3538 }
3539 else
3540 {
3541 /* Access local symbols relative to the GOT. */
3542
3543 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3544
3545 if (reload_in_progress || reload_completed)
3546 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3547
3548 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
3549 UNSPEC_GOTOFF);
3550 addr = gen_rtx_PLUS (Pmode, addr, op1);
3551 addr = gen_rtx_CONST (Pmode, addr);
3552 addr = force_const_mem (Pmode, addr);
3553 emit_move_insn (temp, addr);
3554
3555 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3556 if (reg != 0)
3557 {
3558 s390_load_address (reg, new_rtx);
3559 new_rtx = reg;
3560 }
3561 }
3562 }
3563
3564 /* Now, check whether it is a GOT relative symbol plus offset
3565 that was pulled out of the literal pool. Force it back in. */
3566
3567 else if (GET_CODE (op0) == UNSPEC
3568 && GET_CODE (op1) == CONST_INT
3569 && XINT (op0, 1) == UNSPEC_GOTOFF)
3570 {
3571 gcc_assert (XVECLEN (op0, 0) == 1);
3572
3573 new_rtx = force_const_mem (Pmode, orig);
3574 }
3575
3576 /* Otherwise, compute the sum. */
3577 else
3578 {
3579 base = legitimize_pic_address (XEXP (addr, 0), reg);
3580 new_rtx = legitimize_pic_address (XEXP (addr, 1),
3581 base == reg ? NULL_RTX : reg);
3582 if (GET_CODE (new_rtx) == CONST_INT)
3583 new_rtx = plus_constant (Pmode, base, INTVAL (new_rtx));
3584 else
3585 {
3586 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
3587 {
3588 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
3589 new_rtx = XEXP (new_rtx, 1);
3590 }
3591 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
3592 }
3593
3594 if (GET_CODE (new_rtx) == CONST)
3595 new_rtx = XEXP (new_rtx, 0);
3596 new_rtx = force_operand (new_rtx, 0);
3597 }
3598 }
3599 }
3600 return new_rtx;
3601 }
3602
3603 /* Load the thread pointer into a register. */
3604
3605 rtx
3606 s390_get_thread_pointer (void)
3607 {
3608 rtx tp = gen_reg_rtx (Pmode);
3609
3610 emit_move_insn (tp, gen_rtx_REG (Pmode, TP_REGNUM));
3611 mark_reg_pointer (tp, BITS_PER_WORD);
3612
3613 return tp;
3614 }
3615
3616 /* Emit a tls call insn. The call target is the SYMBOL_REF stored
3617 in s390_tls_symbol which always refers to __tls_get_offset.
3618 The returned offset is written to RESULT_REG and an USE rtx is
3619 generated for TLS_CALL. */
3620
3621 static GTY(()) rtx s390_tls_symbol;
3622
3623 static void
3624 s390_emit_tls_call_insn (rtx result_reg, rtx tls_call)
3625 {
3626 rtx insn;
3627
3628 if (!flag_pic)
3629 emit_insn (s390_load_got ());
3630
3631 if (!s390_tls_symbol)
3632 s390_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_offset");
3633
3634 insn = s390_emit_call (s390_tls_symbol, tls_call, result_reg,
3635 gen_rtx_REG (Pmode, RETURN_REGNUM));
3636
3637 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), result_reg);
3638 RTL_CONST_CALL_P (insn) = 1;
3639 }
3640
3641 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3642 this (thread-local) address. REG may be used as temporary. */
3643
3644 static rtx
3645 legitimize_tls_address (rtx addr, rtx reg)
3646 {
3647 rtx new_rtx, tls_call, temp, base, r2, insn;
3648
3649 if (GET_CODE (addr) == SYMBOL_REF)
3650 switch (tls_symbolic_operand (addr))
3651 {
3652 case TLS_MODEL_GLOBAL_DYNAMIC:
3653 start_sequence ();
3654 r2 = gen_rtx_REG (Pmode, 2);
3655 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_TLSGD);
3656 new_rtx = gen_rtx_CONST (Pmode, tls_call);
3657 new_rtx = force_const_mem (Pmode, new_rtx);
3658 emit_move_insn (r2, new_rtx);
3659 s390_emit_tls_call_insn (r2, tls_call);
3660 insn = get_insns ();
3661 end_sequence ();
3662
3663 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3664 temp = gen_reg_rtx (Pmode);
3665 emit_libcall_block (insn, temp, r2, new_rtx);
3666
3667 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3668 if (reg != 0)
3669 {
3670 s390_load_address (reg, new_rtx);
3671 new_rtx = reg;
3672 }
3673 break;
3674
3675 case TLS_MODEL_LOCAL_DYNAMIC:
3676 start_sequence ();
3677 r2 = gen_rtx_REG (Pmode, 2);
3678 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM);
3679 new_rtx = gen_rtx_CONST (Pmode, tls_call);
3680 new_rtx = force_const_mem (Pmode, new_rtx);
3681 emit_move_insn (r2, new_rtx);
3682 s390_emit_tls_call_insn (r2, tls_call);
3683 insn = get_insns ();
3684 end_sequence ();
3685
3686 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM_NTPOFF);
3687 temp = gen_reg_rtx (Pmode);
3688 emit_libcall_block (insn, temp, r2, new_rtx);
3689
3690 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3691 base = gen_reg_rtx (Pmode);
3692 s390_load_address (base, new_rtx);
3693
3694 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_DTPOFF);
3695 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3696 new_rtx = force_const_mem (Pmode, new_rtx);
3697 temp = gen_reg_rtx (Pmode);
3698 emit_move_insn (temp, new_rtx);
3699
3700 new_rtx = gen_rtx_PLUS (Pmode, base, temp);
3701 if (reg != 0)
3702 {
3703 s390_load_address (reg, new_rtx);
3704 new_rtx = reg;
3705 }
3706 break;
3707
3708 case TLS_MODEL_INITIAL_EXEC:
3709 if (flag_pic == 1)
3710 {
3711 /* Assume GOT offset < 4k. This is handled the same way
3712 in both 31- and 64-bit code. */
3713
3714 if (reload_in_progress || reload_completed)
3715 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3716
3717 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3718 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3719 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
3720 new_rtx = gen_const_mem (Pmode, new_rtx);
3721 temp = gen_reg_rtx (Pmode);
3722 emit_move_insn (temp, new_rtx);
3723 }
3724 else if (TARGET_CPU_ZARCH)
3725 {
3726 /* If the GOT offset might be >= 4k, we determine the position
3727 of the GOT entry via a PC-relative LARL. */
3728
3729 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3730 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3731 temp = gen_reg_rtx (Pmode);
3732 emit_move_insn (temp, new_rtx);
3733
3734 new_rtx = gen_const_mem (Pmode, temp);
3735 temp = gen_reg_rtx (Pmode);
3736 emit_move_insn (temp, new_rtx);
3737 }
3738 else if (flag_pic)
3739 {
3740 /* If the GOT offset might be >= 4k, we have to load it
3741 from the literal pool. */
3742
3743 if (reload_in_progress || reload_completed)
3744 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3745
3746 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3747 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3748 new_rtx = force_const_mem (Pmode, new_rtx);
3749 temp = gen_reg_rtx (Pmode);
3750 emit_move_insn (temp, new_rtx);
3751
3752 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3753 new_rtx = gen_const_mem (Pmode, new_rtx);
3754
3755 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
3756 temp = gen_reg_rtx (Pmode);
3757 emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
3758 }
3759 else
3760 {
3761 /* In position-dependent code, load the absolute address of
3762 the GOT entry from the literal pool. */
3763
3764 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3765 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3766 new_rtx = force_const_mem (Pmode, new_rtx);
3767 temp = gen_reg_rtx (Pmode);
3768 emit_move_insn (temp, new_rtx);
3769
3770 new_rtx = temp;
3771 new_rtx = gen_const_mem (Pmode, new_rtx);
3772 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
3773 temp = gen_reg_rtx (Pmode);
3774 emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
3775 }
3776
3777 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3778 if (reg != 0)
3779 {
3780 s390_load_address (reg, new_rtx);
3781 new_rtx = reg;
3782 }
3783 break;
3784
3785 case TLS_MODEL_LOCAL_EXEC:
3786 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3787 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3788 new_rtx = force_const_mem (Pmode, new_rtx);
3789 temp = gen_reg_rtx (Pmode);
3790 emit_move_insn (temp, new_rtx);
3791
3792 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3793 if (reg != 0)
3794 {
3795 s390_load_address (reg, new_rtx);
3796 new_rtx = reg;
3797 }
3798 break;
3799
3800 default:
3801 gcc_unreachable ();
3802 }
3803
3804 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == UNSPEC)
3805 {
3806 switch (XINT (XEXP (addr, 0), 1))
3807 {
3808 case UNSPEC_INDNTPOFF:
3809 gcc_assert (TARGET_CPU_ZARCH);
3810 new_rtx = addr;
3811 break;
3812
3813 default:
3814 gcc_unreachable ();
3815 }
3816 }
3817
3818 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
3819 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
3820 {
3821 new_rtx = XEXP (XEXP (addr, 0), 0);
3822 if (GET_CODE (new_rtx) != SYMBOL_REF)
3823 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3824
3825 new_rtx = legitimize_tls_address (new_rtx, reg);
3826 new_rtx = plus_constant (Pmode, new_rtx,
3827 INTVAL (XEXP (XEXP (addr, 0), 1)));
3828 new_rtx = force_operand (new_rtx, 0);
3829 }
3830
3831 else
3832 gcc_unreachable (); /* for now ... */
3833
3834 return new_rtx;
3835 }
3836
3837 /* Emit insns making the address in operands[1] valid for a standard
3838 move to operands[0]. operands[1] is replaced by an address which
3839 should be used instead of the former RTX to emit the move
3840 pattern. */
3841
3842 void
3843 emit_symbolic_move (rtx *operands)
3844 {
3845 rtx temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
3846
3847 if (GET_CODE (operands[0]) == MEM)
3848 operands[1] = force_reg (Pmode, operands[1]);
3849 else if (TLS_SYMBOLIC_CONST (operands[1]))
3850 operands[1] = legitimize_tls_address (operands[1], temp);
3851 else if (flag_pic)
3852 operands[1] = legitimize_pic_address (operands[1], temp);
3853 }
3854
3855 /* Try machine-dependent ways of modifying an illegitimate address X
3856 to be legitimate. If we find one, return the new, valid address.
3857
3858 OLDX is the address as it was before break_out_memory_refs was called.
3859 In some cases it is useful to look at this to decide what needs to be done.
3860
3861 MODE is the mode of the operand pointed to by X.
3862
3863 When -fpic is used, special handling is needed for symbolic references.
3864 See comments by legitimize_pic_address for details. */
3865
3866 static rtx
3867 s390_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
3868 enum machine_mode mode ATTRIBUTE_UNUSED)
3869 {
3870 rtx constant_term = const0_rtx;
3871
3872 if (TLS_SYMBOLIC_CONST (x))
3873 {
3874 x = legitimize_tls_address (x, 0);
3875
3876 if (s390_legitimate_address_p (mode, x, FALSE))
3877 return x;
3878 }
3879 else if (GET_CODE (x) == PLUS
3880 && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
3881 || TLS_SYMBOLIC_CONST (XEXP (x, 1))))
3882 {
3883 return x;
3884 }
3885 else if (flag_pic)
3886 {
3887 if (SYMBOLIC_CONST (x)
3888 || (GET_CODE (x) == PLUS
3889 && (SYMBOLIC_CONST (XEXP (x, 0))
3890 || SYMBOLIC_CONST (XEXP (x, 1)))))
3891 x = legitimize_pic_address (x, 0);
3892
3893 if (s390_legitimate_address_p (mode, x, FALSE))
3894 return x;
3895 }
3896
3897 x = eliminate_constant_term (x, &constant_term);
3898
3899 /* Optimize loading of large displacements by splitting them
3900 into the multiple of 4K and the rest; this allows the
3901 former to be CSE'd if possible.
3902
3903 Don't do this if the displacement is added to a register
3904 pointing into the stack frame, as the offsets will
3905 change later anyway. */
3906
3907 if (GET_CODE (constant_term) == CONST_INT
3908 && !TARGET_LONG_DISPLACEMENT
3909 && !DISP_IN_RANGE (INTVAL (constant_term))
3910 && !(REG_P (x) && REGNO_PTR_FRAME_P (REGNO (x))))
3911 {
3912 HOST_WIDE_INT lower = INTVAL (constant_term) & 0xfff;
3913 HOST_WIDE_INT upper = INTVAL (constant_term) ^ lower;
3914
3915 rtx temp = gen_reg_rtx (Pmode);
3916 rtx val = force_operand (GEN_INT (upper), temp);
3917 if (val != temp)
3918 emit_move_insn (temp, val);
3919
3920 x = gen_rtx_PLUS (Pmode, x, temp);
3921 constant_term = GEN_INT (lower);
3922 }
3923
3924 if (GET_CODE (x) == PLUS)
3925 {
3926 if (GET_CODE (XEXP (x, 0)) == REG)
3927 {
3928 rtx temp = gen_reg_rtx (Pmode);
3929 rtx val = force_operand (XEXP (x, 1), temp);
3930 if (val != temp)
3931 emit_move_insn (temp, val);
3932
3933 x = gen_rtx_PLUS (Pmode, XEXP (x, 0), temp);
3934 }
3935
3936 else if (GET_CODE (XEXP (x, 1)) == REG)
3937 {
3938 rtx temp = gen_reg_rtx (Pmode);
3939 rtx val = force_operand (XEXP (x, 0), temp);
3940 if (val != temp)
3941 emit_move_insn (temp, val);
3942
3943 x = gen_rtx_PLUS (Pmode, temp, XEXP (x, 1));
3944 }
3945 }
3946
3947 if (constant_term != const0_rtx)
3948 x = gen_rtx_PLUS (Pmode, x, constant_term);
3949
3950 return x;
3951 }
3952
3953 /* Try a machine-dependent way of reloading an illegitimate address AD
3954 operand. If we find one, push the reload and return the new address.
3955
3956 MODE is the mode of the enclosing MEM. OPNUM is the operand number
3957 and TYPE is the reload type of the current reload. */
3958
3959 rtx
3960 legitimize_reload_address (rtx ad, enum machine_mode mode ATTRIBUTE_UNUSED,
3961 int opnum, int type)
3962 {
3963 if (!optimize || TARGET_LONG_DISPLACEMENT)
3964 return NULL_RTX;
3965
3966 if (GET_CODE (ad) == PLUS)
3967 {
3968 rtx tem = simplify_binary_operation (PLUS, Pmode,
3969 XEXP (ad, 0), XEXP (ad, 1));
3970 if (tem)
3971 ad = tem;
3972 }
3973
3974 if (GET_CODE (ad) == PLUS
3975 && GET_CODE (XEXP (ad, 0)) == REG
3976 && GET_CODE (XEXP (ad, 1)) == CONST_INT
3977 && !DISP_IN_RANGE (INTVAL (XEXP (ad, 1))))
3978 {
3979 HOST_WIDE_INT lower = INTVAL (XEXP (ad, 1)) & 0xfff;
3980 HOST_WIDE_INT upper = INTVAL (XEXP (ad, 1)) ^ lower;
3981 rtx cst, tem, new_rtx;
3982
3983 cst = GEN_INT (upper);
3984 if (!legitimate_reload_constant_p (cst))
3985 cst = force_const_mem (Pmode, cst);
3986
3987 tem = gen_rtx_PLUS (Pmode, XEXP (ad, 0), cst);
3988 new_rtx = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));
3989
3990 push_reload (XEXP (tem, 1), 0, &XEXP (tem, 1), 0,
3991 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
3992 opnum, (enum reload_type) type);
3993 return new_rtx;
3994 }
3995
3996 return NULL_RTX;
3997 }
3998
3999 /* Emit code to move LEN bytes from DST to SRC. */
4000
4001 bool
4002 s390_expand_movmem (rtx dst, rtx src, rtx len)
4003 {
4004 /* When tuning for z10 or higher we rely on the Glibc functions to
4005 do the right thing. Only for constant lengths below 64k we will
4006 generate inline code. */
4007 if (s390_tune >= PROCESSOR_2097_Z10
4008 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
4009 return false;
4010
4011 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
4012 {
4013 if (INTVAL (len) > 0)
4014 emit_insn (gen_movmem_short (dst, src, GEN_INT (INTVAL (len) - 1)));
4015 }
4016
4017 else if (TARGET_MVCLE)
4018 {
4019 emit_insn (gen_movmem_long (dst, src, convert_to_mode (Pmode, len, 1)));
4020 }
4021
4022 else
4023 {
4024 rtx dst_addr, src_addr, count, blocks, temp;
4025 rtx loop_start_label = gen_label_rtx ();
4026 rtx loop_end_label = gen_label_rtx ();
4027 rtx end_label = gen_label_rtx ();
4028 enum machine_mode mode;
4029
4030 mode = GET_MODE (len);
4031 if (mode == VOIDmode)
4032 mode = Pmode;
4033
4034 dst_addr = gen_reg_rtx (Pmode);
4035 src_addr = gen_reg_rtx (Pmode);
4036 count = gen_reg_rtx (mode);
4037 blocks = gen_reg_rtx (mode);
4038
4039 convert_move (count, len, 1);
4040 emit_cmp_and_jump_insns (count, const0_rtx,
4041 EQ, NULL_RTX, mode, 1, end_label);
4042
4043 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
4044 emit_move_insn (src_addr, force_operand (XEXP (src, 0), NULL_RTX));
4045 dst = change_address (dst, VOIDmode, dst_addr);
4046 src = change_address (src, VOIDmode, src_addr);
4047
4048 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4049 OPTAB_DIRECT);
4050 if (temp != count)
4051 emit_move_insn (count, temp);
4052
4053 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4054 OPTAB_DIRECT);
4055 if (temp != blocks)
4056 emit_move_insn (blocks, temp);
4057
4058 emit_cmp_and_jump_insns (blocks, const0_rtx,
4059 EQ, NULL_RTX, mode, 1, loop_end_label);
4060
4061 emit_label (loop_start_label);
4062
4063 if (TARGET_Z10
4064 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 768))
4065 {
4066 rtx prefetch;
4067
4068 /* Issue a read prefetch for the +3 cache line. */
4069 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, src_addr, GEN_INT (768)),
4070 const0_rtx, const0_rtx);
4071 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4072 emit_insn (prefetch);
4073
4074 /* Issue a write prefetch for the +3 cache line. */
4075 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (768)),
4076 const1_rtx, const0_rtx);
4077 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4078 emit_insn (prefetch);
4079 }
4080
4081 emit_insn (gen_movmem_short (dst, src, GEN_INT (255)));
4082 s390_load_address (dst_addr,
4083 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
4084 s390_load_address (src_addr,
4085 gen_rtx_PLUS (Pmode, src_addr, GEN_INT (256)));
4086
4087 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4088 OPTAB_DIRECT);
4089 if (temp != blocks)
4090 emit_move_insn (blocks, temp);
4091
4092 emit_cmp_and_jump_insns (blocks, const0_rtx,
4093 EQ, NULL_RTX, mode, 1, loop_end_label);
4094
4095 emit_jump (loop_start_label);
4096 emit_label (loop_end_label);
4097
4098 emit_insn (gen_movmem_short (dst, src,
4099 convert_to_mode (Pmode, count, 1)));
4100 emit_label (end_label);
4101 }
4102 return true;
4103 }
4104
4105 /* Emit code to set LEN bytes at DST to VAL.
4106 Make use of clrmem if VAL is zero. */
4107
4108 void
4109 s390_expand_setmem (rtx dst, rtx len, rtx val)
4110 {
4111 if (GET_CODE (len) == CONST_INT && INTVAL (len) == 0)
4112 return;
4113
4114 gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);
4115
4116 if (GET_CODE (len) == CONST_INT && INTVAL (len) > 0 && INTVAL (len) <= 257)
4117 {
4118 if (val == const0_rtx && INTVAL (len) <= 256)
4119 emit_insn (gen_clrmem_short (dst, GEN_INT (INTVAL (len) - 1)));
4120 else
4121 {
4122 /* Initialize memory by storing the first byte. */
4123 emit_move_insn (adjust_address (dst, QImode, 0), val);
4124
4125 if (INTVAL (len) > 1)
4126 {
4127 /* Initiate 1 byte overlap move.
4128 The first byte of DST is propagated through DSTP1.
4129 Prepare a movmem for: DST+1 = DST (length = LEN - 1).
4130 DST is set to size 1 so the rest of the memory location
4131 does not count as source operand. */
4132 rtx dstp1 = adjust_address (dst, VOIDmode, 1);
4133 set_mem_size (dst, 1);
4134
4135 emit_insn (gen_movmem_short (dstp1, dst,
4136 GEN_INT (INTVAL (len) - 2)));
4137 }
4138 }
4139 }
4140
4141 else if (TARGET_MVCLE)
4142 {
4143 val = force_not_mem (convert_modes (Pmode, QImode, val, 1));
4144 emit_insn (gen_setmem_long (dst, convert_to_mode (Pmode, len, 1), val));
4145 }
4146
4147 else
4148 {
4149 rtx dst_addr, count, blocks, temp, dstp1 = NULL_RTX;
4150 rtx loop_start_label = gen_label_rtx ();
4151 rtx loop_end_label = gen_label_rtx ();
4152 rtx end_label = gen_label_rtx ();
4153 enum machine_mode mode;
4154
4155 mode = GET_MODE (len);
4156 if (mode == VOIDmode)
4157 mode = Pmode;
4158
4159 dst_addr = gen_reg_rtx (Pmode);
4160 count = gen_reg_rtx (mode);
4161 blocks = gen_reg_rtx (mode);
4162
4163 convert_move (count, len, 1);
4164 emit_cmp_and_jump_insns (count, const0_rtx,
4165 EQ, NULL_RTX, mode, 1, end_label);
4166
4167 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
4168 dst = change_address (dst, VOIDmode, dst_addr);
4169
4170 if (val == const0_rtx)
4171 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4172 OPTAB_DIRECT);
4173 else
4174 {
4175 dstp1 = adjust_address (dst, VOIDmode, 1);
4176 set_mem_size (dst, 1);
4177
4178 /* Initialize memory by storing the first byte. */
4179 emit_move_insn (adjust_address (dst, QImode, 0), val);
4180
4181 /* If count is 1 we are done. */
4182 emit_cmp_and_jump_insns (count, const1_rtx,
4183 EQ, NULL_RTX, mode, 1, end_label);
4184
4185 temp = expand_binop (mode, add_optab, count, GEN_INT (-2), count, 1,
4186 OPTAB_DIRECT);
4187 }
4188 if (temp != count)
4189 emit_move_insn (count, temp);
4190
4191 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4192 OPTAB_DIRECT);
4193 if (temp != blocks)
4194 emit_move_insn (blocks, temp);
4195
4196 emit_cmp_and_jump_insns (blocks, const0_rtx,
4197 EQ, NULL_RTX, mode, 1, loop_end_label);
4198
4199 emit_label (loop_start_label);
4200
4201 if (TARGET_Z10
4202 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 1024))
4203 {
4204 /* Issue a write prefetch for the +4 cache line. */
4205 rtx prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr,
4206 GEN_INT (1024)),
4207 const1_rtx, const0_rtx);
4208 emit_insn (prefetch);
4209 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4210 }
4211
4212 if (val == const0_rtx)
4213 emit_insn (gen_clrmem_short (dst, GEN_INT (255)));
4214 else
4215 emit_insn (gen_movmem_short (dstp1, dst, GEN_INT (255)));
4216 s390_load_address (dst_addr,
4217 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
4218
4219 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4220 OPTAB_DIRECT);
4221 if (temp != blocks)
4222 emit_move_insn (blocks, temp);
4223
4224 emit_cmp_and_jump_insns (blocks, const0_rtx,
4225 EQ, NULL_RTX, mode, 1, loop_end_label);
4226
4227 emit_jump (loop_start_label);
4228 emit_label (loop_end_label);
4229
4230 if (val == const0_rtx)
4231 emit_insn (gen_clrmem_short (dst, convert_to_mode (Pmode, count, 1)));
4232 else
4233 emit_insn (gen_movmem_short (dstp1, dst, convert_to_mode (Pmode, count, 1)));
4234 emit_label (end_label);
4235 }
4236 }
4237
4238 /* Emit code to compare LEN bytes at OP0 with those at OP1,
4239 and return the result in TARGET. */
4240
4241 bool
4242 s390_expand_cmpmem (rtx target, rtx op0, rtx op1, rtx len)
4243 {
4244 rtx ccreg = gen_rtx_REG (CCUmode, CC_REGNUM);
4245 rtx tmp;
4246
4247 /* When tuning for z10 or higher we rely on the Glibc functions to
4248 do the right thing. Only for constant lengths below 64k we will
4249 generate inline code. */
4250 if (s390_tune >= PROCESSOR_2097_Z10
4251 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
4252 return false;
4253
4254 /* As the result of CMPINT is inverted compared to what we need,
4255 we have to swap the operands. */
4256 tmp = op0; op0 = op1; op1 = tmp;
4257
4258 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
4259 {
4260 if (INTVAL (len) > 0)
4261 {
4262 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (INTVAL (len) - 1)));
4263 emit_insn (gen_cmpint (target, ccreg));
4264 }
4265 else
4266 emit_move_insn (target, const0_rtx);
4267 }
4268 else if (TARGET_MVCLE)
4269 {
4270 emit_insn (gen_cmpmem_long (op0, op1, convert_to_mode (Pmode, len, 1)));
4271 emit_insn (gen_cmpint (target, ccreg));
4272 }
4273 else
4274 {
4275 rtx addr0, addr1, count, blocks, temp;
4276 rtx loop_start_label = gen_label_rtx ();
4277 rtx loop_end_label = gen_label_rtx ();
4278 rtx end_label = gen_label_rtx ();
4279 enum machine_mode mode;
4280
4281 mode = GET_MODE (len);
4282 if (mode == VOIDmode)
4283 mode = Pmode;
4284
4285 addr0 = gen_reg_rtx (Pmode);
4286 addr1 = gen_reg_rtx (Pmode);
4287 count = gen_reg_rtx (mode);
4288 blocks = gen_reg_rtx (mode);
4289
4290 convert_move (count, len, 1);
4291 emit_cmp_and_jump_insns (count, const0_rtx,
4292 EQ, NULL_RTX, mode, 1, end_label);
4293
4294 emit_move_insn (addr0, force_operand (XEXP (op0, 0), NULL_RTX));
4295 emit_move_insn (addr1, force_operand (XEXP (op1, 0), NULL_RTX));
4296 op0 = change_address (op0, VOIDmode, addr0);
4297 op1 = change_address (op1, VOIDmode, addr1);
4298
4299 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4300 OPTAB_DIRECT);
4301 if (temp != count)
4302 emit_move_insn (count, temp);
4303
4304 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4305 OPTAB_DIRECT);
4306 if (temp != blocks)
4307 emit_move_insn (blocks, temp);
4308
4309 emit_cmp_and_jump_insns (blocks, const0_rtx,
4310 EQ, NULL_RTX, mode, 1, loop_end_label);
4311
4312 emit_label (loop_start_label);
4313
4314 if (TARGET_Z10
4315 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 512))
4316 {
4317 rtx prefetch;
4318
4319 /* Issue a read prefetch for the +2 cache line of operand 1. */
4320 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr0, GEN_INT (512)),
4321 const0_rtx, const0_rtx);
4322 emit_insn (prefetch);
4323 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4324
4325 /* Issue a read prefetch for the +2 cache line of operand 2. */
4326 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr1, GEN_INT (512)),
4327 const0_rtx, const0_rtx);
4328 emit_insn (prefetch);
4329 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4330 }
4331
4332 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (255)));
4333 temp = gen_rtx_NE (VOIDmode, ccreg, const0_rtx);
4334 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
4335 gen_rtx_LABEL_REF (VOIDmode, end_label), pc_rtx);
4336 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
4337 emit_jump_insn (temp);
4338
4339 s390_load_address (addr0,
4340 gen_rtx_PLUS (Pmode, addr0, GEN_INT (256)));
4341 s390_load_address (addr1,
4342 gen_rtx_PLUS (Pmode, addr1, GEN_INT (256)));
4343
4344 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4345 OPTAB_DIRECT);
4346 if (temp != blocks)
4347 emit_move_insn (blocks, temp);
4348
4349 emit_cmp_and_jump_insns (blocks, const0_rtx,
4350 EQ, NULL_RTX, mode, 1, loop_end_label);
4351
4352 emit_jump (loop_start_label);
4353 emit_label (loop_end_label);
4354
4355 emit_insn (gen_cmpmem_short (op0, op1,
4356 convert_to_mode (Pmode, count, 1)));
4357 emit_label (end_label);
4358
4359 emit_insn (gen_cmpint (target, ccreg));
4360 }
4361 return true;
4362 }
4363
4364
4365 /* Expand conditional increment or decrement using alc/slb instructions.
4366 Should generate code setting DST to either SRC or SRC + INCREMENT,
4367 depending on the result of the comparison CMP_OP0 CMP_CODE CMP_OP1.
4368 Returns true if successful, false otherwise.
4369
4370 That makes it possible to implement some if-constructs without jumps e.g.:
4371 (borrow = CC0 | CC1 and carry = CC2 | CC3)
4372 unsigned int a, b, c;
4373 if (a < b) c++; -> CCU b > a -> CC2; c += carry;
4374 if (a < b) c--; -> CCL3 a - b -> borrow; c -= borrow;
4375 if (a <= b) c++; -> CCL3 b - a -> borrow; c += carry;
4376 if (a <= b) c--; -> CCU a <= b -> borrow; c -= borrow;
4377
4378 Checks for EQ and NE with a nonzero value need an additional xor e.g.:
4379 if (a == b) c++; -> CCL3 a ^= b; 0 - a -> borrow; c += carry;
4380 if (a == b) c--; -> CCU a ^= b; a <= 0 -> CC0 | CC1; c -= borrow;
4381 if (a != b) c++; -> CCU a ^= b; a > 0 -> CC2; c += carry;
4382 if (a != b) c--; -> CCL3 a ^= b; 0 - a -> borrow; c -= borrow; */
4383
4384 bool
4385 s390_expand_addcc (enum rtx_code cmp_code, rtx cmp_op0, rtx cmp_op1,
4386 rtx dst, rtx src, rtx increment)
4387 {
4388 enum machine_mode cmp_mode;
4389 enum machine_mode cc_mode;
4390 rtx op_res;
4391 rtx insn;
4392 rtvec p;
4393 int ret;
4394
4395 if ((GET_MODE (cmp_op0) == SImode || GET_MODE (cmp_op0) == VOIDmode)
4396 && (GET_MODE (cmp_op1) == SImode || GET_MODE (cmp_op1) == VOIDmode))
4397 cmp_mode = SImode;
4398 else if ((GET_MODE (cmp_op0) == DImode || GET_MODE (cmp_op0) == VOIDmode)
4399 && (GET_MODE (cmp_op1) == DImode || GET_MODE (cmp_op1) == VOIDmode))
4400 cmp_mode = DImode;
4401 else
4402 return false;
4403
4404 /* Try ADD LOGICAL WITH CARRY. */
4405 if (increment == const1_rtx)
4406 {
4407 /* Determine CC mode to use. */
4408 if (cmp_code == EQ || cmp_code == NE)
4409 {
4410 if (cmp_op1 != const0_rtx)
4411 {
4412 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
4413 NULL_RTX, 0, OPTAB_WIDEN);
4414 cmp_op1 = const0_rtx;
4415 }
4416
4417 cmp_code = cmp_code == EQ ? LEU : GTU;
4418 }
4419
4420 if (cmp_code == LTU || cmp_code == LEU)
4421 {
4422 rtx tem = cmp_op0;
4423 cmp_op0 = cmp_op1;
4424 cmp_op1 = tem;
4425 cmp_code = swap_condition (cmp_code);
4426 }
4427
4428 switch (cmp_code)
4429 {
4430 case GTU:
4431 cc_mode = CCUmode;
4432 break;
4433
4434 case GEU:
4435 cc_mode = CCL3mode;
4436 break;
4437
4438 default:
4439 return false;
4440 }
4441
4442 /* Emit comparison instruction pattern. */
4443 if (!register_operand (cmp_op0, cmp_mode))
4444 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4445
4446 insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
4447 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4448 /* We use insn_invalid_p here to add clobbers if required. */
4449 ret = insn_invalid_p (emit_insn (insn), false);
4450 gcc_assert (!ret);
4451
4452 /* Emit ALC instruction pattern. */
4453 op_res = gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4454 gen_rtx_REG (cc_mode, CC_REGNUM),
4455 const0_rtx);
4456
4457 if (src != const0_rtx)
4458 {
4459 if (!register_operand (src, GET_MODE (dst)))
4460 src = force_reg (GET_MODE (dst), src);
4461
4462 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, src);
4463 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, const0_rtx);
4464 }
4465
4466 p = rtvec_alloc (2);
4467 RTVEC_ELT (p, 0) =
4468 gen_rtx_SET (VOIDmode, dst, op_res);
4469 RTVEC_ELT (p, 1) =
4470 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4471 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
4472
4473 return true;
4474 }
4475
4476 /* Try SUBTRACT LOGICAL WITH BORROW. */
4477 if (increment == constm1_rtx)
4478 {
4479 /* Determine CC mode to use. */
4480 if (cmp_code == EQ || cmp_code == NE)
4481 {
4482 if (cmp_op1 != const0_rtx)
4483 {
4484 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
4485 NULL_RTX, 0, OPTAB_WIDEN);
4486 cmp_op1 = const0_rtx;
4487 }
4488
4489 cmp_code = cmp_code == EQ ? LEU : GTU;
4490 }
4491
4492 if (cmp_code == GTU || cmp_code == GEU)
4493 {
4494 rtx tem = cmp_op0;
4495 cmp_op0 = cmp_op1;
4496 cmp_op1 = tem;
4497 cmp_code = swap_condition (cmp_code);
4498 }
4499
4500 switch (cmp_code)
4501 {
4502 case LEU:
4503 cc_mode = CCUmode;
4504 break;
4505
4506 case LTU:
4507 cc_mode = CCL3mode;
4508 break;
4509
4510 default:
4511 return false;
4512 }
4513
4514 /* Emit comparison instruction pattern. */
4515 if (!register_operand (cmp_op0, cmp_mode))
4516 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4517
4518 insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
4519 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4520 /* We use insn_invalid_p here to add clobbers if required. */
4521 ret = insn_invalid_p (emit_insn (insn), false);
4522 gcc_assert (!ret);
4523
4524 /* Emit SLB instruction pattern. */
4525 if (!register_operand (src, GET_MODE (dst)))
4526 src = force_reg (GET_MODE (dst), src);
4527
4528 op_res = gen_rtx_MINUS (GET_MODE (dst),
4529 gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
4530 gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4531 gen_rtx_REG (cc_mode, CC_REGNUM),
4532 const0_rtx));
4533 p = rtvec_alloc (2);
4534 RTVEC_ELT (p, 0) =
4535 gen_rtx_SET (VOIDmode, dst, op_res);
4536 RTVEC_ELT (p, 1) =
4537 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4538 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
4539
4540 return true;
4541 }
4542
4543 return false;
4544 }
4545
4546 /* Expand code for the insv template. Return true if successful. */
4547
4548 bool
4549 s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
4550 {
4551 int bitsize = INTVAL (op1);
4552 int bitpos = INTVAL (op2);
4553 enum machine_mode mode = GET_MODE (dest);
4554 enum machine_mode smode;
4555 int smode_bsize, mode_bsize;
4556 rtx op, clobber;
4557
4558 /* Generate INSERT IMMEDIATE (IILL et al). */
4559 /* (set (ze (reg)) (const_int)). */
4560 if (TARGET_ZARCH
4561 && register_operand (dest, word_mode)
4562 && (bitpos % 16) == 0
4563 && (bitsize % 16) == 0
4564 && const_int_operand (src, VOIDmode))
4565 {
4566 HOST_WIDE_INT val = INTVAL (src);
4567 int regpos = bitpos + bitsize;
4568
4569 while (regpos > bitpos)
4570 {
4571 enum machine_mode putmode;
4572 int putsize;
4573
4574 if (TARGET_EXTIMM && (regpos % 32 == 0) && (regpos >= bitpos + 32))
4575 putmode = SImode;
4576 else
4577 putmode = HImode;
4578
4579 putsize = GET_MODE_BITSIZE (putmode);
4580 regpos -= putsize;
4581 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
4582 GEN_INT (putsize),
4583 GEN_INT (regpos)),
4584 gen_int_mode (val, putmode));
4585 val >>= putsize;
4586 }
4587 gcc_assert (regpos == bitpos);
4588 return true;
4589 }
4590
4591 smode = smallest_mode_for_size (bitsize, MODE_INT);
4592 smode_bsize = GET_MODE_BITSIZE (smode);
4593 mode_bsize = GET_MODE_BITSIZE (mode);
4594
4595 /* Generate STORE CHARACTERS UNDER MASK (STCM et al). */
4596 if (bitpos == 0
4597 && (bitsize % BITS_PER_UNIT) == 0
4598 && MEM_P (dest)
4599 && (register_operand (src, word_mode)
4600 || const_int_operand (src, VOIDmode)))
4601 {
4602 /* Emit standard pattern if possible. */
4603 if (smode_bsize == bitsize)
4604 {
4605 emit_move_insn (adjust_address (dest, smode, 0),
4606 gen_lowpart (smode, src));
4607 return true;
4608 }
4609
4610 /* (set (ze (mem)) (const_int)). */
4611 else if (const_int_operand (src, VOIDmode))
4612 {
4613 int size = bitsize / BITS_PER_UNIT;
4614 rtx src_mem = adjust_address (force_const_mem (word_mode, src),
4615 BLKmode,
4616 UNITS_PER_WORD - size);
4617
4618 dest = adjust_address (dest, BLKmode, 0);
4619 set_mem_size (dest, size);
4620 s390_expand_movmem (dest, src_mem, GEN_INT (size));
4621 return true;
4622 }
4623
4624 /* (set (ze (mem)) (reg)). */
4625 else if (register_operand (src, word_mode))
4626 {
4627 if (bitsize <= 32)
4628 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, op1,
4629 const0_rtx), src);
4630 else
4631 {
4632 /* Emit st,stcmh sequence. */
4633 int stcmh_width = bitsize - 32;
4634 int size = stcmh_width / BITS_PER_UNIT;
4635
4636 emit_move_insn (adjust_address (dest, SImode, size),
4637 gen_lowpart (SImode, src));
4638 set_mem_size (dest, size);
4639 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
4640 GEN_INT (stcmh_width),
4641 const0_rtx),
4642 gen_rtx_LSHIFTRT (word_mode, src, GEN_INT (32)));
4643 }
4644 return true;
4645 }
4646 }
4647
4648 /* Generate INSERT CHARACTERS UNDER MASK (IC, ICM et al). */
4649 if ((bitpos % BITS_PER_UNIT) == 0
4650 && (bitsize % BITS_PER_UNIT) == 0
4651 && (bitpos & 32) == ((bitpos + bitsize - 1) & 32)
4652 && MEM_P (src)
4653 && (mode == DImode || mode == SImode)
4654 && register_operand (dest, mode))
4655 {
4656 /* Emit a strict_low_part pattern if possible. */
4657 if (smode_bsize == bitsize && bitpos == mode_bsize - smode_bsize)
4658 {
4659 op = gen_rtx_STRICT_LOW_PART (VOIDmode, gen_lowpart (smode, dest));
4660 op = gen_rtx_SET (VOIDmode, op, gen_lowpart (smode, src));
4661 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4662 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
4663 return true;
4664 }
4665
4666 /* ??? There are more powerful versions of ICM that are not
4667 completely represented in the md file. */
4668 }
4669
4670 /* For z10, generate ROTATE THEN INSERT SELECTED BITS (RISBG et al). */
4671 if (TARGET_Z10 && (mode == DImode || mode == SImode))
4672 {
4673 enum machine_mode mode_s = GET_MODE (src);
4674
4675 if (mode_s == VOIDmode)
4676 {
4677 /* Assume const_int etc already in the proper mode. */
4678 src = force_reg (mode, src);
4679 }
4680 else if (mode_s != mode)
4681 {
4682 gcc_assert (GET_MODE_BITSIZE (mode_s) >= bitsize);
4683 src = force_reg (mode_s, src);
4684 src = gen_lowpart (mode, src);
4685 }
4686
4687 op = gen_rtx_SET (mode,
4688 gen_rtx_ZERO_EXTRACT (mode, dest, op1, op2),
4689 src);
4690 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4691 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
4692
4693 return true;
4694 }
4695
4696 return false;
4697 }
4698
4699 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic which returns a
4700 register that holds VAL of mode MODE shifted by COUNT bits. */
4701
4702 static inline rtx
4703 s390_expand_mask_and_shift (rtx val, enum machine_mode mode, rtx count)
4704 {
4705 val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
4706 NULL_RTX, 1, OPTAB_DIRECT);
4707 return expand_simple_binop (SImode, ASHIFT, val, count,
4708 NULL_RTX, 1, OPTAB_DIRECT);
4709 }
4710
4711 /* Structure to hold the initial parameters for a compare_and_swap operation
4712 in HImode and QImode. */
4713
4714 struct alignment_context
4715 {
4716 rtx memsi; /* SI aligned memory location. */
4717 rtx shift; /* Bit offset with regard to lsb. */
4718 rtx modemask; /* Mask of the HQImode shifted by SHIFT bits. */
4719 rtx modemaski; /* ~modemask */
4720 bool aligned; /* True if memory is aligned, false else. */
4721 };
4722
4723 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize
4724 structure AC for transparent simplifying, if the memory alignment is known
4725 to be at least 32bit. MEM is the memory location for the actual operation
4726 and MODE its mode. */
4727
4728 static void
4729 init_alignment_context (struct alignment_context *ac, rtx mem,
4730 enum machine_mode mode)
4731 {
4732 ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
4733 ac->aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));
4734
4735 if (ac->aligned)
4736 ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned. */
4737 else
4738 {
4739 /* Alignment is unknown. */
4740 rtx byteoffset, addr, align;
4741
4742 /* Force the address into a register. */
4743 addr = force_reg (Pmode, XEXP (mem, 0));
4744
4745 /* Align it to SImode. */
4746 align = expand_simple_binop (Pmode, AND, addr,
4747 GEN_INT (-GET_MODE_SIZE (SImode)),
4748 NULL_RTX, 1, OPTAB_DIRECT);
4749 /* Generate MEM. */
4750 ac->memsi = gen_rtx_MEM (SImode, align);
4751 MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
4752 set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
4753 set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));
4754
4755 /* Calculate shiftcount. */
4756 byteoffset = expand_simple_binop (Pmode, AND, addr,
4757 GEN_INT (GET_MODE_SIZE (SImode) - 1),
4758 NULL_RTX, 1, OPTAB_DIRECT);
4759 /* As we already have some offset, evaluate the remaining distance. */
4760 ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
4761 NULL_RTX, 1, OPTAB_DIRECT);
4762 }
4763
4764 /* Shift is the byte count, but we need the bitcount. */
4765 ac->shift = expand_simple_binop (SImode, ASHIFT, ac->shift, GEN_INT (3),
4766 NULL_RTX, 1, OPTAB_DIRECT);
4767
4768 /* Calculate masks. */
4769 ac->modemask = expand_simple_binop (SImode, ASHIFT,
4770 GEN_INT (GET_MODE_MASK (mode)),
4771 ac->shift, NULL_RTX, 1, OPTAB_DIRECT);
4772 ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask,
4773 NULL_RTX, 1);
4774 }
4775
4776 /* A subroutine of s390_expand_cs_hqi. Insert INS into VAL. If possible,
4777 use a single insv insn into SEQ2. Otherwise, put prep insns in SEQ1 and
4778 perform the merge in SEQ2. */
4779
4780 static rtx
4781 s390_two_part_insv (struct alignment_context *ac, rtx *seq1, rtx *seq2,
4782 enum machine_mode mode, rtx val, rtx ins)
4783 {
4784 rtx tmp;
4785
4786 if (ac->aligned)
4787 {
4788 start_sequence ();
4789 tmp = copy_to_mode_reg (SImode, val);
4790 if (s390_expand_insv (tmp, GEN_INT (GET_MODE_BITSIZE (mode)),
4791 const0_rtx, ins))
4792 {
4793 *seq1 = NULL;
4794 *seq2 = get_insns ();
4795 end_sequence ();
4796 return tmp;
4797 }
4798 end_sequence ();
4799 }
4800
4801 /* Failed to use insv. Generate a two part shift and mask. */
4802 start_sequence ();
4803 tmp = s390_expand_mask_and_shift (ins, mode, ac->shift);
4804 *seq1 = get_insns ();
4805 end_sequence ();
4806
4807 start_sequence ();
4808 tmp = expand_simple_binop (SImode, IOR, tmp, val, NULL_RTX, 1, OPTAB_DIRECT);
4809 *seq2 = get_insns ();
4810 end_sequence ();
4811
4812 return tmp;
4813 }
4814
4815 /* Expand an atomic compare and swap operation for HImode and QImode. MEM is
4816 the memory location, CMP the old value to compare MEM with and NEW_RTX the
4817 value to set if CMP == MEM. */
4818
4819 void
4820 s390_expand_cs_hqi (enum machine_mode mode, rtx btarget, rtx vtarget, rtx mem,
4821 rtx cmp, rtx new_rtx, bool is_weak)
4822 {
4823 struct alignment_context ac;
4824 rtx cmpv, newv, val, resv, cc, seq0, seq1, seq2, seq3;
4825 rtx res = gen_reg_rtx (SImode);
4826 rtx csloop = NULL, csend = NULL;
4827
4828 gcc_assert (MEM_P (mem));
4829
4830 init_alignment_context (&ac, mem, mode);
4831
4832 /* Load full word. Subsequent loads are performed by CS. */
4833 val = expand_simple_binop (SImode, AND, ac.memsi, ac.modemaski,
4834 NULL_RTX, 1, OPTAB_DIRECT);
4835
4836 /* Prepare insertions of cmp and new_rtx into the loaded value. When
4837 possible, we try to use insv to make this happen efficiently. If
4838 that fails we'll generate code both inside and outside the loop. */
4839 cmpv = s390_two_part_insv (&ac, &seq0, &seq2, mode, val, cmp);
4840 newv = s390_two_part_insv (&ac, &seq1, &seq3, mode, val, new_rtx);
4841
4842 if (seq0)
4843 emit_insn (seq0);
4844 if (seq1)
4845 emit_insn (seq1);
4846
4847 /* Start CS loop. */
4848 if (!is_weak)
4849 {
4850 /* Begin assuming success. */
4851 emit_move_insn (btarget, const1_rtx);
4852
4853 csloop = gen_label_rtx ();
4854 csend = gen_label_rtx ();
4855 emit_label (csloop);
4856 }
4857
4858 /* val = "<mem>00..0<mem>"
4859 * cmp = "00..0<cmp>00..0"
4860 * new = "00..0<new>00..0"
4861 */
4862
4863 emit_insn (seq2);
4864 emit_insn (seq3);
4865
4866 cc = s390_emit_compare_and_swap (EQ, res, ac.memsi, cmpv, newv);
4867 if (is_weak)
4868 emit_insn (gen_cstorecc4 (btarget, cc, XEXP (cc, 0), XEXP (cc, 1)));
4869 else
4870 {
4871 /* Jump to end if we're done (likely?). */
4872 s390_emit_jump (csend, cc);
4873
4874 /* Check for changes outside mode, and loop internal if so. */
4875 resv = expand_simple_binop (SImode, AND, res, ac.modemaski,
4876 NULL_RTX, 1, OPTAB_DIRECT);
4877 cc = s390_emit_compare (NE, resv, val);
4878 emit_move_insn (val, resv);
4879 s390_emit_jump (csloop, cc);
4880
4881 /* Failed. */
4882 emit_move_insn (btarget, const0_rtx);
4883 emit_label (csend);
4884 }
4885
4886 /* Return the correct part of the bitfield. */
4887 convert_move (vtarget, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
4888 NULL_RTX, 1, OPTAB_DIRECT), 1);
4889 }
4890
4891 /* Expand an atomic operation CODE of mode MODE. MEM is the memory location
4892 and VAL the value to play with. If AFTER is true then store the value
4893 MEM holds after the operation, if AFTER is false then store the value MEM
4894 holds before the operation. If TARGET is zero then discard that value, else
4895 store it to TARGET. */
4896
4897 void
4898 s390_expand_atomic (enum machine_mode mode, enum rtx_code code,
4899 rtx target, rtx mem, rtx val, bool after)
4900 {
4901 struct alignment_context ac;
4902 rtx cmp;
4903 rtx new_rtx = gen_reg_rtx (SImode);
4904 rtx orig = gen_reg_rtx (SImode);
4905 rtx csloop = gen_label_rtx ();
4906
4907 gcc_assert (!target || register_operand (target, VOIDmode));
4908 gcc_assert (MEM_P (mem));
4909
4910 init_alignment_context (&ac, mem, mode);
4911
4912 /* Shift val to the correct bit positions.
4913 Preserve "icm", but prevent "ex icm". */
4914 if (!(ac.aligned && code == SET && MEM_P (val)))
4915 val = s390_expand_mask_and_shift (val, mode, ac.shift);
4916
4917 /* Further preparation insns. */
4918 if (code == PLUS || code == MINUS)
4919 emit_move_insn (orig, val);
4920 else if (code == MULT || code == AND) /* val = "11..1<val>11..1" */
4921 val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
4922 NULL_RTX, 1, OPTAB_DIRECT);
4923
4924 /* Load full word. Subsequent loads are performed by CS. */
4925 cmp = force_reg (SImode, ac.memsi);
4926
4927 /* Start CS loop. */
4928 emit_label (csloop);
4929 emit_move_insn (new_rtx, cmp);
4930
4931 /* Patch new with val at correct position. */
4932 switch (code)
4933 {
4934 case PLUS:
4935 case MINUS:
4936 val = expand_simple_binop (SImode, code, new_rtx, orig,
4937 NULL_RTX, 1, OPTAB_DIRECT);
4938 val = expand_simple_binop (SImode, AND, val, ac.modemask,
4939 NULL_RTX, 1, OPTAB_DIRECT);
4940 /* FALLTHRU */
4941 case SET:
4942 if (ac.aligned && MEM_P (val))
4943 store_bit_field (new_rtx, GET_MODE_BITSIZE (mode), 0,
4944 0, 0, SImode, val);
4945 else
4946 {
4947 new_rtx = expand_simple_binop (SImode, AND, new_rtx, ac.modemaski,
4948 NULL_RTX, 1, OPTAB_DIRECT);
4949 new_rtx = expand_simple_binop (SImode, IOR, new_rtx, val,
4950 NULL_RTX, 1, OPTAB_DIRECT);
4951 }
4952 break;
4953 case AND:
4954 case IOR:
4955 case XOR:
4956 new_rtx = expand_simple_binop (SImode, code, new_rtx, val,
4957 NULL_RTX, 1, OPTAB_DIRECT);
4958 break;
4959 case MULT: /* NAND */
4960 new_rtx = expand_simple_binop (SImode, AND, new_rtx, val,
4961 NULL_RTX, 1, OPTAB_DIRECT);
4962 new_rtx = expand_simple_binop (SImode, XOR, new_rtx, ac.modemask,
4963 NULL_RTX, 1, OPTAB_DIRECT);
4964 break;
4965 default:
4966 gcc_unreachable ();
4967 }
4968
4969 s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, cmp,
4970 ac.memsi, cmp, new_rtx));
4971
4972 /* Return the correct part of the bitfield. */
4973 if (target)
4974 convert_move (target, expand_simple_binop (SImode, LSHIFTRT,
4975 after ? new_rtx : cmp, ac.shift,
4976 NULL_RTX, 1, OPTAB_DIRECT), 1);
4977 }
4978
4979 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
4980 We need to emit DTP-relative relocations. */
4981
4982 static void s390_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
4983
4984 static void
4985 s390_output_dwarf_dtprel (FILE *file, int size, rtx x)
4986 {
4987 switch (size)
4988 {
4989 case 4:
4990 fputs ("\t.long\t", file);
4991 break;
4992 case 8:
4993 fputs ("\t.quad\t", file);
4994 break;
4995 default:
4996 gcc_unreachable ();
4997 }
4998 output_addr_const (file, x);
4999 fputs ("@DTPOFF", file);
5000 }
5001
5002 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
5003 /* Implement TARGET_MANGLE_TYPE. */
5004
5005 static const char *
5006 s390_mangle_type (const_tree type)
5007 {
5008 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
5009 && TARGET_LONG_DOUBLE_128)
5010 return "g";
5011
5012 /* For all other types, use normal C++ mangling. */
5013 return NULL;
5014 }
5015 #endif
5016
5017 /* In the name of slightly smaller debug output, and to cater to
5018 general assembler lossage, recognize various UNSPEC sequences
5019 and turn them back into a direct symbol reference. */
5020
5021 static rtx
5022 s390_delegitimize_address (rtx orig_x)
5023 {
5024 rtx x, y;
5025
5026 orig_x = delegitimize_mem_from_attrs (orig_x);
5027 x = orig_x;
5028
5029 /* Extract the symbol ref from:
5030 (plus:SI (reg:SI 12 %r12)
5031 (const:SI (unspec:SI [(symbol_ref/f:SI ("*.LC0"))]
5032 UNSPEC_GOTOFF/PLTOFF)))
5033 and
5034 (plus:SI (reg:SI 12 %r12)
5035 (const:SI (plus:SI (unspec:SI [(symbol_ref:SI ("L"))]
5036 UNSPEC_GOTOFF/PLTOFF)
5037 (const_int 4 [0x4])))) */
5038 if (GET_CODE (x) == PLUS
5039 && REG_P (XEXP (x, 0))
5040 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
5041 && GET_CODE (XEXP (x, 1)) == CONST)
5042 {
5043 HOST_WIDE_INT offset = 0;
5044
5045 /* The const operand. */
5046 y = XEXP (XEXP (x, 1), 0);
5047
5048 if (GET_CODE (y) == PLUS
5049 && GET_CODE (XEXP (y, 1)) == CONST_INT)
5050 {
5051 offset = INTVAL (XEXP (y, 1));
5052 y = XEXP (y, 0);
5053 }
5054
5055 if (GET_CODE (y) == UNSPEC
5056 && (XINT (y, 1) == UNSPEC_GOTOFF
5057 || XINT (y, 1) == UNSPEC_PLTOFF))
5058 return plus_constant (Pmode, XVECEXP (y, 0, 0), offset);
5059 }
5060
5061 if (GET_CODE (x) != MEM)
5062 return orig_x;
5063
5064 x = XEXP (x, 0);
5065 if (GET_CODE (x) == PLUS
5066 && GET_CODE (XEXP (x, 1)) == CONST
5067 && GET_CODE (XEXP (x, 0)) == REG
5068 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
5069 {
5070 y = XEXP (XEXP (x, 1), 0);
5071 if (GET_CODE (y) == UNSPEC
5072 && XINT (y, 1) == UNSPEC_GOT)
5073 y = XVECEXP (y, 0, 0);
5074 else
5075 return orig_x;
5076 }
5077 else if (GET_CODE (x) == CONST)
5078 {
5079 /* Extract the symbol ref from:
5080 (mem:QI (const:DI (unspec:DI [(symbol_ref:DI ("foo"))]
5081 UNSPEC_PLT/GOTENT))) */
5082
5083 y = XEXP (x, 0);
5084 if (GET_CODE (y) == UNSPEC
5085 && (XINT (y, 1) == UNSPEC_GOTENT
5086 || XINT (y, 1) == UNSPEC_PLT))
5087 y = XVECEXP (y, 0, 0);
5088 else
5089 return orig_x;
5090 }
5091 else
5092 return orig_x;
5093
5094 if (GET_MODE (orig_x) != Pmode)
5095 {
5096 if (GET_MODE (orig_x) == BLKmode)
5097 return orig_x;
5098 y = lowpart_subreg (GET_MODE (orig_x), y, Pmode);
5099 if (y == NULL_RTX)
5100 return orig_x;
5101 }
5102 return y;
5103 }
5104
5105 /* Output operand OP to stdio stream FILE.
5106 OP is an address (register + offset) which is not used to address data;
5107 instead the rightmost bits are interpreted as the value. */
5108
5109 static void
5110 print_shift_count_operand (FILE *file, rtx op)
5111 {
5112 HOST_WIDE_INT offset;
5113 rtx base;
5114
5115 /* Extract base register and offset. */
5116 if (!s390_decompose_shift_count (op, &base, &offset))
5117 gcc_unreachable ();
5118
5119 /* Sanity check. */
5120 if (base)
5121 {
5122 gcc_assert (GET_CODE (base) == REG);
5123 gcc_assert (REGNO (base) < FIRST_PSEUDO_REGISTER);
5124 gcc_assert (REGNO_REG_CLASS (REGNO (base)) == ADDR_REGS);
5125 }
5126
5127 /* Offsets are constricted to twelve bits. */
5128 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset & ((1 << 12) - 1));
5129 if (base)
5130 fprintf (file, "(%s)", reg_names[REGNO (base)]);
5131 }
5132
5133 /* See 'get_some_local_dynamic_name'. */
5134
5135 static int
5136 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
5137 {
5138 rtx x = *px;
5139
5140 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
5141 {
5142 x = get_pool_constant (x);
5143 return for_each_rtx (&x, get_some_local_dynamic_name_1, 0);
5144 }
5145
5146 if (GET_CODE (x) == SYMBOL_REF
5147 && tls_symbolic_operand (x) == TLS_MODEL_LOCAL_DYNAMIC)
5148 {
5149 cfun->machine->some_ld_name = XSTR (x, 0);
5150 return 1;
5151 }
5152
5153 return 0;
5154 }
5155
5156 /* Locate some local-dynamic symbol still in use by this function
5157 so that we can print its name in local-dynamic base patterns. */
5158
5159 static const char *
5160 get_some_local_dynamic_name (void)
5161 {
5162 rtx insn;
5163
5164 if (cfun->machine->some_ld_name)
5165 return cfun->machine->some_ld_name;
5166
5167 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
5168 if (INSN_P (insn)
5169 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
5170 return cfun->machine->some_ld_name;
5171
5172 gcc_unreachable ();
5173 }
5174
5175 /* Output machine-dependent UNSPECs occurring in address constant X
5176 in assembler syntax to stdio stream FILE. Returns true if the
5177 constant X could be recognized, false otherwise. */
5178
5179 static bool
5180 s390_output_addr_const_extra (FILE *file, rtx x)
5181 {
5182 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
5183 switch (XINT (x, 1))
5184 {
5185 case UNSPEC_GOTENT:
5186 output_addr_const (file, XVECEXP (x, 0, 0));
5187 fprintf (file, "@GOTENT");
5188 return true;
5189 case UNSPEC_GOT:
5190 output_addr_const (file, XVECEXP (x, 0, 0));
5191 fprintf (file, "@GOT");
5192 return true;
5193 case UNSPEC_GOTOFF:
5194 output_addr_const (file, XVECEXP (x, 0, 0));
5195 fprintf (file, "@GOTOFF");
5196 return true;
5197 case UNSPEC_PLT:
5198 output_addr_const (file, XVECEXP (x, 0, 0));
5199 fprintf (file, "@PLT");
5200 return true;
5201 case UNSPEC_PLTOFF:
5202 output_addr_const (file, XVECEXP (x, 0, 0));
5203 fprintf (file, "@PLTOFF");
5204 return true;
5205 case UNSPEC_TLSGD:
5206 output_addr_const (file, XVECEXP (x, 0, 0));
5207 fprintf (file, "@TLSGD");
5208 return true;
5209 case UNSPEC_TLSLDM:
5210 assemble_name (file, get_some_local_dynamic_name ());
5211 fprintf (file, "@TLSLDM");
5212 return true;
5213 case UNSPEC_DTPOFF:
5214 output_addr_const (file, XVECEXP (x, 0, 0));
5215 fprintf (file, "@DTPOFF");
5216 return true;
5217 case UNSPEC_NTPOFF:
5218 output_addr_const (file, XVECEXP (x, 0, 0));
5219 fprintf (file, "@NTPOFF");
5220 return true;
5221 case UNSPEC_GOTNTPOFF:
5222 output_addr_const (file, XVECEXP (x, 0, 0));
5223 fprintf (file, "@GOTNTPOFF");
5224 return true;
5225 case UNSPEC_INDNTPOFF:
5226 output_addr_const (file, XVECEXP (x, 0, 0));
5227 fprintf (file, "@INDNTPOFF");
5228 return true;
5229 }
5230
5231 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 2)
5232 switch (XINT (x, 1))
5233 {
5234 case UNSPEC_POOL_OFFSET:
5235 x = gen_rtx_MINUS (GET_MODE (x), XVECEXP (x, 0, 0), XVECEXP (x, 0, 1));
5236 output_addr_const (file, x);
5237 return true;
5238 }
5239 return false;
5240 }
5241
5242 /* Output address operand ADDR in assembler syntax to
5243 stdio stream FILE. */
5244
5245 void
5246 print_operand_address (FILE *file, rtx addr)
5247 {
5248 struct s390_address ad;
5249
5250 if (s390_symref_operand_p (addr, NULL, NULL))
5251 {
5252 if (!TARGET_Z10)
5253 {
5254 output_operand_lossage ("symbolic memory references are "
5255 "only supported on z10 or later");
5256 return;
5257 }
5258 output_addr_const (file, addr);
5259 return;
5260 }
5261
5262 if (!s390_decompose_address (addr, &ad)
5263 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5264 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
5265 output_operand_lossage ("cannot decompose address");
5266
5267 if (ad.disp)
5268 output_addr_const (file, ad.disp);
5269 else
5270 fprintf (file, "0");
5271
5272 if (ad.base && ad.indx)
5273 fprintf (file, "(%s,%s)", reg_names[REGNO (ad.indx)],
5274 reg_names[REGNO (ad.base)]);
5275 else if (ad.base)
5276 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
5277 }
5278
5279 /* Output operand X in assembler syntax to stdio stream FILE.
5280 CODE specified the format flag. The following format flags
5281 are recognized:
5282
5283 'C': print opcode suffix for branch condition.
5284 'D': print opcode suffix for inverse branch condition.
5285 'E': print opcode suffix for branch on index instruction.
5286 'J': print tls_load/tls_gdcall/tls_ldcall suffix
5287 'G': print the size of the operand in bytes.
5288 'O': print only the displacement of a memory reference.
5289 'R': print only the base register of a memory reference.
5290 'S': print S-type memory reference (base+displacement).
5291 'N': print the second word of a DImode operand.
5292 'M': print the second word of a TImode operand.
5293 'Y': print shift count operand.
5294
5295 'b': print integer X as if it's an unsigned byte.
5296 'c': print integer X as if it's an signed byte.
5297 'x': print integer X as if it's an unsigned halfword.
5298 'h': print integer X as if it's a signed halfword.
5299 'i': print the first nonzero HImode part of X.
5300 'j': print the first HImode part unequal to -1 of X.
5301 'k': print the first nonzero SImode part of X.
5302 'm': print the first SImode part unequal to -1 of X.
5303 'o': print integer X as if it's an unsigned 32bit word. */
5304
5305 void
5306 print_operand (FILE *file, rtx x, int code)
5307 {
5308 switch (code)
5309 {
5310 case 'C':
5311 fprintf (file, s390_branch_condition_mnemonic (x, FALSE));
5312 return;
5313
5314 case 'D':
5315 fprintf (file, s390_branch_condition_mnemonic (x, TRUE));
5316 return;
5317
5318 case 'E':
5319 if (GET_CODE (x) == LE)
5320 fprintf (file, "l");
5321 else if (GET_CODE (x) == GT)
5322 fprintf (file, "h");
5323 else
5324 output_operand_lossage ("invalid comparison operator "
5325 "for 'E' output modifier");
5326 return;
5327
5328 case 'J':
5329 if (GET_CODE (x) == SYMBOL_REF)
5330 {
5331 fprintf (file, "%s", ":tls_load:");
5332 output_addr_const (file, x);
5333 }
5334 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
5335 {
5336 fprintf (file, "%s", ":tls_gdcall:");
5337 output_addr_const (file, XVECEXP (x, 0, 0));
5338 }
5339 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM)
5340 {
5341 fprintf (file, "%s", ":tls_ldcall:");
5342 assemble_name (file, get_some_local_dynamic_name ());
5343 }
5344 else
5345 output_operand_lossage ("invalid reference for 'J' output modifier");
5346 return;
5347
5348 case 'G':
5349 fprintf (file, "%u", GET_MODE_SIZE (GET_MODE (x)));
5350 return;
5351
5352 case 'O':
5353 {
5354 struct s390_address ad;
5355 int ret;
5356
5357 if (!MEM_P (x))
5358 {
5359 output_operand_lossage ("memory reference expected for "
5360 "'O' output modifier");
5361 return;
5362 }
5363
5364 ret = s390_decompose_address (XEXP (x, 0), &ad);
5365
5366 if (!ret
5367 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5368 || ad.indx)
5369 {
5370 output_operand_lossage ("invalid address for 'O' output modifier");
5371 return;
5372 }
5373
5374 if (ad.disp)
5375 output_addr_const (file, ad.disp);
5376 else
5377 fprintf (file, "0");
5378 }
5379 return;
5380
5381 case 'R':
5382 {
5383 struct s390_address ad;
5384 int ret;
5385
5386 if (!MEM_P (x))
5387 {
5388 output_operand_lossage ("memory reference expected for "
5389 "'R' output modifier");
5390 return;
5391 }
5392
5393 ret = s390_decompose_address (XEXP (x, 0), &ad);
5394
5395 if (!ret
5396 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5397 || ad.indx)
5398 {
5399 output_operand_lossage ("invalid address for 'R' output modifier");
5400 return;
5401 }
5402
5403 if (ad.base)
5404 fprintf (file, "%s", reg_names[REGNO (ad.base)]);
5405 else
5406 fprintf (file, "0");
5407 }
5408 return;
5409
5410 case 'S':
5411 {
5412 struct s390_address ad;
5413 int ret;
5414
5415 if (!MEM_P (x))
5416 {
5417 output_operand_lossage ("memory reference expected for "
5418 "'S' output modifier");
5419 return;
5420 }
5421 ret = s390_decompose_address (XEXP (x, 0), &ad);
5422
5423 if (!ret
5424 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5425 || ad.indx)
5426 {
5427 output_operand_lossage ("invalid address for 'S' output modifier");
5428 return;
5429 }
5430
5431 if (ad.disp)
5432 output_addr_const (file, ad.disp);
5433 else
5434 fprintf (file, "0");
5435
5436 if (ad.base)
5437 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
5438 }
5439 return;
5440
5441 case 'N':
5442 if (GET_CODE (x) == REG)
5443 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
5444 else if (GET_CODE (x) == MEM)
5445 x = change_address (x, VOIDmode,
5446 plus_constant (Pmode, XEXP (x, 0), 4));
5447 else
5448 output_operand_lossage ("register or memory expression expected "
5449 "for 'N' output modifier");
5450 break;
5451
5452 case 'M':
5453 if (GET_CODE (x) == REG)
5454 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
5455 else if (GET_CODE (x) == MEM)
5456 x = change_address (x, VOIDmode,
5457 plus_constant (Pmode, XEXP (x, 0), 8));
5458 else
5459 output_operand_lossage ("register or memory expression expected "
5460 "for 'M' output modifier");
5461 break;
5462
5463 case 'Y':
5464 print_shift_count_operand (file, x);
5465 return;
5466 }
5467
5468 switch (GET_CODE (x))
5469 {
5470 case REG:
5471 fprintf (file, "%s", reg_names[REGNO (x)]);
5472 break;
5473
5474 case MEM:
5475 output_address (XEXP (x, 0));
5476 break;
5477
5478 case CONST:
5479 case CODE_LABEL:
5480 case LABEL_REF:
5481 case SYMBOL_REF:
5482 output_addr_const (file, x);
5483 break;
5484
5485 case CONST_INT:
5486 if (code == 'b')
5487 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xff);
5488 else if (code == 'c')
5489 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ((INTVAL (x) & 0xff) ^ 0x80) - 0x80);
5490 else if (code == 'x')
5491 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xffff);
5492 else if (code == 'h')
5493 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
5494 else if (code == 'i')
5495 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5496 s390_extract_part (x, HImode, 0));
5497 else if (code == 'j')
5498 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5499 s390_extract_part (x, HImode, -1));
5500 else if (code == 'k')
5501 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5502 s390_extract_part (x, SImode, 0));
5503 else if (code == 'm')
5504 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5505 s390_extract_part (x, SImode, -1));
5506 else if (code == 'o')
5507 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xffffffff);
5508 else
5509 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
5510 break;
5511
5512 case CONST_DOUBLE:
5513 gcc_assert (GET_MODE (x) == VOIDmode);
5514 if (code == 'b')
5515 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xff);
5516 else if (code == 'x')
5517 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xffff);
5518 else if (code == 'h')
5519 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5520 ((CONST_DOUBLE_LOW (x) & 0xffff) ^ 0x8000) - 0x8000);
5521 else
5522 {
5523 if (code == 0)
5524 output_operand_lossage ("invalid constant - try using "
5525 "an output modifier");
5526 else
5527 output_operand_lossage ("invalid constant for output modifier '%c'",
5528 code);
5529 }
5530 break;
5531
5532 default:
5533 if (code == 0)
5534 output_operand_lossage ("invalid expression - try using "
5535 "an output modifier");
5536 else
5537 output_operand_lossage ("invalid expression for output "
5538 "modifier '%c'", code);
5539 break;
5540 }
5541 }
5542
5543 /* Target hook for assembling integer objects. We need to define it
5544 here to work a round a bug in some versions of GAS, which couldn't
5545 handle values smaller than INT_MIN when printed in decimal. */
5546
5547 static bool
5548 s390_assemble_integer (rtx x, unsigned int size, int aligned_p)
5549 {
5550 if (size == 8 && aligned_p
5551 && GET_CODE (x) == CONST_INT && INTVAL (x) < INT_MIN)
5552 {
5553 fprintf (asm_out_file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n",
5554 INTVAL (x));
5555 return true;
5556 }
5557 return default_assemble_integer (x, size, aligned_p);
5558 }
5559
5560 /* Returns true if register REGNO is used for forming
5561 a memory address in expression X. */
5562
5563 static bool
5564 reg_used_in_mem_p (int regno, rtx x)
5565 {
5566 enum rtx_code code = GET_CODE (x);
5567 int i, j;
5568 const char *fmt;
5569
5570 if (code == MEM)
5571 {
5572 if (refers_to_regno_p (regno, regno+1,
5573 XEXP (x, 0), 0))
5574 return true;
5575 }
5576 else if (code == SET
5577 && GET_CODE (SET_DEST (x)) == PC)
5578 {
5579 if (refers_to_regno_p (regno, regno+1,
5580 SET_SRC (x), 0))
5581 return true;
5582 }
5583
5584 fmt = GET_RTX_FORMAT (code);
5585 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5586 {
5587 if (fmt[i] == 'e'
5588 && reg_used_in_mem_p (regno, XEXP (x, i)))
5589 return true;
5590
5591 else if (fmt[i] == 'E')
5592 for (j = 0; j < XVECLEN (x, i); j++)
5593 if (reg_used_in_mem_p (regno, XVECEXP (x, i, j)))
5594 return true;
5595 }
5596 return false;
5597 }
5598
5599 /* Returns true if expression DEP_RTX sets an address register
5600 used by instruction INSN to address memory. */
5601
5602 static bool
5603 addr_generation_dependency_p (rtx dep_rtx, rtx insn)
5604 {
5605 rtx target, pat;
5606
5607 if (GET_CODE (dep_rtx) == INSN)
5608 dep_rtx = PATTERN (dep_rtx);
5609
5610 if (GET_CODE (dep_rtx) == SET)
5611 {
5612 target = SET_DEST (dep_rtx);
5613 if (GET_CODE (target) == STRICT_LOW_PART)
5614 target = XEXP (target, 0);
5615 while (GET_CODE (target) == SUBREG)
5616 target = SUBREG_REG (target);
5617
5618 if (GET_CODE (target) == REG)
5619 {
5620 int regno = REGNO (target);
5621
5622 if (s390_safe_attr_type (insn) == TYPE_LA)
5623 {
5624 pat = PATTERN (insn);
5625 if (GET_CODE (pat) == PARALLEL)
5626 {
5627 gcc_assert (XVECLEN (pat, 0) == 2);
5628 pat = XVECEXP (pat, 0, 0);
5629 }
5630 gcc_assert (GET_CODE (pat) == SET);
5631 return refers_to_regno_p (regno, regno+1, SET_SRC (pat), 0);
5632 }
5633 else if (get_attr_atype (insn) == ATYPE_AGEN)
5634 return reg_used_in_mem_p (regno, PATTERN (insn));
5635 }
5636 }
5637 return false;
5638 }
5639
5640 /* Return 1, if dep_insn sets register used in insn in the agen unit. */
5641
5642 int
5643 s390_agen_dep_p (rtx dep_insn, rtx insn)
5644 {
5645 rtx dep_rtx = PATTERN (dep_insn);
5646 int i;
5647
5648 if (GET_CODE (dep_rtx) == SET
5649 && addr_generation_dependency_p (dep_rtx, insn))
5650 return 1;
5651 else if (GET_CODE (dep_rtx) == PARALLEL)
5652 {
5653 for (i = 0; i < XVECLEN (dep_rtx, 0); i++)
5654 {
5655 if (addr_generation_dependency_p (XVECEXP (dep_rtx, 0, i), insn))
5656 return 1;
5657 }
5658 }
5659 return 0;
5660 }
5661
5662
5663 /* A C statement (sans semicolon) to update the integer scheduling priority
5664 INSN_PRIORITY (INSN). Increase the priority to execute the INSN earlier,
5665 reduce the priority to execute INSN later. Do not define this macro if
5666 you do not need to adjust the scheduling priorities of insns.
5667
5668 A STD instruction should be scheduled earlier,
5669 in order to use the bypass. */
5670 static int
5671 s390_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
5672 {
5673 if (! INSN_P (insn))
5674 return priority;
5675
5676 if (s390_tune != PROCESSOR_2084_Z990
5677 && s390_tune != PROCESSOR_2094_Z9_109
5678 && s390_tune != PROCESSOR_2097_Z10
5679 && s390_tune != PROCESSOR_2817_Z196)
5680 return priority;
5681
5682 switch (s390_safe_attr_type (insn))
5683 {
5684 case TYPE_FSTOREDF:
5685 case TYPE_FSTORESF:
5686 priority = priority << 3;
5687 break;
5688 case TYPE_STORE:
5689 case TYPE_STM:
5690 priority = priority << 1;
5691 break;
5692 default:
5693 break;
5694 }
5695 return priority;
5696 }
5697
5698
5699 /* The number of instructions that can be issued per cycle. */
5700
5701 static int
5702 s390_issue_rate (void)
5703 {
5704 switch (s390_tune)
5705 {
5706 case PROCESSOR_2084_Z990:
5707 case PROCESSOR_2094_Z9_109:
5708 case PROCESSOR_2817_Z196:
5709 return 3;
5710 case PROCESSOR_2097_Z10:
5711 return 2;
5712 default:
5713 return 1;
5714 }
5715 }
5716
5717 static int
5718 s390_first_cycle_multipass_dfa_lookahead (void)
5719 {
5720 return 4;
5721 }
5722
5723 /* Annotate every literal pool reference in X by an UNSPEC_LTREF expression.
5724 Fix up MEMs as required. */
5725
5726 static void
5727 annotate_constant_pool_refs (rtx *x)
5728 {
5729 int i, j;
5730 const char *fmt;
5731
5732 gcc_assert (GET_CODE (*x) != SYMBOL_REF
5733 || !CONSTANT_POOL_ADDRESS_P (*x));
5734
5735 /* Literal pool references can only occur inside a MEM ... */
5736 if (GET_CODE (*x) == MEM)
5737 {
5738 rtx memref = XEXP (*x, 0);
5739
5740 if (GET_CODE (memref) == SYMBOL_REF
5741 && CONSTANT_POOL_ADDRESS_P (memref))
5742 {
5743 rtx base = cfun->machine->base_reg;
5744 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, memref, base),
5745 UNSPEC_LTREF);
5746
5747 *x = replace_equiv_address (*x, addr);
5748 return;
5749 }
5750
5751 if (GET_CODE (memref) == CONST
5752 && GET_CODE (XEXP (memref, 0)) == PLUS
5753 && GET_CODE (XEXP (XEXP (memref, 0), 1)) == CONST_INT
5754 && GET_CODE (XEXP (XEXP (memref, 0), 0)) == SYMBOL_REF
5755 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (memref, 0), 0)))
5756 {
5757 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (memref, 0), 1));
5758 rtx sym = XEXP (XEXP (memref, 0), 0);
5759 rtx base = cfun->machine->base_reg;
5760 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5761 UNSPEC_LTREF);
5762
5763 *x = replace_equiv_address (*x, plus_constant (Pmode, addr, off));
5764 return;
5765 }
5766 }
5767
5768 /* ... or a load-address type pattern. */
5769 if (GET_CODE (*x) == SET)
5770 {
5771 rtx addrref = SET_SRC (*x);
5772
5773 if (GET_CODE (addrref) == SYMBOL_REF
5774 && CONSTANT_POOL_ADDRESS_P (addrref))
5775 {
5776 rtx base = cfun->machine->base_reg;
5777 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addrref, base),
5778 UNSPEC_LTREF);
5779
5780 SET_SRC (*x) = addr;
5781 return;
5782 }
5783
5784 if (GET_CODE (addrref) == CONST
5785 && GET_CODE (XEXP (addrref, 0)) == PLUS
5786 && GET_CODE (XEXP (XEXP (addrref, 0), 1)) == CONST_INT
5787 && GET_CODE (XEXP (XEXP (addrref, 0), 0)) == SYMBOL_REF
5788 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (addrref, 0), 0)))
5789 {
5790 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (addrref, 0), 1));
5791 rtx sym = XEXP (XEXP (addrref, 0), 0);
5792 rtx base = cfun->machine->base_reg;
5793 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5794 UNSPEC_LTREF);
5795
5796 SET_SRC (*x) = plus_constant (Pmode, addr, off);
5797 return;
5798 }
5799 }
5800
5801 /* Annotate LTREL_BASE as well. */
5802 if (GET_CODE (*x) == UNSPEC
5803 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
5804 {
5805 rtx base = cfun->machine->base_reg;
5806 *x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XVECEXP (*x, 0, 0), base),
5807 UNSPEC_LTREL_BASE);
5808 return;
5809 }
5810
5811 fmt = GET_RTX_FORMAT (GET_CODE (*x));
5812 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
5813 {
5814 if (fmt[i] == 'e')
5815 {
5816 annotate_constant_pool_refs (&XEXP (*x, i));
5817 }
5818 else if (fmt[i] == 'E')
5819 {
5820 for (j = 0; j < XVECLEN (*x, i); j++)
5821 annotate_constant_pool_refs (&XVECEXP (*x, i, j));
5822 }
5823 }
5824 }
5825
5826 /* Split all branches that exceed the maximum distance.
5827 Returns true if this created a new literal pool entry. */
5828
5829 static int
5830 s390_split_branches (void)
5831 {
5832 rtx temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
5833 int new_literal = 0, ret;
5834 rtx insn, pat, tmp, target;
5835 rtx *label;
5836
5837 /* We need correct insn addresses. */
5838
5839 shorten_branches (get_insns ());
5840
5841 /* Find all branches that exceed 64KB, and split them. */
5842
5843 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5844 {
5845 if (GET_CODE (insn) != JUMP_INSN)
5846 continue;
5847
5848 pat = PATTERN (insn);
5849 if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 2)
5850 pat = XVECEXP (pat, 0, 0);
5851 if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
5852 continue;
5853
5854 if (GET_CODE (SET_SRC (pat)) == LABEL_REF)
5855 {
5856 label = &SET_SRC (pat);
5857 }
5858 else if (GET_CODE (SET_SRC (pat)) == IF_THEN_ELSE)
5859 {
5860 if (GET_CODE (XEXP (SET_SRC (pat), 1)) == LABEL_REF)
5861 label = &XEXP (SET_SRC (pat), 1);
5862 else if (GET_CODE (XEXP (SET_SRC (pat), 2)) == LABEL_REF)
5863 label = &XEXP (SET_SRC (pat), 2);
5864 else
5865 continue;
5866 }
5867 else
5868 continue;
5869
5870 if (get_attr_length (insn) <= 4)
5871 continue;
5872
5873 /* We are going to use the return register as scratch register,
5874 make sure it will be saved/restored by the prologue/epilogue. */
5875 cfun_frame_layout.save_return_addr_p = 1;
5876
5877 if (!flag_pic)
5878 {
5879 new_literal = 1;
5880 tmp = force_const_mem (Pmode, *label);
5881 tmp = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, tmp), insn);
5882 INSN_ADDRESSES_NEW (tmp, -1);
5883 annotate_constant_pool_refs (&PATTERN (tmp));
5884
5885 target = temp_reg;
5886 }
5887 else
5888 {
5889 new_literal = 1;
5890 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, *label),
5891 UNSPEC_LTREL_OFFSET);
5892 target = gen_rtx_CONST (Pmode, target);
5893 target = force_const_mem (Pmode, target);
5894 tmp = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, target), insn);
5895 INSN_ADDRESSES_NEW (tmp, -1);
5896 annotate_constant_pool_refs (&PATTERN (tmp));
5897
5898 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XEXP (target, 0),
5899 cfun->machine->base_reg),
5900 UNSPEC_LTREL_BASE);
5901 target = gen_rtx_PLUS (Pmode, temp_reg, target);
5902 }
5903
5904 ret = validate_change (insn, label, target, 0);
5905 gcc_assert (ret);
5906 }
5907
5908 return new_literal;
5909 }
5910
5911
5912 /* Find an annotated literal pool symbol referenced in RTX X,
5913 and store it at REF. Will abort if X contains references to
5914 more than one such pool symbol; multiple references to the same
5915 symbol are allowed, however.
5916
5917 The rtx pointed to by REF must be initialized to NULL_RTX
5918 by the caller before calling this routine. */
5919
5920 static void
5921 find_constant_pool_ref (rtx x, rtx *ref)
5922 {
5923 int i, j;
5924 const char *fmt;
5925
5926 /* Ignore LTREL_BASE references. */
5927 if (GET_CODE (x) == UNSPEC
5928 && XINT (x, 1) == UNSPEC_LTREL_BASE)
5929 return;
5930 /* Likewise POOL_ENTRY insns. */
5931 if (GET_CODE (x) == UNSPEC_VOLATILE
5932 && XINT (x, 1) == UNSPECV_POOL_ENTRY)
5933 return;
5934
5935 gcc_assert (GET_CODE (x) != SYMBOL_REF
5936 || !CONSTANT_POOL_ADDRESS_P (x));
5937
5938 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_LTREF)
5939 {
5940 rtx sym = XVECEXP (x, 0, 0);
5941 gcc_assert (GET_CODE (sym) == SYMBOL_REF
5942 && CONSTANT_POOL_ADDRESS_P (sym));
5943
5944 if (*ref == NULL_RTX)
5945 *ref = sym;
5946 else
5947 gcc_assert (*ref == sym);
5948
5949 return;
5950 }
5951
5952 fmt = GET_RTX_FORMAT (GET_CODE (x));
5953 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5954 {
5955 if (fmt[i] == 'e')
5956 {
5957 find_constant_pool_ref (XEXP (x, i), ref);
5958 }
5959 else if (fmt[i] == 'E')
5960 {
5961 for (j = 0; j < XVECLEN (x, i); j++)
5962 find_constant_pool_ref (XVECEXP (x, i, j), ref);
5963 }
5964 }
5965 }
5966
5967 /* Replace every reference to the annotated literal pool
5968 symbol REF in X by its base plus OFFSET. */
5969
5970 static void
5971 replace_constant_pool_ref (rtx *x, rtx ref, rtx offset)
5972 {
5973 int i, j;
5974 const char *fmt;
5975
5976 gcc_assert (*x != ref);
5977
5978 if (GET_CODE (*x) == UNSPEC
5979 && XINT (*x, 1) == UNSPEC_LTREF
5980 && XVECEXP (*x, 0, 0) == ref)
5981 {
5982 *x = gen_rtx_PLUS (Pmode, XVECEXP (*x, 0, 1), offset);
5983 return;
5984 }
5985
5986 if (GET_CODE (*x) == PLUS
5987 && GET_CODE (XEXP (*x, 1)) == CONST_INT
5988 && GET_CODE (XEXP (*x, 0)) == UNSPEC
5989 && XINT (XEXP (*x, 0), 1) == UNSPEC_LTREF
5990 && XVECEXP (XEXP (*x, 0), 0, 0) == ref)
5991 {
5992 rtx addr = gen_rtx_PLUS (Pmode, XVECEXP (XEXP (*x, 0), 0, 1), offset);
5993 *x = plus_constant (Pmode, addr, INTVAL (XEXP (*x, 1)));
5994 return;
5995 }
5996
5997 fmt = GET_RTX_FORMAT (GET_CODE (*x));
5998 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
5999 {
6000 if (fmt[i] == 'e')
6001 {
6002 replace_constant_pool_ref (&XEXP (*x, i), ref, offset);
6003 }
6004 else if (fmt[i] == 'E')
6005 {
6006 for (j = 0; j < XVECLEN (*x, i); j++)
6007 replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, offset);
6008 }
6009 }
6010 }
6011
6012 /* Check whether X contains an UNSPEC_LTREL_BASE.
6013 Return its constant pool symbol if found, NULL_RTX otherwise. */
6014
6015 static rtx
6016 find_ltrel_base (rtx x)
6017 {
6018 int i, j;
6019 const char *fmt;
6020
6021 if (GET_CODE (x) == UNSPEC
6022 && XINT (x, 1) == UNSPEC_LTREL_BASE)
6023 return XVECEXP (x, 0, 0);
6024
6025 fmt = GET_RTX_FORMAT (GET_CODE (x));
6026 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6027 {
6028 if (fmt[i] == 'e')
6029 {
6030 rtx fnd = find_ltrel_base (XEXP (x, i));
6031 if (fnd)
6032 return fnd;
6033 }
6034 else if (fmt[i] == 'E')
6035 {
6036 for (j = 0; j < XVECLEN (x, i); j++)
6037 {
6038 rtx fnd = find_ltrel_base (XVECEXP (x, i, j));
6039 if (fnd)
6040 return fnd;
6041 }
6042 }
6043 }
6044
6045 return NULL_RTX;
6046 }
6047
6048 /* Replace any occurrence of UNSPEC_LTREL_BASE in X with its base. */
6049
6050 static void
6051 replace_ltrel_base (rtx *x)
6052 {
6053 int i, j;
6054 const char *fmt;
6055
6056 if (GET_CODE (*x) == UNSPEC
6057 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
6058 {
6059 *x = XVECEXP (*x, 0, 1);
6060 return;
6061 }
6062
6063 fmt = GET_RTX_FORMAT (GET_CODE (*x));
6064 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
6065 {
6066 if (fmt[i] == 'e')
6067 {
6068 replace_ltrel_base (&XEXP (*x, i));
6069 }
6070 else if (fmt[i] == 'E')
6071 {
6072 for (j = 0; j < XVECLEN (*x, i); j++)
6073 replace_ltrel_base (&XVECEXP (*x, i, j));
6074 }
6075 }
6076 }
6077
6078
6079 /* We keep a list of constants which we have to add to internal
6080 constant tables in the middle of large functions. */
6081
6082 #define NR_C_MODES 11
6083 enum machine_mode constant_modes[NR_C_MODES] =
6084 {
6085 TFmode, TImode, TDmode,
6086 DFmode, DImode, DDmode,
6087 SFmode, SImode, SDmode,
6088 HImode,
6089 QImode
6090 };
6091
6092 struct constant
6093 {
6094 struct constant *next;
6095 rtx value;
6096 rtx label;
6097 };
6098
6099 struct constant_pool
6100 {
6101 struct constant_pool *next;
6102 rtx first_insn;
6103 rtx pool_insn;
6104 bitmap insns;
6105 rtx emit_pool_after;
6106
6107 struct constant *constants[NR_C_MODES];
6108 struct constant *execute;
6109 rtx label;
6110 int size;
6111 };
6112
6113 /* Allocate new constant_pool structure. */
6114
6115 static struct constant_pool *
6116 s390_alloc_pool (void)
6117 {
6118 struct constant_pool *pool;
6119 int i;
6120
6121 pool = (struct constant_pool *) xmalloc (sizeof *pool);
6122 pool->next = NULL;
6123 for (i = 0; i < NR_C_MODES; i++)
6124 pool->constants[i] = NULL;
6125
6126 pool->execute = NULL;
6127 pool->label = gen_label_rtx ();
6128 pool->first_insn = NULL_RTX;
6129 pool->pool_insn = NULL_RTX;
6130 pool->insns = BITMAP_ALLOC (NULL);
6131 pool->size = 0;
6132 pool->emit_pool_after = NULL_RTX;
6133
6134 return pool;
6135 }
6136
6137 /* Create new constant pool covering instructions starting at INSN
6138 and chain it to the end of POOL_LIST. */
6139
6140 static struct constant_pool *
6141 s390_start_pool (struct constant_pool **pool_list, rtx insn)
6142 {
6143 struct constant_pool *pool, **prev;
6144
6145 pool = s390_alloc_pool ();
6146 pool->first_insn = insn;
6147
6148 for (prev = pool_list; *prev; prev = &(*prev)->next)
6149 ;
6150 *prev = pool;
6151
6152 return pool;
6153 }
6154
6155 /* End range of instructions covered by POOL at INSN and emit
6156 placeholder insn representing the pool. */
6157
6158 static void
6159 s390_end_pool (struct constant_pool *pool, rtx insn)
6160 {
6161 rtx pool_size = GEN_INT (pool->size + 8 /* alignment slop */);
6162
6163 if (!insn)
6164 insn = get_last_insn ();
6165
6166 pool->pool_insn = emit_insn_after (gen_pool (pool_size), insn);
6167 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6168 }
6169
6170 /* Add INSN to the list of insns covered by POOL. */
6171
6172 static void
6173 s390_add_pool_insn (struct constant_pool *pool, rtx insn)
6174 {
6175 bitmap_set_bit (pool->insns, INSN_UID (insn));
6176 }
6177
6178 /* Return pool out of POOL_LIST that covers INSN. */
6179
6180 static struct constant_pool *
6181 s390_find_pool (struct constant_pool *pool_list, rtx insn)
6182 {
6183 struct constant_pool *pool;
6184
6185 for (pool = pool_list; pool; pool = pool->next)
6186 if (bitmap_bit_p (pool->insns, INSN_UID (insn)))
6187 break;
6188
6189 return pool;
6190 }
6191
6192 /* Add constant VAL of mode MODE to the constant pool POOL. */
6193
6194 static void
6195 s390_add_constant (struct constant_pool *pool, rtx val, enum machine_mode mode)
6196 {
6197 struct constant *c;
6198 int i;
6199
6200 for (i = 0; i < NR_C_MODES; i++)
6201 if (constant_modes[i] == mode)
6202 break;
6203 gcc_assert (i != NR_C_MODES);
6204
6205 for (c = pool->constants[i]; c != NULL; c = c->next)
6206 if (rtx_equal_p (val, c->value))
6207 break;
6208
6209 if (c == NULL)
6210 {
6211 c = (struct constant *) xmalloc (sizeof *c);
6212 c->value = val;
6213 c->label = gen_label_rtx ();
6214 c->next = pool->constants[i];
6215 pool->constants[i] = c;
6216 pool->size += GET_MODE_SIZE (mode);
6217 }
6218 }
6219
6220 /* Return an rtx that represents the offset of X from the start of
6221 pool POOL. */
6222
6223 static rtx
6224 s390_pool_offset (struct constant_pool *pool, rtx x)
6225 {
6226 rtx label;
6227
6228 label = gen_rtx_LABEL_REF (GET_MODE (x), pool->label);
6229 x = gen_rtx_UNSPEC (GET_MODE (x), gen_rtvec (2, x, label),
6230 UNSPEC_POOL_OFFSET);
6231 return gen_rtx_CONST (GET_MODE (x), x);
6232 }
6233
6234 /* Find constant VAL of mode MODE in the constant pool POOL.
6235 Return an RTX describing the distance from the start of
6236 the pool to the location of the new constant. */
6237
6238 static rtx
6239 s390_find_constant (struct constant_pool *pool, rtx val,
6240 enum machine_mode mode)
6241 {
6242 struct constant *c;
6243 int i;
6244
6245 for (i = 0; i < NR_C_MODES; i++)
6246 if (constant_modes[i] == mode)
6247 break;
6248 gcc_assert (i != NR_C_MODES);
6249
6250 for (c = pool->constants[i]; c != NULL; c = c->next)
6251 if (rtx_equal_p (val, c->value))
6252 break;
6253
6254 gcc_assert (c);
6255
6256 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
6257 }
6258
6259 /* Check whether INSN is an execute. Return the label_ref to its
6260 execute target template if so, NULL_RTX otherwise. */
6261
6262 static rtx
6263 s390_execute_label (rtx insn)
6264 {
6265 if (GET_CODE (insn) == INSN
6266 && GET_CODE (PATTERN (insn)) == PARALLEL
6267 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == UNSPEC
6268 && XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_EXECUTE)
6269 return XVECEXP (XVECEXP (PATTERN (insn), 0, 0), 0, 2);
6270
6271 return NULL_RTX;
6272 }
6273
6274 /* Add execute target for INSN to the constant pool POOL. */
6275
6276 static void
6277 s390_add_execute (struct constant_pool *pool, rtx insn)
6278 {
6279 struct constant *c;
6280
6281 for (c = pool->execute; c != NULL; c = c->next)
6282 if (INSN_UID (insn) == INSN_UID (c->value))
6283 break;
6284
6285 if (c == NULL)
6286 {
6287 c = (struct constant *) xmalloc (sizeof *c);
6288 c->value = insn;
6289 c->label = gen_label_rtx ();
6290 c->next = pool->execute;
6291 pool->execute = c;
6292 pool->size += 6;
6293 }
6294 }
6295
6296 /* Find execute target for INSN in the constant pool POOL.
6297 Return an RTX describing the distance from the start of
6298 the pool to the location of the execute target. */
6299
6300 static rtx
6301 s390_find_execute (struct constant_pool *pool, rtx insn)
6302 {
6303 struct constant *c;
6304
6305 for (c = pool->execute; c != NULL; c = c->next)
6306 if (INSN_UID (insn) == INSN_UID (c->value))
6307 break;
6308
6309 gcc_assert (c);
6310
6311 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
6312 }
6313
6314 /* For an execute INSN, extract the execute target template. */
6315
6316 static rtx
6317 s390_execute_target (rtx insn)
6318 {
6319 rtx pattern = PATTERN (insn);
6320 gcc_assert (s390_execute_label (insn));
6321
6322 if (XVECLEN (pattern, 0) == 2)
6323 {
6324 pattern = copy_rtx (XVECEXP (pattern, 0, 1));
6325 }
6326 else
6327 {
6328 rtvec vec = rtvec_alloc (XVECLEN (pattern, 0) - 1);
6329 int i;
6330
6331 for (i = 0; i < XVECLEN (pattern, 0) - 1; i++)
6332 RTVEC_ELT (vec, i) = copy_rtx (XVECEXP (pattern, 0, i + 1));
6333
6334 pattern = gen_rtx_PARALLEL (VOIDmode, vec);
6335 }
6336
6337 return pattern;
6338 }
6339
6340 /* Indicate that INSN cannot be duplicated. This is the case for
6341 execute insns that carry a unique label. */
6342
6343 static bool
6344 s390_cannot_copy_insn_p (rtx insn)
6345 {
6346 rtx label = s390_execute_label (insn);
6347 return label && label != const0_rtx;
6348 }
6349
6350 /* Dump out the constants in POOL. If REMOTE_LABEL is true,
6351 do not emit the pool base label. */
6352
6353 static void
6354 s390_dump_pool (struct constant_pool *pool, bool remote_label)
6355 {
6356 struct constant *c;
6357 rtx insn = pool->pool_insn;
6358 int i;
6359
6360 /* Switch to rodata section. */
6361 if (TARGET_CPU_ZARCH)
6362 {
6363 insn = emit_insn_after (gen_pool_section_start (), insn);
6364 INSN_ADDRESSES_NEW (insn, -1);
6365 }
6366
6367 /* Ensure minimum pool alignment. */
6368 if (TARGET_CPU_ZARCH)
6369 insn = emit_insn_after (gen_pool_align (GEN_INT (8)), insn);
6370 else
6371 insn = emit_insn_after (gen_pool_align (GEN_INT (4)), insn);
6372 INSN_ADDRESSES_NEW (insn, -1);
6373
6374 /* Emit pool base label. */
6375 if (!remote_label)
6376 {
6377 insn = emit_label_after (pool->label, insn);
6378 INSN_ADDRESSES_NEW (insn, -1);
6379 }
6380
6381 /* Dump constants in descending alignment requirement order,
6382 ensuring proper alignment for every constant. */
6383 for (i = 0; i < NR_C_MODES; i++)
6384 for (c = pool->constants[i]; c; c = c->next)
6385 {
6386 /* Convert UNSPEC_LTREL_OFFSET unspecs to pool-relative references. */
6387 rtx value = copy_rtx (c->value);
6388 if (GET_CODE (value) == CONST
6389 && GET_CODE (XEXP (value, 0)) == UNSPEC
6390 && XINT (XEXP (value, 0), 1) == UNSPEC_LTREL_OFFSET
6391 && XVECLEN (XEXP (value, 0), 0) == 1)
6392 value = s390_pool_offset (pool, XVECEXP (XEXP (value, 0), 0, 0));
6393
6394 insn = emit_label_after (c->label, insn);
6395 INSN_ADDRESSES_NEW (insn, -1);
6396
6397 value = gen_rtx_UNSPEC_VOLATILE (constant_modes[i],
6398 gen_rtvec (1, value),
6399 UNSPECV_POOL_ENTRY);
6400 insn = emit_insn_after (value, insn);
6401 INSN_ADDRESSES_NEW (insn, -1);
6402 }
6403
6404 /* Ensure minimum alignment for instructions. */
6405 insn = emit_insn_after (gen_pool_align (GEN_INT (2)), insn);
6406 INSN_ADDRESSES_NEW (insn, -1);
6407
6408 /* Output in-pool execute template insns. */
6409 for (c = pool->execute; c; c = c->next)
6410 {
6411 insn = emit_label_after (c->label, insn);
6412 INSN_ADDRESSES_NEW (insn, -1);
6413
6414 insn = emit_insn_after (s390_execute_target (c->value), insn);
6415 INSN_ADDRESSES_NEW (insn, -1);
6416 }
6417
6418 /* Switch back to previous section. */
6419 if (TARGET_CPU_ZARCH)
6420 {
6421 insn = emit_insn_after (gen_pool_section_end (), insn);
6422 INSN_ADDRESSES_NEW (insn, -1);
6423 }
6424
6425 insn = emit_barrier_after (insn);
6426 INSN_ADDRESSES_NEW (insn, -1);
6427
6428 /* Remove placeholder insn. */
6429 remove_insn (pool->pool_insn);
6430 }
6431
6432 /* Free all memory used by POOL. */
6433
6434 static void
6435 s390_free_pool (struct constant_pool *pool)
6436 {
6437 struct constant *c, *next;
6438 int i;
6439
6440 for (i = 0; i < NR_C_MODES; i++)
6441 for (c = pool->constants[i]; c; c = next)
6442 {
6443 next = c->next;
6444 free (c);
6445 }
6446
6447 for (c = pool->execute; c; c = next)
6448 {
6449 next = c->next;
6450 free (c);
6451 }
6452
6453 BITMAP_FREE (pool->insns);
6454 free (pool);
6455 }
6456
6457
6458 /* Collect main literal pool. Return NULL on overflow. */
6459
6460 static struct constant_pool *
6461 s390_mainpool_start (void)
6462 {
6463 struct constant_pool *pool;
6464 rtx insn;
6465
6466 pool = s390_alloc_pool ();
6467
6468 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6469 {
6470 if (GET_CODE (insn) == INSN
6471 && GET_CODE (PATTERN (insn)) == SET
6472 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC_VOLATILE
6473 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPECV_MAIN_POOL)
6474 {
6475 gcc_assert (!pool->pool_insn);
6476 pool->pool_insn = insn;
6477 }
6478
6479 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
6480 {
6481 s390_add_execute (pool, insn);
6482 }
6483 else if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6484 {
6485 rtx pool_ref = NULL_RTX;
6486 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6487 if (pool_ref)
6488 {
6489 rtx constant = get_pool_constant (pool_ref);
6490 enum machine_mode mode = get_pool_mode (pool_ref);
6491 s390_add_constant (pool, constant, mode);
6492 }
6493 }
6494
6495 /* If hot/cold partitioning is enabled we have to make sure that
6496 the literal pool is emitted in the same section where the
6497 initialization of the literal pool base pointer takes place.
6498 emit_pool_after is only used in the non-overflow case on non
6499 Z cpus where we can emit the literal pool at the end of the
6500 function body within the text section. */
6501 if (NOTE_P (insn)
6502 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS
6503 && !pool->emit_pool_after)
6504 pool->emit_pool_after = PREV_INSN (insn);
6505 }
6506
6507 gcc_assert (pool->pool_insn || pool->size == 0);
6508
6509 if (pool->size >= 4096)
6510 {
6511 /* We're going to chunkify the pool, so remove the main
6512 pool placeholder insn. */
6513 remove_insn (pool->pool_insn);
6514
6515 s390_free_pool (pool);
6516 pool = NULL;
6517 }
6518
6519 /* If the functions ends with the section where the literal pool
6520 should be emitted set the marker to its end. */
6521 if (pool && !pool->emit_pool_after)
6522 pool->emit_pool_after = get_last_insn ();
6523
6524 return pool;
6525 }
6526
6527 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6528 Modify the current function to output the pool constants as well as
6529 the pool register setup instruction. */
6530
6531 static void
6532 s390_mainpool_finish (struct constant_pool *pool)
6533 {
6534 rtx base_reg = cfun->machine->base_reg;
6535 rtx insn;
6536
6537 /* If the pool is empty, we're done. */
6538 if (pool->size == 0)
6539 {
6540 /* We don't actually need a base register after all. */
6541 cfun->machine->base_reg = NULL_RTX;
6542
6543 if (pool->pool_insn)
6544 remove_insn (pool->pool_insn);
6545 s390_free_pool (pool);
6546 return;
6547 }
6548
6549 /* We need correct insn addresses. */
6550 shorten_branches (get_insns ());
6551
6552 /* On zSeries, we use a LARL to load the pool register. The pool is
6553 located in the .rodata section, so we emit it after the function. */
6554 if (TARGET_CPU_ZARCH)
6555 {
6556 insn = gen_main_base_64 (base_reg, pool->label);
6557 insn = emit_insn_after (insn, pool->pool_insn);
6558 INSN_ADDRESSES_NEW (insn, -1);
6559 remove_insn (pool->pool_insn);
6560
6561 insn = get_last_insn ();
6562 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6563 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6564
6565 s390_dump_pool (pool, 0);
6566 }
6567
6568 /* On S/390, if the total size of the function's code plus literal pool
6569 does not exceed 4096 bytes, we use BASR to set up a function base
6570 pointer, and emit the literal pool at the end of the function. */
6571 else if (INSN_ADDRESSES (INSN_UID (pool->emit_pool_after))
6572 + pool->size + 8 /* alignment slop */ < 4096)
6573 {
6574 insn = gen_main_base_31_small (base_reg, pool->label);
6575 insn = emit_insn_after (insn, pool->pool_insn);
6576 INSN_ADDRESSES_NEW (insn, -1);
6577 remove_insn (pool->pool_insn);
6578
6579 insn = emit_label_after (pool->label, insn);
6580 INSN_ADDRESSES_NEW (insn, -1);
6581
6582 /* emit_pool_after will be set by s390_mainpool_start to the
6583 last insn of the section where the literal pool should be
6584 emitted. */
6585 insn = pool->emit_pool_after;
6586
6587 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6588 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6589
6590 s390_dump_pool (pool, 1);
6591 }
6592
6593 /* Otherwise, we emit an inline literal pool and use BASR to branch
6594 over it, setting up the pool register at the same time. */
6595 else
6596 {
6597 rtx pool_end = gen_label_rtx ();
6598
6599 insn = gen_main_base_31_large (base_reg, pool->label, pool_end);
6600 insn = emit_jump_insn_after (insn, pool->pool_insn);
6601 JUMP_LABEL (insn) = pool_end;
6602 INSN_ADDRESSES_NEW (insn, -1);
6603 remove_insn (pool->pool_insn);
6604
6605 insn = emit_label_after (pool->label, insn);
6606 INSN_ADDRESSES_NEW (insn, -1);
6607
6608 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6609 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6610
6611 insn = emit_label_after (pool_end, pool->pool_insn);
6612 INSN_ADDRESSES_NEW (insn, -1);
6613
6614 s390_dump_pool (pool, 1);
6615 }
6616
6617
6618 /* Replace all literal pool references. */
6619
6620 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6621 {
6622 if (INSN_P (insn))
6623 replace_ltrel_base (&PATTERN (insn));
6624
6625 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6626 {
6627 rtx addr, pool_ref = NULL_RTX;
6628 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6629 if (pool_ref)
6630 {
6631 if (s390_execute_label (insn))
6632 addr = s390_find_execute (pool, insn);
6633 else
6634 addr = s390_find_constant (pool, get_pool_constant (pool_ref),
6635 get_pool_mode (pool_ref));
6636
6637 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
6638 INSN_CODE (insn) = -1;
6639 }
6640 }
6641 }
6642
6643
6644 /* Free the pool. */
6645 s390_free_pool (pool);
6646 }
6647
6648 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6649 We have decided we cannot use this pool, so revert all changes
6650 to the current function that were done by s390_mainpool_start. */
6651 static void
6652 s390_mainpool_cancel (struct constant_pool *pool)
6653 {
6654 /* We didn't actually change the instruction stream, so simply
6655 free the pool memory. */
6656 s390_free_pool (pool);
6657 }
6658
6659
6660 /* Chunkify the literal pool. */
6661
6662 #define S390_POOL_CHUNK_MIN 0xc00
6663 #define S390_POOL_CHUNK_MAX 0xe00
6664
6665 static struct constant_pool *
6666 s390_chunkify_start (void)
6667 {
6668 struct constant_pool *curr_pool = NULL, *pool_list = NULL;
6669 int extra_size = 0;
6670 bitmap far_labels;
6671 rtx pending_ltrel = NULL_RTX;
6672 rtx insn;
6673
6674 rtx (*gen_reload_base) (rtx, rtx) =
6675 TARGET_CPU_ZARCH? gen_reload_base_64 : gen_reload_base_31;
6676
6677
6678 /* We need correct insn addresses. */
6679
6680 shorten_branches (get_insns ());
6681
6682 /* Scan all insns and move literals to pool chunks. */
6683
6684 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6685 {
6686 bool section_switch_p = false;
6687
6688 /* Check for pending LTREL_BASE. */
6689 if (INSN_P (insn))
6690 {
6691 rtx ltrel_base = find_ltrel_base (PATTERN (insn));
6692 if (ltrel_base)
6693 {
6694 gcc_assert (ltrel_base == pending_ltrel);
6695 pending_ltrel = NULL_RTX;
6696 }
6697 }
6698
6699 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
6700 {
6701 if (!curr_pool)
6702 curr_pool = s390_start_pool (&pool_list, insn);
6703
6704 s390_add_execute (curr_pool, insn);
6705 s390_add_pool_insn (curr_pool, insn);
6706 }
6707 else if (GET_CODE (insn) == INSN || CALL_P (insn))
6708 {
6709 rtx pool_ref = NULL_RTX;
6710 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6711 if (pool_ref)
6712 {
6713 rtx constant = get_pool_constant (pool_ref);
6714 enum machine_mode mode = get_pool_mode (pool_ref);
6715
6716 if (!curr_pool)
6717 curr_pool = s390_start_pool (&pool_list, insn);
6718
6719 s390_add_constant (curr_pool, constant, mode);
6720 s390_add_pool_insn (curr_pool, insn);
6721
6722 /* Don't split the pool chunk between a LTREL_OFFSET load
6723 and the corresponding LTREL_BASE. */
6724 if (GET_CODE (constant) == CONST
6725 && GET_CODE (XEXP (constant, 0)) == UNSPEC
6726 && XINT (XEXP (constant, 0), 1) == UNSPEC_LTREL_OFFSET)
6727 {
6728 gcc_assert (!pending_ltrel);
6729 pending_ltrel = pool_ref;
6730 }
6731 }
6732 }
6733
6734 if (GET_CODE (insn) == JUMP_INSN || GET_CODE (insn) == CODE_LABEL)
6735 {
6736 if (curr_pool)
6737 s390_add_pool_insn (curr_pool, insn);
6738 /* An LTREL_BASE must follow within the same basic block. */
6739 gcc_assert (!pending_ltrel);
6740 }
6741
6742 if (NOTE_P (insn))
6743 switch (NOTE_KIND (insn))
6744 {
6745 case NOTE_INSN_SWITCH_TEXT_SECTIONS:
6746 section_switch_p = true;
6747 break;
6748 case NOTE_INSN_VAR_LOCATION:
6749 case NOTE_INSN_CALL_ARG_LOCATION:
6750 continue;
6751 default:
6752 break;
6753 }
6754
6755 if (!curr_pool
6756 || INSN_ADDRESSES_SIZE () <= (size_t) INSN_UID (insn)
6757 || INSN_ADDRESSES (INSN_UID (insn)) == -1)
6758 continue;
6759
6760 if (TARGET_CPU_ZARCH)
6761 {
6762 if (curr_pool->size < S390_POOL_CHUNK_MAX)
6763 continue;
6764
6765 s390_end_pool (curr_pool, NULL_RTX);
6766 curr_pool = NULL;
6767 }
6768 else
6769 {
6770 int chunk_size = INSN_ADDRESSES (INSN_UID (insn))
6771 - INSN_ADDRESSES (INSN_UID (curr_pool->first_insn))
6772 + extra_size;
6773
6774 /* We will later have to insert base register reload insns.
6775 Those will have an effect on code size, which we need to
6776 consider here. This calculation makes rather pessimistic
6777 worst-case assumptions. */
6778 if (GET_CODE (insn) == CODE_LABEL)
6779 extra_size += 6;
6780
6781 if (chunk_size < S390_POOL_CHUNK_MIN
6782 && curr_pool->size < S390_POOL_CHUNK_MIN
6783 && !section_switch_p)
6784 continue;
6785
6786 /* Pool chunks can only be inserted after BARRIERs ... */
6787 if (GET_CODE (insn) == BARRIER)
6788 {
6789 s390_end_pool (curr_pool, insn);
6790 curr_pool = NULL;
6791 extra_size = 0;
6792 }
6793
6794 /* ... so if we don't find one in time, create one. */
6795 else if (chunk_size > S390_POOL_CHUNK_MAX
6796 || curr_pool->size > S390_POOL_CHUNK_MAX
6797 || section_switch_p)
6798 {
6799 rtx label, jump, barrier, next, prev;
6800
6801 if (!section_switch_p)
6802 {
6803 /* We can insert the barrier only after a 'real' insn. */
6804 if (GET_CODE (insn) != INSN && GET_CODE (insn) != CALL_INSN)
6805 continue;
6806 if (get_attr_length (insn) == 0)
6807 continue;
6808 /* Don't separate LTREL_BASE from the corresponding
6809 LTREL_OFFSET load. */
6810 if (pending_ltrel)
6811 continue;
6812 next = insn;
6813 do
6814 {
6815 insn = next;
6816 next = NEXT_INSN (insn);
6817 }
6818 while (next
6819 && NOTE_P (next)
6820 && (NOTE_KIND (next) == NOTE_INSN_VAR_LOCATION
6821 || NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION));
6822 }
6823 else
6824 {
6825 gcc_assert (!pending_ltrel);
6826
6827 /* The old pool has to end before the section switch
6828 note in order to make it part of the current
6829 section. */
6830 insn = PREV_INSN (insn);
6831 }
6832
6833 label = gen_label_rtx ();
6834 prev = insn;
6835 if (prev && NOTE_P (prev))
6836 prev = prev_nonnote_insn (prev);
6837 if (prev)
6838 jump = emit_jump_insn_after_setloc (gen_jump (label), insn,
6839 INSN_LOCATOR (prev));
6840 else
6841 jump = emit_jump_insn_after_noloc (gen_jump (label), insn);
6842 barrier = emit_barrier_after (jump);
6843 insn = emit_label_after (label, barrier);
6844 JUMP_LABEL (jump) = label;
6845 LABEL_NUSES (label) = 1;
6846
6847 INSN_ADDRESSES_NEW (jump, -1);
6848 INSN_ADDRESSES_NEW (barrier, -1);
6849 INSN_ADDRESSES_NEW (insn, -1);
6850
6851 s390_end_pool (curr_pool, barrier);
6852 curr_pool = NULL;
6853 extra_size = 0;
6854 }
6855 }
6856 }
6857
6858 if (curr_pool)
6859 s390_end_pool (curr_pool, NULL_RTX);
6860 gcc_assert (!pending_ltrel);
6861
6862 /* Find all labels that are branched into
6863 from an insn belonging to a different chunk. */
6864
6865 far_labels = BITMAP_ALLOC (NULL);
6866
6867 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6868 {
6869 /* Labels marked with LABEL_PRESERVE_P can be target
6870 of non-local jumps, so we have to mark them.
6871 The same holds for named labels.
6872
6873 Don't do that, however, if it is the label before
6874 a jump table. */
6875
6876 if (GET_CODE (insn) == CODE_LABEL
6877 && (LABEL_PRESERVE_P (insn) || LABEL_NAME (insn)))
6878 {
6879 rtx vec_insn = next_real_insn (insn);
6880 rtx vec_pat = vec_insn && GET_CODE (vec_insn) == JUMP_INSN ?
6881 PATTERN (vec_insn) : NULL_RTX;
6882 if (!vec_pat
6883 || !(GET_CODE (vec_pat) == ADDR_VEC
6884 || GET_CODE (vec_pat) == ADDR_DIFF_VEC))
6885 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (insn));
6886 }
6887
6888 /* If we have a direct jump (conditional or unconditional)
6889 or a casesi jump, check all potential targets. */
6890 else if (GET_CODE (insn) == JUMP_INSN)
6891 {
6892 rtx pat = PATTERN (insn);
6893 if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 2)
6894 pat = XVECEXP (pat, 0, 0);
6895
6896 if (GET_CODE (pat) == SET)
6897 {
6898 rtx label = JUMP_LABEL (insn);
6899 if (label)
6900 {
6901 if (s390_find_pool (pool_list, label)
6902 != s390_find_pool (pool_list, insn))
6903 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
6904 }
6905 }
6906 else if (GET_CODE (pat) == PARALLEL
6907 && XVECLEN (pat, 0) == 2
6908 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
6909 && GET_CODE (XVECEXP (pat, 0, 1)) == USE
6910 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == LABEL_REF)
6911 {
6912 /* Find the jump table used by this casesi jump. */
6913 rtx vec_label = XEXP (XEXP (XVECEXP (pat, 0, 1), 0), 0);
6914 rtx vec_insn = next_real_insn (vec_label);
6915 rtx vec_pat = vec_insn && GET_CODE (vec_insn) == JUMP_INSN ?
6916 PATTERN (vec_insn) : NULL_RTX;
6917 if (vec_pat
6918 && (GET_CODE (vec_pat) == ADDR_VEC
6919 || GET_CODE (vec_pat) == ADDR_DIFF_VEC))
6920 {
6921 int i, diff_p = GET_CODE (vec_pat) == ADDR_DIFF_VEC;
6922
6923 for (i = 0; i < XVECLEN (vec_pat, diff_p); i++)
6924 {
6925 rtx label = XEXP (XVECEXP (vec_pat, diff_p, i), 0);
6926
6927 if (s390_find_pool (pool_list, label)
6928 != s390_find_pool (pool_list, insn))
6929 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
6930 }
6931 }
6932 }
6933 }
6934 }
6935
6936 /* Insert base register reload insns before every pool. */
6937
6938 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
6939 {
6940 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
6941 curr_pool->label);
6942 rtx insn = curr_pool->first_insn;
6943 INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
6944 }
6945
6946 /* Insert base register reload insns at every far label. */
6947
6948 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6949 if (GET_CODE (insn) == CODE_LABEL
6950 && bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
6951 {
6952 struct constant_pool *pool = s390_find_pool (pool_list, insn);
6953 if (pool)
6954 {
6955 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
6956 pool->label);
6957 INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
6958 }
6959 }
6960
6961
6962 BITMAP_FREE (far_labels);
6963
6964
6965 /* Recompute insn addresses. */
6966
6967 init_insn_lengths ();
6968 shorten_branches (get_insns ());
6969
6970 return pool_list;
6971 }
6972
6973 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
6974 After we have decided to use this list, finish implementing
6975 all changes to the current function as required. */
6976
6977 static void
6978 s390_chunkify_finish (struct constant_pool *pool_list)
6979 {
6980 struct constant_pool *curr_pool = NULL;
6981 rtx insn;
6982
6983
6984 /* Replace all literal pool references. */
6985
6986 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6987 {
6988 if (INSN_P (insn))
6989 replace_ltrel_base (&PATTERN (insn));
6990
6991 curr_pool = s390_find_pool (pool_list, insn);
6992 if (!curr_pool)
6993 continue;
6994
6995 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6996 {
6997 rtx addr, pool_ref = NULL_RTX;
6998 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6999 if (pool_ref)
7000 {
7001 if (s390_execute_label (insn))
7002 addr = s390_find_execute (curr_pool, insn);
7003 else
7004 addr = s390_find_constant (curr_pool,
7005 get_pool_constant (pool_ref),
7006 get_pool_mode (pool_ref));
7007
7008 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
7009 INSN_CODE (insn) = -1;
7010 }
7011 }
7012 }
7013
7014 /* Dump out all literal pools. */
7015
7016 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
7017 s390_dump_pool (curr_pool, 0);
7018
7019 /* Free pool list. */
7020
7021 while (pool_list)
7022 {
7023 struct constant_pool *next = pool_list->next;
7024 s390_free_pool (pool_list);
7025 pool_list = next;
7026 }
7027 }
7028
7029 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
7030 We have decided we cannot use this list, so revert all changes
7031 to the current function that were done by s390_chunkify_start. */
7032
7033 static void
7034 s390_chunkify_cancel (struct constant_pool *pool_list)
7035 {
7036 struct constant_pool *curr_pool = NULL;
7037 rtx insn;
7038
7039 /* Remove all pool placeholder insns. */
7040
7041 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
7042 {
7043 /* Did we insert an extra barrier? Remove it. */
7044 rtx barrier = PREV_INSN (curr_pool->pool_insn);
7045 rtx jump = barrier? PREV_INSN (barrier) : NULL_RTX;
7046 rtx label = NEXT_INSN (curr_pool->pool_insn);
7047
7048 if (jump && GET_CODE (jump) == JUMP_INSN
7049 && barrier && GET_CODE (barrier) == BARRIER
7050 && label && GET_CODE (label) == CODE_LABEL
7051 && GET_CODE (PATTERN (jump)) == SET
7052 && SET_DEST (PATTERN (jump)) == pc_rtx
7053 && GET_CODE (SET_SRC (PATTERN (jump))) == LABEL_REF
7054 && XEXP (SET_SRC (PATTERN (jump)), 0) == label)
7055 {
7056 remove_insn (jump);
7057 remove_insn (barrier);
7058 remove_insn (label);
7059 }
7060
7061 remove_insn (curr_pool->pool_insn);
7062 }
7063
7064 /* Remove all base register reload insns. */
7065
7066 for (insn = get_insns (); insn; )
7067 {
7068 rtx next_insn = NEXT_INSN (insn);
7069
7070 if (GET_CODE (insn) == INSN
7071 && GET_CODE (PATTERN (insn)) == SET
7072 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
7073 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_RELOAD_BASE)
7074 remove_insn (insn);
7075
7076 insn = next_insn;
7077 }
7078
7079 /* Free pool list. */
7080
7081 while (pool_list)
7082 {
7083 struct constant_pool *next = pool_list->next;
7084 s390_free_pool (pool_list);
7085 pool_list = next;
7086 }
7087 }
7088
7089 /* Output the constant pool entry EXP in mode MODE with alignment ALIGN. */
7090
7091 void
7092 s390_output_pool_entry (rtx exp, enum machine_mode mode, unsigned int align)
7093 {
7094 REAL_VALUE_TYPE r;
7095
7096 switch (GET_MODE_CLASS (mode))
7097 {
7098 case MODE_FLOAT:
7099 case MODE_DECIMAL_FLOAT:
7100 gcc_assert (GET_CODE (exp) == CONST_DOUBLE);
7101
7102 REAL_VALUE_FROM_CONST_DOUBLE (r, exp);
7103 assemble_real (r, mode, align);
7104 break;
7105
7106 case MODE_INT:
7107 assemble_integer (exp, GET_MODE_SIZE (mode), align, 1);
7108 mark_symbol_refs_as_used (exp);
7109 break;
7110
7111 default:
7112 gcc_unreachable ();
7113 }
7114 }
7115
7116
7117 /* Return an RTL expression representing the value of the return address
7118 for the frame COUNT steps up from the current frame. FRAME is the
7119 frame pointer of that frame. */
7120
7121 rtx
7122 s390_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
7123 {
7124 int offset;
7125 rtx addr;
7126
7127 /* Without backchain, we fail for all but the current frame. */
7128
7129 if (!TARGET_BACKCHAIN && count > 0)
7130 return NULL_RTX;
7131
7132 /* For the current frame, we need to make sure the initial
7133 value of RETURN_REGNUM is actually saved. */
7134
7135 if (count == 0)
7136 {
7137 /* On non-z architectures branch splitting could overwrite r14. */
7138 if (TARGET_CPU_ZARCH)
7139 return get_hard_reg_initial_val (Pmode, RETURN_REGNUM);
7140 else
7141 {
7142 cfun_frame_layout.save_return_addr_p = true;
7143 return gen_rtx_MEM (Pmode, return_address_pointer_rtx);
7144 }
7145 }
7146
7147 if (TARGET_PACKED_STACK)
7148 offset = -2 * UNITS_PER_LONG;
7149 else
7150 offset = RETURN_REGNUM * UNITS_PER_LONG;
7151
7152 addr = plus_constant (Pmode, frame, offset);
7153 addr = memory_address (Pmode, addr);
7154 return gen_rtx_MEM (Pmode, addr);
7155 }
7156
7157 /* Return an RTL expression representing the back chain stored in
7158 the current stack frame. */
7159
7160 rtx
7161 s390_back_chain_rtx (void)
7162 {
7163 rtx chain;
7164
7165 gcc_assert (TARGET_BACKCHAIN);
7166
7167 if (TARGET_PACKED_STACK)
7168 chain = plus_constant (Pmode, stack_pointer_rtx,
7169 STACK_POINTER_OFFSET - UNITS_PER_LONG);
7170 else
7171 chain = stack_pointer_rtx;
7172
7173 chain = gen_rtx_MEM (Pmode, chain);
7174 return chain;
7175 }
7176
7177 /* Find first call clobbered register unused in a function.
7178 This could be used as base register in a leaf function
7179 or for holding the return address before epilogue. */
7180
7181 static int
7182 find_unused_clobbered_reg (void)
7183 {
7184 int i;
7185 for (i = 0; i < 6; i++)
7186 if (!df_regs_ever_live_p (i))
7187 return i;
7188 return 0;
7189 }
7190
7191
7192 /* Helper function for s390_regs_ever_clobbered. Sets the fields in DATA for all
7193 clobbered hard regs in SETREG. */
7194
7195 static void
7196 s390_reg_clobbered_rtx (rtx setreg, const_rtx set_insn ATTRIBUTE_UNUSED, void *data)
7197 {
7198 int *regs_ever_clobbered = (int *)data;
7199 unsigned int i, regno;
7200 enum machine_mode mode = GET_MODE (setreg);
7201
7202 if (GET_CODE (setreg) == SUBREG)
7203 {
7204 rtx inner = SUBREG_REG (setreg);
7205 if (!GENERAL_REG_P (inner))
7206 return;
7207 regno = subreg_regno (setreg);
7208 }
7209 else if (GENERAL_REG_P (setreg))
7210 regno = REGNO (setreg);
7211 else
7212 return;
7213
7214 for (i = regno;
7215 i < regno + HARD_REGNO_NREGS (regno, mode);
7216 i++)
7217 regs_ever_clobbered[i] = 1;
7218 }
7219
7220 /* Walks through all basic blocks of the current function looking
7221 for clobbered hard regs using s390_reg_clobbered_rtx. The fields
7222 of the passed integer array REGS_EVER_CLOBBERED are set to one for
7223 each of those regs. */
7224
7225 static void
7226 s390_regs_ever_clobbered (int *regs_ever_clobbered)
7227 {
7228 basic_block cur_bb;
7229 rtx cur_insn;
7230 unsigned int i;
7231
7232 memset (regs_ever_clobbered, 0, 16 * sizeof (int));
7233
7234 /* For non-leaf functions we have to consider all call clobbered regs to be
7235 clobbered. */
7236 if (!crtl->is_leaf)
7237 {
7238 for (i = 0; i < 16; i++)
7239 regs_ever_clobbered[i] = call_really_used_regs[i];
7240 }
7241
7242 /* Make the "magic" eh_return registers live if necessary. For regs_ever_live
7243 this work is done by liveness analysis (mark_regs_live_at_end).
7244 Special care is needed for functions containing landing pads. Landing pads
7245 may use the eh registers, but the code which sets these registers is not
7246 contained in that function. Hence s390_regs_ever_clobbered is not able to
7247 deal with this automatically. */
7248 if (crtl->calls_eh_return || cfun->machine->has_landing_pad_p)
7249 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM ; i++)
7250 if (crtl->calls_eh_return
7251 || (cfun->machine->has_landing_pad_p
7252 && df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
7253 regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;
7254
7255 /* For nonlocal gotos all call-saved registers have to be saved.
7256 This flag is also set for the unwinding code in libgcc.
7257 See expand_builtin_unwind_init. For regs_ever_live this is done by
7258 reload. */
7259 if (cfun->has_nonlocal_label)
7260 for (i = 0; i < 16; i++)
7261 if (!call_really_used_regs[i])
7262 regs_ever_clobbered[i] = 1;
7263
7264 FOR_EACH_BB (cur_bb)
7265 {
7266 FOR_BB_INSNS (cur_bb, cur_insn)
7267 {
7268 if (INSN_P (cur_insn))
7269 note_stores (PATTERN (cur_insn),
7270 s390_reg_clobbered_rtx,
7271 regs_ever_clobbered);
7272 }
7273 }
7274 }
7275
7276 /* Determine the frame area which actually has to be accessed
7277 in the function epilogue. The values are stored at the
7278 given pointers AREA_BOTTOM (address of the lowest used stack
7279 address) and AREA_TOP (address of the first item which does
7280 not belong to the stack frame). */
7281
7282 static void
7283 s390_frame_area (int *area_bottom, int *area_top)
7284 {
7285 int b, t;
7286 int i;
7287
7288 b = INT_MAX;
7289 t = INT_MIN;
7290
7291 if (cfun_frame_layout.first_restore_gpr != -1)
7292 {
7293 b = (cfun_frame_layout.gprs_offset
7294 + cfun_frame_layout.first_restore_gpr * UNITS_PER_LONG);
7295 t = b + (cfun_frame_layout.last_restore_gpr
7296 - cfun_frame_layout.first_restore_gpr + 1) * UNITS_PER_LONG;
7297 }
7298
7299 if (TARGET_64BIT && cfun_save_high_fprs_p)
7300 {
7301 b = MIN (b, cfun_frame_layout.f8_offset);
7302 t = MAX (t, (cfun_frame_layout.f8_offset
7303 + cfun_frame_layout.high_fprs * 8));
7304 }
7305
7306 if (!TARGET_64BIT)
7307 for (i = 2; i < 4; i++)
7308 if (cfun_fpr_bit_p (i))
7309 {
7310 b = MIN (b, cfun_frame_layout.f4_offset + (i - 2) * 8);
7311 t = MAX (t, cfun_frame_layout.f4_offset + (i - 1) * 8);
7312 }
7313
7314 *area_bottom = b;
7315 *area_top = t;
7316 }
7317
7318 /* Fill cfun->machine with info about register usage of current function.
7319 Return in CLOBBERED_REGS which GPRs are currently considered set. */
7320
7321 static void
7322 s390_register_info (int clobbered_regs[])
7323 {
7324 int i, j;
7325
7326 /* fprs 8 - 15 are call saved for 64 Bit ABI. */
7327 cfun_frame_layout.fpr_bitmap = 0;
7328 cfun_frame_layout.high_fprs = 0;
7329 if (TARGET_64BIT)
7330 for (i = 24; i < 32; i++)
7331 if (df_regs_ever_live_p (i) && !global_regs[i])
7332 {
7333 cfun_set_fpr_bit (i - 16);
7334 cfun_frame_layout.high_fprs++;
7335 }
7336
7337 /* Find first and last gpr to be saved. We trust regs_ever_live
7338 data, except that we don't save and restore global registers.
7339
7340 Also, all registers with special meaning to the compiler need
7341 to be handled extra. */
7342
7343 s390_regs_ever_clobbered (clobbered_regs);
7344
7345 for (i = 0; i < 16; i++)
7346 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i] && !fixed_regs[i];
7347
7348 if (frame_pointer_needed)
7349 clobbered_regs[HARD_FRAME_POINTER_REGNUM] = 1;
7350
7351 if (flag_pic)
7352 clobbered_regs[PIC_OFFSET_TABLE_REGNUM]
7353 |= df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM);
7354
7355 clobbered_regs[BASE_REGNUM]
7356 |= (cfun->machine->base_reg
7357 && REGNO (cfun->machine->base_reg) == BASE_REGNUM);
7358
7359 clobbered_regs[RETURN_REGNUM]
7360 |= (!crtl->is_leaf
7361 || TARGET_TPF_PROFILING
7362 || cfun->machine->split_branches_pending_p
7363 || cfun_frame_layout.save_return_addr_p
7364 || crtl->calls_eh_return
7365 || cfun->stdarg);
7366
7367 clobbered_regs[STACK_POINTER_REGNUM]
7368 |= (!crtl->is_leaf
7369 || TARGET_TPF_PROFILING
7370 || cfun_save_high_fprs_p
7371 || get_frame_size () > 0
7372 || cfun->calls_alloca
7373 || cfun->stdarg);
7374
7375 for (i = 6; i < 16; i++)
7376 if (df_regs_ever_live_p (i) || clobbered_regs[i])
7377 break;
7378 for (j = 15; j > i; j--)
7379 if (df_regs_ever_live_p (j) || clobbered_regs[j])
7380 break;
7381
7382 if (i == 16)
7383 {
7384 /* Nothing to save/restore. */
7385 cfun_frame_layout.first_save_gpr_slot = -1;
7386 cfun_frame_layout.last_save_gpr_slot = -1;
7387 cfun_frame_layout.first_save_gpr = -1;
7388 cfun_frame_layout.first_restore_gpr = -1;
7389 cfun_frame_layout.last_save_gpr = -1;
7390 cfun_frame_layout.last_restore_gpr = -1;
7391 }
7392 else
7393 {
7394 /* Save slots for gprs from i to j. */
7395 cfun_frame_layout.first_save_gpr_slot = i;
7396 cfun_frame_layout.last_save_gpr_slot = j;
7397
7398 for (i = cfun_frame_layout.first_save_gpr_slot;
7399 i < cfun_frame_layout.last_save_gpr_slot + 1;
7400 i++)
7401 if (clobbered_regs[i])
7402 break;
7403
7404 for (j = cfun_frame_layout.last_save_gpr_slot; j > i; j--)
7405 if (clobbered_regs[j])
7406 break;
7407
7408 if (i == cfun_frame_layout.last_save_gpr_slot + 1)
7409 {
7410 /* Nothing to save/restore. */
7411 cfun_frame_layout.first_save_gpr = -1;
7412 cfun_frame_layout.first_restore_gpr = -1;
7413 cfun_frame_layout.last_save_gpr = -1;
7414 cfun_frame_layout.last_restore_gpr = -1;
7415 }
7416 else
7417 {
7418 /* Save / Restore from gpr i to j. */
7419 cfun_frame_layout.first_save_gpr = i;
7420 cfun_frame_layout.first_restore_gpr = i;
7421 cfun_frame_layout.last_save_gpr = j;
7422 cfun_frame_layout.last_restore_gpr = j;
7423 }
7424 }
7425
7426 if (cfun->stdarg)
7427 {
7428 /* Varargs functions need to save gprs 2 to 6. */
7429 if (cfun->va_list_gpr_size
7430 && crtl->args.info.gprs < GP_ARG_NUM_REG)
7431 {
7432 int min_gpr = crtl->args.info.gprs;
7433 int max_gpr = min_gpr + cfun->va_list_gpr_size;
7434 if (max_gpr > GP_ARG_NUM_REG)
7435 max_gpr = GP_ARG_NUM_REG;
7436
7437 if (cfun_frame_layout.first_save_gpr == -1
7438 || cfun_frame_layout.first_save_gpr > 2 + min_gpr)
7439 {
7440 cfun_frame_layout.first_save_gpr = 2 + min_gpr;
7441 cfun_frame_layout.first_save_gpr_slot = 2 + min_gpr;
7442 }
7443
7444 if (cfun_frame_layout.last_save_gpr == -1
7445 || cfun_frame_layout.last_save_gpr < 2 + max_gpr - 1)
7446 {
7447 cfun_frame_layout.last_save_gpr = 2 + max_gpr - 1;
7448 cfun_frame_layout.last_save_gpr_slot = 2 + max_gpr - 1;
7449 }
7450 }
7451
7452 /* Mark f0, f2 for 31 bit and f0-f4 for 64 bit to be saved. */
7453 if (TARGET_HARD_FLOAT && cfun->va_list_fpr_size
7454 && crtl->args.info.fprs < FP_ARG_NUM_REG)
7455 {
7456 int min_fpr = crtl->args.info.fprs;
7457 int max_fpr = min_fpr + cfun->va_list_fpr_size;
7458 if (max_fpr > FP_ARG_NUM_REG)
7459 max_fpr = FP_ARG_NUM_REG;
7460
7461 /* ??? This is currently required to ensure proper location
7462 of the fpr save slots within the va_list save area. */
7463 if (TARGET_PACKED_STACK)
7464 min_fpr = 0;
7465
7466 for (i = min_fpr; i < max_fpr; i++)
7467 cfun_set_fpr_bit (i);
7468 }
7469 }
7470
7471 if (!TARGET_64BIT)
7472 for (i = 2; i < 4; i++)
7473 if (df_regs_ever_live_p (i + 16) && !global_regs[i + 16])
7474 cfun_set_fpr_bit (i);
7475 }
7476
7477 /* Fill cfun->machine with info about frame of current function. */
7478
7479 static void
7480 s390_frame_info (void)
7481 {
7482 int i;
7483
7484 cfun_frame_layout.frame_size = get_frame_size ();
7485 if (!TARGET_64BIT && cfun_frame_layout.frame_size > 0x7fff0000)
7486 fatal_error ("total size of local variables exceeds architecture limit");
7487
7488 if (!TARGET_PACKED_STACK)
7489 {
7490 cfun_frame_layout.backchain_offset = 0;
7491 cfun_frame_layout.f0_offset = 16 * UNITS_PER_LONG;
7492 cfun_frame_layout.f4_offset = cfun_frame_layout.f0_offset + 2 * 8;
7493 cfun_frame_layout.f8_offset = -cfun_frame_layout.high_fprs * 8;
7494 cfun_frame_layout.gprs_offset = (cfun_frame_layout.first_save_gpr_slot
7495 * UNITS_PER_LONG);
7496 }
7497 else if (TARGET_BACKCHAIN) /* kernel stack layout */
7498 {
7499 cfun_frame_layout.backchain_offset = (STACK_POINTER_OFFSET
7500 - UNITS_PER_LONG);
7501 cfun_frame_layout.gprs_offset
7502 = (cfun_frame_layout.backchain_offset
7503 - (STACK_POINTER_REGNUM - cfun_frame_layout.first_save_gpr_slot + 1)
7504 * UNITS_PER_LONG);
7505
7506 if (TARGET_64BIT)
7507 {
7508 cfun_frame_layout.f4_offset
7509 = (cfun_frame_layout.gprs_offset
7510 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7511
7512 cfun_frame_layout.f0_offset
7513 = (cfun_frame_layout.f4_offset
7514 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7515 }
7516 else
7517 {
7518 /* On 31 bit we have to care about alignment of the
7519 floating point regs to provide fastest access. */
7520 cfun_frame_layout.f0_offset
7521 = ((cfun_frame_layout.gprs_offset
7522 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1))
7523 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7524
7525 cfun_frame_layout.f4_offset
7526 = (cfun_frame_layout.f0_offset
7527 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7528 }
7529 }
7530 else /* no backchain */
7531 {
7532 cfun_frame_layout.f4_offset
7533 = (STACK_POINTER_OFFSET
7534 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7535
7536 cfun_frame_layout.f0_offset
7537 = (cfun_frame_layout.f4_offset
7538 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7539
7540 cfun_frame_layout.gprs_offset
7541 = cfun_frame_layout.f0_offset - cfun_gprs_save_area_size;
7542 }
7543
7544 if (crtl->is_leaf
7545 && !TARGET_TPF_PROFILING
7546 && cfun_frame_layout.frame_size == 0
7547 && !cfun_save_high_fprs_p
7548 && !cfun->calls_alloca
7549 && !cfun->stdarg)
7550 return;
7551
7552 if (!TARGET_PACKED_STACK)
7553 cfun_frame_layout.frame_size += (STACK_POINTER_OFFSET
7554 + crtl->outgoing_args_size
7555 + cfun_frame_layout.high_fprs * 8);
7556 else
7557 {
7558 if (TARGET_BACKCHAIN)
7559 cfun_frame_layout.frame_size += UNITS_PER_LONG;
7560
7561 /* No alignment trouble here because f8-f15 are only saved under
7562 64 bit. */
7563 cfun_frame_layout.f8_offset = (MIN (MIN (cfun_frame_layout.f0_offset,
7564 cfun_frame_layout.f4_offset),
7565 cfun_frame_layout.gprs_offset)
7566 - cfun_frame_layout.high_fprs * 8);
7567
7568 cfun_frame_layout.frame_size += cfun_frame_layout.high_fprs * 8;
7569
7570 for (i = 0; i < 8; i++)
7571 if (cfun_fpr_bit_p (i))
7572 cfun_frame_layout.frame_size += 8;
7573
7574 cfun_frame_layout.frame_size += cfun_gprs_save_area_size;
7575
7576 /* If under 31 bit an odd number of gprs has to be saved we have to adjust
7577 the frame size to sustain 8 byte alignment of stack frames. */
7578 cfun_frame_layout.frame_size = ((cfun_frame_layout.frame_size +
7579 STACK_BOUNDARY / BITS_PER_UNIT - 1)
7580 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1));
7581
7582 cfun_frame_layout.frame_size += crtl->outgoing_args_size;
7583 }
7584 }
7585
7586 /* Generate frame layout. Fills in register and frame data for the current
7587 function in cfun->machine. This routine can be called multiple times;
7588 it will re-do the complete frame layout every time. */
7589
7590 static void
7591 s390_init_frame_layout (void)
7592 {
7593 HOST_WIDE_INT frame_size;
7594 int base_used;
7595 int clobbered_regs[16];
7596
7597 /* On S/390 machines, we may need to perform branch splitting, which
7598 will require both base and return address register. We have no
7599 choice but to assume we're going to need them until right at the
7600 end of the machine dependent reorg phase. */
7601 if (!TARGET_CPU_ZARCH)
7602 cfun->machine->split_branches_pending_p = true;
7603
7604 do
7605 {
7606 frame_size = cfun_frame_layout.frame_size;
7607
7608 /* Try to predict whether we'll need the base register. */
7609 base_used = cfun->machine->split_branches_pending_p
7610 || crtl->uses_const_pool
7611 || (!DISP_IN_RANGE (frame_size)
7612 && !CONST_OK_FOR_K (frame_size));
7613
7614 /* Decide which register to use as literal pool base. In small
7615 leaf functions, try to use an unused call-clobbered register
7616 as base register to avoid save/restore overhead. */
7617 if (!base_used)
7618 cfun->machine->base_reg = NULL_RTX;
7619 else if (crtl->is_leaf && !df_regs_ever_live_p (5))
7620 cfun->machine->base_reg = gen_rtx_REG (Pmode, 5);
7621 else
7622 cfun->machine->base_reg = gen_rtx_REG (Pmode, BASE_REGNUM);
7623
7624 s390_register_info (clobbered_regs);
7625 s390_frame_info ();
7626 }
7627 while (frame_size != cfun_frame_layout.frame_size);
7628 }
7629
7630 /* Update frame layout. Recompute actual register save data based on
7631 current info and update regs_ever_live for the special registers.
7632 May be called multiple times, but may never cause *more* registers
7633 to be saved than s390_init_frame_layout allocated room for. */
7634
7635 static void
7636 s390_update_frame_layout (void)
7637 {
7638 int clobbered_regs[16];
7639
7640 s390_register_info (clobbered_regs);
7641
7642 df_set_regs_ever_live (BASE_REGNUM,
7643 clobbered_regs[BASE_REGNUM] ? true : false);
7644 df_set_regs_ever_live (RETURN_REGNUM,
7645 clobbered_regs[RETURN_REGNUM] ? true : false);
7646 df_set_regs_ever_live (STACK_POINTER_REGNUM,
7647 clobbered_regs[STACK_POINTER_REGNUM] ? true : false);
7648
7649 if (cfun->machine->base_reg)
7650 df_set_regs_ever_live (REGNO (cfun->machine->base_reg), true);
7651 }
7652
7653 /* Return true if it is legal to put a value with MODE into REGNO. */
7654
7655 bool
7656 s390_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
7657 {
7658 switch (REGNO_REG_CLASS (regno))
7659 {
7660 case FP_REGS:
7661 if (REGNO_PAIR_OK (regno, mode))
7662 {
7663 if (mode == SImode || mode == DImode)
7664 return true;
7665
7666 if (FLOAT_MODE_P (mode) && GET_MODE_CLASS (mode) != MODE_VECTOR_FLOAT)
7667 return true;
7668 }
7669 break;
7670 case ADDR_REGS:
7671 if (FRAME_REGNO_P (regno) && mode == Pmode)
7672 return true;
7673
7674 /* fallthrough */
7675 case GENERAL_REGS:
7676 if (REGNO_PAIR_OK (regno, mode))
7677 {
7678 if (TARGET_ZARCH
7679 || (mode != TFmode && mode != TCmode && mode != TDmode))
7680 return true;
7681 }
7682 break;
7683 case CC_REGS:
7684 if (GET_MODE_CLASS (mode) == MODE_CC)
7685 return true;
7686 break;
7687 case ACCESS_REGS:
7688 if (REGNO_PAIR_OK (regno, mode))
7689 {
7690 if (mode == SImode || mode == Pmode)
7691 return true;
7692 }
7693 break;
7694 default:
7695 return false;
7696 }
7697
7698 return false;
7699 }
7700
7701 /* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */
7702
7703 bool
7704 s390_hard_regno_rename_ok (unsigned int old_reg, unsigned int new_reg)
7705 {
7706 /* Once we've decided upon a register to use as base register, it must
7707 no longer be used for any other purpose. */
7708 if (cfun->machine->base_reg)
7709 if (REGNO (cfun->machine->base_reg) == old_reg
7710 || REGNO (cfun->machine->base_reg) == new_reg)
7711 return false;
7712
7713 return true;
7714 }
7715
7716 /* Maximum number of registers to represent a value of mode MODE
7717 in a register of class RCLASS. */
7718
7719 int
7720 s390_class_max_nregs (enum reg_class rclass, enum machine_mode mode)
7721 {
7722 switch (rclass)
7723 {
7724 case FP_REGS:
7725 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
7726 return 2 * ((GET_MODE_SIZE (mode) / 2 + 8 - 1) / 8);
7727 else
7728 return (GET_MODE_SIZE (mode) + 8 - 1) / 8;
7729 case ACCESS_REGS:
7730 return (GET_MODE_SIZE (mode) + 4 - 1) / 4;
7731 default:
7732 break;
7733 }
7734 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
7735 }
7736
7737 /* Return true if register FROM can be eliminated via register TO. */
7738
7739 static bool
7740 s390_can_eliminate (const int from, const int to)
7741 {
7742 /* On zSeries machines, we have not marked the base register as fixed.
7743 Instead, we have an elimination rule BASE_REGNUM -> BASE_REGNUM.
7744 If a function requires the base register, we say here that this
7745 elimination cannot be performed. This will cause reload to free
7746 up the base register (as if it were fixed). On the other hand,
7747 if the current function does *not* require the base register, we
7748 say here the elimination succeeds, which in turn allows reload
7749 to allocate the base register for any other purpose. */
7750 if (from == BASE_REGNUM && to == BASE_REGNUM)
7751 {
7752 if (TARGET_CPU_ZARCH)
7753 {
7754 s390_init_frame_layout ();
7755 return cfun->machine->base_reg == NULL_RTX;
7756 }
7757
7758 return false;
7759 }
7760
7761 /* Everything else must point into the stack frame. */
7762 gcc_assert (to == STACK_POINTER_REGNUM
7763 || to == HARD_FRAME_POINTER_REGNUM);
7764
7765 gcc_assert (from == FRAME_POINTER_REGNUM
7766 || from == ARG_POINTER_REGNUM
7767 || from == RETURN_ADDRESS_POINTER_REGNUM);
7768
7769 /* Make sure we actually saved the return address. */
7770 if (from == RETURN_ADDRESS_POINTER_REGNUM)
7771 if (!crtl->calls_eh_return
7772 && !cfun->stdarg
7773 && !cfun_frame_layout.save_return_addr_p)
7774 return false;
7775
7776 return true;
7777 }
7778
7779 /* Return offset between register FROM and TO initially after prolog. */
7780
7781 HOST_WIDE_INT
7782 s390_initial_elimination_offset (int from, int to)
7783 {
7784 HOST_WIDE_INT offset;
7785 int index;
7786
7787 /* ??? Why are we called for non-eliminable pairs? */
7788 if (!s390_can_eliminate (from, to))
7789 return 0;
7790
7791 switch (from)
7792 {
7793 case FRAME_POINTER_REGNUM:
7794 offset = (get_frame_size()
7795 + STACK_POINTER_OFFSET
7796 + crtl->outgoing_args_size);
7797 break;
7798
7799 case ARG_POINTER_REGNUM:
7800 s390_init_frame_layout ();
7801 offset = cfun_frame_layout.frame_size + STACK_POINTER_OFFSET;
7802 break;
7803
7804 case RETURN_ADDRESS_POINTER_REGNUM:
7805 s390_init_frame_layout ();
7806 index = RETURN_REGNUM - cfun_frame_layout.first_save_gpr_slot;
7807 gcc_assert (index >= 0);
7808 offset = cfun_frame_layout.frame_size + cfun_frame_layout.gprs_offset;
7809 offset += index * UNITS_PER_LONG;
7810 break;
7811
7812 case BASE_REGNUM:
7813 offset = 0;
7814 break;
7815
7816 default:
7817 gcc_unreachable ();
7818 }
7819
7820 return offset;
7821 }
7822
7823 /* Emit insn to save fpr REGNUM at offset OFFSET relative
7824 to register BASE. Return generated insn. */
7825
7826 static rtx
7827 save_fpr (rtx base, int offset, int regnum)
7828 {
7829 rtx addr;
7830 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
7831
7832 if (regnum >= 16 && regnum <= (16 + FP_ARG_NUM_REG))
7833 set_mem_alias_set (addr, get_varargs_alias_set ());
7834 else
7835 set_mem_alias_set (addr, get_frame_alias_set ());
7836
7837 return emit_move_insn (addr, gen_rtx_REG (DFmode, regnum));
7838 }
7839
7840 /* Emit insn to restore fpr REGNUM from offset OFFSET relative
7841 to register BASE. Return generated insn. */
7842
7843 static rtx
7844 restore_fpr (rtx base, int offset, int regnum)
7845 {
7846 rtx addr;
7847 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
7848 set_mem_alias_set (addr, get_frame_alias_set ());
7849
7850 return emit_move_insn (gen_rtx_REG (DFmode, regnum), addr);
7851 }
7852
7853 /* Return true if REGNO is a global register, but not one
7854 of the special ones that need to be saved/restored in anyway. */
7855
7856 static inline bool
7857 global_not_special_regno_p (int regno)
7858 {
7859 return (global_regs[regno]
7860 /* These registers are special and need to be
7861 restored in any case. */
7862 && !(regno == STACK_POINTER_REGNUM
7863 || regno == RETURN_REGNUM
7864 || regno == BASE_REGNUM
7865 || (flag_pic && regno == (int)PIC_OFFSET_TABLE_REGNUM)));
7866 }
7867
7868 /* Generate insn to save registers FIRST to LAST into
7869 the register save area located at offset OFFSET
7870 relative to register BASE. */
7871
7872 static rtx
7873 save_gprs (rtx base, int offset, int first, int last)
7874 {
7875 rtx addr, insn, note;
7876 int i;
7877
7878 addr = plus_constant (Pmode, base, offset);
7879 addr = gen_rtx_MEM (Pmode, addr);
7880
7881 set_mem_alias_set (addr, get_frame_alias_set ());
7882
7883 /* Special-case single register. */
7884 if (first == last)
7885 {
7886 if (TARGET_64BIT)
7887 insn = gen_movdi (addr, gen_rtx_REG (Pmode, first));
7888 else
7889 insn = gen_movsi (addr, gen_rtx_REG (Pmode, first));
7890
7891 if (!global_not_special_regno_p (first))
7892 RTX_FRAME_RELATED_P (insn) = 1;
7893 return insn;
7894 }
7895
7896
7897 insn = gen_store_multiple (addr,
7898 gen_rtx_REG (Pmode, first),
7899 GEN_INT (last - first + 1));
7900
7901 if (first <= 6 && cfun->stdarg)
7902 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
7903 {
7904 rtx mem = XEXP (XVECEXP (PATTERN (insn), 0, i), 0);
7905
7906 if (first + i <= 6)
7907 set_mem_alias_set (mem, get_varargs_alias_set ());
7908 }
7909
7910 /* We need to set the FRAME_RELATED flag on all SETs
7911 inside the store-multiple pattern.
7912
7913 However, we must not emit DWARF records for registers 2..5
7914 if they are stored for use by variable arguments ...
7915
7916 ??? Unfortunately, it is not enough to simply not the
7917 FRAME_RELATED flags for those SETs, because the first SET
7918 of the PARALLEL is always treated as if it had the flag
7919 set, even if it does not. Therefore we emit a new pattern
7920 without those registers as REG_FRAME_RELATED_EXPR note. */
7921
7922 if (first >= 6 && !global_not_special_regno_p (first))
7923 {
7924 rtx pat = PATTERN (insn);
7925
7926 for (i = 0; i < XVECLEN (pat, 0); i++)
7927 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
7928 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (pat,
7929 0, i)))))
7930 RTX_FRAME_RELATED_P (XVECEXP (pat, 0, i)) = 1;
7931
7932 RTX_FRAME_RELATED_P (insn) = 1;
7933 }
7934 else if (last >= 6)
7935 {
7936 int start;
7937
7938 for (start = first >= 6 ? first : 6; start <= last; start++)
7939 if (!global_not_special_regno_p (start))
7940 break;
7941
7942 if (start > last)
7943 return insn;
7944
7945 addr = plus_constant (Pmode, base,
7946 offset + (start - first) * UNITS_PER_LONG);
7947 note = gen_store_multiple (gen_rtx_MEM (Pmode, addr),
7948 gen_rtx_REG (Pmode, start),
7949 GEN_INT (last - start + 1));
7950 note = PATTERN (note);
7951
7952 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
7953
7954 for (i = 0; i < XVECLEN (note, 0); i++)
7955 if (GET_CODE (XVECEXP (note, 0, i)) == SET
7956 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (note,
7957 0, i)))))
7958 RTX_FRAME_RELATED_P (XVECEXP (note, 0, i)) = 1;
7959
7960 RTX_FRAME_RELATED_P (insn) = 1;
7961 }
7962
7963 return insn;
7964 }
7965
7966 /* Generate insn to restore registers FIRST to LAST from
7967 the register save area located at offset OFFSET
7968 relative to register BASE. */
7969
7970 static rtx
7971 restore_gprs (rtx base, int offset, int first, int last)
7972 {
7973 rtx addr, insn;
7974
7975 addr = plus_constant (Pmode, base, offset);
7976 addr = gen_rtx_MEM (Pmode, addr);
7977 set_mem_alias_set (addr, get_frame_alias_set ());
7978
7979 /* Special-case single register. */
7980 if (first == last)
7981 {
7982 if (TARGET_64BIT)
7983 insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
7984 else
7985 insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);
7986
7987 return insn;
7988 }
7989
7990 insn = gen_load_multiple (gen_rtx_REG (Pmode, first),
7991 addr,
7992 GEN_INT (last - first + 1));
7993 return insn;
7994 }
7995
7996 /* Return insn sequence to load the GOT register. */
7997
7998 static GTY(()) rtx got_symbol;
7999 rtx
8000 s390_load_got (void)
8001 {
8002 rtx insns;
8003
8004 /* We cannot use pic_offset_table_rtx here since we use this
8005 function also for non-pic if __tls_get_offset is called and in
8006 that case PIC_OFFSET_TABLE_REGNUM as well as pic_offset_table_rtx
8007 aren't usable. */
8008 rtx got_rtx = gen_rtx_REG (Pmode, 12);
8009
8010 if (!got_symbol)
8011 {
8012 got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
8013 SYMBOL_REF_FLAGS (got_symbol) = SYMBOL_FLAG_LOCAL;
8014 }
8015
8016 start_sequence ();
8017
8018 if (TARGET_CPU_ZARCH)
8019 {
8020 emit_move_insn (got_rtx, got_symbol);
8021 }
8022 else
8023 {
8024 rtx offset;
8025
8026 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got_symbol),
8027 UNSPEC_LTREL_OFFSET);
8028 offset = gen_rtx_CONST (Pmode, offset);
8029 offset = force_const_mem (Pmode, offset);
8030
8031 emit_move_insn (got_rtx, offset);
8032
8033 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (offset, 0)),
8034 UNSPEC_LTREL_BASE);
8035 offset = gen_rtx_PLUS (Pmode, got_rtx, offset);
8036
8037 emit_move_insn (got_rtx, offset);
8038 }
8039
8040 insns = get_insns ();
8041 end_sequence ();
8042 return insns;
8043 }
8044
8045 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
8046 and the change to the stack pointer. */
8047
8048 static void
8049 s390_emit_stack_tie (void)
8050 {
8051 rtx mem = gen_frame_mem (BLKmode,
8052 gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
8053
8054 emit_insn (gen_stack_tie (mem));
8055 }
8056
8057 /* Expand the prologue into a bunch of separate insns. */
8058
8059 void
8060 s390_emit_prologue (void)
8061 {
8062 rtx insn, addr;
8063 rtx temp_reg;
8064 int i;
8065 int offset;
8066 int next_fpr = 0;
8067
8068 /* Complete frame layout. */
8069
8070 s390_update_frame_layout ();
8071
8072 /* Annotate all constant pool references to let the scheduler know
8073 they implicitly use the base register. */
8074
8075 push_topmost_sequence ();
8076
8077 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8078 if (INSN_P (insn))
8079 {
8080 annotate_constant_pool_refs (&PATTERN (insn));
8081 df_insn_rescan (insn);
8082 }
8083
8084 pop_topmost_sequence ();
8085
8086 /* Choose best register to use for temp use within prologue.
8087 See below for why TPF must use the register 1. */
8088
8089 if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
8090 && !crtl->is_leaf
8091 && !TARGET_TPF_PROFILING)
8092 temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
8093 else
8094 temp_reg = gen_rtx_REG (Pmode, 1);
8095
8096 /* Save call saved gprs. */
8097 if (cfun_frame_layout.first_save_gpr != -1)
8098 {
8099 insn = save_gprs (stack_pointer_rtx,
8100 cfun_frame_layout.gprs_offset +
8101 UNITS_PER_LONG * (cfun_frame_layout.first_save_gpr
8102 - cfun_frame_layout.first_save_gpr_slot),
8103 cfun_frame_layout.first_save_gpr,
8104 cfun_frame_layout.last_save_gpr);
8105 emit_insn (insn);
8106 }
8107
8108 /* Dummy insn to mark literal pool slot. */
8109
8110 if (cfun->machine->base_reg)
8111 emit_insn (gen_main_pool (cfun->machine->base_reg));
8112
8113 offset = cfun_frame_layout.f0_offset;
8114
8115 /* Save f0 and f2. */
8116 for (i = 0; i < 2; i++)
8117 {
8118 if (cfun_fpr_bit_p (i))
8119 {
8120 save_fpr (stack_pointer_rtx, offset, i + 16);
8121 offset += 8;
8122 }
8123 else if (!TARGET_PACKED_STACK)
8124 offset += 8;
8125 }
8126
8127 /* Save f4 and f6. */
8128 offset = cfun_frame_layout.f4_offset;
8129 for (i = 2; i < 4; i++)
8130 {
8131 if (cfun_fpr_bit_p (i))
8132 {
8133 insn = save_fpr (stack_pointer_rtx, offset, i + 16);
8134 offset += 8;
8135
8136 /* If f4 and f6 are call clobbered they are saved due to stdargs and
8137 therefore are not frame related. */
8138 if (!call_really_used_regs[i + 16])
8139 RTX_FRAME_RELATED_P (insn) = 1;
8140 }
8141 else if (!TARGET_PACKED_STACK)
8142 offset += 8;
8143 }
8144
8145 if (TARGET_PACKED_STACK
8146 && cfun_save_high_fprs_p
8147 && cfun_frame_layout.f8_offset + cfun_frame_layout.high_fprs * 8 > 0)
8148 {
8149 offset = (cfun_frame_layout.f8_offset
8150 + (cfun_frame_layout.high_fprs - 1) * 8);
8151
8152 for (i = 15; i > 7 && offset >= 0; i--)
8153 if (cfun_fpr_bit_p (i))
8154 {
8155 insn = save_fpr (stack_pointer_rtx, offset, i + 16);
8156
8157 RTX_FRAME_RELATED_P (insn) = 1;
8158 offset -= 8;
8159 }
8160 if (offset >= cfun_frame_layout.f8_offset)
8161 next_fpr = i + 16;
8162 }
8163
8164 if (!TARGET_PACKED_STACK)
8165 next_fpr = cfun_save_high_fprs_p ? 31 : 0;
8166
8167 if (flag_stack_usage_info)
8168 current_function_static_stack_size = cfun_frame_layout.frame_size;
8169
8170 /* Decrement stack pointer. */
8171
8172 if (cfun_frame_layout.frame_size > 0)
8173 {
8174 rtx frame_off = GEN_INT (-cfun_frame_layout.frame_size);
8175 rtx real_frame_off;
8176
8177 if (s390_stack_size)
8178 {
8179 HOST_WIDE_INT stack_guard;
8180
8181 if (s390_stack_guard)
8182 stack_guard = s390_stack_guard;
8183 else
8184 {
8185 /* If no value for stack guard is provided the smallest power of 2
8186 larger than the current frame size is chosen. */
8187 stack_guard = 1;
8188 while (stack_guard < cfun_frame_layout.frame_size)
8189 stack_guard <<= 1;
8190 }
8191
8192 if (cfun_frame_layout.frame_size >= s390_stack_size)
8193 {
8194 warning (0, "frame size of function %qs is %wd"
8195 " bytes exceeding user provided stack limit of "
8196 "%d bytes. "
8197 "An unconditional trap is added.",
8198 current_function_name(), cfun_frame_layout.frame_size,
8199 s390_stack_size);
8200 emit_insn (gen_trap ());
8201 }
8202 else
8203 {
8204 /* stack_guard has to be smaller than s390_stack_size.
8205 Otherwise we would emit an AND with zero which would
8206 not match the test under mask pattern. */
8207 if (stack_guard >= s390_stack_size)
8208 {
8209 warning (0, "frame size of function %qs is %wd"
8210 " bytes which is more than half the stack size. "
8211 "The dynamic check would not be reliable. "
8212 "No check emitted for this function.",
8213 current_function_name(),
8214 cfun_frame_layout.frame_size);
8215 }
8216 else
8217 {
8218 HOST_WIDE_INT stack_check_mask = ((s390_stack_size - 1)
8219 & ~(stack_guard - 1));
8220
8221 rtx t = gen_rtx_AND (Pmode, stack_pointer_rtx,
8222 GEN_INT (stack_check_mask));
8223 if (TARGET_64BIT)
8224 emit_insn (gen_ctrapdi4 (gen_rtx_EQ (VOIDmode,
8225 t, const0_rtx),
8226 t, const0_rtx, const0_rtx));
8227 else
8228 emit_insn (gen_ctrapsi4 (gen_rtx_EQ (VOIDmode,
8229 t, const0_rtx),
8230 t, const0_rtx, const0_rtx));
8231 }
8232 }
8233 }
8234
8235 if (s390_warn_framesize > 0
8236 && cfun_frame_layout.frame_size >= s390_warn_framesize)
8237 warning (0, "frame size of %qs is %wd bytes",
8238 current_function_name (), cfun_frame_layout.frame_size);
8239
8240 if (s390_warn_dynamicstack_p && cfun->calls_alloca)
8241 warning (0, "%qs uses dynamic stack allocation", current_function_name ());
8242
8243 /* Save incoming stack pointer into temp reg. */
8244 if (TARGET_BACKCHAIN || next_fpr)
8245 insn = emit_insn (gen_move_insn (temp_reg, stack_pointer_rtx));
8246
8247 /* Subtract frame size from stack pointer. */
8248
8249 if (DISP_IN_RANGE (INTVAL (frame_off)))
8250 {
8251 insn = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8252 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8253 frame_off));
8254 insn = emit_insn (insn);
8255 }
8256 else
8257 {
8258 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
8259 frame_off = force_const_mem (Pmode, frame_off);
8260
8261 insn = emit_insn (gen_add2_insn (stack_pointer_rtx, frame_off));
8262 annotate_constant_pool_refs (&PATTERN (insn));
8263 }
8264
8265 RTX_FRAME_RELATED_P (insn) = 1;
8266 real_frame_off = GEN_INT (-cfun_frame_layout.frame_size);
8267 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
8268 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8269 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8270 real_frame_off)));
8271
8272 /* Set backchain. */
8273
8274 if (TARGET_BACKCHAIN)
8275 {
8276 if (cfun_frame_layout.backchain_offset)
8277 addr = gen_rtx_MEM (Pmode,
8278 plus_constant (Pmode, stack_pointer_rtx,
8279 cfun_frame_layout.backchain_offset));
8280 else
8281 addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
8282 set_mem_alias_set (addr, get_frame_alias_set ());
8283 insn = emit_insn (gen_move_insn (addr, temp_reg));
8284 }
8285
8286 /* If we support non-call exceptions (e.g. for Java),
8287 we need to make sure the backchain pointer is set up
8288 before any possibly trapping memory access. */
8289 if (TARGET_BACKCHAIN && cfun->can_throw_non_call_exceptions)
8290 {
8291 addr = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode));
8292 emit_clobber (addr);
8293 }
8294 }
8295
8296 /* Save fprs 8 - 15 (64 bit ABI). */
8297
8298 if (cfun_save_high_fprs_p && next_fpr)
8299 {
8300 /* If the stack might be accessed through a different register
8301 we have to make sure that the stack pointer decrement is not
8302 moved below the use of the stack slots. */
8303 s390_emit_stack_tie ();
8304
8305 insn = emit_insn (gen_add2_insn (temp_reg,
8306 GEN_INT (cfun_frame_layout.f8_offset)));
8307
8308 offset = 0;
8309
8310 for (i = 24; i <= next_fpr; i++)
8311 if (cfun_fpr_bit_p (i - 16))
8312 {
8313 rtx addr = plus_constant (Pmode, stack_pointer_rtx,
8314 cfun_frame_layout.frame_size
8315 + cfun_frame_layout.f8_offset
8316 + offset);
8317
8318 insn = save_fpr (temp_reg, offset, i);
8319 offset += 8;
8320 RTX_FRAME_RELATED_P (insn) = 1;
8321 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
8322 gen_rtx_SET (VOIDmode,
8323 gen_rtx_MEM (DFmode, addr),
8324 gen_rtx_REG (DFmode, i)));
8325 }
8326 }
8327
8328 /* Set frame pointer, if needed. */
8329
8330 if (frame_pointer_needed)
8331 {
8332 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
8333 RTX_FRAME_RELATED_P (insn) = 1;
8334 }
8335
8336 /* Set up got pointer, if needed. */
8337
8338 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
8339 {
8340 rtx insns = s390_load_got ();
8341
8342 for (insn = insns; insn; insn = NEXT_INSN (insn))
8343 annotate_constant_pool_refs (&PATTERN (insn));
8344
8345 emit_insn (insns);
8346 }
8347
8348 if (TARGET_TPF_PROFILING)
8349 {
8350 /* Generate a BAS instruction to serve as a function
8351 entry intercept to facilitate the use of tracing
8352 algorithms located at the branch target. */
8353 emit_insn (gen_prologue_tpf ());
8354
8355 /* Emit a blockage here so that all code
8356 lies between the profiling mechanisms. */
8357 emit_insn (gen_blockage ());
8358 }
8359 }
8360
8361 /* Expand the epilogue into a bunch of separate insns. */
8362
8363 void
8364 s390_emit_epilogue (bool sibcall)
8365 {
8366 rtx frame_pointer, return_reg, cfa_restores = NULL_RTX;
8367 int area_bottom, area_top, offset = 0;
8368 int next_offset;
8369 rtvec p;
8370 int i;
8371
8372 if (TARGET_TPF_PROFILING)
8373 {
8374
8375 /* Generate a BAS instruction to serve as a function
8376 entry intercept to facilitate the use of tracing
8377 algorithms located at the branch target. */
8378
8379 /* Emit a blockage here so that all code
8380 lies between the profiling mechanisms. */
8381 emit_insn (gen_blockage ());
8382
8383 emit_insn (gen_epilogue_tpf ());
8384 }
8385
8386 /* Check whether to use frame or stack pointer for restore. */
8387
8388 frame_pointer = (frame_pointer_needed
8389 ? hard_frame_pointer_rtx : stack_pointer_rtx);
8390
8391 s390_frame_area (&area_bottom, &area_top);
8392
8393 /* Check whether we can access the register save area.
8394 If not, increment the frame pointer as required. */
8395
8396 if (area_top <= area_bottom)
8397 {
8398 /* Nothing to restore. */
8399 }
8400 else if (DISP_IN_RANGE (cfun_frame_layout.frame_size + area_bottom)
8401 && DISP_IN_RANGE (cfun_frame_layout.frame_size + area_top - 1))
8402 {
8403 /* Area is in range. */
8404 offset = cfun_frame_layout.frame_size;
8405 }
8406 else
8407 {
8408 rtx insn, frame_off, cfa;
8409
8410 offset = area_bottom < 0 ? -area_bottom : 0;
8411 frame_off = GEN_INT (cfun_frame_layout.frame_size - offset);
8412
8413 cfa = gen_rtx_SET (VOIDmode, frame_pointer,
8414 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
8415 if (DISP_IN_RANGE (INTVAL (frame_off)))
8416 {
8417 insn = gen_rtx_SET (VOIDmode, frame_pointer,
8418 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
8419 insn = emit_insn (insn);
8420 }
8421 else
8422 {
8423 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
8424 frame_off = force_const_mem (Pmode, frame_off);
8425
8426 insn = emit_insn (gen_add2_insn (frame_pointer, frame_off));
8427 annotate_constant_pool_refs (&PATTERN (insn));
8428 }
8429 add_reg_note (insn, REG_CFA_ADJUST_CFA, cfa);
8430 RTX_FRAME_RELATED_P (insn) = 1;
8431 }
8432
8433 /* Restore call saved fprs. */
8434
8435 if (TARGET_64BIT)
8436 {
8437 if (cfun_save_high_fprs_p)
8438 {
8439 next_offset = cfun_frame_layout.f8_offset;
8440 for (i = 24; i < 32; i++)
8441 {
8442 if (cfun_fpr_bit_p (i - 16))
8443 {
8444 restore_fpr (frame_pointer,
8445 offset + next_offset, i);
8446 cfa_restores
8447 = alloc_reg_note (REG_CFA_RESTORE,
8448 gen_rtx_REG (DFmode, i), cfa_restores);
8449 next_offset += 8;
8450 }
8451 }
8452 }
8453
8454 }
8455 else
8456 {
8457 next_offset = cfun_frame_layout.f4_offset;
8458 for (i = 18; i < 20; i++)
8459 {
8460 if (cfun_fpr_bit_p (i - 16))
8461 {
8462 restore_fpr (frame_pointer,
8463 offset + next_offset, i);
8464 cfa_restores
8465 = alloc_reg_note (REG_CFA_RESTORE,
8466 gen_rtx_REG (DFmode, i), cfa_restores);
8467 next_offset += 8;
8468 }
8469 else if (!TARGET_PACKED_STACK)
8470 next_offset += 8;
8471 }
8472
8473 }
8474
8475 /* Return register. */
8476
8477 return_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
8478
8479 /* Restore call saved gprs. */
8480
8481 if (cfun_frame_layout.first_restore_gpr != -1)
8482 {
8483 rtx insn, addr;
8484 int i;
8485
8486 /* Check for global register and save them
8487 to stack location from where they get restored. */
8488
8489 for (i = cfun_frame_layout.first_restore_gpr;
8490 i <= cfun_frame_layout.last_restore_gpr;
8491 i++)
8492 {
8493 if (global_not_special_regno_p (i))
8494 {
8495 addr = plus_constant (Pmode, frame_pointer,
8496 offset + cfun_frame_layout.gprs_offset
8497 + (i - cfun_frame_layout.first_save_gpr_slot)
8498 * UNITS_PER_LONG);
8499 addr = gen_rtx_MEM (Pmode, addr);
8500 set_mem_alias_set (addr, get_frame_alias_set ());
8501 emit_move_insn (addr, gen_rtx_REG (Pmode, i));
8502 }
8503 else
8504 cfa_restores
8505 = alloc_reg_note (REG_CFA_RESTORE,
8506 gen_rtx_REG (Pmode, i), cfa_restores);
8507 }
8508
8509 if (! sibcall)
8510 {
8511 /* Fetch return address from stack before load multiple,
8512 this will do good for scheduling. */
8513
8514 if (cfun_frame_layout.save_return_addr_p
8515 || (cfun_frame_layout.first_restore_gpr < BASE_REGNUM
8516 && cfun_frame_layout.last_restore_gpr > RETURN_REGNUM))
8517 {
8518 int return_regnum = find_unused_clobbered_reg();
8519 if (!return_regnum)
8520 return_regnum = 4;
8521 return_reg = gen_rtx_REG (Pmode, return_regnum);
8522
8523 addr = plus_constant (Pmode, frame_pointer,
8524 offset + cfun_frame_layout.gprs_offset
8525 + (RETURN_REGNUM
8526 - cfun_frame_layout.first_save_gpr_slot)
8527 * UNITS_PER_LONG);
8528 addr = gen_rtx_MEM (Pmode, addr);
8529 set_mem_alias_set (addr, get_frame_alias_set ());
8530 emit_move_insn (return_reg, addr);
8531 }
8532 }
8533
8534 insn = restore_gprs (frame_pointer,
8535 offset + cfun_frame_layout.gprs_offset
8536 + (cfun_frame_layout.first_restore_gpr
8537 - cfun_frame_layout.first_save_gpr_slot)
8538 * UNITS_PER_LONG,
8539 cfun_frame_layout.first_restore_gpr,
8540 cfun_frame_layout.last_restore_gpr);
8541 insn = emit_insn (insn);
8542 REG_NOTES (insn) = cfa_restores;
8543 add_reg_note (insn, REG_CFA_DEF_CFA,
8544 plus_constant (Pmode, stack_pointer_rtx,
8545 STACK_POINTER_OFFSET));
8546 RTX_FRAME_RELATED_P (insn) = 1;
8547 }
8548
8549 if (! sibcall)
8550 {
8551
8552 /* Return to caller. */
8553
8554 p = rtvec_alloc (2);
8555
8556 RTVEC_ELT (p, 0) = ret_rtx;
8557 RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode, return_reg);
8558 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
8559 }
8560 }
8561
8562
8563 /* Return the size in bytes of a function argument of
8564 type TYPE and/or mode MODE. At least one of TYPE or
8565 MODE must be specified. */
8566
8567 static int
8568 s390_function_arg_size (enum machine_mode mode, const_tree type)
8569 {
8570 if (type)
8571 return int_size_in_bytes (type);
8572
8573 /* No type info available for some library calls ... */
8574 if (mode != BLKmode)
8575 return GET_MODE_SIZE (mode);
8576
8577 /* If we have neither type nor mode, abort */
8578 gcc_unreachable ();
8579 }
8580
8581 /* Return true if a function argument of type TYPE and mode MODE
8582 is to be passed in a floating-point register, if available. */
8583
8584 static bool
8585 s390_function_arg_float (enum machine_mode mode, const_tree type)
8586 {
8587 int size = s390_function_arg_size (mode, type);
8588 if (size > 8)
8589 return false;
8590
8591 /* Soft-float changes the ABI: no floating-point registers are used. */
8592 if (TARGET_SOFT_FLOAT)
8593 return false;
8594
8595 /* No type info available for some library calls ... */
8596 if (!type)
8597 return mode == SFmode || mode == DFmode || mode == SDmode || mode == DDmode;
8598
8599 /* The ABI says that record types with a single member are treated
8600 just like that member would be. */
8601 while (TREE_CODE (type) == RECORD_TYPE)
8602 {
8603 tree field, single = NULL_TREE;
8604
8605 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
8606 {
8607 if (TREE_CODE (field) != FIELD_DECL)
8608 continue;
8609
8610 if (single == NULL_TREE)
8611 single = TREE_TYPE (field);
8612 else
8613 return false;
8614 }
8615
8616 if (single == NULL_TREE)
8617 return false;
8618 else
8619 type = single;
8620 }
8621
8622 return TREE_CODE (type) == REAL_TYPE;
8623 }
8624
8625 /* Return true if a function argument of type TYPE and mode MODE
8626 is to be passed in an integer register, or a pair of integer
8627 registers, if available. */
8628
8629 static bool
8630 s390_function_arg_integer (enum machine_mode mode, const_tree type)
8631 {
8632 int size = s390_function_arg_size (mode, type);
8633 if (size > 8)
8634 return false;
8635
8636 /* No type info available for some library calls ... */
8637 if (!type)
8638 return GET_MODE_CLASS (mode) == MODE_INT
8639 || (TARGET_SOFT_FLOAT && SCALAR_FLOAT_MODE_P (mode));
8640
8641 /* We accept small integral (and similar) types. */
8642 if (INTEGRAL_TYPE_P (type)
8643 || POINTER_TYPE_P (type)
8644 || TREE_CODE (type) == NULLPTR_TYPE
8645 || TREE_CODE (type) == OFFSET_TYPE
8646 || (TARGET_SOFT_FLOAT && TREE_CODE (type) == REAL_TYPE))
8647 return true;
8648
8649 /* We also accept structs of size 1, 2, 4, 8 that are not
8650 passed in floating-point registers. */
8651 if (AGGREGATE_TYPE_P (type)
8652 && exact_log2 (size) >= 0
8653 && !s390_function_arg_float (mode, type))
8654 return true;
8655
8656 return false;
8657 }
8658
8659 /* Return 1 if a function argument of type TYPE and mode MODE
8660 is to be passed by reference. The ABI specifies that only
8661 structures of size 1, 2, 4, or 8 bytes are passed by value,
8662 all other structures (and complex numbers) are passed by
8663 reference. */
8664
8665 static bool
8666 s390_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
8667 enum machine_mode mode, const_tree type,
8668 bool named ATTRIBUTE_UNUSED)
8669 {
8670 int size = s390_function_arg_size (mode, type);
8671 if (size > 8)
8672 return true;
8673
8674 if (type)
8675 {
8676 if (AGGREGATE_TYPE_P (type) && exact_log2 (size) < 0)
8677 return 1;
8678
8679 if (TREE_CODE (type) == COMPLEX_TYPE
8680 || TREE_CODE (type) == VECTOR_TYPE)
8681 return 1;
8682 }
8683
8684 return 0;
8685 }
8686
8687 /* Update the data in CUM to advance over an argument of mode MODE and
8688 data type TYPE. (TYPE is null for libcalls where that information
8689 may not be available.). The boolean NAMED specifies whether the
8690 argument is a named argument (as opposed to an unnamed argument
8691 matching an ellipsis). */
8692
8693 static void
8694 s390_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
8695 const_tree type, bool named ATTRIBUTE_UNUSED)
8696 {
8697 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
8698
8699 if (s390_function_arg_float (mode, type))
8700 {
8701 cum->fprs += 1;
8702 }
8703 else if (s390_function_arg_integer (mode, type))
8704 {
8705 int size = s390_function_arg_size (mode, type);
8706 cum->gprs += ((size + UNITS_PER_LONG - 1) / UNITS_PER_LONG);
8707 }
8708 else
8709 gcc_unreachable ();
8710 }
8711
8712 /* Define where to put the arguments to a function.
8713 Value is zero to push the argument on the stack,
8714 or a hard register in which to store the argument.
8715
8716 MODE is the argument's machine mode.
8717 TYPE is the data type of the argument (as a tree).
8718 This is null for libcalls where that information may
8719 not be available.
8720 CUM is a variable of type CUMULATIVE_ARGS which gives info about
8721 the preceding args and about the function being called.
8722 NAMED is nonzero if this argument is a named parameter
8723 (otherwise it is an extra parameter matching an ellipsis).
8724
8725 On S/390, we use general purpose registers 2 through 6 to
8726 pass integer, pointer, and certain structure arguments, and
8727 floating point registers 0 and 2 (0, 2, 4, and 6 on 64-bit)
8728 to pass floating point arguments. All remaining arguments
8729 are pushed to the stack. */
8730
8731 static rtx
8732 s390_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
8733 const_tree type, bool named ATTRIBUTE_UNUSED)
8734 {
8735 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
8736
8737 if (s390_function_arg_float (mode, type))
8738 {
8739 if (cum->fprs + 1 > FP_ARG_NUM_REG)
8740 return 0;
8741 else
8742 return gen_rtx_REG (mode, cum->fprs + 16);
8743 }
8744 else if (s390_function_arg_integer (mode, type))
8745 {
8746 int size = s390_function_arg_size (mode, type);
8747 int n_gprs = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
8748
8749 if (cum->gprs + n_gprs > GP_ARG_NUM_REG)
8750 return 0;
8751 else if (n_gprs == 1 || UNITS_PER_WORD == UNITS_PER_LONG)
8752 return gen_rtx_REG (mode, cum->gprs + 2);
8753 else if (n_gprs == 2)
8754 {
8755 rtvec p = rtvec_alloc (2);
8756
8757 RTVEC_ELT (p, 0)
8758 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 2),
8759 const0_rtx);
8760 RTVEC_ELT (p, 1)
8761 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 3),
8762 GEN_INT (4));
8763
8764 return gen_rtx_PARALLEL (mode, p);
8765 }
8766 }
8767
8768 /* After the real arguments, expand_call calls us once again
8769 with a void_type_node type. Whatever we return here is
8770 passed as operand 2 to the call expanders.
8771
8772 We don't need this feature ... */
8773 else if (type == void_type_node)
8774 return const0_rtx;
8775
8776 gcc_unreachable ();
8777 }
8778
8779 /* Return true if return values of type TYPE should be returned
8780 in a memory buffer whose address is passed by the caller as
8781 hidden first argument. */
8782
8783 static bool
8784 s390_return_in_memory (const_tree type, const_tree fundecl ATTRIBUTE_UNUSED)
8785 {
8786 /* We accept small integral (and similar) types. */
8787 if (INTEGRAL_TYPE_P (type)
8788 || POINTER_TYPE_P (type)
8789 || TREE_CODE (type) == OFFSET_TYPE
8790 || TREE_CODE (type) == REAL_TYPE)
8791 return int_size_in_bytes (type) > 8;
8792
8793 /* Aggregates and similar constructs are always returned
8794 in memory. */
8795 if (AGGREGATE_TYPE_P (type)
8796 || TREE_CODE (type) == COMPLEX_TYPE
8797 || TREE_CODE (type) == VECTOR_TYPE)
8798 return true;
8799
8800 /* ??? We get called on all sorts of random stuff from
8801 aggregate_value_p. We can't abort, but it's not clear
8802 what's safe to return. Pretend it's a struct I guess. */
8803 return true;
8804 }
8805
8806 /* Function arguments and return values are promoted to word size. */
8807
8808 static enum machine_mode
8809 s390_promote_function_mode (const_tree type, enum machine_mode mode,
8810 int *punsignedp,
8811 const_tree fntype ATTRIBUTE_UNUSED,
8812 int for_return ATTRIBUTE_UNUSED)
8813 {
8814 if (INTEGRAL_MODE_P (mode)
8815 && GET_MODE_SIZE (mode) < UNITS_PER_LONG)
8816 {
8817 if (type != NULL_TREE && POINTER_TYPE_P (type))
8818 *punsignedp = POINTERS_EXTEND_UNSIGNED;
8819 return Pmode;
8820 }
8821
8822 return mode;
8823 }
8824
8825 /* Define where to return a (scalar) value of type RET_TYPE.
8826 If RET_TYPE is null, define where to return a (scalar)
8827 value of mode MODE from a libcall. */
8828
8829 static rtx
8830 s390_function_and_libcall_value (enum machine_mode mode,
8831 const_tree ret_type,
8832 const_tree fntype_or_decl,
8833 bool outgoing ATTRIBUTE_UNUSED)
8834 {
8835 /* For normal functions perform the promotion as
8836 promote_function_mode would do. */
8837 if (ret_type)
8838 {
8839 int unsignedp = TYPE_UNSIGNED (ret_type);
8840 mode = promote_function_mode (ret_type, mode, &unsignedp,
8841 fntype_or_decl, 1);
8842 }
8843
8844 gcc_assert (GET_MODE_CLASS (mode) == MODE_INT || SCALAR_FLOAT_MODE_P (mode));
8845 gcc_assert (GET_MODE_SIZE (mode) <= 8);
8846
8847 if (TARGET_HARD_FLOAT && SCALAR_FLOAT_MODE_P (mode))
8848 return gen_rtx_REG (mode, 16);
8849 else if (GET_MODE_SIZE (mode) <= UNITS_PER_LONG
8850 || UNITS_PER_LONG == UNITS_PER_WORD)
8851 return gen_rtx_REG (mode, 2);
8852 else if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_LONG)
8853 {
8854 /* This case is triggered when returning a 64 bit value with
8855 -m31 -mzarch. Although the value would fit into a single
8856 register it has to be forced into a 32 bit register pair in
8857 order to match the ABI. */
8858 rtvec p = rtvec_alloc (2);
8859
8860 RTVEC_ELT (p, 0)
8861 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 2), const0_rtx);
8862 RTVEC_ELT (p, 1)
8863 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 3), GEN_INT (4));
8864
8865 return gen_rtx_PARALLEL (mode, p);
8866 }
8867
8868 gcc_unreachable ();
8869 }
8870
8871 /* Define where to return a scalar return value of type RET_TYPE. */
8872
8873 static rtx
8874 s390_function_value (const_tree ret_type, const_tree fn_decl_or_type,
8875 bool outgoing)
8876 {
8877 return s390_function_and_libcall_value (TYPE_MODE (ret_type), ret_type,
8878 fn_decl_or_type, outgoing);
8879 }
8880
8881 /* Define where to return a scalar libcall return value of mode
8882 MODE. */
8883
8884 static rtx
8885 s390_libcall_value (enum machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
8886 {
8887 return s390_function_and_libcall_value (mode, NULL_TREE,
8888 NULL_TREE, true);
8889 }
8890
8891
8892 /* Create and return the va_list datatype.
8893
8894 On S/390, va_list is an array type equivalent to
8895
8896 typedef struct __va_list_tag
8897 {
8898 long __gpr;
8899 long __fpr;
8900 void *__overflow_arg_area;
8901 void *__reg_save_area;
8902 } va_list[1];
8903
8904 where __gpr and __fpr hold the number of general purpose
8905 or floating point arguments used up to now, respectively,
8906 __overflow_arg_area points to the stack location of the
8907 next argument passed on the stack, and __reg_save_area
8908 always points to the start of the register area in the
8909 call frame of the current function. The function prologue
8910 saves all registers used for argument passing into this
8911 area if the function uses variable arguments. */
8912
8913 static tree
8914 s390_build_builtin_va_list (void)
8915 {
8916 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
8917
8918 record = lang_hooks.types.make_type (RECORD_TYPE);
8919
8920 type_decl =
8921 build_decl (BUILTINS_LOCATION,
8922 TYPE_DECL, get_identifier ("__va_list_tag"), record);
8923
8924 f_gpr = build_decl (BUILTINS_LOCATION,
8925 FIELD_DECL, get_identifier ("__gpr"),
8926 long_integer_type_node);
8927 f_fpr = build_decl (BUILTINS_LOCATION,
8928 FIELD_DECL, get_identifier ("__fpr"),
8929 long_integer_type_node);
8930 f_ovf = build_decl (BUILTINS_LOCATION,
8931 FIELD_DECL, get_identifier ("__overflow_arg_area"),
8932 ptr_type_node);
8933 f_sav = build_decl (BUILTINS_LOCATION,
8934 FIELD_DECL, get_identifier ("__reg_save_area"),
8935 ptr_type_node);
8936
8937 va_list_gpr_counter_field = f_gpr;
8938 va_list_fpr_counter_field = f_fpr;
8939
8940 DECL_FIELD_CONTEXT (f_gpr) = record;
8941 DECL_FIELD_CONTEXT (f_fpr) = record;
8942 DECL_FIELD_CONTEXT (f_ovf) = record;
8943 DECL_FIELD_CONTEXT (f_sav) = record;
8944
8945 TYPE_STUB_DECL (record) = type_decl;
8946 TYPE_NAME (record) = type_decl;
8947 TYPE_FIELDS (record) = f_gpr;
8948 DECL_CHAIN (f_gpr) = f_fpr;
8949 DECL_CHAIN (f_fpr) = f_ovf;
8950 DECL_CHAIN (f_ovf) = f_sav;
8951
8952 layout_type (record);
8953
8954 /* The correct type is an array type of one element. */
8955 return build_array_type (record, build_index_type (size_zero_node));
8956 }
8957
8958 /* Implement va_start by filling the va_list structure VALIST.
8959 STDARG_P is always true, and ignored.
8960 NEXTARG points to the first anonymous stack argument.
8961
8962 The following global variables are used to initialize
8963 the va_list structure:
8964
8965 crtl->args.info:
8966 holds number of gprs and fprs used for named arguments.
8967 crtl->args.arg_offset_rtx:
8968 holds the offset of the first anonymous stack argument
8969 (relative to the virtual arg pointer). */
8970
8971 static void
8972 s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
8973 {
8974 HOST_WIDE_INT n_gpr, n_fpr;
8975 int off;
8976 tree f_gpr, f_fpr, f_ovf, f_sav;
8977 tree gpr, fpr, ovf, sav, t;
8978
8979 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
8980 f_fpr = DECL_CHAIN (f_gpr);
8981 f_ovf = DECL_CHAIN (f_fpr);
8982 f_sav = DECL_CHAIN (f_ovf);
8983
8984 valist = build_simple_mem_ref (valist);
8985 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
8986 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
8987 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
8988 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
8989
8990 /* Count number of gp and fp argument registers used. */
8991
8992 n_gpr = crtl->args.info.gprs;
8993 n_fpr = crtl->args.info.fprs;
8994
8995 if (cfun->va_list_gpr_size)
8996 {
8997 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
8998 build_int_cst (NULL_TREE, n_gpr));
8999 TREE_SIDE_EFFECTS (t) = 1;
9000 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9001 }
9002
9003 if (cfun->va_list_fpr_size)
9004 {
9005 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
9006 build_int_cst (NULL_TREE, n_fpr));
9007 TREE_SIDE_EFFECTS (t) = 1;
9008 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9009 }
9010
9011 /* Find the overflow area. */
9012 if (n_gpr + cfun->va_list_gpr_size > GP_ARG_NUM_REG
9013 || n_fpr + cfun->va_list_fpr_size > FP_ARG_NUM_REG)
9014 {
9015 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
9016
9017 off = INTVAL (crtl->args.arg_offset_rtx);
9018 off = off < 0 ? 0 : off;
9019 if (TARGET_DEBUG_ARG)
9020 fprintf (stderr, "va_start: n_gpr = %d, n_fpr = %d off %d\n",
9021 (int)n_gpr, (int)n_fpr, off);
9022
9023 t = fold_build_pointer_plus_hwi (t, off);
9024
9025 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
9026 TREE_SIDE_EFFECTS (t) = 1;
9027 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9028 }
9029
9030 /* Find the register save area. */
9031 if ((cfun->va_list_gpr_size && n_gpr < GP_ARG_NUM_REG)
9032 || (cfun->va_list_fpr_size && n_fpr < FP_ARG_NUM_REG))
9033 {
9034 t = make_tree (TREE_TYPE (sav), return_address_pointer_rtx);
9035 t = fold_build_pointer_plus_hwi (t, -RETURN_REGNUM * UNITS_PER_LONG);
9036
9037 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
9038 TREE_SIDE_EFFECTS (t) = 1;
9039 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9040 }
9041 }
9042
9043 /* Implement va_arg by updating the va_list structure
9044 VALIST as required to retrieve an argument of type
9045 TYPE, and returning that argument.
9046
9047 Generates code equivalent to:
9048
9049 if (integral value) {
9050 if (size <= 4 && args.gpr < 5 ||
9051 size > 4 && args.gpr < 4 )
9052 ret = args.reg_save_area[args.gpr+8]
9053 else
9054 ret = *args.overflow_arg_area++;
9055 } else if (float value) {
9056 if (args.fgpr < 2)
9057 ret = args.reg_save_area[args.fpr+64]
9058 else
9059 ret = *args.overflow_arg_area++;
9060 } else if (aggregate value) {
9061 if (args.gpr < 5)
9062 ret = *args.reg_save_area[args.gpr]
9063 else
9064 ret = **args.overflow_arg_area++;
9065 } */
9066
9067 static tree
9068 s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
9069 gimple_seq *post_p ATTRIBUTE_UNUSED)
9070 {
9071 tree f_gpr, f_fpr, f_ovf, f_sav;
9072 tree gpr, fpr, ovf, sav, reg, t, u;
9073 int indirect_p, size, n_reg, sav_ofs, sav_scale, max_reg;
9074 tree lab_false, lab_over, addr;
9075
9076 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
9077 f_fpr = DECL_CHAIN (f_gpr);
9078 f_ovf = DECL_CHAIN (f_fpr);
9079 f_sav = DECL_CHAIN (f_ovf);
9080
9081 valist = build_va_arg_indirect_ref (valist);
9082 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
9083 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
9084 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
9085
9086 /* The tree for args* cannot be shared between gpr/fpr and ovf since
9087 both appear on a lhs. */
9088 valist = unshare_expr (valist);
9089 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
9090
9091 size = int_size_in_bytes (type);
9092
9093 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
9094 {
9095 if (TARGET_DEBUG_ARG)
9096 {
9097 fprintf (stderr, "va_arg: aggregate type");
9098 debug_tree (type);
9099 }
9100
9101 /* Aggregates are passed by reference. */
9102 indirect_p = 1;
9103 reg = gpr;
9104 n_reg = 1;
9105
9106 /* kernel stack layout on 31 bit: It is assumed here that no padding
9107 will be added by s390_frame_info because for va_args always an even
9108 number of gprs has to be saved r15-r2 = 14 regs. */
9109 sav_ofs = 2 * UNITS_PER_LONG;
9110 sav_scale = UNITS_PER_LONG;
9111 size = UNITS_PER_LONG;
9112 max_reg = GP_ARG_NUM_REG - n_reg;
9113 }
9114 else if (s390_function_arg_float (TYPE_MODE (type), type))
9115 {
9116 if (TARGET_DEBUG_ARG)
9117 {
9118 fprintf (stderr, "va_arg: float type");
9119 debug_tree (type);
9120 }
9121
9122 /* FP args go in FP registers, if present. */
9123 indirect_p = 0;
9124 reg = fpr;
9125 n_reg = 1;
9126 sav_ofs = 16 * UNITS_PER_LONG;
9127 sav_scale = 8;
9128 max_reg = FP_ARG_NUM_REG - n_reg;
9129 }
9130 else
9131 {
9132 if (TARGET_DEBUG_ARG)
9133 {
9134 fprintf (stderr, "va_arg: other type");
9135 debug_tree (type);
9136 }
9137
9138 /* Otherwise into GP registers. */
9139 indirect_p = 0;
9140 reg = gpr;
9141 n_reg = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
9142
9143 /* kernel stack layout on 31 bit: It is assumed here that no padding
9144 will be added by s390_frame_info because for va_args always an even
9145 number of gprs has to be saved r15-r2 = 14 regs. */
9146 sav_ofs = 2 * UNITS_PER_LONG;
9147
9148 if (size < UNITS_PER_LONG)
9149 sav_ofs += UNITS_PER_LONG - size;
9150
9151 sav_scale = UNITS_PER_LONG;
9152 max_reg = GP_ARG_NUM_REG - n_reg;
9153 }
9154
9155 /* Pull the value out of the saved registers ... */
9156
9157 lab_false = create_artificial_label (UNKNOWN_LOCATION);
9158 lab_over = create_artificial_label (UNKNOWN_LOCATION);
9159 addr = create_tmp_var (ptr_type_node, "addr");
9160
9161 t = fold_convert (TREE_TYPE (reg), size_int (max_reg));
9162 t = build2 (GT_EXPR, boolean_type_node, reg, t);
9163 u = build1 (GOTO_EXPR, void_type_node, lab_false);
9164 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
9165 gimplify_and_add (t, pre_p);
9166
9167 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
9168 u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
9169 fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
9170 t = fold_build_pointer_plus (t, u);
9171
9172 gimplify_assign (addr, t, pre_p);
9173
9174 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
9175
9176 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
9177
9178
9179 /* ... Otherwise out of the overflow area. */
9180
9181 t = ovf;
9182 if (size < UNITS_PER_LONG)
9183 t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG - size);
9184
9185 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
9186
9187 gimplify_assign (addr, t, pre_p);
9188
9189 t = fold_build_pointer_plus_hwi (t, size);
9190 gimplify_assign (ovf, t, pre_p);
9191
9192 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
9193
9194
9195 /* Increment register save count. */
9196
9197 u = build2 (PREINCREMENT_EXPR, TREE_TYPE (reg), reg,
9198 fold_convert (TREE_TYPE (reg), size_int (n_reg)));
9199 gimplify_and_add (u, pre_p);
9200
9201 if (indirect_p)
9202 {
9203 t = build_pointer_type_for_mode (build_pointer_type (type),
9204 ptr_mode, true);
9205 addr = fold_convert (t, addr);
9206 addr = build_va_arg_indirect_ref (addr);
9207 }
9208 else
9209 {
9210 t = build_pointer_type_for_mode (type, ptr_mode, true);
9211 addr = fold_convert (t, addr);
9212 }
9213
9214 return build_va_arg_indirect_ref (addr);
9215 }
9216
9217
9218 /* Builtins. */
9219
9220 enum s390_builtin
9221 {
9222 S390_BUILTIN_THREAD_POINTER,
9223 S390_BUILTIN_SET_THREAD_POINTER,
9224
9225 S390_BUILTIN_max
9226 };
9227
9228 static enum insn_code const code_for_builtin_64[S390_BUILTIN_max] = {
9229 CODE_FOR_get_tp_64,
9230 CODE_FOR_set_tp_64
9231 };
9232
9233 static enum insn_code const code_for_builtin_31[S390_BUILTIN_max] = {
9234 CODE_FOR_get_tp_31,
9235 CODE_FOR_set_tp_31
9236 };
9237
9238 static void
9239 s390_init_builtins (void)
9240 {
9241 tree ftype;
9242
9243 ftype = build_function_type_list (ptr_type_node, NULL_TREE);
9244 add_builtin_function ("__builtin_thread_pointer", ftype,
9245 S390_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
9246 NULL, NULL_TREE);
9247
9248 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
9249 add_builtin_function ("__builtin_set_thread_pointer", ftype,
9250 S390_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
9251 NULL, NULL_TREE);
9252 }
9253
9254 /* Expand an expression EXP that calls a built-in function,
9255 with result going to TARGET if that's convenient
9256 (and in mode MODE if that's convenient).
9257 SUBTARGET may be used as the target for computing one of EXP's operands.
9258 IGNORE is nonzero if the value is to be ignored. */
9259
9260 static rtx
9261 s390_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
9262 enum machine_mode mode ATTRIBUTE_UNUSED,
9263 int ignore ATTRIBUTE_UNUSED)
9264 {
9265 #define MAX_ARGS 2
9266
9267 enum insn_code const *code_for_builtin =
9268 TARGET_64BIT ? code_for_builtin_64 : code_for_builtin_31;
9269
9270 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
9271 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
9272 enum insn_code icode;
9273 rtx op[MAX_ARGS], pat;
9274 int arity;
9275 bool nonvoid;
9276 tree arg;
9277 call_expr_arg_iterator iter;
9278
9279 if (fcode >= S390_BUILTIN_max)
9280 internal_error ("bad builtin fcode");
9281 icode = code_for_builtin[fcode];
9282 if (icode == 0)
9283 internal_error ("bad builtin fcode");
9284
9285 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
9286
9287 arity = 0;
9288 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
9289 {
9290 const struct insn_operand_data *insn_op;
9291
9292 if (arg == error_mark_node)
9293 return NULL_RTX;
9294 if (arity > MAX_ARGS)
9295 return NULL_RTX;
9296
9297 insn_op = &insn_data[icode].operand[arity + nonvoid];
9298
9299 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
9300
9301 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
9302 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
9303 arity++;
9304 }
9305
9306 if (nonvoid)
9307 {
9308 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9309 if (!target
9310 || GET_MODE (target) != tmode
9311 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
9312 target = gen_reg_rtx (tmode);
9313 }
9314
9315 switch (arity)
9316 {
9317 case 0:
9318 pat = GEN_FCN (icode) (target);
9319 break;
9320 case 1:
9321 if (nonvoid)
9322 pat = GEN_FCN (icode) (target, op[0]);
9323 else
9324 pat = GEN_FCN (icode) (op[0]);
9325 break;
9326 case 2:
9327 pat = GEN_FCN (icode) (target, op[0], op[1]);
9328 break;
9329 default:
9330 gcc_unreachable ();
9331 }
9332 if (!pat)
9333 return NULL_RTX;
9334 emit_insn (pat);
9335
9336 if (nonvoid)
9337 return target;
9338 else
9339 return const0_rtx;
9340 }
9341
9342
9343 /* Output assembly code for the trampoline template to
9344 stdio stream FILE.
9345
9346 On S/390, we use gpr 1 internally in the trampoline code;
9347 gpr 0 is used to hold the static chain. */
9348
9349 static void
9350 s390_asm_trampoline_template (FILE *file)
9351 {
9352 rtx op[2];
9353 op[0] = gen_rtx_REG (Pmode, 0);
9354 op[1] = gen_rtx_REG (Pmode, 1);
9355
9356 if (TARGET_64BIT)
9357 {
9358 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
9359 output_asm_insn ("lmg\t%0,%1,14(%1)", op); /* 6 byte */
9360 output_asm_insn ("br\t%1", op); /* 2 byte */
9361 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 10));
9362 }
9363 else
9364 {
9365 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
9366 output_asm_insn ("lm\t%0,%1,6(%1)", op); /* 4 byte */
9367 output_asm_insn ("br\t%1", op); /* 2 byte */
9368 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 8));
9369 }
9370 }
9371
9372 /* Emit RTL insns to initialize the variable parts of a trampoline.
9373 FNADDR is an RTX for the address of the function's pure code.
9374 CXT is an RTX for the static chain value for the function. */
9375
9376 static void
9377 s390_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
9378 {
9379 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
9380 rtx mem;
9381
9382 emit_block_move (m_tramp, assemble_trampoline_template (),
9383 GEN_INT (2 * UNITS_PER_LONG), BLOCK_OP_NORMAL);
9384
9385 mem = adjust_address (m_tramp, Pmode, 2 * UNITS_PER_LONG);
9386 emit_move_insn (mem, cxt);
9387 mem = adjust_address (m_tramp, Pmode, 3 * UNITS_PER_LONG);
9388 emit_move_insn (mem, fnaddr);
9389 }
9390
9391 /* Output assembler code to FILE to increment profiler label # LABELNO
9392 for profiling a function entry. */
9393
9394 void
9395 s390_function_profiler (FILE *file, int labelno)
9396 {
9397 rtx op[7];
9398
9399 char label[128];
9400 ASM_GENERATE_INTERNAL_LABEL (label, "LP", labelno);
9401
9402 fprintf (file, "# function profiler \n");
9403
9404 op[0] = gen_rtx_REG (Pmode, RETURN_REGNUM);
9405 op[1] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
9406 op[1] = gen_rtx_MEM (Pmode, plus_constant (Pmode, op[1], UNITS_PER_LONG));
9407
9408 op[2] = gen_rtx_REG (Pmode, 1);
9409 op[3] = gen_rtx_SYMBOL_REF (Pmode, label);
9410 SYMBOL_REF_FLAGS (op[3]) = SYMBOL_FLAG_LOCAL;
9411
9412 op[4] = gen_rtx_SYMBOL_REF (Pmode, "_mcount");
9413 if (flag_pic)
9414 {
9415 op[4] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[4]), UNSPEC_PLT);
9416 op[4] = gen_rtx_CONST (Pmode, op[4]);
9417 }
9418
9419 if (TARGET_64BIT)
9420 {
9421 output_asm_insn ("stg\t%0,%1", op);
9422 output_asm_insn ("larl\t%2,%3", op);
9423 output_asm_insn ("brasl\t%0,%4", op);
9424 output_asm_insn ("lg\t%0,%1", op);
9425 }
9426 else if (!flag_pic)
9427 {
9428 op[6] = gen_label_rtx ();
9429
9430 output_asm_insn ("st\t%0,%1", op);
9431 output_asm_insn ("bras\t%2,%l6", op);
9432 output_asm_insn (".long\t%4", op);
9433 output_asm_insn (".long\t%3", op);
9434 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
9435 output_asm_insn ("l\t%0,0(%2)", op);
9436 output_asm_insn ("l\t%2,4(%2)", op);
9437 output_asm_insn ("basr\t%0,%0", op);
9438 output_asm_insn ("l\t%0,%1", op);
9439 }
9440 else
9441 {
9442 op[5] = gen_label_rtx ();
9443 op[6] = gen_label_rtx ();
9444
9445 output_asm_insn ("st\t%0,%1", op);
9446 output_asm_insn ("bras\t%2,%l6", op);
9447 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[5]));
9448 output_asm_insn (".long\t%4-%l5", op);
9449 output_asm_insn (".long\t%3-%l5", op);
9450 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
9451 output_asm_insn ("lr\t%0,%2", op);
9452 output_asm_insn ("a\t%0,0(%2)", op);
9453 output_asm_insn ("a\t%2,4(%2)", op);
9454 output_asm_insn ("basr\t%0,%0", op);
9455 output_asm_insn ("l\t%0,%1", op);
9456 }
9457 }
9458
9459 /* Encode symbol attributes (local vs. global, tls model) of a SYMBOL_REF
9460 into its SYMBOL_REF_FLAGS. */
9461
9462 static void
9463 s390_encode_section_info (tree decl, rtx rtl, int first)
9464 {
9465 default_encode_section_info (decl, rtl, first);
9466
9467 if (TREE_CODE (decl) == VAR_DECL)
9468 {
9469 /* If a variable has a forced alignment to < 2 bytes, mark it
9470 with SYMBOL_FLAG_ALIGN1 to prevent it from being used as LARL
9471 operand. */
9472 if (DECL_USER_ALIGN (decl) && DECL_ALIGN (decl) < 16)
9473 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_ALIGN1;
9474 if (!DECL_SIZE (decl)
9475 || !DECL_ALIGN (decl)
9476 || !host_integerp (DECL_SIZE (decl), 0)
9477 || (DECL_ALIGN (decl) <= 64
9478 && DECL_ALIGN (decl) != tree_low_cst (DECL_SIZE (decl), 0)))
9479 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
9480 }
9481
9482 /* Literal pool references don't have a decl so they are handled
9483 differently here. We rely on the information in the MEM_ALIGN
9484 entry to decide upon natural alignment. */
9485 if (MEM_P (rtl)
9486 && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
9487 && TREE_CONSTANT_POOL_ADDRESS_P (XEXP (rtl, 0))
9488 && (MEM_ALIGN (rtl) == 0
9489 || GET_MODE_BITSIZE (GET_MODE (rtl)) == 0
9490 || MEM_ALIGN (rtl) < GET_MODE_BITSIZE (GET_MODE (rtl))))
9491 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
9492 }
9493
9494 /* Output thunk to FILE that implements a C++ virtual function call (with
9495 multiple inheritance) to FUNCTION. The thunk adjusts the this pointer
9496 by DELTA, and unless VCALL_OFFSET is zero, applies an additional adjustment
9497 stored at VCALL_OFFSET in the vtable whose address is located at offset 0
9498 relative to the resulting this pointer. */
9499
9500 static void
9501 s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
9502 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
9503 tree function)
9504 {
9505 rtx op[10];
9506 int nonlocal = 0;
9507
9508 /* Make sure unwind info is emitted for the thunk if needed. */
9509 final_start_function (emit_barrier (), file, 1);
9510
9511 /* Operand 0 is the target function. */
9512 op[0] = XEXP (DECL_RTL (function), 0);
9513 if (flag_pic && !SYMBOL_REF_LOCAL_P (op[0]))
9514 {
9515 nonlocal = 1;
9516 op[0] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[0]),
9517 TARGET_64BIT ? UNSPEC_PLT : UNSPEC_GOT);
9518 op[0] = gen_rtx_CONST (Pmode, op[0]);
9519 }
9520
9521 /* Operand 1 is the 'this' pointer. */
9522 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
9523 op[1] = gen_rtx_REG (Pmode, 3);
9524 else
9525 op[1] = gen_rtx_REG (Pmode, 2);
9526
9527 /* Operand 2 is the delta. */
9528 op[2] = GEN_INT (delta);
9529
9530 /* Operand 3 is the vcall_offset. */
9531 op[3] = GEN_INT (vcall_offset);
9532
9533 /* Operand 4 is the temporary register. */
9534 op[4] = gen_rtx_REG (Pmode, 1);
9535
9536 /* Operands 5 to 8 can be used as labels. */
9537 op[5] = NULL_RTX;
9538 op[6] = NULL_RTX;
9539 op[7] = NULL_RTX;
9540 op[8] = NULL_RTX;
9541
9542 /* Operand 9 can be used for temporary register. */
9543 op[9] = NULL_RTX;
9544
9545 /* Generate code. */
9546 if (TARGET_64BIT)
9547 {
9548 /* Setup literal pool pointer if required. */
9549 if ((!DISP_IN_RANGE (delta)
9550 && !CONST_OK_FOR_K (delta)
9551 && !CONST_OK_FOR_Os (delta))
9552 || (!DISP_IN_RANGE (vcall_offset)
9553 && !CONST_OK_FOR_K (vcall_offset)
9554 && !CONST_OK_FOR_Os (vcall_offset)))
9555 {
9556 op[5] = gen_label_rtx ();
9557 output_asm_insn ("larl\t%4,%5", op);
9558 }
9559
9560 /* Add DELTA to this pointer. */
9561 if (delta)
9562 {
9563 if (CONST_OK_FOR_J (delta))
9564 output_asm_insn ("la\t%1,%2(%1)", op);
9565 else if (DISP_IN_RANGE (delta))
9566 output_asm_insn ("lay\t%1,%2(%1)", op);
9567 else if (CONST_OK_FOR_K (delta))
9568 output_asm_insn ("aghi\t%1,%2", op);
9569 else if (CONST_OK_FOR_Os (delta))
9570 output_asm_insn ("agfi\t%1,%2", op);
9571 else
9572 {
9573 op[6] = gen_label_rtx ();
9574 output_asm_insn ("agf\t%1,%6-%5(%4)", op);
9575 }
9576 }
9577
9578 /* Perform vcall adjustment. */
9579 if (vcall_offset)
9580 {
9581 if (DISP_IN_RANGE (vcall_offset))
9582 {
9583 output_asm_insn ("lg\t%4,0(%1)", op);
9584 output_asm_insn ("ag\t%1,%3(%4)", op);
9585 }
9586 else if (CONST_OK_FOR_K (vcall_offset))
9587 {
9588 output_asm_insn ("lghi\t%4,%3", op);
9589 output_asm_insn ("ag\t%4,0(%1)", op);
9590 output_asm_insn ("ag\t%1,0(%4)", op);
9591 }
9592 else if (CONST_OK_FOR_Os (vcall_offset))
9593 {
9594 output_asm_insn ("lgfi\t%4,%3", op);
9595 output_asm_insn ("ag\t%4,0(%1)", op);
9596 output_asm_insn ("ag\t%1,0(%4)", op);
9597 }
9598 else
9599 {
9600 op[7] = gen_label_rtx ();
9601 output_asm_insn ("llgf\t%4,%7-%5(%4)", op);
9602 output_asm_insn ("ag\t%4,0(%1)", op);
9603 output_asm_insn ("ag\t%1,0(%4)", op);
9604 }
9605 }
9606
9607 /* Jump to target. */
9608 output_asm_insn ("jg\t%0", op);
9609
9610 /* Output literal pool if required. */
9611 if (op[5])
9612 {
9613 output_asm_insn (".align\t4", op);
9614 targetm.asm_out.internal_label (file, "L",
9615 CODE_LABEL_NUMBER (op[5]));
9616 }
9617 if (op[6])
9618 {
9619 targetm.asm_out.internal_label (file, "L",
9620 CODE_LABEL_NUMBER (op[6]));
9621 output_asm_insn (".long\t%2", op);
9622 }
9623 if (op[7])
9624 {
9625 targetm.asm_out.internal_label (file, "L",
9626 CODE_LABEL_NUMBER (op[7]));
9627 output_asm_insn (".long\t%3", op);
9628 }
9629 }
9630 else
9631 {
9632 /* Setup base pointer if required. */
9633 if (!vcall_offset
9634 || (!DISP_IN_RANGE (delta)
9635 && !CONST_OK_FOR_K (delta)
9636 && !CONST_OK_FOR_Os (delta))
9637 || (!DISP_IN_RANGE (delta)
9638 && !CONST_OK_FOR_K (vcall_offset)
9639 && !CONST_OK_FOR_Os (vcall_offset)))
9640 {
9641 op[5] = gen_label_rtx ();
9642 output_asm_insn ("basr\t%4,0", op);
9643 targetm.asm_out.internal_label (file, "L",
9644 CODE_LABEL_NUMBER (op[5]));
9645 }
9646
9647 /* Add DELTA to this pointer. */
9648 if (delta)
9649 {
9650 if (CONST_OK_FOR_J (delta))
9651 output_asm_insn ("la\t%1,%2(%1)", op);
9652 else if (DISP_IN_RANGE (delta))
9653 output_asm_insn ("lay\t%1,%2(%1)", op);
9654 else if (CONST_OK_FOR_K (delta))
9655 output_asm_insn ("ahi\t%1,%2", op);
9656 else if (CONST_OK_FOR_Os (delta))
9657 output_asm_insn ("afi\t%1,%2", op);
9658 else
9659 {
9660 op[6] = gen_label_rtx ();
9661 output_asm_insn ("a\t%1,%6-%5(%4)", op);
9662 }
9663 }
9664
9665 /* Perform vcall adjustment. */
9666 if (vcall_offset)
9667 {
9668 if (CONST_OK_FOR_J (vcall_offset))
9669 {
9670 output_asm_insn ("l\t%4,0(%1)", op);
9671 output_asm_insn ("a\t%1,%3(%4)", op);
9672 }
9673 else if (DISP_IN_RANGE (vcall_offset))
9674 {
9675 output_asm_insn ("l\t%4,0(%1)", op);
9676 output_asm_insn ("ay\t%1,%3(%4)", op);
9677 }
9678 else if (CONST_OK_FOR_K (vcall_offset))
9679 {
9680 output_asm_insn ("lhi\t%4,%3", op);
9681 output_asm_insn ("a\t%4,0(%1)", op);
9682 output_asm_insn ("a\t%1,0(%4)", op);
9683 }
9684 else if (CONST_OK_FOR_Os (vcall_offset))
9685 {
9686 output_asm_insn ("iilf\t%4,%3", op);
9687 output_asm_insn ("a\t%4,0(%1)", op);
9688 output_asm_insn ("a\t%1,0(%4)", op);
9689 }
9690 else
9691 {
9692 op[7] = gen_label_rtx ();
9693 output_asm_insn ("l\t%4,%7-%5(%4)", op);
9694 output_asm_insn ("a\t%4,0(%1)", op);
9695 output_asm_insn ("a\t%1,0(%4)", op);
9696 }
9697
9698 /* We had to clobber the base pointer register.
9699 Re-setup the base pointer (with a different base). */
9700 op[5] = gen_label_rtx ();
9701 output_asm_insn ("basr\t%4,0", op);
9702 targetm.asm_out.internal_label (file, "L",
9703 CODE_LABEL_NUMBER (op[5]));
9704 }
9705
9706 /* Jump to target. */
9707 op[8] = gen_label_rtx ();
9708
9709 if (!flag_pic)
9710 output_asm_insn ("l\t%4,%8-%5(%4)", op);
9711 else if (!nonlocal)
9712 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9713 /* We cannot call through .plt, since .plt requires %r12 loaded. */
9714 else if (flag_pic == 1)
9715 {
9716 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9717 output_asm_insn ("l\t%4,%0(%4)", op);
9718 }
9719 else if (flag_pic == 2)
9720 {
9721 op[9] = gen_rtx_REG (Pmode, 0);
9722 output_asm_insn ("l\t%9,%8-4-%5(%4)", op);
9723 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9724 output_asm_insn ("ar\t%4,%9", op);
9725 output_asm_insn ("l\t%4,0(%4)", op);
9726 }
9727
9728 output_asm_insn ("br\t%4", op);
9729
9730 /* Output literal pool. */
9731 output_asm_insn (".align\t4", op);
9732
9733 if (nonlocal && flag_pic == 2)
9734 output_asm_insn (".long\t%0", op);
9735 if (nonlocal)
9736 {
9737 op[0] = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
9738 SYMBOL_REF_FLAGS (op[0]) = SYMBOL_FLAG_LOCAL;
9739 }
9740
9741 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[8]));
9742 if (!flag_pic)
9743 output_asm_insn (".long\t%0", op);
9744 else
9745 output_asm_insn (".long\t%0-%5", op);
9746
9747 if (op[6])
9748 {
9749 targetm.asm_out.internal_label (file, "L",
9750 CODE_LABEL_NUMBER (op[6]));
9751 output_asm_insn (".long\t%2", op);
9752 }
9753 if (op[7])
9754 {
9755 targetm.asm_out.internal_label (file, "L",
9756 CODE_LABEL_NUMBER (op[7]));
9757 output_asm_insn (".long\t%3", op);
9758 }
9759 }
9760 final_end_function ();
9761 }
9762
9763 static bool
9764 s390_valid_pointer_mode (enum machine_mode mode)
9765 {
9766 return (mode == SImode || (TARGET_64BIT && mode == DImode));
9767 }
9768
9769 /* Checks whether the given CALL_EXPR would use a caller
9770 saved register. This is used to decide whether sibling call
9771 optimization could be performed on the respective function
9772 call. */
9773
9774 static bool
9775 s390_call_saved_register_used (tree call_expr)
9776 {
9777 CUMULATIVE_ARGS cum_v;
9778 cumulative_args_t cum;
9779 tree parameter;
9780 enum machine_mode mode;
9781 tree type;
9782 rtx parm_rtx;
9783 int reg, i;
9784
9785 INIT_CUMULATIVE_ARGS (cum_v, NULL, NULL, 0, 0);
9786 cum = pack_cumulative_args (&cum_v);
9787
9788 for (i = 0; i < call_expr_nargs (call_expr); i++)
9789 {
9790 parameter = CALL_EXPR_ARG (call_expr, i);
9791 gcc_assert (parameter);
9792
9793 /* For an undeclared variable passed as parameter we will get
9794 an ERROR_MARK node here. */
9795 if (TREE_CODE (parameter) == ERROR_MARK)
9796 return true;
9797
9798 type = TREE_TYPE (parameter);
9799 gcc_assert (type);
9800
9801 mode = TYPE_MODE (type);
9802 gcc_assert (mode);
9803
9804 if (pass_by_reference (&cum_v, mode, type, true))
9805 {
9806 mode = Pmode;
9807 type = build_pointer_type (type);
9808 }
9809
9810 parm_rtx = s390_function_arg (cum, mode, type, 0);
9811
9812 s390_function_arg_advance (cum, mode, type, 0);
9813
9814 if (!parm_rtx)
9815 continue;
9816
9817 if (REG_P (parm_rtx))
9818 {
9819 for (reg = 0;
9820 reg < HARD_REGNO_NREGS (REGNO (parm_rtx), GET_MODE (parm_rtx));
9821 reg++)
9822 if (!call_used_regs[reg + REGNO (parm_rtx)])
9823 return true;
9824 }
9825
9826 if (GET_CODE (parm_rtx) == PARALLEL)
9827 {
9828 int i;
9829
9830 for (i = 0; i < XVECLEN (parm_rtx, 0); i++)
9831 {
9832 rtx r = XEXP (XVECEXP (parm_rtx, 0, i), 0);
9833
9834 gcc_assert (REG_P (r));
9835
9836 for (reg = 0;
9837 reg < HARD_REGNO_NREGS (REGNO (r), GET_MODE (r));
9838 reg++)
9839 if (!call_used_regs[reg + REGNO (r)])
9840 return true;
9841 }
9842 }
9843
9844 }
9845 return false;
9846 }
9847
9848 /* Return true if the given call expression can be
9849 turned into a sibling call.
9850 DECL holds the declaration of the function to be called whereas
9851 EXP is the call expression itself. */
9852
9853 static bool
9854 s390_function_ok_for_sibcall (tree decl, tree exp)
9855 {
9856 /* The TPF epilogue uses register 1. */
9857 if (TARGET_TPF_PROFILING)
9858 return false;
9859
9860 /* The 31 bit PLT code uses register 12 (GOT pointer - caller saved)
9861 which would have to be restored before the sibcall. */
9862 if (!TARGET_64BIT && flag_pic && decl && !targetm.binds_local_p (decl))
9863 return false;
9864
9865 /* Register 6 on s390 is available as an argument register but unfortunately
9866 "caller saved". This makes functions needing this register for arguments
9867 not suitable for sibcalls. */
9868 return !s390_call_saved_register_used (exp);
9869 }
9870
9871 /* Return the fixed registers used for condition codes. */
9872
9873 static bool
9874 s390_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
9875 {
9876 *p1 = CC_REGNUM;
9877 *p2 = INVALID_REGNUM;
9878
9879 return true;
9880 }
9881
9882 /* This function is used by the call expanders of the machine description.
9883 It emits the call insn itself together with the necessary operations
9884 to adjust the target address and returns the emitted insn.
9885 ADDR_LOCATION is the target address rtx
9886 TLS_CALL the location of the thread-local symbol
9887 RESULT_REG the register where the result of the call should be stored
9888 RETADDR_REG the register where the return address should be stored
9889 If this parameter is NULL_RTX the call is considered
9890 to be a sibling call. */
9891
9892 rtx
9893 s390_emit_call (rtx addr_location, rtx tls_call, rtx result_reg,
9894 rtx retaddr_reg)
9895 {
9896 bool plt_call = false;
9897 rtx insn;
9898 rtx call;
9899 rtx clobber;
9900 rtvec vec;
9901
9902 /* Direct function calls need special treatment. */
9903 if (GET_CODE (addr_location) == SYMBOL_REF)
9904 {
9905 /* When calling a global routine in PIC mode, we must
9906 replace the symbol itself with the PLT stub. */
9907 if (flag_pic && !SYMBOL_REF_LOCAL_P (addr_location))
9908 {
9909 if (retaddr_reg != NULL_RTX)
9910 {
9911 addr_location = gen_rtx_UNSPEC (Pmode,
9912 gen_rtvec (1, addr_location),
9913 UNSPEC_PLT);
9914 addr_location = gen_rtx_CONST (Pmode, addr_location);
9915 plt_call = true;
9916 }
9917 else
9918 /* For -fpic code the PLT entries might use r12 which is
9919 call-saved. Therefore we cannot do a sibcall when
9920 calling directly using a symbol ref. When reaching
9921 this point we decided (in s390_function_ok_for_sibcall)
9922 to do a sibcall for a function pointer but one of the
9923 optimizers was able to get rid of the function pointer
9924 by propagating the symbol ref into the call. This
9925 optimization is illegal for S/390 so we turn the direct
9926 call into a indirect call again. */
9927 addr_location = force_reg (Pmode, addr_location);
9928 }
9929
9930 /* Unless we can use the bras(l) insn, force the
9931 routine address into a register. */
9932 if (!TARGET_SMALL_EXEC && !TARGET_CPU_ZARCH)
9933 {
9934 if (flag_pic)
9935 addr_location = legitimize_pic_address (addr_location, 0);
9936 else
9937 addr_location = force_reg (Pmode, addr_location);
9938 }
9939 }
9940
9941 /* If it is already an indirect call or the code above moved the
9942 SYMBOL_REF to somewhere else make sure the address can be found in
9943 register 1. */
9944 if (retaddr_reg == NULL_RTX
9945 && GET_CODE (addr_location) != SYMBOL_REF
9946 && !plt_call)
9947 {
9948 emit_move_insn (gen_rtx_REG (Pmode, SIBCALL_REGNUM), addr_location);
9949 addr_location = gen_rtx_REG (Pmode, SIBCALL_REGNUM);
9950 }
9951
9952 addr_location = gen_rtx_MEM (QImode, addr_location);
9953 call = gen_rtx_CALL (VOIDmode, addr_location, const0_rtx);
9954
9955 if (result_reg != NULL_RTX)
9956 call = gen_rtx_SET (VOIDmode, result_reg, call);
9957
9958 if (retaddr_reg != NULL_RTX)
9959 {
9960 clobber = gen_rtx_CLOBBER (VOIDmode, retaddr_reg);
9961
9962 if (tls_call != NULL_RTX)
9963 vec = gen_rtvec (3, call, clobber,
9964 gen_rtx_USE (VOIDmode, tls_call));
9965 else
9966 vec = gen_rtvec (2, call, clobber);
9967
9968 call = gen_rtx_PARALLEL (VOIDmode, vec);
9969 }
9970
9971 insn = emit_call_insn (call);
9972
9973 /* 31-bit PLT stubs and tls calls use the GOT register implicitly. */
9974 if ((!TARGET_64BIT && plt_call) || tls_call != NULL_RTX)
9975 {
9976 /* s390_function_ok_for_sibcall should
9977 have denied sibcalls in this case. */
9978 gcc_assert (retaddr_reg != NULL_RTX);
9979 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, 12));
9980 }
9981 return insn;
9982 }
9983
9984 /* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
9985
9986 static void
9987 s390_conditional_register_usage (void)
9988 {
9989 int i;
9990
9991 if (flag_pic)
9992 {
9993 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
9994 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
9995 }
9996 if (TARGET_CPU_ZARCH)
9997 {
9998 fixed_regs[BASE_REGNUM] = 0;
9999 call_used_regs[BASE_REGNUM] = 0;
10000 fixed_regs[RETURN_REGNUM] = 0;
10001 call_used_regs[RETURN_REGNUM] = 0;
10002 }
10003 if (TARGET_64BIT)
10004 {
10005 for (i = 24; i < 32; i++)
10006 call_used_regs[i] = call_really_used_regs[i] = 0;
10007 }
10008 else
10009 {
10010 for (i = 18; i < 20; i++)
10011 call_used_regs[i] = call_really_used_regs[i] = 0;
10012 }
10013
10014 if (TARGET_SOFT_FLOAT)
10015 {
10016 for (i = 16; i < 32; i++)
10017 call_used_regs[i] = fixed_regs[i] = 1;
10018 }
10019 }
10020
10021 /* Corresponding function to eh_return expander. */
10022
10023 static GTY(()) rtx s390_tpf_eh_return_symbol;
10024 void
10025 s390_emit_tpf_eh_return (rtx target)
10026 {
10027 rtx insn, reg;
10028
10029 if (!s390_tpf_eh_return_symbol)
10030 s390_tpf_eh_return_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tpf_eh_return");
10031
10032 reg = gen_rtx_REG (Pmode, 2);
10033
10034 emit_move_insn (reg, target);
10035 insn = s390_emit_call (s390_tpf_eh_return_symbol, NULL_RTX, reg,
10036 gen_rtx_REG (Pmode, RETURN_REGNUM));
10037 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
10038
10039 emit_move_insn (EH_RETURN_HANDLER_RTX, reg);
10040 }
10041
10042 /* Rework the prologue/epilogue to avoid saving/restoring
10043 registers unnecessarily. */
10044
10045 static void
10046 s390_optimize_prologue (void)
10047 {
10048 rtx insn, new_insn, next_insn;
10049
10050 /* Do a final recompute of the frame-related data. */
10051
10052 s390_update_frame_layout ();
10053
10054 /* If all special registers are in fact used, there's nothing we
10055 can do, so no point in walking the insn list. */
10056
10057 if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
10058 && cfun_frame_layout.last_save_gpr >= BASE_REGNUM
10059 && (TARGET_CPU_ZARCH
10060 || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
10061 && cfun_frame_layout.last_save_gpr >= RETURN_REGNUM)))
10062 return;
10063
10064 /* Search for prologue/epilogue insns and replace them. */
10065
10066 for (insn = get_insns (); insn; insn = next_insn)
10067 {
10068 int first, last, off;
10069 rtx set, base, offset;
10070
10071 next_insn = NEXT_INSN (insn);
10072
10073 if (GET_CODE (insn) != INSN)
10074 continue;
10075
10076 if (GET_CODE (PATTERN (insn)) == PARALLEL
10077 && store_multiple_operation (PATTERN (insn), VOIDmode))
10078 {
10079 set = XVECEXP (PATTERN (insn), 0, 0);
10080 first = REGNO (SET_SRC (set));
10081 last = first + XVECLEN (PATTERN (insn), 0) - 1;
10082 offset = const0_rtx;
10083 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
10084 off = INTVAL (offset);
10085
10086 if (GET_CODE (base) != REG || off < 0)
10087 continue;
10088 if (cfun_frame_layout.first_save_gpr != -1
10089 && (cfun_frame_layout.first_save_gpr < first
10090 || cfun_frame_layout.last_save_gpr > last))
10091 continue;
10092 if (REGNO (base) != STACK_POINTER_REGNUM
10093 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10094 continue;
10095 if (first > BASE_REGNUM || last < BASE_REGNUM)
10096 continue;
10097
10098 if (cfun_frame_layout.first_save_gpr != -1)
10099 {
10100 new_insn = save_gprs (base,
10101 off + (cfun_frame_layout.first_save_gpr
10102 - first) * UNITS_PER_LONG,
10103 cfun_frame_layout.first_save_gpr,
10104 cfun_frame_layout.last_save_gpr);
10105 new_insn = emit_insn_before (new_insn, insn);
10106 INSN_ADDRESSES_NEW (new_insn, -1);
10107 }
10108
10109 remove_insn (insn);
10110 continue;
10111 }
10112
10113 if (cfun_frame_layout.first_save_gpr == -1
10114 && GET_CODE (PATTERN (insn)) == SET
10115 && GET_CODE (SET_SRC (PATTERN (insn))) == REG
10116 && (REGNO (SET_SRC (PATTERN (insn))) == BASE_REGNUM
10117 || (!TARGET_CPU_ZARCH
10118 && REGNO (SET_SRC (PATTERN (insn))) == RETURN_REGNUM))
10119 && GET_CODE (SET_DEST (PATTERN (insn))) == MEM)
10120 {
10121 set = PATTERN (insn);
10122 first = REGNO (SET_SRC (set));
10123 offset = const0_rtx;
10124 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
10125 off = INTVAL (offset);
10126
10127 if (GET_CODE (base) != REG || off < 0)
10128 continue;
10129 if (REGNO (base) != STACK_POINTER_REGNUM
10130 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10131 continue;
10132
10133 remove_insn (insn);
10134 continue;
10135 }
10136
10137 if (GET_CODE (PATTERN (insn)) == PARALLEL
10138 && load_multiple_operation (PATTERN (insn), VOIDmode))
10139 {
10140 set = XVECEXP (PATTERN (insn), 0, 0);
10141 first = REGNO (SET_DEST (set));
10142 last = first + XVECLEN (PATTERN (insn), 0) - 1;
10143 offset = const0_rtx;
10144 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
10145 off = INTVAL (offset);
10146
10147 if (GET_CODE (base) != REG || off < 0)
10148 continue;
10149 if (cfun_frame_layout.first_restore_gpr != -1
10150 && (cfun_frame_layout.first_restore_gpr < first
10151 || cfun_frame_layout.last_restore_gpr > last))
10152 continue;
10153 if (REGNO (base) != STACK_POINTER_REGNUM
10154 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10155 continue;
10156 if (first > BASE_REGNUM || last < BASE_REGNUM)
10157 continue;
10158
10159 if (cfun_frame_layout.first_restore_gpr != -1)
10160 {
10161 new_insn = restore_gprs (base,
10162 off + (cfun_frame_layout.first_restore_gpr
10163 - first) * UNITS_PER_LONG,
10164 cfun_frame_layout.first_restore_gpr,
10165 cfun_frame_layout.last_restore_gpr);
10166 new_insn = emit_insn_before (new_insn, insn);
10167 INSN_ADDRESSES_NEW (new_insn, -1);
10168 }
10169
10170 remove_insn (insn);
10171 continue;
10172 }
10173
10174 if (cfun_frame_layout.first_restore_gpr == -1
10175 && GET_CODE (PATTERN (insn)) == SET
10176 && GET_CODE (SET_DEST (PATTERN (insn))) == REG
10177 && (REGNO (SET_DEST (PATTERN (insn))) == BASE_REGNUM
10178 || (!TARGET_CPU_ZARCH
10179 && REGNO (SET_DEST (PATTERN (insn))) == RETURN_REGNUM))
10180 && GET_CODE (SET_SRC (PATTERN (insn))) == MEM)
10181 {
10182 set = PATTERN (insn);
10183 first = REGNO (SET_DEST (set));
10184 offset = const0_rtx;
10185 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
10186 off = INTVAL (offset);
10187
10188 if (GET_CODE (base) != REG || off < 0)
10189 continue;
10190 if (REGNO (base) != STACK_POINTER_REGNUM
10191 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10192 continue;
10193
10194 remove_insn (insn);
10195 continue;
10196 }
10197 }
10198 }
10199
10200 /* On z10 and later the dynamic branch prediction must see the
10201 backward jump within a certain windows. If not it falls back to
10202 the static prediction. This function rearranges the loop backward
10203 branch in a way which makes the static prediction always correct.
10204 The function returns true if it added an instruction. */
10205 static bool
10206 s390_fix_long_loop_prediction (rtx insn)
10207 {
10208 rtx set = single_set (insn);
10209 rtx code_label, label_ref, new_label;
10210 rtx uncond_jump;
10211 rtx cur_insn;
10212 rtx tmp;
10213 int distance;
10214
10215 /* This will exclude branch on count and branch on index patterns
10216 since these are correctly statically predicted. */
10217 if (!set
10218 || SET_DEST (set) != pc_rtx
10219 || GET_CODE (SET_SRC(set)) != IF_THEN_ELSE)
10220 return false;
10221
10222 label_ref = (GET_CODE (XEXP (SET_SRC (set), 1)) == LABEL_REF ?
10223 XEXP (SET_SRC (set), 1) : XEXP (SET_SRC (set), 2));
10224
10225 gcc_assert (GET_CODE (label_ref) == LABEL_REF);
10226
10227 code_label = XEXP (label_ref, 0);
10228
10229 if (INSN_ADDRESSES (INSN_UID (code_label)) == -1
10230 || INSN_ADDRESSES (INSN_UID (insn)) == -1
10231 || (INSN_ADDRESSES (INSN_UID (insn))
10232 - INSN_ADDRESSES (INSN_UID (code_label)) < PREDICT_DISTANCE))
10233 return false;
10234
10235 for (distance = 0, cur_insn = PREV_INSN (insn);
10236 distance < PREDICT_DISTANCE - 6;
10237 distance += get_attr_length (cur_insn), cur_insn = PREV_INSN (cur_insn))
10238 if (!cur_insn || JUMP_P (cur_insn) || LABEL_P (cur_insn))
10239 return false;
10240
10241 new_label = gen_label_rtx ();
10242 uncond_jump = emit_jump_insn_after (
10243 gen_rtx_SET (VOIDmode, pc_rtx,
10244 gen_rtx_LABEL_REF (VOIDmode, code_label)),
10245 insn);
10246 emit_label_after (new_label, uncond_jump);
10247
10248 tmp = XEXP (SET_SRC (set), 1);
10249 XEXP (SET_SRC (set), 1) = XEXP (SET_SRC (set), 2);
10250 XEXP (SET_SRC (set), 2) = tmp;
10251 INSN_CODE (insn) = -1;
10252
10253 XEXP (label_ref, 0) = new_label;
10254 JUMP_LABEL (insn) = new_label;
10255 JUMP_LABEL (uncond_jump) = code_label;
10256
10257 return true;
10258 }
10259
10260 /* Returns 1 if INSN reads the value of REG for purposes not related
10261 to addressing of memory, and 0 otherwise. */
10262 static int
10263 s390_non_addr_reg_read_p (rtx reg, rtx insn)
10264 {
10265 return reg_referenced_p (reg, PATTERN (insn))
10266 && !reg_used_in_mem_p (REGNO (reg), PATTERN (insn));
10267 }
10268
10269 /* Starting from INSN find_cond_jump looks downwards in the insn
10270 stream for a single jump insn which is the last user of the
10271 condition code set in INSN. */
10272 static rtx
10273 find_cond_jump (rtx insn)
10274 {
10275 for (; insn; insn = NEXT_INSN (insn))
10276 {
10277 rtx ite, cc;
10278
10279 if (LABEL_P (insn))
10280 break;
10281
10282 if (!JUMP_P (insn))
10283 {
10284 if (reg_mentioned_p (gen_rtx_REG (CCmode, CC_REGNUM), insn))
10285 break;
10286 continue;
10287 }
10288
10289 /* This will be triggered by a return. */
10290 if (GET_CODE (PATTERN (insn)) != SET)
10291 break;
10292
10293 gcc_assert (SET_DEST (PATTERN (insn)) == pc_rtx);
10294 ite = SET_SRC (PATTERN (insn));
10295
10296 if (GET_CODE (ite) != IF_THEN_ELSE)
10297 break;
10298
10299 cc = XEXP (XEXP (ite, 0), 0);
10300 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc)))
10301 break;
10302
10303 if (find_reg_note (insn, REG_DEAD, cc))
10304 return insn;
10305 break;
10306 }
10307
10308 return NULL_RTX;
10309 }
10310
10311 /* Swap the condition in COND and the operands in OP0 and OP1 so that
10312 the semantics does not change. If NULL_RTX is passed as COND the
10313 function tries to find the conditional jump starting with INSN. */
10314 static void
10315 s390_swap_cmp (rtx cond, rtx *op0, rtx *op1, rtx insn)
10316 {
10317 rtx tmp = *op0;
10318
10319 if (cond == NULL_RTX)
10320 {
10321 rtx jump = find_cond_jump (NEXT_INSN (insn));
10322 jump = jump ? single_set (jump) : NULL_RTX;
10323
10324 if (jump == NULL_RTX)
10325 return;
10326
10327 cond = XEXP (XEXP (jump, 1), 0);
10328 }
10329
10330 *op0 = *op1;
10331 *op1 = tmp;
10332 PUT_CODE (cond, swap_condition (GET_CODE (cond)));
10333 }
10334
10335 /* On z10, instructions of the compare-and-branch family have the
10336 property to access the register occurring as second operand with
10337 its bits complemented. If such a compare is grouped with a second
10338 instruction that accesses the same register non-complemented, and
10339 if that register's value is delivered via a bypass, then the
10340 pipeline recycles, thereby causing significant performance decline.
10341 This function locates such situations and exchanges the two
10342 operands of the compare. The function return true whenever it
10343 added an insn. */
10344 static bool
10345 s390_z10_optimize_cmp (rtx insn)
10346 {
10347 rtx prev_insn, next_insn;
10348 bool insn_added_p = false;
10349 rtx cond, *op0, *op1;
10350
10351 if (GET_CODE (PATTERN (insn)) == PARALLEL)
10352 {
10353 /* Handle compare and branch and branch on count
10354 instructions. */
10355 rtx pattern = single_set (insn);
10356
10357 if (!pattern
10358 || SET_DEST (pattern) != pc_rtx
10359 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE)
10360 return false;
10361
10362 cond = XEXP (SET_SRC (pattern), 0);
10363 op0 = &XEXP (cond, 0);
10364 op1 = &XEXP (cond, 1);
10365 }
10366 else if (GET_CODE (PATTERN (insn)) == SET)
10367 {
10368 rtx src, dest;
10369
10370 /* Handle normal compare instructions. */
10371 src = SET_SRC (PATTERN (insn));
10372 dest = SET_DEST (PATTERN (insn));
10373
10374 if (!REG_P (dest)
10375 || !CC_REGNO_P (REGNO (dest))
10376 || GET_CODE (src) != COMPARE)
10377 return false;
10378
10379 /* s390_swap_cmp will try to find the conditional
10380 jump when passing NULL_RTX as condition. */
10381 cond = NULL_RTX;
10382 op0 = &XEXP (src, 0);
10383 op1 = &XEXP (src, 1);
10384 }
10385 else
10386 return false;
10387
10388 if (!REG_P (*op0) || !REG_P (*op1))
10389 return false;
10390
10391 if (GET_MODE_CLASS (GET_MODE (*op0)) != MODE_INT)
10392 return false;
10393
10394 /* Swap the COMPARE arguments and its mask if there is a
10395 conflicting access in the previous insn. */
10396 prev_insn = prev_active_insn (insn);
10397 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
10398 && reg_referenced_p (*op1, PATTERN (prev_insn)))
10399 s390_swap_cmp (cond, op0, op1, insn);
10400
10401 /* Check if there is a conflict with the next insn. If there
10402 was no conflict with the previous insn, then swap the
10403 COMPARE arguments and its mask. If we already swapped
10404 the operands, or if swapping them would cause a conflict
10405 with the previous insn, issue a NOP after the COMPARE in
10406 order to separate the two instuctions. */
10407 next_insn = next_active_insn (insn);
10408 if (next_insn != NULL_RTX && INSN_P (next_insn)
10409 && s390_non_addr_reg_read_p (*op1, next_insn))
10410 {
10411 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
10412 && s390_non_addr_reg_read_p (*op0, prev_insn))
10413 {
10414 if (REGNO (*op1) == 0)
10415 emit_insn_after (gen_nop1 (), insn);
10416 else
10417 emit_insn_after (gen_nop (), insn);
10418 insn_added_p = true;
10419 }
10420 else
10421 s390_swap_cmp (cond, op0, op1, insn);
10422 }
10423 return insn_added_p;
10424 }
10425
10426 /* Perform machine-dependent processing. */
10427
10428 static void
10429 s390_reorg (void)
10430 {
10431 bool pool_overflow = false;
10432
10433 /* Make sure all splits have been performed; splits after
10434 machine_dependent_reorg might confuse insn length counts. */
10435 split_all_insns_noflow ();
10436
10437 /* Install the main literal pool and the associated base
10438 register load insns.
10439
10440 In addition, there are two problematic situations we need
10441 to correct:
10442
10443 - the literal pool might be > 4096 bytes in size, so that
10444 some of its elements cannot be directly accessed
10445
10446 - a branch target might be > 64K away from the branch, so that
10447 it is not possible to use a PC-relative instruction.
10448
10449 To fix those, we split the single literal pool into multiple
10450 pool chunks, reloading the pool base register at various
10451 points throughout the function to ensure it always points to
10452 the pool chunk the following code expects, and / or replace
10453 PC-relative branches by absolute branches.
10454
10455 However, the two problems are interdependent: splitting the
10456 literal pool can move a branch further away from its target,
10457 causing the 64K limit to overflow, and on the other hand,
10458 replacing a PC-relative branch by an absolute branch means
10459 we need to put the branch target address into the literal
10460 pool, possibly causing it to overflow.
10461
10462 So, we loop trying to fix up both problems until we manage
10463 to satisfy both conditions at the same time. Note that the
10464 loop is guaranteed to terminate as every pass of the loop
10465 strictly decreases the total number of PC-relative branches
10466 in the function. (This is not completely true as there
10467 might be branch-over-pool insns introduced by chunkify_start.
10468 Those never need to be split however.) */
10469
10470 for (;;)
10471 {
10472 struct constant_pool *pool = NULL;
10473
10474 /* Collect the literal pool. */
10475 if (!pool_overflow)
10476 {
10477 pool = s390_mainpool_start ();
10478 if (!pool)
10479 pool_overflow = true;
10480 }
10481
10482 /* If literal pool overflowed, start to chunkify it. */
10483 if (pool_overflow)
10484 pool = s390_chunkify_start ();
10485
10486 /* Split out-of-range branches. If this has created new
10487 literal pool entries, cancel current chunk list and
10488 recompute it. zSeries machines have large branch
10489 instructions, so we never need to split a branch. */
10490 if (!TARGET_CPU_ZARCH && s390_split_branches ())
10491 {
10492 if (pool_overflow)
10493 s390_chunkify_cancel (pool);
10494 else
10495 s390_mainpool_cancel (pool);
10496
10497 continue;
10498 }
10499
10500 /* If we made it up to here, both conditions are satisfied.
10501 Finish up literal pool related changes. */
10502 if (pool_overflow)
10503 s390_chunkify_finish (pool);
10504 else
10505 s390_mainpool_finish (pool);
10506
10507 /* We're done splitting branches. */
10508 cfun->machine->split_branches_pending_p = false;
10509 break;
10510 }
10511
10512 /* Generate out-of-pool execute target insns. */
10513 if (TARGET_CPU_ZARCH)
10514 {
10515 rtx insn, label, target;
10516
10517 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10518 {
10519 label = s390_execute_label (insn);
10520 if (!label)
10521 continue;
10522
10523 gcc_assert (label != const0_rtx);
10524
10525 target = emit_label (XEXP (label, 0));
10526 INSN_ADDRESSES_NEW (target, -1);
10527
10528 target = emit_insn (s390_execute_target (insn));
10529 INSN_ADDRESSES_NEW (target, -1);
10530 }
10531 }
10532
10533 /* Try to optimize prologue and epilogue further. */
10534 s390_optimize_prologue ();
10535
10536 /* Walk over the insns and do some >=z10 specific changes. */
10537 if (s390_tune == PROCESSOR_2097_Z10
10538 || s390_tune == PROCESSOR_2817_Z196)
10539 {
10540 rtx insn;
10541 bool insn_added_p = false;
10542
10543 /* The insn lengths and addresses have to be up to date for the
10544 following manipulations. */
10545 shorten_branches (get_insns ());
10546
10547 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10548 {
10549 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
10550 continue;
10551
10552 if (JUMP_P (insn))
10553 insn_added_p |= s390_fix_long_loop_prediction (insn);
10554
10555 if ((GET_CODE (PATTERN (insn)) == PARALLEL
10556 || GET_CODE (PATTERN (insn)) == SET)
10557 && s390_tune == PROCESSOR_2097_Z10)
10558 insn_added_p |= s390_z10_optimize_cmp (insn);
10559 }
10560
10561 /* Adjust branches if we added new instructions. */
10562 if (insn_added_p)
10563 shorten_branches (get_insns ());
10564 }
10565 }
10566
10567 /* Return true if INSN is a fp load insn writing register REGNO. */
10568 static inline bool
10569 s390_fpload_toreg (rtx insn, unsigned int regno)
10570 {
10571 rtx set;
10572 enum attr_type flag = s390_safe_attr_type (insn);
10573
10574 if (flag != TYPE_FLOADSF && flag != TYPE_FLOADDF)
10575 return false;
10576
10577 set = single_set (insn);
10578
10579 if (set == NULL_RTX)
10580 return false;
10581
10582 if (!REG_P (SET_DEST (set)) || !MEM_P (SET_SRC (set)))
10583 return false;
10584
10585 if (REGNO (SET_DEST (set)) != regno)
10586 return false;
10587
10588 return true;
10589 }
10590
10591 /* This value describes the distance to be avoided between an
10592 aritmetic fp instruction and an fp load writing the same register.
10593 Z10_EARLYLOAD_DISTANCE - 1 as well as Z10_EARLYLOAD_DISTANCE + 1 is
10594 fine but the exact value has to be avoided. Otherwise the FP
10595 pipeline will throw an exception causing a major penalty. */
10596 #define Z10_EARLYLOAD_DISTANCE 7
10597
10598 /* Rearrange the ready list in order to avoid the situation described
10599 for Z10_EARLYLOAD_DISTANCE. A problematic load instruction is
10600 moved to the very end of the ready list. */
10601 static void
10602 s390_z10_prevent_earlyload_conflicts (rtx *ready, int *nready_p)
10603 {
10604 unsigned int regno;
10605 int nready = *nready_p;
10606 rtx tmp;
10607 int i;
10608 rtx insn;
10609 rtx set;
10610 enum attr_type flag;
10611 int distance;
10612
10613 /* Skip DISTANCE - 1 active insns. */
10614 for (insn = last_scheduled_insn, distance = Z10_EARLYLOAD_DISTANCE - 1;
10615 distance > 0 && insn != NULL_RTX;
10616 distance--, insn = prev_active_insn (insn))
10617 if (CALL_P (insn) || JUMP_P (insn))
10618 return;
10619
10620 if (insn == NULL_RTX)
10621 return;
10622
10623 set = single_set (insn);
10624
10625 if (set == NULL_RTX || !REG_P (SET_DEST (set))
10626 || GET_MODE_CLASS (GET_MODE (SET_DEST (set))) != MODE_FLOAT)
10627 return;
10628
10629 flag = s390_safe_attr_type (insn);
10630
10631 if (flag == TYPE_FLOADSF || flag == TYPE_FLOADDF)
10632 return;
10633
10634 regno = REGNO (SET_DEST (set));
10635 i = nready - 1;
10636
10637 while (!s390_fpload_toreg (ready[i], regno) && i > 0)
10638 i--;
10639
10640 if (!i)
10641 return;
10642
10643 tmp = ready[i];
10644 memmove (&ready[1], &ready[0], sizeof (rtx) * i);
10645 ready[0] = tmp;
10646 }
10647
10648 /* This function is called via hook TARGET_SCHED_REORDER before
10649 issuing one insn from list READY which contains *NREADYP entries.
10650 For target z10 it reorders load instructions to avoid early load
10651 conflicts in the floating point pipeline */
10652 static int
10653 s390_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
10654 rtx *ready, int *nreadyp, int clock ATTRIBUTE_UNUSED)
10655 {
10656 if (s390_tune == PROCESSOR_2097_Z10)
10657 if (reload_completed && *nreadyp > 1)
10658 s390_z10_prevent_earlyload_conflicts (ready, nreadyp);
10659
10660 return s390_issue_rate ();
10661 }
10662
10663 /* This function is called via hook TARGET_SCHED_VARIABLE_ISSUE after
10664 the scheduler has issued INSN. It stores the last issued insn into
10665 last_scheduled_insn in order to make it available for
10666 s390_sched_reorder. */
10667 static int
10668 s390_sched_variable_issue (FILE *file ATTRIBUTE_UNUSED,
10669 int verbose ATTRIBUTE_UNUSED,
10670 rtx insn, int more)
10671 {
10672 last_scheduled_insn = insn;
10673
10674 if (GET_CODE (PATTERN (insn)) != USE
10675 && GET_CODE (PATTERN (insn)) != CLOBBER)
10676 return more - 1;
10677 else
10678 return more;
10679 }
10680
10681 static void
10682 s390_sched_init (FILE *file ATTRIBUTE_UNUSED,
10683 int verbose ATTRIBUTE_UNUSED,
10684 int max_ready ATTRIBUTE_UNUSED)
10685 {
10686 last_scheduled_insn = NULL_RTX;
10687 }
10688
10689 /* This function checks the whole of insn X for memory references. The
10690 function always returns zero because the framework it is called
10691 from would stop recursively analyzing the insn upon a return value
10692 other than zero. The real result of this function is updating
10693 counter variable MEM_COUNT. */
10694 static int
10695 check_dpu (rtx *x, unsigned *mem_count)
10696 {
10697 if (*x != NULL_RTX && MEM_P (*x))
10698 (*mem_count)++;
10699 return 0;
10700 }
10701
10702 /* This target hook implementation for TARGET_LOOP_UNROLL_ADJUST calculates
10703 a new number struct loop *loop should be unrolled if tuned for cpus with
10704 a built-in stride prefetcher.
10705 The loop is analyzed for memory accesses by calling check_dpu for
10706 each rtx of the loop. Depending on the loop_depth and the amount of
10707 memory accesses a new number <=nunroll is returned to improve the
10708 behaviour of the hardware prefetch unit. */
10709 static unsigned
10710 s390_loop_unroll_adjust (unsigned nunroll, struct loop *loop)
10711 {
10712 basic_block *bbs;
10713 rtx insn;
10714 unsigned i;
10715 unsigned mem_count = 0;
10716
10717 if (s390_tune != PROCESSOR_2097_Z10 && s390_tune != PROCESSOR_2817_Z196)
10718 return nunroll;
10719
10720 /* Count the number of memory references within the loop body. */
10721 bbs = get_loop_body (loop);
10722 for (i = 0; i < loop->num_nodes; i++)
10723 {
10724 for (insn = BB_HEAD (bbs[i]); insn != BB_END (bbs[i]); insn = NEXT_INSN (insn))
10725 if (INSN_P (insn) && INSN_CODE (insn) != -1)
10726 for_each_rtx (&insn, (rtx_function) check_dpu, &mem_count);
10727 }
10728 free (bbs);
10729
10730 /* Prevent division by zero, and we do not need to adjust nunroll in this case. */
10731 if (mem_count == 0)
10732 return nunroll;
10733
10734 switch (loop_depth(loop))
10735 {
10736 case 1:
10737 return MIN (nunroll, 28 / mem_count);
10738 case 2:
10739 return MIN (nunroll, 22 / mem_count);
10740 default:
10741 return MIN (nunroll, 16 / mem_count);
10742 }
10743 }
10744
10745 /* Initialize GCC target structure. */
10746
10747 #undef TARGET_ASM_ALIGNED_HI_OP
10748 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
10749 #undef TARGET_ASM_ALIGNED_DI_OP
10750 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
10751 #undef TARGET_ASM_INTEGER
10752 #define TARGET_ASM_INTEGER s390_assemble_integer
10753
10754 #undef TARGET_ASM_OPEN_PAREN
10755 #define TARGET_ASM_OPEN_PAREN ""
10756
10757 #undef TARGET_ASM_CLOSE_PAREN
10758 #define TARGET_ASM_CLOSE_PAREN ""
10759
10760 #undef TARGET_OPTION_OVERRIDE
10761 #define TARGET_OPTION_OVERRIDE s390_option_override
10762
10763 #undef TARGET_ENCODE_SECTION_INFO
10764 #define TARGET_ENCODE_SECTION_INFO s390_encode_section_info
10765
10766 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10767 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
10768
10769 #ifdef HAVE_AS_TLS
10770 #undef TARGET_HAVE_TLS
10771 #define TARGET_HAVE_TLS true
10772 #endif
10773 #undef TARGET_CANNOT_FORCE_CONST_MEM
10774 #define TARGET_CANNOT_FORCE_CONST_MEM s390_cannot_force_const_mem
10775
10776 #undef TARGET_DELEGITIMIZE_ADDRESS
10777 #define TARGET_DELEGITIMIZE_ADDRESS s390_delegitimize_address
10778
10779 #undef TARGET_LEGITIMIZE_ADDRESS
10780 #define TARGET_LEGITIMIZE_ADDRESS s390_legitimize_address
10781
10782 #undef TARGET_RETURN_IN_MEMORY
10783 #define TARGET_RETURN_IN_MEMORY s390_return_in_memory
10784
10785 #undef TARGET_INIT_BUILTINS
10786 #define TARGET_INIT_BUILTINS s390_init_builtins
10787 #undef TARGET_EXPAND_BUILTIN
10788 #define TARGET_EXPAND_BUILTIN s390_expand_builtin
10789
10790 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
10791 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA s390_output_addr_const_extra
10792
10793 #undef TARGET_ASM_OUTPUT_MI_THUNK
10794 #define TARGET_ASM_OUTPUT_MI_THUNK s390_output_mi_thunk
10795 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
10796 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
10797
10798 #undef TARGET_SCHED_ADJUST_PRIORITY
10799 #define TARGET_SCHED_ADJUST_PRIORITY s390_adjust_priority
10800 #undef TARGET_SCHED_ISSUE_RATE
10801 #define TARGET_SCHED_ISSUE_RATE s390_issue_rate
10802 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
10803 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD s390_first_cycle_multipass_dfa_lookahead
10804
10805 #undef TARGET_SCHED_VARIABLE_ISSUE
10806 #define TARGET_SCHED_VARIABLE_ISSUE s390_sched_variable_issue
10807 #undef TARGET_SCHED_REORDER
10808 #define TARGET_SCHED_REORDER s390_sched_reorder
10809 #undef TARGET_SCHED_INIT
10810 #define TARGET_SCHED_INIT s390_sched_init
10811
10812 #undef TARGET_CANNOT_COPY_INSN_P
10813 #define TARGET_CANNOT_COPY_INSN_P s390_cannot_copy_insn_p
10814 #undef TARGET_RTX_COSTS
10815 #define TARGET_RTX_COSTS s390_rtx_costs
10816 #undef TARGET_ADDRESS_COST
10817 #define TARGET_ADDRESS_COST s390_address_cost
10818 #undef TARGET_REGISTER_MOVE_COST
10819 #define TARGET_REGISTER_MOVE_COST s390_register_move_cost
10820 #undef TARGET_MEMORY_MOVE_COST
10821 #define TARGET_MEMORY_MOVE_COST s390_memory_move_cost
10822
10823 #undef TARGET_MACHINE_DEPENDENT_REORG
10824 #define TARGET_MACHINE_DEPENDENT_REORG s390_reorg
10825
10826 #undef TARGET_VALID_POINTER_MODE
10827 #define TARGET_VALID_POINTER_MODE s390_valid_pointer_mode
10828
10829 #undef TARGET_BUILD_BUILTIN_VA_LIST
10830 #define TARGET_BUILD_BUILTIN_VA_LIST s390_build_builtin_va_list
10831 #undef TARGET_EXPAND_BUILTIN_VA_START
10832 #define TARGET_EXPAND_BUILTIN_VA_START s390_va_start
10833 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
10834 #define TARGET_GIMPLIFY_VA_ARG_EXPR s390_gimplify_va_arg
10835
10836 #undef TARGET_PROMOTE_FUNCTION_MODE
10837 #define TARGET_PROMOTE_FUNCTION_MODE s390_promote_function_mode
10838 #undef TARGET_PASS_BY_REFERENCE
10839 #define TARGET_PASS_BY_REFERENCE s390_pass_by_reference
10840
10841 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
10842 #define TARGET_FUNCTION_OK_FOR_SIBCALL s390_function_ok_for_sibcall
10843 #undef TARGET_FUNCTION_ARG
10844 #define TARGET_FUNCTION_ARG s390_function_arg
10845 #undef TARGET_FUNCTION_ARG_ADVANCE
10846 #define TARGET_FUNCTION_ARG_ADVANCE s390_function_arg_advance
10847 #undef TARGET_FUNCTION_VALUE
10848 #define TARGET_FUNCTION_VALUE s390_function_value
10849 #undef TARGET_LIBCALL_VALUE
10850 #define TARGET_LIBCALL_VALUE s390_libcall_value
10851
10852 #undef TARGET_FIXED_CONDITION_CODE_REGS
10853 #define TARGET_FIXED_CONDITION_CODE_REGS s390_fixed_condition_code_regs
10854
10855 #undef TARGET_CC_MODES_COMPATIBLE
10856 #define TARGET_CC_MODES_COMPATIBLE s390_cc_modes_compatible
10857
10858 #undef TARGET_INVALID_WITHIN_DOLOOP
10859 #define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_null
10860
10861 #ifdef HAVE_AS_TLS
10862 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
10863 #define TARGET_ASM_OUTPUT_DWARF_DTPREL s390_output_dwarf_dtprel
10864 #endif
10865
10866 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
10867 #undef TARGET_MANGLE_TYPE
10868 #define TARGET_MANGLE_TYPE s390_mangle_type
10869 #endif
10870
10871 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10872 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
10873
10874 #undef TARGET_PREFERRED_RELOAD_CLASS
10875 #define TARGET_PREFERRED_RELOAD_CLASS s390_preferred_reload_class
10876
10877 #undef TARGET_SECONDARY_RELOAD
10878 #define TARGET_SECONDARY_RELOAD s390_secondary_reload
10879
10880 #undef TARGET_LIBGCC_CMP_RETURN_MODE
10881 #define TARGET_LIBGCC_CMP_RETURN_MODE s390_libgcc_cmp_return_mode
10882
10883 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
10884 #define TARGET_LIBGCC_SHIFT_COUNT_MODE s390_libgcc_shift_count_mode
10885
10886 #undef TARGET_LEGITIMATE_ADDRESS_P
10887 #define TARGET_LEGITIMATE_ADDRESS_P s390_legitimate_address_p
10888
10889 #undef TARGET_LEGITIMATE_CONSTANT_P
10890 #define TARGET_LEGITIMATE_CONSTANT_P s390_legitimate_constant_p
10891
10892 #undef TARGET_CAN_ELIMINATE
10893 #define TARGET_CAN_ELIMINATE s390_can_eliminate
10894
10895 #undef TARGET_CONDITIONAL_REGISTER_USAGE
10896 #define TARGET_CONDITIONAL_REGISTER_USAGE s390_conditional_register_usage
10897
10898 #undef TARGET_LOOP_UNROLL_ADJUST
10899 #define TARGET_LOOP_UNROLL_ADJUST s390_loop_unroll_adjust
10900
10901 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
10902 #define TARGET_ASM_TRAMPOLINE_TEMPLATE s390_asm_trampoline_template
10903 #undef TARGET_TRAMPOLINE_INIT
10904 #define TARGET_TRAMPOLINE_INIT s390_trampoline_init
10905
10906 #undef TARGET_UNWIND_WORD_MODE
10907 #define TARGET_UNWIND_WORD_MODE s390_unwind_word_mode
10908
10909 struct gcc_target targetm = TARGET_INITIALIZER;
10910
10911 #include "gt-s390.h"