gcc/config/s390/s390.c
1 /* Subroutines used for code generation on IBM S/390 and zSeries
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006,
3 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
4 Contributed by Hartmut Penner (hpenner@de.ibm.com) and
5 Ulrich Weigand (uweigand@de.ibm.com) and
6 Andreas Krebbel (Andreas.Krebbel@de.ibm.com).
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "tm_p.h"
31 #include "regs.h"
32 #include "hard-reg-set.h"
33 #include "insn-config.h"
34 #include "conditions.h"
35 #include "output.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "except.h"
39 #include "function.h"
40 #include "recog.h"
41 #include "expr.h"
42 #include "reload.h"
43 #include "diagnostic-core.h"
44 #include "toplev.h"
45 #include "basic-block.h"
46 #include "integrate.h"
47 #include "ggc.h"
48 #include "target.h"
49 #include "target-def.h"
50 #include "debug.h"
51 #include "langhooks.h"
52 #include "optabs.h"
53 #include "gimple.h"
54 #include "df.h"
55 #include "params.h"
56 #include "cfgloop.h"
57
58
59 /* Define the specific costs for a given cpu. */
60
61 struct processor_costs
62 {
63 /* multiplication */
64 const int m; /* cost of an M instruction. */
65 const int mghi; /* cost of an MGHI instruction. */
66 const int mh; /* cost of an MH instruction. */
67 const int mhi; /* cost of an MHI instruction. */
68 const int ml; /* cost of an ML instruction. */
69 const int mr; /* cost of an MR instruction. */
70 const int ms; /* cost of an MS instruction. */
71 const int msg; /* cost of an MSG instruction. */
72 const int msgf; /* cost of an MSGF instruction. */
73 const int msgfr; /* cost of an MSGFR instruction. */
74 const int msgr; /* cost of an MSGR instruction. */
75 const int msr; /* cost of an MSR instruction. */
76 const int mult_df; /* cost of multiplication in DFmode. */
77   const int mxbr;       /* cost of an MXBR instruction.  */
78 /* square root */
79 const int sqxbr; /* cost of square root in TFmode. */
80 const int sqdbr; /* cost of square root in DFmode. */
81 const int sqebr; /* cost of square root in SFmode. */
82 /* multiply and add */
83 const int madbr; /* cost of multiply and add in DFmode. */
84 const int maebr; /* cost of multiply and add in SFmode. */
85 /* division */
86   const int dxbr;       /* cost of a DXBR instruction.  */
87   const int ddbr;       /* cost of a DDBR instruction.  */
88   const int debr;       /* cost of a DEBR instruction.  */
89   const int dlgr;       /* cost of a DLGR instruction.  */
90   const int dlr;        /* cost of a DLR instruction.  */
91   const int dr;         /* cost of a DR instruction.  */
92   const int dsgfr;      /* cost of a DSGFR instruction.  */
93   const int dsgr;       /* cost of a DSGR instruction.  */
94 };
95
96 const struct processor_costs *s390_cost;
97
98 static const
99 struct processor_costs z900_cost =
100 {
101 COSTS_N_INSNS (5), /* M */
102 COSTS_N_INSNS (10), /* MGHI */
103 COSTS_N_INSNS (5), /* MH */
104 COSTS_N_INSNS (4), /* MHI */
105 COSTS_N_INSNS (5), /* ML */
106 COSTS_N_INSNS (5), /* MR */
107 COSTS_N_INSNS (4), /* MS */
108 COSTS_N_INSNS (15), /* MSG */
109 COSTS_N_INSNS (7), /* MSGF */
110 COSTS_N_INSNS (7), /* MSGFR */
111 COSTS_N_INSNS (10), /* MSGR */
112 COSTS_N_INSNS (4), /* MSR */
113 COSTS_N_INSNS (7), /* multiplication in DFmode */
114 COSTS_N_INSNS (13), /* MXBR */
115 COSTS_N_INSNS (136), /* SQXBR */
116 COSTS_N_INSNS (44), /* SQDBR */
117 COSTS_N_INSNS (35), /* SQEBR */
118 COSTS_N_INSNS (18), /* MADBR */
119 COSTS_N_INSNS (13), /* MAEBR */
120 COSTS_N_INSNS (134), /* DXBR */
121 COSTS_N_INSNS (30), /* DDBR */
122 COSTS_N_INSNS (27), /* DEBR */
123 COSTS_N_INSNS (220), /* DLGR */
124 COSTS_N_INSNS (34), /* DLR */
125 COSTS_N_INSNS (34), /* DR */
126 COSTS_N_INSNS (32), /* DSGFR */
127 COSTS_N_INSNS (32), /* DSGR */
128 };
129
130 static const
131 struct processor_costs z990_cost =
132 {
133 COSTS_N_INSNS (4), /* M */
134 COSTS_N_INSNS (2), /* MGHI */
135 COSTS_N_INSNS (2), /* MH */
136 COSTS_N_INSNS (2), /* MHI */
137 COSTS_N_INSNS (4), /* ML */
138 COSTS_N_INSNS (4), /* MR */
139 COSTS_N_INSNS (5), /* MS */
140 COSTS_N_INSNS (6), /* MSG */
141 COSTS_N_INSNS (4), /* MSGF */
142 COSTS_N_INSNS (4), /* MSGFR */
143 COSTS_N_INSNS (4), /* MSGR */
144 COSTS_N_INSNS (4), /* MSR */
145 COSTS_N_INSNS (1), /* multiplication in DFmode */
146 COSTS_N_INSNS (28), /* MXBR */
147 COSTS_N_INSNS (130), /* SQXBR */
148 COSTS_N_INSNS (66), /* SQDBR */
149 COSTS_N_INSNS (38), /* SQEBR */
150 COSTS_N_INSNS (1), /* MADBR */
151 COSTS_N_INSNS (1), /* MAEBR */
152 COSTS_N_INSNS (60), /* DXBR */
153 COSTS_N_INSNS (40), /* DDBR */
154 COSTS_N_INSNS (26), /* DEBR */
155 COSTS_N_INSNS (176), /* DLGR */
156 COSTS_N_INSNS (31), /* DLR */
157 COSTS_N_INSNS (31), /* DR */
158 COSTS_N_INSNS (31), /* DSGFR */
159 COSTS_N_INSNS (31), /* DSGR */
160 };
161
162 static const
163 struct processor_costs z9_109_cost =
164 {
165 COSTS_N_INSNS (4), /* M */
166 COSTS_N_INSNS (2), /* MGHI */
167 COSTS_N_INSNS (2), /* MH */
168 COSTS_N_INSNS (2), /* MHI */
169 COSTS_N_INSNS (4), /* ML */
170 COSTS_N_INSNS (4), /* MR */
171 COSTS_N_INSNS (5), /* MS */
172 COSTS_N_INSNS (6), /* MSG */
173 COSTS_N_INSNS (4), /* MSGF */
174 COSTS_N_INSNS (4), /* MSGFR */
175 COSTS_N_INSNS (4), /* MSGR */
176 COSTS_N_INSNS (4), /* MSR */
177 COSTS_N_INSNS (1), /* multiplication in DFmode */
178 COSTS_N_INSNS (28), /* MXBR */
179 COSTS_N_INSNS (130), /* SQXBR */
180 COSTS_N_INSNS (66), /* SQDBR */
181 COSTS_N_INSNS (38), /* SQEBR */
182 COSTS_N_INSNS (1), /* MADBR */
183 COSTS_N_INSNS (1), /* MAEBR */
184 COSTS_N_INSNS (60), /* DXBR */
185 COSTS_N_INSNS (40), /* DDBR */
186 COSTS_N_INSNS (26), /* DEBR */
187 COSTS_N_INSNS (30), /* DLGR */
188 COSTS_N_INSNS (23), /* DLR */
189 COSTS_N_INSNS (23), /* DR */
190 COSTS_N_INSNS (24), /* DSGFR */
191 COSTS_N_INSNS (24), /* DSGR */
192 };
193
194 static const
195 struct processor_costs z10_cost =
196 {
197 COSTS_N_INSNS (10), /* M */
198 COSTS_N_INSNS (10), /* MGHI */
199 COSTS_N_INSNS (10), /* MH */
200 COSTS_N_INSNS (10), /* MHI */
201 COSTS_N_INSNS (10), /* ML */
202 COSTS_N_INSNS (10), /* MR */
203 COSTS_N_INSNS (10), /* MS */
204 COSTS_N_INSNS (10), /* MSG */
205 COSTS_N_INSNS (10), /* MSGF */
206 COSTS_N_INSNS (10), /* MSGFR */
207 COSTS_N_INSNS (10), /* MSGR */
208 COSTS_N_INSNS (10), /* MSR */
209 COSTS_N_INSNS (1) , /* multiplication in DFmode */
210 COSTS_N_INSNS (50), /* MXBR */
211 COSTS_N_INSNS (120), /* SQXBR */
212 COSTS_N_INSNS (52), /* SQDBR */
213 COSTS_N_INSNS (38), /* SQEBR */
214 COSTS_N_INSNS (1), /* MADBR */
215 COSTS_N_INSNS (1), /* MAEBR */
216 COSTS_N_INSNS (111), /* DXBR */
217 COSTS_N_INSNS (39), /* DDBR */
218 COSTS_N_INSNS (32), /* DEBR */
219 COSTS_N_INSNS (160), /* DLGR */
220 COSTS_N_INSNS (71), /* DLR */
221 COSTS_N_INSNS (71), /* DR */
222 COSTS_N_INSNS (71), /* DSGFR */
223 COSTS_N_INSNS (71), /* DSGR */
224 };
225
226 static const
227 struct processor_costs z196_cost =
228 {
229 COSTS_N_INSNS (7), /* M */
230 COSTS_N_INSNS (5), /* MGHI */
231 COSTS_N_INSNS (5), /* MH */
232 COSTS_N_INSNS (5), /* MHI */
233 COSTS_N_INSNS (7), /* ML */
234 COSTS_N_INSNS (7), /* MR */
235 COSTS_N_INSNS (6), /* MS */
236 COSTS_N_INSNS (8), /* MSG */
237 COSTS_N_INSNS (6), /* MSGF */
238 COSTS_N_INSNS (6), /* MSGFR */
239 COSTS_N_INSNS (8), /* MSGR */
240 COSTS_N_INSNS (6), /* MSR */
241 COSTS_N_INSNS (1) , /* multiplication in DFmode */
242 COSTS_N_INSNS (40), /* MXBR B+40 */
243 COSTS_N_INSNS (100), /* SQXBR B+100 */
244 COSTS_N_INSNS (42), /* SQDBR B+42 */
245 COSTS_N_INSNS (28), /* SQEBR B+28 */
246 COSTS_N_INSNS (1), /* MADBR B */
247 COSTS_N_INSNS (1), /* MAEBR B */
248 COSTS_N_INSNS (101), /* DXBR B+101 */
249 COSTS_N_INSNS (29), /* DDBR */
250 COSTS_N_INSNS (22), /* DEBR */
251 COSTS_N_INSNS (160), /* DLGR cracked */
252 COSTS_N_INSNS (160), /* DLR cracked */
253 COSTS_N_INSNS (160), /* DR expanded */
254 COSTS_N_INSNS (160), /* DSGFR cracked */
255 COSTS_N_INSNS (160), /* DSGR cracked */
256 };
257
258 extern int reload_completed;
259
260 /* Kept up to date using the SCHED_VARIABLE_ISSUE hook. */
261 static rtx last_scheduled_insn;
262
263 /* Structure used to hold the components of a S/390 memory
264 address. A legitimate address on S/390 is of the general
265 form
266 base + index + displacement
267 where any of the components is optional.
268
269 base and index are registers of the class ADDR_REGS,
270 displacement is an unsigned 12-bit immediate constant. */
271
272 struct s390_address
273 {
274 rtx base;
275 rtx indx;
276 rtx disp;
277 bool pointer;
278 bool literal_pool;
279 };
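
/* As an illustration (not taken from the code below): the memory operand of
   the RX-format instruction  L %r1,42(%r2,%r3)  uses index register %r2,
   base register %r3 and displacement 42, i.e. it addresses the word at
   %r2 + %r3 + 42.  Any of the three components may be omitted.  */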
280
281 /* Which cpu are we tuning for. */
282 enum processor_type s390_tune = PROCESSOR_max;
283 int s390_tune_flags;
284 /* Which instruction set architecture to use. */
285 enum processor_type s390_arch;
286 int s390_arch_flags;
287
288 HOST_WIDE_INT s390_warn_framesize = 0;
289 HOST_WIDE_INT s390_stack_size = 0;
290 HOST_WIDE_INT s390_stack_guard = 0;
291
292 /* The following structure is embedded in the machine
293 specific part of struct function. */
294
295 struct GTY (()) s390_frame_layout
296 {
297 /* Offset within stack frame. */
298 HOST_WIDE_INT gprs_offset;
299 HOST_WIDE_INT f0_offset;
300 HOST_WIDE_INT f4_offset;
301 HOST_WIDE_INT f8_offset;
302 HOST_WIDE_INT backchain_offset;
303
304   /* Numbers of the first and last gpr for which slots in the register
305      save area are reserved.  */
306 int first_save_gpr_slot;
307 int last_save_gpr_slot;
308
309   /* Numbers of the first and last gpr to be saved and restored.  */
310 int first_save_gpr;
311 int first_restore_gpr;
312 int last_save_gpr;
313 int last_restore_gpr;
314
315   /* Bits standing for floating point registers.  A bit is set if the
316      respective register has to be saved, starting with reg 16 (f0)
317      at the rightmost bit.
318 Bit 15 - 8 7 6 5 4 3 2 1 0
319 fpr 15 - 8 7 5 3 1 6 4 2 0
320 reg 31 - 24 23 22 21 20 19 18 17 16 */
321 unsigned int fpr_bitmap;
322
323 /* Number of floating point registers f8-f15 which must be saved. */
324 int high_fprs;
325
326 /* Set if return address needs to be saved.
327 This flag is set by s390_return_addr_rtx if it could not use
328      the initial value of r14 and therefore depends on r14 being
329      saved to the stack.  */
330 bool save_return_addr_p;
331
332 /* Size of stack frame. */
333 HOST_WIDE_INT frame_size;
334 };
335
336 /* Define the structure for the machine field in struct function. */
337
338 struct GTY(()) machine_function
339 {
340 struct s390_frame_layout frame_layout;
341
342 /* Literal pool base register. */
343 rtx base_reg;
344
345 /* True if we may need to perform branch splitting. */
346 bool split_branches_pending_p;
347
348 /* Some local-dynamic TLS symbol name. */
349 const char *some_ld_name;
350
351 bool has_landing_pad_p;
352 };
353
354 /* A few accessor macros for struct cfun->machine->s390_frame_layout.  */
355
356 #define cfun_frame_layout (cfun->machine->frame_layout)
357 #define cfun_save_high_fprs_p (!!cfun_frame_layout.high_fprs)
358 #define cfun_gprs_save_area_size ((cfun_frame_layout.last_save_gpr_slot - \
359 cfun_frame_layout.first_save_gpr_slot + 1) * UNITS_PER_LONG)
360 #define cfun_set_fpr_bit(BITNUM) (cfun->machine->frame_layout.fpr_bitmap |= \
361 (1 << (BITNUM)))
362 #define cfun_fpr_bit_p(BITNUM) (!!(cfun->machine->frame_layout.fpr_bitmap & \
363 (1 << (BITNUM))))
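
/* Worked example for the cfun_set_fpr_bit / cfun_fpr_bit_p macros above
   (illustration only):  per the fpr_bitmap layout in struct
   s390_frame_layout, cfun_set_fpr_bit (2) sets bit 2, which stands for
   %f4 (hard reg 18); cfun_fpr_bit_p (2) subsequently returns true,
   meaning a save slot for %f4 is required.  */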
364
365 /* Number of GPRs and FPRs used for argument passing. */
366 #define GP_ARG_NUM_REG 5
367 #define FP_ARG_NUM_REG (TARGET_64BIT? 4 : 2)
368
369 /* A couple of shortcuts. */
370 #define CONST_OK_FOR_J(x) \
371 CONST_OK_FOR_CONSTRAINT_P((x), 'J', "J")
372 #define CONST_OK_FOR_K(x) \
373 CONST_OK_FOR_CONSTRAINT_P((x), 'K', "K")
374 #define CONST_OK_FOR_Os(x) \
375 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
376 #define CONST_OK_FOR_Op(x) \
377 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
378 #define CONST_OK_FOR_On(x) \
379 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")
380
381 #define REGNO_PAIR_OK(REGNO, MODE) \
382 (HARD_REGNO_NREGS ((REGNO), (MODE)) == 1 || !((REGNO) & 1))
383
384 /* The read-ahead distance of the dynamic branch prediction unit,
385    in bytes, on a z10 (or higher) CPU.  */
386 #define PREDICT_DISTANCE (TARGET_Z10 ? 384 : 2048)
387
388 static enum machine_mode
389 s390_libgcc_cmp_return_mode (void)
390 {
391 return TARGET_64BIT ? DImode : SImode;
392 }
393
394 static enum machine_mode
395 s390_libgcc_shift_count_mode (void)
396 {
397 return TARGET_64BIT ? DImode : SImode;
398 }
399
400 static enum machine_mode
401 s390_unwind_word_mode (void)
402 {
403 return TARGET_64BIT ? DImode : SImode;
404 }
405
406 /* Return true if the back end supports mode MODE. */
407 static bool
408 s390_scalar_mode_supported_p (enum machine_mode mode)
409 {
410   /* In contrast to the default implementation, reject TImode constants on
411      31-bit TARGET_ZARCH for ABI compliance.  */
412 if (!TARGET_64BIT && TARGET_ZARCH && mode == TImode)
413 return false;
414
415 if (DECIMAL_FLOAT_MODE_P (mode))
416 return default_decimal_float_supported_p ();
417
418 return default_scalar_mode_supported_p (mode);
419 }
420
421 /* Set the has_landing_pad_p flag in struct machine_function to VALUE. */
422
423 void
424 s390_set_has_landing_pad_p (bool value)
425 {
426 cfun->machine->has_landing_pad_p = value;
427 }
428
429 /* If two condition code modes are compatible, return a condition code
430 mode which is compatible with both. Otherwise, return
431 VOIDmode. */
432
433 static enum machine_mode
434 s390_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
435 {
436 if (m1 == m2)
437 return m1;
438
439 switch (m1)
440 {
441 case CCZmode:
442 if (m2 == CCUmode || m2 == CCTmode || m2 == CCZ1mode
443 || m2 == CCSmode || m2 == CCSRmode || m2 == CCURmode)
444 return m2;
445 return VOIDmode;
446
447 case CCSmode:
448 case CCUmode:
449 case CCTmode:
450 case CCSRmode:
451 case CCURmode:
452 case CCZ1mode:
453 if (m2 == CCZmode)
454 return m1;
455
456 return VOIDmode;
457
458 default:
459 return VOIDmode;
460 }
461 return VOIDmode;
462 }
463
464 /* Return true if SET either doesn't set the CC register, or else
465 the source and destination have matching CC modes and that
466 CC mode is at least as constrained as REQ_MODE. */
467
468 static bool
469 s390_match_ccmode_set (rtx set, enum machine_mode req_mode)
470 {
471 enum machine_mode set_mode;
472
473 gcc_assert (GET_CODE (set) == SET);
474
475 if (GET_CODE (SET_DEST (set)) != REG || !CC_REGNO_P (REGNO (SET_DEST (set))))
476 return 1;
477
478 set_mode = GET_MODE (SET_DEST (set));
479 switch (set_mode)
480 {
481 case CCSmode:
482 case CCSRmode:
483 case CCUmode:
484 case CCURmode:
485 case CCLmode:
486 case CCL1mode:
487 case CCL2mode:
488 case CCL3mode:
489 case CCT1mode:
490 case CCT2mode:
491 case CCT3mode:
492 if (req_mode != set_mode)
493 return 0;
494 break;
495
496 case CCZmode:
497 if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode
498 && req_mode != CCSRmode && req_mode != CCURmode)
499 return 0;
500 break;
501
502 case CCAPmode:
503 case CCANmode:
504 if (req_mode != CCAmode)
505 return 0;
506 break;
507
508 default:
509 gcc_unreachable ();
510 }
511
512 return (GET_MODE (SET_SRC (set)) == set_mode);
513 }
514
515 /* Return true if every SET in INSN that sets the CC register
516 has source and destination with matching CC modes and that
517 CC mode is at least as constrained as REQ_MODE.
518 If REQ_MODE is VOIDmode, always return false. */
519
520 bool
521 s390_match_ccmode (rtx insn, enum machine_mode req_mode)
522 {
523 int i;
524
525 /* s390_tm_ccmode returns VOIDmode to indicate failure. */
526 if (req_mode == VOIDmode)
527 return false;
528
529 if (GET_CODE (PATTERN (insn)) == SET)
530 return s390_match_ccmode_set (PATTERN (insn), req_mode);
531
532 if (GET_CODE (PATTERN (insn)) == PARALLEL)
533 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
534 {
535 rtx set = XVECEXP (PATTERN (insn), 0, i);
536 if (GET_CODE (set) == SET)
537 if (!s390_match_ccmode_set (set, req_mode))
538 return false;
539 }
540
541 return true;
542 }
543
544 /* If a test-under-mask instruction can be used to implement
545 (compare (and ... OP1) OP2), return the CC mode required
546 to do that. Otherwise, return VOIDmode.
547 MIXED is true if the instruction can distinguish between
548    CC1 and CC2 for mixed selected bits (TMxx); it is false
549 if the instruction cannot (TM). */
550
551 enum machine_mode
552 s390_tm_ccmode (rtx op1, rtx op2, bool mixed)
553 {
554 int bit0, bit1;
555
556 /* ??? Fixme: should work on CONST_DOUBLE as well. */
557 if (GET_CODE (op1) != CONST_INT || GET_CODE (op2) != CONST_INT)
558 return VOIDmode;
559
560 /* Selected bits all zero: CC0.
561 e.g.: int a; if ((a & (16 + 128)) == 0) */
562 if (INTVAL (op2) == 0)
563 return CCTmode;
564
565 /* Selected bits all one: CC3.
566 e.g.: int a; if ((a & (16 + 128)) == 16 + 128) */
567 if (INTVAL (op2) == INTVAL (op1))
568 return CCT3mode;
569
570 /* Exactly two bits selected, mixed zeroes and ones: CC1 or CC2. e.g.:
571 int a;
572 if ((a & (16 + 128)) == 16) -> CCT1
573 if ((a & (16 + 128)) == 128) -> CCT2 */
574 if (mixed)
575 {
576 bit1 = exact_log2 (INTVAL (op2));
577 bit0 = exact_log2 (INTVAL (op1) ^ INTVAL (op2));
578 if (bit0 != -1 && bit1 != -1)
579 return bit0 > bit1 ? CCT1mode : CCT2mode;
580 }
581
582 return VOIDmode;
583 }
584
585 /* Given a comparison code OP (EQ, NE, etc.) and the operands
586 OP0 and OP1 of a COMPARE, return the mode to be used for the
587 comparison. */
588
589 enum machine_mode
590 s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
591 {
592 switch (code)
593 {
594 case EQ:
595 case NE:
596 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
597 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
598 return CCAPmode;
599 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
600 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
601 return CCAPmode;
602 if ((GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
603 || GET_CODE (op1) == NEG)
604 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
605 return CCLmode;
606
607 if (GET_CODE (op0) == AND)
608 {
609 /* Check whether we can potentially do it via TM. */
610 enum machine_mode ccmode;
611 ccmode = s390_tm_ccmode (XEXP (op0, 1), op1, 1);
612 if (ccmode != VOIDmode)
613 {
614 /* Relax CCTmode to CCZmode to allow fall-back to AND
615 if that turns out to be beneficial. */
616 return ccmode == CCTmode ? CCZmode : ccmode;
617 }
618 }
619
620 if (register_operand (op0, HImode)
621 && GET_CODE (op1) == CONST_INT
622 && (INTVAL (op1) == -1 || INTVAL (op1) == 65535))
623 return CCT3mode;
624 if (register_operand (op0, QImode)
625 && GET_CODE (op1) == CONST_INT
626 && (INTVAL (op1) == -1 || INTVAL (op1) == 255))
627 return CCT3mode;
628
629 return CCZmode;
630
631 case LE:
632 case LT:
633 case GE:
634 case GT:
635       /* The only overflow condition of NEG and ABS happens when
636          INT_MIN is used as parameter: its negation is not representable
637          and wraps from a positive value back to a negative one.
638          Using CCAP mode the resulting cc can be used for comparisons.  */
639 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
640 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
641 return CCAPmode;
642
643       /* If a constant is involved in an add instruction it is possible to
644          use the resulting cc for comparisons with zero.  If the sign of the
645          constant is known the overflow behavior becomes predictable. e.g.:
646 int a, b; if ((b = a + c) > 0)
647 with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP */
648 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
649 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
650 {
651 if (INTVAL (XEXP((op0), 1)) < 0)
652 return CCANmode;
653 else
654 return CCAPmode;
655 }
656 /* Fall through. */
657 case UNORDERED:
658 case ORDERED:
659 case UNEQ:
660 case UNLE:
661 case UNLT:
662 case UNGE:
663 case UNGT:
664 case LTGT:
665 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
666 && GET_CODE (op1) != CONST_INT)
667 return CCSRmode;
668 return CCSmode;
669
670 case LTU:
671 case GEU:
672 if (GET_CODE (op0) == PLUS
673 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
674 return CCL1mode;
675
676 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
677 && GET_CODE (op1) != CONST_INT)
678 return CCURmode;
679 return CCUmode;
680
681 case LEU:
682 case GTU:
683 if (GET_CODE (op0) == MINUS
684 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
685 return CCL2mode;
686
687 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
688 && GET_CODE (op1) != CONST_INT)
689 return CCURmode;
690 return CCUmode;
691
692 default:
693 gcc_unreachable ();
694 }
695 }
696
697 /* Replace the comparison OP0 CODE OP1 by a semantically equivalent one
698 that we can implement more efficiently. */
699
700 void
701 s390_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1)
702 {
703 /* Convert ZERO_EXTRACT back to AND to enable TM patterns. */
704 if ((*code == EQ || *code == NE)
705 && *op1 == const0_rtx
706 && GET_CODE (*op0) == ZERO_EXTRACT
707 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
708 && GET_CODE (XEXP (*op0, 2)) == CONST_INT
709 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
710 {
711 rtx inner = XEXP (*op0, 0);
712 HOST_WIDE_INT modesize = GET_MODE_BITSIZE (GET_MODE (inner));
713 HOST_WIDE_INT len = INTVAL (XEXP (*op0, 1));
714 HOST_WIDE_INT pos = INTVAL (XEXP (*op0, 2));
715
716 if (len > 0 && len < modesize
717 && pos >= 0 && pos + len <= modesize
718 && modesize <= HOST_BITS_PER_WIDE_INT)
719 {
720 unsigned HOST_WIDE_INT block;
721 block = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
722 block <<= modesize - pos - len;
723
724 *op0 = gen_rtx_AND (GET_MODE (inner), inner,
725 gen_int_mode (block, GET_MODE (inner)));
726 }
727 }
728
729 /* Narrow AND of memory against immediate to enable TM. */
730 if ((*code == EQ || *code == NE)
731 && *op1 == const0_rtx
732 && GET_CODE (*op0) == AND
733 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
734 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
735 {
736 rtx inner = XEXP (*op0, 0);
737 rtx mask = XEXP (*op0, 1);
738
739 /* Ignore paradoxical SUBREGs if all extra bits are masked out. */
740 if (GET_CODE (inner) == SUBREG
741 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (inner)))
742 && (GET_MODE_SIZE (GET_MODE (inner))
743 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
744 && ((INTVAL (mask)
745 & GET_MODE_MASK (GET_MODE (inner))
746 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
747 == 0))
748 inner = SUBREG_REG (inner);
749
750 /* Do not change volatile MEMs. */
751 if (MEM_P (inner) && !MEM_VOLATILE_P (inner))
752 {
753 int part = s390_single_part (XEXP (*op0, 1),
754 GET_MODE (inner), QImode, 0);
755 if (part >= 0)
756 {
757 mask = gen_int_mode (s390_extract_part (mask, QImode, 0), QImode);
758 inner = adjust_address_nv (inner, QImode, part);
759 *op0 = gen_rtx_AND (QImode, inner, mask);
760 }
761 }
762 }
763
764 /* Narrow comparisons against 0xffff to HImode if possible. */
765 if ((*code == EQ || *code == NE)
766 && GET_CODE (*op1) == CONST_INT
767 && INTVAL (*op1) == 0xffff
768 && SCALAR_INT_MODE_P (GET_MODE (*op0))
769 && (nonzero_bits (*op0, GET_MODE (*op0))
770 & ~(unsigned HOST_WIDE_INT) 0xffff) == 0)
771 {
772 *op0 = gen_lowpart (HImode, *op0);
773 *op1 = constm1_rtx;
774 }
775
776 /* Remove redundant UNSPEC_CCU_TO_INT conversions if possible. */
777 if (GET_CODE (*op0) == UNSPEC
778 && XINT (*op0, 1) == UNSPEC_CCU_TO_INT
779 && XVECLEN (*op0, 0) == 1
780 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCUmode
781 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
782 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
783 && *op1 == const0_rtx)
784 {
785 enum rtx_code new_code = UNKNOWN;
786 switch (*code)
787 {
788 case EQ: new_code = EQ; break;
789 case NE: new_code = NE; break;
790 case LT: new_code = GTU; break;
791 case GT: new_code = LTU; break;
792 case LE: new_code = GEU; break;
793 case GE: new_code = LEU; break;
794 default: break;
795 }
796
797 if (new_code != UNKNOWN)
798 {
799 *op0 = XVECEXP (*op0, 0, 0);
800 *code = new_code;
801 }
802 }
803
804 /* Remove redundant UNSPEC_CCZ_TO_INT conversions if possible. */
805 if (GET_CODE (*op0) == UNSPEC
806 && XINT (*op0, 1) == UNSPEC_CCZ_TO_INT
807 && XVECLEN (*op0, 0) == 1
808 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCZmode
809 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
810 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
811 && *op1 == const0_rtx)
812 {
813 enum rtx_code new_code = UNKNOWN;
814 switch (*code)
815 {
816 case EQ: new_code = EQ; break;
817 case NE: new_code = NE; break;
818 default: break;
819 }
820
821 if (new_code != UNKNOWN)
822 {
823 *op0 = XVECEXP (*op0, 0, 0);
824 *code = new_code;
825 }
826 }
827
828 /* Simplify cascaded EQ, NE with const0_rtx. */
829 if ((*code == NE || *code == EQ)
830 && (GET_CODE (*op0) == EQ || GET_CODE (*op0) == NE)
831 && GET_MODE (*op0) == SImode
832 && GET_MODE (XEXP (*op0, 0)) == CCZ1mode
833 && REG_P (XEXP (*op0, 0))
834 && XEXP (*op0, 1) == const0_rtx
835 && *op1 == const0_rtx)
836 {
837 if ((*code == EQ && GET_CODE (*op0) == NE)
838 || (*code == NE && GET_CODE (*op0) == EQ))
839 *code = EQ;
840 else
841 *code = NE;
842 *op0 = XEXP (*op0, 0);
843 }
844
845 /* Prefer register over memory as first operand. */
846 if (MEM_P (*op0) && REG_P (*op1))
847 {
848 rtx tem = *op0; *op0 = *op1; *op1 = tem;
849 *code = swap_condition (*code);
850 }
851 }
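
/* Worked example for the ZERO_EXTRACT conversion above (illustration only;
   POS counts from the most significant bit, as assumed by that conversion):
   testing (zero_extract:SI (reg:SI x) (const_int 1) (const_int 7)) against
   zero extracts the single bit 7-from-the-MSB of a 32-bit value.  With
   LEN = 1 and POS = 7 the computed mask is 1 << (32 - 7 - 1) = 0x01000000,
   so the comparison becomes (and:SI (reg:SI x) (const_int 0x01000000)) == 0,
   which the TM patterns can match.  */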
852
853 /* Emit a compare instruction suitable to implement the comparison
854 OP0 CODE OP1. Return the correct condition RTL to be placed in
855 the IF_THEN_ELSE of the conditional branch testing the result. */
856
857 rtx
858 s390_emit_compare (enum rtx_code code, rtx op0, rtx op1)
859 {
860 enum machine_mode mode = s390_select_ccmode (code, op0, op1);
861 rtx cc;
862
863 /* Do not output a redundant compare instruction if a compare_and_swap
864 pattern already computed the result and the machine modes are compatible. */
865 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
866 {
867 gcc_assert (s390_cc_modes_compatible (GET_MODE (op0), mode)
868 == GET_MODE (op0));
869 cc = op0;
870 }
871 else
872 {
873 cc = gen_rtx_REG (mode, CC_REGNUM);
874 emit_insn (gen_rtx_SET (VOIDmode, cc, gen_rtx_COMPARE (mode, op0, op1)));
875 }
876
877 return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
878 }
879
880 /* Emit a SImode compare and swap instruction setting MEM to NEW_RTX if OLD
881 matches CMP.
882 Return the correct condition RTL to be placed in the IF_THEN_ELSE of the
883 conditional branch testing the result. */
884
885 static rtx
886 s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem, rtx cmp, rtx new_rtx)
887 {
888 emit_insn (gen_sync_compare_and_swapsi (old, mem, cmp, new_rtx));
889 return s390_emit_compare (code, gen_rtx_REG (CCZ1mode, CC_REGNUM), const0_rtx);
890 }
891
892 /* Emit a jump instruction to TARGET. If COND is NULL_RTX, emit an
893 unconditional jump, else a conditional jump under condition COND. */
894
895 void
896 s390_emit_jump (rtx target, rtx cond)
897 {
898 rtx insn;
899
900 target = gen_rtx_LABEL_REF (VOIDmode, target);
901 if (cond)
902 target = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, target, pc_rtx);
903
904 insn = gen_rtx_SET (VOIDmode, pc_rtx, target);
905 emit_jump_insn (insn);
906 }
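
/* Illustrative sketch of how the two helpers above are typically combined
   (not a verbatim quote from the expanders in this file):

       rtx cond = s390_emit_compare (GT, a, b);
       s390_emit_jump (label, cond);

   This emits a compare setting the CC register followed by a branch to
   LABEL that is taken if A is greater than B.  Passing NULL_RTX as COND to
   s390_emit_jump emits an unconditional jump instead.  */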
907
908 /* Return branch condition mask to implement a branch
909 specified by CODE. Return -1 for invalid comparisons. */
910
911 int
912 s390_branch_condition_mask (rtx code)
913 {
914 const int CC0 = 1 << 3;
915 const int CC1 = 1 << 2;
916 const int CC2 = 1 << 1;
917 const int CC3 = 1 << 0;
918
919 gcc_assert (GET_CODE (XEXP (code, 0)) == REG);
920 gcc_assert (REGNO (XEXP (code, 0)) == CC_REGNUM);
921 gcc_assert (XEXP (code, 1) == const0_rtx);
922
923 switch (GET_MODE (XEXP (code, 0)))
924 {
925 case CCZmode:
926 case CCZ1mode:
927 switch (GET_CODE (code))
928 {
929 case EQ: return CC0;
930 case NE: return CC1 | CC2 | CC3;
931 default: return -1;
932 }
933 break;
934
935 case CCT1mode:
936 switch (GET_CODE (code))
937 {
938 case EQ: return CC1;
939 case NE: return CC0 | CC2 | CC3;
940 default: return -1;
941 }
942 break;
943
944 case CCT2mode:
945 switch (GET_CODE (code))
946 {
947 case EQ: return CC2;
948 case NE: return CC0 | CC1 | CC3;
949 default: return -1;
950 }
951 break;
952
953 case CCT3mode:
954 switch (GET_CODE (code))
955 {
956 case EQ: return CC3;
957 case NE: return CC0 | CC1 | CC2;
958 default: return -1;
959 }
960 break;
961
962 case CCLmode:
963 switch (GET_CODE (code))
964 {
965 case EQ: return CC0 | CC2;
966 case NE: return CC1 | CC3;
967 default: return -1;
968 }
969 break;
970
971 case CCL1mode:
972 switch (GET_CODE (code))
973 {
974 case LTU: return CC2 | CC3; /* carry */
975 case GEU: return CC0 | CC1; /* no carry */
976 default: return -1;
977 }
978 break;
979
980 case CCL2mode:
981 switch (GET_CODE (code))
982 {
983 case GTU: return CC0 | CC1; /* borrow */
984 case LEU: return CC2 | CC3; /* no borrow */
985 default: return -1;
986 }
987 break;
988
989 case CCL3mode:
990 switch (GET_CODE (code))
991 {
992 case EQ: return CC0 | CC2;
993 case NE: return CC1 | CC3;
994 case LTU: return CC1;
995 case GTU: return CC3;
996 case LEU: return CC1 | CC2;
997 case GEU: return CC2 | CC3;
998 default: return -1;
999 }
1000
1001 case CCUmode:
1002 switch (GET_CODE (code))
1003 {
1004 case EQ: return CC0;
1005 case NE: return CC1 | CC2 | CC3;
1006 case LTU: return CC1;
1007 case GTU: return CC2;
1008 case LEU: return CC0 | CC1;
1009 case GEU: return CC0 | CC2;
1010 default: return -1;
1011 }
1012 break;
1013
1014 case CCURmode:
1015 switch (GET_CODE (code))
1016 {
1017 case EQ: return CC0;
1018 case NE: return CC2 | CC1 | CC3;
1019 case LTU: return CC2;
1020 case GTU: return CC1;
1021 case LEU: return CC0 | CC2;
1022 case GEU: return CC0 | CC1;
1023 default: return -1;
1024 }
1025 break;
1026
1027 case CCAPmode:
1028 switch (GET_CODE (code))
1029 {
1030 case EQ: return CC0;
1031 case NE: return CC1 | CC2 | CC3;
1032 case LT: return CC1 | CC3;
1033 case GT: return CC2;
1034 case LE: return CC0 | CC1 | CC3;
1035 case GE: return CC0 | CC2;
1036 default: return -1;
1037 }
1038 break;
1039
1040 case CCANmode:
1041 switch (GET_CODE (code))
1042 {
1043 case EQ: return CC0;
1044 case NE: return CC1 | CC2 | CC3;
1045 case LT: return CC1;
1046 case GT: return CC2 | CC3;
1047 case LE: return CC0 | CC1;
1048 case GE: return CC0 | CC2 | CC3;
1049 default: return -1;
1050 }
1051 break;
1052
1053 case CCSmode:
1054 switch (GET_CODE (code))
1055 {
1056 case EQ: return CC0;
1057 case NE: return CC1 | CC2 | CC3;
1058 case LT: return CC1;
1059 case GT: return CC2;
1060 case LE: return CC0 | CC1;
1061 case GE: return CC0 | CC2;
1062 case UNORDERED: return CC3;
1063 case ORDERED: return CC0 | CC1 | CC2;
1064 case UNEQ: return CC0 | CC3;
1065 case UNLT: return CC1 | CC3;
1066 case UNGT: return CC2 | CC3;
1067 case UNLE: return CC0 | CC1 | CC3;
1068 case UNGE: return CC0 | CC2 | CC3;
1069 case LTGT: return CC1 | CC2;
1070 default: return -1;
1071 }
1072 break;
1073
1074 case CCSRmode:
1075 switch (GET_CODE (code))
1076 {
1077 case EQ: return CC0;
1078 case NE: return CC2 | CC1 | CC3;
1079 case LT: return CC2;
1080 case GT: return CC1;
1081 case LE: return CC0 | CC2;
1082 case GE: return CC0 | CC1;
1083 case UNORDERED: return CC3;
1084 case ORDERED: return CC0 | CC2 | CC1;
1085 case UNEQ: return CC0 | CC3;
1086 case UNLT: return CC2 | CC3;
1087 case UNGT: return CC1 | CC3;
1088 case UNLE: return CC0 | CC2 | CC3;
1089 case UNGE: return CC0 | CC1 | CC3;
1090 case LTGT: return CC2 | CC1;
1091 default: return -1;
1092 }
1093 break;
1094
1095 default:
1096 return -1;
1097 }
1098 }
1099
1100
1101 /* Return branch condition mask to implement a compare and branch
1102 specified by CODE. Return -1 for invalid comparisons. */
1103
1104 int
1105 s390_compare_and_branch_condition_mask (rtx code)
1106 {
1107 const int CC0 = 1 << 3;
1108 const int CC1 = 1 << 2;
1109 const int CC2 = 1 << 1;
1110
1111 switch (GET_CODE (code))
1112 {
1113 case EQ:
1114 return CC0;
1115 case NE:
1116 return CC1 | CC2;
1117 case LT:
1118 case LTU:
1119 return CC1;
1120 case GT:
1121 case GTU:
1122 return CC2;
1123 case LE:
1124 case LEU:
1125 return CC0 | CC1;
1126 case GE:
1127 case GEU:
1128 return CC0 | CC2;
1129 default:
1130 gcc_unreachable ();
1131 }
1132 return -1;
1133 }
1134
1135 /* If INV is false, return assembler mnemonic string to implement
1136 a branch specified by CODE. If INV is true, return mnemonic
1137 for the corresponding inverted branch. */
1138
1139 static const char *
1140 s390_branch_condition_mnemonic (rtx code, int inv)
1141 {
1142 int mask;
1143
1144 static const char *const mnemonic[16] =
1145 {
1146 NULL, "o", "h", "nle",
1147 "l", "nhe", "lh", "ne",
1148 "e", "nlh", "he", "nl",
1149 "le", "nh", "no", NULL
1150 };
1151
1152 if (GET_CODE (XEXP (code, 0)) == REG
1153 && REGNO (XEXP (code, 0)) == CC_REGNUM
1154 && XEXP (code, 1) == const0_rtx)
1155 mask = s390_branch_condition_mask (code);
1156 else
1157 mask = s390_compare_and_branch_condition_mask (code);
1158
1159 gcc_assert (mask >= 0);
1160
1161 if (inv)
1162 mask ^= 15;
1163
1164 gcc_assert (mask >= 1 && mask <= 14);
1165
1166 return mnemonic[mask];
1167 }
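
/* Worked example for the two routines above (illustration only):  a signed
   GE comparison in CCSmode maps to the mask CC0 | CC2 = 8 | 2 = 10, so the
   condition mnemonic is "he".  Inverting the branch XORs the mask with 15,
   yielding 5 and the mnemonic "nhe".  */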
1168
1169 /* Return the part of OP which has a value different from DEF.
1170    The size of the part is determined by MODE.
1171    Use this function only if you already know that OP really
1172    contains such a part.  */
1173
1174 unsigned HOST_WIDE_INT
1175 s390_extract_part (rtx op, enum machine_mode mode, int def)
1176 {
1177 unsigned HOST_WIDE_INT value = 0;
1178 int max_parts = HOST_BITS_PER_WIDE_INT / GET_MODE_BITSIZE (mode);
1179 int part_bits = GET_MODE_BITSIZE (mode);
1180 unsigned HOST_WIDE_INT part_mask
1181 = ((unsigned HOST_WIDE_INT)1 << part_bits) - 1;
1182 int i;
1183
1184 for (i = 0; i < max_parts; i++)
1185 {
1186 if (i == 0)
1187 value = (unsigned HOST_WIDE_INT) INTVAL (op);
1188 else
1189 value >>= part_bits;
1190
1191 if ((value & part_mask) != (def & part_mask))
1192 return value & part_mask;
1193 }
1194
1195 gcc_unreachable ();
1196 }
1197
1198 /* If OP is an integer constant of mode MODE with exactly one
1199 part of mode PART_MODE unequal to DEF, return the number of that
1200 part. Otherwise, return -1. */
1201
1202 int
1203 s390_single_part (rtx op,
1204 enum machine_mode mode,
1205 enum machine_mode part_mode,
1206 int def)
1207 {
1208 unsigned HOST_WIDE_INT value = 0;
1209 int n_parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (part_mode);
1210 unsigned HOST_WIDE_INT part_mask
1211 = ((unsigned HOST_WIDE_INT)1 << GET_MODE_BITSIZE (part_mode)) - 1;
1212 int i, part = -1;
1213
1214 if (GET_CODE (op) != CONST_INT)
1215 return -1;
1216
1217 for (i = 0; i < n_parts; i++)
1218 {
1219 if (i == 0)
1220 value = (unsigned HOST_WIDE_INT) INTVAL (op);
1221 else
1222 value >>= GET_MODE_BITSIZE (part_mode);
1223
1224 if ((value & part_mask) != (def & part_mask))
1225 {
1226 if (part != -1)
1227 return -1;
1228 else
1229 part = i;
1230 }
1231 }
1232 return part == -1 ? -1 : n_parts - 1 - part;
1233 }
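
/* Worked example for the two functions above (illustration only, assuming a
   64-bit HOST_WIDE_INT):  for OP = 0x0000ffff00000000, MODE = DImode,
   PART_MODE = HImode and DEF = 0, exactly one of the four 16-bit parts
   differs from DEF.  s390_single_part returns 1 (the second part counted
   from the most significant end) and s390_extract_part returns its value,
   0xffff.  */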
1234
1235 /* Return true if IN contains a contiguous bitfield in the lower SIZE
1236 bits and no other bits are set in IN. POS and LENGTH can be used
1237 to obtain the start position and the length of the bitfield.
1238
1239 POS gives the position of the first bit of the bitfield counting
1240 from the lowest order bit starting with zero. In order to use this
1241 value for S/390 instructions this has to be converted to "bits big
1242 endian" style. */
1243
1244 bool
1245 s390_contiguous_bitmask_p (unsigned HOST_WIDE_INT in, int size,
1246 int *pos, int *length)
1247 {
1248 int tmp_pos = 0;
1249 int tmp_length = 0;
1250 int i;
1251 unsigned HOST_WIDE_INT mask = 1ULL;
1252 bool contiguous = false;
1253
1254 for (i = 0; i < size; mask <<= 1, i++)
1255 {
1256 if (contiguous)
1257 {
1258 if (mask & in)
1259 tmp_length++;
1260 else
1261 break;
1262 }
1263 else
1264 {
1265 if (mask & in)
1266 {
1267 contiguous = true;
1268 tmp_length++;
1269 }
1270 else
1271 tmp_pos++;
1272 }
1273 }
1274
1275 if (!tmp_length)
1276 return false;
1277
1278 /* Calculate a mask for all bits beyond the contiguous bits. */
1279 mask = (-1LL & ~(((1ULL << (tmp_length + tmp_pos - 1)) << 1) - 1));
1280
1281 if (mask & in)
1282 return false;
1283
1284 if (tmp_length + tmp_pos - 1 > size)
1285 return false;
1286
1287 if (length)
1288 *length = tmp_length;
1289
1290 if (pos)
1291 *pos = tmp_pos;
1292
1293 return true;
1294 }
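
/* Worked examples for s390_contiguous_bitmask_p (illustration only):
   IN = 0x0ff0 with SIZE = 16 is a single run of eight 1-bits starting at
   bit 4, so the function returns true with *POS = 4 and *LENGTH = 8.
   IN = 0x0f0f contains two separate runs and the function returns false.  */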
1295
1296 /* Check whether we can (and want to) split a double-word
1297 move in mode MODE from SRC to DST into two single-word
1298 moves, moving the subword FIRST_SUBWORD first. */
1299
1300 bool
1301 s390_split_ok_p (rtx dst, rtx src, enum machine_mode mode, int first_subword)
1302 {
1303 /* Floating point registers cannot be split. */
1304 if (FP_REG_P (src) || FP_REG_P (dst))
1305 return false;
1306
1307 /* We don't need to split if operands are directly accessible. */
1308 if (s_operand (src, mode) || s_operand (dst, mode))
1309 return false;
1310
1311 /* Non-offsettable memory references cannot be split. */
1312 if ((GET_CODE (src) == MEM && !offsettable_memref_p (src))
1313 || (GET_CODE (dst) == MEM && !offsettable_memref_p (dst)))
1314 return false;
1315
1316 /* Moving the first subword must not clobber a register
1317 needed to move the second subword. */
1318 if (register_operand (dst, mode))
1319 {
1320 rtx subreg = operand_subword (dst, first_subword, 0, mode);
1321 if (reg_overlap_mentioned_p (subreg, src))
1322 return false;
1323 }
1324
1325 return true;
1326 }
1327
1328 /* Return true if it can be proven that [MEM1, MEM1 + SIZE]
1329 and [MEM2, MEM2 + SIZE] do overlap and false
1330 otherwise. */
1331
1332 bool
1333 s390_overlap_p (rtx mem1, rtx mem2, HOST_WIDE_INT size)
1334 {
1335 rtx addr1, addr2, addr_delta;
1336 HOST_WIDE_INT delta;
1337
1338 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
1339 return true;
1340
1341 if (size == 0)
1342 return false;
1343
1344 addr1 = XEXP (mem1, 0);
1345 addr2 = XEXP (mem2, 0);
1346
1347 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
1348
1349 /* This overlapping check is used by peepholes merging memory block operations.
1350 Overlapping operations would otherwise be recognized by the S/390 hardware
1351 and would fall back to a slower implementation. Allowing overlapping
1352 operations would lead to slow code but not to wrong code. Therefore we are
1353 somewhat optimistic if we cannot prove that the memory blocks are
1354 overlapping.
1355 That's why we return false here although this may accept operations on
1356 overlapping memory areas. */
1357 if (!addr_delta || GET_CODE (addr_delta) != CONST_INT)
1358 return false;
1359
1360 delta = INTVAL (addr_delta);
1361
1362 if (delta == 0
1363 || (delta > 0 && delta < size)
1364 || (delta < 0 && -delta < size))
1365 return true;
1366
1367 return false;
1368 }
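
/* Worked example for s390_overlap_p (illustration only):  two MEMs whose
   addresses are (plus %r2 8) and (plus %r2 24) have a provable address
   difference of 16.  With SIZE = 32 the blocks overlap and the function
   returns true; with SIZE = 16 they are adjacent but disjoint and it
   returns false.  If the difference is not a compile-time constant, false
   is returned as well, as explained in the comment inside the function.  */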
1369
1370 /* Check whether the address of memory reference MEM2 equals exactly
1371 the address of memory reference MEM1 plus DELTA. Return true if
1372 we can prove this to be the case, false otherwise. */
1373
1374 bool
1375 s390_offset_p (rtx mem1, rtx mem2, rtx delta)
1376 {
1377 rtx addr1, addr2, addr_delta;
1378
1379 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
1380 return false;
1381
1382 addr1 = XEXP (mem1, 0);
1383 addr2 = XEXP (mem2, 0);
1384
1385 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
1386 if (!addr_delta || !rtx_equal_p (addr_delta, delta))
1387 return false;
1388
1389 return true;
1390 }
1391
1392 /* Expand logical operator CODE in mode MODE with operands OPERANDS. */
1393
1394 void
1395 s390_expand_logical_operator (enum rtx_code code, enum machine_mode mode,
1396 rtx *operands)
1397 {
1398 enum machine_mode wmode = mode;
1399 rtx dst = operands[0];
1400 rtx src1 = operands[1];
1401 rtx src2 = operands[2];
1402 rtx op, clob, tem;
1403
1404 /* If we cannot handle the operation directly, use a temp register. */
1405 if (!s390_logical_operator_ok_p (operands))
1406 dst = gen_reg_rtx (mode);
1407
1408 /* QImode and HImode patterns make sense only if we have a destination
1409 in memory. Otherwise perform the operation in SImode. */
1410 if ((mode == QImode || mode == HImode) && GET_CODE (dst) != MEM)
1411 wmode = SImode;
1412
1413 /* Widen operands if required. */
1414 if (mode != wmode)
1415 {
1416 if (GET_CODE (dst) == SUBREG
1417 && (tem = simplify_subreg (wmode, dst, mode, 0)) != 0)
1418 dst = tem;
1419 else if (REG_P (dst))
1420 dst = gen_rtx_SUBREG (wmode, dst, 0);
1421 else
1422 dst = gen_reg_rtx (wmode);
1423
1424 if (GET_CODE (src1) == SUBREG
1425 && (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)
1426 src1 = tem;
1427 else if (GET_MODE (src1) != VOIDmode)
1428 src1 = gen_rtx_SUBREG (wmode, force_reg (mode, src1), 0);
1429
1430 if (GET_CODE (src2) == SUBREG
1431 && (tem = simplify_subreg (wmode, src2, mode, 0)) != 0)
1432 src2 = tem;
1433 else if (GET_MODE (src2) != VOIDmode)
1434 src2 = gen_rtx_SUBREG (wmode, force_reg (mode, src2), 0);
1435 }
1436
1437 /* Emit the instruction. */
1438 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, wmode, src1, src2));
1439 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
1440 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
1441
1442 /* Fix up the destination if needed. */
1443 if (dst != operands[0])
1444 emit_move_insn (operands[0], gen_lowpart (mode, dst));
1445 }
1446
1447 /* Check whether OPERANDS are OK for a logical operation (AND, IOR, XOR). */
1448
1449 bool
1450 s390_logical_operator_ok_p (rtx *operands)
1451 {
1452 /* If the destination operand is in memory, it needs to coincide
1453 with one of the source operands. After reload, it has to be
1454 the first source operand. */
1455 if (GET_CODE (operands[0]) == MEM)
1456 return rtx_equal_p (operands[0], operands[1])
1457 || (!reload_completed && rtx_equal_p (operands[0], operands[2]));
1458
1459 return true;
1460 }
1461
1462 /* Narrow logical operation CODE of memory operand MEMOP with immediate
1463 operand IMMOP to switch from SS to SI type instructions. */
1464
1465 void
1466 s390_narrow_logical_operator (enum rtx_code code, rtx *memop, rtx *immop)
1467 {
1468 int def = code == AND ? -1 : 0;
1469 HOST_WIDE_INT mask;
1470 int part;
1471
1472 gcc_assert (GET_CODE (*memop) == MEM);
1473 gcc_assert (!MEM_VOLATILE_P (*memop));
1474
1475 mask = s390_extract_part (*immop, QImode, def);
1476 part = s390_single_part (*immop, GET_MODE (*memop), QImode, def);
1477 gcc_assert (part >= 0);
1478
1479 *memop = adjust_address (*memop, QImode, part);
1480 *immop = gen_int_mode (mask, QImode);
1481 }
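
/* Worked example for s390_narrow_logical_operator (illustration only):  for
   an AND of an SImode memory operand with the immediate 0xffffff0f (DEF is
   -1 for AND), only the least significant byte differs from 0xff.
   s390_single_part locates it as byte 3 of the big-endian word, so *MEMOP
   is rewritten as a QImode access to that byte and *IMMOP becomes 0x0f,
   allowing an SI-type instruction (NI) instead of an SS-type one.  */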
1482
1483
1484 /* How to allocate a 'struct machine_function'. */
1485
1486 static struct machine_function *
1487 s390_init_machine_status (void)
1488 {
1489 return ggc_alloc_cleared_machine_function ();
1490 }
1491
1492 /* Change optimizations to be performed, depending on the
1493 optimization level.
1494
1495 LEVEL is the optimization level specified; 2 if `-O2' is
1496 specified, 1 if `-O' is specified, and 0 if neither is specified.
1497
1498 SIZE is nonzero if `-Os' is specified and zero otherwise. */
1499
1500 static void
1501 s390_option_optimization (int level ATTRIBUTE_UNUSED, int size)
1502 {
1503 /* ??? There are apparently still problems with -fcaller-saves. */
1504 flag_caller_saves = 0;
1505
1506 /* By default, always emit DWARF-2 unwind info. This allows debugging
1507 without maintaining a stack frame back-chain. */
1508 flag_asynchronous_unwind_tables = 1;
1509
1510 /* Use MVCLE instructions to decrease code size if requested. */
1511 if (size != 0)
1512 target_flags |= MASK_MVCLE;
1513 }
1514
1515 /* Return true if ARG is the name of a processor. Set *TYPE and *FLAGS
1516 to the associated processor_type and processor_flags if so. */
1517
1518 static bool
1519 s390_handle_arch_option (const char *arg,
1520 enum processor_type *type,
1521 int *flags)
1522 {
1523 static struct pta
1524 {
1525 const char *const name; /* processor name or nickname. */
1526 const enum processor_type processor;
1527 const int flags; /* From enum processor_flags. */
1528 }
1529 const processor_alias_table[] =
1530 {
1531 {"g5", PROCESSOR_9672_G5, PF_IEEE_FLOAT},
1532 {"g6", PROCESSOR_9672_G6, PF_IEEE_FLOAT},
1533 {"z900", PROCESSOR_2064_Z900, PF_IEEE_FLOAT | PF_ZARCH},
1534 {"z990", PROCESSOR_2084_Z990, PF_IEEE_FLOAT | PF_ZARCH
1535 | PF_LONG_DISPLACEMENT},
1536 {"z9-109", PROCESSOR_2094_Z9_109, PF_IEEE_FLOAT | PF_ZARCH
1537 | PF_LONG_DISPLACEMENT | PF_EXTIMM},
1538 {"z9-ec", PROCESSOR_2094_Z9_109, PF_IEEE_FLOAT | PF_ZARCH
1539 | PF_LONG_DISPLACEMENT | PF_EXTIMM | PF_DFP },
1540 {"z10", PROCESSOR_2097_Z10, PF_IEEE_FLOAT | PF_ZARCH
1541 | PF_LONG_DISPLACEMENT | PF_EXTIMM | PF_DFP | PF_Z10},
1542 {"z196", PROCESSOR_2817_Z196, PF_IEEE_FLOAT | PF_ZARCH
1543 | PF_LONG_DISPLACEMENT | PF_EXTIMM | PF_DFP | PF_Z10 | PF_Z196 },
1544 };
1545 size_t i;
1546
1547 for (i = 0; i < ARRAY_SIZE (processor_alias_table); i++)
1548 if (strcmp (arg, processor_alias_table[i].name) == 0)
1549 {
1550 *type = processor_alias_table[i].processor;
1551 *flags = processor_alias_table[i].flags;
1552 return true;
1553 }
1554 return false;
1555 }
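
/* Example of the table lookup above (illustration only):  "-march=z9-ec"
   sets *TYPE to PROCESSOR_2094_Z9_109 and *FLAGS to a mask including
   PF_ZARCH, PF_EXTIMM and PF_DFP; "-mtune=" goes through the same routine
   but only affects s390_tune and s390_tune_flags (see s390_handle_option
   below).  */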
1556
1557 /* Implement TARGET_HANDLE_OPTION. */
1558
1559 static bool
1560 s390_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
1561 {
1562 switch (code)
1563 {
1564 case OPT_march_:
1565 return s390_handle_arch_option (arg, &s390_arch, &s390_arch_flags);
1566
1567 case OPT_mstack_guard_:
1568 if (sscanf (arg, HOST_WIDE_INT_PRINT_DEC, &s390_stack_guard) != 1)
1569 return false;
1570 if (exact_log2 (s390_stack_guard) == -1)
1571 error ("stack guard value must be an exact power of 2");
1572 return true;
1573
1574 case OPT_mstack_size_:
1575 if (sscanf (arg, HOST_WIDE_INT_PRINT_DEC, &s390_stack_size) != 1)
1576 return false;
1577 if (exact_log2 (s390_stack_size) == -1)
1578 error ("stack size must be an exact power of 2");
1579 return true;
1580
1581 case OPT_mtune_:
1582 return s390_handle_arch_option (arg, &s390_tune, &s390_tune_flags);
1583
1584 case OPT_mwarn_framesize_:
1585 return sscanf (arg, HOST_WIDE_INT_PRINT_DEC, &s390_warn_framesize) == 1;
1586
1587 default:
1588 return true;
1589 }
1590 }
1591
1592 static void
1593 s390_option_override (void)
1594 {
1595 /* Set up function hooks. */
1596 init_machine_status = s390_init_machine_status;
1597
1598 /* Architecture mode defaults according to ABI. */
1599 if (!(target_flags_explicit & MASK_ZARCH))
1600 {
1601 if (TARGET_64BIT)
1602 target_flags |= MASK_ZARCH;
1603 else
1604 target_flags &= ~MASK_ZARCH;
1605 }
1606
1607 /* Determine processor architectural level. */
1608 if (!s390_arch_string)
1609 {
1610 s390_arch_string = TARGET_ZARCH? "z900" : "g5";
1611 s390_handle_arch_option (s390_arch_string, &s390_arch, &s390_arch_flags);
1612 }
1613
1614 /* Determine processor to tune for. */
1615 if (s390_tune == PROCESSOR_max)
1616 {
1617 s390_tune = s390_arch;
1618 s390_tune_flags = s390_arch_flags;
1619 }
1620
1621 /* Sanity checks. */
1622 if (TARGET_ZARCH && !TARGET_CPU_ZARCH)
1623 error ("z/Architecture mode not supported on %s", s390_arch_string);
1624 if (TARGET_64BIT && !TARGET_ZARCH)
1625 error ("64-bit ABI not supported in ESA/390 mode");
1626
1627 if (TARGET_HARD_DFP && !TARGET_DFP)
1628 {
1629 if (target_flags_explicit & MASK_HARD_DFP)
1630 {
1631 if (!TARGET_CPU_DFP)
1632 error ("Hardware decimal floating point instructions"
1633 " not available on %s", s390_arch_string);
1634 if (!TARGET_ZARCH)
1635 error ("Hardware decimal floating point instructions"
1636 " not available in ESA/390 mode");
1637 }
1638 else
1639 target_flags &= ~MASK_HARD_DFP;
1640 }
1641
1642 if ((target_flags_explicit & MASK_SOFT_FLOAT) && TARGET_SOFT_FLOAT)
1643 {
1644 if ((target_flags_explicit & MASK_HARD_DFP) && TARGET_HARD_DFP)
1645 error ("-mhard-dfp can't be used in conjunction with -msoft-float");
1646
1647 target_flags &= ~MASK_HARD_DFP;
1648 }
1649
1650 /* Set processor cost function. */
1651 switch (s390_tune)
1652 {
1653 case PROCESSOR_2084_Z990:
1654 s390_cost = &z990_cost;
1655 break;
1656 case PROCESSOR_2094_Z9_109:
1657 s390_cost = &z9_109_cost;
1658 break;
1659 case PROCESSOR_2097_Z10:
1660 s390_cost = &z10_cost;
1661 case PROCESSOR_2817_Z196:
1662 s390_cost = &z196_cost;
1663 break;
1664 default:
1665 s390_cost = &z900_cost;
1666 }
1667
1668 if (TARGET_BACKCHAIN && TARGET_PACKED_STACK && TARGET_HARD_FLOAT)
1669 error ("-mbackchain -mpacked-stack -mhard-float are not supported "
1670 "in combination");
1671
1672 if (s390_stack_size)
1673 {
1674 if (s390_stack_guard >= s390_stack_size)
1675 error ("stack size must be greater than the stack guard value");
1676 else if (s390_stack_size > 1 << 16)
1677 error ("stack size must not be greater than 64k");
1678 }
1679 else if (s390_stack_guard)
1680 error ("-mstack-guard implies use of -mstack-size");
1681
1682 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
1683 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
1684 target_flags |= MASK_LONG_DOUBLE_128;
1685 #endif
1686
1687 if (s390_tune == PROCESSOR_2097_Z10
1688 || s390_tune == PROCESSOR_2817_Z196)
1689 {
1690 if (!PARAM_SET_P (PARAM_MAX_UNROLLED_INSNS))
1691 set_param_value ("max-unrolled-insns", 100);
1692 if (!PARAM_SET_P (PARAM_MAX_UNROLL_TIMES))
1693 set_param_value ("max-unroll-times", 32);
1694 if (!PARAM_SET_P (PARAM_MAX_COMPLETELY_PEELED_INSNS))
1695 set_param_value ("max-completely-peeled-insns", 2000);
1696 if (!PARAM_SET_P (PARAM_MAX_COMPLETELY_PEEL_TIMES))
1697 set_param_value ("max-completely-peel-times", 64);
1698 }
1699
1700 set_param_value ("max-pending-list-length", 256);
1701   /* Values for loop prefetching.  */
1702 set_param_value ("l1-cache-line-size", 256);
1703 if (!PARAM_SET_P (PARAM_L1_CACHE_SIZE))
1704 set_param_value ("l1-cache-size", 128);
1705   /* The s390 has more than two cache levels and their sizes are much
1706      larger.  Since we are always running virtualized, assume that we
1707      only get a small part of the caches above l1.  */
1708 if (!PARAM_SET_P (PARAM_L2_CACHE_SIZE))
1709 set_param_value ("l2-cache-size", 1500);
1710 if (!PARAM_SET_P (PARAM_PREFETCH_MIN_INSN_TO_MEM_RATIO))
1711 set_param_value ("prefetch-min-insn-to-mem-ratio", 2);
1712 if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
1713 set_param_value ("simultaneous-prefetches", 6);
1714
1715 /* This cannot reside in s390_option_optimization since HAVE_prefetch
1716 requires the arch flags to be evaluated already. Since prefetching
1717 is beneficial on s390, we enable it if available. */
1718 if (flag_prefetch_loop_arrays < 0 && HAVE_prefetch && optimize >= 3)
1719 flag_prefetch_loop_arrays = 1;
1720 }
1721
1722 /* Map for smallest class containing reg regno. */
1723
1724 const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
1725 { GENERAL_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1726 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1727 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1728 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1729 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1730 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1731 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1732 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1733 ADDR_REGS, CC_REGS, ADDR_REGS, ADDR_REGS,
1734 ACCESS_REGS, ACCESS_REGS
1735 };
1736
1737 /* Return attribute type of insn. */
1738
1739 static enum attr_type
1740 s390_safe_attr_type (rtx insn)
1741 {
1742 if (recog_memoized (insn) >= 0)
1743 return get_attr_type (insn);
1744 else
1745 return TYPE_NONE;
1746 }
1747
1748 /* Return true if DISP is a valid short displacement. */
1749
1750 static bool
1751 s390_short_displacement (rtx disp)
1752 {
1753 /* No displacement is OK. */
1754 if (!disp)
1755 return true;
1756
1757 /* Without the long displacement facility we don't need to
1758      distinguish between long and short displacements.  */
1759 if (!TARGET_LONG_DISPLACEMENT)
1760 return true;
1761
1762 /* Integer displacement in range. */
1763 if (GET_CODE (disp) == CONST_INT)
1764 return INTVAL (disp) >= 0 && INTVAL (disp) < 4096;
1765
1766 /* GOT offset is not OK, the GOT can be large. */
1767 if (GET_CODE (disp) == CONST
1768 && GET_CODE (XEXP (disp, 0)) == UNSPEC
1769 && (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
1770 || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
1771 return false;
1772
1773 /* All other symbolic constants are literal pool references,
1774 which are OK as the literal pool must be small. */
1775 if (GET_CODE (disp) == CONST)
1776 return true;
1777
1778 return false;
1779 }
1780
1781 /* Decompose a RTL expression ADDR for a memory address into
1782 its components, returned in OUT.
1783
1784 Returns false if ADDR is not a valid memory address, true
1785 otherwise. If OUT is NULL, don't return the components,
1786 but check for validity only.
1787
1788 Note: Only addresses in canonical form are recognized.
1789 LEGITIMIZE_ADDRESS should convert non-canonical forms to the
1790 canonical form so that they will be recognized. */
1791
1792 static int
1793 s390_decompose_address (rtx addr, struct s390_address *out)
1794 {
1795 HOST_WIDE_INT offset = 0;
1796 rtx base = NULL_RTX;
1797 rtx indx = NULL_RTX;
1798 rtx disp = NULL_RTX;
1799 rtx orig_disp;
1800 bool pointer = false;
1801 bool base_ptr = false;
1802 bool indx_ptr = false;
1803 bool literal_pool = false;
1804
1805 /* We may need to substitute the literal pool base register into the address
1806 below. However, at this point we do not know which register is going to
1807 be used as base, so we substitute the arg pointer register. This is going
1808 to be treated as holding a pointer below -- it shouldn't be used for any
1809 other purpose. */
1810 rtx fake_pool_base = gen_rtx_REG (Pmode, ARG_POINTER_REGNUM);
1811
1812 /* Decompose address into base + index + displacement. */
1813
1814 if (GET_CODE (addr) == REG || GET_CODE (addr) == UNSPEC)
1815 base = addr;
1816
1817 else if (GET_CODE (addr) == PLUS)
1818 {
1819 rtx op0 = XEXP (addr, 0);
1820 rtx op1 = XEXP (addr, 1);
1821 enum rtx_code code0 = GET_CODE (op0);
1822 enum rtx_code code1 = GET_CODE (op1);
1823
1824 if (code0 == REG || code0 == UNSPEC)
1825 {
1826 if (code1 == REG || code1 == UNSPEC)
1827 {
1828 indx = op0; /* index + base */
1829 base = op1;
1830 }
1831
1832 else
1833 {
1834 base = op0; /* base + displacement */
1835 disp = op1;
1836 }
1837 }
1838
1839 else if (code0 == PLUS)
1840 {
1841 indx = XEXP (op0, 0); /* index + base + disp */
1842 base = XEXP (op0, 1);
1843 disp = op1;
1844 }
1845
1846 else
1847 {
1848 return false;
1849 }
1850 }
1851
1852 else
1853 disp = addr; /* displacement */
1854
1855 /* Extract integer part of displacement. */
1856 orig_disp = disp;
1857 if (disp)
1858 {
1859 if (GET_CODE (disp) == CONST_INT)
1860 {
1861 offset = INTVAL (disp);
1862 disp = NULL_RTX;
1863 }
1864 else if (GET_CODE (disp) == CONST
1865 && GET_CODE (XEXP (disp, 0)) == PLUS
1866 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
1867 {
1868 offset = INTVAL (XEXP (XEXP (disp, 0), 1));
1869 disp = XEXP (XEXP (disp, 0), 0);
1870 }
1871 }
1872
1873 /* Strip off CONST here to avoid special case tests later. */
1874 if (disp && GET_CODE (disp) == CONST)
1875 disp = XEXP (disp, 0);
1876
1877 /* We can convert literal pool addresses to
1878 displacements by basing them off the base register. */
1879 if (disp && GET_CODE (disp) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (disp))
1880 {
1881 /* Either base or index must be free to hold the base register. */
1882 if (!base)
1883 base = fake_pool_base, literal_pool = true;
1884 else if (!indx)
1885 indx = fake_pool_base, literal_pool = true;
1886 else
1887 return false;
1888
1889 /* Mark up the displacement. */
1890 disp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, disp),
1891 UNSPEC_LTREL_OFFSET);
1892 }
1893
1894 /* Validate base register. */
1895 if (base)
1896 {
1897 if (GET_CODE (base) == UNSPEC)
1898 switch (XINT (base, 1))
1899 {
1900 case UNSPEC_LTREF:
1901 if (!disp)
1902 disp = gen_rtx_UNSPEC (Pmode,
1903 gen_rtvec (1, XVECEXP (base, 0, 0)),
1904 UNSPEC_LTREL_OFFSET);
1905 else
1906 return false;
1907
1908 base = XVECEXP (base, 0, 1);
1909 break;
1910
1911 case UNSPEC_LTREL_BASE:
1912 if (XVECLEN (base, 0) == 1)
1913 base = fake_pool_base, literal_pool = true;
1914 else
1915 base = XVECEXP (base, 0, 1);
1916 break;
1917
1918 default:
1919 return false;
1920 }
1921
1922 if (!REG_P (base)
1923 || (GET_MODE (base) != SImode
1924 && GET_MODE (base) != Pmode))
1925 return false;
1926
1927 if (REGNO (base) == STACK_POINTER_REGNUM
1928 || REGNO (base) == FRAME_POINTER_REGNUM
1929 || ((reload_completed || reload_in_progress)
1930 && frame_pointer_needed
1931 && REGNO (base) == HARD_FRAME_POINTER_REGNUM)
1932 || REGNO (base) == ARG_POINTER_REGNUM
1933 || (flag_pic
1934 && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
1935 pointer = base_ptr = true;
1936
1937 if ((reload_completed || reload_in_progress)
1938 && base == cfun->machine->base_reg)
1939 pointer = base_ptr = literal_pool = true;
1940 }
1941
1942 /* Validate index register. */
1943 if (indx)
1944 {
1945 if (GET_CODE (indx) == UNSPEC)
1946 switch (XINT (indx, 1))
1947 {
1948 case UNSPEC_LTREF:
1949 if (!disp)
1950 disp = gen_rtx_UNSPEC (Pmode,
1951 gen_rtvec (1, XVECEXP (indx, 0, 0)),
1952 UNSPEC_LTREL_OFFSET);
1953 else
1954 return false;
1955
1956 indx = XVECEXP (indx, 0, 1);
1957 break;
1958
1959 case UNSPEC_LTREL_BASE:
1960 if (XVECLEN (indx, 0) == 1)
1961 indx = fake_pool_base, literal_pool = true;
1962 else
1963 indx = XVECEXP (indx, 0, 1);
1964 break;
1965
1966 default:
1967 return false;
1968 }
1969
1970 if (!REG_P (indx)
1971 || (GET_MODE (indx) != SImode
1972 && GET_MODE (indx) != Pmode))
1973 return false;
1974
1975 if (REGNO (indx) == STACK_POINTER_REGNUM
1976 || REGNO (indx) == FRAME_POINTER_REGNUM
1977 || ((reload_completed || reload_in_progress)
1978 && frame_pointer_needed
1979 && REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
1980 || REGNO (indx) == ARG_POINTER_REGNUM
1981 || (flag_pic
1982 && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
1983 pointer = indx_ptr = true;
1984
1985 if ((reload_completed || reload_in_progress)
1986 && indx == cfun->machine->base_reg)
1987 pointer = indx_ptr = literal_pool = true;
1988 }
1989
1990 /* Prefer to use pointer as base, not index. */
1991 if (base && indx && !base_ptr
1992 && (indx_ptr || (!REG_POINTER (base) && REG_POINTER (indx))))
1993 {
1994 rtx tmp = base;
1995 base = indx;
1996 indx = tmp;
1997 }
1998
1999 /* Validate displacement. */
2000 if (!disp)
2001 {
2002 /* If virtual registers are involved, the displacement will change later
2003 anyway as the virtual registers get eliminated. This could make a
2004 valid displacement invalid, but it is more likely to make an invalid
2005 displacement valid, because we sometimes access the register save area
2006 via negative offsets to one of those registers.
2007 Thus we don't check the displacement for validity here. If after
2008 elimination the displacement turns out to be invalid after all,
2009 this is fixed up by reload in any case. */
2010 if (base != arg_pointer_rtx
2011 && indx != arg_pointer_rtx
2012 && base != return_address_pointer_rtx
2013 && indx != return_address_pointer_rtx
2014 && base != frame_pointer_rtx
2015 && indx != frame_pointer_rtx
2016 && base != virtual_stack_vars_rtx
2017 && indx != virtual_stack_vars_rtx)
2018 if (!DISP_IN_RANGE (offset))
2019 return false;
2020 }
2021 else
2022 {
2023 /* All the special cases are pointers. */
2024 pointer = true;
2025
2026 /* In the small-PIC case, the linker converts @GOT
2027 and @GOTNTPOFF offsets to possible displacements. */
2028 if (GET_CODE (disp) == UNSPEC
2029 && (XINT (disp, 1) == UNSPEC_GOT
2030 || XINT (disp, 1) == UNSPEC_GOTNTPOFF)
2031 && flag_pic == 1)
2032 {
2033 ;
2034 }
2035
2036 /* Accept pool label offsets. */
2037 else if (GET_CODE (disp) == UNSPEC
2038 && XINT (disp, 1) == UNSPEC_POOL_OFFSET)
2039 ;
2040
2041 /* Accept literal pool references. */
2042 else if (GET_CODE (disp) == UNSPEC
2043 && XINT (disp, 1) == UNSPEC_LTREL_OFFSET)
2044 {
2045 orig_disp = gen_rtx_CONST (Pmode, disp);
2046 if (offset)
2047 {
2048 /* If we have an offset, make sure it does not
2049 exceed the size of the constant pool entry. */
2050 rtx sym = XVECEXP (disp, 0, 0);
2051 if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))
2052 return false;
2053
2054 orig_disp = plus_constant (orig_disp, offset);
2055 }
2056 }
2057
2058 else
2059 return false;
2060 }
2061
2062 if (!base && !indx)
2063 pointer = true;
2064
2065 if (out)
2066 {
2067 out->base = base;
2068 out->indx = indx;
2069 out->disp = orig_disp;
2070 out->pointer = pointer;
2071 out->literal_pool = literal_pool;
2072 }
2073
2074 return true;
2075 }
2076
2077 /* Decompose an RTL expression OP for a shift count into its components,
2078 and return the base register in BASE and the offset in OFFSET.
2079
2080 Return true if OP is a valid shift count, false if not. */
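/* For example, (plus:SI (reg:SI 3) (const_int 7)) decomposes into
   base (reg:SI 3) and offset 7, while a plain (const_int 63) yields a
   NULL base and offset 63.  */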
2081
2082 bool
2083 s390_decompose_shift_count (rtx op, rtx *base, HOST_WIDE_INT *offset)
2084 {
2085 HOST_WIDE_INT off = 0;
2086
2087 /* We can have an integer constant, an address register,
2088 or a sum of the two. */
2089 if (GET_CODE (op) == CONST_INT)
2090 {
2091 off = INTVAL (op);
2092 op = NULL_RTX;
2093 }
2094 if (op && GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT)
2095 {
2096 off = INTVAL (XEXP (op, 1));
2097 op = XEXP (op, 0);
2098 }
2099 while (op && GET_CODE (op) == SUBREG)
2100 op = SUBREG_REG (op);
2101
2102 if (op && GET_CODE (op) != REG)
2103 return false;
2104
2105 if (offset)
2106 *offset = off;
2107 if (base)
2108 *base = op;
2109
2110 return true;
2111 }
2112
2113
2114 /* Return true if OP is a valid address without index. */
2115
2116 bool
2117 s390_legitimate_address_without_index_p (rtx op)
2118 {
2119 struct s390_address addr;
2120
2121 if (!s390_decompose_address (XEXP (op, 0), &addr))
2122 return false;
2123 if (addr.indx)
2124 return false;
2125
2126 return true;
2127 }
2128
2129
2130 /* Return true if ADDR is of kind symbol_ref or symbol_ref + const_int
2131 and return these parts in SYMREF and ADDEND. You can pass NULL in
2132 SYMREF and/or ADDEND if you are not interested in these values.
2133 Literal pool references are *not* considered symbol references. */
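/* For example, (const (plus (symbol_ref "sym") (const_int 12))) yields
   the SYMBOL_REF in SYMREF and 12 in ADDEND, provided "sym" is not a
   constant pool entry.  */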
2134
2135 static bool
2136 s390_symref_operand_p (rtx addr, rtx *symref, HOST_WIDE_INT *addend)
2137 {
2138 HOST_WIDE_INT tmpaddend = 0;
2139
2140 if (GET_CODE (addr) == CONST)
2141 addr = XEXP (addr, 0);
2142
2143 if (GET_CODE (addr) == PLUS)
2144 {
2145 if (GET_CODE (XEXP (addr, 0)) == SYMBOL_REF
2146 && !CONSTANT_POOL_ADDRESS_P (XEXP (addr, 0))
2147 && CONST_INT_P (XEXP (addr, 1)))
2148 {
2149 tmpaddend = INTVAL (XEXP (addr, 1));
2150 addr = XEXP (addr, 0);
2151 }
2152 else
2153 return false;
2154 }
2155 else
2156 if (GET_CODE (addr) != SYMBOL_REF || CONSTANT_POOL_ADDRESS_P (addr))
2157 return false;
2158
2159 if (symref)
2160 *symref = addr;
2161 if (addend)
2162 *addend = tmpaddend;
2163
2164 return true;
2165 }
2166
2167
2168 /* Return true if the address in OP is valid for constraint letter C
2169 if wrapped in a MEM rtx. Set LIT_POOL_OK to true if literal
2170 pool MEMs should be accepted. Only the Q, R, S, T constraint
2171 letters are allowed for C. */
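/* The Q and R forms require a "short" displacement as checked by
   s390_short_displacement, i.e. one fitting the unsigned 12-bit field
   of the classic instruction formats; the S and T forms only accept
   addresses that actually need the signed 20-bit long-displacement
   formats.  */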
2172
2173 static int
2174 s390_check_qrst_address (char c, rtx op, bool lit_pool_ok)
2175 {
2176 struct s390_address addr;
2177 bool decomposed = false;
2178
2179 /* This check makes sure that no symbolic address (except literal
2180 pool references) is accepted by the R or T constraints. */
2181 if (s390_symref_operand_p (op, NULL, NULL))
2182 return 0;
2183
2184 /* Ensure literal pool references are only accepted if LIT_POOL_OK. */
2185 if (!lit_pool_ok)
2186 {
2187 if (!s390_decompose_address (op, &addr))
2188 return 0;
2189 if (addr.literal_pool)
2190 return 0;
2191 decomposed = true;
2192 }
2193
2194 switch (c)
2195 {
2196 case 'Q': /* no index short displacement */
2197 if (!decomposed && !s390_decompose_address (op, &addr))
2198 return 0;
2199 if (addr.indx)
2200 return 0;
2201 if (!s390_short_displacement (addr.disp))
2202 return 0;
2203 break;
2204
2205 case 'R': /* with index short displacement */
2206 if (TARGET_LONG_DISPLACEMENT)
2207 {
2208 if (!decomposed && !s390_decompose_address (op, &addr))
2209 return 0;
2210 if (!s390_short_displacement (addr.disp))
2211 return 0;
2212 }
2213 /* Any invalid address here will be fixed up by reload,
2214 so accept it for the most generic constraint. */
2215 break;
2216
2217 case 'S': /* no index long displacement */
2218 if (!TARGET_LONG_DISPLACEMENT)
2219 return 0;
2220 if (!decomposed && !s390_decompose_address (op, &addr))
2221 return 0;
2222 if (addr.indx)
2223 return 0;
2224 if (s390_short_displacement (addr.disp))
2225 return 0;
2226 break;
2227
2228 case 'T': /* with index long displacement */
2229 if (!TARGET_LONG_DISPLACEMENT)
2230 return 0;
2231 /* Any invalid address here will be fixed up by reload,
2232 so accept it for the most generic constraint. */
2233 if ((decomposed || s390_decompose_address (op, &addr))
2234 && s390_short_displacement (addr.disp))
2235 return 0;
2236 break;
2237 default:
2238 return 0;
2239 }
2240 return 1;
2241 }
2242
2243
2244 /* Evaluates constraint strings described by the regular expression
2245 ([A|B|Z](Q|R|S|T))|U|W|Y and returns 1 if OP is a valid operand for
2246 the constraint given in STR, and 0 otherwise. */
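/* For example, "AQ" accepts an offsettable, non-volatile MEM whose
   address uses no index register and a short displacement, while "BT"
   rejects literal pool references and requires a long displacement
   (an index register is allowed).  */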
2247
2248 int
2249 s390_mem_constraint (const char *str, rtx op)
2250 {
2251 char c = str[0];
2252
2253 switch (c)
2254 {
2255 case 'A':
2256 /* Check for offsettable variants of memory constraints. */
2257 if (!MEM_P (op) || MEM_VOLATILE_P (op))
2258 return 0;
2259 if ((reload_completed || reload_in_progress)
2260 ? !offsettable_memref_p (op) : !offsettable_nonstrict_memref_p (op))
2261 return 0;
2262 return s390_check_qrst_address (str[1], XEXP (op, 0), true);
2263 case 'B':
2264 /* Check for non-literal-pool variants of memory constraints. */
2265 if (!MEM_P (op))
2266 return 0;
2267 return s390_check_qrst_address (str[1], XEXP (op, 0), false);
2268 case 'Q':
2269 case 'R':
2270 case 'S':
2271 case 'T':
2272 if (GET_CODE (op) != MEM)
2273 return 0;
2274 return s390_check_qrst_address (c, XEXP (op, 0), true);
2275 case 'U':
2276 return (s390_check_qrst_address ('Q', op, true)
2277 || s390_check_qrst_address ('R', op, true));
2278 case 'W':
2279 return (s390_check_qrst_address ('S', op, true)
2280 || s390_check_qrst_address ('T', op, true));
2281 case 'Y':
2282 /* Simply check for the basic form of a shift count. Reload will
2283 take care of making sure we have a proper base register. */
2284 if (!s390_decompose_shift_count (op, NULL, NULL))
2285 return 0;
2286 break;
2287 case 'Z':
2288 return s390_check_qrst_address (str[1], op, true);
2289 default:
2290 return 0;
2291 }
2292 return 1;
2293 }
2294
2295
2296 /* Evaluates constraint strings starting with letter O. Input
2297 parameter C is the letter following the "O" in the constraint
2298 string. Returns 1 if VALUE meets the respective constraint and 0
2299 otherwise. */
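/* For example, "Os" accepts any value that is unchanged by truncation
   to SImode, i.e. anything representable as a signed 32-bit
   immediate.  */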
2300
2301 int
2302 s390_O_constraint_str (const char c, HOST_WIDE_INT value)
2303 {
2304 if (!TARGET_EXTIMM)
2305 return 0;
2306
2307 switch (c)
2308 {
2309 case 's':
2310 return trunc_int_for_mode (value, SImode) == value;
2311
2312 case 'p':
2313 return value == 0
2314 || s390_single_part (GEN_INT (value), DImode, SImode, 0) == 1;
2315
2316 case 'n':
2317 return s390_single_part (GEN_INT (value - 1), DImode, SImode, -1) == 1;
2318
2319 default:
2320 gcc_unreachable ();
2321 }
2322 }
2323
2324
2325 /* Evaluates constraint strings starting with letter N. Parameter STR
2326 contains the letters following letter "N" in the constraint string.
2327 Returns true if VALUE matches the constraint. */
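/* The letters are decoded as follows: STR[0] selects the part that may
   differ ('0'-'3', or 'x' for any part), STR[1] gives the mode of that
   part (Q, H or S), STR[2] the mode of the whole value (H, S or D) and
   STR[3] the value of all remaining parts ('0' or 'F').  */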
2328
2329 int
2330 s390_N_constraint_str (const char *str, HOST_WIDE_INT value)
2331 {
2332 enum machine_mode mode, part_mode;
2333 int def;
2334 int part, part_goal;
2335
2336
2337 if (str[0] == 'x')
2338 part_goal = -1;
2339 else
2340 part_goal = str[0] - '0';
2341
2342 switch (str[1])
2343 {
2344 case 'Q':
2345 part_mode = QImode;
2346 break;
2347 case 'H':
2348 part_mode = HImode;
2349 break;
2350 case 'S':
2351 part_mode = SImode;
2352 break;
2353 default:
2354 return 0;
2355 }
2356
2357 switch (str[2])
2358 {
2359 case 'H':
2360 mode = HImode;
2361 break;
2362 case 'S':
2363 mode = SImode;
2364 break;
2365 case 'D':
2366 mode = DImode;
2367 break;
2368 default:
2369 return 0;
2370 }
2371
2372 switch (str[3])
2373 {
2374 case '0':
2375 def = 0;
2376 break;
2377 case 'F':
2378 def = -1;
2379 break;
2380 default:
2381 return 0;
2382 }
2383
2384 if (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (part_mode))
2385 return 0;
2386
2387 part = s390_single_part (GEN_INT (value), mode, part_mode, def);
2388 if (part < 0)
2389 return 0;
2390 if (part_goal != -1 && part_goal != part)
2391 return 0;
2392
2393 return 1;
2394 }
2395
2396
2397 /* Returns true if the input parameter VALUE is a float zero. */
2398
2399 int
2400 s390_float_const_zero_p (rtx value)
2401 {
2402 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
2403 && value == CONST0_RTX (GET_MODE (value)));
2404 }
2405
2406
2407 /* Compute a (partial) cost for rtx X. Return true if the complete
2408 cost has been computed, and false if subexpressions should be
2409 scanned. In either case, *TOTAL contains the cost result.
2410 CODE contains GET_CODE (x), OUTER_CODE contains the code
2411 of the superexpression of x. */
2412
2413 static bool
2414 s390_rtx_costs (rtx x, int code, int outer_code, int *total,
2415 bool speed ATTRIBUTE_UNUSED)
2416 {
2417 switch (code)
2418 {
2419 case CONST:
2420 case CONST_INT:
2421 case LABEL_REF:
2422 case SYMBOL_REF:
2423 case CONST_DOUBLE:
2424 case MEM:
2425 *total = 0;
2426 return true;
2427
2428 case ASHIFT:
2429 case ASHIFTRT:
2430 case LSHIFTRT:
2431 case ROTATE:
2432 case ROTATERT:
2433 case AND:
2434 case IOR:
2435 case XOR:
2436 case NEG:
2437 case NOT:
2438 *total = COSTS_N_INSNS (1);
2439 return false;
2440
2441 case PLUS:
2442 case MINUS:
2443 /* Check for multiply and add. */
2444 if ((GET_MODE (x) == DFmode || GET_MODE (x) == SFmode)
2445 && GET_CODE (XEXP (x, 0)) == MULT
2446 && TARGET_HARD_FLOAT && TARGET_FUSED_MADD)
2447 {
2448 /* This is the multiply and add case. */
2449 if (GET_MODE (x) == DFmode)
2450 *total = s390_cost->madbr;
2451 else
2452 *total = s390_cost->maebr;
2453 *total += (rtx_cost (XEXP (XEXP (x, 0), 0), MULT, speed)
2454 + rtx_cost (XEXP (XEXP (x, 0), 1), MULT, speed)
2455 + rtx_cost (XEXP (x, 1), (enum rtx_code) code, speed));
2456 return true; /* Do not do an additional recursive descent. */
2457 }
2458 *total = COSTS_N_INSNS (1);
2459 return false;
2460
2461 case MULT:
2462 switch (GET_MODE (x))
2463 {
2464 case SImode:
2465 {
2466 rtx left = XEXP (x, 0);
2467 rtx right = XEXP (x, 1);
2468 if (GET_CODE (right) == CONST_INT
2469 && CONST_OK_FOR_K (INTVAL (right)))
2470 *total = s390_cost->mhi;
2471 else if (GET_CODE (left) == SIGN_EXTEND)
2472 *total = s390_cost->mh;
2473 else
2474 *total = s390_cost->ms; /* msr, ms, msy */
2475 break;
2476 }
2477 case DImode:
2478 {
2479 rtx left = XEXP (x, 0);
2480 rtx right = XEXP (x, 1);
2481 if (TARGET_ZARCH)
2482 {
2483 if (GET_CODE (right) == CONST_INT
2484 && CONST_OK_FOR_K (INTVAL (right)))
2485 *total = s390_cost->mghi;
2486 else if (GET_CODE (left) == SIGN_EXTEND)
2487 *total = s390_cost->msgf;
2488 else
2489 *total = s390_cost->msg; /* msgr, msg */
2490 }
2491 else /* TARGET_31BIT */
2492 {
2493 if (GET_CODE (left) == SIGN_EXTEND
2494 && GET_CODE (right) == SIGN_EXTEND)
2495 /* mulsidi case: mr, m */
2496 *total = s390_cost->m;
2497 else if (GET_CODE (left) == ZERO_EXTEND
2498 && GET_CODE (right) == ZERO_EXTEND
2499 && TARGET_CPU_ZARCH)
2500 /* umulsidi case: ml, mlr */
2501 *total = s390_cost->ml;
2502 else
2503 /* Complex calculation is required. */
2504 *total = COSTS_N_INSNS (40);
2505 }
2506 break;
2507 }
2508 case SFmode:
2509 case DFmode:
2510 *total = s390_cost->mult_df;
2511 break;
2512 case TFmode:
2513 *total = s390_cost->mxbr;
2514 break;
2515 default:
2516 return false;
2517 }
2518 return false;
2519
2520 case UDIV:
2521 case UMOD:
2522 if (GET_MODE (x) == TImode) /* 128 bit division */
2523 *total = s390_cost->dlgr;
2524 else if (GET_MODE (x) == DImode)
2525 {
2526 rtx right = XEXP (x, 1);
2527 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
2528 *total = s390_cost->dlr;
2529 else /* 64 by 64 bit division */
2530 *total = s390_cost->dlgr;
2531 }
2532 else if (GET_MODE (x) == SImode) /* 32 bit division */
2533 *total = s390_cost->dlr;
2534 return false;
2535
2536 case DIV:
2537 case MOD:
2538 if (GET_MODE (x) == DImode)
2539 {
2540 rtx right = XEXP (x, 1);
2541 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
2542 if (TARGET_ZARCH)
2543 *total = s390_cost->dsgfr;
2544 else
2545 *total = s390_cost->dr;
2546 else /* 64 by 64 bit division */
2547 *total = s390_cost->dsgr;
2548 }
2549 else if (GET_MODE (x) == SImode) /* 32 bit division */
2550 *total = s390_cost->dlr;
2551 else if (GET_MODE (x) == SFmode)
2552 {
2553 *total = s390_cost->debr;
2554 }
2555 else if (GET_MODE (x) == DFmode)
2556 {
2557 *total = s390_cost->ddbr;
2558 }
2559 else if (GET_MODE (x) == TFmode)
2560 {
2561 *total = s390_cost->dxbr;
2562 }
2563 return false;
2564
2565 case SQRT:
2566 if (GET_MODE (x) == SFmode)
2567 *total = s390_cost->sqebr;
2568 else if (GET_MODE (x) == DFmode)
2569 *total = s390_cost->sqdbr;
2570 else /* TFmode */
2571 *total = s390_cost->sqxbr;
2572 return false;
2573
2574 case SIGN_EXTEND:
2575 case ZERO_EXTEND:
2576 if (outer_code == MULT || outer_code == DIV || outer_code == MOD
2577 || outer_code == PLUS || outer_code == MINUS
2578 || outer_code == COMPARE)
2579 *total = 0;
2580 return false;
2581
2582 case COMPARE:
2583 *total = COSTS_N_INSNS (1);
2584 if (GET_CODE (XEXP (x, 0)) == AND
2585 && GET_CODE (XEXP (x, 1)) == CONST_INT
2586 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
2587 {
2588 rtx op0 = XEXP (XEXP (x, 0), 0);
2589 rtx op1 = XEXP (XEXP (x, 0), 1);
2590 rtx op2 = XEXP (x, 1);
2591
2592 if (memory_operand (op0, GET_MODE (op0))
2593 && s390_tm_ccmode (op1, op2, 0) != VOIDmode)
2594 return true;
2595 if (register_operand (op0, GET_MODE (op0))
2596 && s390_tm_ccmode (op1, op2, 1) != VOIDmode)
2597 return true;
2598 }
2599 return false;
2600
2601 default:
2602 return false;
2603 }
2604 }
2605
2606 /* Return the cost of an address rtx ADDR. */
2607
2608 static int
2609 s390_address_cost (rtx addr, bool speed ATTRIBUTE_UNUSED)
2610 {
2611 struct s390_address ad;
2612 if (!s390_decompose_address (addr, &ad))
2613 return 1000;
2614
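/* Addresses using an index register are rated slightly more expensive
   than plain base (+ displacement) addresses.  */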
2615 return ad.indx ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (1);
2616 }
2617
2618 /* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
2619 otherwise return 0. */
2620
2621 int
2622 tls_symbolic_operand (rtx op)
2623 {
2624 if (GET_CODE (op) != SYMBOL_REF)
2625 return 0;
2626 return SYMBOL_REF_TLS_MODEL (op);
2627 }
2628 \f
2629 /* Split DImode access register reference REG (on 64-bit) into its constituent
2630 low and high parts, and store them into LO and HI. Note that gen_lowpart/
2631 gen_highpart cannot be used as they assume all registers are word-sized,
2632 while our access registers have only half that size. */
2633
2634 void
2635 s390_split_access_reg (rtx reg, rtx *lo, rtx *hi)
2636 {
2637 gcc_assert (TARGET_64BIT);
2638 gcc_assert (ACCESS_REG_P (reg));
2639 gcc_assert (GET_MODE (reg) == DImode);
2640 gcc_assert (!(REGNO (reg) & 1));
2641
2642 *lo = gen_rtx_REG (SImode, REGNO (reg) + 1);
2643 *hi = gen_rtx_REG (SImode, REGNO (reg));
2644 }
2645
2646 /* Return true if OP contains a symbol reference. */
2647
2648 bool
2649 symbolic_reference_mentioned_p (rtx op)
2650 {
2651 const char *fmt;
2652 int i;
2653
2654 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
2655 return 1;
2656
2657 fmt = GET_RTX_FORMAT (GET_CODE (op));
2658 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2659 {
2660 if (fmt[i] == 'E')
2661 {
2662 int j;
2663
2664 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2665 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2666 return 1;
2667 }
2668
2669 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
2670 return 1;
2671 }
2672
2673 return 0;
2674 }
2675
2676 /* Return true if OP contains a reference to a thread-local symbol. */
2677
2678 bool
2679 tls_symbolic_reference_mentioned_p (rtx op)
2680 {
2681 const char *fmt;
2682 int i;
2683
2684 if (GET_CODE (op) == SYMBOL_REF)
2685 return tls_symbolic_operand (op);
2686
2687 fmt = GET_RTX_FORMAT (GET_CODE (op));
2688 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2689 {
2690 if (fmt[i] == 'E')
2691 {
2692 int j;
2693
2694 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2695 if (tls_symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2696 return true;
2697 }
2698
2699 else if (fmt[i] == 'e' && tls_symbolic_reference_mentioned_p (XEXP (op, i)))
2700 return true;
2701 }
2702
2703 return false;
2704 }
2705
2706
2707 /* Return true if OP is a legitimate general operand when
2708 generating PIC code. It is given that flag_pic is on
2709 and that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2710
2711 int
2712 legitimate_pic_operand_p (rtx op)
2713 {
2714 /* Accept all non-symbolic constants. */
2715 if (!SYMBOLIC_CONST (op))
2716 return 1;
2717
2718 /* Reject everything else; must be handled
2719 via emit_symbolic_move. */
2720 return 0;
2721 }
2722
2723 /* Returns true if the constant value OP is a legitimate general operand.
2724 It is given that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2725
2726 int
2727 legitimate_constant_p (rtx op)
2728 {
2729 /* Accept all non-symbolic constants. */
2730 if (!SYMBOLIC_CONST (op))
2731 return 1;
2732
2733 /* Accept immediate LARL operands. */
2734 if (TARGET_CPU_ZARCH && larl_operand (op, VOIDmode))
2735 return 1;
2736
2737 /* Thread-local symbols are never legal constants. This is
2738 so that emit_call knows that computing such addresses
2739 might require a function call. */
2740 if (TLS_SYMBOLIC_CONST (op))
2741 return 0;
2742
2743 /* In the PIC case, symbolic constants must *not* be
2744 forced into the literal pool. We accept them here,
2745 so that they will be handled by emit_symbolic_move. */
2746 if (flag_pic)
2747 return 1;
2748
2749 /* All remaining non-PIC symbolic constants are
2750 forced into the literal pool. */
2751 return 0;
2752 }
2753
2754 /* Determine if it's legal to put X into the constant pool. This
2755 is not possible if X contains the address of a symbol that is
2756 not constant (TLS) or not known at final link time (PIC). */
2757
2758 static bool
2759 s390_cannot_force_const_mem (rtx x)
2760 {
2761 switch (GET_CODE (x))
2762 {
2763 case CONST_INT:
2764 case CONST_DOUBLE:
2765 /* Accept all non-symbolic constants. */
2766 return false;
2767
2768 case LABEL_REF:
2769 /* Labels are OK iff we are non-PIC. */
2770 return flag_pic != 0;
2771
2772 case SYMBOL_REF:
2773 /* 'Naked' TLS symbol references are never OK,
2774 non-TLS symbols are OK iff we are non-PIC. */
2775 if (tls_symbolic_operand (x))
2776 return true;
2777 else
2778 return flag_pic != 0;
2779
2780 case CONST:
2781 return s390_cannot_force_const_mem (XEXP (x, 0));
2782 case PLUS:
2783 case MINUS:
2784 return s390_cannot_force_const_mem (XEXP (x, 0))
2785 || s390_cannot_force_const_mem (XEXP (x, 1));
2786
2787 case UNSPEC:
2788 switch (XINT (x, 1))
2789 {
2790 /* Only lt-relative or GOT-relative UNSPECs are OK. */
2791 case UNSPEC_LTREL_OFFSET:
2792 case UNSPEC_GOT:
2793 case UNSPEC_GOTOFF:
2794 case UNSPEC_PLTOFF:
2795 case UNSPEC_TLSGD:
2796 case UNSPEC_TLSLDM:
2797 case UNSPEC_NTPOFF:
2798 case UNSPEC_DTPOFF:
2799 case UNSPEC_GOTNTPOFF:
2800 case UNSPEC_INDNTPOFF:
2801 return false;
2802
2803 /* If the literal pool shares the code section, execute template
2804 placeholders may be put into the pool as well. */
2805 case UNSPEC_INSN:
2806 return TARGET_CPU_ZARCH;
2807
2808 default:
2809 return true;
2810 }
2811 break;
2812
2813 default:
2814 gcc_unreachable ();
2815 }
2816 }
2817
2818 /* Returns true if the constant value OP is a legitimate general
2819 operand during and after reload. The difference from
2820 legitimate_constant_p is that this function will not accept
2821 a constant that would need to be forced to the literal pool
2822 before it can be used as operand.
2823 This function accepts all constants which can be loaded directly
2824 into a GPR. */
2825
2826 bool
2827 legitimate_reload_constant_p (rtx op)
2828 {
2829 /* Accept la(y) operands. */
2830 if (GET_CODE (op) == CONST_INT
2831 && DISP_IN_RANGE (INTVAL (op)))
2832 return true;
2833
2834 /* Accept l(g)hi/l(g)fi operands. */
2835 if (GET_CODE (op) == CONST_INT
2836 && (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_Os (INTVAL (op))))
2837 return true;
2838
2839 /* Accept lliXX operands. */
2840 if (TARGET_ZARCH
2841 && GET_CODE (op) == CONST_INT
2842 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2843 && s390_single_part (op, word_mode, HImode, 0) >= 0)
2844 return true;
2845
2846 if (TARGET_EXTIMM
2847 && GET_CODE (op) == CONST_INT
2848 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2849 && s390_single_part (op, word_mode, SImode, 0) >= 0)
2850 return true;
2851
2852 /* Accept larl operands. */
2853 if (TARGET_CPU_ZARCH
2854 && larl_operand (op, VOIDmode))
2855 return true;
2856
2857 /* Accept floating-point zero operands that fit into a single GPR. */
2858 if (GET_CODE (op) == CONST_DOUBLE
2859 && s390_float_const_zero_p (op)
2860 && GET_MODE_SIZE (GET_MODE (op)) <= UNITS_PER_WORD)
2861 return true;
2862
2863 /* Accept double-word operands that can be split. */
2864 if (GET_CODE (op) == CONST_INT
2865 && trunc_int_for_mode (INTVAL (op), word_mode) != INTVAL (op))
2866 {
2867 enum machine_mode dword_mode = word_mode == SImode ? DImode : TImode;
2868 rtx hi = operand_subword (op, 0, 0, dword_mode);
2869 rtx lo = operand_subword (op, 1, 0, dword_mode);
2870 return legitimate_reload_constant_p (hi)
2871 && legitimate_reload_constant_p (lo);
2872 }
2873
2874 /* Everything else cannot be handled without reload. */
2875 return false;
2876 }
2877
2878 /* Returns true if the constant value OP is a legitimate fp operand
2879 during and after reload.
2880 This function accepts all constants which can be loaded directly
2881 into an FPR. */
2882
2883 static bool
2884 legitimate_reload_fp_constant_p (rtx op)
2885 {
2886 /* Accept floating-point zero operands if the load zero instruction
2887 can be used. */
2888 if (TARGET_Z196
2889 && GET_CODE (op) == CONST_DOUBLE
2890 && s390_float_const_zero_p (op))
2891 return true;
2892
2893 return false;
2894 }
2895
2896 /* Given an rtx OP being reloaded into a reg required to be in class RCLASS,
2897 return the class of reg to actually use. */
2898
2899 enum reg_class
2900 s390_preferred_reload_class (rtx op, enum reg_class rclass)
2901 {
2902 switch (GET_CODE (op))
2903 {
2904 /* Constants we cannot reload into general registers
2905 must be forced into the literal pool. */
2906 case CONST_DOUBLE:
2907 case CONST_INT:
2908 if (reg_class_subset_p (GENERAL_REGS, rclass)
2909 && legitimate_reload_constant_p (op))
2910 return GENERAL_REGS;
2911 else if (reg_class_subset_p (ADDR_REGS, rclass)
2912 && legitimate_reload_constant_p (op))
2913 return ADDR_REGS;
2914 else if (reg_class_subset_p (FP_REGS, rclass)
2915 && legitimate_reload_fp_constant_p (op))
2916 return FP_REGS;
2917 return NO_REGS;
2918
2919 /* If a symbolic constant or a PLUS is reloaded,
2920 it is most likely being used as an address, so
2921 prefer ADDR_REGS. If RCLASS is not a superset
2922 of ADDR_REGS, e.g. FP_REGS, reject this reload. */
2923 case PLUS:
2924 case LABEL_REF:
2925 case SYMBOL_REF:
2926 case CONST:
2927 if (reg_class_subset_p (ADDR_REGS, rclass))
2928 return ADDR_REGS;
2929 else
2930 return NO_REGS;
2931
2932 default:
2933 break;
2934 }
2935
2936 return rclass;
2937 }
2938
2939 /* Return true if ADDR is SYMBOL_REF + addend with addend being a
2940 multiple of ALIGNMENT and the SYMBOL_REF being naturally
2941 aligned. */
2942
2943 bool
2944 s390_check_symref_alignment (rtx addr, HOST_WIDE_INT alignment)
2945 {
2946 HOST_WIDE_INT addend;
2947 rtx symref;
2948
2949 if (!s390_symref_operand_p (addr, &symref, &addend))
2950 return false;
2951
2952 return (!SYMBOL_REF_NOT_NATURALLY_ALIGNED_P (symref)
2953 && !(addend & (alignment - 1)));
2954 }
2955
2956 /* ADDR is moved into REG using larl. If ADDR isn't a valid larl
2957 operand, SCRATCH is used to load the even part of the address,
2958 and the remaining offset of one is added afterwards. */
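/* For example, for sym + 5 this emits
     larl %scratch, sym+4
     la   %reg, 1(%scratch)
   since larl can only encode even offsets.  */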
2959
2960 void
2961 s390_reload_larl_operand (rtx reg, rtx addr, rtx scratch)
2962 {
2963 HOST_WIDE_INT addend;
2964 rtx symref;
2965
2966 if (!s390_symref_operand_p (addr, &symref, &addend))
2967 gcc_unreachable ();
2968
2969 if (!(addend & 1))
2970 /* Easy case. The addend is even so larl will do fine. */
2971 emit_move_insn (reg, addr);
2972 else
2973 {
2974 /* We can leave the scratch register untouched if the target
2975 register is a valid base register. */
2976 if (REGNO (reg) < FIRST_PSEUDO_REGISTER
2977 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS)
2978 scratch = reg;
2979
2980 gcc_assert (REGNO (scratch) < FIRST_PSEUDO_REGISTER);
2981 gcc_assert (REGNO_REG_CLASS (REGNO (scratch)) == ADDR_REGS);
2982
2983 if (addend != 1)
2984 emit_move_insn (scratch,
2985 gen_rtx_CONST (Pmode,
2986 gen_rtx_PLUS (Pmode, symref,
2987 GEN_INT (addend - 1))));
2988 else
2989 emit_move_insn (scratch, symref);
2990
2991 /* Increment the address using la in order to avoid clobbering cc. */
2992 emit_move_insn (reg, gen_rtx_PLUS (Pmode, scratch, const1_rtx));
2993 }
2994 }
2995
2996 /* Generate what is necessary to move between REG and MEM using
2997 SCRATCH. The direction is given by TOMEM. */
2998
2999 void
3000 s390_reload_symref_address (rtx reg, rtx mem, rtx scratch, bool tomem)
3001 {
3002 /* Reload might have pulled a constant out of the literal pool.
3003 Force it back in. */
3004 if (CONST_INT_P (mem) || GET_CODE (mem) == CONST_DOUBLE
3005 || GET_CODE (mem) == CONST)
3006 mem = force_const_mem (GET_MODE (reg), mem);
3007
3008 gcc_assert (MEM_P (mem));
3009
3010 /* For a load from memory we can leave the scratch register
3011 untouched if the target register is a valid base register. */
3012 if (!tomem
3013 && REGNO (reg) < FIRST_PSEUDO_REGISTER
3014 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS
3015 && GET_MODE (reg) == GET_MODE (scratch))
3016 scratch = reg;
3017
3018 /* Load address into scratch register. Since we can't have a
3019 secondary reload for a secondary reload we have to cover the case
3020 where larl would need a secondary reload here as well. */
3021 s390_reload_larl_operand (scratch, XEXP (mem, 0), scratch);
3022
3023 /* Now we can use a standard load/store to do the move. */
3024 if (tomem)
3025 emit_move_insn (replace_equiv_address (mem, scratch), reg);
3026 else
3027 emit_move_insn (reg, replace_equiv_address (mem, scratch));
3028 }
3029
3030 /* Inform reload about cases where moving X with a mode MODE to a register in
3031 RCLASS requires an extra scratch or immediate register. Return the class
3032 needed for the immediate register. */
3033
3034 static reg_class_t
3035 s390_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
3036 enum machine_mode mode, secondary_reload_info *sri)
3037 {
3038 enum reg_class rclass = (enum reg_class) rclass_i;
3039
3040 /* Intermediate register needed. */
3041 if (reg_classes_intersect_p (CC_REGS, rclass))
3042 return GENERAL_REGS;
3043
3044 if (TARGET_Z10)
3045 {
3046 /* On z10 several optimizer steps may generate larl operands with
3047 an odd addend. */
3048 if (in_p
3049 && s390_symref_operand_p (x, NULL, NULL)
3050 && mode == Pmode
3051 && !s390_check_symref_alignment (x, 2))
3052 sri->icode = ((mode == DImode) ? CODE_FOR_reloaddi_larl_odd_addend_z10
3053 : CODE_FOR_reloadsi_larl_odd_addend_z10);
3054
3055 /* On z10 we need a scratch register when moving QI, TI or floating
3056 point mode values from or to a memory location with a SYMBOL_REF
3057 or if the symref addend of a HI, SI or DI move is not aligned to the
3058 width of the access. */
3059 if (MEM_P (x)
3060 && s390_symref_operand_p (XEXP (x, 0), NULL, NULL)
3061 && (mode == QImode || mode == TImode || FLOAT_MODE_P (mode)
3062 || (!TARGET_ZARCH && mode == DImode)
3063 || ((mode == HImode || mode == SImode || mode == DImode)
3064 && (!s390_check_symref_alignment (XEXP (x, 0),
3065 GET_MODE_SIZE (mode))))))
3066 {
3067 #define __SECONDARY_RELOAD_CASE(M,m) \
3068 case M##mode: \
3069 if (TARGET_64BIT) \
3070 sri->icode = in_p ? CODE_FOR_reload##m##di_toreg_z10 : \
3071 CODE_FOR_reload##m##di_tomem_z10; \
3072 else \
3073 sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 : \
3074 CODE_FOR_reload##m##si_tomem_z10; \
3075 break;
3076
3077 switch (GET_MODE (x))
3078 {
3079 __SECONDARY_RELOAD_CASE (QI, qi);
3080 __SECONDARY_RELOAD_CASE (HI, hi);
3081 __SECONDARY_RELOAD_CASE (SI, si);
3082 __SECONDARY_RELOAD_CASE (DI, di);
3083 __SECONDARY_RELOAD_CASE (TI, ti);
3084 __SECONDARY_RELOAD_CASE (SF, sf);
3085 __SECONDARY_RELOAD_CASE (DF, df);
3086 __SECONDARY_RELOAD_CASE (TF, tf);
3087 __SECONDARY_RELOAD_CASE (SD, sd);
3088 __SECONDARY_RELOAD_CASE (DD, dd);
3089 __SECONDARY_RELOAD_CASE (TD, td);
3090
3091 default:
3092 gcc_unreachable ();
3093 }
3094 #undef __SECONDARY_RELOAD_CASE
3095 }
3096 }
3097
3098 /* We need a scratch register when loading a PLUS expression which
3099 is not a legitimate operand of the LOAD ADDRESS instruction. */
3100 if (in_p && s390_plus_operand (x, mode))
3101 sri->icode = (TARGET_64BIT ?
3102 CODE_FOR_reloaddi_plus : CODE_FOR_reloadsi_plus);
3103
3104 /* When performing a multiword move from or to memory, we have to make sure the
3105 second chunk in memory is addressable without causing a displacement
3106 overflow. If that would be the case we calculate the address in
3107 a scratch register. */
3108 if (MEM_P (x)
3109 && GET_CODE (XEXP (x, 0)) == PLUS
3110 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3111 && !DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1))
3112 + GET_MODE_SIZE (mode) - 1))
3113 {
3114 /* For GENERAL_REGS a displacement overflow is no problem if it occurs
3115 in an s_operand address, since we may fall back to lm/stm. So we only
3116 have to care about overflows in the b+i+d case. */
3117 if ((reg_classes_intersect_p (GENERAL_REGS, rclass)
3118 && s390_class_max_nregs (GENERAL_REGS, mode) > 1
3119 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
3120 /* For FP_REGS no lm/stm is available so this check is triggered
3121 for displacement overflows in b+i+d and b+d like addresses. */
3122 || (reg_classes_intersect_p (FP_REGS, rclass)
3123 && s390_class_max_nregs (FP_REGS, mode) > 1))
3124 {
3125 if (in_p)
3126 sri->icode = (TARGET_64BIT ?
3127 CODE_FOR_reloaddi_nonoffmem_in :
3128 CODE_FOR_reloadsi_nonoffmem_in);
3129 else
3130 sri->icode = (TARGET_64BIT ?
3131 CODE_FOR_reloaddi_nonoffmem_out :
3132 CODE_FOR_reloadsi_nonoffmem_out);
3133 }
3134 }
3135
3136 /* A scratch address register is needed when a symbolic constant is
3137 copied to r0 when compiling with -fPIC. In other cases the target
3138 register might be used as temporary (see legitimize_pic_address). */
3139 if (in_p && SYMBOLIC_CONST (x) && flag_pic == 2 && rclass != ADDR_REGS)
3140 sri->icode = (TARGET_64BIT ?
3141 CODE_FOR_reloaddi_PIC_addr :
3142 CODE_FOR_reloadsi_PIC_addr);
3143
3144 /* Either scratch or no register needed. */
3145 return NO_REGS;
3146 }
3147
3148 /* Generate code to load SRC, which is PLUS that is not a
3149 legitimate operand for the LA instruction, into TARGET.
3150 SCRATCH may be used as scratch register. */
3151
3152 void
3153 s390_expand_plus_operand (rtx target, rtx src,
3154 rtx scratch)
3155 {
3156 rtx sum1, sum2;
3157 struct s390_address ad;
3158
3159 /* src must be a PLUS; get its two operands. */
3160 gcc_assert (GET_CODE (src) == PLUS);
3161 gcc_assert (GET_MODE (src) == Pmode);
3162
3163 /* Check if any of the two operands is already scheduled
3164 for replacement by reload. This can happen e.g. when
3165 float registers occur in an address. */
3166 sum1 = find_replacement (&XEXP (src, 0));
3167 sum2 = find_replacement (&XEXP (src, 1));
3168 src = gen_rtx_PLUS (Pmode, sum1, sum2);
3169
3170 /* If the address is already strictly valid, there's nothing to do. */
3171 if (!s390_decompose_address (src, &ad)
3172 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
3173 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
3174 {
3175 /* Otherwise, one of the operands cannot be an address register;
3176 we reload its value into the scratch register. */
3177 if (true_regnum (sum1) < 1 || true_regnum (sum1) > 15)
3178 {
3179 emit_move_insn (scratch, sum1);
3180 sum1 = scratch;
3181 }
3182 if (true_regnum (sum2) < 1 || true_regnum (sum2) > 15)
3183 {
3184 emit_move_insn (scratch, sum2);
3185 sum2 = scratch;
3186 }
3187
3188 /* According to the way these invalid addresses are generated
3189 in reload.c, it should never happen (at least on s390) that
3190 *neither* of the PLUS components, after find_replacements
3191 was applied, is an address register. */
3192 if (sum1 == scratch && sum2 == scratch)
3193 {
3194 debug_rtx (src);
3195 gcc_unreachable ();
3196 }
3197
3198 src = gen_rtx_PLUS (Pmode, sum1, sum2);
3199 }
3200
3201 /* Emit the LOAD ADDRESS pattern. Note that reload of PLUS
3202 is only ever performed on addresses, so we can mark the
3203 sum as legitimate for LA in any case. */
3204 s390_load_address (target, src);
3205 }
3206
3207
3208 /* Return true if ADDR is a valid memory address.
3209 STRICT specifies whether strict register checking applies. */
3210
3211 static bool
3212 s390_legitimate_address_p (enum machine_mode mode, rtx addr, bool strict)
3213 {
3214 struct s390_address ad;
3215
3216 if (TARGET_Z10
3217 && larl_operand (addr, VOIDmode)
3218 && (mode == VOIDmode
3219 || s390_check_symref_alignment (addr, GET_MODE_SIZE (mode))))
3220 return true;
3221
3222 if (!s390_decompose_address (addr, &ad))
3223 return false;
3224
3225 if (strict)
3226 {
3227 if (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
3228 return false;
3229
3230 if (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx)))
3231 return false;
3232 }
3233 else
3234 {
3235 if (ad.base
3236 && !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
3237 || REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
3238 return false;
3239
3240 if (ad.indx
3241 && !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
3242 || REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
3243 return false;
3244 }
3245 return true;
3246 }
3247
3248 /* Return true if OP is a valid operand for the LA instruction.
3249 In 31-bit, we need to prove that the result is used as an
3250 address, as LA performs only a 31-bit addition. */
3251
3252 bool
3253 legitimate_la_operand_p (rtx op)
3254 {
3255 struct s390_address addr;
3256 if (!s390_decompose_address (op, &addr))
3257 return false;
3258
3259 return (TARGET_64BIT || addr.pointer);
3260 }
3261
3262 /* Return true if it is valid *and* preferable to use LA to
3263 compute the sum of OP1 and OP2. */
3264
3265 bool
3266 preferred_la_operand_p (rtx op1, rtx op2)
3267 {
3268 struct s390_address addr;
3269
3270 if (op2 != const0_rtx)
3271 op1 = gen_rtx_PLUS (Pmode, op1, op2);
3272
3273 if (!s390_decompose_address (op1, &addr))
3274 return false;
3275 if (addr.base && !REGNO_OK_FOR_BASE_P (REGNO (addr.base)))
3276 return false;
3277 if (addr.indx && !REGNO_OK_FOR_INDEX_P (REGNO (addr.indx)))
3278 return false;
3279
3280 /* Avoid LA instructions with index register on z196; it is
3281 preferable to use regular add instructions when possible. */
3282 if (addr.indx && s390_tune == PROCESSOR_2817_Z196)
3283 return false;
3284
3285 if (!TARGET_64BIT && !addr.pointer)
3286 return false;
3287
3288 if (addr.pointer)
3289 return true;
3290
3291 if ((addr.base && REG_P (addr.base) && REG_POINTER (addr.base))
3292 || (addr.indx && REG_P (addr.indx) && REG_POINTER (addr.indx)))
3293 return true;
3294
3295 return false;
3296 }
3297
3298 /* Emit a forced load-address operation to load SRC into DST.
3299 This will use the LOAD ADDRESS instruction even in situations
3300 where legitimate_la_operand_p (SRC) returns false. */
3301
3302 void
3303 s390_load_address (rtx dst, rtx src)
3304 {
3305 if (TARGET_64BIT)
3306 emit_move_insn (dst, src);
3307 else
3308 emit_insn (gen_force_la_31 (dst, src));
3309 }
3310
3311 /* Return a legitimate reference for ORIG (an address) using the
3312 register REG. If REG is 0, a new pseudo is generated.
3313
3314 There are two types of references that must be handled:
3315
3316 1. Global data references must load the address from the GOT, via
3317 the PIC reg. An insn is emitted to do this load, and the reg is
3318 returned.
3319
3320 2. Static data references, constant pool addresses, and code labels
3321 compute the address as an offset from the GOT, whose base is in
3322 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
3323 differentiate them from global data objects. The returned
3324 address is the PIC reg + an unspec constant.
3325
3326 TARGET_LEGITIMIZE_ADDRESS_P rejects symbolic references unless the PIC
3327 reg also appears in the address. */
3328
3329 rtx
3330 legitimize_pic_address (rtx orig, rtx reg)
3331 {
3332 rtx addr = orig;
3333 rtx new_rtx = orig;
3334 rtx base;
3335
3336 gcc_assert (!TLS_SYMBOLIC_CONST (addr));
3337
3338 if (GET_CODE (addr) == LABEL_REF
3339 || (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (addr)))
3340 {
3341 /* This is a local symbol. */
3342 if (TARGET_CPU_ZARCH && larl_operand (addr, VOIDmode))
3343 {
3344 /* Access local symbols PC-relative via LARL.
3345 This is the same as in the non-PIC case, so it is
3346 handled automatically ... */
3347 }
3348 else
3349 {
3350 /* Access local symbols relative to the GOT. */
3351
3352 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3353
3354 if (reload_in_progress || reload_completed)
3355 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3356
3357 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
3358 addr = gen_rtx_CONST (Pmode, addr);
3359 addr = force_const_mem (Pmode, addr);
3360 emit_move_insn (temp, addr);
3361
3362 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3363 if (reg != 0)
3364 {
3365 s390_load_address (reg, new_rtx);
3366 new_rtx = reg;
3367 }
3368 }
3369 }
3370 else if (GET_CODE (addr) == SYMBOL_REF)
3371 {
3372 if (reg == 0)
3373 reg = gen_reg_rtx (Pmode);
3374
3375 if (flag_pic == 1)
3376 {
3377 /* Assume GOT offset < 4k. This is handled the same way
3378 in both 31- and 64-bit code (@GOT). */
3379
3380 if (reload_in_progress || reload_completed)
3381 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3382
3383 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3384 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3385 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
3386 new_rtx = gen_const_mem (Pmode, new_rtx);
3387 emit_move_insn (reg, new_rtx);
3388 new_rtx = reg;
3389 }
3390 else if (TARGET_CPU_ZARCH)
3391 {
3392 /* If the GOT offset might be >= 4k, we determine the position
3393 of the GOT entry via a PC-relative LARL (@GOTENT). */
3394
3395 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3396
3397 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
3398 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
3399
3400 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
3401 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3402 emit_move_insn (temp, new_rtx);
3403
3404 new_rtx = gen_const_mem (Pmode, temp);
3405 emit_move_insn (reg, new_rtx);
3406 new_rtx = reg;
3407 }
3408 else
3409 {
3410 /* If the GOT offset might be >= 4k, we have to load it
3411 from the literal pool (@GOT). */
3412
3413 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3414
3415 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
3416 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
3417
3418 if (reload_in_progress || reload_completed)
3419 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3420
3421 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3422 addr = gen_rtx_CONST (Pmode, addr);
3423 addr = force_const_mem (Pmode, addr);
3424 emit_move_insn (temp, addr);
3425
3426 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3427 new_rtx = gen_const_mem (Pmode, new_rtx);
3428 emit_move_insn (reg, new_rtx);
3429 new_rtx = reg;
3430 }
3431 }
3432 else
3433 {
3434 if (GET_CODE (addr) == CONST)
3435 {
3436 addr = XEXP (addr, 0);
3437 if (GET_CODE (addr) == UNSPEC)
3438 {
3439 gcc_assert (XVECLEN (addr, 0) == 1);
3440 switch (XINT (addr, 1))
3441 {
3442 /* If someone moved a GOT-relative UNSPEC
3443 out of the literal pool, force it back in. */
3444 case UNSPEC_GOTOFF:
3445 case UNSPEC_PLTOFF:
3446 new_rtx = force_const_mem (Pmode, orig);
3447 break;
3448
3449 /* @GOT is OK as is if small. */
3450 case UNSPEC_GOT:
3451 if (flag_pic == 2)
3452 new_rtx = force_const_mem (Pmode, orig);
3453 break;
3454
3455 /* @GOTENT is OK as is. */
3456 case UNSPEC_GOTENT:
3457 break;
3458
3459 /* @PLT is OK as is on 64-bit, must be converted to
3460 GOT-relative @PLTOFF on 31-bit. */
3461 case UNSPEC_PLT:
3462 if (!TARGET_CPU_ZARCH)
3463 {
3464 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3465
3466 if (reload_in_progress || reload_completed)
3467 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3468
3469 addr = XVECEXP (addr, 0, 0);
3470 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr),
3471 UNSPEC_PLTOFF);
3472 addr = gen_rtx_CONST (Pmode, addr);
3473 addr = force_const_mem (Pmode, addr);
3474 emit_move_insn (temp, addr);
3475
3476 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3477 if (reg != 0)
3478 {
3479 s390_load_address (reg, new_rtx);
3480 new_rtx = reg;
3481 }
3482 }
3483 break;
3484
3485 /* Everything else cannot happen. */
3486 default:
3487 gcc_unreachable ();
3488 }
3489 }
3490 else
3491 gcc_assert (GET_CODE (addr) == PLUS);
3492 }
3493 if (GET_CODE (addr) == PLUS)
3494 {
3495 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
3496
3497 gcc_assert (!TLS_SYMBOLIC_CONST (op0));
3498 gcc_assert (!TLS_SYMBOLIC_CONST (op1));
3499
3500 /* Check first to see if this is a constant offset
3501 from a local symbol reference. */
3502 if ((GET_CODE (op0) == LABEL_REF
3503 || (GET_CODE (op0) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (op0)))
3504 && GET_CODE (op1) == CONST_INT)
3505 {
3506 if (TARGET_CPU_ZARCH
3507 && larl_operand (op0, VOIDmode)
3508 && INTVAL (op1) < (HOST_WIDE_INT)1 << 31
3509 && INTVAL (op1) >= -((HOST_WIDE_INT)1 << 31))
3510 {
3511 if (INTVAL (op1) & 1)
3512 {
3513 /* LARL can't handle odd offsets, so emit a
3514 pair of LARL and LA. */
3515 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3516
3517 if (!DISP_IN_RANGE (INTVAL (op1)))
3518 {
3519 HOST_WIDE_INT even = INTVAL (op1) - 1;
3520 op0 = gen_rtx_PLUS (Pmode, op0, GEN_INT (even));
3521 op0 = gen_rtx_CONST (Pmode, op0);
3522 op1 = const1_rtx;
3523 }
3524
3525 emit_move_insn (temp, op0);
3526 new_rtx = gen_rtx_PLUS (Pmode, temp, op1);
3527
3528 if (reg != 0)
3529 {
3530 s390_load_address (reg, new_rtx);
3531 new_rtx = reg;
3532 }
3533 }
3534 else
3535 {
3536 /* If the offset is even, we can just use LARL.
3537 This will happen automatically. */
3538 }
3539 }
3540 else
3541 {
3542 /* Access local symbols relative to the GOT. */
3543
3544 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3545
3546 if (reload_in_progress || reload_completed)
3547 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3548
3549 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
3550 UNSPEC_GOTOFF);
3551 addr = gen_rtx_PLUS (Pmode, addr, op1);
3552 addr = gen_rtx_CONST (Pmode, addr);
3553 addr = force_const_mem (Pmode, addr);
3554 emit_move_insn (temp, addr);
3555
3556 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3557 if (reg != 0)
3558 {
3559 s390_load_address (reg, new_rtx);
3560 new_rtx = reg;
3561 }
3562 }
3563 }
3564
3565 /* Now, check whether it is a GOT relative symbol plus offset
3566 that was pulled out of the literal pool. Force it back in. */
3567
3568 else if (GET_CODE (op0) == UNSPEC
3569 && GET_CODE (op1) == CONST_INT
3570 && XINT (op0, 1) == UNSPEC_GOTOFF)
3571 {
3572 gcc_assert (XVECLEN (op0, 0) == 1);
3573
3574 new_rtx = force_const_mem (Pmode, orig);
3575 }
3576
3577 /* Otherwise, compute the sum. */
3578 else
3579 {
3580 base = legitimize_pic_address (XEXP (addr, 0), reg);
3581 new_rtx = legitimize_pic_address (XEXP (addr, 1),
3582 base == reg ? NULL_RTX : reg);
3583 if (GET_CODE (new_rtx) == CONST_INT)
3584 new_rtx = plus_constant (base, INTVAL (new_rtx));
3585 else
3586 {
3587 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
3588 {
3589 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
3590 new_rtx = XEXP (new_rtx, 1);
3591 }
3592 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
3593 }
3594
3595 if (GET_CODE (new_rtx) == CONST)
3596 new_rtx = XEXP (new_rtx, 0);
3597 new_rtx = force_operand (new_rtx, 0);
3598 }
3599 }
3600 }
3601 return new_rtx;
3602 }
3603
3604 /* Load the thread pointer into a register. */
3605
3606 rtx
3607 s390_get_thread_pointer (void)
3608 {
3609 rtx tp = gen_reg_rtx (Pmode);
3610
3611 emit_move_insn (tp, gen_rtx_REG (Pmode, TP_REGNUM));
3612 mark_reg_pointer (tp, BITS_PER_WORD);
3613
3614 return tp;
3615 }
3616
3617 /* Emit a TLS call insn. The call target is the SYMBOL_REF stored
3618 in s390_tls_symbol, which always refers to __tls_get_offset.
3619 The returned offset is written to RESULT_REG and a USE rtx is
3620 generated for TLS_CALL. */
3621
3622 static GTY(()) rtx s390_tls_symbol;
3623
3624 static void
3625 s390_emit_tls_call_insn (rtx result_reg, rtx tls_call)
3626 {
3627 rtx insn;
3628
3629 gcc_assert (flag_pic);
3630
3631 if (!s390_tls_symbol)
3632 s390_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_offset");
3633
3634 insn = s390_emit_call (s390_tls_symbol, tls_call, result_reg,
3635 gen_rtx_REG (Pmode, RETURN_REGNUM));
3636
3637 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), result_reg);
3638 RTL_CONST_CALL_P (insn) = 1;
3639 }
3640
3641 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3642 this (thread-local) address. REG may be used as temporary. */
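/* Global- and local-dynamic accesses call __tls_get_offset via
   s390_emit_tls_call_insn; initial-exec loads the offset from the GOT;
   local-exec uses a constant NTPOFF offset from the literal pool.  In
   all cases the thread pointer returned by s390_get_thread_pointer is
   added to form the final address.  */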
3643
3644 static rtx
3645 legitimize_tls_address (rtx addr, rtx reg)
3646 {
3647 rtx new_rtx, tls_call, temp, base, r2, insn;
3648
3649 if (GET_CODE (addr) == SYMBOL_REF)
3650 switch (tls_symbolic_operand (addr))
3651 {
3652 case TLS_MODEL_GLOBAL_DYNAMIC:
3653 start_sequence ();
3654 r2 = gen_rtx_REG (Pmode, 2);
3655 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_TLSGD);
3656 new_rtx = gen_rtx_CONST (Pmode, tls_call);
3657 new_rtx = force_const_mem (Pmode, new_rtx);
3658 emit_move_insn (r2, new_rtx);
3659 s390_emit_tls_call_insn (r2, tls_call);
3660 insn = get_insns ();
3661 end_sequence ();
3662
3663 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3664 temp = gen_reg_rtx (Pmode);
3665 emit_libcall_block (insn, temp, r2, new_rtx);
3666
3667 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3668 if (reg != 0)
3669 {
3670 s390_load_address (reg, new_rtx);
3671 new_rtx = reg;
3672 }
3673 break;
3674
3675 case TLS_MODEL_LOCAL_DYNAMIC:
3676 start_sequence ();
3677 r2 = gen_rtx_REG (Pmode, 2);
3678 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM);
3679 new_rtx = gen_rtx_CONST (Pmode, tls_call);
3680 new_rtx = force_const_mem (Pmode, new_rtx);
3681 emit_move_insn (r2, new_rtx);
3682 s390_emit_tls_call_insn (r2, tls_call);
3683 insn = get_insns ();
3684 end_sequence ();
3685
3686 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM_NTPOFF);
3687 temp = gen_reg_rtx (Pmode);
3688 emit_libcall_block (insn, temp, r2, new_rtx);
3689
3690 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3691 base = gen_reg_rtx (Pmode);
3692 s390_load_address (base, new_rtx);
3693
3694 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_DTPOFF);
3695 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3696 new_rtx = force_const_mem (Pmode, new_rtx);
3697 temp = gen_reg_rtx (Pmode);
3698 emit_move_insn (temp, new_rtx);
3699
3700 new_rtx = gen_rtx_PLUS (Pmode, base, temp);
3701 if (reg != 0)
3702 {
3703 s390_load_address (reg, new_rtx);
3704 new_rtx = reg;
3705 }
3706 break;
3707
3708 case TLS_MODEL_INITIAL_EXEC:
3709 if (flag_pic == 1)
3710 {
3711 /* Assume GOT offset < 4k. This is handled the same way
3712 in both 31- and 64-bit code. */
3713
3714 if (reload_in_progress || reload_completed)
3715 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3716
3717 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3718 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3719 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
3720 new_rtx = gen_const_mem (Pmode, new_rtx);
3721 temp = gen_reg_rtx (Pmode);
3722 emit_move_insn (temp, new_rtx);
3723 }
3724 else if (TARGET_CPU_ZARCH)
3725 {
3726 /* If the GOT offset might be >= 4k, we determine the position
3727 of the GOT entry via a PC-relative LARL. */
3728
3729 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3730 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3731 temp = gen_reg_rtx (Pmode);
3732 emit_move_insn (temp, new_rtx);
3733
3734 new_rtx = gen_const_mem (Pmode, temp);
3735 temp = gen_reg_rtx (Pmode);
3736 emit_move_insn (temp, new_rtx);
3737 }
3738 else if (flag_pic)
3739 {
3740 /* If the GOT offset might be >= 4k, we have to load it
3741 from the literal pool. */
3742
3743 if (reload_in_progress || reload_completed)
3744 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3745
3746 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3747 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3748 new_rtx = force_const_mem (Pmode, new_rtx);
3749 temp = gen_reg_rtx (Pmode);
3750 emit_move_insn (temp, new_rtx);
3751
3752 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3753 new_rtx = gen_const_mem (Pmode, new_rtx);
3754
3755 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
3756 temp = gen_reg_rtx (Pmode);
3757 emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
3758 }
3759 else
3760 {
3761 /* In position-dependent code, load the absolute address of
3762 the GOT entry from the literal pool. */
3763
3764 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3765 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3766 new_rtx = force_const_mem (Pmode, new_rtx);
3767 temp = gen_reg_rtx (Pmode);
3768 emit_move_insn (temp, new_rtx);
3769
3770 new_rtx = temp;
3771 new_rtx = gen_const_mem (Pmode, new_rtx);
3772 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
3773 temp = gen_reg_rtx (Pmode);
3774 emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
3775 }
3776
3777 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3778 if (reg != 0)
3779 {
3780 s390_load_address (reg, new_rtx);
3781 new_rtx = reg;
3782 }
3783 break;
3784
3785 case TLS_MODEL_LOCAL_EXEC:
3786 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3787 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3788 new_rtx = force_const_mem (Pmode, new_rtx);
3789 temp = gen_reg_rtx (Pmode);
3790 emit_move_insn (temp, new_rtx);
3791
3792 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3793 if (reg != 0)
3794 {
3795 s390_load_address (reg, new_rtx);
3796 new_rtx = reg;
3797 }
3798 break;
3799
3800 default:
3801 gcc_unreachable ();
3802 }
3803
3804 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == UNSPEC)
3805 {
3806 switch (XINT (XEXP (addr, 0), 1))
3807 {
3808 case UNSPEC_INDNTPOFF:
3809 gcc_assert (TARGET_CPU_ZARCH);
3810 new_rtx = addr;
3811 break;
3812
3813 default:
3814 gcc_unreachable ();
3815 }
3816 }
3817
3818 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
3819 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
3820 {
3821 new_rtx = XEXP (XEXP (addr, 0), 0);
3822 if (GET_CODE (new_rtx) != SYMBOL_REF)
3823 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3824
3825 new_rtx = legitimize_tls_address (new_rtx, reg);
3826 new_rtx = plus_constant (new_rtx, INTVAL (XEXP (XEXP (addr, 0), 1)));
3827 new_rtx = force_operand (new_rtx, 0);
3828 }
3829
3830 else
3831 gcc_unreachable (); /* for now ... */
3832
3833 return new_rtx;
3834 }
3835
3836 /* Emit insns making the address in operands[1] valid for a standard
3837 move to operands[0]. operands[1] is replaced by an address which
3838 should be used instead of the former RTX to emit the move
3839 pattern. */
3840
3841 void
3842 emit_symbolic_move (rtx *operands)
3843 {
3844 rtx temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
3845
3846 if (GET_CODE (operands[0]) == MEM)
3847 operands[1] = force_reg (Pmode, operands[1]);
3848 else if (TLS_SYMBOLIC_CONST (operands[1]))
3849 operands[1] = legitimize_tls_address (operands[1], temp);
3850 else if (flag_pic)
3851 operands[1] = legitimize_pic_address (operands[1], temp);
3852 }
3853
3854 /* Try machine-dependent ways of modifying an illegitimate address X
3855 to be legitimate. If we find one, return the new, valid address.
3856
3857 OLDX is the address as it was before break_out_memory_refs was called.
3858 In some cases it is useful to look at this to decide what needs to be done.
3859
3860 MODE is the mode of the operand pointed to by X.
3861
3862 When -fpic is used, special handling is needed for symbolic references.
3863 See comments by legitimize_pic_address for details. */
3864
3865 static rtx
3866 s390_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
3867 enum machine_mode mode ATTRIBUTE_UNUSED)
3868 {
3869 rtx constant_term = const0_rtx;
3870
3871 if (TLS_SYMBOLIC_CONST (x))
3872 {
3873 x = legitimize_tls_address (x, 0);
3874
3875 if (s390_legitimate_address_p (mode, x, FALSE))
3876 return x;
3877 }
3878 else if (GET_CODE (x) == PLUS
3879 && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
3880 || TLS_SYMBOLIC_CONST (XEXP (x, 1))))
3881 {
3882 return x;
3883 }
3884 else if (flag_pic)
3885 {
3886 if (SYMBOLIC_CONST (x)
3887 || (GET_CODE (x) == PLUS
3888 && (SYMBOLIC_CONST (XEXP (x, 0))
3889 || SYMBOLIC_CONST (XEXP (x, 1)))))
3890 x = legitimize_pic_address (x, 0);
3891
3892 if (s390_legitimate_address_p (mode, x, FALSE))
3893 return x;
3894 }
3895
3896 x = eliminate_constant_term (x, &constant_term);
3897
3898 /* Optimize loading of large displacements by splitting them
3899 into the multiple of 4K and the rest; this allows the
3900 former to be CSE'd if possible.
3901
3902 Don't do this if the displacement is added to a register
3903 pointing into the stack frame, as the offsets will
3904 change later anyway. */
3905
3906 if (GET_CODE (constant_term) == CONST_INT
3907 && !TARGET_LONG_DISPLACEMENT
3908 && !DISP_IN_RANGE (INTVAL (constant_term))
3909 && !(REG_P (x) && REGNO_PTR_FRAME_P (REGNO (x))))
3910 {
3911 HOST_WIDE_INT lower = INTVAL (constant_term) & 0xfff;
3912 HOST_WIDE_INT upper = INTVAL (constant_term) ^ lower;
3913
3914 rtx temp = gen_reg_rtx (Pmode);
3915 rtx val = force_operand (GEN_INT (upper), temp);
3916 if (val != temp)
3917 emit_move_insn (temp, val);
3918
3919 x = gen_rtx_PLUS (Pmode, x, temp);
3920 constant_term = GEN_INT (lower);
3921 }
3922
3923 if (GET_CODE (x) == PLUS)
3924 {
3925 if (GET_CODE (XEXP (x, 0)) == REG)
3926 {
3927 rtx temp = gen_reg_rtx (Pmode);
3928 rtx val = force_operand (XEXP (x, 1), temp);
3929 if (val != temp)
3930 emit_move_insn (temp, val);
3931
3932 x = gen_rtx_PLUS (Pmode, XEXP (x, 0), temp);
3933 }
3934
3935 else if (GET_CODE (XEXP (x, 1)) == REG)
3936 {
3937 rtx temp = gen_reg_rtx (Pmode);
3938 rtx val = force_operand (XEXP (x, 0), temp);
3939 if (val != temp)
3940 emit_move_insn (temp, val);
3941
3942 x = gen_rtx_PLUS (Pmode, temp, XEXP (x, 1));
3943 }
3944 }
3945
3946 if (constant_term != const0_rtx)
3947 x = gen_rtx_PLUS (Pmode, x, constant_term);
3948
3949 return x;
3950 }
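
/* For illustration: without TARGET_LONG_DISPLACEMENT, an address such as
   base + 0x12345 is split above into lower = 0x345 and upper = 0x12000;
   the upper part is loaded into a new pseudo (and so becomes a CSE
   candidate), while the lower part stays behind as a 12-bit displacement,
   giving roughly (plus (plus base upper_reg) 0x345).  The split is skipped
   for registers pointing into the stack frame, as noted above.  */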
3951
3952 /* Try a machine-dependent way of reloading an illegitimate address AD
3953 operand. If we find one, push the reload and return the new address.
3954
3955 MODE is the mode of the enclosing MEM. OPNUM is the operand number
3956 and TYPE is the reload type of the current reload. */
3957
3958 rtx
3959 legitimize_reload_address (rtx ad, enum machine_mode mode ATTRIBUTE_UNUSED,
3960 int opnum, int type)
3961 {
3962 if (!optimize || TARGET_LONG_DISPLACEMENT)
3963 return NULL_RTX;
3964
3965 if (GET_CODE (ad) == PLUS)
3966 {
3967 rtx tem = simplify_binary_operation (PLUS, Pmode,
3968 XEXP (ad, 0), XEXP (ad, 1));
3969 if (tem)
3970 ad = tem;
3971 }
3972
3973 if (GET_CODE (ad) == PLUS
3974 && GET_CODE (XEXP (ad, 0)) == REG
3975 && GET_CODE (XEXP (ad, 1)) == CONST_INT
3976 && !DISP_IN_RANGE (INTVAL (XEXP (ad, 1))))
3977 {
3978 HOST_WIDE_INT lower = INTVAL (XEXP (ad, 1)) & 0xfff;
3979 HOST_WIDE_INT upper = INTVAL (XEXP (ad, 1)) ^ lower;
3980 rtx cst, tem, new_rtx;
3981
3982 cst = GEN_INT (upper);
3983 if (!legitimate_reload_constant_p (cst))
3984 cst = force_const_mem (Pmode, cst);
3985
3986 tem = gen_rtx_PLUS (Pmode, XEXP (ad, 0), cst);
3987 new_rtx = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));
3988
3989 push_reload (XEXP (tem, 1), 0, &XEXP (tem, 1), 0,
3990 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
3991 opnum, (enum reload_type) type);
3992 return new_rtx;
3993 }
3994
3995 return NULL_RTX;
3996 }
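
/* Sketch of the reload case above: for an address (plus (reg) (const_int
   0x12345)) whose displacement exceeds the short range, the sum is
   rewritten as ((reg + 0x12000) + 0x345); a reload of the 0x12000 part
   into an address register is pushed (or the constant is forced to the
   literal pool if it is not a legitimate reload constant), so that only
   the 12-bit part 0x345 remains as the displacement.  */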
3997
3998 /* Emit code to copy LEN bytes from SRC to DST. */
3999
4000 void
4001 s390_expand_movmem (rtx dst, rtx src, rtx len)
4002 {
4003 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
4004 {
4005 if (INTVAL (len) > 0)
4006 emit_insn (gen_movmem_short (dst, src, GEN_INT (INTVAL (len) - 1)));
4007 }
4008
4009 else if (TARGET_MVCLE)
4010 {
4011 emit_insn (gen_movmem_long (dst, src, convert_to_mode (Pmode, len, 1)));
4012 }
4013
4014 else
4015 {
4016 rtx dst_addr, src_addr, count, blocks, temp;
4017 rtx loop_start_label = gen_label_rtx ();
4018 rtx loop_end_label = gen_label_rtx ();
4019 rtx end_label = gen_label_rtx ();
4020 enum machine_mode mode;
4021
4022 mode = GET_MODE (len);
4023 if (mode == VOIDmode)
4024 mode = Pmode;
4025
4026 dst_addr = gen_reg_rtx (Pmode);
4027 src_addr = gen_reg_rtx (Pmode);
4028 count = gen_reg_rtx (mode);
4029 blocks = gen_reg_rtx (mode);
4030
4031 convert_move (count, len, 1);
4032 emit_cmp_and_jump_insns (count, const0_rtx,
4033 EQ, NULL_RTX, mode, 1, end_label);
4034
4035 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
4036 emit_move_insn (src_addr, force_operand (XEXP (src, 0), NULL_RTX));
4037 dst = change_address (dst, VOIDmode, dst_addr);
4038 src = change_address (src, VOIDmode, src_addr);
4039
4040 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4041 OPTAB_DIRECT);
4042 if (temp != count)
4043 emit_move_insn (count, temp);
4044
4045 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4046 OPTAB_DIRECT);
4047 if (temp != blocks)
4048 emit_move_insn (blocks, temp);
4049
4050 emit_cmp_and_jump_insns (blocks, const0_rtx,
4051 EQ, NULL_RTX, mode, 1, loop_end_label);
4052
4053 emit_label (loop_start_label);
4054
4055 if (TARGET_Z10
4056 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 768))
4057 {
4058 rtx prefetch;
4059
4060 /* Issue a read prefetch for the +3 cache line. */
4061 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, src_addr, GEN_INT (768)),
4062 const0_rtx, const0_rtx);
4063 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4064 emit_insn (prefetch);
4065
4066 /* Issue a write prefetch for the +3 cache line. */
4067 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (768)),
4068 const1_rtx, const0_rtx);
4069 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4070 emit_insn (prefetch);
4071 }
4072
4073 emit_insn (gen_movmem_short (dst, src, GEN_INT (255)));
4074 s390_load_address (dst_addr,
4075 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
4076 s390_load_address (src_addr,
4077 gen_rtx_PLUS (Pmode, src_addr, GEN_INT (256)));
4078
4079 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4080 OPTAB_DIRECT);
4081 if (temp != blocks)
4082 emit_move_insn (blocks, temp);
4083
4084 emit_cmp_and_jump_insns (blocks, const0_rtx,
4085 EQ, NULL_RTX, mode, 1, loop_end_label);
4086
4087 emit_jump (loop_start_label);
4088 emit_label (loop_end_label);
4089
4090 emit_insn (gen_movmem_short (dst, src,
4091 convert_to_mode (Pmode, count, 1)));
4092 emit_label (end_label);
4093 }
4094 }
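
/* For a variable LEN the expansion above behaves roughly like the
   following pseudo-C sketch (illustration only, not part of the build):

     count = len - 1;
     blocks = count >> 8;
     while (blocks-- > 0)
       { mvc (dst, src, 256); dst += 256; src += 256; }
     mvc (dst, src, (count & 0xff) + 1);    residual bytes, via an EXecuted MVC

   On z10, read and write prefetches are issued three cache lines
   (768 bytes) ahead of the loop body, as the comments above note.  */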
4095
4096 /* Emit code to set LEN bytes at DST to VAL.
4097 Make use of clrmem if VAL is zero. */
4098
4099 void
4100 s390_expand_setmem (rtx dst, rtx len, rtx val)
4101 {
4102 if (GET_CODE (len) == CONST_INT && INTVAL (len) == 0)
4103 return;
4104
4105 gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);
4106
4107 if (GET_CODE (len) == CONST_INT && INTVAL (len) > 0 && INTVAL (len) <= 257)
4108 {
4109 if (val == const0_rtx && INTVAL (len) <= 256)
4110 emit_insn (gen_clrmem_short (dst, GEN_INT (INTVAL (len) - 1)));
4111 else
4112 {
4113 /* Initialize memory by storing the first byte. */
4114 emit_move_insn (adjust_address (dst, QImode, 0), val);
4115
4116 if (INTVAL (len) > 1)
4117 {
4118 /* Initiate a 1-byte overlapping move.
4119 The first byte of DST is propagated through DSTP1.
4120 Prepare a movmem for: DST+1 = DST (length = LEN - 1).
4121 DST is set to size 1 so the rest of the memory location
4122 does not count as a source operand. */
4123 rtx dstp1 = adjust_address (dst, VOIDmode, 1);
4124 set_mem_size (dst, const1_rtx);
4125
4126 emit_insn (gen_movmem_short (dstp1, dst,
4127 GEN_INT (INTVAL (len) - 2)));
4128 }
4129 }
4130 }
4131
4132 else if (TARGET_MVCLE)
4133 {
4134 val = force_not_mem (convert_modes (Pmode, QImode, val, 1));
4135 emit_insn (gen_setmem_long (dst, convert_to_mode (Pmode, len, 1), val));
4136 }
4137
4138 else
4139 {
4140 rtx dst_addr, count, blocks, temp, dstp1 = NULL_RTX;
4141 rtx loop_start_label = gen_label_rtx ();
4142 rtx loop_end_label = gen_label_rtx ();
4143 rtx end_label = gen_label_rtx ();
4144 enum machine_mode mode;
4145
4146 mode = GET_MODE (len);
4147 if (mode == VOIDmode)
4148 mode = Pmode;
4149
4150 dst_addr = gen_reg_rtx (Pmode);
4151 count = gen_reg_rtx (mode);
4152 blocks = gen_reg_rtx (mode);
4153
4154 convert_move (count, len, 1);
4155 emit_cmp_and_jump_insns (count, const0_rtx,
4156 EQ, NULL_RTX, mode, 1, end_label);
4157
4158 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
4159 dst = change_address (dst, VOIDmode, dst_addr);
4160
4161 if (val == const0_rtx)
4162 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4163 OPTAB_DIRECT);
4164 else
4165 {
4166 dstp1 = adjust_address (dst, VOIDmode, 1);
4167 set_mem_size (dst, const1_rtx);
4168
4169 /* Initialize memory by storing the first byte. */
4170 emit_move_insn (adjust_address (dst, QImode, 0), val);
4171
4172 /* If count is 1 we are done. */
4173 emit_cmp_and_jump_insns (count, const1_rtx,
4174 EQ, NULL_RTX, mode, 1, end_label);
4175
4176 temp = expand_binop (mode, add_optab, count, GEN_INT (-2), count, 1,
4177 OPTAB_DIRECT);
4178 }
4179 if (temp != count)
4180 emit_move_insn (count, temp);
4181
4182 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4183 OPTAB_DIRECT);
4184 if (temp != blocks)
4185 emit_move_insn (blocks, temp);
4186
4187 emit_cmp_and_jump_insns (blocks, const0_rtx,
4188 EQ, NULL_RTX, mode, 1, loop_end_label);
4189
4190 emit_label (loop_start_label);
4191
4192 if (TARGET_Z10
4193 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 1024))
4194 {
4195 /* Issue a write prefetch for the +4 cache line. */
4196 rtx prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr,
4197 GEN_INT (1024)),
4198 const1_rtx, const0_rtx);
4199 emit_insn (prefetch);
4200 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4201 }
4202
4203 if (val == const0_rtx)
4204 emit_insn (gen_clrmem_short (dst, GEN_INT (255)));
4205 else
4206 emit_insn (gen_movmem_short (dstp1, dst, GEN_INT (255)));
4207 s390_load_address (dst_addr,
4208 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
4209
4210 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4211 OPTAB_DIRECT);
4212 if (temp != blocks)
4213 emit_move_insn (blocks, temp);
4214
4215 emit_cmp_and_jump_insns (blocks, const0_rtx,
4216 EQ, NULL_RTX, mode, 1, loop_end_label);
4217
4218 emit_jump (loop_start_label);
4219 emit_label (loop_end_label);
4220
4221 if (val == const0_rtx)
4222 emit_insn (gen_clrmem_short (dst, convert_to_mode (Pmode, count, 1)));
4223 else
4224 emit_insn (gen_movmem_short (dstp1, dst, convert_to_mode (Pmode, count, 1)));
4225 emit_label (end_label);
4226 }
4227 }
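
/* Illustration of the overlapping-move trick used above: for LEN = 5 and
   VAL = 'x', the initial store makes DST "x????"; the following
   MVC DST+1,DST of length 4 copies byte by byte from left to right, so
   each byte just written becomes the source of the next one, leaving
   "xxxxx" in DST.  */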
4228
4229 /* Emit code to compare LEN bytes at OP0 with those at OP1,
4230 and return the result in TARGET. */
4231
4232 void
4233 s390_expand_cmpmem (rtx target, rtx op0, rtx op1, rtx len)
4234 {
4235 rtx ccreg = gen_rtx_REG (CCUmode, CC_REGNUM);
4236 rtx tmp;
4237
4238 /* As the result of CMPINT is inverted compared to what we need,
4239 we have to swap the operands. */
4240 tmp = op0; op0 = op1; op1 = tmp;
4241
4242 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
4243 {
4244 if (INTVAL (len) > 0)
4245 {
4246 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (INTVAL (len) - 1)));
4247 emit_insn (gen_cmpint (target, ccreg));
4248 }
4249 else
4250 emit_move_insn (target, const0_rtx);
4251 }
4252 else if (TARGET_MVCLE)
4253 {
4254 emit_insn (gen_cmpmem_long (op0, op1, convert_to_mode (Pmode, len, 1)));
4255 emit_insn (gen_cmpint (target, ccreg));
4256 }
4257 else
4258 {
4259 rtx addr0, addr1, count, blocks, temp;
4260 rtx loop_start_label = gen_label_rtx ();
4261 rtx loop_end_label = gen_label_rtx ();
4262 rtx end_label = gen_label_rtx ();
4263 enum machine_mode mode;
4264
4265 mode = GET_MODE (len);
4266 if (mode == VOIDmode)
4267 mode = Pmode;
4268
4269 addr0 = gen_reg_rtx (Pmode);
4270 addr1 = gen_reg_rtx (Pmode);
4271 count = gen_reg_rtx (mode);
4272 blocks = gen_reg_rtx (mode);
4273
4274 convert_move (count, len, 1);
4275 emit_cmp_and_jump_insns (count, const0_rtx,
4276 EQ, NULL_RTX, mode, 1, end_label);
4277
4278 emit_move_insn (addr0, force_operand (XEXP (op0, 0), NULL_RTX));
4279 emit_move_insn (addr1, force_operand (XEXP (op1, 0), NULL_RTX));
4280 op0 = change_address (op0, VOIDmode, addr0);
4281 op1 = change_address (op1, VOIDmode, addr1);
4282
4283 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4284 OPTAB_DIRECT);
4285 if (temp != count)
4286 emit_move_insn (count, temp);
4287
4288 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4289 OPTAB_DIRECT);
4290 if (temp != blocks)
4291 emit_move_insn (blocks, temp);
4292
4293 emit_cmp_and_jump_insns (blocks, const0_rtx,
4294 EQ, NULL_RTX, mode, 1, loop_end_label);
4295
4296 emit_label (loop_start_label);
4297
4298 if (TARGET_Z10
4299 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 512))
4300 {
4301 rtx prefetch;
4302
4303 /* Issue a read prefetch for the +2 cache line of operand 1. */
4304 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr0, GEN_INT (512)),
4305 const0_rtx, const0_rtx);
4306 emit_insn (prefetch);
4307 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4308
4309 /* Issue a read prefetch for the +2 cache line of operand 2. */
4310 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr1, GEN_INT (512)),
4311 const0_rtx, const0_rtx);
4312 emit_insn (prefetch);
4313 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4314 }
4315
4316 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (255)));
4317 temp = gen_rtx_NE (VOIDmode, ccreg, const0_rtx);
4318 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
4319 gen_rtx_LABEL_REF (VOIDmode, end_label), pc_rtx);
4320 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
4321 emit_jump_insn (temp);
4322
4323 s390_load_address (addr0,
4324 gen_rtx_PLUS (Pmode, addr0, GEN_INT (256)));
4325 s390_load_address (addr1,
4326 gen_rtx_PLUS (Pmode, addr1, GEN_INT (256)));
4327
4328 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4329 OPTAB_DIRECT);
4330 if (temp != blocks)
4331 emit_move_insn (blocks, temp);
4332
4333 emit_cmp_and_jump_insns (blocks, const0_rtx,
4334 EQ, NULL_RTX, mode, 1, loop_end_label);
4335
4336 emit_jump (loop_start_label);
4337 emit_label (loop_end_label);
4338
4339 emit_insn (gen_cmpmem_short (op0, op1,
4340 convert_to_mode (Pmode, count, 1)));
4341 emit_label (end_label);
4342
4343 emit_insn (gen_cmpint (target, ccreg));
4344 }
4345 }
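
/* Note on the loop above: each 256-byte CLC leaves its result in the
   condition code, and the conditional jump to END_LABEL exits early as
   soon as a difference is found.  The final gen_cmpint converts the CC
   into an ordinary integer result usable like memcmp's return value;
   because that conversion is inverted relative to what we need, OP0 and
   OP1 were swapped at the top of the function.  */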
4346
4347
4348 /* Expand conditional increment or decrement using alc/slb instructions.
4349 Should generate code setting DST to either SRC or SRC + INCREMENT,
4350 depending on the result of the comparison CMP_OP0 CMP_CODE CMP_OP1.
4351 Returns true if successful, false otherwise.
4352
4353 That makes it possible to implement some if-constructs without jumps e.g.:
4354 (borrow = CC0 | CC1 and carry = CC2 | CC3)
4355 unsigned int a, b, c;
4356 if (a < b) c++; -> CCU b > a -> CC2; c += carry;
4357 if (a < b) c--; -> CCL3 a - b -> borrow; c -= borrow;
4358 if (a <= b) c++; -> CCL3 b - a -> borrow; c += carry;
4359 if (a <= b) c--; -> CCU a <= b -> borrow; c -= borrow;
4360
4361 Checks for EQ and NE with a nonzero value need an additional xor e.g.:
4362 if (a == b) c++; -> CCL3 a ^= b; 0 - a -> borrow; c += carry;
4363 if (a == b) c--; -> CCU a ^= b; a <= 0 -> CC0 | CC1; c -= borrow;
4364 if (a != b) c++; -> CCU a ^= b; a > 0 -> CC2; c += carry;
4365 if (a != b) c--; -> CCL3 a ^= b; 0 - a -> borrow; c -= borrow; */
4366
4367 bool
4368 s390_expand_addcc (enum rtx_code cmp_code, rtx cmp_op0, rtx cmp_op1,
4369 rtx dst, rtx src, rtx increment)
4370 {
4371 enum machine_mode cmp_mode;
4372 enum machine_mode cc_mode;
4373 rtx op_res;
4374 rtx insn;
4375 rtvec p;
4376 int ret;
4377
4378 if ((GET_MODE (cmp_op0) == SImode || GET_MODE (cmp_op0) == VOIDmode)
4379 && (GET_MODE (cmp_op1) == SImode || GET_MODE (cmp_op1) == VOIDmode))
4380 cmp_mode = SImode;
4381 else if ((GET_MODE (cmp_op0) == DImode || GET_MODE (cmp_op0) == VOIDmode)
4382 && (GET_MODE (cmp_op1) == DImode || GET_MODE (cmp_op1) == VOIDmode))
4383 cmp_mode = DImode;
4384 else
4385 return false;
4386
4387 /* Try ADD LOGICAL WITH CARRY. */
4388 if (increment == const1_rtx)
4389 {
4390 /* Determine CC mode to use. */
4391 if (cmp_code == EQ || cmp_code == NE)
4392 {
4393 if (cmp_op1 != const0_rtx)
4394 {
4395 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
4396 NULL_RTX, 0, OPTAB_WIDEN);
4397 cmp_op1 = const0_rtx;
4398 }
4399
4400 cmp_code = cmp_code == EQ ? LEU : GTU;
4401 }
4402
4403 if (cmp_code == LTU || cmp_code == LEU)
4404 {
4405 rtx tem = cmp_op0;
4406 cmp_op0 = cmp_op1;
4407 cmp_op1 = tem;
4408 cmp_code = swap_condition (cmp_code);
4409 }
4410
4411 switch (cmp_code)
4412 {
4413 case GTU:
4414 cc_mode = CCUmode;
4415 break;
4416
4417 case GEU:
4418 cc_mode = CCL3mode;
4419 break;
4420
4421 default:
4422 return false;
4423 }
4424
4425 /* Emit comparison instruction pattern. */
4426 if (!register_operand (cmp_op0, cmp_mode))
4427 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4428
4429 insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
4430 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4431 /* We use insn_invalid_p here to add clobbers if required. */
4432 ret = insn_invalid_p (emit_insn (insn));
4433 gcc_assert (!ret);
4434
4435 /* Emit ALC instruction pattern. */
4436 op_res = gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4437 gen_rtx_REG (cc_mode, CC_REGNUM),
4438 const0_rtx);
4439
4440 if (src != const0_rtx)
4441 {
4442 if (!register_operand (src, GET_MODE (dst)))
4443 src = force_reg (GET_MODE (dst), src);
4444
4445 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, src);
4446 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, const0_rtx);
4447 }
4448
4449 p = rtvec_alloc (2);
4450 RTVEC_ELT (p, 0) =
4451 gen_rtx_SET (VOIDmode, dst, op_res);
4452 RTVEC_ELT (p, 1) =
4453 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4454 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
4455
4456 return true;
4457 }
4458
4459 /* Try SUBTRACT LOGICAL WITH BORROW. */
4460 if (increment == constm1_rtx)
4461 {
4462 /* Determine CC mode to use. */
4463 if (cmp_code == EQ || cmp_code == NE)
4464 {
4465 if (cmp_op1 != const0_rtx)
4466 {
4467 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
4468 NULL_RTX, 0, OPTAB_WIDEN);
4469 cmp_op1 = const0_rtx;
4470 }
4471
4472 cmp_code = cmp_code == EQ ? LEU : GTU;
4473 }
4474
4475 if (cmp_code == GTU || cmp_code == GEU)
4476 {
4477 rtx tem = cmp_op0;
4478 cmp_op0 = cmp_op1;
4479 cmp_op1 = tem;
4480 cmp_code = swap_condition (cmp_code);
4481 }
4482
4483 switch (cmp_code)
4484 {
4485 case LEU:
4486 cc_mode = CCUmode;
4487 break;
4488
4489 case LTU:
4490 cc_mode = CCL3mode;
4491 break;
4492
4493 default:
4494 return false;
4495 }
4496
4497 /* Emit comparison instruction pattern. */
4498 if (!register_operand (cmp_op0, cmp_mode))
4499 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4500
4501 insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
4502 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4503 /* We use insn_invalid_p here to add clobbers if required. */
4504 ret = insn_invalid_p (emit_insn (insn));
4505 gcc_assert (!ret);
4506
4507 /* Emit SLB instruction pattern. */
4508 if (!register_operand (src, GET_MODE (dst)))
4509 src = force_reg (GET_MODE (dst), src);
4510
4511 op_res = gen_rtx_MINUS (GET_MODE (dst),
4512 gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
4513 gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4514 gen_rtx_REG (cc_mode, CC_REGNUM),
4515 const0_rtx));
4516 p = rtvec_alloc (2);
4517 RTVEC_ELT (p, 0) =
4518 gen_rtx_SET (VOIDmode, dst, op_res);
4519 RTVEC_ELT (p, 1) =
4520 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4521 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
4522
4523 return true;
4524 }
4525
4526 return false;
4527 }
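
/* Rough shape of the RTL emitted for "if (a < b) c++;" with unsigned
   operands (illustration only):

     (set (reg CCU) (compare:CCU b a))
     (parallel [(set c (plus (plus (gtu (reg CCU) (const_int 0)) c)
                             (const_int 0)))
                (clobber (reg CC))])

   i.e. the carry produced by the unsigned compare is added in via an
   ALC-style pattern instead of a branch.  */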
4528
4529 /* Expand code for the insv template. Return true if successful. */
4530
4531 bool
4532 s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
4533 {
4534 int bitsize = INTVAL (op1);
4535 int bitpos = INTVAL (op2);
4536
4537 /* On z10 we can use the risbg instruction to implement insv. */
4538 if (TARGET_Z10
4539 && ((GET_MODE (dest) == DImode && GET_MODE (src) == DImode)
4540 || (GET_MODE (dest) == SImode && GET_MODE (src) == SImode)))
4541 {
4542 rtx op;
4543 rtx clobber;
4544
4545 op = gen_rtx_SET (GET_MODE(src),
4546 gen_rtx_ZERO_EXTRACT (GET_MODE (dest), dest, op1, op2),
4547 src);
4548 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4549 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
4550
4551 return true;
4552 }
4553
4554 /* We need byte alignment. */
4555 if (bitsize % BITS_PER_UNIT)
4556 return false;
4557
4558 if (bitpos == 0
4559 && memory_operand (dest, VOIDmode)
4560 && (register_operand (src, word_mode)
4561 || const_int_operand (src, VOIDmode)))
4562 {
4563 /* Emit standard pattern if possible. */
4564 enum machine_mode mode = smallest_mode_for_size (bitsize, MODE_INT);
4565 if (GET_MODE_BITSIZE (mode) == bitsize)
4566 emit_move_insn (adjust_address (dest, mode, 0), gen_lowpart (mode, src));
4567
4568 /* (set (ze (mem)) (const_int)). */
4569 else if (const_int_operand (src, VOIDmode))
4570 {
4571 int size = bitsize / BITS_PER_UNIT;
4572 rtx src_mem = adjust_address (force_const_mem (word_mode, src), BLKmode,
4573 GET_MODE_SIZE (word_mode) - size);
4574
4575 dest = adjust_address (dest, BLKmode, 0);
4576 set_mem_size (dest, GEN_INT (size));
4577 s390_expand_movmem (dest, src_mem, GEN_INT (size));
4578 }
4579
4580 /* (set (ze (mem)) (reg)). */
4581 else if (register_operand (src, word_mode))
4582 {
4583 if (bitsize <= GET_MODE_BITSIZE (SImode))
4584 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, op1,
4585 const0_rtx), src);
4586 else
4587 {
4588 /* Emit st,stcmh sequence. */
4589 int stcmh_width = bitsize - GET_MODE_BITSIZE (SImode);
4590 int size = stcmh_width / BITS_PER_UNIT;
4591
4592 emit_move_insn (adjust_address (dest, SImode, size),
4593 gen_lowpart (SImode, src));
4594 set_mem_size (dest, GEN_INT (size));
4595 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, GEN_INT
4596 (stcmh_width), const0_rtx),
4597 gen_rtx_LSHIFTRT (word_mode, src, GEN_INT
4598 (GET_MODE_BITSIZE (SImode))));
4599 }
4600 }
4601 else
4602 return false;
4603
4604 return true;
4605 }
4606
4607 /* (set (ze (reg)) (const_int)). */
4608 if (TARGET_ZARCH
4609 && register_operand (dest, word_mode)
4610 && (bitpos % 16) == 0
4611 && (bitsize % 16) == 0
4612 && const_int_operand (src, VOIDmode))
4613 {
4614 HOST_WIDE_INT val = INTVAL (src);
4615 int regpos = bitpos + bitsize;
4616
4617 while (regpos > bitpos)
4618 {
4619 enum machine_mode putmode;
4620 int putsize;
4621
4622 if (TARGET_EXTIMM && (regpos % 32 == 0) && (regpos >= bitpos + 32))
4623 putmode = SImode;
4624 else
4625 putmode = HImode;
4626
4627 putsize = GET_MODE_BITSIZE (putmode);
4628 regpos -= putsize;
4629 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
4630 GEN_INT (putsize),
4631 GEN_INT (regpos)),
4632 gen_int_mode (val, putmode));
4633 val >>= putsize;
4634 }
4635 gcc_assert (regpos == bitpos);
4636 return true;
4637 }
4638
4639 return false;
4640 }
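
/* Example for the constant case handled last: inserting a 32-bit
   constant at BITPOS 0 (the most significant half of a 64-bit register)
   with TARGET_EXTIMM is emitted as a single SImode ZERO_EXTRACT move at
   bit position 0, which likely assembles to an insert-immediate
   instruction such as iihf; without the 32-bit condition the field is
   filled in 16-bit pieces from the least significant end upwards.  */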
4641
4642 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic which returns a
4643 register that holds VAL of mode MODE shifted by COUNT bits. */
4644
4645 static inline rtx
4646 s390_expand_mask_and_shift (rtx val, enum machine_mode mode, rtx count)
4647 {
4648 val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
4649 NULL_RTX, 1, OPTAB_DIRECT);
4650 return expand_simple_binop (SImode, ASHIFT, val, count,
4651 NULL_RTX, 1, OPTAB_DIRECT);
4652 }
4653
4654 /* Structure to hold the initial parameters for a compare_and_swap operation
4655 in HImode and QImode. */
4656
4657 struct alignment_context
4658 {
4659 rtx memsi; /* SI aligned memory location. */
4660 rtx shift; /* Bit offset with regard to lsb. */
4661 rtx modemask; /* Mask of the HQImode shifted by SHIFT bits. */
4662 rtx modemaski; /* ~modemask */
4663 bool aligned; /* True if memory is aligned, false otherwise. */
4664 };
4665
4666 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize
4667 structure AC for transparent simplification, if the memory alignment is known
4668 to be at least 32 bits. MEM is the memory location for the actual operation
4669 and MODE its mode. */
4670
4671 static void
4672 init_alignment_context (struct alignment_context *ac, rtx mem,
4673 enum machine_mode mode)
4674 {
4675 ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
4676 ac->aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));
4677
4678 if (ac->aligned)
4679 ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned. */
4680 else
4681 {
4682 /* Alignment is unknown. */
4683 rtx byteoffset, addr, align;
4684
4685 /* Force the address into a register. */
4686 addr = force_reg (Pmode, XEXP (mem, 0));
4687
4688 /* Align it to SImode. */
4689 align = expand_simple_binop (Pmode, AND, addr,
4690 GEN_INT (-GET_MODE_SIZE (SImode)),
4691 NULL_RTX, 1, OPTAB_DIRECT);
4692 /* Generate MEM. */
4693 ac->memsi = gen_rtx_MEM (SImode, align);
4694 MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
4695 set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
4696 set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));
4697
4698 /* Calculate shiftcount. */
4699 byteoffset = expand_simple_binop (Pmode, AND, addr,
4700 GEN_INT (GET_MODE_SIZE (SImode) - 1),
4701 NULL_RTX, 1, OPTAB_DIRECT);
4702 /* As we already have some offset, evaluate the remaining distance. */
4703 ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
4704 NULL_RTX, 1, OPTAB_DIRECT);
4705
4706 }
4707 /* Shift is the byte count, but we need the bitcount. */
4708 ac->shift = expand_simple_binop (SImode, MULT, ac->shift, GEN_INT (BITS_PER_UNIT),
4709 NULL_RTX, 1, OPTAB_DIRECT);
4710 /* Calculate masks. */
4711 ac->modemask = expand_simple_binop (SImode, ASHIFT,
4712 GEN_INT (GET_MODE_MASK (mode)), ac->shift,
4713 NULL_RTX, 1, OPTAB_DIRECT);
4714 ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask, NULL_RTX, 1);
4715 }
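
/* Worked example: for a QImode MEM at an unknown address A, MEMSI is the
   SImode word at A & -4, BYTEOFFSET is A & 3, and SHIFT becomes
   (3 - BYTEOFFSET) * 8.  A byte at offset 1 inside its word therefore
   gets SHIFT = 16 and MODEMASK = 0x00ff0000, matching the big-endian
   layout of the containing word.  */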
4716
4717 /* Expand an atomic compare and swap operation for HImode and QImode. MEM is
4718 the memory location, CMP the old value to compare MEM with and NEW_RTX the value
4719 to set if CMP == MEM.
4720 CMP is never in memory for compare_and_swap_cc because
4721 expand_bool_compare_and_swap puts it into a register for later compare. */
4722
4723 void
4724 s390_expand_cs_hqi (enum machine_mode mode, rtx target, rtx mem, rtx cmp, rtx new_rtx)
4725 {
4726 struct alignment_context ac;
4727 rtx cmpv, newv, val, resv, cc;
4728 rtx res = gen_reg_rtx (SImode);
4729 rtx csloop = gen_label_rtx ();
4730 rtx csend = gen_label_rtx ();
4731
4732 gcc_assert (register_operand (target, VOIDmode));
4733 gcc_assert (MEM_P (mem));
4734
4735 init_alignment_context (&ac, mem, mode);
4736
4737 /* Shift the values to the correct bit positions. */
4738 if (!(ac.aligned && MEM_P (cmp)))
4739 cmp = s390_expand_mask_and_shift (cmp, mode, ac.shift);
4740 if (!(ac.aligned && MEM_P (new_rtx)))
4741 new_rtx = s390_expand_mask_and_shift (new_rtx, mode, ac.shift);
4742
4743 /* Load full word. Subsequent loads are performed by CS. */
4744 val = expand_simple_binop (SImode, AND, ac.memsi, ac.modemaski,
4745 NULL_RTX, 1, OPTAB_DIRECT);
4746
4747 /* Start CS loop. */
4748 emit_label (csloop);
4749 /* val = "<mem>00..0<mem>"
4750 * cmp = "00..0<cmp>00..0"
4751 * new = "00..0<new>00..0"
4752 */
4753
4754 /* Patch cmp and new with val at correct position. */
4755 if (ac.aligned && MEM_P (cmp))
4756 {
4757 cmpv = force_reg (SImode, val);
4758 store_bit_field (cmpv, GET_MODE_BITSIZE (mode), 0, SImode, cmp);
4759 }
4760 else
4761 cmpv = force_reg (SImode, expand_simple_binop (SImode, IOR, cmp, val,
4762 NULL_RTX, 1, OPTAB_DIRECT));
4763 if (ac.aligned && MEM_P (new_rtx))
4764 {
4765 newv = force_reg (SImode, val);
4766 store_bit_field (newv, GET_MODE_BITSIZE (mode), 0, SImode, new_rtx);
4767 }
4768 else
4769 newv = force_reg (SImode, expand_simple_binop (SImode, IOR, new_rtx, val,
4770 NULL_RTX, 1, OPTAB_DIRECT));
4771
4772 /* Jump to end if we're done (likely?). */
4773 s390_emit_jump (csend, s390_emit_compare_and_swap (EQ, res, ac.memsi,
4774 cmpv, newv));
4775
4776 /* Check for changes outside mode. */
4777 resv = expand_simple_binop (SImode, AND, res, ac.modemaski,
4778 NULL_RTX, 1, OPTAB_DIRECT);
4779 cc = s390_emit_compare (NE, resv, val);
4780 emit_move_insn (val, resv);
4781 /* Loop internal if so. */
4782 s390_emit_jump (csloop, cc);
4783
4784 emit_label (csend);
4785
4786 /* Return the correct part of the bitfield. */
4787 convert_move (target, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
4788 NULL_RTX, 1, OPTAB_DIRECT), 1);
4789 }
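
/* Summary of the loop above: VAL holds the containing word with the
   HImode/QImode field zeroed, CMPV and NEWV are VAL with CMP
   respectively NEW_RTX merged into that field (by IOR or a bit-field
   store), and the COMPARE AND SWAP is retried not only when the CS
   itself fails but also when the bytes outside the field changed
   concurrently (the RESV/VAL comparison).  */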
4790
4791 /* Expand an atomic operation CODE of mode MODE. MEM is the memory location
4792 and VAL the value to play with. If AFTER is true then store the value
4793 MEM holds after the operation, if AFTER is false then store the value MEM
4794 holds before the operation. If TARGET is zero then discard that value, else
4795 store it to TARGET. */
4796
4797 void
4798 s390_expand_atomic (enum machine_mode mode, enum rtx_code code,
4799 rtx target, rtx mem, rtx val, bool after)
4800 {
4801 struct alignment_context ac;
4802 rtx cmp;
4803 rtx new_rtx = gen_reg_rtx (SImode);
4804 rtx orig = gen_reg_rtx (SImode);
4805 rtx csloop = gen_label_rtx ();
4806
4807 gcc_assert (!target || register_operand (target, VOIDmode));
4808 gcc_assert (MEM_P (mem));
4809
4810 init_alignment_context (&ac, mem, mode);
4811
4812 /* Shift val to the correct bit positions.
4813 Preserve "icm", but prevent "ex icm". */
4814 if (!(ac.aligned && code == SET && MEM_P (val)))
4815 val = s390_expand_mask_and_shift (val, mode, ac.shift);
4816
4817 /* Further preparation insns. */
4818 if (code == PLUS || code == MINUS)
4819 emit_move_insn (orig, val);
4820 else if (code == MULT || code == AND) /* val = "11..1<val>11..1" */
4821 val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
4822 NULL_RTX, 1, OPTAB_DIRECT);
4823
4824 /* Load full word. Subsequent loads are performed by CS. */
4825 cmp = force_reg (SImode, ac.memsi);
4826
4827 /* Start CS loop. */
4828 emit_label (csloop);
4829 emit_move_insn (new_rtx, cmp);
4830
4831 /* Patch new with val at correct position. */
4832 switch (code)
4833 {
4834 case PLUS:
4835 case MINUS:
4836 val = expand_simple_binop (SImode, code, new_rtx, orig,
4837 NULL_RTX, 1, OPTAB_DIRECT);
4838 val = expand_simple_binop (SImode, AND, val, ac.modemask,
4839 NULL_RTX, 1, OPTAB_DIRECT);
4840 /* FALLTHRU */
4841 case SET:
4842 if (ac.aligned && MEM_P (val))
4843 store_bit_field (new_rtx, GET_MODE_BITSIZE (mode), 0, SImode, val);
4844 else
4845 {
4846 new_rtx = expand_simple_binop (SImode, AND, new_rtx, ac.modemaski,
4847 NULL_RTX, 1, OPTAB_DIRECT);
4848 new_rtx = expand_simple_binop (SImode, IOR, new_rtx, val,
4849 NULL_RTX, 1, OPTAB_DIRECT);
4850 }
4851 break;
4852 case AND:
4853 case IOR:
4854 case XOR:
4855 new_rtx = expand_simple_binop (SImode, code, new_rtx, val,
4856 NULL_RTX, 1, OPTAB_DIRECT);
4857 break;
4858 case MULT: /* NAND */
4859 new_rtx = expand_simple_binop (SImode, AND, new_rtx, val,
4860 NULL_RTX, 1, OPTAB_DIRECT);
4861 new_rtx = expand_simple_binop (SImode, XOR, new_rtx, ac.modemask,
4862 NULL_RTX, 1, OPTAB_DIRECT);
4863 break;
4864 default:
4865 gcc_unreachable ();
4866 }
4867
4868 s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, cmp,
4869 ac.memsi, cmp, new_rtx));
4870
4871 /* Return the correct part of the bitfield. */
4872 if (target)
4873 convert_move (target, expand_simple_binop (SImode, LSHIFTRT,
4874 after ? new_rtx : cmp, ac.shift,
4875 NULL_RTX, 1, OPTAB_DIRECT), 1);
4876 }
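
/* Note on the AND/NAND preparation above: VAL is XORed with MODEMASKI so
   that all bits outside the accessed field are ones ("11..1<val>11..1");
   the full-word AND inside the CS loop then leaves the neighbouring
   bytes unchanged.  PLUS and MINUS instead operate on the full word and
   mask the result back into the field afterwards.  */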
4877
4878 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
4879 We need to emit DTP-relative relocations. */
4880
4881 static void s390_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
4882
4883 static void
4884 s390_output_dwarf_dtprel (FILE *file, int size, rtx x)
4885 {
4886 switch (size)
4887 {
4888 case 4:
4889 fputs ("\t.long\t", file);
4890 break;
4891 case 8:
4892 fputs ("\t.quad\t", file);
4893 break;
4894 default:
4895 gcc_unreachable ();
4896 }
4897 output_addr_const (file, x);
4898 fputs ("@DTPOFF", file);
4899 }
4900
4901 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
4902 /* Implement TARGET_MANGLE_TYPE. */
4903
4904 static const char *
4905 s390_mangle_type (const_tree type)
4906 {
4907 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
4908 && TARGET_LONG_DOUBLE_128)
4909 return "g";
4910
4911 /* For all other types, use normal C++ mangling. */
4912 return NULL;
4913 }
4914 #endif
4915
4916 /* In the name of slightly smaller debug output, and to cater to
4917 general assembler lossage, recognize various UNSPEC sequences
4918 and turn them back into a direct symbol reference. */
4919
4920 static rtx
4921 s390_delegitimize_address (rtx orig_x)
4922 {
4923 rtx x, y;
4924
4925 orig_x = delegitimize_mem_from_attrs (orig_x);
4926 x = orig_x;
4927 if (GET_CODE (x) != MEM)
4928 return orig_x;
4929
4930 x = XEXP (x, 0);
4931 if (GET_CODE (x) == PLUS
4932 && GET_CODE (XEXP (x, 1)) == CONST
4933 && GET_CODE (XEXP (x, 0)) == REG
4934 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
4935 {
4936 y = XEXP (XEXP (x, 1), 0);
4937 if (GET_CODE (y) == UNSPEC
4938 && XINT (y, 1) == UNSPEC_GOT)
4939 return XVECEXP (y, 0, 0);
4940 return orig_x;
4941 }
4942
4943 if (GET_CODE (x) == CONST)
4944 {
4945 y = XEXP (x, 0);
4946 if (GET_CODE (y) == UNSPEC
4947 && XINT (y, 1) == UNSPEC_GOTENT)
4948 return XVECEXP (y, 0, 0);
4949 return orig_x;
4950 }
4951
4952 return orig_x;
4953 }
4954
4955 /* Output operand OP to stdio stream FILE.
4956 OP is an address (register + offset) which is not used to address data;
4957 instead the rightmost bits are interpreted as the value. */
4958
4959 static void
4960 print_shift_count_operand (FILE *file, rtx op)
4961 {
4962 HOST_WIDE_INT offset;
4963 rtx base;
4964
4965 /* Extract base register and offset. */
4966 if (!s390_decompose_shift_count (op, &base, &offset))
4967 gcc_unreachable ();
4968
4969 /* Sanity check. */
4970 if (base)
4971 {
4972 gcc_assert (GET_CODE (base) == REG);
4973 gcc_assert (REGNO (base) < FIRST_PSEUDO_REGISTER);
4974 gcc_assert (REGNO_REG_CLASS (REGNO (base)) == ADDR_REGS);
4975 }
4976
4977 /* Offsets are restricted to twelve bits. */
4978 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset & ((1 << 12) - 1));
4979 if (base)
4980 fprintf (file, "(%s)", reg_names[REGNO (base)]);
4981 }
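
/* For illustration: an operand like (plus (reg %r3) (const_int 70))
   prints as "70(%r3)", while a plain (const_int 5) prints just "5".
   The offset is masked to 12 bits, matching the D(B) form of the shift
   instructions.  */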
4982
4983 /* See 'get_some_local_dynamic_name'. */
4984
4985 static int
4986 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
4987 {
4988 rtx x = *px;
4989
4990 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
4991 {
4992 x = get_pool_constant (x);
4993 return for_each_rtx (&x, get_some_local_dynamic_name_1, 0);
4994 }
4995
4996 if (GET_CODE (x) == SYMBOL_REF
4997 && tls_symbolic_operand (x) == TLS_MODEL_LOCAL_DYNAMIC)
4998 {
4999 cfun->machine->some_ld_name = XSTR (x, 0);
5000 return 1;
5001 }
5002
5003 return 0;
5004 }
5005
5006 /* Locate some local-dynamic symbol still in use by this function
5007 so that we can print its name in local-dynamic base patterns. */
5008
5009 static const char *
5010 get_some_local_dynamic_name (void)
5011 {
5012 rtx insn;
5013
5014 if (cfun->machine->some_ld_name)
5015 return cfun->machine->some_ld_name;
5016
5017 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
5018 if (INSN_P (insn)
5019 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
5020 return cfun->machine->some_ld_name;
5021
5022 gcc_unreachable ();
5023 }
5024
5025 /* Output machine-dependent UNSPECs occurring in address constant X
5026 in assembler syntax to stdio stream FILE. Returns true if the
5027 constant X could be recognized, false otherwise. */
5028
5029 bool
5030 s390_output_addr_const_extra (FILE *file, rtx x)
5031 {
5032 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
5033 switch (XINT (x, 1))
5034 {
5035 case UNSPEC_GOTENT:
5036 output_addr_const (file, XVECEXP (x, 0, 0));
5037 fprintf (file, "@GOTENT");
5038 return true;
5039 case UNSPEC_GOT:
5040 output_addr_const (file, XVECEXP (x, 0, 0));
5041 fprintf (file, "@GOT");
5042 return true;
5043 case UNSPEC_GOTOFF:
5044 output_addr_const (file, XVECEXP (x, 0, 0));
5045 fprintf (file, "@GOTOFF");
5046 return true;
5047 case UNSPEC_PLT:
5048 output_addr_const (file, XVECEXP (x, 0, 0));
5049 fprintf (file, "@PLT");
5050 return true;
5051 case UNSPEC_PLTOFF:
5052 output_addr_const (file, XVECEXP (x, 0, 0));
5053 fprintf (file, "@PLTOFF");
5054 return true;
5055 case UNSPEC_TLSGD:
5056 output_addr_const (file, XVECEXP (x, 0, 0));
5057 fprintf (file, "@TLSGD");
5058 return true;
5059 case UNSPEC_TLSLDM:
5060 assemble_name (file, get_some_local_dynamic_name ());
5061 fprintf (file, "@TLSLDM");
5062 return true;
5063 case UNSPEC_DTPOFF:
5064 output_addr_const (file, XVECEXP (x, 0, 0));
5065 fprintf (file, "@DTPOFF");
5066 return true;
5067 case UNSPEC_NTPOFF:
5068 output_addr_const (file, XVECEXP (x, 0, 0));
5069 fprintf (file, "@NTPOFF");
5070 return true;
5071 case UNSPEC_GOTNTPOFF:
5072 output_addr_const (file, XVECEXP (x, 0, 0));
5073 fprintf (file, "@GOTNTPOFF");
5074 return true;
5075 case UNSPEC_INDNTPOFF:
5076 output_addr_const (file, XVECEXP (x, 0, 0));
5077 fprintf (file, "@INDNTPOFF");
5078 return true;
5079 }
5080
5081 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 2)
5082 switch (XINT (x, 1))
5083 {
5084 case UNSPEC_POOL_OFFSET:
5085 x = gen_rtx_MINUS (GET_MODE (x), XVECEXP (x, 0, 0), XVECEXP (x, 0, 1));
5086 output_addr_const (file, x);
5087 return true;
5088 }
5089 return false;
5090 }
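
/* Example: an UNSPEC_GOTENT wrapped around the symbol foo prints as
   "foo@GOTENT"; the two-element UNSPEC_POOL_OFFSET form is printed as
   the difference of its two operands.  */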
5091
5092 /* Output address operand ADDR in assembler syntax to
5093 stdio stream FILE. */
5094
5095 void
5096 print_operand_address (FILE *file, rtx addr)
5097 {
5098 struct s390_address ad;
5099
5100 if (s390_symref_operand_p (addr, NULL, NULL))
5101 {
5102 gcc_assert (TARGET_Z10);
5103 output_addr_const (file, addr);
5104 return;
5105 }
5106
5107 if (!s390_decompose_address (addr, &ad)
5108 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5109 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
5110 output_operand_lossage ("cannot decompose address");
5111
5112 if (ad.disp)
5113 output_addr_const (file, ad.disp);
5114 else
5115 fprintf (file, "0");
5116
5117 if (ad.base && ad.indx)
5118 fprintf (file, "(%s,%s)", reg_names[REGNO (ad.indx)],
5119 reg_names[REGNO (ad.base)]);
5120 else if (ad.base)
5121 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
5122 }
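
/* Example outputs: base plus index plus displacement prints as
   "8(%r1,%r2)" (index first, then base), base plus displacement as
   "8(%r2)", and a missing displacement is printed as "0".  A bare
   symbolic address, which is only accepted on z10, is printed
   symbolically instead.  */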
5123
5124 /* Output operand X in assembler syntax to stdio stream FILE.
5125 CODE specifies the format flag. The following format flags
5126 are recognized:
5127
5128 'C': print opcode suffix for branch condition.
5129 'D': print opcode suffix for inverse branch condition.
5130 'E': print opcode suffix for branch on index instruction.
5131 'J': print tls_load/tls_gdcall/tls_ldcall suffix
5132 'G': print the size of the operand in bytes.
5133 'O': print only the displacement of a memory reference.
5134 'R': print only the base register of a memory reference.
5135 'S': print S-type memory reference (base+displacement).
5136 'N': print the second word of a DImode operand.
5137 'M': print the second word of a TImode operand.
5138 'Y': print shift count operand.
5139
5140 'b': print integer X as if it's an unsigned byte.
5141 'c': print integer X as if it's a signed byte.
5142 'x': print integer X as if it's an unsigned halfword.
5143 'h': print integer X as if it's a signed halfword.
5144 'i': print the first nonzero HImode part of X.
5145 'j': print the first HImode part unequal to -1 of X.
5146 'k': print the first nonzero SImode part of X.
5147 'm': print the first SImode part unequal to -1 of X.
5148 'o': print integer X as if it's an unsigned 32-bit word. */
5149
5150 void
5151 print_operand (FILE *file, rtx x, int code)
5152 {
5153 switch (code)
5154 {
5155 case 'C':
5156 fprintf (file, s390_branch_condition_mnemonic (x, FALSE));
5157 return;
5158
5159 case 'D':
5160 fprintf (file, s390_branch_condition_mnemonic (x, TRUE));
5161 return;
5162
5163 case 'E':
5164 if (GET_CODE (x) == LE)
5165 fprintf (file, "l");
5166 else if (GET_CODE (x) == GT)
5167 fprintf (file, "h");
5168 else
5169 gcc_unreachable ();
5170 return;
5171
5172 case 'J':
5173 if (GET_CODE (x) == SYMBOL_REF)
5174 {
5175 fprintf (file, "%s", ":tls_load:");
5176 output_addr_const (file, x);
5177 }
5178 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
5179 {
5180 fprintf (file, "%s", ":tls_gdcall:");
5181 output_addr_const (file, XVECEXP (x, 0, 0));
5182 }
5183 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM)
5184 {
5185 fprintf (file, "%s", ":tls_ldcall:");
5186 assemble_name (file, get_some_local_dynamic_name ());
5187 }
5188 else
5189 gcc_unreachable ();
5190 return;
5191
5192 case 'G':
5193 fprintf (file, "%u", GET_MODE_SIZE (GET_MODE (x)));
5194 return;
5195
5196 case 'O':
5197 {
5198 struct s390_address ad;
5199 int ret;
5200
5201 gcc_assert (GET_CODE (x) == MEM);
5202 ret = s390_decompose_address (XEXP (x, 0), &ad);
5203 gcc_assert (ret);
5204 gcc_assert (!ad.base || REGNO_OK_FOR_BASE_P (REGNO (ad.base)));
5205 gcc_assert (!ad.indx);
5206
5207 if (ad.disp)
5208 output_addr_const (file, ad.disp);
5209 else
5210 fprintf (file, "0");
5211 }
5212 return;
5213
5214 case 'R':
5215 {
5216 struct s390_address ad;
5217 int ret;
5218
5219 gcc_assert (GET_CODE (x) == MEM);
5220 ret = s390_decompose_address (XEXP (x, 0), &ad);
5221 gcc_assert (ret);
5222 gcc_assert (!ad.base || REGNO_OK_FOR_BASE_P (REGNO (ad.base)));
5223 gcc_assert (!ad.indx);
5224
5225 if (ad.base)
5226 fprintf (file, "%s", reg_names[REGNO (ad.base)]);
5227 else
5228 fprintf (file, "0");
5229 }
5230 return;
5231
5232 case 'S':
5233 {
5234 struct s390_address ad;
5235 int ret;
5236
5237 gcc_assert (GET_CODE (x) == MEM);
5238 ret = s390_decompose_address (XEXP (x, 0), &ad);
5239 gcc_assert (ret);
5240 gcc_assert (!ad.base || REGNO_OK_FOR_BASE_P (REGNO (ad.base)));
5241 gcc_assert (!ad.indx);
5242
5243 if (ad.disp)
5244 output_addr_const (file, ad.disp);
5245 else
5246 fprintf (file, "0");
5247
5248 if (ad.base)
5249 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
5250 }
5251 return;
5252
5253 case 'N':
5254 if (GET_CODE (x) == REG)
5255 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
5256 else if (GET_CODE (x) == MEM)
5257 x = change_address (x, VOIDmode, plus_constant (XEXP (x, 0), 4));
5258 else
5259 gcc_unreachable ();
5260 break;
5261
5262 case 'M':
5263 if (GET_CODE (x) == REG)
5264 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
5265 else if (GET_CODE (x) == MEM)
5266 x = change_address (x, VOIDmode, plus_constant (XEXP (x, 0), 8));
5267 else
5268 gcc_unreachable ();
5269 break;
5270
5271 case 'Y':
5272 print_shift_count_operand (file, x);
5273 return;
5274 }
5275
5276 switch (GET_CODE (x))
5277 {
5278 case REG:
5279 fprintf (file, "%s", reg_names[REGNO (x)]);
5280 break;
5281
5282 case MEM:
5283 output_address (XEXP (x, 0));
5284 break;
5285
5286 case CONST:
5287 case CODE_LABEL:
5288 case LABEL_REF:
5289 case SYMBOL_REF:
5290 output_addr_const (file, x);
5291 break;
5292
5293 case CONST_INT:
5294 if (code == 'b')
5295 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xff);
5296 else if (code == 'c')
5297 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ((INTVAL (x) & 0xff) ^ 0x80) - 0x80);
5298 else if (code == 'x')
5299 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xffff);
5300 else if (code == 'h')
5301 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
5302 else if (code == 'i')
5303 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5304 s390_extract_part (x, HImode, 0));
5305 else if (code == 'j')
5306 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5307 s390_extract_part (x, HImode, -1));
5308 else if (code == 'k')
5309 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5310 s390_extract_part (x, SImode, 0));
5311 else if (code == 'm')
5312 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5313 s390_extract_part (x, SImode, -1));
5314 else if (code == 'o')
5315 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xffffffff);
5316 else
5317 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
5318 break;
5319
5320 case CONST_DOUBLE:
5321 gcc_assert (GET_MODE (x) == VOIDmode);
5322 if (code == 'b')
5323 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xff);
5324 else if (code == 'x')
5325 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xffff);
5326 else if (code == 'h')
5327 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ((CONST_DOUBLE_LOW (x) & 0xffff) ^ 0x8000) - 0x8000);
5328 else
5329 gcc_unreachable ();
5330 break;
5331
5332 default:
5333 fatal_insn ("UNKNOWN in print_operand !?", x);
5334 break;
5335 }
5336 }
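
/* Examples for the integer format flags: with X = (const_int -1),
   'b' prints 255, 'x' prints 65535, and 'h' and 'c' both print -1,
   since the latter two sign-extend the masked halfword respectively
   byte.  */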
5337
5338 /* Target hook for assembling integer objects. We need to define it
5339 here to work around a bug in some versions of GAS, which couldn't
5340 handle values smaller than INT_MIN when printed in decimal. */
5341
5342 static bool
5343 s390_assemble_integer (rtx x, unsigned int size, int aligned_p)
5344 {
5345 if (size == 8 && aligned_p
5346 && GET_CODE (x) == CONST_INT && INTVAL (x) < INT_MIN)
5347 {
5348 fprintf (asm_out_file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n",
5349 INTVAL (x));
5350 return true;
5351 }
5352 return default_assemble_integer (x, size, aligned_p);
5353 }
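
/* Example: an aligned 8-byte CONST_INT whose value is below INT_MIN is
   emitted as "\t.quad\t0x..." in hexadecimal, sidestepping the GAS
   versions that mishandled such values in decimal; everything else is
   handed to default_assemble_integer.  */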
5354
5355 /* Returns true if register REGNO is used for forming
5356 a memory address in expression X. */
5357
5358 static bool
5359 reg_used_in_mem_p (int regno, rtx x)
5360 {
5361 enum rtx_code code = GET_CODE (x);
5362 int i, j;
5363 const char *fmt;
5364
5365 if (code == MEM)
5366 {
5367 if (refers_to_regno_p (regno, regno+1,
5368 XEXP (x, 0), 0))
5369 return true;
5370 }
5371 else if (code == SET
5372 && GET_CODE (SET_DEST (x)) == PC)
5373 {
5374 if (refers_to_regno_p (regno, regno+1,
5375 SET_SRC (x), 0))
5376 return true;
5377 }
5378
5379 fmt = GET_RTX_FORMAT (code);
5380 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5381 {
5382 if (fmt[i] == 'e'
5383 && reg_used_in_mem_p (regno, XEXP (x, i)))
5384 return true;
5385
5386 else if (fmt[i] == 'E')
5387 for (j = 0; j < XVECLEN (x, i); j++)
5388 if (reg_used_in_mem_p (regno, XVECEXP (x, i, j)))
5389 return true;
5390 }
5391 return false;
5392 }
5393
5394 /* Returns true if expression DEP_RTX sets an address register
5395 used by instruction INSN to address memory. */
5396
5397 static bool
5398 addr_generation_dependency_p (rtx dep_rtx, rtx insn)
5399 {
5400 rtx target, pat;
5401
5402 if (GET_CODE (dep_rtx) == INSN)
5403 dep_rtx = PATTERN (dep_rtx);
5404
5405 if (GET_CODE (dep_rtx) == SET)
5406 {
5407 target = SET_DEST (dep_rtx);
5408 if (GET_CODE (target) == STRICT_LOW_PART)
5409 target = XEXP (target, 0);
5410 while (GET_CODE (target) == SUBREG)
5411 target = SUBREG_REG (target);
5412
5413 if (GET_CODE (target) == REG)
5414 {
5415 int regno = REGNO (target);
5416
5417 if (s390_safe_attr_type (insn) == TYPE_LA)
5418 {
5419 pat = PATTERN (insn);
5420 if (GET_CODE (pat) == PARALLEL)
5421 {
5422 gcc_assert (XVECLEN (pat, 0) == 2);
5423 pat = XVECEXP (pat, 0, 0);
5424 }
5425 gcc_assert (GET_CODE (pat) == SET);
5426 return refers_to_regno_p (regno, regno+1, SET_SRC (pat), 0);
5427 }
5428 else if (get_attr_atype (insn) == ATYPE_AGEN)
5429 return reg_used_in_mem_p (regno, PATTERN (insn));
5430 }
5431 }
5432 return false;
5433 }
5434
5435 /* Return 1 if DEP_INSN sets a register used by INSN in the agen unit. */
5436
5437 int
5438 s390_agen_dep_p (rtx dep_insn, rtx insn)
5439 {
5440 rtx dep_rtx = PATTERN (dep_insn);
5441 int i;
5442
5443 if (GET_CODE (dep_rtx) == SET
5444 && addr_generation_dependency_p (dep_rtx, insn))
5445 return 1;
5446 else if (GET_CODE (dep_rtx) == PARALLEL)
5447 {
5448 for (i = 0; i < XVECLEN (dep_rtx, 0); i++)
5449 {
5450 if (addr_generation_dependency_p (XVECEXP (dep_rtx, 0, i), insn))
5451 return 1;
5452 }
5453 }
5454 return 0;
5455 }
5456
5457
5458 /* A C statement (sans semicolon) to update the integer scheduling priority
5459 INSN_PRIORITY (INSN). Increase the priority to execute the INSN earlier,
5460 reduce the priority to execute INSN later. Do not define this macro if
5461 you do not need to adjust the scheduling priorities of insns.
5462
5463 A STD instruction should be scheduled earlier,
5464 in order to use the bypass. */
5465 static int
5466 s390_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
5467 {
5468 if (! INSN_P (insn))
5469 return priority;
5470
5471 if (s390_tune != PROCESSOR_2084_Z990
5472 && s390_tune != PROCESSOR_2094_Z9_109
5473 && s390_tune != PROCESSOR_2097_Z10
5474 && s390_tune != PROCESSOR_2817_Z196)
5475 return priority;
5476
5477 switch (s390_safe_attr_type (insn))
5478 {
5479 case TYPE_FSTOREDF:
5480 case TYPE_FSTORESF:
5481 priority = priority << 3;
5482 break;
5483 case TYPE_STORE:
5484 case TYPE_STM:
5485 priority = priority << 1;
5486 break;
5487 default:
5488 break;
5489 }
5490 return priority;
5491 }
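
/* Effect of the adjustment above: on the listed processors a
   floating-point store gets its priority scaled by 8 and an ordinary
   store or store-multiple by 2, so stores tend to be scheduled earlier
   and can use the bypass mentioned in the comment.  */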
5492
5493
5494 /* The number of instructions that can be issued per cycle. */
5495
5496 static int
5497 s390_issue_rate (void)
5498 {
5499 switch (s390_tune)
5500 {
5501 case PROCESSOR_2084_Z990:
5502 case PROCESSOR_2094_Z9_109:
5503 case PROCESSOR_2817_Z196:
5504 return 3;
5505 case PROCESSOR_2097_Z10:
5506 return 2;
5507 default:
5508 return 1;
5509 }
5510 }
5511
5512 static int
5513 s390_first_cycle_multipass_dfa_lookahead (void)
5514 {
5515 return 4;
5516 }
5517
5518 /* Annotate every literal pool reference in X by an UNSPEC_LTREF expression.
5519 Fix up MEMs as required. */
5520
5521 static void
5522 annotate_constant_pool_refs (rtx *x)
5523 {
5524 int i, j;
5525 const char *fmt;
5526
5527 gcc_assert (GET_CODE (*x) != SYMBOL_REF
5528 || !CONSTANT_POOL_ADDRESS_P (*x));
5529
5530 /* Literal pool references can only occur inside a MEM ... */
5531 if (GET_CODE (*x) == MEM)
5532 {
5533 rtx memref = XEXP (*x, 0);
5534
5535 if (GET_CODE (memref) == SYMBOL_REF
5536 && CONSTANT_POOL_ADDRESS_P (memref))
5537 {
5538 rtx base = cfun->machine->base_reg;
5539 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, memref, base),
5540 UNSPEC_LTREF);
5541
5542 *x = replace_equiv_address (*x, addr);
5543 return;
5544 }
5545
5546 if (GET_CODE (memref) == CONST
5547 && GET_CODE (XEXP (memref, 0)) == PLUS
5548 && GET_CODE (XEXP (XEXP (memref, 0), 1)) == CONST_INT
5549 && GET_CODE (XEXP (XEXP (memref, 0), 0)) == SYMBOL_REF
5550 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (memref, 0), 0)))
5551 {
5552 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (memref, 0), 1));
5553 rtx sym = XEXP (XEXP (memref, 0), 0);
5554 rtx base = cfun->machine->base_reg;
5555 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5556 UNSPEC_LTREF);
5557
5558 *x = replace_equiv_address (*x, plus_constant (addr, off));
5559 return;
5560 }
5561 }
5562
5563 /* ... or a load-address type pattern. */
5564 if (GET_CODE (*x) == SET)
5565 {
5566 rtx addrref = SET_SRC (*x);
5567
5568 if (GET_CODE (addrref) == SYMBOL_REF
5569 && CONSTANT_POOL_ADDRESS_P (addrref))
5570 {
5571 rtx base = cfun->machine->base_reg;
5572 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addrref, base),
5573 UNSPEC_LTREF);
5574
5575 SET_SRC (*x) = addr;
5576 return;
5577 }
5578
5579 if (GET_CODE (addrref) == CONST
5580 && GET_CODE (XEXP (addrref, 0)) == PLUS
5581 && GET_CODE (XEXP (XEXP (addrref, 0), 1)) == CONST_INT
5582 && GET_CODE (XEXP (XEXP (addrref, 0), 0)) == SYMBOL_REF
5583 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (addrref, 0), 0)))
5584 {
5585 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (addrref, 0), 1));
5586 rtx sym = XEXP (XEXP (addrref, 0), 0);
5587 rtx base = cfun->machine->base_reg;
5588 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5589 UNSPEC_LTREF);
5590
5591 SET_SRC (*x) = plus_constant (addr, off);
5592 return;
5593 }
5594 }
5595
5596 /* Annotate LTREL_BASE as well. */
5597 if (GET_CODE (*x) == UNSPEC
5598 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
5599 {
5600 rtx base = cfun->machine->base_reg;
5601 *x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XVECEXP (*x, 0, 0), base),
5602 UNSPEC_LTREL_BASE);
5603 return;
5604 }
5605
5606 fmt = GET_RTX_FORMAT (GET_CODE (*x));
5607 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
5608 {
5609 if (fmt[i] == 'e')
5610 {
5611 annotate_constant_pool_refs (&XEXP (*x, i));
5612 }
5613 else if (fmt[i] == 'E')
5614 {
5615 for (j = 0; j < XVECLEN (*x, i); j++)
5616 annotate_constant_pool_refs (&XVECEXP (*x, i, j));
5617 }
5618 }
5619 }
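
/* Shape of the annotation: a reference such as
   (mem (symbol_ref [in the constant pool])) becomes
   (mem (unspec [symbol_ref, base_reg] UNSPEC_LTREF)), so that the
   literal pool base register is recorded explicitly.  The same wrapping
   is applied to load-address patterns and, with the offset preserved,
   to "symbol + const_int" forms; UNSPEC_LTREL_BASE likewise has the
   base register appended.  */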
5620
5621 /* Split all branches that exceed the maximum distance.
5622 Returns true if this created a new literal pool entry. */
5623
5624 static int
5625 s390_split_branches (void)
5626 {
5627 rtx temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
5628 int new_literal = 0, ret;
5629 rtx insn, pat, tmp, target;
5630 rtx *label;
5631
5632 /* We need correct insn addresses. */
5633
5634 shorten_branches (get_insns ());
5635
5636 /* Find all branches that exceed 64KB, and split them. */
5637
5638 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5639 {
5640 if (GET_CODE (insn) != JUMP_INSN)
5641 continue;
5642
5643 pat = PATTERN (insn);
5644 if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 2)
5645 pat = XVECEXP (pat, 0, 0);
5646 if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
5647 continue;
5648
5649 if (GET_CODE (SET_SRC (pat)) == LABEL_REF)
5650 {
5651 label = &SET_SRC (pat);
5652 }
5653 else if (GET_CODE (SET_SRC (pat)) == IF_THEN_ELSE)
5654 {
5655 if (GET_CODE (XEXP (SET_SRC (pat), 1)) == LABEL_REF)
5656 label = &XEXP (SET_SRC (pat), 1);
5657 else if (GET_CODE (XEXP (SET_SRC (pat), 2)) == LABEL_REF)
5658 label = &XEXP (SET_SRC (pat), 2);
5659 else
5660 continue;
5661 }
5662 else
5663 continue;
5664
5665 if (get_attr_length (insn) <= 4)
5666 continue;
5667
5668 /* We are going to use the return register as a scratch register;
5669 make sure it will be saved/restored by the prologue/epilogue. */
5670 cfun_frame_layout.save_return_addr_p = 1;
5671
5672 if (!flag_pic)
5673 {
5674 new_literal = 1;
5675 tmp = force_const_mem (Pmode, *label);
5676 tmp = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, tmp), insn);
5677 INSN_ADDRESSES_NEW (tmp, -1);
5678 annotate_constant_pool_refs (&PATTERN (tmp));
5679
5680 target = temp_reg;
5681 }
5682 else
5683 {
5684 new_literal = 1;
5685 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, *label),
5686 UNSPEC_LTREL_OFFSET);
5687 target = gen_rtx_CONST (Pmode, target);
5688 target = force_const_mem (Pmode, target);
5689 tmp = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, target), insn);
5690 INSN_ADDRESSES_NEW (tmp, -1);
5691 annotate_constant_pool_refs (&PATTERN (tmp));
5692
5693 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XEXP (target, 0),
5694 cfun->machine->base_reg),
5695 UNSPEC_LTREL_BASE);
5696 target = gen_rtx_PLUS (Pmode, temp_reg, target);
5697 }
5698
5699 ret = validate_change (insn, label, target, 0);
5700 gcc_assert (ret);
5701 }
5702
5703 return new_literal;
5704 }
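
/* Sketch of the transformation: a conditional branch whose target is out
   of range keeps its condition, but its label operand is replaced by the
   return register, which is first loaded with the target address from
   the literal pool (hence save_return_addr_p is forced above).  In PIC
   code the pool entry is an UNSPEC_LTREL_OFFSET and the literal pool
   base register is added back in via UNSPEC_LTREL_BASE.  */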
5705
5706
5707 /* Find an annotated literal pool symbol referenced in RTX X,
5708 and store it at REF. Will abort if X contains references to
5709 more than one such pool symbol; multiple references to the same
5710 symbol are allowed, however.
5711
5712 The rtx pointed to by REF must be initialized to NULL_RTX
5713 by the caller before calling this routine. */
5714
5715 static void
5716 find_constant_pool_ref (rtx x, rtx *ref)
5717 {
5718 int i, j;
5719 const char *fmt;
5720
5721 /* Ignore LTREL_BASE references. */
5722 if (GET_CODE (x) == UNSPEC
5723 && XINT (x, 1) == UNSPEC_LTREL_BASE)
5724 return;
5725 /* Likewise POOL_ENTRY insns. */
5726 if (GET_CODE (x) == UNSPEC_VOLATILE
5727 && XINT (x, 1) == UNSPECV_POOL_ENTRY)
5728 return;
5729
5730 gcc_assert (GET_CODE (x) != SYMBOL_REF
5731 || !CONSTANT_POOL_ADDRESS_P (x));
5732
5733 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_LTREF)
5734 {
5735 rtx sym = XVECEXP (x, 0, 0);
5736 gcc_assert (GET_CODE (sym) == SYMBOL_REF
5737 && CONSTANT_POOL_ADDRESS_P (sym));
5738
5739 if (*ref == NULL_RTX)
5740 *ref = sym;
5741 else
5742 gcc_assert (*ref == sym);
5743
5744 return;
5745 }
5746
5747 fmt = GET_RTX_FORMAT (GET_CODE (x));
5748 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5749 {
5750 if (fmt[i] == 'e')
5751 {
5752 find_constant_pool_ref (XEXP (x, i), ref);
5753 }
5754 else if (fmt[i] == 'E')
5755 {
5756 for (j = 0; j < XVECLEN (x, i); j++)
5757 find_constant_pool_ref (XVECEXP (x, i, j), ref);
5758 }
5759 }
5760 }
5761
5762 /* Replace every reference to the annotated literal pool
5763 symbol REF in X by its base plus OFFSET. */
5764
5765 static void
5766 replace_constant_pool_ref (rtx *x, rtx ref, rtx offset)
5767 {
5768 int i, j;
5769 const char *fmt;
5770
5771 gcc_assert (*x != ref);
5772
5773 if (GET_CODE (*x) == UNSPEC
5774 && XINT (*x, 1) == UNSPEC_LTREF
5775 && XVECEXP (*x, 0, 0) == ref)
5776 {
5777 *x = gen_rtx_PLUS (Pmode, XVECEXP (*x, 0, 1), offset);
5778 return;
5779 }
5780
5781 if (GET_CODE (*x) == PLUS
5782 && GET_CODE (XEXP (*x, 1)) == CONST_INT
5783 && GET_CODE (XEXP (*x, 0)) == UNSPEC
5784 && XINT (XEXP (*x, 0), 1) == UNSPEC_LTREF
5785 && XVECEXP (XEXP (*x, 0), 0, 0) == ref)
5786 {
5787 rtx addr = gen_rtx_PLUS (Pmode, XVECEXP (XEXP (*x, 0), 0, 1), offset);
5788 *x = plus_constant (addr, INTVAL (XEXP (*x, 1)));
5789 return;
5790 }
5791
5792 fmt = GET_RTX_FORMAT (GET_CODE (*x));
5793 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
5794 {
5795 if (fmt[i] == 'e')
5796 {
5797 replace_constant_pool_ref (&XEXP (*x, i), ref, offset);
5798 }
5799 else if (fmt[i] == 'E')
5800 {
5801 for (j = 0; j < XVECLEN (*x, i); j++)
5802 replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, offset);
5803 }
5804 }
5805 }
5806
5807 /* Check whether X contains an UNSPEC_LTREL_BASE.
5808 Return its constant pool symbol if found, NULL_RTX otherwise. */
5809
5810 static rtx
5811 find_ltrel_base (rtx x)
5812 {
5813 int i, j;
5814 const char *fmt;
5815
5816 if (GET_CODE (x) == UNSPEC
5817 && XINT (x, 1) == UNSPEC_LTREL_BASE)
5818 return XVECEXP (x, 0, 0);
5819
5820 fmt = GET_RTX_FORMAT (GET_CODE (x));
5821 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5822 {
5823 if (fmt[i] == 'e')
5824 {
5825 rtx fnd = find_ltrel_base (XEXP (x, i));
5826 if (fnd)
5827 return fnd;
5828 }
5829 else if (fmt[i] == 'E')
5830 {
5831 for (j = 0; j < XVECLEN (x, i); j++)
5832 {
5833 rtx fnd = find_ltrel_base (XVECEXP (x, i, j));
5834 if (fnd)
5835 return fnd;
5836 }
5837 }
5838 }
5839
5840 return NULL_RTX;
5841 }
5842
5843 /* Replace any occurrence of UNSPEC_LTREL_BASE in X with its base. */
5844
5845 static void
5846 replace_ltrel_base (rtx *x)
5847 {
5848 int i, j;
5849 const char *fmt;
5850
5851 if (GET_CODE (*x) == UNSPEC
5852 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
5853 {
5854 *x = XVECEXP (*x, 0, 1);
5855 return;
5856 }
5857
5858 fmt = GET_RTX_FORMAT (GET_CODE (*x));
5859 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
5860 {
5861 if (fmt[i] == 'e')
5862 {
5863 replace_ltrel_base (&XEXP (*x, i));
5864 }
5865 else if (fmt[i] == 'E')
5866 {
5867 for (j = 0; j < XVECLEN (*x, i); j++)
5868 replace_ltrel_base (&XVECEXP (*x, i, j));
5869 }
5870 }
5871 }
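
/* A short summary of the scheme behind the two helpers above (not part of
   the original comments):  on pre-z/Architecture targets a symbolic address
   taken relative to the literal pool is represented by a pair of unspecs,
   UNSPEC_LTREL_OFFSET (the pool entry holding "symbol minus pool base") and
   UNSPEC_LTREL_BASE (the pool base address at the point of use), so the
   final address is "loaded offset + pool base".  Once the pool layout is
   final, replace_ltrel_base substitutes the concrete base register, which
   annotate_constant_pool_refs recorded as operand 1 of the unspec.  */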
5872
5873
5874 /* We keep a list of constants which we have to add to internal
5875 constant tables in the middle of large functions. */
5876
5877 #define NR_C_MODES 11
5878 enum machine_mode constant_modes[NR_C_MODES] =
5879 {
5880 TFmode, TImode, TDmode,
5881 DFmode, DImode, DDmode,
5882 SFmode, SImode, SDmode,
5883 HImode,
5884 QImode
5885 };
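
/* Note: the modes above are listed in order of decreasing size and hence
   decreasing alignment requirement (16, 8, 4, 2, 1 bytes).  s390_dump_pool
   relies on this ordering: emitting the constants front to back keeps every
   entry naturally aligned after the initial pool alignment.  */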
5886
5887 struct constant
5888 {
5889 struct constant *next;
5890 rtx value;
5891 rtx label;
5892 };
5893
5894 struct constant_pool
5895 {
5896 struct constant_pool *next;
5897 rtx first_insn;
5898 rtx pool_insn;
5899 bitmap insns;
5900 rtx emit_pool_after;
5901
5902 struct constant *constants[NR_C_MODES];
5903 struct constant *execute;
5904 rtx label;
5905 int size;
5906 };
5907
5908 /* Allocate new constant_pool structure. */
5909
5910 static struct constant_pool *
5911 s390_alloc_pool (void)
5912 {
5913 struct constant_pool *pool;
5914 int i;
5915
5916 pool = (struct constant_pool *) xmalloc (sizeof *pool);
5917 pool->next = NULL;
5918 for (i = 0; i < NR_C_MODES; i++)
5919 pool->constants[i] = NULL;
5920
5921 pool->execute = NULL;
5922 pool->label = gen_label_rtx ();
5923 pool->first_insn = NULL_RTX;
5924 pool->pool_insn = NULL_RTX;
5925 pool->insns = BITMAP_ALLOC (NULL);
5926 pool->size = 0;
5927 pool->emit_pool_after = NULL_RTX;
5928
5929 return pool;
5930 }
5931
5932 /* Create new constant pool covering instructions starting at INSN
5933 and chain it to the end of POOL_LIST. */
5934
5935 static struct constant_pool *
5936 s390_start_pool (struct constant_pool **pool_list, rtx insn)
5937 {
5938 struct constant_pool *pool, **prev;
5939
5940 pool = s390_alloc_pool ();
5941 pool->first_insn = insn;
5942
5943 for (prev = pool_list; *prev; prev = &(*prev)->next)
5944 ;
5945 *prev = pool;
5946
5947 return pool;
5948 }
5949
5950 /* End range of instructions covered by POOL at INSN and emit
5951 placeholder insn representing the pool. */
5952
5953 static void
5954 s390_end_pool (struct constant_pool *pool, rtx insn)
5955 {
5956 rtx pool_size = GEN_INT (pool->size + 8 /* alignment slop */);
5957
5958 if (!insn)
5959 insn = get_last_insn ();
5960
5961 pool->pool_insn = emit_insn_after (gen_pool (pool_size), insn);
5962 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
5963 }
5964
5965 /* Add INSN to the list of insns covered by POOL. */
5966
5967 static void
5968 s390_add_pool_insn (struct constant_pool *pool, rtx insn)
5969 {
5970 bitmap_set_bit (pool->insns, INSN_UID (insn));
5971 }
5972
5973 /* Return pool out of POOL_LIST that covers INSN. */
5974
5975 static struct constant_pool *
5976 s390_find_pool (struct constant_pool *pool_list, rtx insn)
5977 {
5978 struct constant_pool *pool;
5979
5980 for (pool = pool_list; pool; pool = pool->next)
5981 if (bitmap_bit_p (pool->insns, INSN_UID (insn)))
5982 break;
5983
5984 return pool;
5985 }
5986
5987 /* Add constant VAL of mode MODE to the constant pool POOL. */
5988
5989 static void
5990 s390_add_constant (struct constant_pool *pool, rtx val, enum machine_mode mode)
5991 {
5992 struct constant *c;
5993 int i;
5994
5995 for (i = 0; i < NR_C_MODES; i++)
5996 if (constant_modes[i] == mode)
5997 break;
5998 gcc_assert (i != NR_C_MODES);
5999
6000 for (c = pool->constants[i]; c != NULL; c = c->next)
6001 if (rtx_equal_p (val, c->value))
6002 break;
6003
6004 if (c == NULL)
6005 {
6006 c = (struct constant *) xmalloc (sizeof *c);
6007 c->value = val;
6008 c->label = gen_label_rtx ();
6009 c->next = pool->constants[i];
6010 pool->constants[i] = c;
6011 pool->size += GET_MODE_SIZE (mode);
6012 }
6013 }
6014
6015 /* Return an rtx that represents the offset of X from the start of
6016 pool POOL. */
6017
6018 static rtx
6019 s390_pool_offset (struct constant_pool *pool, rtx x)
6020 {
6021 rtx label;
6022
6023 label = gen_rtx_LABEL_REF (GET_MODE (x), pool->label);
6024 x = gen_rtx_UNSPEC (GET_MODE (x), gen_rtvec (2, x, label),
6025 UNSPEC_POOL_OFFSET);
6026 return gen_rtx_CONST (GET_MODE (x), x);
6027 }
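
/* The returned expression has the shape

       (const:M (unspec:M [X (label_ref POOL->LABEL)] UNSPEC_POOL_OFFSET))

   which is later printed as the difference "X - POOL->LABEL", i.e. a
   pool-relative offset.  */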
6028
6029 /* Find constant VAL of mode MODE in the constant pool POOL.
6030 Return an RTX describing the distance from the start of
6031 the pool to the location of the new constant. */
6032
6033 static rtx
6034 s390_find_constant (struct constant_pool *pool, rtx val,
6035 enum machine_mode mode)
6036 {
6037 struct constant *c;
6038 int i;
6039
6040 for (i = 0; i < NR_C_MODES; i++)
6041 if (constant_modes[i] == mode)
6042 break;
6043 gcc_assert (i != NR_C_MODES);
6044
6045 for (c = pool->constants[i]; c != NULL; c = c->next)
6046 if (rtx_equal_p (val, c->value))
6047 break;
6048
6049 gcc_assert (c);
6050
6051 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
6052 }
6053
6054 /* Check whether INSN is an execute. Return the label_ref to its
6055 execute target template if so, NULL_RTX otherwise. */
6056
6057 static rtx
6058 s390_execute_label (rtx insn)
6059 {
6060 if (GET_CODE (insn) == INSN
6061 && GET_CODE (PATTERN (insn)) == PARALLEL
6062 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == UNSPEC
6063 && XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_EXECUTE)
6064 return XVECEXP (XVECEXP (PATTERN (insn), 0, 0), 0, 2);
6065
6066 return NULL_RTX;
6067 }
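
/* Background (summarized, not from the original comments):  an "execute"
   insn models the S/390 EX instruction, which executes a single target
   instruction template stored elsewhere, typically with its length field
   supplied from a register at run time (used e.g. for variable-length MVC
   block operations).  On pre-z/Architecture CPUs the template must be
   reachable through the literal pool, which is why s390_add_execute below
   reserves 6 bytes of pool space per template.  */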
6068
6069 /* Add execute target for INSN to the constant pool POOL. */
6070
6071 static void
6072 s390_add_execute (struct constant_pool *pool, rtx insn)
6073 {
6074 struct constant *c;
6075
6076 for (c = pool->execute; c != NULL; c = c->next)
6077 if (INSN_UID (insn) == INSN_UID (c->value))
6078 break;
6079
6080 if (c == NULL)
6081 {
6082 c = (struct constant *) xmalloc (sizeof *c);
6083 c->value = insn;
6084 c->label = gen_label_rtx ();
6085 c->next = pool->execute;
6086 pool->execute = c;
6087 pool->size += 6;
6088 }
6089 }
6090
6091 /* Find execute target for INSN in the constant pool POOL.
6092 Return an RTX describing the distance from the start of
6093 the pool to the location of the execute target. */
6094
6095 static rtx
6096 s390_find_execute (struct constant_pool *pool, rtx insn)
6097 {
6098 struct constant *c;
6099
6100 for (c = pool->execute; c != NULL; c = c->next)
6101 if (INSN_UID (insn) == INSN_UID (c->value))
6102 break;
6103
6104 gcc_assert (c);
6105
6106 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
6107 }
6108
6109 /* For an execute INSN, extract the execute target template. */
6110
6111 static rtx
6112 s390_execute_target (rtx insn)
6113 {
6114 rtx pattern = PATTERN (insn);
6115 gcc_assert (s390_execute_label (insn));
6116
6117 if (XVECLEN (pattern, 0) == 2)
6118 {
6119 pattern = copy_rtx (XVECEXP (pattern, 0, 1));
6120 }
6121 else
6122 {
6123 rtvec vec = rtvec_alloc (XVECLEN (pattern, 0) - 1);
6124 int i;
6125
6126 for (i = 0; i < XVECLEN (pattern, 0) - 1; i++)
6127 RTVEC_ELT (vec, i) = copy_rtx (XVECEXP (pattern, 0, i + 1));
6128
6129 pattern = gen_rtx_PARALLEL (VOIDmode, vec);
6130 }
6131
6132 return pattern;
6133 }
6134
6135 /* Indicate that INSN cannot be duplicated. This is the case for
6136 execute insns that carry a unique label. */
6137
6138 static bool
6139 s390_cannot_copy_insn_p (rtx insn)
6140 {
6141 rtx label = s390_execute_label (insn);
6142 return label && label != const0_rtx;
6143 }
6144
6145 /* Dump out the constants in POOL. If REMOTE_LABEL is true,
6146 do not emit the pool base label. */
6147
6148 static void
6149 s390_dump_pool (struct constant_pool *pool, bool remote_label)
6150 {
6151 struct constant *c;
6152 rtx insn = pool->pool_insn;
6153 int i;
6154
6155 /* Switch to rodata section. */
6156 if (TARGET_CPU_ZARCH)
6157 {
6158 insn = emit_insn_after (gen_pool_section_start (), insn);
6159 INSN_ADDRESSES_NEW (insn, -1);
6160 }
6161
6162 /* Ensure minimum pool alignment. */
6163 if (TARGET_CPU_ZARCH)
6164 insn = emit_insn_after (gen_pool_align (GEN_INT (8)), insn);
6165 else
6166 insn = emit_insn_after (gen_pool_align (GEN_INT (4)), insn);
6167 INSN_ADDRESSES_NEW (insn, -1);
6168
6169 /* Emit pool base label. */
6170 if (!remote_label)
6171 {
6172 insn = emit_label_after (pool->label, insn);
6173 INSN_ADDRESSES_NEW (insn, -1);
6174 }
6175
6176 /* Dump constants in descending alignment requirement order,
6177 ensuring proper alignment for every constant. */
6178 for (i = 0; i < NR_C_MODES; i++)
6179 for (c = pool->constants[i]; c; c = c->next)
6180 {
6181 /* Convert UNSPEC_LTREL_OFFSET unspecs to pool-relative references. */
6182 rtx value = copy_rtx (c->value);
6183 if (GET_CODE (value) == CONST
6184 && GET_CODE (XEXP (value, 0)) == UNSPEC
6185 && XINT (XEXP (value, 0), 1) == UNSPEC_LTREL_OFFSET
6186 && XVECLEN (XEXP (value, 0), 0) == 1)
6187 value = s390_pool_offset (pool, XVECEXP (XEXP (value, 0), 0, 0));
6188
6189 insn = emit_label_after (c->label, insn);
6190 INSN_ADDRESSES_NEW (insn, -1);
6191
6192 value = gen_rtx_UNSPEC_VOLATILE (constant_modes[i],
6193 gen_rtvec (1, value),
6194 UNSPECV_POOL_ENTRY);
6195 insn = emit_insn_after (value, insn);
6196 INSN_ADDRESSES_NEW (insn, -1);
6197 }
6198
6199 /* Ensure minimum alignment for instructions. */
6200 insn = emit_insn_after (gen_pool_align (GEN_INT (2)), insn);
6201 INSN_ADDRESSES_NEW (insn, -1);
6202
6203 /* Output in-pool execute template insns. */
6204 for (c = pool->execute; c; c = c->next)
6205 {
6206 insn = emit_label_after (c->label, insn);
6207 INSN_ADDRESSES_NEW (insn, -1);
6208
6209 insn = emit_insn_after (s390_execute_target (c->value), insn);
6210 INSN_ADDRESSES_NEW (insn, -1);
6211 }
6212
6213 /* Switch back to previous section. */
6214 if (TARGET_CPU_ZARCH)
6215 {
6216 insn = emit_insn_after (gen_pool_section_end (), insn);
6217 INSN_ADDRESSES_NEW (insn, -1);
6218 }
6219
6220 insn = emit_barrier_after (insn);
6221 INSN_ADDRESSES_NEW (insn, -1);
6222
6223 /* Remove placeholder insn. */
6224 remove_insn (pool->pool_insn);
6225 }
6226
6227 /* Free all memory used by POOL. */
6228
6229 static void
6230 s390_free_pool (struct constant_pool *pool)
6231 {
6232 struct constant *c, *next;
6233 int i;
6234
6235 for (i = 0; i < NR_C_MODES; i++)
6236 for (c = pool->constants[i]; c; c = next)
6237 {
6238 next = c->next;
6239 free (c);
6240 }
6241
6242 for (c = pool->execute; c; c = next)
6243 {
6244 next = c->next;
6245 free (c);
6246 }
6247
6248 BITMAP_FREE (pool->insns);
6249 free (pool);
6250 }
6251
6252
6253 /* Collect main literal pool. Return NULL on overflow. */
6254
6255 static struct constant_pool *
6256 s390_mainpool_start (void)
6257 {
6258 struct constant_pool *pool;
6259 rtx insn;
6260
6261 pool = s390_alloc_pool ();
6262
6263 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6264 {
6265 if (GET_CODE (insn) == INSN
6266 && GET_CODE (PATTERN (insn)) == SET
6267 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC_VOLATILE
6268 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPECV_MAIN_POOL)
6269 {
6270 gcc_assert (!pool->pool_insn);
6271 pool->pool_insn = insn;
6272 }
6273
6274 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
6275 {
6276 s390_add_execute (pool, insn);
6277 }
6278 else if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6279 {
6280 rtx pool_ref = NULL_RTX;
6281 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6282 if (pool_ref)
6283 {
6284 rtx constant = get_pool_constant (pool_ref);
6285 enum machine_mode mode = get_pool_mode (pool_ref);
6286 s390_add_constant (pool, constant, mode);
6287 }
6288 }
6289
6290 /* If hot/cold partitioning is enabled, we have to make sure that
6291 the literal pool is emitted in the same section where the
6292 initialization of the literal pool base pointer takes place.
6293 emit_pool_after is only used in the non-overflow case on
6294 non-zarch CPUs, where we can emit the literal pool at the end of
6295 the function body within the text section. */
6296 if (NOTE_P (insn)
6297 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS
6298 && !pool->emit_pool_after)
6299 pool->emit_pool_after = PREV_INSN (insn);
6300 }
6301
6302 gcc_assert (pool->pool_insn || pool->size == 0);
6303
6304 if (pool->size >= 4096)
6305 {
6306 /* We're going to chunkify the pool, so remove the main
6307 pool placeholder insn. */
6308 remove_insn (pool->pool_insn);
6309
6310 s390_free_pool (pool);
6311 pool = NULL;
6312 }
6313
6314 /* If the function ends with the section where the literal pool
6315 should be emitted, set the marker to its end. */
6316 if (pool && !pool->emit_pool_after)
6317 pool->emit_pool_after = get_last_insn ();
6318
6319 return pool;
6320 }
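
/* The 4096-byte limit above reflects the base + 12-bit unsigned displacement
   addressing used to access pool entries: everything in a single pool must
   stay within 0..4095 bytes of the pool base label, otherwise the pool has
   to be split into chunks (see s390_chunkify_start below).  */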
6321
6322 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6323 Modify the current function to output the pool constants as well as
6324 the pool register setup instruction. */
6325
6326 static void
6327 s390_mainpool_finish (struct constant_pool *pool)
6328 {
6329 rtx base_reg = cfun->machine->base_reg;
6330 rtx insn;
6331
6332 /* If the pool is empty, we're done. */
6333 if (pool->size == 0)
6334 {
6335 /* We don't actually need a base register after all. */
6336 cfun->machine->base_reg = NULL_RTX;
6337
6338 if (pool->pool_insn)
6339 remove_insn (pool->pool_insn);
6340 s390_free_pool (pool);
6341 return;
6342 }
6343
6344 /* We need correct insn addresses. */
6345 shorten_branches (get_insns ());
6346
6347 /* On zSeries, we use a LARL to load the pool register. The pool is
6348 located in the .rodata section, so we emit it after the function. */
6349 if (TARGET_CPU_ZARCH)
6350 {
6351 insn = gen_main_base_64 (base_reg, pool->label);
6352 insn = emit_insn_after (insn, pool->pool_insn);
6353 INSN_ADDRESSES_NEW (insn, -1);
6354 remove_insn (pool->pool_insn);
6355
6356 insn = get_last_insn ();
6357 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6358 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6359
6360 s390_dump_pool (pool, 0);
6361 }
6362
6363 /* On S/390, if the total size of the function's code plus literal pool
6364 does not exceed 4096 bytes, we use BASR to set up a function base
6365 pointer, and emit the literal pool at the end of the function. */
6366 else if (INSN_ADDRESSES (INSN_UID (pool->emit_pool_after))
6367 + pool->size + 8 /* alignment slop */ < 4096)
6368 {
6369 insn = gen_main_base_31_small (base_reg, pool->label);
6370 insn = emit_insn_after (insn, pool->pool_insn);
6371 INSN_ADDRESSES_NEW (insn, -1);
6372 remove_insn (pool->pool_insn);
6373
6374 insn = emit_label_after (pool->label, insn);
6375 INSN_ADDRESSES_NEW (insn, -1);
6376
6377 /* emit_pool_after will be set by s390_mainpool_start to the
6378 last insn of the section where the literal pool should be
6379 emitted. */
6380 insn = pool->emit_pool_after;
6381
6382 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6383 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6384
6385 s390_dump_pool (pool, 1);
6386 }
6387
6388 /* Otherwise, we emit an inline literal pool and use BASR to branch
6389 over it, setting up the pool register at the same time. */
6390 else
6391 {
6392 rtx pool_end = gen_label_rtx ();
6393
6394 insn = gen_main_base_31_large (base_reg, pool->label, pool_end);
6395 insn = emit_insn_after (insn, pool->pool_insn);
6396 INSN_ADDRESSES_NEW (insn, -1);
6397 remove_insn (pool->pool_insn);
6398
6399 insn = emit_label_after (pool->label, insn);
6400 INSN_ADDRESSES_NEW (insn, -1);
6401
6402 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6403 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6404
6405 insn = emit_label_after (pool_end, pool->pool_insn);
6406 INSN_ADDRESSES_NEW (insn, -1);
6407
6408 s390_dump_pool (pool, 1);
6409 }
6410
6411
6412 /* Replace all literal pool references. */
6413
6414 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6415 {
6416 if (INSN_P (insn))
6417 replace_ltrel_base (&PATTERN (insn));
6418
6419 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6420 {
6421 rtx addr, pool_ref = NULL_RTX;
6422 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6423 if (pool_ref)
6424 {
6425 if (s390_execute_label (insn))
6426 addr = s390_find_execute (pool, insn);
6427 else
6428 addr = s390_find_constant (pool, get_pool_constant (pool_ref),
6429 get_pool_mode (pool_ref));
6430
6431 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
6432 INSN_CODE (insn) = -1;
6433 }
6434 }
6435 }
6436
6437
6438 /* Free the pool. */
6439 s390_free_pool (pool);
6440 }
6441
6442 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6443 We have decided we cannot use this pool, so revert all changes
6444 to the current function that were done by s390_mainpool_start. */
6445 static void
6446 s390_mainpool_cancel (struct constant_pool *pool)
6447 {
6448 /* We didn't actually change the instruction stream, so simply
6449 free the pool memory. */
6450 s390_free_pool (pool);
6451 }
6452
6453
6454 /* Chunkify the literal pool. */
6455
6456 #define S390_POOL_CHUNK_MIN 0xc00
6457 #define S390_POOL_CHUNK_MAX 0xe00
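
/* These bounds (3 KB / 3.5 KB) keep every chunk comfortably below the 4 KB
   displacement limit, presumably to leave headroom for the chunk's own
   alignment padding and for constants or execute templates that may still
   be added before the chunk is actually ended.  */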
6458
6459 static struct constant_pool *
6460 s390_chunkify_start (void)
6461 {
6462 struct constant_pool *curr_pool = NULL, *pool_list = NULL;
6463 int extra_size = 0;
6464 bitmap far_labels;
6465 rtx pending_ltrel = NULL_RTX;
6466 rtx insn;
6467
6468 rtx (*gen_reload_base) (rtx, rtx) =
6469 TARGET_CPU_ZARCH? gen_reload_base_64 : gen_reload_base_31;
6470
6471
6472 /* We need correct insn addresses. */
6473
6474 shorten_branches (get_insns ());
6475
6476 /* Scan all insns and move literals to pool chunks. */
6477
6478 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6479 {
6480 bool section_switch_p = false;
6481
6482 /* Check for pending LTREL_BASE. */
6483 if (INSN_P (insn))
6484 {
6485 rtx ltrel_base = find_ltrel_base (PATTERN (insn));
6486 if (ltrel_base)
6487 {
6488 gcc_assert (ltrel_base == pending_ltrel);
6489 pending_ltrel = NULL_RTX;
6490 }
6491 }
6492
6493 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
6494 {
6495 if (!curr_pool)
6496 curr_pool = s390_start_pool (&pool_list, insn);
6497
6498 s390_add_execute (curr_pool, insn);
6499 s390_add_pool_insn (curr_pool, insn);
6500 }
6501 else if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6502 {
6503 rtx pool_ref = NULL_RTX;
6504 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6505 if (pool_ref)
6506 {
6507 rtx constant = get_pool_constant (pool_ref);
6508 enum machine_mode mode = get_pool_mode (pool_ref);
6509
6510 if (!curr_pool)
6511 curr_pool = s390_start_pool (&pool_list, insn);
6512
6513 s390_add_constant (curr_pool, constant, mode);
6514 s390_add_pool_insn (curr_pool, insn);
6515
6516 /* Don't split the pool chunk between a LTREL_OFFSET load
6517 and the corresponding LTREL_BASE. */
6518 if (GET_CODE (constant) == CONST
6519 && GET_CODE (XEXP (constant, 0)) == UNSPEC
6520 && XINT (XEXP (constant, 0), 1) == UNSPEC_LTREL_OFFSET)
6521 {
6522 gcc_assert (!pending_ltrel);
6523 pending_ltrel = pool_ref;
6524 }
6525 }
6526 }
6527
6528 if (GET_CODE (insn) == JUMP_INSN || GET_CODE (insn) == CODE_LABEL)
6529 {
6530 if (curr_pool)
6531 s390_add_pool_insn (curr_pool, insn);
6532 /* An LTREL_BASE must follow within the same basic block. */
6533 gcc_assert (!pending_ltrel);
6534 }
6535
6536 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
6537 section_switch_p = true;
6538
6539 if (!curr_pool
6540 || INSN_ADDRESSES_SIZE () <= (size_t) INSN_UID (insn)
6541 || INSN_ADDRESSES (INSN_UID (insn)) == -1)
6542 continue;
6543
6544 if (TARGET_CPU_ZARCH)
6545 {
6546 if (curr_pool->size < S390_POOL_CHUNK_MAX)
6547 continue;
6548
6549 s390_end_pool (curr_pool, NULL_RTX);
6550 curr_pool = NULL;
6551 }
6552 else
6553 {
6554 int chunk_size = INSN_ADDRESSES (INSN_UID (insn))
6555 - INSN_ADDRESSES (INSN_UID (curr_pool->first_insn))
6556 + extra_size;
6557
6558 /* We will later have to insert base register reload insns.
6559 Those will have an effect on code size, which we need to
6560 consider here. This calculation makes rather pessimistic
6561 worst-case assumptions. */
6562 if (GET_CODE (insn) == CODE_LABEL)
6563 extra_size += 6;
6564
6565 if (chunk_size < S390_POOL_CHUNK_MIN
6566 && curr_pool->size < S390_POOL_CHUNK_MIN
6567 && !section_switch_p)
6568 continue;
6569
6570 /* Pool chunks can only be inserted after BARRIERs ... */
6571 if (GET_CODE (insn) == BARRIER)
6572 {
6573 s390_end_pool (curr_pool, insn);
6574 curr_pool = NULL;
6575 extra_size = 0;
6576 }
6577
6578 /* ... so if we don't find one in time, create one. */
6579 else if (chunk_size > S390_POOL_CHUNK_MAX
6580 || curr_pool->size > S390_POOL_CHUNK_MAX
6581 || section_switch_p)
6582 {
6583 rtx label, jump, barrier;
6584
6585 if (!section_switch_p)
6586 {
6587 /* We can insert the barrier only after a 'real' insn. */
6588 if (GET_CODE (insn) != INSN && GET_CODE (insn) != CALL_INSN)
6589 continue;
6590 if (get_attr_length (insn) == 0)
6591 continue;
6592 /* Don't separate LTREL_BASE from the corresponding
6593 LTREL_OFFSET load. */
6594 if (pending_ltrel)
6595 continue;
6596 }
6597 else
6598 {
6599 gcc_assert (!pending_ltrel);
6600
6601 /* The old pool has to end before the section switch
6602 note in order to make it part of the current
6603 section. */
6604 insn = PREV_INSN (insn);
6605 }
6606
6607 label = gen_label_rtx ();
6608 jump = emit_jump_insn_after (gen_jump (label), insn);
6609 barrier = emit_barrier_after (jump);
6610 insn = emit_label_after (label, barrier);
6611 JUMP_LABEL (jump) = label;
6612 LABEL_NUSES (label) = 1;
6613
6614 INSN_ADDRESSES_NEW (jump, -1);
6615 INSN_ADDRESSES_NEW (barrier, -1);
6616 INSN_ADDRESSES_NEW (insn, -1);
6617
6618 s390_end_pool (curr_pool, barrier);
6619 curr_pool = NULL;
6620 extra_size = 0;
6621 }
6622 }
6623 }
6624
6625 if (curr_pool)
6626 s390_end_pool (curr_pool, NULL_RTX);
6627 gcc_assert (!pending_ltrel);
6628
6629 /* Find all labels that are branched into
6630 from an insn belonging to a different chunk. */
6631
6632 far_labels = BITMAP_ALLOC (NULL);
6633
6634 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6635 {
6636 /* Labels marked with LABEL_PRESERVE_P can be target
6637 of non-local jumps, so we have to mark them.
6638 The same holds for named labels.
6639
6640 Don't do that, however, if it is the label before
6641 a jump table. */
6642
6643 if (GET_CODE (insn) == CODE_LABEL
6644 && (LABEL_PRESERVE_P (insn) || LABEL_NAME (insn)))
6645 {
6646 rtx vec_insn = next_real_insn (insn);
6647 rtx vec_pat = vec_insn && GET_CODE (vec_insn) == JUMP_INSN ?
6648 PATTERN (vec_insn) : NULL_RTX;
6649 if (!vec_pat
6650 || !(GET_CODE (vec_pat) == ADDR_VEC
6651 || GET_CODE (vec_pat) == ADDR_DIFF_VEC))
6652 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (insn));
6653 }
6654
6655 /* If we have a direct jump (conditional or unconditional)
6656 or a casesi jump, check all potential targets. */
6657 else if (GET_CODE (insn) == JUMP_INSN)
6658 {
6659 rtx pat = PATTERN (insn);
6660 if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 2)
6661 pat = XVECEXP (pat, 0, 0);
6662
6663 if (GET_CODE (pat) == SET)
6664 {
6665 rtx label = JUMP_LABEL (insn);
6666 if (label)
6667 {
6668 if (s390_find_pool (pool_list, label)
6669 != s390_find_pool (pool_list, insn))
6670 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
6671 }
6672 }
6673 else if (GET_CODE (pat) == PARALLEL
6674 && XVECLEN (pat, 0) == 2
6675 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
6676 && GET_CODE (XVECEXP (pat, 0, 1)) == USE
6677 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == LABEL_REF)
6678 {
6679 /* Find the jump table used by this casesi jump. */
6680 rtx vec_label = XEXP (XEXP (XVECEXP (pat, 0, 1), 0), 0);
6681 rtx vec_insn = next_real_insn (vec_label);
6682 rtx vec_pat = vec_insn && GET_CODE (vec_insn) == JUMP_INSN ?
6683 PATTERN (vec_insn) : NULL_RTX;
6684 if (vec_pat
6685 && (GET_CODE (vec_pat) == ADDR_VEC
6686 || GET_CODE (vec_pat) == ADDR_DIFF_VEC))
6687 {
6688 int i, diff_p = GET_CODE (vec_pat) == ADDR_DIFF_VEC;
6689
6690 for (i = 0; i < XVECLEN (vec_pat, diff_p); i++)
6691 {
6692 rtx label = XEXP (XVECEXP (vec_pat, diff_p, i), 0);
6693
6694 if (s390_find_pool (pool_list, label)
6695 != s390_find_pool (pool_list, insn))
6696 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
6697 }
6698 }
6699 }
6700 }
6701 }
6702
6703 /* Insert base register reload insns before every pool. */
6704
6705 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
6706 {
6707 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
6708 curr_pool->label);
6709 rtx insn = curr_pool->first_insn;
6710 INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
6711 }
6712
6713 /* Insert base register reload insns at every far label. */
6714
6715 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6716 if (GET_CODE (insn) == CODE_LABEL
6717 && bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
6718 {
6719 struct constant_pool *pool = s390_find_pool (pool_list, insn);
6720 if (pool)
6721 {
6722 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
6723 pool->label);
6724 INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
6725 }
6726 }
6727
6728
6729 BITMAP_FREE (far_labels);
6730
6731
6732 /* Recompute insn addresses. */
6733
6734 init_insn_lengths ();
6735 shorten_branches (get_insns ());
6736
6737 return pool_list;
6738 }
6739
6740 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
6741 After we have decided to use this list, finish implementing
6742 all changes to the current function as required. */
6743
6744 static void
6745 s390_chunkify_finish (struct constant_pool *pool_list)
6746 {
6747 struct constant_pool *curr_pool = NULL;
6748 rtx insn;
6749
6750
6751 /* Replace all literal pool references. */
6752
6753 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6754 {
6755 if (INSN_P (insn))
6756 replace_ltrel_base (&PATTERN (insn));
6757
6758 curr_pool = s390_find_pool (pool_list, insn);
6759 if (!curr_pool)
6760 continue;
6761
6762 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6763 {
6764 rtx addr, pool_ref = NULL_RTX;
6765 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6766 if (pool_ref)
6767 {
6768 if (s390_execute_label (insn))
6769 addr = s390_find_execute (curr_pool, insn);
6770 else
6771 addr = s390_find_constant (curr_pool,
6772 get_pool_constant (pool_ref),
6773 get_pool_mode (pool_ref));
6774
6775 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
6776 INSN_CODE (insn) = -1;
6777 }
6778 }
6779 }
6780
6781 /* Dump out all literal pools. */
6782
6783 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
6784 s390_dump_pool (curr_pool, 0);
6785
6786 /* Free pool list. */
6787
6788 while (pool_list)
6789 {
6790 struct constant_pool *next = pool_list->next;
6791 s390_free_pool (pool_list);
6792 pool_list = next;
6793 }
6794 }
6795
6796 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
6797 We have decided we cannot use this list, so revert all changes
6798 to the current function that were done by s390_chunkify_start. */
6799
6800 static void
6801 s390_chunkify_cancel (struct constant_pool *pool_list)
6802 {
6803 struct constant_pool *curr_pool = NULL;
6804 rtx insn;
6805
6806 /* Remove all pool placeholder insns. */
6807
6808 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
6809 {
6810 /* Did we insert an extra barrier? Remove it. */
6811 rtx barrier = PREV_INSN (curr_pool->pool_insn);
6812 rtx jump = barrier? PREV_INSN (barrier) : NULL_RTX;
6813 rtx label = NEXT_INSN (curr_pool->pool_insn);
6814
6815 if (jump && GET_CODE (jump) == JUMP_INSN
6816 && barrier && GET_CODE (barrier) == BARRIER
6817 && label && GET_CODE (label) == CODE_LABEL
6818 && GET_CODE (PATTERN (jump)) == SET
6819 && SET_DEST (PATTERN (jump)) == pc_rtx
6820 && GET_CODE (SET_SRC (PATTERN (jump))) == LABEL_REF
6821 && XEXP (SET_SRC (PATTERN (jump)), 0) == label)
6822 {
6823 remove_insn (jump);
6824 remove_insn (barrier);
6825 remove_insn (label);
6826 }
6827
6828 remove_insn (curr_pool->pool_insn);
6829 }
6830
6831 /* Remove all base register reload insns. */
6832
6833 for (insn = get_insns (); insn; )
6834 {
6835 rtx next_insn = NEXT_INSN (insn);
6836
6837 if (GET_CODE (insn) == INSN
6838 && GET_CODE (PATTERN (insn)) == SET
6839 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
6840 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_RELOAD_BASE)
6841 remove_insn (insn);
6842
6843 insn = next_insn;
6844 }
6845
6846 /* Free pool list. */
6847
6848 while (pool_list)
6849 {
6850 struct constant_pool *next = pool_list->next;
6851 s390_free_pool (pool_list);
6852 pool_list = next;
6853 }
6854 }
6855
6856 /* Output the constant pool entry EXP in mode MODE with alignment ALIGN. */
6857
6858 void
6859 s390_output_pool_entry (rtx exp, enum machine_mode mode, unsigned int align)
6860 {
6861 REAL_VALUE_TYPE r;
6862
6863 switch (GET_MODE_CLASS (mode))
6864 {
6865 case MODE_FLOAT:
6866 case MODE_DECIMAL_FLOAT:
6867 gcc_assert (GET_CODE (exp) == CONST_DOUBLE);
6868
6869 REAL_VALUE_FROM_CONST_DOUBLE (r, exp);
6870 assemble_real (r, mode, align);
6871 break;
6872
6873 case MODE_INT:
6874 assemble_integer (exp, GET_MODE_SIZE (mode), align, 1);
6875 mark_symbol_refs_as_used (exp);
6876 break;
6877
6878 default:
6879 gcc_unreachable ();
6880 }
6881 }
6882
6883
6884 /* Return an RTL expression representing the value of the return address
6885 for the frame COUNT steps up from the current frame. FRAME is the
6886 frame pointer of that frame. */
6887
6888 rtx
6889 s390_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
6890 {
6891 int offset;
6892 rtx addr;
6893
6894 /* Without backchain, we fail for all but the current frame. */
6895
6896 if (!TARGET_BACKCHAIN && count > 0)
6897 return NULL_RTX;
6898
6899 /* For the current frame, we need to make sure the initial
6900 value of RETURN_REGNUM is actually saved. */
6901
6902 if (count == 0)
6903 {
6904 /* On non-z architectures branch splitting could overwrite r14. */
6905 if (TARGET_CPU_ZARCH)
6906 return get_hard_reg_initial_val (Pmode, RETURN_REGNUM);
6907 else
6908 {
6909 cfun_frame_layout.save_return_addr_p = true;
6910 return gen_rtx_MEM (Pmode, return_address_pointer_rtx);
6911 }
6912 }
6913
6914 if (TARGET_PACKED_STACK)
6915 offset = -2 * UNITS_PER_LONG;
6916 else
6917 offset = RETURN_REGNUM * UNITS_PER_LONG;
6918
6919 addr = plus_constant (frame, offset);
6920 addr = memory_address (Pmode, addr);
6921 return gen_rtx_MEM (Pmode, addr);
6922 }
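
/* With the default (non-packed) layout the save slot of GPR i sits at
   i * UNITS_PER_LONG within the register save area, so the return address
   (r14) is found at offset 112 from FRAME on 64-bit targets and at 56 on
   31-bit targets.  With TARGET_PACKED_STACK the save slots are moved to the
   top of the frame, which the -2 * UNITS_PER_LONG offset accounts for.  */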
6923
6924 /* Return an RTL expression representing the back chain stored in
6925 the current stack frame. */
6926
6927 rtx
6928 s390_back_chain_rtx (void)
6929 {
6930 rtx chain;
6931
6932 gcc_assert (TARGET_BACKCHAIN);
6933
6934 if (TARGET_PACKED_STACK)
6935 chain = plus_constant (stack_pointer_rtx,
6936 STACK_POINTER_OFFSET - UNITS_PER_LONG);
6937 else
6938 chain = stack_pointer_rtx;
6939
6940 chain = gen_rtx_MEM (Pmode, chain);
6941 return chain;
6942 }
6943
6944 /* Find first call clobbered register unused in a function.
6945 This could be used as base register in a leaf function
6946 or for holding the return address before epilogue. */
6947
6948 static int
6949 find_unused_clobbered_reg (void)
6950 {
6951 int i;
6952 for (i = 0; i < 6; i++)
6953 if (!df_regs_ever_live_p (i))
6954 return i;
6955 return 0;
6956 }
6957
6958
6959 /* Helper function for s390_regs_ever_clobbered. Sets the fields in DATA for all
6960 clobbered hard regs in SETREG. */
6961
6962 static void
6963 s390_reg_clobbered_rtx (rtx setreg, const_rtx set_insn ATTRIBUTE_UNUSED, void *data)
6964 {
6965 int *regs_ever_clobbered = (int *)data;
6966 unsigned int i, regno;
6967 enum machine_mode mode = GET_MODE (setreg);
6968
6969 if (GET_CODE (setreg) == SUBREG)
6970 {
6971 rtx inner = SUBREG_REG (setreg);
6972 if (!GENERAL_REG_P (inner))
6973 return;
6974 regno = subreg_regno (setreg);
6975 }
6976 else if (GENERAL_REG_P (setreg))
6977 regno = REGNO (setreg);
6978 else
6979 return;
6980
6981 for (i = regno;
6982 i < regno + HARD_REGNO_NREGS (regno, mode);
6983 i++)
6984 regs_ever_clobbered[i] = 1;
6985 }
6986
6987 /* Walks through all basic blocks of the current function looking
6988 for clobbered hard regs using s390_reg_clobbered_rtx. The fields
6989 of the passed integer array REGS_EVER_CLOBBERED are set to one for
6990 each of those regs. */
6991
6992 static void
6993 s390_regs_ever_clobbered (int *regs_ever_clobbered)
6994 {
6995 basic_block cur_bb;
6996 rtx cur_insn;
6997 unsigned int i;
6998
6999 memset (regs_ever_clobbered, 0, 16 * sizeof (int));
7000
7001 /* For non-leaf functions we have to consider all call clobbered regs to be
7002 clobbered. */
7003 if (!current_function_is_leaf)
7004 {
7005 for (i = 0; i < 16; i++)
7006 regs_ever_clobbered[i] = call_really_used_regs[i];
7007 }
7008
7009 /* Make the "magic" eh_return registers live if necessary. For regs_ever_live
7010 this work is done by liveness analysis (mark_regs_live_at_end).
7011 Special care is needed for functions containing landing pads. Landing pads
7012 may use the eh registers, but the code which sets these registers is not
7013 contained in that function. Hence s390_regs_ever_clobbered is not able to
7014 deal with this automatically. */
7015 if (crtl->calls_eh_return || cfun->machine->has_landing_pad_p)
7016 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM ; i++)
7017 if (crtl->calls_eh_return
7018 || (cfun->machine->has_landing_pad_p
7019 && df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
7020 regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;
7021
7022 /* For nonlocal gotos all call-saved registers have to be saved.
7023 This flag is also set for the unwinding code in libgcc.
7024 See expand_builtin_unwind_init. For regs_ever_live this is done by
7025 reload. */
7026 if (cfun->has_nonlocal_label)
7027 for (i = 0; i < 16; i++)
7028 if (!call_really_used_regs[i])
7029 regs_ever_clobbered[i] = 1;
7030
7031 FOR_EACH_BB (cur_bb)
7032 {
7033 FOR_BB_INSNS (cur_bb, cur_insn)
7034 {
7035 if (INSN_P (cur_insn))
7036 note_stores (PATTERN (cur_insn),
7037 s390_reg_clobbered_rtx,
7038 regs_ever_clobbered);
7039 }
7040 }
7041 }
7042
7043 /* Determine the frame area which actually has to be accessed
7044 in the function epilogue. The values are stored at the
7045 given pointers AREA_BOTTOM (address of the lowest used stack
7046 address) and AREA_TOP (address of the first item which does
7047 not belong to the stack frame). */
7048
7049 static void
7050 s390_frame_area (int *area_bottom, int *area_top)
7051 {
7052 int b, t;
7053 int i;
7054
7055 b = INT_MAX;
7056 t = INT_MIN;
7057
7058 if (cfun_frame_layout.first_restore_gpr != -1)
7059 {
7060 b = (cfun_frame_layout.gprs_offset
7061 + cfun_frame_layout.first_restore_gpr * UNITS_PER_LONG);
7062 t = b + (cfun_frame_layout.last_restore_gpr
7063 - cfun_frame_layout.first_restore_gpr + 1) * UNITS_PER_LONG;
7064 }
7065
7066 if (TARGET_64BIT && cfun_save_high_fprs_p)
7067 {
7068 b = MIN (b, cfun_frame_layout.f8_offset);
7069 t = MAX (t, (cfun_frame_layout.f8_offset
7070 + cfun_frame_layout.high_fprs * 8));
7071 }
7072
7073 if (!TARGET_64BIT)
7074 for (i = 2; i < 4; i++)
7075 if (cfun_fpr_bit_p (i))
7076 {
7077 b = MIN (b, cfun_frame_layout.f4_offset + (i - 2) * 8);
7078 t = MAX (t, cfun_frame_layout.f4_offset + (i - 1) * 8);
7079 }
7080
7081 *area_bottom = b;
7082 *area_top = t;
7083 }
7084
7085 /* Fill cfun->machine with info about register usage of current function.
7086 Return in CLOBBERED_REGS which GPRs are currently considered set. */
7087
7088 static void
7089 s390_register_info (int clobbered_regs[])
7090 {
7091 int i, j;
7092
7093 /* fprs 8 - 15 are call saved for 64 Bit ABI. */
7094 cfun_frame_layout.fpr_bitmap = 0;
7095 cfun_frame_layout.high_fprs = 0;
7096 if (TARGET_64BIT)
7097 for (i = 24; i < 32; i++)
7098 if (df_regs_ever_live_p (i) && !global_regs[i])
7099 {
7100 cfun_set_fpr_bit (i - 16);
7101 cfun_frame_layout.high_fprs++;
7102 }
7103
7104 /* Find first and last gpr to be saved. We trust regs_ever_live
7105 data, except that we don't save and restore global registers.
7106
7107 Also, all registers with special meaning to the compiler need
7108 extra handling. */
7109
7110 s390_regs_ever_clobbered (clobbered_regs);
7111
7112 for (i = 0; i < 16; i++)
7113 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i] && !fixed_regs[i];
7114
7115 if (frame_pointer_needed)
7116 clobbered_regs[HARD_FRAME_POINTER_REGNUM] = 1;
7117
7118 if (flag_pic)
7119 clobbered_regs[PIC_OFFSET_TABLE_REGNUM]
7120 |= df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM);
7121
7122 clobbered_regs[BASE_REGNUM]
7123 |= (cfun->machine->base_reg
7124 && REGNO (cfun->machine->base_reg) == BASE_REGNUM);
7125
7126 clobbered_regs[RETURN_REGNUM]
7127 |= (!current_function_is_leaf
7128 || TARGET_TPF_PROFILING
7129 || cfun->machine->split_branches_pending_p
7130 || cfun_frame_layout.save_return_addr_p
7131 || crtl->calls_eh_return
7132 || cfun->stdarg);
7133
7134 clobbered_regs[STACK_POINTER_REGNUM]
7135 |= (!current_function_is_leaf
7136 || TARGET_TPF_PROFILING
7137 || cfun_save_high_fprs_p
7138 || get_frame_size () > 0
7139 || cfun->calls_alloca
7140 || cfun->stdarg);
7141
7142 for (i = 6; i < 16; i++)
7143 if (df_regs_ever_live_p (i) || clobbered_regs[i])
7144 break;
7145 for (j = 15; j > i; j--)
7146 if (df_regs_ever_live_p (j) || clobbered_regs[j])
7147 break;
7148
7149 if (i == 16)
7150 {
7151 /* Nothing to save/restore. */
7152 cfun_frame_layout.first_save_gpr_slot = -1;
7153 cfun_frame_layout.last_save_gpr_slot = -1;
7154 cfun_frame_layout.first_save_gpr = -1;
7155 cfun_frame_layout.first_restore_gpr = -1;
7156 cfun_frame_layout.last_save_gpr = -1;
7157 cfun_frame_layout.last_restore_gpr = -1;
7158 }
7159 else
7160 {
7161 /* Save slots for gprs from i to j. */
7162 cfun_frame_layout.first_save_gpr_slot = i;
7163 cfun_frame_layout.last_save_gpr_slot = j;
7164
7165 for (i = cfun_frame_layout.first_save_gpr_slot;
7166 i < cfun_frame_layout.last_save_gpr_slot + 1;
7167 i++)
7168 if (clobbered_regs[i])
7169 break;
7170
7171 for (j = cfun_frame_layout.last_save_gpr_slot; j > i; j--)
7172 if (clobbered_regs[j])
7173 break;
7174
7175 if (i == cfun_frame_layout.last_save_gpr_slot + 1)
7176 {
7177 /* Nothing to save/restore. */
7178 cfun_frame_layout.first_save_gpr = -1;
7179 cfun_frame_layout.first_restore_gpr = -1;
7180 cfun_frame_layout.last_save_gpr = -1;
7181 cfun_frame_layout.last_restore_gpr = -1;
7182 }
7183 else
7184 {
7185 /* Save / Restore from gpr i to j. */
7186 cfun_frame_layout.first_save_gpr = i;
7187 cfun_frame_layout.first_restore_gpr = i;
7188 cfun_frame_layout.last_save_gpr = j;
7189 cfun_frame_layout.last_restore_gpr = j;
7190 }
7191 }
7192
7193 if (cfun->stdarg)
7194 {
7195 /* Varargs functions need to save gprs 2 to 6. */
7196 if (cfun->va_list_gpr_size
7197 && crtl->args.info.gprs < GP_ARG_NUM_REG)
7198 {
7199 int min_gpr = crtl->args.info.gprs;
7200 int max_gpr = min_gpr + cfun->va_list_gpr_size;
7201 if (max_gpr > GP_ARG_NUM_REG)
7202 max_gpr = GP_ARG_NUM_REG;
7203
7204 if (cfun_frame_layout.first_save_gpr == -1
7205 || cfun_frame_layout.first_save_gpr > 2 + min_gpr)
7206 {
7207 cfun_frame_layout.first_save_gpr = 2 + min_gpr;
7208 cfun_frame_layout.first_save_gpr_slot = 2 + min_gpr;
7209 }
7210
7211 if (cfun_frame_layout.last_save_gpr == -1
7212 || cfun_frame_layout.last_save_gpr < 2 + max_gpr - 1)
7213 {
7214 cfun_frame_layout.last_save_gpr = 2 + max_gpr - 1;
7215 cfun_frame_layout.last_save_gpr_slot = 2 + max_gpr - 1;
7216 }
7217 }
7218
7219 /* Mark f0, f2 for 31 bit and f0-f4 for 64 bit to be saved. */
7220 if (TARGET_HARD_FLOAT && cfun->va_list_fpr_size
7221 && crtl->args.info.fprs < FP_ARG_NUM_REG)
7222 {
7223 int min_fpr = crtl->args.info.fprs;
7224 int max_fpr = min_fpr + cfun->va_list_fpr_size;
7225 if (max_fpr > FP_ARG_NUM_REG)
7226 max_fpr = FP_ARG_NUM_REG;
7227
7228 /* ??? This is currently required to ensure proper location
7229 of the fpr save slots within the va_list save area. */
7230 if (TARGET_PACKED_STACK)
7231 min_fpr = 0;
7232
7233 for (i = min_fpr; i < max_fpr; i++)
7234 cfun_set_fpr_bit (i);
7235 }
7236 }
7237
7238 if (!TARGET_64BIT)
7239 for (i = 2; i < 4; i++)
7240 if (df_regs_ever_live_p (i + 16) && !global_regs[i + 16])
7241 cfun_set_fpr_bit (i);
7242 }
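
/* Worked example for the stdarg handling above (illustrative only): the
   first GP_ARG_NUM_REG integer arguments are passed in r2..r6.  For a
   variadic function whose named arguments already consumed two of them
   (crtl->args.info.gprs == 2), the code extends the GPR save range so that
   at least r4..r6 are dumped to the register save area, allowing va_arg to
   pick up the anonymous arguments from memory.  The FPR part does the same
   for the even-numbered floating point argument registers starting at f0.  */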
7243
7244 /* Fill cfun->machine with info about frame of current function. */
7245
7246 static void
7247 s390_frame_info (void)
7248 {
7249 int i;
7250
7251 cfun_frame_layout.frame_size = get_frame_size ();
7252 if (!TARGET_64BIT && cfun_frame_layout.frame_size > 0x7fff0000)
7253 fatal_error ("total size of local variables exceeds architecture limit");
7254
7255 if (!TARGET_PACKED_STACK)
7256 {
7257 cfun_frame_layout.backchain_offset = 0;
7258 cfun_frame_layout.f0_offset = 16 * UNITS_PER_LONG;
7259 cfun_frame_layout.f4_offset = cfun_frame_layout.f0_offset + 2 * 8;
7260 cfun_frame_layout.f8_offset = -cfun_frame_layout.high_fprs * 8;
7261 cfun_frame_layout.gprs_offset = (cfun_frame_layout.first_save_gpr_slot
7262 * UNITS_PER_LONG);
7263 }
7264 else if (TARGET_BACKCHAIN) /* kernel stack layout */
7265 {
7266 cfun_frame_layout.backchain_offset = (STACK_POINTER_OFFSET
7267 - UNITS_PER_LONG);
7268 cfun_frame_layout.gprs_offset
7269 = (cfun_frame_layout.backchain_offset
7270 - (STACK_POINTER_REGNUM - cfun_frame_layout.first_save_gpr_slot + 1)
7271 * UNITS_PER_LONG);
7272
7273 if (TARGET_64BIT)
7274 {
7275 cfun_frame_layout.f4_offset
7276 = (cfun_frame_layout.gprs_offset
7277 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7278
7279 cfun_frame_layout.f0_offset
7280 = (cfun_frame_layout.f4_offset
7281 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7282 }
7283 else
7284 {
7285 /* On 31 bit we have to care about the alignment of the
7286 floating point register save slots to provide the fastest access. */
7287 cfun_frame_layout.f0_offset
7288 = ((cfun_frame_layout.gprs_offset
7289 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1))
7290 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7291
7292 cfun_frame_layout.f4_offset
7293 = (cfun_frame_layout.f0_offset
7294 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7295 }
7296 }
7297 else /* no backchain */
7298 {
7299 cfun_frame_layout.f4_offset
7300 = (STACK_POINTER_OFFSET
7301 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7302
7303 cfun_frame_layout.f0_offset
7304 = (cfun_frame_layout.f4_offset
7305 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7306
7307 cfun_frame_layout.gprs_offset
7308 = cfun_frame_layout.f0_offset - cfun_gprs_save_area_size;
7309 }
7310
7311 if (current_function_is_leaf
7312 && !TARGET_TPF_PROFILING
7313 && cfun_frame_layout.frame_size == 0
7314 && !cfun_save_high_fprs_p
7315 && !cfun->calls_alloca
7316 && !cfun->stdarg)
7317 return;
7318
7319 if (!TARGET_PACKED_STACK)
7320 cfun_frame_layout.frame_size += (STACK_POINTER_OFFSET
7321 + crtl->outgoing_args_size
7322 + cfun_frame_layout.high_fprs * 8);
7323 else
7324 {
7325 if (TARGET_BACKCHAIN)
7326 cfun_frame_layout.frame_size += UNITS_PER_LONG;
7327
7328 /* No alignment trouble here because f8-f15 are only saved under
7329 64 bit. */
7330 cfun_frame_layout.f8_offset = (MIN (MIN (cfun_frame_layout.f0_offset,
7331 cfun_frame_layout.f4_offset),
7332 cfun_frame_layout.gprs_offset)
7333 - cfun_frame_layout.high_fprs * 8);
7334
7335 cfun_frame_layout.frame_size += cfun_frame_layout.high_fprs * 8;
7336
7337 for (i = 0; i < 8; i++)
7338 if (cfun_fpr_bit_p (i))
7339 cfun_frame_layout.frame_size += 8;
7340
7341 cfun_frame_layout.frame_size += cfun_gprs_save_area_size;
7342
7343 /* If under 31 bit an odd number of gprs has to be saved, we have to adjust
7344 the frame size to sustain 8 byte alignment of stack frames. */
7345 cfun_frame_layout.frame_size = ((cfun_frame_layout.frame_size +
7346 STACK_BOUNDARY / BITS_PER_UNIT - 1)
7347 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1));
7348
7349 cfun_frame_layout.frame_size += crtl->outgoing_args_size;
7350 }
7351 }
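
/* Sketch of the resulting !TARGET_PACKED_STACK layout on a 64-bit target
   (offsets relative to the incoming stack pointer; illustrative only):

         0    back chain
         8    reserved
        16    slot of %r2          (slot of %rN at N * UNITS_PER_LONG)
        ...
       120    slot of %r15
       128    f0, f2               (f0_offset = 16 * UNITS_PER_LONG)
       144    f4, f6               (f4_offset = f0_offset + 2 * 8)
       160    STACK_POINTER_OFFSET: end of the register save area

   Call-saved FPRs f8-f15, when used, are saved within the function's own
   frame (f8_offset) rather than in this area.  */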
7352
7353 /* Generate frame layout. Fills in register and frame data for the current
7354 function in cfun->machine. This routine can be called multiple times;
7355 it will re-do the complete frame layout every time. */
7356
7357 static void
7358 s390_init_frame_layout (void)
7359 {
7360 HOST_WIDE_INT frame_size;
7361 int base_used;
7362 int clobbered_regs[16];
7363
7364 /* On S/390 machines, we may need to perform branch splitting, which
7365 will require both base and return address register. We have no
7366 choice but to assume we're going to need them until right at the
7367 end of the machine dependent reorg phase. */
7368 if (!TARGET_CPU_ZARCH)
7369 cfun->machine->split_branches_pending_p = true;
7370
7371 do
7372 {
7373 frame_size = cfun_frame_layout.frame_size;
7374
7375 /* Try to predict whether we'll need the base register. */
7376 base_used = cfun->machine->split_branches_pending_p
7377 || crtl->uses_const_pool
7378 || (!DISP_IN_RANGE (frame_size)
7379 && !CONST_OK_FOR_K (frame_size));
7380
7381 /* Decide which register to use as literal pool base. In small
7382 leaf functions, try to use an unused call-clobbered register
7383 as base register to avoid save/restore overhead. */
7384 if (!base_used)
7385 cfun->machine->base_reg = NULL_RTX;
7386 else if (current_function_is_leaf && !df_regs_ever_live_p (5))
7387 cfun->machine->base_reg = gen_rtx_REG (Pmode, 5);
7388 else
7389 cfun->machine->base_reg = gen_rtx_REG (Pmode, BASE_REGNUM);
7390
7391 s390_register_info (clobbered_regs);
7392 s390_frame_info ();
7393 }
7394 while (frame_size != cfun_frame_layout.frame_size);
7395 }
7396
7397 /* Update frame layout. Recompute actual register save data based on
7398 current info and update regs_ever_live for the special registers.
7399 May be called multiple times, but may never cause *more* registers
7400 to be saved than s390_init_frame_layout allocated room for. */
7401
7402 static void
7403 s390_update_frame_layout (void)
7404 {
7405 int clobbered_regs[16];
7406
7407 s390_register_info (clobbered_regs);
7408
7409 df_set_regs_ever_live (BASE_REGNUM,
7410 clobbered_regs[BASE_REGNUM] ? true : false);
7411 df_set_regs_ever_live (RETURN_REGNUM,
7412 clobbered_regs[RETURN_REGNUM] ? true : false);
7413 df_set_regs_ever_live (STACK_POINTER_REGNUM,
7414 clobbered_regs[STACK_POINTER_REGNUM] ? true : false);
7415
7416 if (cfun->machine->base_reg)
7417 df_set_regs_ever_live (REGNO (cfun->machine->base_reg), true);
7418 }
7419
7420 /* Return true if it is legal to put a value with MODE into REGNO. */
7421
7422 bool
7423 s390_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
7424 {
7425 switch (REGNO_REG_CLASS (regno))
7426 {
7427 case FP_REGS:
7428 if (REGNO_PAIR_OK (regno, mode))
7429 {
7430 if (mode == SImode || mode == DImode)
7431 return true;
7432
7433 if (FLOAT_MODE_P (mode) && GET_MODE_CLASS (mode) != MODE_VECTOR_FLOAT)
7434 return true;
7435 }
7436 break;
7437 case ADDR_REGS:
7438 if (FRAME_REGNO_P (regno) && mode == Pmode)
7439 return true;
7440
7441 /* fallthrough */
7442 case GENERAL_REGS:
7443 if (REGNO_PAIR_OK (regno, mode))
7444 {
7445 if (TARGET_ZARCH
7446 || (mode != TFmode && mode != TCmode && mode != TDmode))
7447 return true;
7448 }
7449 break;
7450 case CC_REGS:
7451 if (GET_MODE_CLASS (mode) == MODE_CC)
7452 return true;
7453 break;
7454 case ACCESS_REGS:
7455 if (REGNO_PAIR_OK (regno, mode))
7456 {
7457 if (mode == SImode || mode == Pmode)
7458 return true;
7459 }
7460 break;
7461 default:
7462 return false;
7463 }
7464
7465 return false;
7466 }
7467
7468 /* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */
7469
7470 bool
7471 s390_hard_regno_rename_ok (unsigned int old_reg, unsigned int new_reg)
7472 {
7473 /* Once we've decided upon a register to use as base register, it must
7474 no longer be used for any other purpose. */
7475 if (cfun->machine->base_reg)
7476 if (REGNO (cfun->machine->base_reg) == old_reg
7477 || REGNO (cfun->machine->base_reg) == new_reg)
7478 return false;
7479
7480 return true;
7481 }
7482
7483 /* Maximum number of registers to represent a value of mode MODE
7484 in a register of class RCLASS. */
7485
7486 int
7487 s390_class_max_nregs (enum reg_class rclass, enum machine_mode mode)
7488 {
7489 switch (rclass)
7490 {
7491 case FP_REGS:
7492 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
7493 return 2 * ((GET_MODE_SIZE (mode) / 2 + 8 - 1) / 8);
7494 else
7495 return (GET_MODE_SIZE (mode) + 8 - 1) / 8;
7496 case ACCESS_REGS:
7497 return (GET_MODE_SIZE (mode) + 4 - 1) / 4;
7498 default:
7499 break;
7500 }
7501 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
7502 }
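
/* Examples following from the formulas above: DFmode or DImode in FP_REGS
   needs 1 register, TFmode in FP_REGS needs 2 (a register pair), and TImode
   in GENERAL_REGS needs 2 registers on 64-bit targets but 4 on 31-bit
   targets (UNITS_PER_WORD is 8 resp. 4).  */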
7503
7504 /* Return true if register FROM can be eliminated via register TO. */
7505
7506 static bool
7507 s390_can_eliminate (const int from, const int to)
7508 {
7509 /* On zSeries machines, we have not marked the base register as fixed.
7510 Instead, we have an elimination rule BASE_REGNUM -> BASE_REGNUM.
7511 If a function requires the base register, we say here that this
7512 elimination cannot be performed. This will cause reload to free
7513 up the base register (as if it were fixed). On the other hand,
7514 if the current function does *not* require the base register, we
7515 say here the elimination succeeds, which in turn allows reload
7516 to allocate the base register for any other purpose. */
7517 if (from == BASE_REGNUM && to == BASE_REGNUM)
7518 {
7519 if (TARGET_CPU_ZARCH)
7520 {
7521 s390_init_frame_layout ();
7522 return cfun->machine->base_reg == NULL_RTX;
7523 }
7524
7525 return false;
7526 }
7527
7528 /* Everything else must point into the stack frame. */
7529 gcc_assert (to == STACK_POINTER_REGNUM
7530 || to == HARD_FRAME_POINTER_REGNUM);
7531
7532 gcc_assert (from == FRAME_POINTER_REGNUM
7533 || from == ARG_POINTER_REGNUM
7534 || from == RETURN_ADDRESS_POINTER_REGNUM);
7535
7536 /* Make sure we actually saved the return address. */
7537 if (from == RETURN_ADDRESS_POINTER_REGNUM)
7538 if (!crtl->calls_eh_return
7539 && !cfun->stdarg
7540 && !cfun_frame_layout.save_return_addr_p)
7541 return false;
7542
7543 return true;
7544 }
7545
7546 /* Return offset between register FROM and TO initially after prolog. */
7547
7548 HOST_WIDE_INT
7549 s390_initial_elimination_offset (int from, int to)
7550 {
7551 HOST_WIDE_INT offset;
7552 int index;
7553
7554 /* ??? Why are we called for non-eliminable pairs? */
7555 if (!s390_can_eliminate (from, to))
7556 return 0;
7557
7558 switch (from)
7559 {
7560 case FRAME_POINTER_REGNUM:
7561 offset = (get_frame_size()
7562 + STACK_POINTER_OFFSET
7563 + crtl->outgoing_args_size);
7564 break;
7565
7566 case ARG_POINTER_REGNUM:
7567 s390_init_frame_layout ();
7568 offset = cfun_frame_layout.frame_size + STACK_POINTER_OFFSET;
7569 break;
7570
7571 case RETURN_ADDRESS_POINTER_REGNUM:
7572 s390_init_frame_layout ();
7573 index = RETURN_REGNUM - cfun_frame_layout.first_save_gpr_slot;
7574 gcc_assert (index >= 0);
7575 offset = cfun_frame_layout.frame_size + cfun_frame_layout.gprs_offset;
7576 offset += index * UNITS_PER_LONG;
7577 break;
7578
7579 case BASE_REGNUM:
7580 offset = 0;
7581 break;
7582
7583 default:
7584 gcc_unreachable ();
7585 }
7586
7587 return offset;
7588 }
7589
7590 /* Emit insn to save fpr REGNUM at offset OFFSET relative
7591 to register BASE. Return generated insn. */
7592
7593 static rtx
7594 save_fpr (rtx base, int offset, int regnum)
7595 {
7596 rtx addr;
7597 addr = gen_rtx_MEM (DFmode, plus_constant (base, offset));
7598
7599 if (regnum >= 16 && regnum <= (16 + FP_ARG_NUM_REG))
7600 set_mem_alias_set (addr, get_varargs_alias_set ());
7601 else
7602 set_mem_alias_set (addr, get_frame_alias_set ());
7603
7604 return emit_move_insn (addr, gen_rtx_REG (DFmode, regnum));
7605 }
7606
7607 /* Emit insn to restore fpr REGNUM from offset OFFSET relative
7608 to register BASE. Return generated insn. */
7609
7610 static rtx
7611 restore_fpr (rtx base, int offset, int regnum)
7612 {
7613 rtx addr;
7614 addr = gen_rtx_MEM (DFmode, plus_constant (base, offset));
7615 set_mem_alias_set (addr, get_frame_alias_set ());
7616
7617 return emit_move_insn (gen_rtx_REG (DFmode, regnum), addr);
7618 }
7619
7620 /* Return true if REGNO is a global register, but not one
7621 of the special ones that need to be saved/restored anyway. */
7622
7623 static inline bool
7624 global_not_special_regno_p (int regno)
7625 {
7626 return (global_regs[regno]
7627 /* These registers are special and need to be
7628 restored in any case. */
7629 && !(regno == STACK_POINTER_REGNUM
7630 || regno == RETURN_REGNUM
7631 || regno == BASE_REGNUM
7632 || (flag_pic && regno == (int)PIC_OFFSET_TABLE_REGNUM)));
7633 }
7634
7635 /* Generate insn to save registers FIRST to LAST into
7636 the register save area located at offset OFFSET
7637 relative to register BASE. */
7638
7639 static rtx
7640 save_gprs (rtx base, int offset, int first, int last)
7641 {
7642 rtx addr, insn, note;
7643 int i;
7644
7645 addr = plus_constant (base, offset);
7646 addr = gen_rtx_MEM (Pmode, addr);
7647
7648 set_mem_alias_set (addr, get_frame_alias_set ());
7649
7650 /* Special-case single register. */
7651 if (first == last)
7652 {
7653 if (TARGET_64BIT)
7654 insn = gen_movdi (addr, gen_rtx_REG (Pmode, first));
7655 else
7656 insn = gen_movsi (addr, gen_rtx_REG (Pmode, first));
7657
7658 if (!global_not_special_regno_p (first))
7659 RTX_FRAME_RELATED_P (insn) = 1;
7660 return insn;
7661 }
7662
7663
7664 insn = gen_store_multiple (addr,
7665 gen_rtx_REG (Pmode, first),
7666 GEN_INT (last - first + 1));
7667
7668 if (first <= 6 && cfun->stdarg)
7669 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
7670 {
7671 rtx mem = XEXP (XVECEXP (PATTERN (insn), 0, i), 0);
7672
7673 if (first + i <= 6)
7674 set_mem_alias_set (mem, get_varargs_alias_set ());
7675 }
7676
7677 /* We need to set the FRAME_RELATED flag on all SETs
7678 inside the store-multiple pattern.
7679
7680 However, we must not emit DWARF records for registers 2..5
7681 if they are stored for use by variable arguments ...
7682
7683 ??? Unfortunately, it is not enough to simply not set the
7684 FRAME_RELATED flags for those SETs, because the first SET
7685 of the PARALLEL is always treated as if it had the flag
7686 set, even if it does not. Therefore we emit a new pattern
7687 without those registers as REG_FRAME_RELATED_EXPR note. */
7688
7689 if (first >= 6 && !global_not_special_regno_p (first))
7690 {
7691 rtx pat = PATTERN (insn);
7692
7693 for (i = 0; i < XVECLEN (pat, 0); i++)
7694 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
7695 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (pat,
7696 0, i)))))
7697 RTX_FRAME_RELATED_P (XVECEXP (pat, 0, i)) = 1;
7698
7699 RTX_FRAME_RELATED_P (insn) = 1;
7700 }
7701 else if (last >= 6)
7702 {
7703 int start;
7704
7705 for (start = first >= 6 ? first : 6; start <= last; start++)
7706 if (!global_not_special_regno_p (start))
7707 break;
7708
7709 if (start > last)
7710 return insn;
7711
7712 addr = plus_constant (base, offset + (start - first) * UNITS_PER_LONG);
7713 note = gen_store_multiple (gen_rtx_MEM (Pmode, addr),
7714 gen_rtx_REG (Pmode, start),
7715 GEN_INT (last - start + 1));
7716 note = PATTERN (note);
7717
7718 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
7719
7720 for (i = 0; i < XVECLEN (note, 0); i++)
7721 if (GET_CODE (XVECEXP (note, 0, i)) == SET
7722 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (note,
7723 0, i)))))
7724 RTX_FRAME_RELATED_P (XVECEXP (note, 0, i)) = 1;
7725
7726 RTX_FRAME_RELATED_P (insn) = 1;
7727 }
7728
7729 return insn;
7730 }
7731
7732 /* Generate insn to restore registers FIRST to LAST from
7733 the register save area located at offset OFFSET
7734 relative to register BASE. */
7735
7736 static rtx
7737 restore_gprs (rtx base, int offset, int first, int last)
7738 {
7739 rtx addr, insn;
7740
7741 addr = plus_constant (base, offset);
7742 addr = gen_rtx_MEM (Pmode, addr);
7743 set_mem_alias_set (addr, get_frame_alias_set ());
7744
7745 /* Special-case single register. */
7746 if (first == last)
7747 {
7748 if (TARGET_64BIT)
7749 insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
7750 else
7751 insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);
7752
7753 return insn;
7754 }
7755
7756 insn = gen_load_multiple (gen_rtx_REG (Pmode, first),
7757 addr,
7758 GEN_INT (last - first + 1));
7759 return insn;
7760 }
7761
7762 /* Return insn sequence to load the GOT register. */
7763
7764 static GTY(()) rtx got_symbol;
7765 rtx
7766 s390_load_got (void)
7767 {
7768 rtx insns;
7769
7770 if (!got_symbol)
7771 {
7772 got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
7773 SYMBOL_REF_FLAGS (got_symbol) = SYMBOL_FLAG_LOCAL;
7774 }
7775
7776 start_sequence ();
7777
7778 if (TARGET_CPU_ZARCH)
7779 {
7780 emit_move_insn (pic_offset_table_rtx, got_symbol);
7781 }
7782 else
7783 {
7784 rtx offset;
7785
7786 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got_symbol),
7787 UNSPEC_LTREL_OFFSET);
7788 offset = gen_rtx_CONST (Pmode, offset);
7789 offset = force_const_mem (Pmode, offset);
7790
7791 emit_move_insn (pic_offset_table_rtx, offset);
7792
7793 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (offset, 0)),
7794 UNSPEC_LTREL_BASE);
7795 offset = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, offset);
7796
7797 emit_move_insn (pic_offset_table_rtx, offset);
7798 }
7799
7800 insns = get_insns ();
7801 end_sequence ();
7802 return insns;
7803 }
7804
7805 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
7806 and the change to the stack pointer. */
7807
7808 static void
7809 s390_emit_stack_tie (void)
7810 {
7811 rtx mem = gen_frame_mem (BLKmode,
7812 gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
7813
7814 emit_insn (gen_stack_tie (mem));
7815 }
7816
7817 /* Expand the prologue into a bunch of separate insns. */
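/* The emitted sequence is, roughly: save the call-saved GPRs with a
   store multiple, mark the literal pool slot, save the FPRs that need
   saving, decrement the stack pointer (adding a stack guard check and
   the backchain store when requested), save the high FPRs f8-f15 if
   required, set up the frame and GOT pointers if needed, and finally
   emit the TPF profiling hook.  */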
7818
7819 void
7820 s390_emit_prologue (void)
7821 {
7822 rtx insn, addr;
7823 rtx temp_reg;
7824 int i;
7825 int offset;
7826 int next_fpr = 0;
7827
7828 /* Complete frame layout. */
7829
7830 s390_update_frame_layout ();
7831
7832 /* Annotate all constant pool references to let the scheduler know
7833 they implicitly use the base register. */
7834
7835 push_topmost_sequence ();
7836
7837 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7838 if (INSN_P (insn))
7839 {
7840 annotate_constant_pool_refs (&PATTERN (insn));
7841 df_insn_rescan (insn);
7842 }
7843
7844 pop_topmost_sequence ();
7845
7846 /* Choose best register to use for temp use within prologue.
7847 See below for why TPF must use register 1. */
7848
7849 if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
7850 && !current_function_is_leaf
7851 && !TARGET_TPF_PROFILING)
7852 temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
7853 else
7854 temp_reg = gen_rtx_REG (Pmode, 1);
7855
7856 /* Save call saved gprs. */
7857 if (cfun_frame_layout.first_save_gpr != -1)
7858 {
7859 insn = save_gprs (stack_pointer_rtx,
7860 cfun_frame_layout.gprs_offset +
7861 UNITS_PER_LONG * (cfun_frame_layout.first_save_gpr
7862 - cfun_frame_layout.first_save_gpr_slot),
7863 cfun_frame_layout.first_save_gpr,
7864 cfun_frame_layout.last_save_gpr);
7865 emit_insn (insn);
7866 }
7867
7868 /* Dummy insn to mark literal pool slot. */
7869
7870 if (cfun->machine->base_reg)
7871 emit_insn (gen_main_pool (cfun->machine->base_reg));
7872
7873 offset = cfun_frame_layout.f0_offset;
7874
7875 /* Save f0 and f2. */
7876 for (i = 0; i < 2; i++)
7877 {
7878 if (cfun_fpr_bit_p (i))
7879 {
7880 save_fpr (stack_pointer_rtx, offset, i + 16);
7881 offset += 8;
7882 }
7883 else if (!TARGET_PACKED_STACK)
7884 offset += 8;
7885 }
7886
7887 /* Save f4 and f6. */
7888 offset = cfun_frame_layout.f4_offset;
7889 for (i = 2; i < 4; i++)
7890 {
7891 if (cfun_fpr_bit_p (i))
7892 {
7893 insn = save_fpr (stack_pointer_rtx, offset, i + 16);
7894 offset += 8;
7895
7896 /* If f4 and f6 are call clobbered, they are saved due to stdargs and
7897 therefore are not frame related. */
7898 if (!call_really_used_regs[i + 16])
7899 RTX_FRAME_RELATED_P (insn) = 1;
7900 }
7901 else if (!TARGET_PACKED_STACK)
7902 offset += 8;
7903 }
7904
7905 if (TARGET_PACKED_STACK
7906 && cfun_save_high_fprs_p
7907 && cfun_frame_layout.f8_offset + cfun_frame_layout.high_fprs * 8 > 0)
7908 {
7909 offset = (cfun_frame_layout.f8_offset
7910 + (cfun_frame_layout.high_fprs - 1) * 8);
7911
7912 for (i = 15; i > 7 && offset >= 0; i--)
7913 if (cfun_fpr_bit_p (i))
7914 {
7915 insn = save_fpr (stack_pointer_rtx, offset, i + 16);
7916
7917 RTX_FRAME_RELATED_P (insn) = 1;
7918 offset -= 8;
7919 }
7920 if (offset >= cfun_frame_layout.f8_offset)
7921 next_fpr = i + 16;
7922 }
7923
7924 if (!TARGET_PACKED_STACK)
7925 next_fpr = cfun_save_high_fprs_p ? 31 : 0;
7926
7927 /* Decrement stack pointer. */
7928
7929 if (cfun_frame_layout.frame_size > 0)
7930 {
7931 rtx frame_off = GEN_INT (-cfun_frame_layout.frame_size);
7932 rtx real_frame_off;
7933
7934 if (s390_stack_size)
7935 {
7936 HOST_WIDE_INT stack_guard;
7937
7938 if (s390_stack_guard)
7939 stack_guard = s390_stack_guard;
7940 else
7941 {
7942 /* If no value for the stack guard is provided, the smallest power of 2
7943 that is at least as large as the current frame size is chosen. */
7944 stack_guard = 1;
7945 while (stack_guard < cfun_frame_layout.frame_size)
7946 stack_guard <<= 1;
7947 }
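/* Illustration: for a frame size of 5000 bytes the loop above yields
   stack_guard == 8192, i.e. the smallest power of 2 that is at least
   as large as the frame size.  */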
7948
7949 if (cfun_frame_layout.frame_size >= s390_stack_size)
7950 {
7951 warning (0, "frame size of function %qs is "
7952 HOST_WIDE_INT_PRINT_DEC
7953 " bytes exceeding user provided stack limit of "
7954 HOST_WIDE_INT_PRINT_DEC " bytes. "
7955 "An unconditional trap is added.",
7956 current_function_name(), cfun_frame_layout.frame_size,
7957 s390_stack_size);
7958 emit_insn (gen_trap ());
7959 }
7960 else
7961 {
7962 /* stack_guard has to be smaller than s390_stack_size.
7963 Otherwise we would emit an AND with zero which would
7964 not match the test under mask pattern. */
7965 if (stack_guard >= s390_stack_size)
7966 {
7967 warning (0, "frame size of function %qs is "
7968 HOST_WIDE_INT_PRINT_DEC
7969 " bytes which is more than half the stack size. "
7970 "The dynamic check would not be reliable. "
7971 "No check emitted for this function.",
7972 current_function_name(),
7973 cfun_frame_layout.frame_size);
7974 }
7975 else
7976 {
7977 HOST_WIDE_INT stack_check_mask = ((s390_stack_size - 1)
7978 & ~(stack_guard - 1));
7979
7980 rtx t = gen_rtx_AND (Pmode, stack_pointer_rtx,
7981 GEN_INT (stack_check_mask));
7982 if (TARGET_64BIT)
7983 emit_insn (gen_ctrapdi4 (gen_rtx_EQ (VOIDmode,
7984 t, const0_rtx),
7985 t, const0_rtx, const0_rtx));
7986 else
7987 emit_insn (gen_ctrapsi4 (gen_rtx_EQ (VOIDmode,
7988 t, const0_rtx),
7989 t, const0_rtx, const0_rtx));
7990 }
7991 }
7992 }
7993
7994 if (s390_warn_framesize > 0
7995 && cfun_frame_layout.frame_size >= s390_warn_framesize)
7996 warning (0, "frame size of %qs is " HOST_WIDE_INT_PRINT_DEC " bytes",
7997 current_function_name (), cfun_frame_layout.frame_size);
7998
7999 if (s390_warn_dynamicstack_p && cfun->calls_alloca)
8000 warning (0, "%qs uses dynamic stack allocation", current_function_name ());
8001
8002 /* Save incoming stack pointer into temp reg. */
8003 if (TARGET_BACKCHAIN || next_fpr)
8004 insn = emit_insn (gen_move_insn (temp_reg, stack_pointer_rtx));
8005
8006 /* Subtract frame size from stack pointer. */
8007
8008 if (DISP_IN_RANGE (INTVAL (frame_off)))
8009 {
8010 insn = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8011 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8012 frame_off));
8013 insn = emit_insn (insn);
8014 }
8015 else
8016 {
8017 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
8018 frame_off = force_const_mem (Pmode, frame_off);
8019
8020 insn = emit_insn (gen_add2_insn (stack_pointer_rtx, frame_off));
8021 annotate_constant_pool_refs (&PATTERN (insn));
8022 }
8023
8024 RTX_FRAME_RELATED_P (insn) = 1;
8025 real_frame_off = GEN_INT (-cfun_frame_layout.frame_size);
8026 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
8027 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8028 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8029 real_frame_off)));
8030
8031 /* Set backchain. */
8032
8033 if (TARGET_BACKCHAIN)
8034 {
8035 if (cfun_frame_layout.backchain_offset)
8036 addr = gen_rtx_MEM (Pmode,
8037 plus_constant (stack_pointer_rtx,
8038 cfun_frame_layout.backchain_offset));
8039 else
8040 addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
8041 set_mem_alias_set (addr, get_frame_alias_set ());
8042 insn = emit_insn (gen_move_insn (addr, temp_reg));
8043 }
8044
8045 /* If we support non-call exceptions (e.g. for Java),
8046 we need to make sure the backchain pointer is set up
8047 before any possibly trapping memory access. */
8048 if (TARGET_BACKCHAIN && cfun->can_throw_non_call_exceptions)
8049 {
8050 addr = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode));
8051 emit_clobber (addr);
8052 }
8053 }
8054
8055 /* Save fprs 8 - 15 (64 bit ABI). */
8056
8057 if (cfun_save_high_fprs_p && next_fpr)
8058 {
8059 /* If the stack might be accessed through a different register
8060 we have to make sure that the stack pointer decrement is not
8061 moved below the use of the stack slots. */
8062 s390_emit_stack_tie ();
8063
8064 insn = emit_insn (gen_add2_insn (temp_reg,
8065 GEN_INT (cfun_frame_layout.f8_offset)));
8066
8067 offset = 0;
8068
8069 for (i = 24; i <= next_fpr; i++)
8070 if (cfun_fpr_bit_p (i - 16))
8071 {
8072 rtx addr = plus_constant (stack_pointer_rtx,
8073 cfun_frame_layout.frame_size
8074 + cfun_frame_layout.f8_offset
8075 + offset);
8076
8077 insn = save_fpr (temp_reg, offset, i);
8078 offset += 8;
8079 RTX_FRAME_RELATED_P (insn) = 1;
8080 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
8081 gen_rtx_SET (VOIDmode,
8082 gen_rtx_MEM (DFmode, addr),
8083 gen_rtx_REG (DFmode, i)));
8084 }
8085 }
8086
8087 /* Set frame pointer, if needed. */
8088
8089 if (frame_pointer_needed)
8090 {
8091 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
8092 RTX_FRAME_RELATED_P (insn) = 1;
8093 }
8094
8095 /* Set up got pointer, if needed. */
8096
8097 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
8098 {
8099 rtx insns = s390_load_got ();
8100
8101 for (insn = insns; insn; insn = NEXT_INSN (insn))
8102 annotate_constant_pool_refs (&PATTERN (insn));
8103
8104 emit_insn (insns);
8105 }
8106
8107 if (TARGET_TPF_PROFILING)
8108 {
8109 /* Generate a BAS instruction to serve as a function
8110 entry intercept to facilitate the use of tracing
8111 algorithms located at the branch target. */
8112 emit_insn (gen_prologue_tpf ());
8113
8114 /* Emit a blockage here so that all code
8115 lies between the profiling mechanisms. */
8116 emit_insn (gen_blockage ());
8117 }
8118 }
8119
8120 /* Expand the epilogue into a bunch of separate insns. */
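/* The emitted sequence is, roughly: emit the TPF profiling hook,
   select the frame or stack pointer as restore base (adjusting it if
   the register save area is out of displacement range), restore the
   call-saved FPRs, write any global GPRs back to their save slots,
   optionally fetch the return address early, restore the GPRs with a
   load multiple, and emit the return unless this is a sibcall.  */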
8121
8122 void
8123 s390_emit_epilogue (bool sibcall)
8124 {
8125 rtx frame_pointer, return_reg, cfa_restores = NULL_RTX;
8126 int area_bottom, area_top, offset = 0;
8127 int next_offset;
8128 rtvec p;
8129 int i;
8130
8131 if (TARGET_TPF_PROFILING)
8132 {
8133
8134 /* Generate a BAS instruction to serve as a function
8135 entry intercept to facilitate the use of tracing
8136 algorithms located at the branch target. */
8137
8138 /* Emit a blockage here so that all code
8139 lies between the profiling mechanisms. */
8140 emit_insn (gen_blockage ());
8141
8142 emit_insn (gen_epilogue_tpf ());
8143 }
8144
8145 /* Check whether to use frame or stack pointer for restore. */
8146
8147 frame_pointer = (frame_pointer_needed
8148 ? hard_frame_pointer_rtx : stack_pointer_rtx);
8149
8150 s390_frame_area (&area_bottom, &area_top);
8151
8152 /* Check whether we can access the register save area.
8153 If not, increment the frame pointer as required. */
8154
8155 if (area_top <= area_bottom)
8156 {
8157 /* Nothing to restore. */
8158 }
8159 else if (DISP_IN_RANGE (cfun_frame_layout.frame_size + area_bottom)
8160 && DISP_IN_RANGE (cfun_frame_layout.frame_size + area_top - 1))
8161 {
8162 /* Area is in range. */
8163 offset = cfun_frame_layout.frame_size;
8164 }
8165 else
8166 {
8167 rtx insn, frame_off, cfa;
8168
8169 offset = area_bottom < 0 ? -area_bottom : 0;
8170 frame_off = GEN_INT (cfun_frame_layout.frame_size - offset);
8171
8172 cfa = gen_rtx_SET (VOIDmode, frame_pointer,
8173 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
8174 if (DISP_IN_RANGE (INTVAL (frame_off)))
8175 {
8176 insn = gen_rtx_SET (VOIDmode, frame_pointer,
8177 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
8178 insn = emit_insn (insn);
8179 }
8180 else
8181 {
8182 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
8183 frame_off = force_const_mem (Pmode, frame_off);
8184
8185 insn = emit_insn (gen_add2_insn (frame_pointer, frame_off));
8186 annotate_constant_pool_refs (&PATTERN (insn));
8187 }
8188 add_reg_note (insn, REG_CFA_ADJUST_CFA, cfa);
8189 RTX_FRAME_RELATED_P (insn) = 1;
8190 }
8191
8192 /* Restore call saved fprs. */
8193
8194 if (TARGET_64BIT)
8195 {
8196 if (cfun_save_high_fprs_p)
8197 {
8198 next_offset = cfun_frame_layout.f8_offset;
8199 for (i = 24; i < 32; i++)
8200 {
8201 if (cfun_fpr_bit_p (i - 16))
8202 {
8203 restore_fpr (frame_pointer,
8204 offset + next_offset, i);
8205 cfa_restores
8206 = alloc_reg_note (REG_CFA_RESTORE,
8207 gen_rtx_REG (DFmode, i), cfa_restores);
8208 next_offset += 8;
8209 }
8210 }
8211 }
8212
8213 }
8214 else
8215 {
8216 next_offset = cfun_frame_layout.f4_offset;
8217 for (i = 18; i < 20; i++)
8218 {
8219 if (cfun_fpr_bit_p (i - 16))
8220 {
8221 restore_fpr (frame_pointer,
8222 offset + next_offset, i);
8223 cfa_restores
8224 = alloc_reg_note (REG_CFA_RESTORE,
8225 gen_rtx_REG (DFmode, i), cfa_restores);
8226 next_offset += 8;
8227 }
8228 else if (!TARGET_PACKED_STACK)
8229 next_offset += 8;
8230 }
8231
8232 }
8233
8234 /* Return register. */
8235
8236 return_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
8237
8238 /* Restore call saved gprs. */
8239
8240 if (cfun_frame_layout.first_restore_gpr != -1)
8241 {
8242 rtx insn, addr;
8243 int i;
8244
8245 /* Check for global registers and store them back to the stack
8246 slots from which they get restored, so the restore leaves them unchanged. */
8247
8248 for (i = cfun_frame_layout.first_restore_gpr;
8249 i <= cfun_frame_layout.last_restore_gpr;
8250 i++)
8251 {
8252 if (global_not_special_regno_p (i))
8253 {
8254 addr = plus_constant (frame_pointer,
8255 offset + cfun_frame_layout.gprs_offset
8256 + (i - cfun_frame_layout.first_save_gpr_slot)
8257 * UNITS_PER_LONG);
8258 addr = gen_rtx_MEM (Pmode, addr);
8259 set_mem_alias_set (addr, get_frame_alias_set ());
8260 emit_move_insn (addr, gen_rtx_REG (Pmode, i));
8261 }
8262 else
8263 cfa_restores
8264 = alloc_reg_note (REG_CFA_RESTORE,
8265 gen_rtx_REG (Pmode, i), cfa_restores);
8266 }
8267
8268 if (! sibcall)
8269 {
8270 /* Fetch the return address from the stack before the load multiple;
8271 this helps scheduling. */
8272
8273 if (cfun_frame_layout.save_return_addr_p
8274 || (cfun_frame_layout.first_restore_gpr < BASE_REGNUM
8275 && cfun_frame_layout.last_restore_gpr > RETURN_REGNUM))
8276 {
8277 int return_regnum = find_unused_clobbered_reg();
8278 if (!return_regnum)
8279 return_regnum = 4;
8280 return_reg = gen_rtx_REG (Pmode, return_regnum);
8281
8282 addr = plus_constant (frame_pointer,
8283 offset + cfun_frame_layout.gprs_offset
8284 + (RETURN_REGNUM
8285 - cfun_frame_layout.first_save_gpr_slot)
8286 * UNITS_PER_LONG);
8287 addr = gen_rtx_MEM (Pmode, addr);
8288 set_mem_alias_set (addr, get_frame_alias_set ());
8289 emit_move_insn (return_reg, addr);
8290 }
8291 }
8292
8293 insn = restore_gprs (frame_pointer,
8294 offset + cfun_frame_layout.gprs_offset
8295 + (cfun_frame_layout.first_restore_gpr
8296 - cfun_frame_layout.first_save_gpr_slot)
8297 * UNITS_PER_LONG,
8298 cfun_frame_layout.first_restore_gpr,
8299 cfun_frame_layout.last_restore_gpr);
8300 insn = emit_insn (insn);
8301 REG_NOTES (insn) = cfa_restores;
8302 add_reg_note (insn, REG_CFA_DEF_CFA,
8303 plus_constant (stack_pointer_rtx, STACK_POINTER_OFFSET));
8304 RTX_FRAME_RELATED_P (insn) = 1;
8305 }
8306
8307 if (! sibcall)
8308 {
8309
8310 /* Return to caller. */
8311
8312 p = rtvec_alloc (2);
8313
8314 RTVEC_ELT (p, 0) = gen_rtx_RETURN (VOIDmode);
8315 RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode, return_reg);
8316 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
8317 }
8318 }
8319
8320
8321 /* Return the size in bytes of a function argument of
8322 type TYPE and/or mode MODE. At least one of TYPE or
8323 MODE must be specified. */
8324
8325 static int
8326 s390_function_arg_size (enum machine_mode mode, const_tree type)
8327 {
8328 if (type)
8329 return int_size_in_bytes (type);
8330
8331 /* No type info available for some library calls ... */
8332 if (mode != BLKmode)
8333 return GET_MODE_SIZE (mode);
8334
8335 /* If we have neither type nor mode, abort. */
8336 gcc_unreachable ();
8337 }
8338
8339 /* Return true if a function argument of type TYPE and mode MODE
8340 is to be passed in a floating-point register, if available. */
8341
8342 static bool
8343 s390_function_arg_float (enum machine_mode mode, const_tree type)
8344 {
8345 int size = s390_function_arg_size (mode, type);
8346 if (size > 8)
8347 return false;
8348
8349 /* Soft-float changes the ABI: no floating-point registers are used. */
8350 if (TARGET_SOFT_FLOAT)
8351 return false;
8352
8353 /* No type info available for some library calls ... */
8354 if (!type)
8355 return mode == SFmode || mode == DFmode || mode == SDmode || mode == DDmode;
8356
8357 /* The ABI says that record types with a single member are treated
8358 just like that member would be. */
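/* For instance, `struct { double d; }' is passed like a plain
   `double', i.e. in a floating-point register, whereas a record with
   two members is not.  */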
8359 while (TREE_CODE (type) == RECORD_TYPE)
8360 {
8361 tree field, single = NULL_TREE;
8362
8363 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
8364 {
8365 if (TREE_CODE (field) != FIELD_DECL)
8366 continue;
8367
8368 if (single == NULL_TREE)
8369 single = TREE_TYPE (field);
8370 else
8371 return false;
8372 }
8373
8374 if (single == NULL_TREE)
8375 return false;
8376 else
8377 type = single;
8378 }
8379
8380 return TREE_CODE (type) == REAL_TYPE;
8381 }
8382
8383 /* Return true if a function argument of type TYPE and mode MODE
8384 is to be passed in an integer register, or a pair of integer
8385 registers, if available. */
8386
8387 static bool
8388 s390_function_arg_integer (enum machine_mode mode, const_tree type)
8389 {
8390 int size = s390_function_arg_size (mode, type);
8391 if (size > 8)
8392 return false;
8393
8394 /* No type info available for some library calls ... */
8395 if (!type)
8396 return GET_MODE_CLASS (mode) == MODE_INT
8397 || (TARGET_SOFT_FLOAT && SCALAR_FLOAT_MODE_P (mode));
8398
8399 /* We accept small integral (and similar) types. */
8400 if (INTEGRAL_TYPE_P (type)
8401 || POINTER_TYPE_P (type)
8402 || TREE_CODE (type) == OFFSET_TYPE
8403 || (TARGET_SOFT_FLOAT && TREE_CODE (type) == REAL_TYPE))
8404 return true;
8405
8406 /* We also accept structs of size 1, 2, 4, 8 that are not
8407 passed in floating-point registers. */
8408 if (AGGREGATE_TYPE_P (type)
8409 && exact_log2 (size) >= 0
8410 && !s390_function_arg_float (mode, type))
8411 return true;
8412
8413 return false;
8414 }
8415
8416 /* Return 1 if a function argument of type TYPE and mode MODE
8417 is to be passed by reference. The ABI specifies that only
8418 structures of size 1, 2, 4, or 8 bytes are passed by value,
8419 all other structures (and complex numbers) are passed by
8420 reference. */
8421
8422 static bool
8423 s390_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
8424 enum machine_mode mode, const_tree type,
8425 bool named ATTRIBUTE_UNUSED)
8426 {
8427 int size = s390_function_arg_size (mode, type);
8428 if (size > 8)
8429 return true;
8430
8431 if (type)
8432 {
8433 if (AGGREGATE_TYPE_P (type) && exact_log2 (size) < 0)
8434 return 1;
8435
8436 if (TREE_CODE (type) == COMPLEX_TYPE
8437 || TREE_CODE (type) == VECTOR_TYPE)
8438 return 1;
8439 }
8440
8441 return 0;
8442 }
8443
8444 /* Update the data in CUM to advance over an argument of mode MODE and
8445 data type TYPE. (TYPE is null for libcalls where that information
8446 may not be available.). The boolean NAMED specifies whether the
8447 argument is a named argument (as opposed to an unnamed argument
8448 matching an ellipsis). */
8449
8450 static void
8451 s390_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
8452 const_tree type, bool named ATTRIBUTE_UNUSED)
8453 {
8454 if (s390_function_arg_float (mode, type))
8455 {
8456 cum->fprs += 1;
8457 }
8458 else if (s390_function_arg_integer (mode, type))
8459 {
8460 int size = s390_function_arg_size (mode, type);
8461 cum->gprs += ((size + UNITS_PER_LONG - 1) / UNITS_PER_LONG);
8462 }
8463 else
8464 gcc_unreachable ();
8465 }
8466
8467 /* Define where to put the arguments to a function.
8468 Value is zero to push the argument on the stack,
8469 or a hard register in which to store the argument.
8470
8471 MODE is the argument's machine mode.
8472 TYPE is the data type of the argument (as a tree).
8473 This is null for libcalls where that information may
8474 not be available.
8475 CUM is a variable of type CUMULATIVE_ARGS which gives info about
8476 the preceding args and about the function being called.
8477 NAMED is nonzero if this argument is a named parameter
8478 (otherwise it is an extra parameter matching an ellipsis).
8479
8480 On S/390, we use general purpose registers 2 through 6 to
8481 pass integer, pointer, and certain structure arguments, and
8482 floating point registers 0 and 2 (0, 2, 4, and 6 on 64-bit)
8483 to pass floating point arguments. All remaining arguments
8484 are pushed to the stack. */
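/* Illustrative example: for `void f (int a, double b, long c)' the
   assignment is a -> %r2, b -> %f0 (with hardware floating point),
   and c -> %r3; arguments that do not fit into the remaining
   registers go to the stack.  */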
8485
8486 static rtx
8487 s390_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
8488 const_tree type, bool named ATTRIBUTE_UNUSED)
8489 {
8490 if (s390_function_arg_float (mode, type))
8491 {
8492 if (cum->fprs + 1 > FP_ARG_NUM_REG)
8493 return 0;
8494 else
8495 return gen_rtx_REG (mode, cum->fprs + 16);
8496 }
8497 else if (s390_function_arg_integer (mode, type))
8498 {
8499 int size = s390_function_arg_size (mode, type);
8500 int n_gprs = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
8501
8502 if (cum->gprs + n_gprs > GP_ARG_NUM_REG)
8503 return 0;
8504 else if (n_gprs == 1 || UNITS_PER_WORD == UNITS_PER_LONG)
8505 return gen_rtx_REG (mode, cum->gprs + 2);
8506 else if (n_gprs == 2)
8507 {
8508 rtvec p = rtvec_alloc (2);
8509
8510 RTVEC_ELT (p, 0)
8511 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 2),
8512 const0_rtx);
8513 RTVEC_ELT (p, 1)
8514 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 3),
8515 GEN_INT (4));
8516
8517 return gen_rtx_PARALLEL (mode, p);
8518 }
8519 }
8520
8521 /* After the real arguments, expand_call calls us once again
8522 with a void_type_node type. Whatever we return here is
8523 passed as operand 2 to the call expanders.
8524
8525 We don't need this feature ... */
8526 else if (type == void_type_node)
8527 return const0_rtx;
8528
8529 gcc_unreachable ();
8530 }
8531
8532 /* Return true if return values of type TYPE should be returned
8533 in a memory buffer whose address is passed by the caller as
8534 hidden first argument. */
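/* E.g. an `int' or `double' result is returned in a register, while
   any aggregate -- even `struct { int i; }' -- is returned via the
   hidden buffer.  */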
8535
8536 static bool
8537 s390_return_in_memory (const_tree type, const_tree fundecl ATTRIBUTE_UNUSED)
8538 {
8539 /* We accept small integral (and similar) types. */
8540 if (INTEGRAL_TYPE_P (type)
8541 || POINTER_TYPE_P (type)
8542 || TREE_CODE (type) == OFFSET_TYPE
8543 || TREE_CODE (type) == REAL_TYPE)
8544 return int_size_in_bytes (type) > 8;
8545
8546 /* Aggregates and similar constructs are always returned
8547 in memory. */
8548 if (AGGREGATE_TYPE_P (type)
8549 || TREE_CODE (type) == COMPLEX_TYPE
8550 || TREE_CODE (type) == VECTOR_TYPE)
8551 return true;
8552
8553 /* ??? We get called on all sorts of random stuff from
8554 aggregate_value_p. We can't abort, but it's not clear
8555 what's safe to return. Pretend it's a struct I guess. */
8556 return true;
8557 }
8558
8559 /* Function arguments and return values are promoted to word size. */
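/* E.g. on 64 bit a `short' or `int' argument or return value is
   widened to DImode (Pmode); pointers take their signedness from
   POINTERS_EXTEND_UNSIGNED.  */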
8560
8561 static enum machine_mode
8562 s390_promote_function_mode (const_tree type, enum machine_mode mode,
8563 int *punsignedp,
8564 const_tree fntype ATTRIBUTE_UNUSED,
8565 int for_return ATTRIBUTE_UNUSED)
8566 {
8567 if (INTEGRAL_MODE_P (mode)
8568 && GET_MODE_SIZE (mode) < UNITS_PER_LONG)
8569 {
8570 if (POINTER_TYPE_P (type))
8571 *punsignedp = POINTERS_EXTEND_UNSIGNED;
8572 return Pmode;
8573 }
8574
8575 return mode;
8576 }
8577
8578 /* Define where to return a (scalar) value of type TYPE.
8579 If TYPE is null, define where to return a (scalar)
8580 value of mode MODE from a libcall. */
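/* E.g. an integral or pointer result comes back in %r2 (or the
   %r2/%r3 pair if it needs two GPRs), a floating-point result in %f0
   when hardware floating point is available.  */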
8581
8582 rtx
8583 s390_function_value (const_tree type, const_tree fn, enum machine_mode mode)
8584 {
8585 if (type)
8586 {
8587 int unsignedp = TYPE_UNSIGNED (type);
8588 mode = promote_function_mode (type, TYPE_MODE (type), &unsignedp, fn, 1);
8589 }
8590
8591 gcc_assert (GET_MODE_CLASS (mode) == MODE_INT || SCALAR_FLOAT_MODE_P (mode));
8592 gcc_assert (GET_MODE_SIZE (mode) <= 8);
8593
8594 if (TARGET_HARD_FLOAT && SCALAR_FLOAT_MODE_P (mode))
8595 return gen_rtx_REG (mode, 16);
8596 else if (GET_MODE_SIZE (mode) <= UNITS_PER_LONG
8597 || UNITS_PER_LONG == UNITS_PER_WORD)
8598 return gen_rtx_REG (mode, 2);
8599 else if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_LONG)
8600 {
8601 rtvec p = rtvec_alloc (2);
8602
8603 RTVEC_ELT (p, 0)
8604 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 2), const0_rtx);
8605 RTVEC_ELT (p, 1)
8606 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 3), GEN_INT (4));
8607
8608 return gen_rtx_PARALLEL (mode, p);
8609 }
8610
8611 gcc_unreachable ();
8612 }
8613
8614
8615 /* Create and return the va_list datatype.
8616
8617 On S/390, va_list is an array type equivalent to
8618
8619 typedef struct __va_list_tag
8620 {
8621 long __gpr;
8622 long __fpr;
8623 void *__overflow_arg_area;
8624 void *__reg_save_area;
8625 } va_list[1];
8626
8627 where __gpr and __fpr hold the number of general purpose
8628 or floating point arguments used up to now, respectively,
8629 __overflow_arg_area points to the stack location of the
8630 next argument passed on the stack, and __reg_save_area
8631 always points to the start of the register area in the
8632 call frame of the current function. The function prologue
8633 saves all registers used for argument passing into this
8634 area if the function uses variable arguments. */
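/* Illustration: after va_start in `void f (int a, double b, ...)'
   (with hardware floating point), __gpr is 1 and __fpr is 1 -- one GPR
   and one FPR consumed by the named arguments -- __overflow_arg_area
   points to the first stack argument, and __reg_save_area to the
   register save area of the current frame.  */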
8635
8636 static tree
8637 s390_build_builtin_va_list (void)
8638 {
8639 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
8640
8641 record = lang_hooks.types.make_type (RECORD_TYPE);
8642
8643 type_decl =
8644 build_decl (BUILTINS_LOCATION,
8645 TYPE_DECL, get_identifier ("__va_list_tag"), record);
8646
8647 f_gpr = build_decl (BUILTINS_LOCATION,
8648 FIELD_DECL, get_identifier ("__gpr"),
8649 long_integer_type_node);
8650 f_fpr = build_decl (BUILTINS_LOCATION,
8651 FIELD_DECL, get_identifier ("__fpr"),
8652 long_integer_type_node);
8653 f_ovf = build_decl (BUILTINS_LOCATION,
8654 FIELD_DECL, get_identifier ("__overflow_arg_area"),
8655 ptr_type_node);
8656 f_sav = build_decl (BUILTINS_LOCATION,
8657 FIELD_DECL, get_identifier ("__reg_save_area"),
8658 ptr_type_node);
8659
8660 va_list_gpr_counter_field = f_gpr;
8661 va_list_fpr_counter_field = f_fpr;
8662
8663 DECL_FIELD_CONTEXT (f_gpr) = record;
8664 DECL_FIELD_CONTEXT (f_fpr) = record;
8665 DECL_FIELD_CONTEXT (f_ovf) = record;
8666 DECL_FIELD_CONTEXT (f_sav) = record;
8667
8668 TREE_CHAIN (record) = type_decl;
8669 TYPE_NAME (record) = type_decl;
8670 TYPE_FIELDS (record) = f_gpr;
8671 DECL_CHAIN (f_gpr) = f_fpr;
8672 DECL_CHAIN (f_fpr) = f_ovf;
8673 DECL_CHAIN (f_ovf) = f_sav;
8674
8675 layout_type (record);
8676
8677 /* The correct type is an array type of one element. */
8678 return build_array_type (record, build_index_type (size_zero_node));
8679 }
8680
8681 /* Implement va_start by filling the va_list structure VALIST.
8682 STDARG_P is always true, and ignored.
8683 NEXTARG points to the first anonymous stack argument.
8684
8685 The following global variables are used to initialize
8686 the va_list structure:
8687
8688 crtl->args.info:
8689 holds number of gprs and fprs used for named arguments.
8690 crtl->args.arg_offset_rtx:
8691 holds the offset of the first anonymous stack argument
8692 (relative to the virtual arg pointer). */
8693
8694 static void
8695 s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
8696 {
8697 HOST_WIDE_INT n_gpr, n_fpr;
8698 int off;
8699 tree f_gpr, f_fpr, f_ovf, f_sav;
8700 tree gpr, fpr, ovf, sav, t;
8701
8702 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
8703 f_fpr = DECL_CHAIN (f_gpr);
8704 f_ovf = DECL_CHAIN (f_fpr);
8705 f_sav = DECL_CHAIN (f_ovf);
8706
8707 valist = build_va_arg_indirect_ref (valist);
8708 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
8709 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
8710 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
8711 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
8712
8713 /* Count number of gp and fp argument registers used. */
8714
8715 n_gpr = crtl->args.info.gprs;
8716 n_fpr = crtl->args.info.fprs;
8717
8718 if (cfun->va_list_gpr_size)
8719 {
8720 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
8721 build_int_cst (NULL_TREE, n_gpr));
8722 TREE_SIDE_EFFECTS (t) = 1;
8723 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8724 }
8725
8726 if (cfun->va_list_fpr_size)
8727 {
8728 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
8729 build_int_cst (NULL_TREE, n_fpr));
8730 TREE_SIDE_EFFECTS (t) = 1;
8731 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8732 }
8733
8734 /* Find the overflow area. */
8735 if (n_gpr + cfun->va_list_gpr_size > GP_ARG_NUM_REG
8736 || n_fpr + cfun->va_list_fpr_size > FP_ARG_NUM_REG)
8737 {
8738 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
8739
8740 off = INTVAL (crtl->args.arg_offset_rtx);
8741 off = off < 0 ? 0 : off;
8742 if (TARGET_DEBUG_ARG)
8743 fprintf (stderr, "va_start: n_gpr = %d, n_fpr = %d off %d\n",
8744 (int)n_gpr, (int)n_fpr, off);
8745
8746 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovf), t, size_int (off));
8747
8748 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
8749 TREE_SIDE_EFFECTS (t) = 1;
8750 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8751 }
8752
8753 /* Find the register save area. */
8754 if ((cfun->va_list_gpr_size && n_gpr < GP_ARG_NUM_REG)
8755 || (cfun->va_list_fpr_size && n_fpr < FP_ARG_NUM_REG))
8756 {
8757 t = make_tree (TREE_TYPE (sav), return_address_pointer_rtx);
8758 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (sav), t,
8759 size_int (-RETURN_REGNUM * UNITS_PER_LONG));
8760
8761 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
8762 TREE_SIDE_EFFECTS (t) = 1;
8763 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8764 }
8765 }
8766
8767 /* Implement va_arg by updating the va_list structure
8768 VALIST as required to retrieve an argument of type
8769 TYPE, and returning that argument.
8770
8771 Generates code equivalent to:
8772
8773 if (integral value) {
8774 if (size <= 4 && args.gpr < 5 ||
8775 size > 4 && args.gpr < 4 )
8776 ret = args.reg_save_area[args.gpr+8]
8777 else
8778 ret = *args.overflow_arg_area++;
8779 } else if (float value) {
8780 if (args.fgpr < 2)
8781 ret = args.reg_save_area[args.fpr+64]
8782 else
8783 ret = *args.overflow_arg_area++;
8784 } else if (aggregate value) {
8785 if (args.gpr < 5)
8786 ret = *args.reg_save_area[args.gpr]
8787 else
8788 ret = **args.overflow_arg_area++;
8789 } */
8790
8791 static tree
8792 s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
8793 gimple_seq *post_p ATTRIBUTE_UNUSED)
8794 {
8795 tree f_gpr, f_fpr, f_ovf, f_sav;
8796 tree gpr, fpr, ovf, sav, reg, t, u;
8797 int indirect_p, size, n_reg, sav_ofs, sav_scale, max_reg;
8798 tree lab_false, lab_over, addr;
8799
8800 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
8801 f_fpr = DECL_CHAIN (f_gpr);
8802 f_ovf = DECL_CHAIN (f_fpr);
8803 f_sav = DECL_CHAIN (f_ovf);
8804
8805 valist = build_va_arg_indirect_ref (valist);
8806 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
8807 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
8808 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
8809
8810 /* The tree for args* cannot be shared between gpr/fpr and ovf since
8811 both appear on a lhs. */
8812 valist = unshare_expr (valist);
8813 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
8814
8815 size = int_size_in_bytes (type);
8816
8817 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
8818 {
8819 if (TARGET_DEBUG_ARG)
8820 {
8821 fprintf (stderr, "va_arg: aggregate type");
8822 debug_tree (type);
8823 }
8824
8825 /* Aggregates are passed by reference. */
8826 indirect_p = 1;
8827 reg = gpr;
8828 n_reg = 1;
8829
8830 /* Kernel stack layout on 31 bit: it is assumed here that no padding
8831 will be added by s390_frame_info because for va_args an even
8832 number of gprs always has to be saved (r15-r2 = 14 regs). */
8833 sav_ofs = 2 * UNITS_PER_LONG;
8834 sav_scale = UNITS_PER_LONG;
8835 size = UNITS_PER_LONG;
8836 max_reg = GP_ARG_NUM_REG - n_reg;
8837 }
8838 else if (s390_function_arg_float (TYPE_MODE (type), type))
8839 {
8840 if (TARGET_DEBUG_ARG)
8841 {
8842 fprintf (stderr, "va_arg: float type");
8843 debug_tree (type);
8844 }
8845
8846 /* FP args go in FP registers, if present. */
8847 indirect_p = 0;
8848 reg = fpr;
8849 n_reg = 1;
8850 sav_ofs = 16 * UNITS_PER_LONG;
8851 sav_scale = 8;
8852 max_reg = FP_ARG_NUM_REG - n_reg;
8853 }
8854 else
8855 {
8856 if (TARGET_DEBUG_ARG)
8857 {
8858 fprintf (stderr, "va_arg: other type");
8859 debug_tree (type);
8860 }
8861
8862 /* Otherwise into GP registers. */
8863 indirect_p = 0;
8864 reg = gpr;
8865 n_reg = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
8866
8867 /* Kernel stack layout on 31 bit: it is assumed here that no padding
8868 will be added by s390_frame_info because for va_args an even
8869 number of gprs always has to be saved (r15-r2 = 14 regs). */
8870 sav_ofs = 2 * UNITS_PER_LONG;
8871
8872 if (size < UNITS_PER_LONG)
8873 sav_ofs += UNITS_PER_LONG - size;
8874
8875 sav_scale = UNITS_PER_LONG;
8876 max_reg = GP_ARG_NUM_REG - n_reg;
8877 }
8878
8879 /* Pull the value out of the saved registers ... */
8880
8881 lab_false = create_artificial_label (UNKNOWN_LOCATION);
8882 lab_over = create_artificial_label (UNKNOWN_LOCATION);
8883 addr = create_tmp_var (ptr_type_node, "addr");
8884
8885 t = fold_convert (TREE_TYPE (reg), size_int (max_reg));
8886 t = build2 (GT_EXPR, boolean_type_node, reg, t);
8887 u = build1 (GOTO_EXPR, void_type_node, lab_false);
8888 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
8889 gimplify_and_add (t, pre_p);
8890
8891 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav,
8892 size_int (sav_ofs));
8893 u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
8894 fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
8895 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t, fold_convert (sizetype, u));
8896
8897 gimplify_assign (addr, t, pre_p);
8898
8899 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
8900
8901 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
8902
8903
8904 /* ... Otherwise out of the overflow area. */
8905
8906 t = ovf;
8907 if (size < UNITS_PER_LONG)
8908 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
8909 size_int (UNITS_PER_LONG - size));
8910
8911 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
8912
8913 gimplify_assign (addr, t, pre_p);
8914
8915 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
8916 size_int (size));
8917 gimplify_assign (ovf, t, pre_p);
8918
8919 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
8920
8921
8922 /* Increment register save count. */
8923
8924 u = build2 (PREINCREMENT_EXPR, TREE_TYPE (reg), reg,
8925 fold_convert (TREE_TYPE (reg), size_int (n_reg)));
8926 gimplify_and_add (u, pre_p);
8927
8928 if (indirect_p)
8929 {
8930 t = build_pointer_type_for_mode (build_pointer_type (type),
8931 ptr_mode, true);
8932 addr = fold_convert (t, addr);
8933 addr = build_va_arg_indirect_ref (addr);
8934 }
8935 else
8936 {
8937 t = build_pointer_type_for_mode (type, ptr_mode, true);
8938 addr = fold_convert (t, addr);
8939 }
8940
8941 return build_va_arg_indirect_ref (addr);
8942 }
8943
8944
8945 /* Builtins. */
8946
8947 enum s390_builtin
8948 {
8949 S390_BUILTIN_THREAD_POINTER,
8950 S390_BUILTIN_SET_THREAD_POINTER,
8951
8952 S390_BUILTIN_max
8953 };
8954
8955 static enum insn_code const code_for_builtin_64[S390_BUILTIN_max] = {
8956 CODE_FOR_get_tp_64,
8957 CODE_FOR_set_tp_64
8958 };
8959
8960 static enum insn_code const code_for_builtin_31[S390_BUILTIN_max] = {
8961 CODE_FOR_get_tp_31,
8962 CODE_FOR_set_tp_31
8963 };
8964
8965 static void
8966 s390_init_builtins (void)
8967 {
8968 tree ftype;
8969
8970 ftype = build_function_type (ptr_type_node, void_list_node);
8971 add_builtin_function ("__builtin_thread_pointer", ftype,
8972 S390_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
8973 NULL, NULL_TREE);
8974
8975 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
8976 add_builtin_function ("__builtin_set_thread_pointer", ftype,
8977 S390_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
8978 NULL, NULL_TREE);
8979 }
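/* Usage example (illustrative): C code can access these builtins as
     void *tp = __builtin_thread_pointer ();
     __builtin_set_thread_pointer (tp);
   which are expanded via s390_expand_builtin below into the
   get_tp/set_tp insn patterns listed above.  */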
8980
8981 /* Expand an expression EXP that calls a built-in function,
8982 with result going to TARGET if that's convenient
8983 (and in mode MODE if that's convenient).
8984 SUBTARGET may be used as the target for computing one of EXP's operands.
8985 IGNORE is nonzero if the value is to be ignored. */
8986
8987 static rtx
8988 s390_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
8989 enum machine_mode mode ATTRIBUTE_UNUSED,
8990 int ignore ATTRIBUTE_UNUSED)
8991 {
8992 #define MAX_ARGS 2
8993
8994 enum insn_code const *code_for_builtin =
8995 TARGET_64BIT ? code_for_builtin_64 : code_for_builtin_31;
8996
8997 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
8998 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
8999 enum insn_code icode;
9000 rtx op[MAX_ARGS], pat;
9001 int arity;
9002 bool nonvoid;
9003 tree arg;
9004 call_expr_arg_iterator iter;
9005
9006 if (fcode >= S390_BUILTIN_max)
9007 internal_error ("bad builtin fcode");
9008 icode = code_for_builtin[fcode];
9009 if (icode == 0)
9010 internal_error ("bad builtin fcode");
9011
9012 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
9013
9014 arity = 0;
9015 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
9016 {
9017 const struct insn_operand_data *insn_op;
9018
9019 if (arg == error_mark_node)
9020 return NULL_RTX;
9021 if (arity > MAX_ARGS)
9022 return NULL_RTX;
9023
9024 insn_op = &insn_data[icode].operand[arity + nonvoid];
9025
9026 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
9027
9028 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
9029 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
9030 arity++;
9031 }
9032
9033 if (nonvoid)
9034 {
9035 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9036 if (!target
9037 || GET_MODE (target) != tmode
9038 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
9039 target = gen_reg_rtx (tmode);
9040 }
9041
9042 switch (arity)
9043 {
9044 case 0:
9045 pat = GEN_FCN (icode) (target);
9046 break;
9047 case 1:
9048 if (nonvoid)
9049 pat = GEN_FCN (icode) (target, op[0]);
9050 else
9051 pat = GEN_FCN (icode) (op[0]);
9052 break;
9053 case 2:
9054 pat = GEN_FCN (icode) (target, op[0], op[1]);
9055 break;
9056 default:
9057 gcc_unreachable ();
9058 }
9059 if (!pat)
9060 return NULL_RTX;
9061 emit_insn (pat);
9062
9063 if (nonvoid)
9064 return target;
9065 else
9066 return const0_rtx;
9067 }
9068
9069
9070 /* Output assembly code for the trampoline template to
9071 stdio stream FILE.
9072
9073 On S/390, we use gpr 1 internally in the trampoline code;
9074 gpr 0 is used to hold the static chain. */
9075
9076 static void
9077 s390_asm_trampoline_template (FILE *file)
9078 {
9079 rtx op[2];
9080 op[0] = gen_rtx_REG (Pmode, 0);
9081 op[1] = gen_rtx_REG (Pmode, 1);
9082
9083 if (TARGET_64BIT)
9084 {
9085 output_asm_insn ("basr\t%1,0", op);
9086 output_asm_insn ("lmg\t%0,%1,14(%1)", op);
9087 output_asm_insn ("br\t%1", op);
9088 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 10));
9089 }
9090 else
9091 {
9092 output_asm_insn ("basr\t%1,0", op);
9093 output_asm_insn ("lm\t%0,%1,6(%1)", op);
9094 output_asm_insn ("br\t%1", op);
9095 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 8));
9096 }
9097 }
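/* Trampoline layout: the BASR sets %r1 to the address of the
   following LMG/LM, which then loads %r0 (the static chain) and %r1
   (the target address) from the two Pmode slots that
   s390_trampoline_init stores at offsets 2*UNITS_PER_WORD and
   3*UNITS_PER_WORD; the final BR %r1 jumps to the target.  */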
9098
9099 /* Emit RTL insns to initialize the variable parts of a trampoline.
9100 FNADDR is an RTX for the address of the function's pure code.
9101 CXT is an RTX for the static chain value for the function. */
9102
9103 static void
9104 s390_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
9105 {
9106 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
9107 rtx mem;
9108
9109 emit_block_move (m_tramp, assemble_trampoline_template (),
9110 GEN_INT (2*UNITS_PER_WORD), BLOCK_OP_NORMAL);
9111
9112 mem = adjust_address (m_tramp, Pmode, 2*UNITS_PER_WORD);
9113 emit_move_insn (mem, cxt);
9114 mem = adjust_address (m_tramp, Pmode, 3*UNITS_PER_WORD);
9115 emit_move_insn (mem, fnaddr);
9116 }
9117
9118 /* Output assembler code to FILE to increment profiler label # LABELNO
9119 for profiling a function entry. */
9120
9121 void
9122 s390_function_profiler (FILE *file, int labelno)
9123 {
9124 rtx op[7];
9125
9126 char label[128];
9127 ASM_GENERATE_INTERNAL_LABEL (label, "LP", labelno);
9128
9129 fprintf (file, "# function profiler \n");
9130
9131 op[0] = gen_rtx_REG (Pmode, RETURN_REGNUM);
9132 op[1] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
9133 op[1] = gen_rtx_MEM (Pmode, plus_constant (op[1], UNITS_PER_LONG));
9134
9135 op[2] = gen_rtx_REG (Pmode, 1);
9136 op[3] = gen_rtx_SYMBOL_REF (Pmode, label);
9137 SYMBOL_REF_FLAGS (op[3]) = SYMBOL_FLAG_LOCAL;
9138
9139 op[4] = gen_rtx_SYMBOL_REF (Pmode, "_mcount");
9140 if (flag_pic)
9141 {
9142 op[4] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[4]), UNSPEC_PLT);
9143 op[4] = gen_rtx_CONST (Pmode, op[4]);
9144 }
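/* The sequences below save the return-address register into a stack
   slot, load the address of the counter label into %r1, call _mcount
   (directly with BRASL, or through the literal pool for the 31-bit
   variants), and reload the return address afterwards.  */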
9145
9146 if (TARGET_64BIT)
9147 {
9148 output_asm_insn ("stg\t%0,%1", op);
9149 output_asm_insn ("larl\t%2,%3", op);
9150 output_asm_insn ("brasl\t%0,%4", op);
9151 output_asm_insn ("lg\t%0,%1", op);
9152 }
9153 else if (!flag_pic)
9154 {
9155 op[6] = gen_label_rtx ();
9156
9157 output_asm_insn ("st\t%0,%1", op);
9158 output_asm_insn ("bras\t%2,%l6", op);
9159 output_asm_insn (".long\t%4", op);
9160 output_asm_insn (".long\t%3", op);
9161 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
9162 output_asm_insn ("l\t%0,0(%2)", op);
9163 output_asm_insn ("l\t%2,4(%2)", op);
9164 output_asm_insn ("basr\t%0,%0", op);
9165 output_asm_insn ("l\t%0,%1", op);
9166 }
9167 else
9168 {
9169 op[5] = gen_label_rtx ();
9170 op[6] = gen_label_rtx ();
9171
9172 output_asm_insn ("st\t%0,%1", op);
9173 output_asm_insn ("bras\t%2,%l6", op);
9174 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[5]));
9175 output_asm_insn (".long\t%4-%l5", op);
9176 output_asm_insn (".long\t%3-%l5", op);
9177 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
9178 output_asm_insn ("lr\t%0,%2", op);
9179 output_asm_insn ("a\t%0,0(%2)", op);
9180 output_asm_insn ("a\t%2,4(%2)", op);
9181 output_asm_insn ("basr\t%0,%0", op);
9182 output_asm_insn ("l\t%0,%1", op);
9183 }
9184 }
9185
9186 /* Encode symbol attributes (local vs. global, tls model) of a SYMBOL_REF
9187 into its SYMBOL_REF_FLAGS. */
9188
9189 static void
9190 s390_encode_section_info (tree decl, rtx rtl, int first)
9191 {
9192 default_encode_section_info (decl, rtl, first);
9193
9194 if (TREE_CODE (decl) == VAR_DECL)
9195 {
9196 /* If a variable has a forced alignment to < 2 bytes, mark it
9197 with SYMBOL_FLAG_ALIGN1 to prevent it from being used as a LARL
9198 operand. */
9199 if (DECL_USER_ALIGN (decl) && DECL_ALIGN (decl) < 16)
9200 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_ALIGN1;
9201 if (!DECL_SIZE (decl)
9202 || !DECL_ALIGN (decl)
9203 || !host_integerp (DECL_SIZE (decl), 0)
9204 || (DECL_ALIGN (decl) <= 64
9205 && DECL_ALIGN (decl) != tree_low_cst (DECL_SIZE (decl), 0)))
9206 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
9207 }
9208
9209 /* Literal pool references don't have a decl so they are handled
9210 differently here. We rely on the information in the MEM_ALIGN
9211 entry to decide upon natural alignment. */
9212 if (MEM_P (rtl)
9213 && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
9214 && TREE_CONSTANT_POOL_ADDRESS_P (XEXP (rtl, 0))
9215 && (MEM_ALIGN (rtl) == 0
9216 || GET_MODE_BITSIZE (GET_MODE (rtl)) == 0
9217 || MEM_ALIGN (rtl) < GET_MODE_BITSIZE (GET_MODE (rtl))))
9218 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
9219 }
9220
9221 /* Output thunk to FILE that implements a C++ virtual function call (with
9222 multiple inheritance) to FUNCTION. The thunk adjusts the this pointer
9223 by DELTA, and unless VCALL_OFFSET is zero, applies an additional adjustment
9224 stored at VCALL_OFFSET in the vtable whose address is located at offset 0
9225 relative to the resulting this pointer. */
9226
9227 static void
9228 s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
9229 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
9230 tree function)
9231 {
9232 rtx op[10];
9233 int nonlocal = 0;
9234
9235 /* Make sure unwind info is emitted for the thunk if needed. */
9236 final_start_function (emit_barrier (), file, 1);
9237
9238 /* Operand 0 is the target function. */
9239 op[0] = XEXP (DECL_RTL (function), 0);
9240 if (flag_pic && !SYMBOL_REF_LOCAL_P (op[0]))
9241 {
9242 nonlocal = 1;
9243 op[0] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[0]),
9244 TARGET_64BIT ? UNSPEC_PLT : UNSPEC_GOT);
9245 op[0] = gen_rtx_CONST (Pmode, op[0]);
9246 }
9247
9248 /* Operand 1 is the 'this' pointer. */
9249 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
9250 op[1] = gen_rtx_REG (Pmode, 3);
9251 else
9252 op[1] = gen_rtx_REG (Pmode, 2);
9253
9254 /* Operand 2 is the delta. */
9255 op[2] = GEN_INT (delta);
9256
9257 /* Operand 3 is the vcall_offset. */
9258 op[3] = GEN_INT (vcall_offset);
9259
9260 /* Operand 4 is the temporary register. */
9261 op[4] = gen_rtx_REG (Pmode, 1);
9262
9263 /* Operands 5 to 8 can be used as labels. */
9264 op[5] = NULL_RTX;
9265 op[6] = NULL_RTX;
9266 op[7] = NULL_RTX;
9267 op[8] = NULL_RTX;
9268
9269 /* Operand 9 can be used for temporary register. */
9270 op[9] = NULL_RTX;
9271
9272 /* Generate code. */
9273 if (TARGET_64BIT)
9274 {
9275 /* Setup literal pool pointer if required. */
9276 if ((!DISP_IN_RANGE (delta)
9277 && !CONST_OK_FOR_K (delta)
9278 && !CONST_OK_FOR_Os (delta))
9279 || (!DISP_IN_RANGE (vcall_offset)
9280 && !CONST_OK_FOR_K (vcall_offset)
9281 && !CONST_OK_FOR_Os (vcall_offset)))
9282 {
9283 op[5] = gen_label_rtx ();
9284 output_asm_insn ("larl\t%4,%5", op);
9285 }
9286
9287 /* Add DELTA to this pointer. */
9288 if (delta)
9289 {
9290 if (CONST_OK_FOR_J (delta))
9291 output_asm_insn ("la\t%1,%2(%1)", op);
9292 else if (DISP_IN_RANGE (delta))
9293 output_asm_insn ("lay\t%1,%2(%1)", op);
9294 else if (CONST_OK_FOR_K (delta))
9295 output_asm_insn ("aghi\t%1,%2", op);
9296 else if (CONST_OK_FOR_Os (delta))
9297 output_asm_insn ("agfi\t%1,%2", op);
9298 else
9299 {
9300 op[6] = gen_label_rtx ();
9301 output_asm_insn ("agf\t%1,%6-%5(%4)", op);
9302 }
9303 }
9304
9305 /* Perform vcall adjustment. */
9306 if (vcall_offset)
9307 {
9308 if (DISP_IN_RANGE (vcall_offset))
9309 {
9310 output_asm_insn ("lg\t%4,0(%1)", op);
9311 output_asm_insn ("ag\t%1,%3(%4)", op);
9312 }
9313 else if (CONST_OK_FOR_K (vcall_offset))
9314 {
9315 output_asm_insn ("lghi\t%4,%3", op);
9316 output_asm_insn ("ag\t%4,0(%1)", op);
9317 output_asm_insn ("ag\t%1,0(%4)", op);
9318 }
9319 else if (CONST_OK_FOR_Os (vcall_offset))
9320 {
9321 output_asm_insn ("lgfi\t%4,%3", op);
9322 output_asm_insn ("ag\t%4,0(%1)", op);
9323 output_asm_insn ("ag\t%1,0(%4)", op);
9324 }
9325 else
9326 {
9327 op[7] = gen_label_rtx ();
9328 output_asm_insn ("llgf\t%4,%7-%5(%4)", op);
9329 output_asm_insn ("ag\t%4,0(%1)", op);
9330 output_asm_insn ("ag\t%1,0(%4)", op);
9331 }
9332 }
9333
9334 /* Jump to target. */
9335 output_asm_insn ("jg\t%0", op);
9336
9337 /* Output literal pool if required. */
9338 if (op[5])
9339 {
9340 output_asm_insn (".align\t4", op);
9341 targetm.asm_out.internal_label (file, "L",
9342 CODE_LABEL_NUMBER (op[5]));
9343 }
9344 if (op[6])
9345 {
9346 targetm.asm_out.internal_label (file, "L",
9347 CODE_LABEL_NUMBER (op[6]));
9348 output_asm_insn (".long\t%2", op);
9349 }
9350 if (op[7])
9351 {
9352 targetm.asm_out.internal_label (file, "L",
9353 CODE_LABEL_NUMBER (op[7]));
9354 output_asm_insn (".long\t%3", op);
9355 }
9356 }
9357 else
9358 {
9359 /* Setup base pointer if required. */
9360 if (!vcall_offset
9361 || (!DISP_IN_RANGE (delta)
9362 && !CONST_OK_FOR_K (delta)
9363 && !CONST_OK_FOR_Os (delta))
9364 || (!DISP_IN_RANGE (delta)
9365 && !CONST_OK_FOR_K (vcall_offset)
9366 && !CONST_OK_FOR_Os (vcall_offset)))
9367 {
9368 op[5] = gen_label_rtx ();
9369 output_asm_insn ("basr\t%4,0", op);
9370 targetm.asm_out.internal_label (file, "L",
9371 CODE_LABEL_NUMBER (op[5]));
9372 }
9373
9374 /* Add DELTA to this pointer. */
9375 if (delta)
9376 {
9377 if (CONST_OK_FOR_J (delta))
9378 output_asm_insn ("la\t%1,%2(%1)", op);
9379 else if (DISP_IN_RANGE (delta))
9380 output_asm_insn ("lay\t%1,%2(%1)", op);
9381 else if (CONST_OK_FOR_K (delta))
9382 output_asm_insn ("ahi\t%1,%2", op);
9383 else if (CONST_OK_FOR_Os (delta))
9384 output_asm_insn ("afi\t%1,%2", op);
9385 else
9386 {
9387 op[6] = gen_label_rtx ();
9388 output_asm_insn ("a\t%1,%6-%5(%4)", op);
9389 }
9390 }
9391
9392 /* Perform vcall adjustment. */
9393 if (vcall_offset)
9394 {
9395 if (CONST_OK_FOR_J (vcall_offset))
9396 {
9397 output_asm_insn ("l\t%4,0(%1)", op);
9398 output_asm_insn ("a\t%1,%3(%4)", op);
9399 }
9400 else if (DISP_IN_RANGE (vcall_offset))
9401 {
9402 output_asm_insn ("l\t%4,0(%1)", op);
9403 output_asm_insn ("ay\t%1,%3(%4)", op);
9404 }
9405 else if (CONST_OK_FOR_K (vcall_offset))
9406 {
9407 output_asm_insn ("lhi\t%4,%3", op);
9408 output_asm_insn ("a\t%4,0(%1)", op);
9409 output_asm_insn ("a\t%1,0(%4)", op);
9410 }
9411 else if (CONST_OK_FOR_Os (vcall_offset))
9412 {
9413 output_asm_insn ("iilf\t%4,%3", op);
9414 output_asm_insn ("a\t%4,0(%1)", op);
9415 output_asm_insn ("a\t%1,0(%4)", op);
9416 }
9417 else
9418 {
9419 op[7] = gen_label_rtx ();
9420 output_asm_insn ("l\t%4,%7-%5(%4)", op);
9421 output_asm_insn ("a\t%4,0(%1)", op);
9422 output_asm_insn ("a\t%1,0(%4)", op);
9423 }
9424
9425 /* We had to clobber the base pointer register.
9426 Re-setup the base pointer (with a different base). */
9427 op[5] = gen_label_rtx ();
9428 output_asm_insn ("basr\t%4,0", op);
9429 targetm.asm_out.internal_label (file, "L",
9430 CODE_LABEL_NUMBER (op[5]));
9431 }
9432
9433 /* Jump to target. */
9434 op[8] = gen_label_rtx ();
9435
9436 if (!flag_pic)
9437 output_asm_insn ("l\t%4,%8-%5(%4)", op);
9438 else if (!nonlocal)
9439 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9440 /* We cannot call through .plt, since .plt requires %r12 loaded. */
9441 else if (flag_pic == 1)
9442 {
9443 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9444 output_asm_insn ("l\t%4,%0(%4)", op);
9445 }
9446 else if (flag_pic == 2)
9447 {
9448 op[9] = gen_rtx_REG (Pmode, 0);
9449 output_asm_insn ("l\t%9,%8-4-%5(%4)", op);
9450 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9451 output_asm_insn ("ar\t%4,%9", op);
9452 output_asm_insn ("l\t%4,0(%4)", op);
9453 }
9454
9455 output_asm_insn ("br\t%4", op);
9456
9457 /* Output literal pool. */
9458 output_asm_insn (".align\t4", op);
9459
9460 if (nonlocal && flag_pic == 2)
9461 output_asm_insn (".long\t%0", op);
9462 if (nonlocal)
9463 {
9464 op[0] = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
9465 SYMBOL_REF_FLAGS (op[0]) = SYMBOL_FLAG_LOCAL;
9466 }
9467
9468 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[8]));
9469 if (!flag_pic)
9470 output_asm_insn (".long\t%0", op);
9471 else
9472 output_asm_insn (".long\t%0-%5", op);
9473
9474 if (op[6])
9475 {
9476 targetm.asm_out.internal_label (file, "L",
9477 CODE_LABEL_NUMBER (op[6]));
9478 output_asm_insn (".long\t%2", op);
9479 }
9480 if (op[7])
9481 {
9482 targetm.asm_out.internal_label (file, "L",
9483 CODE_LABEL_NUMBER (op[7]));
9484 output_asm_insn (".long\t%3", op);
9485 }
9486 }
9487 final_end_function ();
9488 }
9489
9490 static bool
9491 s390_valid_pointer_mode (enum machine_mode mode)
9492 {
9493 return (mode == SImode || (TARGET_64BIT && mode == DImode));
9494 }
9495
9496 /* Checks whether the given CALL_EXPR would use a caller
9497 saved register. This is used to decide whether sibling call
9498 optimization could be performed on the respective function
9499 call. */
9500
9501 static bool
9502 s390_call_saved_register_used (tree call_expr)
9503 {
9504 CUMULATIVE_ARGS cum;
9505 tree parameter;
9506 enum machine_mode mode;
9507 tree type;
9508 rtx parm_rtx;
9509 int reg, i;
9510
9511 INIT_CUMULATIVE_ARGS (cum, NULL, NULL, 0, 0);
9512
9513 for (i = 0; i < call_expr_nargs (call_expr); i++)
9514 {
9515 parameter = CALL_EXPR_ARG (call_expr, i);
9516 gcc_assert (parameter);
9517
9518 /* For an undeclared variable passed as parameter we will get
9519 an ERROR_MARK node here. */
9520 if (TREE_CODE (parameter) == ERROR_MARK)
9521 return true;
9522
9523 type = TREE_TYPE (parameter);
9524 gcc_assert (type);
9525
9526 mode = TYPE_MODE (type);
9527 gcc_assert (mode);
9528
9529 if (pass_by_reference (&cum, mode, type, true))
9530 {
9531 mode = Pmode;
9532 type = build_pointer_type (type);
9533 }
9534
9535 parm_rtx = s390_function_arg (&cum, mode, type, 0);
9536
9537 s390_function_arg_advance (&cum, mode, type, 0);
9538
9539 if (!parm_rtx)
9540 continue;
9541
9542 if (REG_P (parm_rtx))
9543 {
9544 for (reg = 0;
9545 reg < HARD_REGNO_NREGS (REGNO (parm_rtx), GET_MODE (parm_rtx));
9546 reg++)
9547 if (!call_used_regs[reg + REGNO (parm_rtx)])
9548 return true;
9549 }
9550
9551 if (GET_CODE (parm_rtx) == PARALLEL)
9552 {
9553 int i;
9554
9555 for (i = 0; i < XVECLEN (parm_rtx, 0); i++)
9556 {
9557 rtx r = XEXP (XVECEXP (parm_rtx, 0, i), 0);
9558
9559 gcc_assert (REG_P (r));
9560
9561 for (reg = 0;
9562 reg < HARD_REGNO_NREGS (REGNO (r), GET_MODE (r));
9563 reg++)
9564 if (!call_used_regs[reg + REGNO (r)])
9565 return true;
9566 }
9567 }
9568
9569 }
9570 return false;
9571 }
9572
9573 /* Return true if the given call expression can be
9574 turned into a sibling call.
9575 DECL holds the declaration of the function to be called whereas
9576 EXP is the call expression itself. */
9577
9578 static bool
9579 s390_function_ok_for_sibcall (tree decl, tree exp)
9580 {
9581 /* The TPF epilogue uses register 1. */
9582 if (TARGET_TPF_PROFILING)
9583 return false;
9584
9585 /* The 31 bit PLT code uses register 12 (GOT pointer - caller saved)
9586 which would have to be restored before the sibcall. */
9587 if (!TARGET_64BIT && flag_pic && decl && !targetm.binds_local_p (decl))
9588 return false;
9589
9590 /* Register 6 on s390 is available as an argument register but unfortunately
9591 "caller saved". This makes functions needing this register for arguments
9592 not suitable for sibcalls. */
9593 return !s390_call_saved_register_used (exp);
9594 }
9595
9596 /* Return the fixed registers used for condition codes. */
9597
9598 static bool
9599 s390_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
9600 {
9601 *p1 = CC_REGNUM;
9602 *p2 = INVALID_REGNUM;
9603
9604 return true;
9605 }
9606
9607 /* This function is used by the call expanders of the machine description.
9608 It emits the call insn itself together with the necessary operations
9609 to adjust the target address and returns the emitted insn.
9610 ADDR_LOCATION is the target address rtx
9611 TLS_CALL the location of the thread-local symbol
9612 RESULT_REG the register where the result of the call should be stored
9613 RETADDR_REG the register where the return address should be stored
9614 If this parameter is NULL_RTX the call is considered
9615 to be a sibling call. */
9616
9617 rtx
9618 s390_emit_call (rtx addr_location, rtx tls_call, rtx result_reg,
9619 rtx retaddr_reg)
9620 {
9621 bool plt_call = false;
9622 rtx insn;
9623 rtx call;
9624 rtx clobber;
9625 rtvec vec;
9626
9627 /* Direct function calls need special treatment. */
9628 if (GET_CODE (addr_location) == SYMBOL_REF)
9629 {
9630 /* When calling a global routine in PIC mode, we must
9631 replace the symbol itself with the PLT stub. */
9632 if (flag_pic && !SYMBOL_REF_LOCAL_P (addr_location))
9633 {
9634 if (retaddr_reg != NULL_RTX)
9635 {
9636 addr_location = gen_rtx_UNSPEC (Pmode,
9637 gen_rtvec (1, addr_location),
9638 UNSPEC_PLT);
9639 addr_location = gen_rtx_CONST (Pmode, addr_location);
9640 plt_call = true;
9641 }
9642 else
9643 /* For -fpic code the PLT entries might use r12 which is
9644 call-saved. Therefore we cannot do a sibcall when
9645 calling directly using a symbol ref. When reaching
9646 this point we decided (in s390_function_ok_for_sibcall)
9647 to do a sibcall for a function pointer but one of the
9648 optimizers was able to get rid of the function pointer
9649 by propagating the symbol ref into the call. This
9650 optimization is illegal for S/390 so we turn the direct
9651 call into an indirect call again. */
9652 addr_location = force_reg (Pmode, addr_location);
9653 }
9654
9655 /* Unless we can use the bras(l) insn, force the
9656 routine address into a register. */
9657 if (!TARGET_SMALL_EXEC && !TARGET_CPU_ZARCH)
9658 {
9659 if (flag_pic)
9660 addr_location = legitimize_pic_address (addr_location, 0);
9661 else
9662 addr_location = force_reg (Pmode, addr_location);
9663 }
9664 }
9665
9666 /* If it is already an indirect call or the code above moved the
9667 SYMBOL_REF somewhere else, make sure the address can be found in
9668 register 1. */
9669 if (retaddr_reg == NULL_RTX
9670 && GET_CODE (addr_location) != SYMBOL_REF
9671 && !plt_call)
9672 {
9673 emit_move_insn (gen_rtx_REG (Pmode, SIBCALL_REGNUM), addr_location);
9674 addr_location = gen_rtx_REG (Pmode, SIBCALL_REGNUM);
9675 }
9676
9677 addr_location = gen_rtx_MEM (QImode, addr_location);
9678 call = gen_rtx_CALL (VOIDmode, addr_location, const0_rtx);
9679
9680 if (result_reg != NULL_RTX)
9681 call = gen_rtx_SET (VOIDmode, result_reg, call);
9682
9683 if (retaddr_reg != NULL_RTX)
9684 {
9685 clobber = gen_rtx_CLOBBER (VOIDmode, retaddr_reg);
9686
9687 if (tls_call != NULL_RTX)
9688 vec = gen_rtvec (3, call, clobber,
9689 gen_rtx_USE (VOIDmode, tls_call));
9690 else
9691 vec = gen_rtvec (2, call, clobber);
9692
9693 call = gen_rtx_PARALLEL (VOIDmode, vec);
9694 }
9695
9696 insn = emit_call_insn (call);
9697
9698 /* 31-bit PLT stubs and tls calls use the GOT register implicitly. */
9699 if ((!TARGET_64BIT && plt_call) || tls_call != NULL_RTX)
9700 {
9701 /* s390_function_ok_for_sibcall should
9702 have denied sibcalls in this case. */
9703 gcc_assert (retaddr_reg != NULL_RTX);
9704
9705 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
9706 }
9707 return insn;
9708 }
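
/* Illustrative sketch (editorial addition): for an ordinary call with a
   result register and a return-address register, the insn emitted above
   has roughly the shape

     (parallel [(set RESULT_REG (call (mem:QI ADDR_LOCATION) (const_int 0)))
                (clobber RETADDR_REG)])

   with an extra (use TLS_CALL) element for TLS calls; sibling calls
   (RETADDR_REG == NULL_RTX) omit the parallel and the clobber.  */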
9709
9710 /* Implement CONDITIONAL_REGISTER_USAGE. */
9711
9712 void
9713 s390_conditional_register_usage (void)
9714 {
9715 int i;
9716
9717 if (flag_pic)
9718 {
9719 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
9720 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
9721 }
9722 if (TARGET_CPU_ZARCH)
9723 {
9724 fixed_regs[BASE_REGNUM] = 0;
9725 call_used_regs[BASE_REGNUM] = 0;
9726 fixed_regs[RETURN_REGNUM] = 0;
9727 call_used_regs[RETURN_REGNUM] = 0;
9728 }
9729 if (TARGET_64BIT)
9730 {
9731 for (i = 24; i < 32; i++)
9732 call_used_regs[i] = call_really_used_regs[i] = 0;
9733 }
9734 else
9735 {
9736 for (i = 18; i < 20; i++)
9737 call_used_regs[i] = call_really_used_regs[i] = 0;
9738 }
9739
9740 if (TARGET_SOFT_FLOAT)
9741 {
9742 for (i = 16; i < 32; i++)
9743 call_used_regs[i] = fixed_regs[i] = 1;
9744 }
9745 }
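
/* Editorial note, assuming the hard register numbering from s390.h
   (16-31 are the FPRs in the order %f0,%f2,%f4,%f6,%f1,%f3,%f5,%f7,
   %f8,%f10,%f12,%f14,%f9,%f11,%f13,%f15): the loops above mark %f8-%f15
   as call-saved for the 64-bit ABI and only %f4/%f6 (indices 18-19) for
   the 31-bit ABI, while -msoft-float makes all FPRs fixed.  */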
9746
9747 /* Corresponding function to eh_return expander. */
9748
9749 static GTY(()) rtx s390_tpf_eh_return_symbol;
9750 void
9751 s390_emit_tpf_eh_return (rtx target)
9752 {
9753 rtx insn, reg;
9754
9755 if (!s390_tpf_eh_return_symbol)
9756 s390_tpf_eh_return_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tpf_eh_return");
9757
9758 reg = gen_rtx_REG (Pmode, 2);
9759
9760 emit_move_insn (reg, target);
9761 insn = s390_emit_call (s390_tpf_eh_return_symbol, NULL_RTX, reg,
9762 gen_rtx_REG (Pmode, RETURN_REGNUM));
9763 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
9764
9765 emit_move_insn (EH_RETURN_HANDLER_RTX, reg);
9766 }
9767
9768 /* Rework the prologue/epilogue to avoid saving/restoring
9769 registers unnecessarily. */
9770
9771 static void
9772 s390_optimize_prologue (void)
9773 {
9774 rtx insn, new_insn, next_insn;
9775
9776 /* Do a final recompute of the frame-related data. */
9777
9778 s390_update_frame_layout ();
9779
9780 /* If all special registers are in fact used, there's nothing we
9781 can do, so no point in walking the insn list. */
9782
9783 if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
9784 && cfun_frame_layout.last_save_gpr >= BASE_REGNUM
9785 && (TARGET_CPU_ZARCH
9786 || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
9787 && cfun_frame_layout.last_save_gpr >= RETURN_REGNUM)))
9788 return;
9789
9790 /* Search for prologue/epilogue insns and replace them. */
9791
9792 for (insn = get_insns (); insn; insn = next_insn)
9793 {
9794 int first, last, off;
9795 rtx set, base, offset;
9796
9797 next_insn = NEXT_INSN (insn);
9798
9799 if (GET_CODE (insn) != INSN)
9800 continue;
9801
9802 if (GET_CODE (PATTERN (insn)) == PARALLEL
9803 && store_multiple_operation (PATTERN (insn), VOIDmode))
9804 {
9805 set = XVECEXP (PATTERN (insn), 0, 0);
9806 first = REGNO (SET_SRC (set));
9807 last = first + XVECLEN (PATTERN (insn), 0) - 1;
9808 offset = const0_rtx;
9809 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
9810 off = INTVAL (offset);
9811
9812 if (GET_CODE (base) != REG || off < 0)
9813 continue;
9814 if (cfun_frame_layout.first_save_gpr != -1
9815 && (cfun_frame_layout.first_save_gpr < first
9816 || cfun_frame_layout.last_save_gpr > last))
9817 continue;
9818 if (REGNO (base) != STACK_POINTER_REGNUM
9819 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
9820 continue;
9821 if (first > BASE_REGNUM || last < BASE_REGNUM)
9822 continue;
9823
9824 if (cfun_frame_layout.first_save_gpr != -1)
9825 {
9826 new_insn = save_gprs (base,
9827 off + (cfun_frame_layout.first_save_gpr
9828 - first) * UNITS_PER_LONG,
9829 cfun_frame_layout.first_save_gpr,
9830 cfun_frame_layout.last_save_gpr);
9831 new_insn = emit_insn_before (new_insn, insn);
9832 INSN_ADDRESSES_NEW (new_insn, -1);
9833 }
9834
9835 remove_insn (insn);
9836 continue;
9837 }
9838
9839 if (cfun_frame_layout.first_save_gpr == -1
9840 && GET_CODE (PATTERN (insn)) == SET
9841 && GET_CODE (SET_SRC (PATTERN (insn))) == REG
9842 && (REGNO (SET_SRC (PATTERN (insn))) == BASE_REGNUM
9843 || (!TARGET_CPU_ZARCH
9844 && REGNO (SET_SRC (PATTERN (insn))) == RETURN_REGNUM))
9845 && GET_CODE (SET_DEST (PATTERN (insn))) == MEM)
9846 {
9847 set = PATTERN (insn);
9848 first = REGNO (SET_SRC (set));
9849 offset = const0_rtx;
9850 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
9851 off = INTVAL (offset);
9852
9853 if (GET_CODE (base) != REG || off < 0)
9854 continue;
9855 if (REGNO (base) != STACK_POINTER_REGNUM
9856 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
9857 continue;
9858
9859 remove_insn (insn);
9860 continue;
9861 }
9862
9863 if (GET_CODE (PATTERN (insn)) == PARALLEL
9864 && load_multiple_operation (PATTERN (insn), VOIDmode))
9865 {
9866 set = XVECEXP (PATTERN (insn), 0, 0);
9867 first = REGNO (SET_DEST (set));
9868 last = first + XVECLEN (PATTERN (insn), 0) - 1;
9869 offset = const0_rtx;
9870 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
9871 off = INTVAL (offset);
9872
9873 if (GET_CODE (base) != REG || off < 0)
9874 continue;
9875 if (cfun_frame_layout.first_restore_gpr != -1
9876 && (cfun_frame_layout.first_restore_gpr < first
9877 || cfun_frame_layout.last_restore_gpr > last))
9878 continue;
9879 if (REGNO (base) != STACK_POINTER_REGNUM
9880 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
9881 continue;
9882 if (first > BASE_REGNUM || last < BASE_REGNUM)
9883 continue;
9884
9885 if (cfun_frame_layout.first_restore_gpr != -1)
9886 {
9887 new_insn = restore_gprs (base,
9888 off + (cfun_frame_layout.first_restore_gpr
9889 - first) * UNITS_PER_LONG,
9890 cfun_frame_layout.first_restore_gpr,
9891 cfun_frame_layout.last_restore_gpr);
9892 new_insn = emit_insn_before (new_insn, insn);
9893 INSN_ADDRESSES_NEW (new_insn, -1);
9894 }
9895
9896 remove_insn (insn);
9897 continue;
9898 }
9899
9900 if (cfun_frame_layout.first_restore_gpr == -1
9901 && GET_CODE (PATTERN (insn)) == SET
9902 && GET_CODE (SET_DEST (PATTERN (insn))) == REG
9903 && (REGNO (SET_DEST (PATTERN (insn))) == BASE_REGNUM
9904 || (!TARGET_CPU_ZARCH
9905 && REGNO (SET_DEST (PATTERN (insn))) == RETURN_REGNUM))
9906 && GET_CODE (SET_SRC (PATTERN (insn))) == MEM)
9907 {
9908 set = PATTERN (insn);
9909 first = REGNO (SET_DEST (set));
9910 offset = const0_rtx;
9911 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
9912 off = INTVAL (offset);
9913
9914 if (GET_CODE (base) != REG || off < 0)
9915 continue;
9916 if (REGNO (base) != STACK_POINTER_REGNUM
9917 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
9918 continue;
9919
9920 remove_insn (insn);
9921 continue;
9922 }
9923 }
9924 }
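
/* Hypothetical example (editorial addition): a conservatively emitted
   64-bit prologue store-multiple

     stmg  %r6,%r15,48(%r15)

   is narrowed once the final frame layout shows that only %r14-%r15 need
   saving; save_gprs is re-emitted at offset 48 + (14 - 6) * UNITS_PER_LONG
   = 112, i.e. effectively

     stmg  %r14,%r15,112(%r15)

   and the matching epilogue load-multiple is rewritten the same way.  */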
9925
9926 /* On z10 and later the dynamic branch prediction must see the
9927 backward jump within a certain window. If not, it falls back to
9928 the static prediction. This function rearranges the loop backward
9929 branch in a way which makes the static prediction always correct.
9930 The function returns true if it added an instruction. */
9931 static bool
9932 s390_fix_long_loop_prediction (rtx insn)
9933 {
9934 rtx set = single_set (insn);
9935 rtx code_label, label_ref, new_label;
9936 rtx uncond_jump;
9937 rtx cur_insn;
9938 rtx tmp;
9939 int distance;
9940
9941 /* This will exclude branch on count and branch on index patterns
9942 since these are correctly statically predicted. */
9943 if (!set
9944 || SET_DEST (set) != pc_rtx
9945 || GET_CODE (SET_SRC(set)) != IF_THEN_ELSE)
9946 return false;
9947
9948 label_ref = (GET_CODE (XEXP (SET_SRC (set), 1)) == LABEL_REF ?
9949 XEXP (SET_SRC (set), 1) : XEXP (SET_SRC (set), 2));
9950
9951 gcc_assert (GET_CODE (label_ref) == LABEL_REF);
9952
9953 code_label = XEXP (label_ref, 0);
9954
9955 if (INSN_ADDRESSES (INSN_UID (code_label)) == -1
9956 || INSN_ADDRESSES (INSN_UID (insn)) == -1
9957 || (INSN_ADDRESSES (INSN_UID (insn))
9958 - INSN_ADDRESSES (INSN_UID (code_label)) < PREDICT_DISTANCE))
9959 return false;
9960
9961 for (distance = 0, cur_insn = PREV_INSN (insn);
9962 distance < PREDICT_DISTANCE - 6;
9963 distance += get_attr_length (cur_insn), cur_insn = PREV_INSN (cur_insn))
9964 if (!cur_insn || JUMP_P (cur_insn) || LABEL_P (cur_insn))
9965 return false;
9966
9967 new_label = gen_label_rtx ();
9968 uncond_jump = emit_jump_insn_after (
9969 gen_rtx_SET (VOIDmode, pc_rtx,
9970 gen_rtx_LABEL_REF (VOIDmode, code_label)),
9971 insn);
9972 emit_label_after (new_label, uncond_jump);
9973
9974 tmp = XEXP (SET_SRC (set), 1);
9975 XEXP (SET_SRC (set), 1) = XEXP (SET_SRC (set), 2);
9976 XEXP (SET_SRC (set), 2) = tmp;
9977 INSN_CODE (insn) = -1;
9978
9979 XEXP (label_ref, 0) = new_label;
9980 JUMP_LABEL (insn) = new_label;
9981 JUMP_LABEL (uncond_jump) = code_label;
9982
9983 return true;
9984 }
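
/* Illustrative sketch (editorial addition): the transformation above turns
   a distant conditional loop branch

       cond-jump  loop_head            (backward, out of the prediction window)

   into an inverted conditional branch over a new unconditional jump

       inverted-cond-jump  new_label   (short forward branch)
       jump                loop_head   (unconditional backward jump)
     new_label:

   so the backward loop branch is always predicted correctly by the static
   prediction scheme.  */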
9985
9986 /* Returns 1 if INSN reads the value of REG for purposes not related
9987 to addressing of memory, and 0 otherwise. */
9988 static int
9989 s390_non_addr_reg_read_p (rtx reg, rtx insn)
9990 {
9991 return reg_referenced_p (reg, PATTERN (insn))
9992 && !reg_used_in_mem_p (REGNO (reg), PATTERN (insn));
9993 }
9994
9995 /* Starting from INSN find_cond_jump looks downwards in the insn
9996 stream for a single jump insn which is the last user of the
9997 condition code set in INSN. */
9998 static rtx
9999 find_cond_jump (rtx insn)
10000 {
10001 for (; insn; insn = NEXT_INSN (insn))
10002 {
10003 rtx ite, cc;
10004
10005 if (LABEL_P (insn))
10006 break;
10007
10008 if (!JUMP_P (insn))
10009 {
10010 if (reg_mentioned_p (gen_rtx_REG (CCmode, CC_REGNUM), insn))
10011 break;
10012 continue;
10013 }
10014
10015 /* This will be triggered by a return. */
10016 if (GET_CODE (PATTERN (insn)) != SET)
10017 break;
10018
10019 gcc_assert (SET_DEST (PATTERN (insn)) == pc_rtx);
10020 ite = SET_SRC (PATTERN (insn));
10021
10022 if (GET_CODE (ite) != IF_THEN_ELSE)
10023 break;
10024
10025 cc = XEXP (XEXP (ite, 0), 0);
10026 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc)))
10027 break;
10028
10029 if (find_reg_note (insn, REG_DEAD, cc))
10030 return insn;
10031 break;
10032 }
10033
10034 return NULL_RTX;
10035 }
10036
10037 /* Swap the condition in COND and the operands in OP0 and OP1 so that
10038 the semantics do not change. If NULL_RTX is passed as COND, the
10039 function tries to find the conditional jump starting with INSN. */
10040 static void
10041 s390_swap_cmp (rtx cond, rtx *op0, rtx *op1, rtx insn)
10042 {
10043 rtx tmp = *op0;
10044
10045 if (cond == NULL_RTX)
10046 {
10047 rtx jump = find_cond_jump (NEXT_INSN (insn));
10048 jump = jump ? single_set (jump) : NULL_RTX;
10049
10050 if (jump == NULL_RTX)
10051 return;
10052
10053 cond = XEXP (XEXP (jump, 1), 0);
10054 }
10055
10056 *op0 = *op1;
10057 *op1 = tmp;
10058 PUT_CODE (cond, swap_condition (GET_CODE (cond)));
10059 }
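
/* Illustrative example (editorial addition): given a compare feeding a
   conditional jump, e.g.

     (set (reg:CC 33) (compare (reg A) (reg B)))   ...   jump if (lt ...)

   the swap produces

     (set (reg:CC 33) (compare (reg B) (reg A)))   ...   jump if (gt ...)

   i.e. the operands are exchanged and the comparison code is replaced by
   swap_condition of itself, leaving the overall semantics unchanged.  */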
10060
10061 /* On z10, instructions of the compare-and-branch family have the
10062 property of accessing the register occurring as second operand with
10063 its bits complemented. If such a compare is grouped with a second
10064 instruction that accesses the same register non-complemented, and
10065 if that register's value is delivered via a bypass, then the
10066 pipeline recycles, thereby causing significant performance decline.
10067 This function locates such situations and exchanges the two
10068 operands of the compare. The function returns true whenever it
10069 added an insn. */
10070 static bool
10071 s390_z10_optimize_cmp (rtx insn)
10072 {
10073 rtx prev_insn, next_insn;
10074 bool insn_added_p = false;
10075 rtx cond, *op0, *op1;
10076
10077 if (GET_CODE (PATTERN (insn)) == PARALLEL)
10078 {
10079 /* Handle compare and branch and branch on count
10080 instructions. */
10081 rtx pattern = single_set (insn);
10082
10083 if (!pattern
10084 || SET_DEST (pattern) != pc_rtx
10085 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE)
10086 return false;
10087
10088 cond = XEXP (SET_SRC (pattern), 0);
10089 op0 = &XEXP (cond, 0);
10090 op1 = &XEXP (cond, 1);
10091 }
10092 else if (GET_CODE (PATTERN (insn)) == SET)
10093 {
10094 rtx src, dest;
10095
10096 /* Handle normal compare instructions. */
10097 src = SET_SRC (PATTERN (insn));
10098 dest = SET_DEST (PATTERN (insn));
10099
10100 if (!REG_P (dest)
10101 || !CC_REGNO_P (REGNO (dest))
10102 || GET_CODE (src) != COMPARE)
10103 return false;
10104
10105 /* s390_swap_cmp will try to find the conditional
10106 jump when passing NULL_RTX as condition. */
10107 cond = NULL_RTX;
10108 op0 = &XEXP (src, 0);
10109 op1 = &XEXP (src, 1);
10110 }
10111 else
10112 return false;
10113
10114 if (!REG_P (*op0) || !REG_P (*op1))
10115 return false;
10116
10117 if (GET_MODE_CLASS (GET_MODE (*op0)) != MODE_INT)
10118 return false;
10119
10120 /* Swap the COMPARE arguments and its mask if there is a
10121 conflicting access in the previous insn. */
10122 prev_insn = prev_active_insn (insn);
10123 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
10124 && reg_referenced_p (*op1, PATTERN (prev_insn)))
10125 s390_swap_cmp (cond, op0, op1, insn);
10126
10127 /* Check if there is a conflict with the next insn. If there
10128 was no conflict with the previous insn, then swap the
10129 COMPARE arguments and its mask. If we already swapped
10130 the operands, or if swapping them would cause a conflict
10131 with the previous insn, issue a NOP after the COMPARE in
10132 order to separate the two instructions. */
10133 next_insn = next_active_insn (insn);
10134 if (next_insn != NULL_RTX && INSN_P (next_insn)
10135 && s390_non_addr_reg_read_p (*op1, next_insn))
10136 {
10137 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
10138 && s390_non_addr_reg_read_p (*op0, prev_insn))
10139 {
10140 if (REGNO (*op1) == 0)
10141 emit_insn_after (gen_nop1 (), insn);
10142 else
10143 emit_insn_after (gen_nop (), insn);
10144 insn_added_p = true;
10145 }
10146 else
10147 s390_swap_cmp (cond, op0, op1, insn);
10148 }
10149 return insn_added_p;
10150 }
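
/* Illustrative example (editorial addition, register numbers hypothetical):
   a group such as

     crj   %r1,%r2,...        (compare and branch; %r2 accessed complemented)
     ar    %r3,%r2            (neighbouring insn reads %r2 non-complemented)

   can trigger the bypass recycle described above.  The code either swaps
   the compare operands (and the condition) or, when swapping would conflict
   with the previous insn instead, separates the group with a nop.  */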
10151
10152 /* Perform machine-dependent processing. */
10153
10154 static void
10155 s390_reorg (void)
10156 {
10157 bool pool_overflow = false;
10158
10159 /* Make sure all splits have been performed; splits after
10160 machine_dependent_reorg might confuse insn length counts. */
10161 split_all_insns_noflow ();
10162
10163 /* Install the main literal pool and the associated base
10164 register load insns.
10165
10166 In addition, there are two problematic situations we need
10167 to correct:
10168
10169 - the literal pool might be > 4096 bytes in size, so that
10170 some of its elements cannot be directly accessed
10171
10172 - a branch target might be > 64K away from the branch, so that
10173 it is not possible to use a PC-relative instruction.
10174
10175 To fix those, we split the single literal pool into multiple
10176 pool chunks, reloading the pool base register at various
10177 points throughout the function to ensure it always points to
10178 the pool chunk the following code expects, and / or replace
10179 PC-relative branches by absolute branches.
10180
10181 However, the two problems are interdependent: splitting the
10182 literal pool can move a branch further away from its target,
10183 causing the 64K limit to overflow, and on the other hand,
10184 replacing a PC-relative branch by an absolute branch means
10185 we need to put the branch target address into the literal
10186 pool, possibly causing it to overflow.
10187
10188 So, we loop trying to fix up both problems until we manage
10189 to satisfy both conditions at the same time. Note that the
10190 loop is guaranteed to terminate as every pass of the loop
10191 strictly decreases the total number of PC-relative branches
10192 in the function. (This is not completely true as there
10193 might be branch-over-pool insns introduced by chunkify_start.
10194 Those never need to be split however.) */
10195
10196 for (;;)
10197 {
10198 struct constant_pool *pool = NULL;
10199
10200 /* Collect the literal pool. */
10201 if (!pool_overflow)
10202 {
10203 pool = s390_mainpool_start ();
10204 if (!pool)
10205 pool_overflow = true;
10206 }
10207
10208 /* If literal pool overflowed, start to chunkify it. */
10209 if (pool_overflow)
10210 pool = s390_chunkify_start ();
10211
10212 /* Split out-of-range branches. If this has created new
10213 literal pool entries, cancel current chunk list and
10214 recompute it. zSeries machines have large branch
10215 instructions, so we never need to split a branch. */
10216 if (!TARGET_CPU_ZARCH && s390_split_branches ())
10217 {
10218 if (pool_overflow)
10219 s390_chunkify_cancel (pool);
10220 else
10221 s390_mainpool_cancel (pool);
10222
10223 continue;
10224 }
10225
10226 /* If we made it up to here, both conditions are satisfied.
10227 Finish up literal pool related changes. */
10228 if (pool_overflow)
10229 s390_chunkify_finish (pool);
10230 else
10231 s390_mainpool_finish (pool);
10232
10233 /* We're done splitting branches. */
10234 cfun->machine->split_branches_pending_p = false;
10235 break;
10236 }
10237
10238 /* Generate out-of-pool execute target insns. */
10239 if (TARGET_CPU_ZARCH)
10240 {
10241 rtx insn, label, target;
10242
10243 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10244 {
10245 label = s390_execute_label (insn);
10246 if (!label)
10247 continue;
10248
10249 gcc_assert (label != const0_rtx);
10250
10251 target = emit_label (XEXP (label, 0));
10252 INSN_ADDRESSES_NEW (target, -1);
10253
10254 target = emit_insn (s390_execute_target (insn));
10255 INSN_ADDRESSES_NEW (target, -1);
10256 }
10257 }
10258
10259 /* Try to optimize prologue and epilogue further. */
10260 s390_optimize_prologue ();
10261
10262 /* Walk over the insns and do some >=z10 specific changes. */
10263 if (s390_tune == PROCESSOR_2097_Z10
10264 || s390_tune == PROCESSOR_2817_Z196)
10265 {
10266 rtx insn;
10267 bool insn_added_p = false;
10268
10269 /* The insn lengths and addresses have to be up to date for the
10270 following manipulations. */
10271 shorten_branches (get_insns ());
10272
10273 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10274 {
10275 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
10276 continue;
10277
10278 if (JUMP_P (insn))
10279 insn_added_p |= s390_fix_long_loop_prediction (insn);
10280
10281 if ((GET_CODE (PATTERN (insn)) == PARALLEL
10282 || GET_CODE (PATTERN (insn)) == SET)
10283 && s390_tune == PROCESSOR_2097_Z10)
10284 insn_added_p |= s390_z10_optimize_cmp (insn);
10285 }
10286
10287 /* Adjust branches if we added new instructions. */
10288 if (insn_added_p)
10289 shorten_branches (get_insns ());
10290 }
10291 }
10292
10293 /* Return true if INSN is a fp load insn writing register REGNO. */
10294 static inline bool
10295 s390_fpload_toreg (rtx insn, unsigned int regno)
10296 {
10297 rtx set;
10298 enum attr_type flag = s390_safe_attr_type (insn);
10299
10300 if (flag != TYPE_FLOADSF && flag != TYPE_FLOADDF)
10301 return false;
10302
10303 set = single_set (insn);
10304
10305 if (set == NULL_RTX)
10306 return false;
10307
10308 if (!REG_P (SET_DEST (set)) || !MEM_P (SET_SRC (set)))
10309 return false;
10310
10311 if (REGNO (SET_DEST (set)) != regno)
10312 return false;
10313
10314 return true;
10315 }
10316
10317 /* This value describes the distance to be avoided between an
10318 arithmetic fp instruction and an fp load writing the same register.
10319 Z10_EARLYLOAD_DISTANCE - 1 as well as Z10_EARLYLOAD_DISTANCE + 1 are
10320 fine, but the exact value has to be avoided. Otherwise the FP
10321 pipeline will throw an exception causing a major penalty. */
10322 #define Z10_EARLYLOAD_DISTANCE 7
10323
10324 /* Rearrange the ready list in order to avoid the situation described
10325 for Z10_EARLYLOAD_DISTANCE. A problematic load instruction is
10326 moved to the very end of the ready list. */
10327 static void
10328 s390_z10_prevent_earlyload_conflicts (rtx *ready, int *nready_p)
10329 {
10330 unsigned int regno;
10331 int nready = *nready_p;
10332 rtx tmp;
10333 int i;
10334 rtx insn;
10335 rtx set;
10336 enum attr_type flag;
10337 int distance;
10338
10339 /* Skip DISTANCE - 1 active insns. */
10340 for (insn = last_scheduled_insn, distance = Z10_EARLYLOAD_DISTANCE - 1;
10341 distance > 0 && insn != NULL_RTX;
10342 distance--, insn = prev_active_insn (insn))
10343 if (CALL_P (insn) || JUMP_P (insn))
10344 return;
10345
10346 if (insn == NULL_RTX)
10347 return;
10348
10349 set = single_set (insn);
10350
10351 if (set == NULL_RTX || !REG_P (SET_DEST (set))
10352 || GET_MODE_CLASS (GET_MODE (SET_DEST (set))) != MODE_FLOAT)
10353 return;
10354
10355 flag = s390_safe_attr_type (insn);
10356
10357 if (flag == TYPE_FLOADSF || flag == TYPE_FLOADDF)
10358 return;
10359
10360 regno = REGNO (SET_DEST (set));
10361 i = nready - 1;
10362
10363 while (!s390_fpload_toreg (ready[i], regno) && i > 0)
10364 i--;
10365
10366 if (!i)
10367 return;
10368
10369 tmp = ready[i];
10370 memmove (&ready[1], &ready[0], sizeof (rtx) * i);
10371 ready[0] = tmp;
10372 }
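
/* Editorial note: the loop above walks back Z10_EARLYLOAD_DISTANCE - 1
   active insns from the last issued insn.  If the insn found there is an
   arithmetic fp instruction writing, say, %f2, then issuing a ready fp
   load of %f2 next would place the two exactly Z10_EARLYLOAD_DISTANCE
   insns apart and hit the penalty described above, so such a load is
   moved to index 0 of READY and is not selected next.  */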
10373
10374 /* This function is called via hook TARGET_SCHED_REORDER before
10375 issuing one insn from list READY, which contains *NREADYP entries.
10376 For target z10 it reorders load instructions to avoid early load
10377 conflicts in the floating point pipeline. */
10378 static int
10379 s390_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
10380 rtx *ready, int *nreadyp, int clock ATTRIBUTE_UNUSED)
10381 {
10382 if (s390_tune == PROCESSOR_2097_Z10)
10383 if (reload_completed && *nreadyp > 1)
10384 s390_z10_prevent_earlyload_conflicts (ready, nreadyp);
10385
10386 return s390_issue_rate ();
10387 }
10388
10389 /* This function is called via hook TARGET_SCHED_VARIABLE_ISSUE after
10390 the scheduler has issued INSN. It stores the last issued insn into
10391 last_scheduled_insn in order to make it available for
10392 s390_sched_reorder. */
10393 static int
10394 s390_sched_variable_issue (FILE *file ATTRIBUTE_UNUSED,
10395 int verbose ATTRIBUTE_UNUSED,
10396 rtx insn, int more)
10397 {
10398 last_scheduled_insn = insn;
10399
10400 if (GET_CODE (PATTERN (insn)) != USE
10401 && GET_CODE (PATTERN (insn)) != CLOBBER)
10402 return more - 1;
10403 else
10404 return more;
10405 }
10406
10407 static void
10408 s390_sched_init (FILE *file ATTRIBUTE_UNUSED,
10409 int verbose ATTRIBUTE_UNUSED,
10410 int max_ready ATTRIBUTE_UNUSED)
10411 {
10412 last_scheduled_insn = NULL_RTX;
10413 }
10414
10415 /* This function checks the whole insn X for memory references. It
10416 always returns zero because the framework it is called from
10417 (for_each_rtx) would stop recursively analyzing the insn upon a
10418 return value other than zero. The real result of this function is
10419 the updated counter variable MEM_COUNT. */
10420 static int
10421 check_dpu (rtx *x, unsigned *mem_count)
10422 {
10423 if (*x != NULL_RTX && MEM_P (*x))
10424 (*mem_count)++;
10425 return 0;
10426 }
10427
10428 /* This target hook implementation for TARGET_LOOP_UNROLL_ADJUST calculates
10429 the number of times LOOP should be unrolled when tuning for CPUs with
10430 a built-in stride prefetcher.
10431 The loop is analyzed for memory accesses by calling check_dpu for
10432 each rtx of the loop. Depending on the loop depth and the number of
10433 memory accesses, a new number <= nunroll is returned to improve the
10434 behaviour of the hardware prefetch unit. */
10435 static unsigned
10436 s390_loop_unroll_adjust (unsigned nunroll, struct loop *loop)
10437 {
10438 basic_block *bbs;
10439 rtx insn;
10440 unsigned i;
10441 unsigned mem_count = 0;
10442
10443 if (s390_tune != PROCESSOR_2097_Z10 && s390_tune != PROCESSOR_2817_Z196)
10444 return nunroll;
10445
10446 /* Count the number of memory references within the loop body. */
10447 bbs = get_loop_body (loop);
10448 for (i = 0; i < loop->num_nodes; i++)
10449 {
10450 for (insn = BB_HEAD (bbs[i]); insn != BB_END (bbs[i]); insn = NEXT_INSN (insn))
10451 if (INSN_P (insn) && INSN_CODE (insn) != -1)
10452 for_each_rtx (&insn, (rtx_function) check_dpu, &mem_count);
10453 }
10454 free (bbs);
10455
10456 /* Prevent division by zero; nunroll does not need adjusting in this case. */
10457 if (mem_count == 0)
10458 return nunroll;
10459
10460 switch (loop_depth(loop))
10461 {
10462 case 1:
10463 return MIN (nunroll, 28 / mem_count);
10464 case 2:
10465 return MIN (nunroll, 22 / mem_count);
10466 default:
10467 return MIN (nunroll, 16 / mem_count);
10468 }
10469 }
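
/* Worked example (editorial addition): a depth-1 loop containing 7 memory
   references is capped at MIN (nunroll, 28 / 7) = MIN (nunroll, 4), while
   a doubly nested loop with the same number of references is capped at
   MIN (nunroll, 22 / 7) = MIN (nunroll, 3).  */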
10470
10471 /* Initialize GCC target structure. */
10472
10473 #undef TARGET_ASM_ALIGNED_HI_OP
10474 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
10475 #undef TARGET_ASM_ALIGNED_DI_OP
10476 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
10477 #undef TARGET_ASM_INTEGER
10478 #define TARGET_ASM_INTEGER s390_assemble_integer
10479
10480 #undef TARGET_ASM_OPEN_PAREN
10481 #define TARGET_ASM_OPEN_PAREN ""
10482
10483 #undef TARGET_ASM_CLOSE_PAREN
10484 #define TARGET_ASM_CLOSE_PAREN ""
10485
10486 #undef TARGET_DEFAULT_TARGET_FLAGS
10487 #define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | MASK_FUSED_MADD)
10488
10489 #undef TARGET_HANDLE_OPTION
10490 #define TARGET_HANDLE_OPTION s390_handle_option
10491
10492 #undef TARGET_OPTION_OVERRIDE
10493 #define TARGET_OPTION_OVERRIDE s390_option_override
10494
10495 #undef TARGET_OPTION_OPTIMIZATION
10496 #define TARGET_OPTION_OPTIMIZATION s390_option_optimization
10497
10498 #undef TARGET_ENCODE_SECTION_INFO
10499 #define TARGET_ENCODE_SECTION_INFO s390_encode_section_info
10500
10501 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10502 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
10503
10504 #ifdef HAVE_AS_TLS
10505 #undef TARGET_HAVE_TLS
10506 #define TARGET_HAVE_TLS true
10507 #endif
10508 #undef TARGET_CANNOT_FORCE_CONST_MEM
10509 #define TARGET_CANNOT_FORCE_CONST_MEM s390_cannot_force_const_mem
10510
10511 #undef TARGET_DELEGITIMIZE_ADDRESS
10512 #define TARGET_DELEGITIMIZE_ADDRESS s390_delegitimize_address
10513
10514 #undef TARGET_LEGITIMIZE_ADDRESS
10515 #define TARGET_LEGITIMIZE_ADDRESS s390_legitimize_address
10516
10517 #undef TARGET_RETURN_IN_MEMORY
10518 #define TARGET_RETURN_IN_MEMORY s390_return_in_memory
10519
10520 #undef TARGET_INIT_BUILTINS
10521 #define TARGET_INIT_BUILTINS s390_init_builtins
10522 #undef TARGET_EXPAND_BUILTIN
10523 #define TARGET_EXPAND_BUILTIN s390_expand_builtin
10524
10525 #undef TARGET_ASM_OUTPUT_MI_THUNK
10526 #define TARGET_ASM_OUTPUT_MI_THUNK s390_output_mi_thunk
10527 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
10528 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
10529
10530 #undef TARGET_SCHED_ADJUST_PRIORITY
10531 #define TARGET_SCHED_ADJUST_PRIORITY s390_adjust_priority
10532 #undef TARGET_SCHED_ISSUE_RATE
10533 #define TARGET_SCHED_ISSUE_RATE s390_issue_rate
10534 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
10535 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD s390_first_cycle_multipass_dfa_lookahead
10536
10537 #undef TARGET_SCHED_VARIABLE_ISSUE
10538 #define TARGET_SCHED_VARIABLE_ISSUE s390_sched_variable_issue
10539 #undef TARGET_SCHED_REORDER
10540 #define TARGET_SCHED_REORDER s390_sched_reorder
10541 #undef TARGET_SCHED_INIT
10542 #define TARGET_SCHED_INIT s390_sched_init
10543
10544 #undef TARGET_CANNOT_COPY_INSN_P
10545 #define TARGET_CANNOT_COPY_INSN_P s390_cannot_copy_insn_p
10546 #undef TARGET_RTX_COSTS
10547 #define TARGET_RTX_COSTS s390_rtx_costs
10548 #undef TARGET_ADDRESS_COST
10549 #define TARGET_ADDRESS_COST s390_address_cost
10550
10551 #undef TARGET_MACHINE_DEPENDENT_REORG
10552 #define TARGET_MACHINE_DEPENDENT_REORG s390_reorg
10553
10554 #undef TARGET_VALID_POINTER_MODE
10555 #define TARGET_VALID_POINTER_MODE s390_valid_pointer_mode
10556
10557 #undef TARGET_BUILD_BUILTIN_VA_LIST
10558 #define TARGET_BUILD_BUILTIN_VA_LIST s390_build_builtin_va_list
10559 #undef TARGET_EXPAND_BUILTIN_VA_START
10560 #define TARGET_EXPAND_BUILTIN_VA_START s390_va_start
10561 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
10562 #define TARGET_GIMPLIFY_VA_ARG_EXPR s390_gimplify_va_arg
10563
10564 #undef TARGET_PROMOTE_FUNCTION_MODE
10565 #define TARGET_PROMOTE_FUNCTION_MODE s390_promote_function_mode
10566 #undef TARGET_PASS_BY_REFERENCE
10567 #define TARGET_PASS_BY_REFERENCE s390_pass_by_reference
10568
10569 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
10570 #define TARGET_FUNCTION_OK_FOR_SIBCALL s390_function_ok_for_sibcall
10571 #undef TARGET_FUNCTION_ARG
10572 #define TARGET_FUNCTION_ARG s390_function_arg
10573 #undef TARGET_FUNCTION_ARG_ADVANCE
10574 #define TARGET_FUNCTION_ARG_ADVANCE s390_function_arg_advance
10575
10576 #undef TARGET_FIXED_CONDITION_CODE_REGS
10577 #define TARGET_FIXED_CONDITION_CODE_REGS s390_fixed_condition_code_regs
10578
10579 #undef TARGET_CC_MODES_COMPATIBLE
10580 #define TARGET_CC_MODES_COMPATIBLE s390_cc_modes_compatible
10581
10582 #undef TARGET_INVALID_WITHIN_DOLOOP
10583 #define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_null
10584
10585 #ifdef HAVE_AS_TLS
10586 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
10587 #define TARGET_ASM_OUTPUT_DWARF_DTPREL s390_output_dwarf_dtprel
10588 #endif
10589
10590 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
10591 #undef TARGET_MANGLE_TYPE
10592 #define TARGET_MANGLE_TYPE s390_mangle_type
10593 #endif
10594
10595 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10596 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
10597
10598 #undef TARGET_SECONDARY_RELOAD
10599 #define TARGET_SECONDARY_RELOAD s390_secondary_reload
10600
10601 #undef TARGET_LIBGCC_CMP_RETURN_MODE
10602 #define TARGET_LIBGCC_CMP_RETURN_MODE s390_libgcc_cmp_return_mode
10603
10604 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
10605 #define TARGET_LIBGCC_SHIFT_COUNT_MODE s390_libgcc_shift_count_mode
10606
10607 #undef TARGET_LEGITIMATE_ADDRESS_P
10608 #define TARGET_LEGITIMATE_ADDRESS_P s390_legitimate_address_p
10609
10610 #undef TARGET_CAN_ELIMINATE
10611 #define TARGET_CAN_ELIMINATE s390_can_eliminate
10612
10613 #undef TARGET_LOOP_UNROLL_ADJUST
10614 #define TARGET_LOOP_UNROLL_ADJUST s390_loop_unroll_adjust
10615
10616 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
10617 #define TARGET_ASM_TRAMPOLINE_TEMPLATE s390_asm_trampoline_template
10618 #undef TARGET_TRAMPOLINE_INIT
10619 #define TARGET_TRAMPOLINE_INIT s390_trampoline_init
10620
10621 #undef TARGET_UNWIND_WORD_MODE
10622 #define TARGET_UNWIND_WORD_MODE s390_unwind_word_mode
10623
10624 struct gcc_target targetm = TARGET_INITIALIZER;
10625
10626 #include "gt-s390.h"