1 /* Subroutines used for code generation on IBM S/390 and zSeries
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006,
3 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
4 Contributed by Hartmut Penner (hpenner@de.ibm.com) and
5 Ulrich Weigand (uweigand@de.ibm.com) and
6 Andreas Krebbel (Andreas.Krebbel@de.ibm.com).
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "tm_p.h"
31 #include "regs.h"
32 #include "hard-reg-set.h"
33 #include "insn-config.h"
34 #include "conditions.h"
35 #include "output.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "except.h"
39 #include "function.h"
40 #include "recog.h"
41 #include "expr.h"
42 #include "reload.h"
43 #include "diagnostic-core.h"
44 #include "toplev.h"
45 #include "basic-block.h"
46 #include "integrate.h"
47 #include "ggc.h"
48 #include "target.h"
49 #include "target-def.h"
50 #include "debug.h"
51 #include "langhooks.h"
52 #include "optabs.h"
53 #include "gimple.h"
54 #include "df.h"
55 #include "params.h"
56 #include "cfgloop.h"
57
58
59 /* Define the specific costs for a given cpu. */
60
61 struct processor_costs
62 {
63 /* multiplication */
64 const int m; /* cost of an M instruction. */
65 const int mghi; /* cost of an MGHI instruction. */
66 const int mh; /* cost of an MH instruction. */
67 const int mhi; /* cost of an MHI instruction. */
68 const int ml; /* cost of an ML instruction. */
69 const int mr; /* cost of an MR instruction. */
70 const int ms; /* cost of an MS instruction. */
71 const int msg; /* cost of an MSG instruction. */
72 const int msgf; /* cost of an MSGF instruction. */
73 const int msgfr; /* cost of an MSGFR instruction. */
74 const int msgr; /* cost of an MSGR instruction. */
75 const int msr; /* cost of an MSR instruction. */
76 const int mult_df; /* cost of multiplication in DFmode. */
77 	  const int mxbr;	  /* cost of an MXBR instruction. */
78 /* square root */
79 const int sqxbr; /* cost of square root in TFmode. */
80 const int sqdbr; /* cost of square root in DFmode. */
81 const int sqebr; /* cost of square root in SFmode. */
82 /* multiply and add */
83 const int madbr; /* cost of multiply and add in DFmode. */
84 const int maebr; /* cost of multiply and add in SFmode. */
85 /* division */
86 	  const int dxbr;	  /* cost of a DXBR instruction. */
87 	  const int ddbr;	  /* cost of a DDBR instruction. */
88 	  const int debr;	  /* cost of a DEBR instruction. */
89 	  const int dlgr;	  /* cost of a DLGR instruction. */
90 	  const int dlr;	  /* cost of a DLR instruction. */
91 	  const int dr;	  /* cost of a DR instruction. */
92 	  const int dsgfr;	  /* cost of a DSGFR instruction. */
93 	  const int dsgr;	  /* cost of a DSGR instruction. */
94 };
95
96 const struct processor_costs *s390_cost;
97
98 static const
99 struct processor_costs z900_cost =
100 {
101 COSTS_N_INSNS (5), /* M */
102 COSTS_N_INSNS (10), /* MGHI */
103 COSTS_N_INSNS (5), /* MH */
104 COSTS_N_INSNS (4), /* MHI */
105 COSTS_N_INSNS (5), /* ML */
106 COSTS_N_INSNS (5), /* MR */
107 COSTS_N_INSNS (4), /* MS */
108 COSTS_N_INSNS (15), /* MSG */
109 COSTS_N_INSNS (7), /* MSGF */
110 COSTS_N_INSNS (7), /* MSGFR */
111 COSTS_N_INSNS (10), /* MSGR */
112 COSTS_N_INSNS (4), /* MSR */
113 COSTS_N_INSNS (7), /* multiplication in DFmode */
114 COSTS_N_INSNS (13), /* MXBR */
115 COSTS_N_INSNS (136), /* SQXBR */
116 COSTS_N_INSNS (44), /* SQDBR */
117 COSTS_N_INSNS (35), /* SQEBR */
118 COSTS_N_INSNS (18), /* MADBR */
119 COSTS_N_INSNS (13), /* MAEBR */
120 COSTS_N_INSNS (134), /* DXBR */
121 COSTS_N_INSNS (30), /* DDBR */
122 COSTS_N_INSNS (27), /* DEBR */
123 COSTS_N_INSNS (220), /* DLGR */
124 COSTS_N_INSNS (34), /* DLR */
125 COSTS_N_INSNS (34), /* DR */
126 COSTS_N_INSNS (32), /* DSGFR */
127 COSTS_N_INSNS (32), /* DSGR */
128 };
129
130 static const
131 struct processor_costs z990_cost =
132 {
133 COSTS_N_INSNS (4), /* M */
134 COSTS_N_INSNS (2), /* MGHI */
135 COSTS_N_INSNS (2), /* MH */
136 COSTS_N_INSNS (2), /* MHI */
137 COSTS_N_INSNS (4), /* ML */
138 COSTS_N_INSNS (4), /* MR */
139 COSTS_N_INSNS (5), /* MS */
140 COSTS_N_INSNS (6), /* MSG */
141 COSTS_N_INSNS (4), /* MSGF */
142 COSTS_N_INSNS (4), /* MSGFR */
143 COSTS_N_INSNS (4), /* MSGR */
144 COSTS_N_INSNS (4), /* MSR */
145 COSTS_N_INSNS (1), /* multiplication in DFmode */
146 COSTS_N_INSNS (28), /* MXBR */
147 COSTS_N_INSNS (130), /* SQXBR */
148 COSTS_N_INSNS (66), /* SQDBR */
149 COSTS_N_INSNS (38), /* SQEBR */
150 COSTS_N_INSNS (1), /* MADBR */
151 COSTS_N_INSNS (1), /* MAEBR */
152 COSTS_N_INSNS (60), /* DXBR */
153 COSTS_N_INSNS (40), /* DDBR */
154 COSTS_N_INSNS (26), /* DEBR */
155 COSTS_N_INSNS (176), /* DLGR */
156 COSTS_N_INSNS (31), /* DLR */
157 COSTS_N_INSNS (31), /* DR */
158 COSTS_N_INSNS (31), /* DSGFR */
159 COSTS_N_INSNS (31), /* DSGR */
160 };
161
162 static const
163 struct processor_costs z9_109_cost =
164 {
165 COSTS_N_INSNS (4), /* M */
166 COSTS_N_INSNS (2), /* MGHI */
167 COSTS_N_INSNS (2), /* MH */
168 COSTS_N_INSNS (2), /* MHI */
169 COSTS_N_INSNS (4), /* ML */
170 COSTS_N_INSNS (4), /* MR */
171 COSTS_N_INSNS (5), /* MS */
172 COSTS_N_INSNS (6), /* MSG */
173 COSTS_N_INSNS (4), /* MSGF */
174 COSTS_N_INSNS (4), /* MSGFR */
175 COSTS_N_INSNS (4), /* MSGR */
176 COSTS_N_INSNS (4), /* MSR */
177 COSTS_N_INSNS (1), /* multiplication in DFmode */
178 COSTS_N_INSNS (28), /* MXBR */
179 COSTS_N_INSNS (130), /* SQXBR */
180 COSTS_N_INSNS (66), /* SQDBR */
181 COSTS_N_INSNS (38), /* SQEBR */
182 COSTS_N_INSNS (1), /* MADBR */
183 COSTS_N_INSNS (1), /* MAEBR */
184 COSTS_N_INSNS (60), /* DXBR */
185 COSTS_N_INSNS (40), /* DDBR */
186 COSTS_N_INSNS (26), /* DEBR */
187 COSTS_N_INSNS (30), /* DLGR */
188 COSTS_N_INSNS (23), /* DLR */
189 COSTS_N_INSNS (23), /* DR */
190 COSTS_N_INSNS (24), /* DSGFR */
191 COSTS_N_INSNS (24), /* DSGR */
192 };
193
194 static const
195 struct processor_costs z10_cost =
196 {
197 COSTS_N_INSNS (10), /* M */
198 COSTS_N_INSNS (10), /* MGHI */
199 COSTS_N_INSNS (10), /* MH */
200 COSTS_N_INSNS (10), /* MHI */
201 COSTS_N_INSNS (10), /* ML */
202 COSTS_N_INSNS (10), /* MR */
203 COSTS_N_INSNS (10), /* MS */
204 COSTS_N_INSNS (10), /* MSG */
205 COSTS_N_INSNS (10), /* MSGF */
206 COSTS_N_INSNS (10), /* MSGFR */
207 COSTS_N_INSNS (10), /* MSGR */
208 COSTS_N_INSNS (10), /* MSR */
209 COSTS_N_INSNS (1) , /* multiplication in DFmode */
210 COSTS_N_INSNS (50), /* MXBR */
211 COSTS_N_INSNS (120), /* SQXBR */
212 COSTS_N_INSNS (52), /* SQDBR */
213 COSTS_N_INSNS (38), /* SQEBR */
214 COSTS_N_INSNS (1), /* MADBR */
215 COSTS_N_INSNS (1), /* MAEBR */
216 COSTS_N_INSNS (111), /* DXBR */
217 COSTS_N_INSNS (39), /* DDBR */
218 COSTS_N_INSNS (32), /* DEBR */
219 COSTS_N_INSNS (160), /* DLGR */
220 COSTS_N_INSNS (71), /* DLR */
221 COSTS_N_INSNS (71), /* DR */
222 COSTS_N_INSNS (71), /* DSGFR */
223 COSTS_N_INSNS (71), /* DSGR */
224 };
225
226 static const
227 struct processor_costs z196_cost =
228 {
229 COSTS_N_INSNS (7), /* M */
230 COSTS_N_INSNS (5), /* MGHI */
231 COSTS_N_INSNS (5), /* MH */
232 COSTS_N_INSNS (5), /* MHI */
233 COSTS_N_INSNS (7), /* ML */
234 COSTS_N_INSNS (7), /* MR */
235 COSTS_N_INSNS (6), /* MS */
236 COSTS_N_INSNS (8), /* MSG */
237 COSTS_N_INSNS (6), /* MSGF */
238 COSTS_N_INSNS (6), /* MSGFR */
239 COSTS_N_INSNS (8), /* MSGR */
240 COSTS_N_INSNS (6), /* MSR */
241 COSTS_N_INSNS (1) , /* multiplication in DFmode */
242 COSTS_N_INSNS (40), /* MXBR B+40 */
243 COSTS_N_INSNS (100), /* SQXBR B+100 */
244 COSTS_N_INSNS (42), /* SQDBR B+42 */
245 COSTS_N_INSNS (28), /* SQEBR B+28 */
246 COSTS_N_INSNS (1), /* MADBR B */
247 COSTS_N_INSNS (1), /* MAEBR B */
248 COSTS_N_INSNS (101), /* DXBR B+101 */
249 COSTS_N_INSNS (29), /* DDBR */
250 COSTS_N_INSNS (22), /* DEBR */
251 COSTS_N_INSNS (160), /* DLGR cracked */
252 COSTS_N_INSNS (160), /* DLR cracked */
253 COSTS_N_INSNS (160), /* DR expanded */
254 COSTS_N_INSNS (160), /* DSGFR cracked */
255 COSTS_N_INSNS (160), /* DSGR cracked */
256 };
257
258 extern int reload_completed;
259
260 /* Kept up to date using the SCHED_VARIABLE_ISSUE hook. */
261 static rtx last_scheduled_insn;
262
263 /* Structure used to hold the components of an S/390 memory
264 address. A legitimate address on S/390 is of the general
265 form
266 base + index + displacement
267 where any of the components is optional.
268
269 base and index are registers of the class ADDR_REGS,
270 displacement is an unsigned 12-bit immediate constant. */
271
272 struct s390_address
273 {
274 rtx base;
275 rtx indx;
276 rtx disp;
277 bool pointer;
278 bool literal_pool;
279 };
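
/* Illustrative example: s390_decompose_address below splits a canonical
   address such as (plus (plus (reg %r2) (reg %r3)) (const_int 42)) into
   indx = %r2, base = %r3 and disp = 42 (register numbers chosen only for
   illustration), provided both registers qualify as address registers.  */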
280
281 /* Which cpu are we tuning for. */
282 enum processor_type s390_tune = PROCESSOR_max;
283 int s390_tune_flags;
284 /* Which instruction set architecture to use. */
285 enum processor_type s390_arch;
286 int s390_arch_flags;
287
288 HOST_WIDE_INT s390_warn_framesize = 0;
289 HOST_WIDE_INT s390_stack_size = 0;
290 HOST_WIDE_INT s390_stack_guard = 0;
291
292 /* The following structure is embedded in the machine
293 specific part of struct function. */
294
295 struct GTY (()) s390_frame_layout
296 {
297 /* Offset within stack frame. */
298 HOST_WIDE_INT gprs_offset;
299 HOST_WIDE_INT f0_offset;
300 HOST_WIDE_INT f4_offset;
301 HOST_WIDE_INT f8_offset;
302 HOST_WIDE_INT backchain_offset;
303
304 	  /* Numbers of the first and last gpr for which slots in the
305 	     register save area are reserved. */
306 int first_save_gpr_slot;
307 int last_save_gpr_slot;
308
309 /* Number of first and last gpr to be saved, restored. */
310 int first_save_gpr;
311 int first_restore_gpr;
312 int last_save_gpr;
313 int last_restore_gpr;
314
315 /* Bits standing for floating point registers. Set, if the
316 respective register has to be saved. Starting with reg 16 (f0)
317 at the rightmost bit.
318 Bit 15 - 8 7 6 5 4 3 2 1 0
319 fpr 15 - 8 7 5 3 1 6 4 2 0
320 reg 31 - 24 23 22 21 20 19 18 17 16 */
321 unsigned int fpr_bitmap;
322
323 /* Number of floating point registers f8-f15 which must be saved. */
324 int high_fprs;
325
326 /* Set if return address needs to be saved.
327 This flag is set by s390_return_addr_rtx if it could not use
328 the initial value of r14 and therefore depends on r14 saved
329 to the stack. */
330 bool save_return_addr_p;
331
332 /* Size of stack frame. */
333 HOST_WIDE_INT frame_size;
334 };
335
336 /* Define the structure for the machine field in struct function. */
337
338 struct GTY(()) machine_function
339 {
340 struct s390_frame_layout frame_layout;
341
342 /* Literal pool base register. */
343 rtx base_reg;
344
345 /* True if we may need to perform branch splitting. */
346 bool split_branches_pending_p;
347
348 /* Some local-dynamic TLS symbol name. */
349 const char *some_ld_name;
350
351 bool has_landing_pad_p;
352 };
353
354 /* Few accessor macros for struct cfun->machine->s390_frame_layout. */
355
356 #define cfun_frame_layout (cfun->machine->frame_layout)
357 #define cfun_save_high_fprs_p (!!cfun_frame_layout.high_fprs)
358 #define cfun_gprs_save_area_size ((cfun_frame_layout.last_save_gpr_slot - \
359 cfun_frame_layout.first_save_gpr_slot + 1) * UNITS_PER_LONG)
360 #define cfun_set_fpr_bit(BITNUM) (cfun->machine->frame_layout.fpr_bitmap |= \
361 (1 << (BITNUM)))
362 #define cfun_fpr_bit_p(BITNUM) (!!(cfun->machine->frame_layout.fpr_bitmap & \
363 (1 << (BITNUM))))
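
/* Illustrative example: after cfun_set_fpr_bit (0) and cfun_set_fpr_bit (8)
   the bitmap holds 0x101 and cfun_fpr_bit_p (8) is true.  Per the layout
   documented in struct s390_frame_layout, bit 0 stands for f0 (reg 16) and
   bit 8 for f8, one of the registers counted in high_fprs.  */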
364
365 /* Number of GPRs and FPRs used for argument passing. */
366 #define GP_ARG_NUM_REG 5
367 #define FP_ARG_NUM_REG (TARGET_64BIT? 4 : 2)
368
369 /* A couple of shortcuts. */
370 #define CONST_OK_FOR_J(x) \
371 CONST_OK_FOR_CONSTRAINT_P((x), 'J', "J")
372 #define CONST_OK_FOR_K(x) \
373 CONST_OK_FOR_CONSTRAINT_P((x), 'K', "K")
374 #define CONST_OK_FOR_Os(x) \
375 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
376 #define CONST_OK_FOR_Op(x) \
377 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
378 #define CONST_OK_FOR_On(x) \
379 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")
380
381 #define REGNO_PAIR_OK(REGNO, MODE) \
382 (HARD_REGNO_NREGS ((REGNO), (MODE)) == 1 || !((REGNO) & 1))
383
384 /* That's the read ahead of the dynamic branch prediction unit in
385 bytes on a z10 (or higher) CPU. */
386 #define PREDICT_DISTANCE (TARGET_Z10 ? 384 : 2048)
387
388 static enum machine_mode
389 s390_libgcc_cmp_return_mode (void)
390 {
391 return TARGET_64BIT ? DImode : SImode;
392 }
393
394 static enum machine_mode
395 s390_libgcc_shift_count_mode (void)
396 {
397 return TARGET_64BIT ? DImode : SImode;
398 }
399
400 static enum machine_mode
401 s390_unwind_word_mode (void)
402 {
403 return TARGET_64BIT ? DImode : SImode;
404 }
405
406 /* Return true if the back end supports mode MODE. */
407 static bool
408 s390_scalar_mode_supported_p (enum machine_mode mode)
409 {
410 /* In contrast to the default implementation reject TImode constants on 31bit
411 TARGET_ZARCH for ABI compliance. */
412 if (!TARGET_64BIT && TARGET_ZARCH && mode == TImode)
413 return false;
414
415 if (DECIMAL_FLOAT_MODE_P (mode))
416 return default_decimal_float_supported_p ();
417
418 return default_scalar_mode_supported_p (mode);
419 }
420
421 /* Set the has_landing_pad_p flag in struct machine_function to VALUE. */
422
423 void
424 s390_set_has_landing_pad_p (bool value)
425 {
426 cfun->machine->has_landing_pad_p = value;
427 }
428
429 /* If two condition code modes are compatible, return a condition code
430 mode which is compatible with both. Otherwise, return
431 VOIDmode. */
432
433 static enum machine_mode
434 s390_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
435 {
436 if (m1 == m2)
437 return m1;
438
439 switch (m1)
440 {
441 case CCZmode:
442 if (m2 == CCUmode || m2 == CCTmode || m2 == CCZ1mode
443 || m2 == CCSmode || m2 == CCSRmode || m2 == CCURmode)
444 return m2;
445 return VOIDmode;
446
447 case CCSmode:
448 case CCUmode:
449 case CCTmode:
450 case CCSRmode:
451 case CCURmode:
452 case CCZ1mode:
453 if (m2 == CCZmode)
454 return m1;
455
456 return VOIDmode;
457
458 default:
459 return VOIDmode;
460 }
461 return VOIDmode;
462 }
463
464 /* Return true if SET either doesn't set the CC register, or else
465 the source and destination have matching CC modes and that
466 CC mode is at least as constrained as REQ_MODE. */
467
468 static bool
469 s390_match_ccmode_set (rtx set, enum machine_mode req_mode)
470 {
471 enum machine_mode set_mode;
472
473 gcc_assert (GET_CODE (set) == SET);
474
475 if (GET_CODE (SET_DEST (set)) != REG || !CC_REGNO_P (REGNO (SET_DEST (set))))
476 return 1;
477
478 set_mode = GET_MODE (SET_DEST (set));
479 switch (set_mode)
480 {
481 case CCSmode:
482 case CCSRmode:
483 case CCUmode:
484 case CCURmode:
485 case CCLmode:
486 case CCL1mode:
487 case CCL2mode:
488 case CCL3mode:
489 case CCT1mode:
490 case CCT2mode:
491 case CCT3mode:
492 if (req_mode != set_mode)
493 return 0;
494 break;
495
496 case CCZmode:
497 if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode
498 && req_mode != CCSRmode && req_mode != CCURmode)
499 return 0;
500 break;
501
502 case CCAPmode:
503 case CCANmode:
504 if (req_mode != CCAmode)
505 return 0;
506 break;
507
508 default:
509 gcc_unreachable ();
510 }
511
512 return (GET_MODE (SET_SRC (set)) == set_mode);
513 }
514
515 /* Return true if every SET in INSN that sets the CC register
516 has source and destination with matching CC modes and that
517 CC mode is at least as constrained as REQ_MODE.
518 If REQ_MODE is VOIDmode, always return false. */
519
520 bool
521 s390_match_ccmode (rtx insn, enum machine_mode req_mode)
522 {
523 int i;
524
525 /* s390_tm_ccmode returns VOIDmode to indicate failure. */
526 if (req_mode == VOIDmode)
527 return false;
528
529 if (GET_CODE (PATTERN (insn)) == SET)
530 return s390_match_ccmode_set (PATTERN (insn), req_mode);
531
532 if (GET_CODE (PATTERN (insn)) == PARALLEL)
533 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
534 {
535 rtx set = XVECEXP (PATTERN (insn), 0, i);
536 if (GET_CODE (set) == SET)
537 if (!s390_match_ccmode_set (set, req_mode))
538 return false;
539 }
540
541 return true;
542 }
543
544 /* If a test-under-mask instruction can be used to implement
545 (compare (and ... OP1) OP2), return the CC mode required
546 to do that. Otherwise, return VOIDmode.
547 MIXED is true if the instruction can distinguish between
548    CC1 and CC2 for mixed selected bits (TMxx); it is false
549    if the instruction cannot (TM). */
550
551 enum machine_mode
552 s390_tm_ccmode (rtx op1, rtx op2, bool mixed)
553 {
554 int bit0, bit1;
555
556 /* ??? Fixme: should work on CONST_DOUBLE as well. */
557 if (GET_CODE (op1) != CONST_INT || GET_CODE (op2) != CONST_INT)
558 return VOIDmode;
559
560 /* Selected bits all zero: CC0.
561 e.g.: int a; if ((a & (16 + 128)) == 0) */
562 if (INTVAL (op2) == 0)
563 return CCTmode;
564
565 /* Selected bits all one: CC3.
566 e.g.: int a; if ((a & (16 + 128)) == 16 + 128) */
567 if (INTVAL (op2) == INTVAL (op1))
568 return CCT3mode;
569
570 /* Exactly two bits selected, mixed zeroes and ones: CC1 or CC2. e.g.:
571 int a;
572 if ((a & (16 + 128)) == 16) -> CCT1
573 if ((a & (16 + 128)) == 128) -> CCT2 */
574 if (mixed)
575 {
576 bit1 = exact_log2 (INTVAL (op2));
577 bit0 = exact_log2 (INTVAL (op1) ^ INTVAL (op2));
578 if (bit0 != -1 && bit1 != -1)
579 return bit0 > bit1 ? CCT1mode : CCT2mode;
580 }
581
582 return VOIDmode;
583 }
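
/* Illustrative example: for if ((a & (16 + 128)) == 16) we get OP1 = 144
   and OP2 = 16.  With MIXED true, bit1 = exact_log2 (16) = 4 and
   bit0 = exact_log2 (144 ^ 16) = 7; since bit0 > bit1 the result is
   CCT1mode, in line with the CCT1 case listed in s390_select_ccmode.  */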
584
585 /* Given a comparison code OP (EQ, NE, etc.) and the operands
586 OP0 and OP1 of a COMPARE, return the mode to be used for the
587 comparison. */
588
589 enum machine_mode
590 s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
591 {
592 switch (code)
593 {
594 case EQ:
595 case NE:
596 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
597 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
598 return CCAPmode;
599 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
600 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
601 return CCAPmode;
602 if ((GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
603 || GET_CODE (op1) == NEG)
604 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
605 return CCLmode;
606
607 if (GET_CODE (op0) == AND)
608 {
609 /* Check whether we can potentially do it via TM. */
610 enum machine_mode ccmode;
611 ccmode = s390_tm_ccmode (XEXP (op0, 1), op1, 1);
612 if (ccmode != VOIDmode)
613 {
614 /* Relax CCTmode to CCZmode to allow fall-back to AND
615 if that turns out to be beneficial. */
616 return ccmode == CCTmode ? CCZmode : ccmode;
617 }
618 }
619
620 if (register_operand (op0, HImode)
621 && GET_CODE (op1) == CONST_INT
622 && (INTVAL (op1) == -1 || INTVAL (op1) == 65535))
623 return CCT3mode;
624 if (register_operand (op0, QImode)
625 && GET_CODE (op1) == CONST_INT
626 && (INTVAL (op1) == -1 || INTVAL (op1) == 255))
627 return CCT3mode;
628
629 return CCZmode;
630
631 case LE:
632 case LT:
633 case GE:
634 case GT:
635 /* The only overflow condition of NEG and ABS happens when
636 -INT_MAX is used as parameter, which stays negative. So
637 we have an overflow from a positive value to a negative.
638 Using CCAP mode the resulting cc can be used for comparisons. */
639 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
640 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
641 return CCAPmode;
642
643 /* If constants are involved in an add instruction it is possible to use
644 the resulting cc for comparisons with zero. Knowing the sign of the
645 constant the overflow behavior gets predictable. e.g.:
646 int a, b; if ((b = a + c) > 0)
647 with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP */
648 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
649 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
650 {
651 if (INTVAL (XEXP((op0), 1)) < 0)
652 return CCANmode;
653 else
654 return CCAPmode;
655 }
656 /* Fall through. */
657 case UNORDERED:
658 case ORDERED:
659 case UNEQ:
660 case UNLE:
661 case UNLT:
662 case UNGE:
663 case UNGT:
664 case LTGT:
665 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
666 && GET_CODE (op1) != CONST_INT)
667 return CCSRmode;
668 return CCSmode;
669
670 case LTU:
671 case GEU:
672 if (GET_CODE (op0) == PLUS
673 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
674 return CCL1mode;
675
676 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
677 && GET_CODE (op1) != CONST_INT)
678 return CCURmode;
679 return CCUmode;
680
681 case LEU:
682 case GTU:
683 if (GET_CODE (op0) == MINUS
684 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
685 return CCL2mode;
686
687 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
688 && GET_CODE (op1) != CONST_INT)
689 return CCURmode;
690 return CCUmode;
691
692 default:
693 gcc_unreachable ();
694 }
695 }
696
697 /* Replace the comparison OP0 CODE OP1 by a semantically equivalent one
698 that we can implement more efficiently. */
699
700 void
701 s390_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1)
702 {
703 /* Convert ZERO_EXTRACT back to AND to enable TM patterns. */
704 if ((*code == EQ || *code == NE)
705 && *op1 == const0_rtx
706 && GET_CODE (*op0) == ZERO_EXTRACT
707 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
708 && GET_CODE (XEXP (*op0, 2)) == CONST_INT
709 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
710 {
711 rtx inner = XEXP (*op0, 0);
712 HOST_WIDE_INT modesize = GET_MODE_BITSIZE (GET_MODE (inner));
713 HOST_WIDE_INT len = INTVAL (XEXP (*op0, 1));
714 HOST_WIDE_INT pos = INTVAL (XEXP (*op0, 2));
715
716 if (len > 0 && len < modesize
717 && pos >= 0 && pos + len <= modesize
718 && modesize <= HOST_BITS_PER_WIDE_INT)
719 {
720 unsigned HOST_WIDE_INT block;
721 block = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
722 block <<= modesize - pos - len;
723
724 *op0 = gen_rtx_AND (GET_MODE (inner), inner,
725 gen_int_mode (block, GET_MODE (inner)));
726 }
727 }
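
  /* Illustrative example: for (zero_extract:SI x 8 0) == 0 we have
     modesize = 32, len = 8 and pos = 0, so block becomes 0xff << 24 and
     the comparison is rewritten as (x & 0xff000000) == 0, which the TM
     patterns can pick up.  */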
728
729 /* Narrow AND of memory against immediate to enable TM. */
730 if ((*code == EQ || *code == NE)
731 && *op1 == const0_rtx
732 && GET_CODE (*op0) == AND
733 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
734 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
735 {
736 rtx inner = XEXP (*op0, 0);
737 rtx mask = XEXP (*op0, 1);
738
739 /* Ignore paradoxical SUBREGs if all extra bits are masked out. */
740 if (GET_CODE (inner) == SUBREG
741 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (inner)))
742 && (GET_MODE_SIZE (GET_MODE (inner))
743 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
744 && ((INTVAL (mask)
745 & GET_MODE_MASK (GET_MODE (inner))
746 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
747 == 0))
748 inner = SUBREG_REG (inner);
749
750 /* Do not change volatile MEMs. */
751 if (MEM_P (inner) && !MEM_VOLATILE_P (inner))
752 {
753 int part = s390_single_part (XEXP (*op0, 1),
754 GET_MODE (inner), QImode, 0);
755 if (part >= 0)
756 {
757 mask = gen_int_mode (s390_extract_part (mask, QImode, 0), QImode);
758 inner = adjust_address_nv (inner, QImode, part);
759 *op0 = gen_rtx_AND (QImode, inner, mask);
760 }
761 }
762 }
763
764 /* Narrow comparisons against 0xffff to HImode if possible. */
765 if ((*code == EQ || *code == NE)
766 && GET_CODE (*op1) == CONST_INT
767 && INTVAL (*op1) == 0xffff
768 && SCALAR_INT_MODE_P (GET_MODE (*op0))
769 && (nonzero_bits (*op0, GET_MODE (*op0))
770 & ~(unsigned HOST_WIDE_INT) 0xffff) == 0)
771 {
772 *op0 = gen_lowpart (HImode, *op0);
773 *op1 = constm1_rtx;
774 }
775
776 /* Remove redundant UNSPEC_CCU_TO_INT conversions if possible. */
777 if (GET_CODE (*op0) == UNSPEC
778 && XINT (*op0, 1) == UNSPEC_CCU_TO_INT
779 && XVECLEN (*op0, 0) == 1
780 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCUmode
781 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
782 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
783 && *op1 == const0_rtx)
784 {
785 enum rtx_code new_code = UNKNOWN;
786 switch (*code)
787 {
788 case EQ: new_code = EQ; break;
789 case NE: new_code = NE; break;
790 case LT: new_code = GTU; break;
791 case GT: new_code = LTU; break;
792 case LE: new_code = GEU; break;
793 case GE: new_code = LEU; break;
794 default: break;
795 }
796
797 if (new_code != UNKNOWN)
798 {
799 *op0 = XVECEXP (*op0, 0, 0);
800 *code = new_code;
801 }
802 }
803
804 /* Remove redundant UNSPEC_CCZ_TO_INT conversions if possible. */
805 if (GET_CODE (*op0) == UNSPEC
806 && XINT (*op0, 1) == UNSPEC_CCZ_TO_INT
807 && XVECLEN (*op0, 0) == 1
808 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCZmode
809 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
810 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
811 && *op1 == const0_rtx)
812 {
813 enum rtx_code new_code = UNKNOWN;
814 switch (*code)
815 {
816 case EQ: new_code = EQ; break;
817 case NE: new_code = NE; break;
818 default: break;
819 }
820
821 if (new_code != UNKNOWN)
822 {
823 *op0 = XVECEXP (*op0, 0, 0);
824 *code = new_code;
825 }
826 }
827
828 /* Simplify cascaded EQ, NE with const0_rtx. */
829 if ((*code == NE || *code == EQ)
830 && (GET_CODE (*op0) == EQ || GET_CODE (*op0) == NE)
831 && GET_MODE (*op0) == SImode
832 && GET_MODE (XEXP (*op0, 0)) == CCZ1mode
833 && REG_P (XEXP (*op0, 0))
834 && XEXP (*op0, 1) == const0_rtx
835 && *op1 == const0_rtx)
836 {
837 if ((*code == EQ && GET_CODE (*op0) == NE)
838 || (*code == NE && GET_CODE (*op0) == EQ))
839 *code = EQ;
840 else
841 *code = NE;
842 *op0 = XEXP (*op0, 0);
843 }
844
845 /* Prefer register over memory as first operand. */
846 if (MEM_P (*op0) && REG_P (*op1))
847 {
848 rtx tem = *op0; *op0 = *op1; *op1 = tem;
849 *code = swap_condition (*code);
850 }
851 }
852
853 /* Emit a compare instruction suitable to implement the comparison
854 OP0 CODE OP1. Return the correct condition RTL to be placed in
855 the IF_THEN_ELSE of the conditional branch testing the result. */
856
857 rtx
858 s390_emit_compare (enum rtx_code code, rtx op0, rtx op1)
859 {
860 enum machine_mode mode = s390_select_ccmode (code, op0, op1);
861 rtx cc;
862
863 /* Do not output a redundant compare instruction if a compare_and_swap
864 pattern already computed the result and the machine modes are compatible. */
865 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
866 {
867 gcc_assert (s390_cc_modes_compatible (GET_MODE (op0), mode)
868 == GET_MODE (op0));
869 cc = op0;
870 }
871 else
872 {
873 cc = gen_rtx_REG (mode, CC_REGNUM);
874 emit_insn (gen_rtx_SET (VOIDmode, cc, gen_rtx_COMPARE (mode, op0, op1)));
875 }
876
877 return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
878 }
879
880 /* Emit a SImode compare and swap instruction setting MEM to NEW_RTX if OLD
881 matches CMP.
882 Return the correct condition RTL to be placed in the IF_THEN_ELSE of the
883 conditional branch testing the result. */
884
885 static rtx
886 s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem, rtx cmp, rtx new_rtx)
887 {
888 emit_insn (gen_sync_compare_and_swapsi (old, mem, cmp, new_rtx));
889 return s390_emit_compare (code, gen_rtx_REG (CCZ1mode, CC_REGNUM), const0_rtx);
890 }
891
892 /* Emit a jump instruction to TARGET. If COND is NULL_RTX, emit an
893 unconditional jump, else a conditional jump under condition COND. */
894
895 void
896 s390_emit_jump (rtx target, rtx cond)
897 {
898 rtx insn;
899
900 target = gen_rtx_LABEL_REF (VOIDmode, target);
901 if (cond)
902 target = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, target, pc_rtx);
903
904 insn = gen_rtx_SET (VOIDmode, pc_rtx, target);
905 emit_jump_insn (insn);
906 }
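
/* Illustrative sketch of how the two helpers above are typically combined
   when expanding a conditional branch (operand names are placeholders):

     rtx cond = s390_emit_compare (GT, op0, op1);
     s390_emit_jump (label, cond);

   This emits the compare setting CC followed by a conditional jump.  */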
907
908 /* Return branch condition mask to implement a branch
909 specified by CODE. Return -1 for invalid comparisons. */
910
911 int
912 s390_branch_condition_mask (rtx code)
913 {
914 const int CC0 = 1 << 3;
915 const int CC1 = 1 << 2;
916 const int CC2 = 1 << 1;
917 const int CC3 = 1 << 0;
918
919 gcc_assert (GET_CODE (XEXP (code, 0)) == REG);
920 gcc_assert (REGNO (XEXP (code, 0)) == CC_REGNUM);
921 gcc_assert (XEXP (code, 1) == const0_rtx);
922
923 switch (GET_MODE (XEXP (code, 0)))
924 {
925 case CCZmode:
926 case CCZ1mode:
927 switch (GET_CODE (code))
928 {
929 case EQ: return CC0;
930 case NE: return CC1 | CC2 | CC3;
931 default: return -1;
932 }
933 break;
934
935 case CCT1mode:
936 switch (GET_CODE (code))
937 {
938 case EQ: return CC1;
939 case NE: return CC0 | CC2 | CC3;
940 default: return -1;
941 }
942 break;
943
944 case CCT2mode:
945 switch (GET_CODE (code))
946 {
947 case EQ: return CC2;
948 case NE: return CC0 | CC1 | CC3;
949 default: return -1;
950 }
951 break;
952
953 case CCT3mode:
954 switch (GET_CODE (code))
955 {
956 case EQ: return CC3;
957 case NE: return CC0 | CC1 | CC2;
958 default: return -1;
959 }
960 break;
961
962 case CCLmode:
963 switch (GET_CODE (code))
964 {
965 case EQ: return CC0 | CC2;
966 case NE: return CC1 | CC3;
967 default: return -1;
968 }
969 break;
970
971 case CCL1mode:
972 switch (GET_CODE (code))
973 {
974 case LTU: return CC2 | CC3; /* carry */
975 case GEU: return CC0 | CC1; /* no carry */
976 default: return -1;
977 }
978 break;
979
980 case CCL2mode:
981 switch (GET_CODE (code))
982 {
983 case GTU: return CC0 | CC1; /* borrow */
984 case LEU: return CC2 | CC3; /* no borrow */
985 default: return -1;
986 }
987 break;
988
989 case CCL3mode:
990 switch (GET_CODE (code))
991 {
992 case EQ: return CC0 | CC2;
993 case NE: return CC1 | CC3;
994 case LTU: return CC1;
995 case GTU: return CC3;
996 case LEU: return CC1 | CC2;
997 case GEU: return CC2 | CC3;
998 default: return -1;
999 }
1000
1001 case CCUmode:
1002 switch (GET_CODE (code))
1003 {
1004 case EQ: return CC0;
1005 case NE: return CC1 | CC2 | CC3;
1006 case LTU: return CC1;
1007 case GTU: return CC2;
1008 case LEU: return CC0 | CC1;
1009 case GEU: return CC0 | CC2;
1010 default: return -1;
1011 }
1012 break;
1013
1014 case CCURmode:
1015 switch (GET_CODE (code))
1016 {
1017 case EQ: return CC0;
1018 case NE: return CC2 | CC1 | CC3;
1019 case LTU: return CC2;
1020 case GTU: return CC1;
1021 case LEU: return CC0 | CC2;
1022 case GEU: return CC0 | CC1;
1023 default: return -1;
1024 }
1025 break;
1026
1027 case CCAPmode:
1028 switch (GET_CODE (code))
1029 {
1030 case EQ: return CC0;
1031 case NE: return CC1 | CC2 | CC3;
1032 case LT: return CC1 | CC3;
1033 case GT: return CC2;
1034 case LE: return CC0 | CC1 | CC3;
1035 case GE: return CC0 | CC2;
1036 default: return -1;
1037 }
1038 break;
1039
1040 case CCANmode:
1041 switch (GET_CODE (code))
1042 {
1043 case EQ: return CC0;
1044 case NE: return CC1 | CC2 | CC3;
1045 case LT: return CC1;
1046 case GT: return CC2 | CC3;
1047 case LE: return CC0 | CC1;
1048 case GE: return CC0 | CC2 | CC3;
1049 default: return -1;
1050 }
1051 break;
1052
1053 case CCSmode:
1054 switch (GET_CODE (code))
1055 {
1056 case EQ: return CC0;
1057 case NE: return CC1 | CC2 | CC3;
1058 case LT: return CC1;
1059 case GT: return CC2;
1060 case LE: return CC0 | CC1;
1061 case GE: return CC0 | CC2;
1062 case UNORDERED: return CC3;
1063 case ORDERED: return CC0 | CC1 | CC2;
1064 case UNEQ: return CC0 | CC3;
1065 case UNLT: return CC1 | CC3;
1066 case UNGT: return CC2 | CC3;
1067 case UNLE: return CC0 | CC1 | CC3;
1068 case UNGE: return CC0 | CC2 | CC3;
1069 case LTGT: return CC1 | CC2;
1070 default: return -1;
1071 }
1072 break;
1073
1074 case CCSRmode:
1075 switch (GET_CODE (code))
1076 {
1077 case EQ: return CC0;
1078 case NE: return CC2 | CC1 | CC3;
1079 case LT: return CC2;
1080 case GT: return CC1;
1081 case LE: return CC0 | CC2;
1082 case GE: return CC0 | CC1;
1083 case UNORDERED: return CC3;
1084 case ORDERED: return CC0 | CC2 | CC1;
1085 case UNEQ: return CC0 | CC3;
1086 case UNLT: return CC2 | CC3;
1087 case UNGT: return CC1 | CC3;
1088 case UNLE: return CC0 | CC2 | CC3;
1089 case UNGE: return CC0 | CC1 | CC3;
1090 case LTGT: return CC2 | CC1;
1091 default: return -1;
1092 }
1093 break;
1094
1095 default:
1096 return -1;
1097 }
1098 }
1099
1100
1101 /* Return branch condition mask to implement a compare and branch
1102 specified by CODE. Return -1 for invalid comparisons. */
1103
1104 int
1105 s390_compare_and_branch_condition_mask (rtx code)
1106 {
1107 const int CC0 = 1 << 3;
1108 const int CC1 = 1 << 2;
1109 const int CC2 = 1 << 1;
1110
1111 switch (GET_CODE (code))
1112 {
1113 case EQ:
1114 return CC0;
1115 case NE:
1116 return CC1 | CC2;
1117 case LT:
1118 case LTU:
1119 return CC1;
1120 case GT:
1121 case GTU:
1122 return CC2;
1123 case LE:
1124 case LEU:
1125 return CC0 | CC1;
1126 case GE:
1127 case GEU:
1128 return CC0 | CC2;
1129 default:
1130 gcc_unreachable ();
1131 }
1132 return -1;
1133 }
1134
1135 /* If INV is false, return assembler mnemonic string to implement
1136 a branch specified by CODE. If INV is true, return mnemonic
1137 for the corresponding inverted branch. */
1138
1139 static const char *
1140 s390_branch_condition_mnemonic (rtx code, int inv)
1141 {
1142 int mask;
1143
1144 static const char *const mnemonic[16] =
1145 {
1146 NULL, "o", "h", "nle",
1147 "l", "nhe", "lh", "ne",
1148 "e", "nlh", "he", "nl",
1149 "le", "nh", "no", NULL
1150 };
1151
1152 if (GET_CODE (XEXP (code, 0)) == REG
1153 && REGNO (XEXP (code, 0)) == CC_REGNUM
1154 && XEXP (code, 1) == const0_rtx)
1155 mask = s390_branch_condition_mask (code);
1156 else
1157 mask = s390_compare_and_branch_condition_mask (code);
1158
1159 gcc_assert (mask >= 0);
1160
1161 if (inv)
1162 mask ^= 15;
1163
1164 gcc_assert (mask >= 1 && mask <= 14);
1165
1166 return mnemonic[mask];
1167 }
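
/* Illustrative example: for an EQ comparison in CCSmode,
   s390_branch_condition_mask returns CC0 = 8 and mnemonic[8] is "e";
   with INV set the mask becomes 8 ^ 15 = 7 and mnemonic[7] is "ne".  */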
1168
1169 /* Return the part of op which has a value different from def.
1170 The size of the part is determined by mode.
1171 Use this function only if you already know that op really
1172 contains such a part. */
1173
1174 unsigned HOST_WIDE_INT
1175 s390_extract_part (rtx op, enum machine_mode mode, int def)
1176 {
1177 unsigned HOST_WIDE_INT value = 0;
1178 int max_parts = HOST_BITS_PER_WIDE_INT / GET_MODE_BITSIZE (mode);
1179 int part_bits = GET_MODE_BITSIZE (mode);
1180 unsigned HOST_WIDE_INT part_mask
1181 = ((unsigned HOST_WIDE_INT)1 << part_bits) - 1;
1182 int i;
1183
1184 for (i = 0; i < max_parts; i++)
1185 {
1186 if (i == 0)
1187 value = (unsigned HOST_WIDE_INT) INTVAL (op);
1188 else
1189 value >>= part_bits;
1190
1191 if ((value & part_mask) != (def & part_mask))
1192 return value & part_mask;
1193 }
1194
1195 gcc_unreachable ();
1196 }
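
/* Illustrative example: for OP = 0x120000, MODE = HImode and DEF = 0 the
   low 16-bit part is 0x0000 and equals DEF, so the function returns the
   next part, 0x0012.  */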
1197
1198 /* If OP is an integer constant of mode MODE with exactly one
1199 part of mode PART_MODE unequal to DEF, return the number of that
1200 part. Otherwise, return -1. */
1201
1202 int
1203 s390_single_part (rtx op,
1204 enum machine_mode mode,
1205 enum machine_mode part_mode,
1206 int def)
1207 {
1208 unsigned HOST_WIDE_INT value = 0;
1209 int n_parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (part_mode);
1210 unsigned HOST_WIDE_INT part_mask
1211 = ((unsigned HOST_WIDE_INT)1 << GET_MODE_BITSIZE (part_mode)) - 1;
1212 int i, part = -1;
1213
1214 if (GET_CODE (op) != CONST_INT)
1215 return -1;
1216
1217 for (i = 0; i < n_parts; i++)
1218 {
1219 if (i == 0)
1220 value = (unsigned HOST_WIDE_INT) INTVAL (op);
1221 else
1222 value >>= GET_MODE_BITSIZE (part_mode);
1223
1224 if ((value & part_mask) != (def & part_mask))
1225 {
1226 if (part != -1)
1227 return -1;
1228 else
1229 part = i;
1230 }
1231 }
1232 return part == -1 ? -1 : n_parts - 1 - part;
1233 }
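
/* Illustrative example: for OP = 0x00ff0000, MODE = SImode,
   PART_MODE = QImode and DEF = 0 only the part at i = 2 (counting from
   the least significant byte) differs from DEF, so the result is
   n_parts - 1 - part = 4 - 1 - 2 = 1, i.e. parts are numbered from the
   most significant end.  */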
1234
1235 /* Return true if IN contains a contiguous bitfield in the lower SIZE
1236 bits and no other bits are set in IN. POS and LENGTH can be used
1237 to obtain the start position and the length of the bitfield.
1238
1239 POS gives the position of the first bit of the bitfield counting
1240 from the lowest order bit starting with zero. In order to use this
1241 value for S/390 instructions this has to be converted to "bits big
1242 endian" style. */
1243
1244 bool
1245 s390_contiguous_bitmask_p (unsigned HOST_WIDE_INT in, int size,
1246 int *pos, int *length)
1247 {
1248 int tmp_pos = 0;
1249 int tmp_length = 0;
1250 int i;
1251 unsigned HOST_WIDE_INT mask = 1ULL;
1252 bool contiguous = false;
1253
1254 for (i = 0; i < size; mask <<= 1, i++)
1255 {
1256 if (contiguous)
1257 {
1258 if (mask & in)
1259 tmp_length++;
1260 else
1261 break;
1262 }
1263 else
1264 {
1265 if (mask & in)
1266 {
1267 contiguous = true;
1268 tmp_length++;
1269 }
1270 else
1271 tmp_pos++;
1272 }
1273 }
1274
1275 if (!tmp_length)
1276 return false;
1277
1278 /* Calculate a mask for all bits beyond the contiguous bits. */
1279 mask = (-1LL & ~(((1ULL << (tmp_length + tmp_pos - 1)) << 1) - 1));
1280
1281 if (mask & in)
1282 return false;
1283
1284 if (tmp_length + tmp_pos - 1 > size)
1285 return false;
1286
1287 if (length)
1288 *length = tmp_length;
1289
1290 if (pos)
1291 *pos = tmp_pos;
1292
1293 return true;
1294 }
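
/* Illustrative example: for IN = 0x0ff0 and SIZE = 32 the set bits form
   one contiguous field, so *POS is set to 4, *LENGTH to 8 and the
   function returns true; for IN = 0x0101 the set bits are not contiguous
   and it returns false.  */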
1295
1296 /* Check whether we can (and want to) split a double-word
1297 move in mode MODE from SRC to DST into two single-word
1298 moves, moving the subword FIRST_SUBWORD first. */
1299
1300 bool
1301 s390_split_ok_p (rtx dst, rtx src, enum machine_mode mode, int first_subword)
1302 {
1303 /* Floating point registers cannot be split. */
1304 if (FP_REG_P (src) || FP_REG_P (dst))
1305 return false;
1306
1307 /* We don't need to split if operands are directly accessible. */
1308 if (s_operand (src, mode) || s_operand (dst, mode))
1309 return false;
1310
1311 /* Non-offsettable memory references cannot be split. */
1312 if ((GET_CODE (src) == MEM && !offsettable_memref_p (src))
1313 || (GET_CODE (dst) == MEM && !offsettable_memref_p (dst)))
1314 return false;
1315
1316 /* Moving the first subword must not clobber a register
1317 needed to move the second subword. */
1318 if (register_operand (dst, mode))
1319 {
1320 rtx subreg = operand_subword (dst, first_subword, 0, mode);
1321 if (reg_overlap_mentioned_p (subreg, src))
1322 return false;
1323 }
1324
1325 return true;
1326 }
1327
1328 /* Return true if it can be proven that [MEM1, MEM1 + SIZE]
1329 and [MEM2, MEM2 + SIZE] do overlap and false
1330 otherwise. */
1331
1332 bool
1333 s390_overlap_p (rtx mem1, rtx mem2, HOST_WIDE_INT size)
1334 {
1335 rtx addr1, addr2, addr_delta;
1336 HOST_WIDE_INT delta;
1337
1338 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
1339 return true;
1340
1341 if (size == 0)
1342 return false;
1343
1344 addr1 = XEXP (mem1, 0);
1345 addr2 = XEXP (mem2, 0);
1346
1347 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
1348
1349 /* This overlapping check is used by peepholes merging memory block operations.
1350 Overlapping operations would otherwise be recognized by the S/390 hardware
1351 and would fall back to a slower implementation. Allowing overlapping
1352 operations would lead to slow code but not to wrong code. Therefore we are
1353 somewhat optimistic if we cannot prove that the memory blocks are
1354 overlapping.
1355 That's why we return false here although this may accept operations on
1356 overlapping memory areas. */
1357 if (!addr_delta || GET_CODE (addr_delta) != CONST_INT)
1358 return false;
1359
1360 delta = INTVAL (addr_delta);
1361
1362 if (delta == 0
1363 || (delta > 0 && delta < size)
1364 || (delta < 0 && -delta < size))
1365 return true;
1366
1367 return false;
1368 }
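
/* Illustrative example: for MEM1 at (plus %r1 0), MEM2 at (plus %r1 100)
   and SIZE = 256 (register chosen only for illustration), the address
   delta simplifies to the constant 100, which is smaller than SIZE, so
   the blocks provably overlap and the function returns true.  */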
1369
1370 /* Check whether the address of memory reference MEM2 equals exactly
1371 the address of memory reference MEM1 plus DELTA. Return true if
1372 we can prove this to be the case, false otherwise. */
1373
1374 bool
1375 s390_offset_p (rtx mem1, rtx mem2, rtx delta)
1376 {
1377 rtx addr1, addr2, addr_delta;
1378
1379 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
1380 return false;
1381
1382 addr1 = XEXP (mem1, 0);
1383 addr2 = XEXP (mem2, 0);
1384
1385 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
1386 if (!addr_delta || !rtx_equal_p (addr_delta, delta))
1387 return false;
1388
1389 return true;
1390 }
1391
1392 /* Expand logical operator CODE in mode MODE with operands OPERANDS. */
1393
1394 void
1395 s390_expand_logical_operator (enum rtx_code code, enum machine_mode mode,
1396 rtx *operands)
1397 {
1398 enum machine_mode wmode = mode;
1399 rtx dst = operands[0];
1400 rtx src1 = operands[1];
1401 rtx src2 = operands[2];
1402 rtx op, clob, tem;
1403
1404 /* If we cannot handle the operation directly, use a temp register. */
1405 if (!s390_logical_operator_ok_p (operands))
1406 dst = gen_reg_rtx (mode);
1407
1408 /* QImode and HImode patterns make sense only if we have a destination
1409 in memory. Otherwise perform the operation in SImode. */
1410 if ((mode == QImode || mode == HImode) && GET_CODE (dst) != MEM)
1411 wmode = SImode;
1412
1413 /* Widen operands if required. */
1414 if (mode != wmode)
1415 {
1416 if (GET_CODE (dst) == SUBREG
1417 && (tem = simplify_subreg (wmode, dst, mode, 0)) != 0)
1418 dst = tem;
1419 else if (REG_P (dst))
1420 dst = gen_rtx_SUBREG (wmode, dst, 0);
1421 else
1422 dst = gen_reg_rtx (wmode);
1423
1424 if (GET_CODE (src1) == SUBREG
1425 && (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)
1426 src1 = tem;
1427 else if (GET_MODE (src1) != VOIDmode)
1428 src1 = gen_rtx_SUBREG (wmode, force_reg (mode, src1), 0);
1429
1430 if (GET_CODE (src2) == SUBREG
1431 && (tem = simplify_subreg (wmode, src2, mode, 0)) != 0)
1432 src2 = tem;
1433 else if (GET_MODE (src2) != VOIDmode)
1434 src2 = gen_rtx_SUBREG (wmode, force_reg (mode, src2), 0);
1435 }
1436
1437 /* Emit the instruction. */
1438 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, wmode, src1, src2));
1439 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
1440 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
1441
1442 /* Fix up the destination if needed. */
1443 if (dst != operands[0])
1444 emit_move_insn (operands[0], gen_lowpart (mode, dst));
1445 }
1446
1447 /* Check whether OPERANDS are OK for a logical operation (AND, IOR, XOR). */
1448
1449 bool
1450 s390_logical_operator_ok_p (rtx *operands)
1451 {
1452 /* If the destination operand is in memory, it needs to coincide
1453 with one of the source operands. After reload, it has to be
1454 the first source operand. */
1455 if (GET_CODE (operands[0]) == MEM)
1456 return rtx_equal_p (operands[0], operands[1])
1457 || (!reload_completed && rtx_equal_p (operands[0], operands[2]));
1458
1459 return true;
1460 }
1461
1462 /* Narrow logical operation CODE of memory operand MEMOP with immediate
1463 operand IMMOP to switch from SS to SI type instructions. */
1464
1465 void
1466 s390_narrow_logical_operator (enum rtx_code code, rtx *memop, rtx *immop)
1467 {
1468 int def = code == AND ? -1 : 0;
1469 HOST_WIDE_INT mask;
1470 int part;
1471
1472 gcc_assert (GET_CODE (*memop) == MEM);
1473 gcc_assert (!MEM_VOLATILE_P (*memop));
1474
1475 mask = s390_extract_part (*immop, QImode, def);
1476 part = s390_single_part (*immop, GET_MODE (*memop), QImode, def);
1477 gcc_assert (part >= 0);
1478
1479 *memop = adjust_address (*memop, QImode, part);
1480 *immop = gen_int_mode (mask, QImode);
1481 }
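
/* Illustrative example: narrowing an SImode AND of a memory operand with
   the immediate 0xffffff00 (DEF = -1) finds exactly one QImode part that
   differs from 0xff, so *MEMOP is adjusted to the byte at offset 3 and
   *IMMOP becomes 0, turning the SS-type operation into a single-byte
   SI-type AND (NI).  */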
1482
1483
1484 /* How to allocate a 'struct machine_function'. */
1485
1486 static struct machine_function *
1487 s390_init_machine_status (void)
1488 {
1489 return ggc_alloc_cleared_machine_function ();
1490 }
1491
1492 /* Change optimizations to be performed, depending on the
1493 optimization level. */
1494
1495 static const struct default_options s390_option_optimization_table[] =
1496 {
1497 { OPT_LEVELS_1_PLUS, OPT_fomit_frame_pointer, NULL, 1 },
1498
1499 /* ??? There are apparently still problems with -fcaller-saves. */
1500 { OPT_LEVELS_ALL, OPT_fcaller_saves, NULL, 0 },
1501
1502 /* Use MVCLE instructions to decrease code size if requested. */
1503 { OPT_LEVELS_SIZE, OPT_mmvcle, NULL, 1 },
1504
1505 { OPT_LEVELS_NONE, 0, NULL, 0 }
1506 };
1507
1508 /* Implement TARGET_OPTION_INIT_STRUCT. */
1509
1510 static void
1511 s390_option_init_struct (struct gcc_options *opts)
1512 {
1513 /* By default, always emit DWARF-2 unwind info. This allows debugging
1514 without maintaining a stack frame back-chain. */
1515 opts->x_flag_asynchronous_unwind_tables = 1;
1516 }
1517
1518 /* Return true if ARG is the name of a processor. Set *TYPE and *FLAGS
1519 to the associated processor_type and processor_flags if so. */
1520
1521 static bool
1522 s390_handle_arch_option (const char *arg,
1523 enum processor_type *type,
1524 int *flags)
1525 {
1526 static struct pta
1527 {
1528 const char *const name; /* processor name or nickname. */
1529 const enum processor_type processor;
1530 const int flags; /* From enum processor_flags. */
1531 }
1532 const processor_alias_table[] =
1533 {
1534 {"g5", PROCESSOR_9672_G5, PF_IEEE_FLOAT},
1535 {"g6", PROCESSOR_9672_G6, PF_IEEE_FLOAT},
1536 {"z900", PROCESSOR_2064_Z900, PF_IEEE_FLOAT | PF_ZARCH},
1537 {"z990", PROCESSOR_2084_Z990, PF_IEEE_FLOAT | PF_ZARCH
1538 | PF_LONG_DISPLACEMENT},
1539 {"z9-109", PROCESSOR_2094_Z9_109, PF_IEEE_FLOAT | PF_ZARCH
1540 | PF_LONG_DISPLACEMENT | PF_EXTIMM},
1541 {"z9-ec", PROCESSOR_2094_Z9_109, PF_IEEE_FLOAT | PF_ZARCH
1542 | PF_LONG_DISPLACEMENT | PF_EXTIMM | PF_DFP },
1543 {"z10", PROCESSOR_2097_Z10, PF_IEEE_FLOAT | PF_ZARCH
1544 | PF_LONG_DISPLACEMENT | PF_EXTIMM | PF_DFP | PF_Z10},
1545 {"z196", PROCESSOR_2817_Z196, PF_IEEE_FLOAT | PF_ZARCH
1546 | PF_LONG_DISPLACEMENT | PF_EXTIMM | PF_DFP | PF_Z10 | PF_Z196 },
1547 };
1548 size_t i;
1549
1550 for (i = 0; i < ARRAY_SIZE (processor_alias_table); i++)
1551 if (strcmp (arg, processor_alias_table[i].name) == 0)
1552 {
1553 *type = processor_alias_table[i].processor;
1554 *flags = processor_alias_table[i].flags;
1555 return true;
1556 }
1557 return false;
1558 }
1559
1560 /* Implement TARGET_HANDLE_OPTION. */
1561
1562 static bool
1563 s390_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
1564 {
1565 switch (code)
1566 {
1567 case OPT_march_:
1568 return s390_handle_arch_option (arg, &s390_arch, &s390_arch_flags);
1569
1570 case OPT_mstack_guard_:
1571 if (sscanf (arg, HOST_WIDE_INT_PRINT_DEC, &s390_stack_guard) != 1)
1572 return false;
1573 if (exact_log2 (s390_stack_guard) == -1)
1574 error ("stack guard value must be an exact power of 2");
1575 return true;
1576
1577 case OPT_mstack_size_:
1578 if (sscanf (arg, HOST_WIDE_INT_PRINT_DEC, &s390_stack_size) != 1)
1579 return false;
1580 if (exact_log2 (s390_stack_size) == -1)
1581 error ("stack size must be an exact power of 2");
1582 return true;
1583
1584 case OPT_mtune_:
1585 return s390_handle_arch_option (arg, &s390_tune, &s390_tune_flags);
1586
1587 case OPT_mwarn_framesize_:
1588 return sscanf (arg, HOST_WIDE_INT_PRINT_DEC, &s390_warn_framesize) == 1;
1589
1590 default:
1591 return true;
1592 }
1593 }
1594
1595 static void
1596 s390_option_override (void)
1597 {
1598 /* Set up function hooks. */
1599 init_machine_status = s390_init_machine_status;
1600
1601 /* Architecture mode defaults according to ABI. */
1602 if (!(target_flags_explicit & MASK_ZARCH))
1603 {
1604 if (TARGET_64BIT)
1605 target_flags |= MASK_ZARCH;
1606 else
1607 target_flags &= ~MASK_ZARCH;
1608 }
1609
1610 /* Determine processor architectural level. */
1611 if (!s390_arch_string)
1612 {
1613 s390_arch_string = TARGET_ZARCH? "z900" : "g5";
1614 s390_handle_arch_option (s390_arch_string, &s390_arch, &s390_arch_flags);
1615 }
1616
1617 /* Determine processor to tune for. */
1618 if (s390_tune == PROCESSOR_max)
1619 {
1620 s390_tune = s390_arch;
1621 s390_tune_flags = s390_arch_flags;
1622 }
1623
1624 /* Sanity checks. */
1625 if (TARGET_ZARCH && !TARGET_CPU_ZARCH)
1626 error ("z/Architecture mode not supported on %s", s390_arch_string);
1627 if (TARGET_64BIT && !TARGET_ZARCH)
1628 error ("64-bit ABI not supported in ESA/390 mode");
1629
1630 if (TARGET_HARD_DFP && !TARGET_DFP)
1631 {
1632 if (target_flags_explicit & MASK_HARD_DFP)
1633 {
1634 if (!TARGET_CPU_DFP)
1635 error ("Hardware decimal floating point instructions"
1636 " not available on %s", s390_arch_string);
1637 if (!TARGET_ZARCH)
1638 error ("Hardware decimal floating point instructions"
1639 " not available in ESA/390 mode");
1640 }
1641 else
1642 target_flags &= ~MASK_HARD_DFP;
1643 }
1644
1645 if ((target_flags_explicit & MASK_SOFT_FLOAT) && TARGET_SOFT_FLOAT)
1646 {
1647 if ((target_flags_explicit & MASK_HARD_DFP) && TARGET_HARD_DFP)
1648 error ("-mhard-dfp can't be used in conjunction with -msoft-float");
1649
1650 target_flags &= ~MASK_HARD_DFP;
1651 }
1652
1653 /* Set processor cost function. */
1654 switch (s390_tune)
1655 {
1656 case PROCESSOR_2084_Z990:
1657 s390_cost = &z990_cost;
1658 break;
1659 case PROCESSOR_2094_Z9_109:
1660 s390_cost = &z9_109_cost;
1661 break;
1662 case PROCESSOR_2097_Z10:
1663 	      s390_cost = &z10_cost;
	      break;
1664 case PROCESSOR_2817_Z196:
1665 s390_cost = &z196_cost;
1666 break;
1667 default:
1668 s390_cost = &z900_cost;
1669 }
1670
1671 if (TARGET_BACKCHAIN && TARGET_PACKED_STACK && TARGET_HARD_FLOAT)
1672 error ("-mbackchain -mpacked-stack -mhard-float are not supported "
1673 "in combination");
1674
1675 if (s390_stack_size)
1676 {
1677 if (s390_stack_guard >= s390_stack_size)
1678 error ("stack size must be greater than the stack guard value");
1679 else if (s390_stack_size > 1 << 16)
1680 error ("stack size must not be greater than 64k");
1681 }
1682 else if (s390_stack_guard)
1683 error ("-mstack-guard implies use of -mstack-size");
1684
1685 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
1686 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
1687 target_flags |= MASK_LONG_DOUBLE_128;
1688 #endif
1689
1690 if (s390_tune == PROCESSOR_2097_Z10
1691 || s390_tune == PROCESSOR_2817_Z196)
1692 {
1693 maybe_set_param_value (PARAM_MAX_UNROLLED_INSNS, 100,
1694 global_options.x_param_values,
1695 global_options_set.x_param_values);
1696 maybe_set_param_value (PARAM_MAX_UNROLL_TIMES, 32,
1697 global_options.x_param_values,
1698 global_options_set.x_param_values);
1699 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 2000,
1700 global_options.x_param_values,
1701 global_options_set.x_param_values);
1702 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEEL_TIMES, 64,
1703 global_options.x_param_values,
1704 global_options_set.x_param_values);
1705 }
1706
1707 maybe_set_param_value (PARAM_MAX_PENDING_LIST_LENGTH, 256,
1708 global_options.x_param_values,
1709 global_options_set.x_param_values);
1710 	  /* Values for loop prefetching.  */
1711 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, 256,
1712 global_options.x_param_values,
1713 global_options_set.x_param_values);
1714 maybe_set_param_value (PARAM_L1_CACHE_SIZE, 128,
1715 global_options.x_param_values,
1716 global_options_set.x_param_values);
1717 /* s390 has more than 2 levels and the size is much larger. Since
1718      we are always running virtualized, assume that we only get a small
1719 part of the caches above l1. */
1720 maybe_set_param_value (PARAM_L2_CACHE_SIZE, 1500,
1721 global_options.x_param_values,
1722 global_options_set.x_param_values);
1723 maybe_set_param_value (PARAM_PREFETCH_MIN_INSN_TO_MEM_RATIO, 2,
1724 global_options.x_param_values,
1725 global_options_set.x_param_values);
1726 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6,
1727 global_options.x_param_values,
1728 global_options_set.x_param_values);
1729
1730 /* This cannot reside in s390_option_optimization_table since HAVE_prefetch
1731 requires the arch flags to be evaluated already. Since prefetching
1732 is beneficial on s390, we enable it if available. */
1733 if (flag_prefetch_loop_arrays < 0 && HAVE_prefetch && optimize >= 3)
1734 flag_prefetch_loop_arrays = 1;
1735 }
1736
1737 /* Map for smallest class containing reg regno. */
1738
1739 const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
1740 { GENERAL_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1741 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1742 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1743 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1744 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1745 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1746 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1747 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1748 ADDR_REGS, CC_REGS, ADDR_REGS, ADDR_REGS,
1749 ACCESS_REGS, ACCESS_REGS
1750 };
1751
1752 /* Return attribute type of insn. */
1753
1754 static enum attr_type
1755 s390_safe_attr_type (rtx insn)
1756 {
1757 if (recog_memoized (insn) >= 0)
1758 return get_attr_type (insn);
1759 else
1760 return TYPE_NONE;
1761 }
1762
1763 /* Return true if DISP is a valid short displacement. */
1764
1765 static bool
1766 s390_short_displacement (rtx disp)
1767 {
1768 /* No displacement is OK. */
1769 if (!disp)
1770 return true;
1771
1772 /* Without the long displacement facility we don't need to
1773      distinguish between long and short displacement. */
1774 if (!TARGET_LONG_DISPLACEMENT)
1775 return true;
1776
1777 /* Integer displacement in range. */
1778 if (GET_CODE (disp) == CONST_INT)
1779 return INTVAL (disp) >= 0 && INTVAL (disp) < 4096;
1780
1781 /* GOT offset is not OK, the GOT can be large. */
1782 if (GET_CODE (disp) == CONST
1783 && GET_CODE (XEXP (disp, 0)) == UNSPEC
1784 && (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
1785 || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
1786 return false;
1787
1788 /* All other symbolic constants are literal pool references,
1789 which are OK as the literal pool must be small. */
1790 if (GET_CODE (disp) == CONST)
1791 return true;
1792
1793 return false;
1794 }
1795
1796 /* Decompose a RTL expression ADDR for a memory address into
1797 its components, returned in OUT.
1798
1799 Returns false if ADDR is not a valid memory address, true
1800 otherwise. If OUT is NULL, don't return the components,
1801 but check for validity only.
1802
1803 Note: Only addresses in canonical form are recognized.
1804 LEGITIMIZE_ADDRESS should convert non-canonical forms to the
1805 canonical form so that they will be recognized. */
1806
1807 static int
1808 s390_decompose_address (rtx addr, struct s390_address *out)
1809 {
1810 HOST_WIDE_INT offset = 0;
1811 rtx base = NULL_RTX;
1812 rtx indx = NULL_RTX;
1813 rtx disp = NULL_RTX;
1814 rtx orig_disp;
1815 bool pointer = false;
1816 bool base_ptr = false;
1817 bool indx_ptr = false;
1818 bool literal_pool = false;
1819
1820 /* We may need to substitute the literal pool base register into the address
1821 below. However, at this point we do not know which register is going to
1822 be used as base, so we substitute the arg pointer register. This is going
1823 to be treated as holding a pointer below -- it shouldn't be used for any
1824 other purpose. */
1825 rtx fake_pool_base = gen_rtx_REG (Pmode, ARG_POINTER_REGNUM);
1826
1827 /* Decompose address into base + index + displacement. */
1828
1829 if (GET_CODE (addr) == REG || GET_CODE (addr) == UNSPEC)
1830 base = addr;
1831
1832 else if (GET_CODE (addr) == PLUS)
1833 {
1834 rtx op0 = XEXP (addr, 0);
1835 rtx op1 = XEXP (addr, 1);
1836 enum rtx_code code0 = GET_CODE (op0);
1837 enum rtx_code code1 = GET_CODE (op1);
1838
1839 if (code0 == REG || code0 == UNSPEC)
1840 {
1841 if (code1 == REG || code1 == UNSPEC)
1842 {
1843 indx = op0; /* index + base */
1844 base = op1;
1845 }
1846
1847 else
1848 {
1849 base = op0; /* base + displacement */
1850 disp = op1;
1851 }
1852 }
1853
1854 else if (code0 == PLUS)
1855 {
1856 indx = XEXP (op0, 0); /* index + base + disp */
1857 base = XEXP (op0, 1);
1858 disp = op1;
1859 }
1860
1861 else
1862 {
1863 return false;
1864 }
1865 }
1866
1867 else
1868 disp = addr; /* displacement */
1869
1870 /* Extract integer part of displacement. */
1871 orig_disp = disp;
1872 if (disp)
1873 {
1874 if (GET_CODE (disp) == CONST_INT)
1875 {
1876 offset = INTVAL (disp);
1877 disp = NULL_RTX;
1878 }
1879 else if (GET_CODE (disp) == CONST
1880 && GET_CODE (XEXP (disp, 0)) == PLUS
1881 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
1882 {
1883 offset = INTVAL (XEXP (XEXP (disp, 0), 1));
1884 disp = XEXP (XEXP (disp, 0), 0);
1885 }
1886 }
1887
1888 /* Strip off CONST here to avoid special case tests later. */
1889 if (disp && GET_CODE (disp) == CONST)
1890 disp = XEXP (disp, 0);
1891
1892 /* We can convert literal pool addresses to
1893 displacements by basing them off the base register. */
1894 if (disp && GET_CODE (disp) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (disp))
1895 {
1896 /* Either base or index must be free to hold the base register. */
1897 if (!base)
1898 base = fake_pool_base, literal_pool = true;
1899 else if (!indx)
1900 indx = fake_pool_base, literal_pool = true;
1901 else
1902 return false;
1903
1904 /* Mark up the displacement. */
1905 disp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, disp),
1906 UNSPEC_LTREL_OFFSET);
1907 }
1908
1909 /* Validate base register. */
1910 if (base)
1911 {
1912 if (GET_CODE (base) == UNSPEC)
1913 switch (XINT (base, 1))
1914 {
1915 case UNSPEC_LTREF:
1916 if (!disp)
1917 disp = gen_rtx_UNSPEC (Pmode,
1918 gen_rtvec (1, XVECEXP (base, 0, 0)),
1919 UNSPEC_LTREL_OFFSET);
1920 else
1921 return false;
1922
1923 base = XVECEXP (base, 0, 1);
1924 break;
1925
1926 case UNSPEC_LTREL_BASE:
1927 if (XVECLEN (base, 0) == 1)
1928 base = fake_pool_base, literal_pool = true;
1929 else
1930 base = XVECEXP (base, 0, 1);
1931 break;
1932
1933 default:
1934 return false;
1935 }
1936
1937 if (!REG_P (base)
1938 || (GET_MODE (base) != SImode
1939 && GET_MODE (base) != Pmode))
1940 return false;
1941
1942 if (REGNO (base) == STACK_POINTER_REGNUM
1943 || REGNO (base) == FRAME_POINTER_REGNUM
1944 || ((reload_completed || reload_in_progress)
1945 && frame_pointer_needed
1946 && REGNO (base) == HARD_FRAME_POINTER_REGNUM)
1947 || REGNO (base) == ARG_POINTER_REGNUM
1948 || (flag_pic
1949 && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
1950 pointer = base_ptr = true;
1951
1952 if ((reload_completed || reload_in_progress)
1953 && base == cfun->machine->base_reg)
1954 pointer = base_ptr = literal_pool = true;
1955 }
1956
1957 /* Validate index register. */
1958 if (indx)
1959 {
1960 if (GET_CODE (indx) == UNSPEC)
1961 switch (XINT (indx, 1))
1962 {
1963 case UNSPEC_LTREF:
1964 if (!disp)
1965 disp = gen_rtx_UNSPEC (Pmode,
1966 gen_rtvec (1, XVECEXP (indx, 0, 0)),
1967 UNSPEC_LTREL_OFFSET);
1968 else
1969 return false;
1970
1971 indx = XVECEXP (indx, 0, 1);
1972 break;
1973
1974 case UNSPEC_LTREL_BASE:
1975 if (XVECLEN (indx, 0) == 1)
1976 indx = fake_pool_base, literal_pool = true;
1977 else
1978 indx = XVECEXP (indx, 0, 1);
1979 break;
1980
1981 default:
1982 return false;
1983 }
1984
1985 if (!REG_P (indx)
1986 || (GET_MODE (indx) != SImode
1987 && GET_MODE (indx) != Pmode))
1988 return false;
1989
1990 if (REGNO (indx) == STACK_POINTER_REGNUM
1991 || REGNO (indx) == FRAME_POINTER_REGNUM
1992 || ((reload_completed || reload_in_progress)
1993 && frame_pointer_needed
1994 && REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
1995 || REGNO (indx) == ARG_POINTER_REGNUM
1996 || (flag_pic
1997 && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
1998 pointer = indx_ptr = true;
1999
2000 if ((reload_completed || reload_in_progress)
2001 && indx == cfun->machine->base_reg)
2002 pointer = indx_ptr = literal_pool = true;
2003 }
2004
2005 /* Prefer to use pointer as base, not index. */
2006 if (base && indx && !base_ptr
2007 && (indx_ptr || (!REG_POINTER (base) && REG_POINTER (indx))))
2008 {
2009 rtx tmp = base;
2010 base = indx;
2011 indx = tmp;
2012 }
2013
2014 /* Validate displacement. */
2015 if (!disp)
2016 {
2017 /* If virtual registers are involved, the displacement will change later
2018 anyway as the virtual registers get eliminated. This could make a
2019 valid displacement invalid, but it is more likely to make an invalid
2020 displacement valid, because we sometimes access the register save area
2021 via negative offsets to one of those registers.
2022 Thus we don't check the displacement for validity here. If after
2023 elimination the displacement turns out to be invalid after all,
2024 this is fixed up by reload in any case. */
2025 if (base != arg_pointer_rtx
2026 && indx != arg_pointer_rtx
2027 && base != return_address_pointer_rtx
2028 && indx != return_address_pointer_rtx
2029 && base != frame_pointer_rtx
2030 && indx != frame_pointer_rtx
2031 && base != virtual_stack_vars_rtx
2032 && indx != virtual_stack_vars_rtx)
2033 if (!DISP_IN_RANGE (offset))
2034 return false;
2035 }
2036 else
2037 {
2038 /* All the special cases are pointers. */
2039 pointer = true;
2040
2041 /* In the small-PIC case, the linker converts @GOT
2042 and @GOTNTPOFF offsets to possible displacements. */
2043 if (GET_CODE (disp) == UNSPEC
2044 && (XINT (disp, 1) == UNSPEC_GOT
2045 || XINT (disp, 1) == UNSPEC_GOTNTPOFF)
2046 && flag_pic == 1)
2047 {
2048 ;
2049 }
2050
2051 /* Accept pool label offsets. */
2052 else if (GET_CODE (disp) == UNSPEC
2053 && XINT (disp, 1) == UNSPEC_POOL_OFFSET)
2054 ;
2055
2056 /* Accept literal pool references. */
2057 else if (GET_CODE (disp) == UNSPEC
2058 && XINT (disp, 1) == UNSPEC_LTREL_OFFSET)
2059 {
2060 orig_disp = gen_rtx_CONST (Pmode, disp);
2061 if (offset)
2062 {
2063 /* If we have an offset, make sure it does not
2064 exceed the size of the constant pool entry. */
2065 rtx sym = XVECEXP (disp, 0, 0);
2066 if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))
2067 return false;
2068
2069 orig_disp = plus_constant (orig_disp, offset);
2070 }
2071 }
2072
2073 else
2074 return false;
2075 }
2076
2077 if (!base && !indx)
2078 pointer = true;
2079
2080 if (out)
2081 {
2082 out->base = base;
2083 out->indx = indx;
2084 out->disp = orig_disp;
2085 out->pointer = pointer;
2086 out->literal_pool = literal_pool;
2087 }
2088
2089 return true;
2090 }
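
/* Usage sketch (editorial illustration, not part of the original port):
   callers within this file first decompose an address and then inspect
   the parts, along the lines of

       struct s390_address parts;

       if (s390_decompose_address (XEXP (mem, 0), &parts)
           && parts.indx == NULL_RTX)
         ... address has the base + displacement form ...

   where MEM stands for a hypothetical memory operand.  For example,
   (plus (reg 11) (const_int 40)) decomposes into base = (reg 11),
   indx = NULL_RTX and disp = (const_int 40).  */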
2091
2092 /* Decompose a RTL expression OP for a shift count into its components,
2093 and return the base register in BASE and the offset in OFFSET.
2094
2095 Return true if OP is a valid shift count, false if not. */
2096
2097 bool
2098 s390_decompose_shift_count (rtx op, rtx *base, HOST_WIDE_INT *offset)
2099 {
2100 HOST_WIDE_INT off = 0;
2101
2102 /* We can have an integer constant, an address register,
2103 or a sum of the two. */
2104 if (GET_CODE (op) == CONST_INT)
2105 {
2106 off = INTVAL (op);
2107 op = NULL_RTX;
2108 }
2109 if (op && GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT)
2110 {
2111 off = INTVAL (XEXP (op, 1));
2112 op = XEXP (op, 0);
2113 }
2114 while (op && GET_CODE (op) == SUBREG)
2115 op = SUBREG_REG (op);
2116
2117 if (op && GET_CODE (op) != REG)
2118 return false;
2119
2120 if (offset)
2121 *offset = off;
2122 if (base)
2123 *base = op;
2124
2125 return true;
2126 }
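
/* Illustrative example (editorial addition): for a shift count given as
   (plus (reg 3) (const_int 7)) the routine above returns true with
   *BASE = (reg 3) and *OFFSET = 7; for a plain (const_int 5) it returns
   true with *BASE = NULL_RTX and *OFFSET = 5.  SUBREGs around the
   register are stripped; anything that is not a register, a constant,
   or such a register-plus-constant sum is rejected.  */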
2127
2128
2129 /* Return true if OP is a valid address without index. */
2130
2131 bool
2132 s390_legitimate_address_without_index_p (rtx op)
2133 {
2134 struct s390_address addr;
2135
2136 if (!s390_decompose_address (XEXP (op, 0), &addr))
2137 return false;
2138 if (addr.indx)
2139 return false;
2140
2141 return true;
2142 }
2143
2144
2145 /* Return true if ADDR is of kind symbol_ref or symbol_ref + const_int
2146 and return these parts in SYMREF and ADDEND. You can pass NULL in
2147 SYMREF and/or ADDEND if you are not interested in these values.
2148 Literal pool references are *not* considered symbol references. */
2149
2150 static bool
2151 s390_symref_operand_p (rtx addr, rtx *symref, HOST_WIDE_INT *addend)
2152 {
2153 HOST_WIDE_INT tmpaddend = 0;
2154
2155 if (GET_CODE (addr) == CONST)
2156 addr = XEXP (addr, 0);
2157
2158 if (GET_CODE (addr) == PLUS)
2159 {
2160 if (GET_CODE (XEXP (addr, 0)) == SYMBOL_REF
2161 && !CONSTANT_POOL_ADDRESS_P (XEXP (addr, 0))
2162 && CONST_INT_P (XEXP (addr, 1)))
2163 {
2164 tmpaddend = INTVAL (XEXP (addr, 1));
2165 addr = XEXP (addr, 0);
2166 }
2167 else
2168 return false;
2169 }
2170 else
2171 if (GET_CODE (addr) != SYMBOL_REF || CONSTANT_POOL_ADDRESS_P (addr))
2172 return false;
2173
2174 if (symref)
2175 *symref = addr;
2176 if (addend)
2177 *addend = tmpaddend;
2178
2179 return true;
2180 }
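
/* Illustrative example (editorial addition): for an operand of the form
   (const (plus (symbol_ref "foo") (const_int 8))) the routine above
   returns true with *SYMREF = (symbol_ref "foo") and *ADDEND = 8; a bare
   (symbol_ref "foo") yields *ADDEND = 0.  SYMBOL_REFs that refer into
   the literal pool are rejected, as documented above.  */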
2181
2182
2183 /* Return true if the address in OP is valid for constraint letter C
2184 if wrapped in a MEM rtx. Set LIT_POOL_OK to true if literal
2185 pool MEMs should be accepted. Only the Q, R, S, T constraint
2186 letters are allowed for C. */
2187
2188 static int
2189 s390_check_qrst_address (char c, rtx op, bool lit_pool_ok)
2190 {
2191 struct s390_address addr;
2192 bool decomposed = false;
2193
2194 /* This check makes sure that no symbolic addresses (except literal
2195 pool references) are accepted by the R or T constraints. */
2196 if (s390_symref_operand_p (op, NULL, NULL))
2197 return 0;
2198
2199 /* Ensure literal pool references are only accepted if LIT_POOL_OK. */
2200 if (!lit_pool_ok)
2201 {
2202 if (!s390_decompose_address (op, &addr))
2203 return 0;
2204 if (addr.literal_pool)
2205 return 0;
2206 decomposed = true;
2207 }
2208
2209 switch (c)
2210 {
2211 case 'Q': /* no index short displacement */
2212 if (!decomposed && !s390_decompose_address (op, &addr))
2213 return 0;
2214 if (addr.indx)
2215 return 0;
2216 if (!s390_short_displacement (addr.disp))
2217 return 0;
2218 break;
2219
2220 case 'R': /* with index short displacement */
2221 if (TARGET_LONG_DISPLACEMENT)
2222 {
2223 if (!decomposed && !s390_decompose_address (op, &addr))
2224 return 0;
2225 if (!s390_short_displacement (addr.disp))
2226 return 0;
2227 }
2228 /* Any invalid address here will be fixed up by reload,
2229 so accept it for the most generic constraint. */
2230 break;
2231
2232 case 'S': /* no index long displacement */
2233 if (!TARGET_LONG_DISPLACEMENT)
2234 return 0;
2235 if (!decomposed && !s390_decompose_address (op, &addr))
2236 return 0;
2237 if (addr.indx)
2238 return 0;
2239 if (s390_short_displacement (addr.disp))
2240 return 0;
2241 break;
2242
2243 case 'T': /* with index long displacement */
2244 if (!TARGET_LONG_DISPLACEMENT)
2245 return 0;
2246 /* Any invalid address here will be fixed up by reload,
2247 so accept it for the most generic constraint. */
2248 if ((decomposed || s390_decompose_address (op, &addr))
2249 && s390_short_displacement (addr.disp))
2250 return 0;
2251 break;
2252 default:
2253 return 0;
2254 }
2255 return 1;
2256 }
2257
2258
2259 /* Evaluates constraint strings described by the regular expression
2260 ([A|B|Z](Q|R|S|T))|U|W|Y and returns 1 if OP is a valid operand for
2261 the constraint given in STR, and 0 otherwise. */
2262
2263 int
2264 s390_mem_constraint (const char *str, rtx op)
2265 {
2266 char c = str[0];
2267
2268 switch (c)
2269 {
2270 case 'A':
2271 /* Check for offsettable variants of memory constraints. */
2272 if (!MEM_P (op) || MEM_VOLATILE_P (op))
2273 return 0;
2274 if ((reload_completed || reload_in_progress)
2275 ? !offsettable_memref_p (op) : !offsettable_nonstrict_memref_p (op))
2276 return 0;
2277 return s390_check_qrst_address (str[1], XEXP (op, 0), true);
2278 case 'B':
2279 /* Check for non-literal-pool variants of memory constraints. */
2280 if (!MEM_P (op))
2281 return 0;
2282 return s390_check_qrst_address (str[1], XEXP (op, 0), false);
2283 case 'Q':
2284 case 'R':
2285 case 'S':
2286 case 'T':
2287 if (GET_CODE (op) != MEM)
2288 return 0;
2289 return s390_check_qrst_address (c, XEXP (op, 0), true);
2290 case 'U':
2291 return (s390_check_qrst_address ('Q', op, true)
2292 || s390_check_qrst_address ('R', op, true));
2293 case 'W':
2294 return (s390_check_qrst_address ('S', op, true)
2295 || s390_check_qrst_address ('T', op, true));
2296 case 'Y':
2297 /* Simply check for the basic form of a shift count. Reload will
2298 take care of making sure we have a proper base register. */
2299 if (!s390_decompose_shift_count (op, NULL, NULL))
2300 return 0;
2301 break;
2302 case 'Z':
2303 return s390_check_qrst_address (str[1], op, true);
2304 default:
2305 return 0;
2306 }
2307 return 1;
2308 }
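
/* Usage sketch (editorial illustration): as documented in
   s390_check_qrst_address, the base letters select an address shape
   (Q: base + short displacement without index, R: with index,
   S/T: the corresponding long-displacement variants), and a prefix
   refines the check.  A hypothetical caller might test, for a MEM
   operand OP:

       s390_mem_constraint ("Q", op);     base + short displacement only
       s390_mem_constraint ("AT", op);    additionally requires OP to be
                                          offsettable and non-volatile
       s390_mem_constraint ("BQ", op);    additionally rejects literal
                                          pool references
   */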
2309
2310
2311 /* Evaluates constraint strings starting with letter O. Input
2312 parameter C is the letter that follows the "O" in the constraint
2313 string. Returns 1 if VALUE meets the respective constraint and 0
2314 otherwise. */
2315
2316 int
2317 s390_O_constraint_str (const char c, HOST_WIDE_INT value)
2318 {
2319 if (!TARGET_EXTIMM)
2320 return 0;
2321
2322 switch (c)
2323 {
2324 case 's':
2325 return trunc_int_for_mode (value, SImode) == value;
2326
2327 case 'p':
2328 return value == 0
2329 || s390_single_part (GEN_INT (value), DImode, SImode, 0) == 1;
2330
2331 case 'n':
2332 return s390_single_part (GEN_INT (value - 1), DImode, SImode, -1) == 1;
2333
2334 default:
2335 gcc_unreachable ();
2336 }
2337 }
2338
2339
2340 /* Evaluates constraint strings starting with letter N. Parameter STR
2341 contains the letters following letter "N" in the constraint string.
2342 Returns true if VALUE matches the constraint. */
2343
2344 int
2345 s390_N_constraint_str (const char *str, HOST_WIDE_INT value)
2346 {
2347 enum machine_mode mode, part_mode;
2348 int def;
2349 int part, part_goal;
2350
2351
2352 if (str[0] == 'x')
2353 part_goal = -1;
2354 else
2355 part_goal = str[0] - '0';
2356
2357 switch (str[1])
2358 {
2359 case 'Q':
2360 part_mode = QImode;
2361 break;
2362 case 'H':
2363 part_mode = HImode;
2364 break;
2365 case 'S':
2366 part_mode = SImode;
2367 break;
2368 default:
2369 return 0;
2370 }
2371
2372 switch (str[2])
2373 {
2374 case 'H':
2375 mode = HImode;
2376 break;
2377 case 'S':
2378 mode = SImode;
2379 break;
2380 case 'D':
2381 mode = DImode;
2382 break;
2383 default:
2384 return 0;
2385 }
2386
2387 switch (str[3])
2388 {
2389 case '0':
2390 def = 0;
2391 break;
2392 case 'F':
2393 def = -1;
2394 break;
2395 default:
2396 return 0;
2397 }
2398
2399 if (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (part_mode))
2400 return 0;
2401
2402 part = s390_single_part (GEN_INT (value), mode, part_mode, def);
2403 if (part < 0)
2404 return 0;
2405 if (part_goal != -1 && part_goal != part)
2406 return 0;
2407
2408 return 1;
2409 }
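
/* Illustrative decoding (editorial addition): with STR = "xQD0" the
   routine above checks that VALUE, viewed in DImode, has exactly one
   QImode part differing from an all-zero default, with no restriction
   ('x') on which part it is; with STR = "0HD0" the single differing
   HImode part must additionally be part number 0.  Which N constraint
   variants actually exist is defined in constraints.md, not here.  */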
2410
2411
2412 /* Returns true if the input parameter VALUE is a float zero. */
2413
2414 int
2415 s390_float_const_zero_p (rtx value)
2416 {
2417 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
2418 && value == CONST0_RTX (GET_MODE (value)));
2419 }
2420
2421
2422 /* Compute a (partial) cost for rtx X. Return true if the complete
2423 cost has been computed, and false if subexpressions should be
2424 scanned. In either case, *TOTAL contains the cost result.
2425 CODE contains GET_CODE (x), OUTER_CODE contains the code
2426 of the superexpression of x. */
2427
2428 static bool
2429 s390_rtx_costs (rtx x, int code, int outer_code, int *total,
2430 bool speed ATTRIBUTE_UNUSED)
2431 {
2432 switch (code)
2433 {
2434 case CONST:
2435 case CONST_INT:
2436 case LABEL_REF:
2437 case SYMBOL_REF:
2438 case CONST_DOUBLE:
2439 case MEM:
2440 *total = 0;
2441 return true;
2442
2443 case ASHIFT:
2444 case ASHIFTRT:
2445 case LSHIFTRT:
2446 case ROTATE:
2447 case ROTATERT:
2448 case AND:
2449 case IOR:
2450 case XOR:
2451 case NEG:
2452 case NOT:
2453 *total = COSTS_N_INSNS (1);
2454 return false;
2455
2456 case PLUS:
2457 case MINUS:
2458 /* Check for multiply and add. */
2459 if ((GET_MODE (x) == DFmode || GET_MODE (x) == SFmode)
2460 && GET_CODE (XEXP (x, 0)) == MULT
2461 && TARGET_HARD_FLOAT && TARGET_FUSED_MADD)
2462 {
2463 /* This is the multiply and add case. */
2464 if (GET_MODE (x) == DFmode)
2465 *total = s390_cost->madbr;
2466 else
2467 *total = s390_cost->maebr;
2468 *total += (rtx_cost (XEXP (XEXP (x, 0), 0), MULT, speed)
2469 + rtx_cost (XEXP (XEXP (x, 0), 1), MULT, speed)
2470 + rtx_cost (XEXP (x, 1), (enum rtx_code) code, speed));
2471 return true; /* Do not do an additional recursive descent. */
2472 }
2473 *total = COSTS_N_INSNS (1);
2474 return false;
2475
2476 case MULT:
2477 switch (GET_MODE (x))
2478 {
2479 case SImode:
2480 {
2481 rtx left = XEXP (x, 0);
2482 rtx right = XEXP (x, 1);
2483 if (GET_CODE (right) == CONST_INT
2484 && CONST_OK_FOR_K (INTVAL (right)))
2485 *total = s390_cost->mhi;
2486 else if (GET_CODE (left) == SIGN_EXTEND)
2487 *total = s390_cost->mh;
2488 else
2489 *total = s390_cost->ms; /* msr, ms, msy */
2490 break;
2491 }
2492 case DImode:
2493 {
2494 rtx left = XEXP (x, 0);
2495 rtx right = XEXP (x, 1);
2496 if (TARGET_ZARCH)
2497 {
2498 if (GET_CODE (right) == CONST_INT
2499 && CONST_OK_FOR_K (INTVAL (right)))
2500 *total = s390_cost->mghi;
2501 else if (GET_CODE (left) == SIGN_EXTEND)
2502 *total = s390_cost->msgf;
2503 else
2504 *total = s390_cost->msg; /* msgr, msg */
2505 }
2506 else /* TARGET_31BIT */
2507 {
2508 if (GET_CODE (left) == SIGN_EXTEND
2509 && GET_CODE (right) == SIGN_EXTEND)
2510 /* mulsidi case: mr, m */
2511 *total = s390_cost->m;
2512 else if (GET_CODE (left) == ZERO_EXTEND
2513 && GET_CODE (right) == ZERO_EXTEND
2514 && TARGET_CPU_ZARCH)
2515 /* umulsidi case: ml, mlr */
2516 *total = s390_cost->ml;
2517 else
2518 /* Complex calculation is required. */
2519 *total = COSTS_N_INSNS (40);
2520 }
2521 break;
2522 }
2523 case SFmode:
2524 case DFmode:
2525 *total = s390_cost->mult_df;
2526 break;
2527 case TFmode:
2528 *total = s390_cost->mxbr;
2529 break;
2530 default:
2531 return false;
2532 }
2533 return false;
2534
2535 case UDIV:
2536 case UMOD:
2537 if (GET_MODE (x) == TImode) /* 128 bit division */
2538 *total = s390_cost->dlgr;
2539 else if (GET_MODE (x) == DImode)
2540 {
2541 rtx right = XEXP (x, 1);
2542 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
2543 *total = s390_cost->dlr;
2544 else /* 64 by 64 bit division */
2545 *total = s390_cost->dlgr;
2546 }
2547 else if (GET_MODE (x) == SImode) /* 32 bit division */
2548 *total = s390_cost->dlr;
2549 return false;
2550
2551 case DIV:
2552 case MOD:
2553 if (GET_MODE (x) == DImode)
2554 {
2555 rtx right = XEXP (x, 1);
2556 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
2557 if (TARGET_ZARCH)
2558 *total = s390_cost->dsgfr;
2559 else
2560 *total = s390_cost->dr;
2561 else /* 64 by 64 bit division */
2562 *total = s390_cost->dsgr;
2563 }
2564 else if (GET_MODE (x) == SImode) /* 32 bit division */
2565 *total = s390_cost->dlr;
2566 else if (GET_MODE (x) == SFmode)
2567 {
2568 *total = s390_cost->debr;
2569 }
2570 else if (GET_MODE (x) == DFmode)
2571 {
2572 *total = s390_cost->ddbr;
2573 }
2574 else if (GET_MODE (x) == TFmode)
2575 {
2576 *total = s390_cost->dxbr;
2577 }
2578 return false;
2579
2580 case SQRT:
2581 if (GET_MODE (x) == SFmode)
2582 *total = s390_cost->sqebr;
2583 else if (GET_MODE (x) == DFmode)
2584 *total = s390_cost->sqdbr;
2585 else /* TFmode */
2586 *total = s390_cost->sqxbr;
2587 return false;
2588
2589 case SIGN_EXTEND:
2590 case ZERO_EXTEND:
2591 if (outer_code == MULT || outer_code == DIV || outer_code == MOD
2592 || outer_code == PLUS || outer_code == MINUS
2593 || outer_code == COMPARE)
2594 *total = 0;
2595 return false;
2596
2597 case COMPARE:
2598 *total = COSTS_N_INSNS (1);
2599 if (GET_CODE (XEXP (x, 0)) == AND
2600 && GET_CODE (XEXP (x, 1)) == CONST_INT
2601 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
2602 {
2603 rtx op0 = XEXP (XEXP (x, 0), 0);
2604 rtx op1 = XEXP (XEXP (x, 0), 1);
2605 rtx op2 = XEXP (x, 1);
2606
2607 if (memory_operand (op0, GET_MODE (op0))
2608 && s390_tm_ccmode (op1, op2, 0) != VOIDmode)
2609 return true;
2610 if (register_operand (op0, GET_MODE (op0))
2611 && s390_tm_ccmode (op1, op2, 1) != VOIDmode)
2612 return true;
2613 }
2614 return false;
2615
2616 default:
2617 return false;
2618 }
2619 }
2620
2621 /* Return the cost of an address rtx ADDR. */
2622
2623 static int
2624 s390_address_cost (rtx addr, bool speed ATTRIBUTE_UNUSED)
2625 {
2626 struct s390_address ad;
2627 if (!s390_decompose_address (addr, &ad))
2628 return 1000;
2629
2630 return ad.indx? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (1);
2631 }
2632
2633 /* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
2634 otherwise return 0. */
2635
2636 int
2637 tls_symbolic_operand (rtx op)
2638 {
2639 if (GET_CODE (op) != SYMBOL_REF)
2640 return 0;
2641 return SYMBOL_REF_TLS_MODEL (op);
2642 }
2643 \f
2644 /* Split DImode access register reference REG (on 64-bit) into its constituent
2645 low and high parts, and store them into LO and HI. Note that gen_lowpart/
2646 gen_highpart cannot be used as they assume all registers are word-sized,
2647 while our access registers have only half that size. */
2648
2649 void
2650 s390_split_access_reg (rtx reg, rtx *lo, rtx *hi)
2651 {
2652 gcc_assert (TARGET_64BIT);
2653 gcc_assert (ACCESS_REG_P (reg));
2654 gcc_assert (GET_MODE (reg) == DImode);
2655 gcc_assert (!(REGNO (reg) & 1));
2656
2657 *lo = gen_rtx_REG (SImode, REGNO (reg) + 1);
2658 *hi = gen_rtx_REG (SImode, REGNO (reg));
2659 }
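
/* Illustrative example (editorial addition): for an even/odd access
   register pair holding a DImode value, the routine above returns the
   odd (higher-numbered) register of the pair as the low SImode part and
   the even register as the high part, matching big-endian word order.  */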
2660
2661 /* Return true if OP contains a symbol reference. */
2662
2663 bool
2664 symbolic_reference_mentioned_p (rtx op)
2665 {
2666 const char *fmt;
2667 int i;
2668
2669 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
2670 return 1;
2671
2672 fmt = GET_RTX_FORMAT (GET_CODE (op));
2673 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2674 {
2675 if (fmt[i] == 'E')
2676 {
2677 int j;
2678
2679 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2680 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2681 return 1;
2682 }
2683
2684 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
2685 return 1;
2686 }
2687
2688 return 0;
2689 }
2690
2691 /* Return true if OP contains a reference to a thread-local symbol. */
2692
2693 bool
2694 tls_symbolic_reference_mentioned_p (rtx op)
2695 {
2696 const char *fmt;
2697 int i;
2698
2699 if (GET_CODE (op) == SYMBOL_REF)
2700 return tls_symbolic_operand (op);
2701
2702 fmt = GET_RTX_FORMAT (GET_CODE (op));
2703 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2704 {
2705 if (fmt[i] == 'E')
2706 {
2707 int j;
2708
2709 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2710 if (tls_symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2711 return true;
2712 }
2713
2714 else if (fmt[i] == 'e' && tls_symbolic_reference_mentioned_p (XEXP (op, i)))
2715 return true;
2716 }
2717
2718 return false;
2719 }
2720
2721
2722 /* Return true if OP is a legitimate general operand when
2723 generating PIC code. It is given that flag_pic is on
2724 and that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2725
2726 int
2727 legitimate_pic_operand_p (rtx op)
2728 {
2729 /* Accept all non-symbolic constants. */
2730 if (!SYMBOLIC_CONST (op))
2731 return 1;
2732
2733 /* Reject everything else; must be handled
2734 via emit_symbolic_move. */
2735 return 0;
2736 }
2737
2738 /* Returns true if the constant value OP is a legitimate general operand.
2739 It is given that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2740
2741 int
2742 legitimate_constant_p (rtx op)
2743 {
2744 /* Accept all non-symbolic constants. */
2745 if (!SYMBOLIC_CONST (op))
2746 return 1;
2747
2748 /* Accept immediate LARL operands. */
2749 if (TARGET_CPU_ZARCH && larl_operand (op, VOIDmode))
2750 return 1;
2751
2752 /* Thread-local symbols are never legal constants. This is
2753 so that emit_call knows that computing such addresses
2754 might require a function call. */
2755 if (TLS_SYMBOLIC_CONST (op))
2756 return 0;
2757
2758 /* In the PIC case, symbolic constants must *not* be
2759 forced into the literal pool. We accept them here,
2760 so that they will be handled by emit_symbolic_move. */
2761 if (flag_pic)
2762 return 1;
2763
2764 /* All remaining non-PIC symbolic constants are
2765 forced into the literal pool. */
2766 return 0;
2767 }
2768
2769 /* Determine if it's legal to put X into the constant pool. This
2770 is not possible if X contains the address of a symbol that is
2771 not constant (TLS) or not known at final link time (PIC). */
2772
2773 static bool
2774 s390_cannot_force_const_mem (rtx x)
2775 {
2776 switch (GET_CODE (x))
2777 {
2778 case CONST_INT:
2779 case CONST_DOUBLE:
2780 /* Accept all non-symbolic constants. */
2781 return false;
2782
2783 case LABEL_REF:
2784 /* Labels are OK iff we are non-PIC. */
2785 return flag_pic != 0;
2786
2787 case SYMBOL_REF:
2788 /* 'Naked' TLS symbol references are never OK,
2789 non-TLS symbols are OK iff we are non-PIC. */
2790 if (tls_symbolic_operand (x))
2791 return true;
2792 else
2793 return flag_pic != 0;
2794
2795 case CONST:
2796 return s390_cannot_force_const_mem (XEXP (x, 0));
2797 case PLUS:
2798 case MINUS:
2799 return s390_cannot_force_const_mem (XEXP (x, 0))
2800 || s390_cannot_force_const_mem (XEXP (x, 1));
2801
2802 case UNSPEC:
2803 switch (XINT (x, 1))
2804 {
2805 /* Only lt-relative or GOT-relative UNSPECs are OK. */
2806 case UNSPEC_LTREL_OFFSET:
2807 case UNSPEC_GOT:
2808 case UNSPEC_GOTOFF:
2809 case UNSPEC_PLTOFF:
2810 case UNSPEC_TLSGD:
2811 case UNSPEC_TLSLDM:
2812 case UNSPEC_NTPOFF:
2813 case UNSPEC_DTPOFF:
2814 case UNSPEC_GOTNTPOFF:
2815 case UNSPEC_INDNTPOFF:
2816 return false;
2817
2818 /* If the literal pool shares the code section, we can put
2819 execute template placeholders into the pool as well. */
2820 case UNSPEC_INSN:
2821 return TARGET_CPU_ZARCH;
2822
2823 default:
2824 return true;
2825 }
2826 break;
2827
2828 default:
2829 gcc_unreachable ();
2830 }
2831 }
2832
2833 /* Returns true if the constant value OP is a legitimate general
2834 operand during and after reload. The difference to
2835 legitimate_constant_p is that this function will not accept
2836 a constant that would need to be forced to the literal pool
2837 before it can be used as operand.
2838 This function accepts all constants which can be loaded directly
2839 into a GPR. */
2840
2841 bool
2842 legitimate_reload_constant_p (rtx op)
2843 {
2844 /* Accept la(y) operands. */
2845 if (GET_CODE (op) == CONST_INT
2846 && DISP_IN_RANGE (INTVAL (op)))
2847 return true;
2848
2849 /* Accept l(g)hi/l(g)fi operands. */
2850 if (GET_CODE (op) == CONST_INT
2851 && (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_Os (INTVAL (op))))
2852 return true;
2853
2854 /* Accept lliXX operands. */
2855 if (TARGET_ZARCH
2856 && GET_CODE (op) == CONST_INT
2857 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2858 && s390_single_part (op, word_mode, HImode, 0) >= 0)
2859 return true;
2860
2861 if (TARGET_EXTIMM
2862 && GET_CODE (op) == CONST_INT
2863 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2864 && s390_single_part (op, word_mode, SImode, 0) >= 0)
2865 return true;
2866
2867 /* Accept larl operands. */
2868 if (TARGET_CPU_ZARCH
2869 && larl_operand (op, VOIDmode))
2870 return true;
2871
2872 /* Accept floating-point zero operands that fit into a single GPR. */
2873 if (GET_CODE (op) == CONST_DOUBLE
2874 && s390_float_const_zero_p (op)
2875 && GET_MODE_SIZE (GET_MODE (op)) <= UNITS_PER_WORD)
2876 return true;
2877
2878 /* Accept double-word operands that can be split. */
2879 if (GET_CODE (op) == CONST_INT
2880 && trunc_int_for_mode (INTVAL (op), word_mode) != INTVAL (op))
2881 {
2882 enum machine_mode dword_mode = word_mode == SImode ? DImode : TImode;
2883 rtx hi = operand_subword (op, 0, 0, dword_mode);
2884 rtx lo = operand_subword (op, 1, 0, dword_mode);
2885 return legitimate_reload_constant_p (hi)
2886 && legitimate_reload_constant_p (lo);
2887 }
2888
2889 /* Everything else cannot be handled without reload. */
2890 return false;
2891 }
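
/* Illustrative example (editorial addition): on a 31-bit target
   (word_mode == SImode) a 64-bit constant such as 0x0000000100000001 is
   accepted by the double-word case above, since each 32-bit half (here
   the value 1) can itself be loaded directly; if either half would need
   the literal pool, the whole constant is rejected.  */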
2892
2893 /* Returns true if the constant value OP is a legitimate fp operand
2894 during and after reload.
2895 This function accepts all constants which can be loaded directly
2896 into an FPR. */
2897
2898 static bool
2899 legitimate_reload_fp_constant_p (rtx op)
2900 {
2901 /* Accept floating-point zero operands if the load zero instruction
2902 can be used. */
2903 if (TARGET_Z196
2904 && GET_CODE (op) == CONST_DOUBLE
2905 && s390_float_const_zero_p (op))
2906 return true;
2907
2908 return false;
2909 }
2910
2911 /* Given an rtx OP being reloaded into a reg required to be in class RCLASS,
2912 return the class of reg to actually use. */
2913
2914 enum reg_class
2915 s390_preferred_reload_class (rtx op, enum reg_class rclass)
2916 {
2917 switch (GET_CODE (op))
2918 {
2919 /* Constants we cannot reload into general registers
2920 must be forced into the literal pool. */
2921 case CONST_DOUBLE:
2922 case CONST_INT:
2923 if (reg_class_subset_p (GENERAL_REGS, rclass)
2924 && legitimate_reload_constant_p (op))
2925 return GENERAL_REGS;
2926 else if (reg_class_subset_p (ADDR_REGS, rclass)
2927 && legitimate_reload_constant_p (op))
2928 return ADDR_REGS;
2929 else if (reg_class_subset_p (FP_REGS, rclass)
2930 && legitimate_reload_fp_constant_p (op))
2931 return FP_REGS;
2932 return NO_REGS;
2933
2934 /* If a symbolic constant or a PLUS is reloaded,
2935 it is most likely being used as an address, so
2936 prefer ADDR_REGS. If 'class' is not a superset
2937 of ADDR_REGS, e.g. FP_REGS, reject this reload. */
2938 case PLUS:
2939 case LABEL_REF:
2940 case SYMBOL_REF:
2941 case CONST:
2942 if (reg_class_subset_p (ADDR_REGS, rclass))
2943 return ADDR_REGS;
2944 else
2945 return NO_REGS;
2946
2947 default:
2948 break;
2949 }
2950
2951 return rclass;
2952 }
2953
2954 /* Return true if ADDR is SYMBOL_REF + addend with addend being a
2955 multiple of ALIGNMENT and the SYMBOL_REF being naturally
2956 aligned. */
2957
2958 bool
2959 s390_check_symref_alignment (rtx addr, HOST_WIDE_INT alignment)
2960 {
2961 HOST_WIDE_INT addend;
2962 rtx symref;
2963
2964 if (!s390_symref_operand_p (addr, &symref, &addend))
2965 return false;
2966
2967 return (!SYMBOL_REF_NOT_NATURALLY_ALIGNED_P (symref)
2968 && !(addend & (alignment - 1)));
2969 }
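
/* Illustrative example (editorial addition): with ALIGNMENT = 4, an
   address of the form (symbol_ref "foo") + 8 passes the check above
   (assuming the symbol itself is naturally aligned), while
   (symbol_ref "foo") + 6 fails because the addend is not a multiple
   of four.  */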
2970
2971 /* ADDR is moved into REG using larl. If ADDR isn't a valid larl
2972 operand, SCRATCH is used to load the even part of the address, and
2973 one is then added to form the final value in REG. */
2974
2975 void
2976 s390_reload_larl_operand (rtx reg, rtx addr, rtx scratch)
2977 {
2978 HOST_WIDE_INT addend;
2979 rtx symref;
2980
2981 if (!s390_symref_operand_p (addr, &symref, &addend))
2982 gcc_unreachable ();
2983
2984 if (!(addend & 1))
2985 /* Easy case. The addend is even so larl will do fine. */
2986 emit_move_insn (reg, addr);
2987 else
2988 {
2989 /* We can leave the scratch register untouched if the target
2990 register is a valid base register. */
2991 if (REGNO (reg) < FIRST_PSEUDO_REGISTER
2992 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS)
2993 scratch = reg;
2994
2995 gcc_assert (REGNO (scratch) < FIRST_PSEUDO_REGISTER);
2996 gcc_assert (REGNO_REG_CLASS (REGNO (scratch)) == ADDR_REGS);
2997
2998 if (addend != 1)
2999 emit_move_insn (scratch,
3000 gen_rtx_CONST (Pmode,
3001 gen_rtx_PLUS (Pmode, symref,
3002 GEN_INT (addend - 1))));
3003 else
3004 emit_move_insn (scratch, symref);
3005
3006 /* Increment the address using la in order to avoid clobbering cc. */
3007 emit_move_insn (reg, gen_rtx_PLUS (Pmode, scratch, const1_rtx));
3008 }
3009 }
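
/* Worked example (editorial addition): assume ADDR is
   (const (plus (symbol_ref "foo") (const_int 5))).  The addend is odd,
   so LARL cannot load it directly; the code above instead loads foo + 4
   into the scratch register with LARL and then adds 1 using LA, which
   leaves the condition code untouched.  */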
3010
3011 /* Generate what is necessary to move between REG and MEM using
3012 SCRATCH. The direction is given by TOMEM. */
3013
3014 void
3015 s390_reload_symref_address (rtx reg, rtx mem, rtx scratch, bool tomem)
3016 {
3017 /* Reload might have pulled a constant out of the literal pool.
3018 Force it back in. */
3019 if (CONST_INT_P (mem) || GET_CODE (mem) == CONST_DOUBLE
3020 || GET_CODE (mem) == CONST)
3021 mem = force_const_mem (GET_MODE (reg), mem);
3022
3023 gcc_assert (MEM_P (mem));
3024
3025 /* For a load from memory we can leave the scratch register
3026 untouched if the target register is a valid base register. */
3027 if (!tomem
3028 && REGNO (reg) < FIRST_PSEUDO_REGISTER
3029 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS
3030 && GET_MODE (reg) == GET_MODE (scratch))
3031 scratch = reg;
3032
3033 /* Load address into scratch register. Since we can't have a
3034 secondary reload for a secondary reload we have to cover the case
3035 where larl would need a secondary reload here as well. */
3036 s390_reload_larl_operand (scratch, XEXP (mem, 0), scratch);
3037
3038 /* Now we can use a standard load/store to do the move. */
3039 if (tomem)
3040 emit_move_insn (replace_equiv_address (mem, scratch), reg);
3041 else
3042 emit_move_insn (reg, replace_equiv_address (mem, scratch));
3043 }
3044
3045 /* Inform reload about cases where moving X with a mode MODE to a register in
3046 RCLASS requires an extra scratch or immediate register. Return the class
3047 needed for the immediate register. */
3048
3049 static reg_class_t
3050 s390_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
3051 enum machine_mode mode, secondary_reload_info *sri)
3052 {
3053 enum reg_class rclass = (enum reg_class) rclass_i;
3054
3055 /* Intermediate register needed. */
3056 if (reg_classes_intersect_p (CC_REGS, rclass))
3057 return GENERAL_REGS;
3058
3059 if (TARGET_Z10)
3060 {
3061 /* On z10 several optimizer steps may generate larl operands with
3062 an odd addend. */
3063 if (in_p
3064 && s390_symref_operand_p (x, NULL, NULL)
3065 && mode == Pmode
3066 && !s390_check_symref_alignment (x, 2))
3067 sri->icode = ((mode == DImode) ? CODE_FOR_reloaddi_larl_odd_addend_z10
3068 : CODE_FOR_reloadsi_larl_odd_addend_z10);
3069
3070 /* On z10 we need a scratch register when moving QI, TI or floating
3071 point mode values from or to a memory location with a SYMBOL_REF
3072 or if the symref addend of a SI or DI move is not aligned to the
3073 width of the access. */
3074 if (MEM_P (x)
3075 && s390_symref_operand_p (XEXP (x, 0), NULL, NULL)
3076 && (mode == QImode || mode == TImode || FLOAT_MODE_P (mode)
3077 || (!TARGET_ZARCH && mode == DImode)
3078 || ((mode == HImode || mode == SImode || mode == DImode)
3079 && (!s390_check_symref_alignment (XEXP (x, 0),
3080 GET_MODE_SIZE (mode))))))
3081 {
3082 #define __SECONDARY_RELOAD_CASE(M,m) \
3083 case M##mode: \
3084 if (TARGET_64BIT) \
3085 sri->icode = in_p ? CODE_FOR_reload##m##di_toreg_z10 : \
3086 CODE_FOR_reload##m##di_tomem_z10; \
3087 else \
3088 sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 : \
3089 CODE_FOR_reload##m##si_tomem_z10; \
3090 break;
3091
3092 switch (GET_MODE (x))
3093 {
3094 __SECONDARY_RELOAD_CASE (QI, qi);
3095 __SECONDARY_RELOAD_CASE (HI, hi);
3096 __SECONDARY_RELOAD_CASE (SI, si);
3097 __SECONDARY_RELOAD_CASE (DI, di);
3098 __SECONDARY_RELOAD_CASE (TI, ti);
3099 __SECONDARY_RELOAD_CASE (SF, sf);
3100 __SECONDARY_RELOAD_CASE (DF, df);
3101 __SECONDARY_RELOAD_CASE (TF, tf);
3102 __SECONDARY_RELOAD_CASE (SD, sd);
3103 __SECONDARY_RELOAD_CASE (DD, dd);
3104 __SECONDARY_RELOAD_CASE (TD, td);
3105
3106 default:
3107 gcc_unreachable ();
3108 }
3109 #undef __SECONDARY_RELOAD_CASE
3110 }
3111 }
3112
3113 /* We need a scratch register when loading a PLUS expression which
3114 is not a legitimate operand of the LOAD ADDRESS instruction. */
3115 if (in_p && s390_plus_operand (x, mode))
3116 sri->icode = (TARGET_64BIT ?
3117 CODE_FOR_reloaddi_plus : CODE_FOR_reloadsi_plus);
3118
3119 /* When performing a multiword move from or to memory, we have to make
3120 sure the second chunk in memory is addressable without causing a
3121 displacement overflow. If it is not, we calculate the address in
3122 a scratch register. */
3123 if (MEM_P (x)
3124 && GET_CODE (XEXP (x, 0)) == PLUS
3125 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3126 && !DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1))
3127 + GET_MODE_SIZE (mode) - 1))
3128 {
3129 /* For GENERAL_REGS a displacement overflow is no problem if it occurs
3130 in an s_operand address, since we may fall back to lm/stm. So we only
3131 have to care about overflows in the b+i+d case. */
3132 if ((reg_classes_intersect_p (GENERAL_REGS, rclass)
3133 && s390_class_max_nregs (GENERAL_REGS, mode) > 1
3134 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
3135 /* For FP_REGS no lm/stm is available so this check is triggered
3136 for displacement overflows in b+i+d and b+d like addresses. */
3137 || (reg_classes_intersect_p (FP_REGS, rclass)
3138 && s390_class_max_nregs (FP_REGS, mode) > 1))
3139 {
3140 if (in_p)
3141 sri->icode = (TARGET_64BIT ?
3142 CODE_FOR_reloaddi_nonoffmem_in :
3143 CODE_FOR_reloadsi_nonoffmem_in);
3144 else
3145 sri->icode = (TARGET_64BIT ?
3146 CODE_FOR_reloaddi_nonoffmem_out :
3147 CODE_FOR_reloadsi_nonoffmem_out);
3148 }
3149 }
3150
3151 /* A scratch address register is needed when a symbolic constant is
3152 copied to r0 while compiling with -fPIC. In other cases the target
3153 register might be used as a temporary (see legitimize_pic_address). */
3154 if (in_p && SYMBOLIC_CONST (x) && flag_pic == 2 && rclass != ADDR_REGS)
3155 sri->icode = (TARGET_64BIT ?
3156 CODE_FOR_reloaddi_PIC_addr :
3157 CODE_FOR_reloadsi_PIC_addr);
3158
3159 /* Either scratch or no register needed. */
3160 return NO_REGS;
3161 }
3162
3163 /* Generate code to load SRC, which is a PLUS that is not a
3164 legitimate operand for the LA instruction, into TARGET.
3165 SCRATCH may be used as scratch register. */
3166
3167 void
3168 s390_expand_plus_operand (rtx target, rtx src,
3169 rtx scratch)
3170 {
3171 rtx sum1, sum2;
3172 struct s390_address ad;
3173
3174 /* src must be a PLUS; get its two operands. */
3175 gcc_assert (GET_CODE (src) == PLUS);
3176 gcc_assert (GET_MODE (src) == Pmode);
3177
3178 /* Check if any of the two operands is already scheduled
3179 for replacement by reload. This can happen e.g. when
3180 float registers occur in an address. */
3181 sum1 = find_replacement (&XEXP (src, 0));
3182 sum2 = find_replacement (&XEXP (src, 1));
3183 src = gen_rtx_PLUS (Pmode, sum1, sum2);
3184
3185 /* If the address is already strictly valid, there's nothing to do. */
3186 if (!s390_decompose_address (src, &ad)
3187 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
3188 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
3189 {
3190 /* Otherwise, one of the operands cannot be an address register;
3191 we reload its value into the scratch register. */
3192 if (true_regnum (sum1) < 1 || true_regnum (sum1) > 15)
3193 {
3194 emit_move_insn (scratch, sum1);
3195 sum1 = scratch;
3196 }
3197 if (true_regnum (sum2) < 1 || true_regnum (sum2) > 15)
3198 {
3199 emit_move_insn (scratch, sum2);
3200 sum2 = scratch;
3201 }
3202
3203 /* According to the way these invalid addresses are generated
3204 in reload.c, it should never happen (at least on s390) that
3205 *neither* of the PLUS components, after find_replacements
3206 was applied, is an address register. */
3207 if (sum1 == scratch && sum2 == scratch)
3208 {
3209 debug_rtx (src);
3210 gcc_unreachable ();
3211 }
3212
3213 src = gen_rtx_PLUS (Pmode, sum1, sum2);
3214 }
3215
3216 /* Emit the LOAD ADDRESS pattern. Note that reload of PLUS
3217 is only ever performed on addresses, so we can mark the
3218 sum as legitimate for LA in any case. */
3219 s390_load_address (target, src);
3220 }
3221
3222
3223 /* Return true if ADDR is a valid memory address.
3224 STRICT specifies whether strict register checking applies. */
3225
3226 static bool
3227 s390_legitimate_address_p (enum machine_mode mode, rtx addr, bool strict)
3228 {
3229 struct s390_address ad;
3230
3231 if (TARGET_Z10
3232 && larl_operand (addr, VOIDmode)
3233 && (mode == VOIDmode
3234 || s390_check_symref_alignment (addr, GET_MODE_SIZE (mode))))
3235 return true;
3236
3237 if (!s390_decompose_address (addr, &ad))
3238 return false;
3239
3240 if (strict)
3241 {
3242 if (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
3243 return false;
3244
3245 if (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx)))
3246 return false;
3247 }
3248 else
3249 {
3250 if (ad.base
3251 && !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
3252 || REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
3253 return false;
3254
3255 if (ad.indx
3256 && !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
3257 || REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
3258 return false;
3259 }
3260 return true;
3261 }
3262
3263 /* Return true if OP is a valid operand for the LA instruction.
3264 In 31-bit, we need to prove that the result is used as an
3265 address, as LA performs only a 31-bit addition. */
3266
3267 bool
3268 legitimate_la_operand_p (rtx op)
3269 {
3270 struct s390_address addr;
3271 if (!s390_decompose_address (op, &addr))
3272 return false;
3273
3274 return (TARGET_64BIT || addr.pointer);
3275 }
3276
3277 /* Return true if it is valid *and* preferable to use LA to
3278 compute the sum of OP1 and OP2. */
3279
3280 bool
3281 preferred_la_operand_p (rtx op1, rtx op2)
3282 {
3283 struct s390_address addr;
3284
3285 if (op2 != const0_rtx)
3286 op1 = gen_rtx_PLUS (Pmode, op1, op2);
3287
3288 if (!s390_decompose_address (op1, &addr))
3289 return false;
3290 if (addr.base && !REGNO_OK_FOR_BASE_P (REGNO (addr.base)))
3291 return false;
3292 if (addr.indx && !REGNO_OK_FOR_INDEX_P (REGNO (addr.indx)))
3293 return false;
3294
3295 /* Avoid LA instructions with index register on z196; it is
3296 preferable to use regular add instructions when possible. */
3297 if (addr.indx && s390_tune == PROCESSOR_2817_Z196)
3298 return false;
3299
3300 if (!TARGET_64BIT && !addr.pointer)
3301 return false;
3302
3303 if (addr.pointer)
3304 return true;
3305
3306 if ((addr.base && REG_P (addr.base) && REG_POINTER (addr.base))
3307 || (addr.indx && REG_P (addr.indx) && REG_POINTER (addr.indx)))
3308 return true;
3309
3310 return false;
3311 }
3312
3313 /* Emit a forced load-address operation to load SRC into DST.
3314 This will use the LOAD ADDRESS instruction even in situations
3315 where legitimate_la_operand_p (SRC) returns false. */
3316
3317 void
3318 s390_load_address (rtx dst, rtx src)
3319 {
3320 if (TARGET_64BIT)
3321 emit_move_insn (dst, src);
3322 else
3323 emit_insn (gen_force_la_31 (dst, src));
3324 }
3325
3326 /* Return a legitimate reference for ORIG (an address) using the
3327 register REG. If REG is 0, a new pseudo is generated.
3328
3329 There are two types of references that must be handled:
3330
3331 1. Global data references must load the address from the GOT, via
3332 the PIC reg. An insn is emitted to do this load, and the reg is
3333 returned.
3334
3335 2. Static data references, constant pool addresses, and code labels
3336 compute the address as an offset from the GOT, whose base is in
3337 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
3338 differentiate them from global data objects. The returned
3339 address is the PIC reg + an unspec constant.
3340
3341 TARGET_LEGITIMIZE_ADDRESS_P rejects symbolic references unless the PIC
3342 reg also appears in the address. */
3343
3344 rtx
3345 legitimize_pic_address (rtx orig, rtx reg)
3346 {
3347 rtx addr = orig;
3348 rtx new_rtx = orig;
3349 rtx base;
3350
3351 gcc_assert (!TLS_SYMBOLIC_CONST (addr));
3352
3353 if (GET_CODE (addr) == LABEL_REF
3354 || (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (addr)))
3355 {
3356 /* This is a local symbol. */
3357 if (TARGET_CPU_ZARCH && larl_operand (addr, VOIDmode))
3358 {
3359 /* Access local symbols PC-relative via LARL.
3360 This is the same as in the non-PIC case, so it is
3361 handled automatically ... */
3362 }
3363 else
3364 {
3365 /* Access local symbols relative to the GOT. */
3366
3367 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3368
3369 if (reload_in_progress || reload_completed)
3370 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3371
3372 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
3373 addr = gen_rtx_CONST (Pmode, addr);
3374 addr = force_const_mem (Pmode, addr);
3375 emit_move_insn (temp, addr);
3376
3377 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3378 if (reg != 0)
3379 {
3380 s390_load_address (reg, new_rtx);
3381 new_rtx = reg;
3382 }
3383 }
3384 }
3385 else if (GET_CODE (addr) == SYMBOL_REF)
3386 {
3387 if (reg == 0)
3388 reg = gen_reg_rtx (Pmode);
3389
3390 if (flag_pic == 1)
3391 {
3392 /* Assume GOT offset < 4k. This is handled the same way
3393 in both 31- and 64-bit code (@GOT). */
3394
3395 if (reload_in_progress || reload_completed)
3396 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3397
3398 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3399 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3400 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
3401 new_rtx = gen_const_mem (Pmode, new_rtx);
3402 emit_move_insn (reg, new_rtx);
3403 new_rtx = reg;
3404 }
3405 else if (TARGET_CPU_ZARCH)
3406 {
3407 /* If the GOT offset might be >= 4k, we determine the position
3408 of the GOT entry via a PC-relative LARL (@GOTENT). */
3409
3410 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3411
3412 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
3413 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
3414
3415 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
3416 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3417 emit_move_insn (temp, new_rtx);
3418
3419 new_rtx = gen_const_mem (Pmode, temp);
3420 emit_move_insn (reg, new_rtx);
3421 new_rtx = reg;
3422 }
3423 else
3424 {
3425 /* If the GOT offset might be >= 4k, we have to load it
3426 from the literal pool (@GOT). */
3427
3428 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3429
3430 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
3431 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
3432
3433 if (reload_in_progress || reload_completed)
3434 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3435
3436 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3437 addr = gen_rtx_CONST (Pmode, addr);
3438 addr = force_const_mem (Pmode, addr);
3439 emit_move_insn (temp, addr);
3440
3441 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3442 new_rtx = gen_const_mem (Pmode, new_rtx);
3443 emit_move_insn (reg, new_rtx);
3444 new_rtx = reg;
3445 }
3446 }
3447 else
3448 {
3449 if (GET_CODE (addr) == CONST)
3450 {
3451 addr = XEXP (addr, 0);
3452 if (GET_CODE (addr) == UNSPEC)
3453 {
3454 gcc_assert (XVECLEN (addr, 0) == 1);
3455 switch (XINT (addr, 1))
3456 {
3457 /* If someone moved a GOT-relative UNSPEC
3458 out of the literal pool, force them back in. */
3459 case UNSPEC_GOTOFF:
3460 case UNSPEC_PLTOFF:
3461 new_rtx = force_const_mem (Pmode, orig);
3462 break;
3463
3464 /* @GOT is OK as is if small. */
3465 case UNSPEC_GOT:
3466 if (flag_pic == 2)
3467 new_rtx = force_const_mem (Pmode, orig);
3468 break;
3469
3470 /* @GOTENT is OK as is. */
3471 case UNSPEC_GOTENT:
3472 break;
3473
3474 /* @PLT is OK as is on 64-bit, must be converted to
3475 GOT-relative @PLTOFF on 31-bit. */
3476 case UNSPEC_PLT:
3477 if (!TARGET_CPU_ZARCH)
3478 {
3479 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3480
3481 if (reload_in_progress || reload_completed)
3482 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3483
3484 addr = XVECEXP (addr, 0, 0);
3485 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr),
3486 UNSPEC_PLTOFF);
3487 addr = gen_rtx_CONST (Pmode, addr);
3488 addr = force_const_mem (Pmode, addr);
3489 emit_move_insn (temp, addr);
3490
3491 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3492 if (reg != 0)
3493 {
3494 s390_load_address (reg, new_rtx);
3495 new_rtx = reg;
3496 }
3497 }
3498 break;
3499
3500 /* Everything else cannot happen. */
3501 default:
3502 gcc_unreachable ();
3503 }
3504 }
3505 else
3506 gcc_assert (GET_CODE (addr) == PLUS);
3507 }
3508 if (GET_CODE (addr) == PLUS)
3509 {
3510 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
3511
3512 gcc_assert (!TLS_SYMBOLIC_CONST (op0));
3513 gcc_assert (!TLS_SYMBOLIC_CONST (op1));
3514
3515 /* Check first to see if this is a constant offset
3516 from a local symbol reference. */
3517 if ((GET_CODE (op0) == LABEL_REF
3518 || (GET_CODE (op0) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (op0)))
3519 && GET_CODE (op1) == CONST_INT)
3520 {
3521 if (TARGET_CPU_ZARCH
3522 && larl_operand (op0, VOIDmode)
3523 && INTVAL (op1) < (HOST_WIDE_INT)1 << 31
3524 && INTVAL (op1) >= -((HOST_WIDE_INT)1 << 31))
3525 {
3526 if (INTVAL (op1) & 1)
3527 {
3528 /* LARL can't handle odd offsets, so emit a
3529 pair of LARL and LA. */
3530 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3531
3532 if (!DISP_IN_RANGE (INTVAL (op1)))
3533 {
3534 HOST_WIDE_INT even = INTVAL (op1) - 1;
3535 op0 = gen_rtx_PLUS (Pmode, op0, GEN_INT (even));
3536 op0 = gen_rtx_CONST (Pmode, op0);
3537 op1 = const1_rtx;
3538 }
3539
3540 emit_move_insn (temp, op0);
3541 new_rtx = gen_rtx_PLUS (Pmode, temp, op1);
3542
3543 if (reg != 0)
3544 {
3545 s390_load_address (reg, new_rtx);
3546 new_rtx = reg;
3547 }
3548 }
3549 else
3550 {
3551 /* If the offset is even, we can just use LARL.
3552 This will happen automatically. */
3553 }
3554 }
3555 else
3556 {
3557 /* Access local symbols relative to the GOT. */
3558
3559 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3560
3561 if (reload_in_progress || reload_completed)
3562 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3563
3564 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
3565 UNSPEC_GOTOFF);
3566 addr = gen_rtx_PLUS (Pmode, addr, op1);
3567 addr = gen_rtx_CONST (Pmode, addr);
3568 addr = force_const_mem (Pmode, addr);
3569 emit_move_insn (temp, addr);
3570
3571 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3572 if (reg != 0)
3573 {
3574 s390_load_address (reg, new_rtx);
3575 new_rtx = reg;
3576 }
3577 }
3578 }
3579
3580 /* Now, check whether it is a GOT relative symbol plus offset
3581 that was pulled out of the literal pool. Force it back in. */
3582
3583 else if (GET_CODE (op0) == UNSPEC
3584 && GET_CODE (op1) == CONST_INT
3585 && XINT (op0, 1) == UNSPEC_GOTOFF)
3586 {
3587 gcc_assert (XVECLEN (op0, 0) == 1);
3588
3589 new_rtx = force_const_mem (Pmode, orig);
3590 }
3591
3592 /* Otherwise, compute the sum. */
3593 else
3594 {
3595 base = legitimize_pic_address (XEXP (addr, 0), reg);
3596 new_rtx = legitimize_pic_address (XEXP (addr, 1),
3597 base == reg ? NULL_RTX : reg);
3598 if (GET_CODE (new_rtx) == CONST_INT)
3599 new_rtx = plus_constant (base, INTVAL (new_rtx));
3600 else
3601 {
3602 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
3603 {
3604 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
3605 new_rtx = XEXP (new_rtx, 1);
3606 }
3607 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
3608 }
3609
3610 if (GET_CODE (new_rtx) == CONST)
3611 new_rtx = XEXP (new_rtx, 0);
3612 new_rtx = force_operand (new_rtx, 0);
3613 }
3614 }
3615 }
3616 return new_rtx;
3617 }
3618
3619 /* Load the thread pointer into a register. */
3620
3621 rtx
3622 s390_get_thread_pointer (void)
3623 {
3624 rtx tp = gen_reg_rtx (Pmode);
3625
3626 emit_move_insn (tp, gen_rtx_REG (Pmode, TP_REGNUM));
3627 mark_reg_pointer (tp, BITS_PER_WORD);
3628
3629 return tp;
3630 }
3631
3632 /* Emit a TLS call insn. The call target is the SYMBOL_REF stored
3633 in s390_tls_symbol, which always refers to __tls_get_offset.
3634 The returned offset is written to RESULT_REG and a USE rtx is
3635 generated for TLS_CALL. */
3636
3637 static GTY(()) rtx s390_tls_symbol;
3638
3639 static void
3640 s390_emit_tls_call_insn (rtx result_reg, rtx tls_call)
3641 {
3642 rtx insn;
3643
3644 gcc_assert (flag_pic);
3645
3646 if (!s390_tls_symbol)
3647 s390_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_offset");
3648
3649 insn = s390_emit_call (s390_tls_symbol, tls_call, result_reg,
3650 gen_rtx_REG (Pmode, RETURN_REGNUM));
3651
3652 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), result_reg);
3653 RTL_CONST_CALL_P (insn) = 1;
3654 }
3655
3656 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3657 this (thread-local) address. REG may be used as temporary. */
3658
3659 static rtx
3660 legitimize_tls_address (rtx addr, rtx reg)
3661 {
3662 rtx new_rtx, tls_call, temp, base, r2, insn;
3663
3664 if (GET_CODE (addr) == SYMBOL_REF)
3665 switch (tls_symbolic_operand (addr))
3666 {
3667 case TLS_MODEL_GLOBAL_DYNAMIC:
3668 start_sequence ();
3669 r2 = gen_rtx_REG (Pmode, 2);
3670 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_TLSGD);
3671 new_rtx = gen_rtx_CONST (Pmode, tls_call);
3672 new_rtx = force_const_mem (Pmode, new_rtx);
3673 emit_move_insn (r2, new_rtx);
3674 s390_emit_tls_call_insn (r2, tls_call);
3675 insn = get_insns ();
3676 end_sequence ();
3677
3678 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3679 temp = gen_reg_rtx (Pmode);
3680 emit_libcall_block (insn, temp, r2, new_rtx);
3681
3682 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3683 if (reg != 0)
3684 {
3685 s390_load_address (reg, new_rtx);
3686 new_rtx = reg;
3687 }
3688 break;
3689
3690 case TLS_MODEL_LOCAL_DYNAMIC:
3691 start_sequence ();
3692 r2 = gen_rtx_REG (Pmode, 2);
3693 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM);
3694 new_rtx = gen_rtx_CONST (Pmode, tls_call);
3695 new_rtx = force_const_mem (Pmode, new_rtx);
3696 emit_move_insn (r2, new_rtx);
3697 s390_emit_tls_call_insn (r2, tls_call);
3698 insn = get_insns ();
3699 end_sequence ();
3700
3701 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM_NTPOFF);
3702 temp = gen_reg_rtx (Pmode);
3703 emit_libcall_block (insn, temp, r2, new_rtx);
3704
3705 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3706 base = gen_reg_rtx (Pmode);
3707 s390_load_address (base, new_rtx);
3708
3709 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_DTPOFF);
3710 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3711 new_rtx = force_const_mem (Pmode, new_rtx);
3712 temp = gen_reg_rtx (Pmode);
3713 emit_move_insn (temp, new_rtx);
3714
3715 new_rtx = gen_rtx_PLUS (Pmode, base, temp);
3716 if (reg != 0)
3717 {
3718 s390_load_address (reg, new_rtx);
3719 new_rtx = reg;
3720 }
3721 break;
3722
3723 case TLS_MODEL_INITIAL_EXEC:
3724 if (flag_pic == 1)
3725 {
3726 /* Assume GOT offset < 4k. This is handled the same way
3727 in both 31- and 64-bit code. */
3728
3729 if (reload_in_progress || reload_completed)
3730 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3731
3732 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3733 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3734 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
3735 new_rtx = gen_const_mem (Pmode, new_rtx);
3736 temp = gen_reg_rtx (Pmode);
3737 emit_move_insn (temp, new_rtx);
3738 }
3739 else if (TARGET_CPU_ZARCH)
3740 {
3741 /* If the GOT offset might be >= 4k, we determine the position
3742 of the GOT entry via a PC-relative LARL. */
3743
3744 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3745 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3746 temp = gen_reg_rtx (Pmode);
3747 emit_move_insn (temp, new_rtx);
3748
3749 new_rtx = gen_const_mem (Pmode, temp);
3750 temp = gen_reg_rtx (Pmode);
3751 emit_move_insn (temp, new_rtx);
3752 }
3753 else if (flag_pic)
3754 {
3755 /* If the GOT offset might be >= 4k, we have to load it
3756 from the literal pool. */
3757
3758 if (reload_in_progress || reload_completed)
3759 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3760
3761 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3762 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3763 new_rtx = force_const_mem (Pmode, new_rtx);
3764 temp = gen_reg_rtx (Pmode);
3765 emit_move_insn (temp, new_rtx);
3766
3767 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3768 new_rtx = gen_const_mem (Pmode, new_rtx);
3769
3770 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
3771 temp = gen_reg_rtx (Pmode);
3772 emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
3773 }
3774 else
3775 {
3776 /* In position-dependent code, load the absolute address of
3777 the GOT entry from the literal pool. */
3778
3779 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3780 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3781 new_rtx = force_const_mem (Pmode, new_rtx);
3782 temp = gen_reg_rtx (Pmode);
3783 emit_move_insn (temp, new_rtx);
3784
3785 new_rtx = temp;
3786 new_rtx = gen_const_mem (Pmode, new_rtx);
3787 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
3788 temp = gen_reg_rtx (Pmode);
3789 emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
3790 }
3791
3792 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3793 if (reg != 0)
3794 {
3795 s390_load_address (reg, new_rtx);
3796 new_rtx = reg;
3797 }
3798 break;
3799
3800 case TLS_MODEL_LOCAL_EXEC:
3801 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3802 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3803 new_rtx = force_const_mem (Pmode, new_rtx);
3804 temp = gen_reg_rtx (Pmode);
3805 emit_move_insn (temp, new_rtx);
3806
3807 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3808 if (reg != 0)
3809 {
3810 s390_load_address (reg, new_rtx);
3811 new_rtx = reg;
3812 }
3813 break;
3814
3815 default:
3816 gcc_unreachable ();
3817 }
3818
3819 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == UNSPEC)
3820 {
3821 switch (XINT (XEXP (addr, 0), 1))
3822 {
3823 case UNSPEC_INDNTPOFF:
3824 gcc_assert (TARGET_CPU_ZARCH);
3825 new_rtx = addr;
3826 break;
3827
3828 default:
3829 gcc_unreachable ();
3830 }
3831 }
3832
3833 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
3834 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
3835 {
3836 new_rtx = XEXP (XEXP (addr, 0), 0);
3837 if (GET_CODE (new_rtx) != SYMBOL_REF)
3838 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3839
3840 new_rtx = legitimize_tls_address (new_rtx, reg);
3841 new_rtx = plus_constant (new_rtx, INTVAL (XEXP (XEXP (addr, 0), 1)));
3842 new_rtx = force_operand (new_rtx, 0);
3843 }
3844
3845 else
3846 gcc_unreachable (); /* for now ... */
3847
3848 return new_rtx;
3849 }
3850
3851 /* Emit insns making the address in operands[1] valid for a standard
3852 move to operands[0]. operands[1] is replaced by an address which
3853 should be used instead of the former RTX to emit the move
3854 pattern. */
3855
3856 void
3857 emit_symbolic_move (rtx *operands)
3858 {
3859 rtx temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
3860
3861 if (GET_CODE (operands[0]) == MEM)
3862 operands[1] = force_reg (Pmode, operands[1]);
3863 else if (TLS_SYMBOLIC_CONST (operands[1]))
3864 operands[1] = legitimize_tls_address (operands[1], temp);
3865 else if (flag_pic)
3866 operands[1] = legitimize_pic_address (operands[1], temp);
3867 }
3868
3869 /* Try machine-dependent ways of modifying an illegitimate address X
3870 to be legitimate. If we find one, return the new, valid address.
3871
3872 OLDX is the address as it was before break_out_memory_refs was called.
3873 In some cases it is useful to look at this to decide what needs to be done.
3874
3875 MODE is the mode of the operand pointed to by X.
3876
3877 When -fpic is used, special handling is needed for symbolic references.
3878 See comments by legitimize_pic_address for details. */
3879
3880 static rtx
3881 s390_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
3882 enum machine_mode mode ATTRIBUTE_UNUSED)
3883 {
3884 rtx constant_term = const0_rtx;
3885
3886 if (TLS_SYMBOLIC_CONST (x))
3887 {
3888 x = legitimize_tls_address (x, 0);
3889
3890 if (s390_legitimate_address_p (mode, x, FALSE))
3891 return x;
3892 }
3893 else if (GET_CODE (x) == PLUS
3894 && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
3895 || TLS_SYMBOLIC_CONST (XEXP (x, 1))))
3896 {
3897 return x;
3898 }
3899 else if (flag_pic)
3900 {
3901 if (SYMBOLIC_CONST (x)
3902 || (GET_CODE (x) == PLUS
3903 && (SYMBOLIC_CONST (XEXP (x, 0))
3904 || SYMBOLIC_CONST (XEXP (x, 1)))))
3905 x = legitimize_pic_address (x, 0);
3906
3907 if (s390_legitimate_address_p (mode, x, FALSE))
3908 return x;
3909 }
3910
3911 x = eliminate_constant_term (x, &constant_term);
3912
3913 /* Optimize loading of large displacements by splitting them
3914 into a multiple of 4K and the rest; this allows the
3915 former to be CSE'd if possible.
3916
3917 Don't do this if the displacement is added to a register
3918 pointing into the stack frame, as the offsets will
3919 change later anyway. */
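  /* Worked example of the split below: for constant_term == 0x12345 the
     code computes lower == 0x345 and upper == 0x12000; the 4K-aligned
     upper part is loaded into a register (and may be CSE'd across nearby
     accesses), while 0x345 remains an in-range 12-bit displacement.  */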
3920
3921 if (GET_CODE (constant_term) == CONST_INT
3922 && !TARGET_LONG_DISPLACEMENT
3923 && !DISP_IN_RANGE (INTVAL (constant_term))
3924 && !(REG_P (x) && REGNO_PTR_FRAME_P (REGNO (x))))
3925 {
3926 HOST_WIDE_INT lower = INTVAL (constant_term) & 0xfff;
3927 HOST_WIDE_INT upper = INTVAL (constant_term) ^ lower;
3928
3929 rtx temp = gen_reg_rtx (Pmode);
3930 rtx val = force_operand (GEN_INT (upper), temp);
3931 if (val != temp)
3932 emit_move_insn (temp, val);
3933
3934 x = gen_rtx_PLUS (Pmode, x, temp);
3935 constant_term = GEN_INT (lower);
3936 }
3937
3938 if (GET_CODE (x) == PLUS)
3939 {
3940 if (GET_CODE (XEXP (x, 0)) == REG)
3941 {
3942 rtx temp = gen_reg_rtx (Pmode);
3943 rtx val = force_operand (XEXP (x, 1), temp);
3944 if (val != temp)
3945 emit_move_insn (temp, val);
3946
3947 x = gen_rtx_PLUS (Pmode, XEXP (x, 0), temp);
3948 }
3949
3950 else if (GET_CODE (XEXP (x, 1)) == REG)
3951 {
3952 rtx temp = gen_reg_rtx (Pmode);
3953 rtx val = force_operand (XEXP (x, 0), temp);
3954 if (val != temp)
3955 emit_move_insn (temp, val);
3956
3957 x = gen_rtx_PLUS (Pmode, temp, XEXP (x, 1));
3958 }
3959 }
3960
3961 if (constant_term != const0_rtx)
3962 x = gen_rtx_PLUS (Pmode, x, constant_term);
3963
3964 return x;
3965 }
3966
3967 /* Try a machine-dependent way of reloading an illegitimate address AD
3968 operand. If we find one, push the reload and return the new address.
3969
3970 MODE is the mode of the enclosing MEM. OPNUM is the operand number
3971 and TYPE is the reload type of the current reload. */
3972
3973 rtx
3974 legitimize_reload_address (rtx ad, enum machine_mode mode ATTRIBUTE_UNUSED,
3975 int opnum, int type)
3976 {
3977 if (!optimize || TARGET_LONG_DISPLACEMENT)
3978 return NULL_RTX;
3979
3980 if (GET_CODE (ad) == PLUS)
3981 {
3982 rtx tem = simplify_binary_operation (PLUS, Pmode,
3983 XEXP (ad, 0), XEXP (ad, 1));
3984 if (tem)
3985 ad = tem;
3986 }
3987
3988 if (GET_CODE (ad) == PLUS
3989 && GET_CODE (XEXP (ad, 0)) == REG
3990 && GET_CODE (XEXP (ad, 1)) == CONST_INT
3991 && !DISP_IN_RANGE (INTVAL (XEXP (ad, 1))))
3992 {
3993 HOST_WIDE_INT lower = INTVAL (XEXP (ad, 1)) & 0xfff;
3994 HOST_WIDE_INT upper = INTVAL (XEXP (ad, 1)) ^ lower;
3995 rtx cst, tem, new_rtx;
3996
3997 cst = GEN_INT (upper);
3998 if (!legitimate_reload_constant_p (cst))
3999 cst = force_const_mem (Pmode, cst);
4000
4001 tem = gen_rtx_PLUS (Pmode, XEXP (ad, 0), cst);
4002 new_rtx = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));
4003
4004 push_reload (XEXP (tem, 1), 0, &XEXP (tem, 1), 0,
4005 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
4006 opnum, (enum reload_type) type);
4007 return new_rtx;
4008 }
4009
4010 return NULL_RTX;
4011 }
4012
4013 /* Emit code to move LEN bytes from SRC to DST. */
4014
4015 void
4016 s390_expand_movmem (rtx dst, rtx src, rtx len)
4017 {
4018 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
4019 {
4020 if (INTVAL (len) > 0)
4021 emit_insn (gen_movmem_short (dst, src, GEN_INT (INTVAL (len) - 1)));
4022 }
4023
4024 else if (TARGET_MVCLE)
4025 {
4026 emit_insn (gen_movmem_long (dst, src, convert_to_mode (Pmode, len, 1)));
4027 }
4028
4029 else
4030 {
4031 rtx dst_addr, src_addr, count, blocks, temp;
4032 rtx loop_start_label = gen_label_rtx ();
4033 rtx loop_end_label = gen_label_rtx ();
4034 rtx end_label = gen_label_rtx ();
4035 enum machine_mode mode;
4036
4037 mode = GET_MODE (len);
4038 if (mode == VOIDmode)
4039 mode = Pmode;
4040
4041 dst_addr = gen_reg_rtx (Pmode);
4042 src_addr = gen_reg_rtx (Pmode);
4043 count = gen_reg_rtx (mode);
4044 blocks = gen_reg_rtx (mode);
4045
4046 convert_move (count, len, 1);
4047 emit_cmp_and_jump_insns (count, const0_rtx,
4048 EQ, NULL_RTX, mode, 1, end_label);
4049
4050 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
4051 emit_move_insn (src_addr, force_operand (XEXP (src, 0), NULL_RTX));
4052 dst = change_address (dst, VOIDmode, dst_addr);
4053 src = change_address (src, VOIDmode, src_addr);
4054
4055 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4056 OPTAB_DIRECT);
4057 if (temp != count)
4058 emit_move_insn (count, temp);
4059
4060 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4061 OPTAB_DIRECT);
4062 if (temp != blocks)
4063 emit_move_insn (blocks, temp);
4064
4065 emit_cmp_and_jump_insns (blocks, const0_rtx,
4066 EQ, NULL_RTX, mode, 1, loop_end_label);
4067
4068 emit_label (loop_start_label);
4069
4070 if (TARGET_Z10
4071 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 768))
4072 {
4073 rtx prefetch;
4074
4075 /* Issue a read prefetch for the +3 cache line. */
4076 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, src_addr, GEN_INT (768)),
4077 const0_rtx, const0_rtx);
4078 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4079 emit_insn (prefetch);
4080
4081 /* Issue a write prefetch for the +3 cache line. */
4082 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (768)),
4083 const1_rtx, const0_rtx);
4084 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4085 emit_insn (prefetch);
4086 }
4087
4088 emit_insn (gen_movmem_short (dst, src, GEN_INT (255)));
4089 s390_load_address (dst_addr,
4090 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
4091 s390_load_address (src_addr,
4092 gen_rtx_PLUS (Pmode, src_addr, GEN_INT (256)));
4093
4094 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4095 OPTAB_DIRECT);
4096 if (temp != blocks)
4097 emit_move_insn (blocks, temp);
4098
4099 emit_cmp_and_jump_insns (blocks, const0_rtx,
4100 EQ, NULL_RTX, mode, 1, loop_end_label);
4101
4102 emit_jump (loop_start_label);
4103 emit_label (loop_end_label);
4104
4105 emit_insn (gen_movmem_short (dst, src,
4106 convert_to_mode (Pmode, count, 1)));
4107 emit_label (end_label);
4108 }
4109 }
4110
4111 /* Emit code to set LEN bytes at DST to VAL.
4112 Make use of clrmem if VAL is zero. */
4113
4114 void
4115 s390_expand_setmem (rtx dst, rtx len, rtx val)
4116 {
4117 if (GET_CODE (len) == CONST_INT && INTVAL (len) == 0)
4118 return;
4119
4120 gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);
4121
4122 if (GET_CODE (len) == CONST_INT && INTVAL (len) > 0 && INTVAL (len) <= 257)
4123 {
4124 if (val == const0_rtx && INTVAL (len) <= 256)
4125 emit_insn (gen_clrmem_short (dst, GEN_INT (INTVAL (len) - 1)));
4126 else
4127 {
4128 /* Initialize memory by storing the first byte. */
4129 emit_move_insn (adjust_address (dst, QImode, 0), val);
4130
4131 if (INTVAL (len) > 1)
4132 {
4133 /* Initiate 1 byte overlap move.
4134 The first byte of DST is propagated through DSTP1.
4135 Prepare a movmem for: DST+1 = DST (length = LEN - 1).
4136 DST is set to size 1 so the rest of the memory location
4137 does not count as source operand. */
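	      /* Worked example: for LEN == 10 the movmem below copies 9
		 bytes from DST to DST+1.  The underlying MVC moves left to
		 right one byte at a time, so every source byte has already
		 been replaced by VAL, and all of DST .. DST+9 end up
		 holding VAL.  */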
4138 rtx dstp1 = adjust_address (dst, VOIDmode, 1);
4139 set_mem_size (dst, const1_rtx);
4140
4141 emit_insn (gen_movmem_short (dstp1, dst,
4142 GEN_INT (INTVAL (len) - 2)));
4143 }
4144 }
4145 }
4146
4147 else if (TARGET_MVCLE)
4148 {
4149 val = force_not_mem (convert_modes (Pmode, QImode, val, 1));
4150 emit_insn (gen_setmem_long (dst, convert_to_mode (Pmode, len, 1), val));
4151 }
4152
4153 else
4154 {
4155 rtx dst_addr, count, blocks, temp, dstp1 = NULL_RTX;
4156 rtx loop_start_label = gen_label_rtx ();
4157 rtx loop_end_label = gen_label_rtx ();
4158 rtx end_label = gen_label_rtx ();
4159 enum machine_mode mode;
4160
4161 mode = GET_MODE (len);
4162 if (mode == VOIDmode)
4163 mode = Pmode;
4164
4165 dst_addr = gen_reg_rtx (Pmode);
4166 count = gen_reg_rtx (mode);
4167 blocks = gen_reg_rtx (mode);
4168
4169 convert_move (count, len, 1);
4170 emit_cmp_and_jump_insns (count, const0_rtx,
4171 EQ, NULL_RTX, mode, 1, end_label);
4172
4173 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
4174 dst = change_address (dst, VOIDmode, dst_addr);
4175
4176 if (val == const0_rtx)
4177 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4178 OPTAB_DIRECT);
4179 else
4180 {
4181 dstp1 = adjust_address (dst, VOIDmode, 1);
4182 set_mem_size (dst, const1_rtx);
4183
4184 /* Initialize memory by storing the first byte. */
4185 emit_move_insn (adjust_address (dst, QImode, 0), val);
4186
4187 /* If count is 1 we are done. */
4188 emit_cmp_and_jump_insns (count, const1_rtx,
4189 EQ, NULL_RTX, mode, 1, end_label);
4190
4191 temp = expand_binop (mode, add_optab, count, GEN_INT (-2), count, 1,
4192 OPTAB_DIRECT);
4193 }
4194 if (temp != count)
4195 emit_move_insn (count, temp);
4196
4197 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4198 OPTAB_DIRECT);
4199 if (temp != blocks)
4200 emit_move_insn (blocks, temp);
4201
4202 emit_cmp_and_jump_insns (blocks, const0_rtx,
4203 EQ, NULL_RTX, mode, 1, loop_end_label);
4204
4205 emit_label (loop_start_label);
4206
4207 if (TARGET_Z10
4208 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 1024))
4209 {
4210 /* Issue a write prefetch for the +4 cache line. */
4211 rtx prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr,
4212 GEN_INT (1024)),
4213 const1_rtx, const0_rtx);
4214 emit_insn (prefetch);
4215 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4216 }
4217
4218 if (val == const0_rtx)
4219 emit_insn (gen_clrmem_short (dst, GEN_INT (255)));
4220 else
4221 emit_insn (gen_movmem_short (dstp1, dst, GEN_INT (255)));
4222 s390_load_address (dst_addr,
4223 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
4224
4225 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4226 OPTAB_DIRECT);
4227 if (temp != blocks)
4228 emit_move_insn (blocks, temp);
4229
4230 emit_cmp_and_jump_insns (blocks, const0_rtx,
4231 EQ, NULL_RTX, mode, 1, loop_end_label);
4232
4233 emit_jump (loop_start_label);
4234 emit_label (loop_end_label);
4235
4236 if (val == const0_rtx)
4237 emit_insn (gen_clrmem_short (dst, convert_to_mode (Pmode, count, 1)));
4238 else
4239 emit_insn (gen_movmem_short (dstp1, dst, convert_to_mode (Pmode, count, 1)));
4240 emit_label (end_label);
4241 }
4242 }
4243
4244 /* Emit code to compare LEN bytes at OP0 with those at OP1,
4245 and return the result in TARGET. */
4246
4247 void
4248 s390_expand_cmpmem (rtx target, rtx op0, rtx op1, rtx len)
4249 {
4250 rtx ccreg = gen_rtx_REG (CCUmode, CC_REGNUM);
4251 rtx tmp;
4252
4253 /* As the result of CMPINT is inverted compared to what we need,
4254 we have to swap the operands. */
4255 tmp = op0; op0 = op1; op1 = tmp;
4256
4257 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
4258 {
4259 if (INTVAL (len) > 0)
4260 {
4261 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (INTVAL (len) - 1)));
4262 emit_insn (gen_cmpint (target, ccreg));
4263 }
4264 else
4265 emit_move_insn (target, const0_rtx);
4266 }
4267 else if (TARGET_MVCLE)
4268 {
4269 emit_insn (gen_cmpmem_long (op0, op1, convert_to_mode (Pmode, len, 1)));
4270 emit_insn (gen_cmpint (target, ccreg));
4271 }
4272 else
4273 {
4274 rtx addr0, addr1, count, blocks, temp;
4275 rtx loop_start_label = gen_label_rtx ();
4276 rtx loop_end_label = gen_label_rtx ();
4277 rtx end_label = gen_label_rtx ();
4278 enum machine_mode mode;
4279
4280 mode = GET_MODE (len);
4281 if (mode == VOIDmode)
4282 mode = Pmode;
4283
4284 addr0 = gen_reg_rtx (Pmode);
4285 addr1 = gen_reg_rtx (Pmode);
4286 count = gen_reg_rtx (mode);
4287 blocks = gen_reg_rtx (mode);
4288
4289 convert_move (count, len, 1);
4290 emit_cmp_and_jump_insns (count, const0_rtx,
4291 EQ, NULL_RTX, mode, 1, end_label);
4292
4293 emit_move_insn (addr0, force_operand (XEXP (op0, 0), NULL_RTX));
4294 emit_move_insn (addr1, force_operand (XEXP (op1, 0), NULL_RTX));
4295 op0 = change_address (op0, VOIDmode, addr0);
4296 op1 = change_address (op1, VOIDmode, addr1);
4297
4298 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4299 OPTAB_DIRECT);
4300 if (temp != count)
4301 emit_move_insn (count, temp);
4302
4303 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4304 OPTAB_DIRECT);
4305 if (temp != blocks)
4306 emit_move_insn (blocks, temp);
4307
4308 emit_cmp_and_jump_insns (blocks, const0_rtx,
4309 EQ, NULL_RTX, mode, 1, loop_end_label);
4310
4311 emit_label (loop_start_label);
4312
4313 if (TARGET_Z10
4314 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 512))
4315 {
4316 rtx prefetch;
4317
4318 /* Issue a read prefetch for the +2 cache line of operand 1. */
4319 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr0, GEN_INT (512)),
4320 const0_rtx, const0_rtx);
4321 emit_insn (prefetch);
4322 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4323
4324 /* Issue a read prefetch for the +2 cache line of operand 2. */
4325 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr1, GEN_INT (512)),
4326 const0_rtx, const0_rtx);
4327 emit_insn (prefetch);
4328 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4329 }
4330
4331 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (255)));
4332 temp = gen_rtx_NE (VOIDmode, ccreg, const0_rtx);
4333 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
4334 gen_rtx_LABEL_REF (VOIDmode, end_label), pc_rtx);
4335 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
4336 emit_jump_insn (temp);
4337
4338 s390_load_address (addr0,
4339 gen_rtx_PLUS (Pmode, addr0, GEN_INT (256)));
4340 s390_load_address (addr1,
4341 gen_rtx_PLUS (Pmode, addr1, GEN_INT (256)));
4342
4343 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4344 OPTAB_DIRECT);
4345 if (temp != blocks)
4346 emit_move_insn (blocks, temp);
4347
4348 emit_cmp_and_jump_insns (blocks, const0_rtx,
4349 EQ, NULL_RTX, mode, 1, loop_end_label);
4350
4351 emit_jump (loop_start_label);
4352 emit_label (loop_end_label);
4353
4354 emit_insn (gen_cmpmem_short (op0, op1,
4355 convert_to_mode (Pmode, count, 1)));
4356 emit_label (end_label);
4357
4358 emit_insn (gen_cmpint (target, ccreg));
4359 }
4360 }
4361
4362
4363 /* Expand conditional increment or decrement using alc/slb instructions.
4364 Should generate code setting DST to either SRC or SRC + INCREMENT,
4365 depending on the result of the comparison CMP_OP0 CMP_CODE CMP_OP1.
4366 Returns true if successful, false otherwise.
4367
4368 That makes it possible to implement some if-constructs without jumps e.g.:
4369 (borrow = CC0 | CC1 and carry = CC2 | CC3)
4370 unsigned int a, b, c;
4371 if (a < b) c++; -> CCU b > a -> CC2; c += carry;
4372 if (a < b) c--; -> CCL3 a - b -> borrow; c -= borrow;
4373 if (a <= b) c++; -> CCL3 b - a -> borrow; c += carry;
4374 if (a <= b) c--; -> CCU a <= b -> borrow; c -= borrow;
4375
4376 Checks for EQ and NE with a nonzero value need an additional xor e.g.:
4377 if (a == b) c++; -> CCL3 a ^= b; 0 - a -> borrow; c += carry;
4378 if (a == b) c--; -> CCU a ^= b; a <= 0 -> CC0 | CC1; c -= borrow;
4379 if (a != b) c++; -> CCU a ^= b; a > 0 -> CC2; c += carry;
4380 if (a != b) c--; -> CCL3 a ^= b; 0 - a -> borrow; c -= borrow; */
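/* Illustrative C-level sketch of the first case above: for unsigned int
   a, b, c the statement "if (a < b) c++;" is effectively turned into
   "c += (a < b);", where the comparison sets the carry in the CC and an
   ALC (add logical with carry) folds it into c without a branch.  */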
4381
4382 bool
4383 s390_expand_addcc (enum rtx_code cmp_code, rtx cmp_op0, rtx cmp_op1,
4384 rtx dst, rtx src, rtx increment)
4385 {
4386 enum machine_mode cmp_mode;
4387 enum machine_mode cc_mode;
4388 rtx op_res;
4389 rtx insn;
4390 rtvec p;
4391 int ret;
4392
4393 if ((GET_MODE (cmp_op0) == SImode || GET_MODE (cmp_op0) == VOIDmode)
4394 && (GET_MODE (cmp_op1) == SImode || GET_MODE (cmp_op1) == VOIDmode))
4395 cmp_mode = SImode;
4396 else if ((GET_MODE (cmp_op0) == DImode || GET_MODE (cmp_op0) == VOIDmode)
4397 && (GET_MODE (cmp_op1) == DImode || GET_MODE (cmp_op1) == VOIDmode))
4398 cmp_mode = DImode;
4399 else
4400 return false;
4401
4402 /* Try ADD LOGICAL WITH CARRY. */
4403 if (increment == const1_rtx)
4404 {
4405 /* Determine CC mode to use. */
4406 if (cmp_code == EQ || cmp_code == NE)
4407 {
4408 if (cmp_op1 != const0_rtx)
4409 {
4410 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
4411 NULL_RTX, 0, OPTAB_WIDEN);
4412 cmp_op1 = const0_rtx;
4413 }
4414
4415 cmp_code = cmp_code == EQ ? LEU : GTU;
4416 }
4417
4418 if (cmp_code == LTU || cmp_code == LEU)
4419 {
4420 rtx tem = cmp_op0;
4421 cmp_op0 = cmp_op1;
4422 cmp_op1 = tem;
4423 cmp_code = swap_condition (cmp_code);
4424 }
4425
4426 switch (cmp_code)
4427 {
4428 case GTU:
4429 cc_mode = CCUmode;
4430 break;
4431
4432 case GEU:
4433 cc_mode = CCL3mode;
4434 break;
4435
4436 default:
4437 return false;
4438 }
4439
4440 /* Emit comparison instruction pattern. */
4441 if (!register_operand (cmp_op0, cmp_mode))
4442 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4443
4444 insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
4445 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4446 /* We use insn_invalid_p here to add clobbers if required. */
4447 ret = insn_invalid_p (emit_insn (insn));
4448 gcc_assert (!ret);
4449
4450 /* Emit ALC instruction pattern. */
4451 op_res = gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4452 gen_rtx_REG (cc_mode, CC_REGNUM),
4453 const0_rtx);
4454
4455 if (src != const0_rtx)
4456 {
4457 if (!register_operand (src, GET_MODE (dst)))
4458 src = force_reg (GET_MODE (dst), src);
4459
4460 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, src);
4461 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, const0_rtx);
4462 }
4463
4464 p = rtvec_alloc (2);
4465 RTVEC_ELT (p, 0) =
4466 gen_rtx_SET (VOIDmode, dst, op_res);
4467 RTVEC_ELT (p, 1) =
4468 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4469 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
4470
4471 return true;
4472 }
4473
4474 /* Try SUBTRACT LOGICAL WITH BORROW. */
4475 if (increment == constm1_rtx)
4476 {
4477 /* Determine CC mode to use. */
4478 if (cmp_code == EQ || cmp_code == NE)
4479 {
4480 if (cmp_op1 != const0_rtx)
4481 {
4482 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
4483 NULL_RTX, 0, OPTAB_WIDEN);
4484 cmp_op1 = const0_rtx;
4485 }
4486
4487 cmp_code = cmp_code == EQ ? LEU : GTU;
4488 }
4489
4490 if (cmp_code == GTU || cmp_code == GEU)
4491 {
4492 rtx tem = cmp_op0;
4493 cmp_op0 = cmp_op1;
4494 cmp_op1 = tem;
4495 cmp_code = swap_condition (cmp_code);
4496 }
4497
4498 switch (cmp_code)
4499 {
4500 case LEU:
4501 cc_mode = CCUmode;
4502 break;
4503
4504 case LTU:
4505 cc_mode = CCL3mode;
4506 break;
4507
4508 default:
4509 return false;
4510 }
4511
4512 /* Emit comparison instruction pattern. */
4513 if (!register_operand (cmp_op0, cmp_mode))
4514 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4515
4516 insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
4517 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4518 /* We use insn_invalid_p here to add clobbers if required. */
4519 ret = insn_invalid_p (emit_insn (insn));
4520 gcc_assert (!ret);
4521
4522 /* Emit SLB instruction pattern. */
4523 if (!register_operand (src, GET_MODE (dst)))
4524 src = force_reg (GET_MODE (dst), src);
4525
4526 op_res = gen_rtx_MINUS (GET_MODE (dst),
4527 gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
4528 gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4529 gen_rtx_REG (cc_mode, CC_REGNUM),
4530 const0_rtx));
4531 p = rtvec_alloc (2);
4532 RTVEC_ELT (p, 0) =
4533 gen_rtx_SET (VOIDmode, dst, op_res);
4534 RTVEC_ELT (p, 1) =
4535 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4536 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
4537
4538 return true;
4539 }
4540
4541 return false;
4542 }
4543
4544 /* Expand code for the insv template. Return true if successful. */
4545
4546 bool
4547 s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
4548 {
4549 int bitsize = INTVAL (op1);
4550 int bitpos = INTVAL (op2);
4551
4552 /* On z10 we can use the risbg instruction to implement insv. */
4553 if (TARGET_Z10
4554 && ((GET_MODE (dest) == DImode && GET_MODE (src) == DImode)
4555 || (GET_MODE (dest) == SImode && GET_MODE (src) == SImode)))
4556 {
4557 rtx op;
4558 rtx clobber;
4559
4560 op = gen_rtx_SET (GET_MODE(src),
4561 gen_rtx_ZERO_EXTRACT (GET_MODE (dest), dest, op1, op2),
4562 src);
4563 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4564 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
4565
4566 return true;
4567 }
4568
4569 /* We need byte alignment. */
4570 if (bitsize % BITS_PER_UNIT)
4571 return false;
4572
4573 if (bitpos == 0
4574 && memory_operand (dest, VOIDmode)
4575 && (register_operand (src, word_mode)
4576 || const_int_operand (src, VOIDmode)))
4577 {
4578 /* Emit standard pattern if possible. */
4579 enum machine_mode mode = smallest_mode_for_size (bitsize, MODE_INT);
4580 if (GET_MODE_BITSIZE (mode) == bitsize)
4581 emit_move_insn (adjust_address (dest, mode, 0), gen_lowpart (mode, src));
4582
4583 /* (set (ze (mem)) (const_int)). */
4584 else if (const_int_operand (src, VOIDmode))
4585 {
4586 int size = bitsize / BITS_PER_UNIT;
4587 rtx src_mem = adjust_address (force_const_mem (word_mode, src), BLKmode,
4588 GET_MODE_SIZE (word_mode) - size);
4589
4590 dest = adjust_address (dest, BLKmode, 0);
4591 set_mem_size (dest, GEN_INT (size));
4592 s390_expand_movmem (dest, src_mem, GEN_INT (size));
4593 }
4594
4595 /* (set (ze (mem)) (reg)). */
4596 else if (register_operand (src, word_mode))
4597 {
4598 if (bitsize <= GET_MODE_BITSIZE (SImode))
4599 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, op1,
4600 const0_rtx), src);
4601 else
4602 {
4603 /* Emit st,stcmh sequence. */
4604 int stcmh_width = bitsize - GET_MODE_BITSIZE (SImode);
4605 int size = stcmh_width / BITS_PER_UNIT;
4606
4607 emit_move_insn (adjust_address (dest, SImode, size),
4608 gen_lowpart (SImode, src));
4609 set_mem_size (dest, GEN_INT (size));
4610 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, GEN_INT
4611 (stcmh_width), const0_rtx),
4612 gen_rtx_LSHIFTRT (word_mode, src, GEN_INT
4613 (GET_MODE_BITSIZE (SImode))));
4614 }
4615 }
4616 else
4617 return false;
4618
4619 return true;
4620 }
4621
4622 /* (set (ze (reg)) (const_int)). */
4623 if (TARGET_ZARCH
4624 && register_operand (dest, word_mode)
4625 && (bitpos % 16) == 0
4626 && (bitsize % 16) == 0
4627 && const_int_operand (src, VOIDmode))
4628 {
4629 HOST_WIDE_INT val = INTVAL (src);
4630 int regpos = bitpos + bitsize;
4631
4632 while (regpos > bitpos)
4633 {
4634 enum machine_mode putmode;
4635 int putsize;
4636
4637 if (TARGET_EXTIMM && (regpos % 32 == 0) && (regpos >= bitpos + 32))
4638 putmode = SImode;
4639 else
4640 putmode = HImode;
4641
4642 putsize = GET_MODE_BITSIZE (putmode);
4643 regpos -= putsize;
4644 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
4645 GEN_INT (putsize),
4646 GEN_INT (regpos)),
4647 gen_int_mode (val, putmode));
4648 val >>= putsize;
4649 }
4650 gcc_assert (regpos == bitpos);
4651 return true;
4652 }
4653
4654 return false;
4655 }
4656
4657 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic which returns a
4658 register that holds VAL of mode MODE shifted by COUNT bits. */
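/* Worked example: for a QImode VAL and COUNT == 16 the returned register
   holds (VAL & 0xff) << 16, i.e. the byte placed at its position within
   the containing 32-bit word.  */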
4659
4660 static inline rtx
4661 s390_expand_mask_and_shift (rtx val, enum machine_mode mode, rtx count)
4662 {
4663 val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
4664 NULL_RTX, 1, OPTAB_DIRECT);
4665 return expand_simple_binop (SImode, ASHIFT, val, count,
4666 NULL_RTX, 1, OPTAB_DIRECT);
4667 }
4668
4669 /* Structure to hold the initial parameters for a compare_and_swap operation
4670 in HImode and QImode. */
4671
4672 struct alignment_context
4673 {
4674 rtx memsi; /* SI aligned memory location. */
4675 rtx shift; /* Bit offset with regard to lsb. */
4676 rtx modemask; /* Mask of the HQImode shifted by SHIFT bits. */
4677 rtx modemaski; /* ~modemask */
4678 bool aligned; /* True if memory is aligned, false otherwise. */
4679 };
4680
4681 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize
4682 structure AC for transparent simplification, if the memory alignment is known
4683 to be at least 32 bits. MEM is the memory location for the actual operation
4684 and MODE its mode. */
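/* Illustrative example: for a QImode operation on an address whose low two
   bits are 1, MEMSI covers the containing aligned word, BYTEOFFSET is 1,
   and the shift becomes (4 - 1 - 1) * 8 == 16 bits, which is the big-endian
   position of that byte within the 32-bit word; MODEMASK is then
   0xff << 16.  */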
4685
4686 static void
4687 init_alignment_context (struct alignment_context *ac, rtx mem,
4688 enum machine_mode mode)
4689 {
4690 ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
4691 ac->aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));
4692
4693 if (ac->aligned)
4694 ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned. */
4695 else
4696 {
4697 /* Alignment is unknown. */
4698 rtx byteoffset, addr, align;
4699
4700 /* Force the address into a register. */
4701 addr = force_reg (Pmode, XEXP (mem, 0));
4702
4703 /* Align it to SImode. */
4704 align = expand_simple_binop (Pmode, AND, addr,
4705 GEN_INT (-GET_MODE_SIZE (SImode)),
4706 NULL_RTX, 1, OPTAB_DIRECT);
4707 /* Generate MEM. */
4708 ac->memsi = gen_rtx_MEM (SImode, align);
4709 MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
4710 set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
4711 set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));
4712
4713 /* Calculate shiftcount. */
4714 byteoffset = expand_simple_binop (Pmode, AND, addr,
4715 GEN_INT (GET_MODE_SIZE (SImode) - 1),
4716 NULL_RTX, 1, OPTAB_DIRECT);
4717 /* As we already have some offset, evaluate the remaining distance. */
4718 ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
4719 NULL_RTX, 1, OPTAB_DIRECT);
4720
4721 }
4722 /* Shift is the byte count, but we need the bitcount. */
4723 ac->shift = expand_simple_binop (SImode, MULT, ac->shift, GEN_INT (BITS_PER_UNIT),
4724 NULL_RTX, 1, OPTAB_DIRECT);
4725 /* Calculate masks. */
4726 ac->modemask = expand_simple_binop (SImode, ASHIFT,
4727 GEN_INT (GET_MODE_MASK (mode)), ac->shift,
4728 NULL_RTX, 1, OPTAB_DIRECT);
4729 ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask, NULL_RTX, 1);
4730 }
4731
4732 /* Expand an atomic compare and swap operation for HImode and QImode. MEM is
4733 the memory location, CMP the old value to compare MEM with, and NEW_RTX the value
4734 to set if CMP == MEM.
4735 CMP is never in memory for compare_and_swap_cc because
4736 expand_bool_compare_and_swap puts it into a register for later compare. */
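/* Typical use (illustrative, not spelled out in this file): a
   compare-and-swap builtin on a 1- or 2-byte object reaches this routine;
   the containing aligned word is loaded, CMP and NEW_RTX are merged into
   it at the proper bit positions, and CS retries whenever an unrelated
   byte of the word changed concurrently.  */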
4737
4738 void
4739 s390_expand_cs_hqi (enum machine_mode mode, rtx target, rtx mem, rtx cmp, rtx new_rtx)
4740 {
4741 struct alignment_context ac;
4742 rtx cmpv, newv, val, resv, cc;
4743 rtx res = gen_reg_rtx (SImode);
4744 rtx csloop = gen_label_rtx ();
4745 rtx csend = gen_label_rtx ();
4746
4747 gcc_assert (register_operand (target, VOIDmode));
4748 gcc_assert (MEM_P (mem));
4749
4750 init_alignment_context (&ac, mem, mode);
4751
4752 /* Shift the values to the correct bit positions. */
4753 if (!(ac.aligned && MEM_P (cmp)))
4754 cmp = s390_expand_mask_and_shift (cmp, mode, ac.shift);
4755 if (!(ac.aligned && MEM_P (new_rtx)))
4756 new_rtx = s390_expand_mask_and_shift (new_rtx, mode, ac.shift);
4757
4758 /* Load full word. Subsequent loads are performed by CS. */
4759 val = expand_simple_binop (SImode, AND, ac.memsi, ac.modemaski,
4760 NULL_RTX, 1, OPTAB_DIRECT);
4761
4762 /* Start CS loop. */
4763 emit_label (csloop);
4764 /* val = "<mem>00..0<mem>"
4765 * cmp = "00..0<cmp>00..0"
4766 * new = "00..0<new>00..0"
4767 */
4768
4769 /* Patch cmp and new with val at correct position. */
4770 if (ac.aligned && MEM_P (cmp))
4771 {
4772 cmpv = force_reg (SImode, val);
4773 store_bit_field (cmpv, GET_MODE_BITSIZE (mode), 0, SImode, cmp);
4774 }
4775 else
4776 cmpv = force_reg (SImode, expand_simple_binop (SImode, IOR, cmp, val,
4777 NULL_RTX, 1, OPTAB_DIRECT));
4778 if (ac.aligned && MEM_P (new_rtx))
4779 {
4780 newv = force_reg (SImode, val);
4781 store_bit_field (newv, GET_MODE_BITSIZE (mode), 0, SImode, new_rtx);
4782 }
4783 else
4784 newv = force_reg (SImode, expand_simple_binop (SImode, IOR, new_rtx, val,
4785 NULL_RTX, 1, OPTAB_DIRECT));
4786
4787 /* Jump to end if we're done (likely?). */
4788 s390_emit_jump (csend, s390_emit_compare_and_swap (EQ, res, ac.memsi,
4789 cmpv, newv));
4790
4791 /* Check for changes outside mode. */
4792 resv = expand_simple_binop (SImode, AND, res, ac.modemaski,
4793 NULL_RTX, 1, OPTAB_DIRECT);
4794 cc = s390_emit_compare (NE, resv, val);
4795 emit_move_insn (val, resv);
4796 /* Loop internal if so. */
4797 s390_emit_jump (csloop, cc);
4798
4799 emit_label (csend);
4800
4801 /* Return the correct part of the bitfield. */
4802 convert_move (target, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
4803 NULL_RTX, 1, OPTAB_DIRECT), 1);
4804 }
4805
4806 /* Expand an atomic operation CODE of mode MODE. MEM is the memory location
4807 and VAL the value to play with. If AFTER is true then store the value
4808 MEM holds after the operation, if AFTER is false then store the value MEM
4809 holds before the operation. If TARGET is zero then discard that value, else
4810 store it to TARGET. */
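/* Sketch of the loop emitted below, written as C-like pseudocode rather
   than as RTL:

     cmp = *memsi;
     do
       new = patch_field (op (cmp, val));          // only the HQImode field changes
     while (!compare_and_swap (memsi, &cmp, new)); // cmp is reloaded on failure

   TARGET, if nonzero, then receives either the old or the new field,
   depending on AFTER.  */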
4811
4812 void
4813 s390_expand_atomic (enum machine_mode mode, enum rtx_code code,
4814 rtx target, rtx mem, rtx val, bool after)
4815 {
4816 struct alignment_context ac;
4817 rtx cmp;
4818 rtx new_rtx = gen_reg_rtx (SImode);
4819 rtx orig = gen_reg_rtx (SImode);
4820 rtx csloop = gen_label_rtx ();
4821
4822 gcc_assert (!target || register_operand (target, VOIDmode));
4823 gcc_assert (MEM_P (mem));
4824
4825 init_alignment_context (&ac, mem, mode);
4826
4827 /* Shift val to the correct bit positions.
4828 Preserve "icm", but prevent "ex icm". */
4829 if (!(ac.aligned && code == SET && MEM_P (val)))
4830 val = s390_expand_mask_and_shift (val, mode, ac.shift);
4831
4832 /* Further preparation insns. */
4833 if (code == PLUS || code == MINUS)
4834 emit_move_insn (orig, val);
4835 else if (code == MULT || code == AND) /* val = "11..1<val>11..1" */
4836 val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
4837 NULL_RTX, 1, OPTAB_DIRECT);
4838
4839 /* Load full word. Subsequent loads are performed by CS. */
4840 cmp = force_reg (SImode, ac.memsi);
4841
4842 /* Start CS loop. */
4843 emit_label (csloop);
4844 emit_move_insn (new_rtx, cmp);
4845
4846 /* Patch new with val at correct position. */
4847 switch (code)
4848 {
4849 case PLUS:
4850 case MINUS:
4851 val = expand_simple_binop (SImode, code, new_rtx, orig,
4852 NULL_RTX, 1, OPTAB_DIRECT);
4853 val = expand_simple_binop (SImode, AND, val, ac.modemask,
4854 NULL_RTX, 1, OPTAB_DIRECT);
4855 /* FALLTHRU */
4856 case SET:
4857 if (ac.aligned && MEM_P (val))
4858 store_bit_field (new_rtx, GET_MODE_BITSIZE (mode), 0, SImode, val);
4859 else
4860 {
4861 new_rtx = expand_simple_binop (SImode, AND, new_rtx, ac.modemaski,
4862 NULL_RTX, 1, OPTAB_DIRECT);
4863 new_rtx = expand_simple_binop (SImode, IOR, new_rtx, val,
4864 NULL_RTX, 1, OPTAB_DIRECT);
4865 }
4866 break;
4867 case AND:
4868 case IOR:
4869 case XOR:
4870 new_rtx = expand_simple_binop (SImode, code, new_rtx, val,
4871 NULL_RTX, 1, OPTAB_DIRECT);
4872 break;
4873 case MULT: /* NAND */
4874 new_rtx = expand_simple_binop (SImode, AND, new_rtx, val,
4875 NULL_RTX, 1, OPTAB_DIRECT);
4876 new_rtx = expand_simple_binop (SImode, XOR, new_rtx, ac.modemask,
4877 NULL_RTX, 1, OPTAB_DIRECT);
4878 break;
4879 default:
4880 gcc_unreachable ();
4881 }
4882
4883 s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, cmp,
4884 ac.memsi, cmp, new_rtx));
4885
4886 /* Return the correct part of the bitfield. */
4887 if (target)
4888 convert_move (target, expand_simple_binop (SImode, LSHIFTRT,
4889 after ? new_rtx : cmp, ac.shift,
4890 NULL_RTX, 1, OPTAB_DIRECT), 1);
4891 }
4892
4893 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
4894 We need to emit DTP-relative relocations. */
4895
4896 static void s390_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
4897
4898 static void
4899 s390_output_dwarf_dtprel (FILE *file, int size, rtx x)
4900 {
4901 switch (size)
4902 {
4903 case 4:
4904 fputs ("\t.long\t", file);
4905 break;
4906 case 8:
4907 fputs ("\t.quad\t", file);
4908 break;
4909 default:
4910 gcc_unreachable ();
4911 }
4912 output_addr_const (file, x);
4913 fputs ("@DTPOFF", file);
4914 }
4915
4916 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
4917 /* Implement TARGET_MANGLE_TYPE. */
4918
4919 static const char *
4920 s390_mangle_type (const_tree type)
4921 {
4922 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
4923 && TARGET_LONG_DOUBLE_128)
4924 return "g";
4925
4926 /* For all other types, use normal C++ mangling. */
4927 return NULL;
4928 }
4929 #endif
4930
4931 /* In the name of slightly smaller debug output, and to cater to
4932 general assembler lossage, recognize various UNSPEC sequences
4933 and turn them back into a direct symbol reference. */
4934
4935 static rtx
4936 s390_delegitimize_address (rtx orig_x)
4937 {
4938 rtx x, y;
4939
4940 orig_x = delegitimize_mem_from_attrs (orig_x);
4941 x = orig_x;
4942 if (GET_CODE (x) != MEM)
4943 return orig_x;
4944
4945 x = XEXP (x, 0);
4946 if (GET_CODE (x) == PLUS
4947 && GET_CODE (XEXP (x, 1)) == CONST
4948 && GET_CODE (XEXP (x, 0)) == REG
4949 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
4950 {
4951 y = XEXP (XEXP (x, 1), 0);
4952 if (GET_CODE (y) == UNSPEC
4953 && XINT (y, 1) == UNSPEC_GOT)
4954 return XVECEXP (y, 0, 0);
4955 return orig_x;
4956 }
4957
4958 if (GET_CODE (x) == CONST)
4959 {
4960 y = XEXP (x, 0);
4961 if (GET_CODE (y) == UNSPEC
4962 && XINT (y, 1) == UNSPEC_GOTENT)
4963 return XVECEXP (y, 0, 0);
4964 return orig_x;
4965 }
4966
4967 return orig_x;
4968 }
4969
4970 /* Output operand OP to stdio stream FILE.
4971 OP is an address (register + offset) which is not used to address data;
4972 instead the rightmost bits are interpreted as the value. */
4973
4974 static void
4975 print_shift_count_operand (FILE *file, rtx op)
4976 {
4977 HOST_WIDE_INT offset;
4978 rtx base;
4979
4980 /* Extract base register and offset. */
4981 if (!s390_decompose_shift_count (op, &base, &offset))
4982 gcc_unreachable ();
4983
4984 /* Sanity check. */
4985 if (base)
4986 {
4987 gcc_assert (GET_CODE (base) == REG);
4988 gcc_assert (REGNO (base) < FIRST_PSEUDO_REGISTER);
4989 gcc_assert (REGNO_REG_CLASS (REGNO (base)) == ADDR_REGS);
4990 }
4991
4992 /* Offsets are restricted to twelve bits. */
4993 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset & ((1 << 12) - 1));
4994 if (base)
4995 fprintf (file, "(%s)", reg_names[REGNO (base)]);
4996 }
4997
4998 /* See 'get_some_local_dynamic_name'. */
4999
5000 static int
5001 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
5002 {
5003 rtx x = *px;
5004
5005 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
5006 {
5007 x = get_pool_constant (x);
5008 return for_each_rtx (&x, get_some_local_dynamic_name_1, 0);
5009 }
5010
5011 if (GET_CODE (x) == SYMBOL_REF
5012 && tls_symbolic_operand (x) == TLS_MODEL_LOCAL_DYNAMIC)
5013 {
5014 cfun->machine->some_ld_name = XSTR (x, 0);
5015 return 1;
5016 }
5017
5018 return 0;
5019 }
5020
5021 /* Locate some local-dynamic symbol still in use by this function
5022 so that we can print its name in local-dynamic base patterns. */
5023
5024 static const char *
5025 get_some_local_dynamic_name (void)
5026 {
5027 rtx insn;
5028
5029 if (cfun->machine->some_ld_name)
5030 return cfun->machine->some_ld_name;
5031
5032 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
5033 if (INSN_P (insn)
5034 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
5035 return cfun->machine->some_ld_name;
5036
5037 gcc_unreachable ();
5038 }
5039
5040 /* Output machine-dependent UNSPECs occurring in address constant X
5041 in assembler syntax to stdio stream FILE. Returns true if the
5042 constant X could be recognized, false otherwise. */
5043
5044 bool
5045 s390_output_addr_const_extra (FILE *file, rtx x)
5046 {
5047 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
5048 switch (XINT (x, 1))
5049 {
5050 case UNSPEC_GOTENT:
5051 output_addr_const (file, XVECEXP (x, 0, 0));
5052 fprintf (file, "@GOTENT");
5053 return true;
5054 case UNSPEC_GOT:
5055 output_addr_const (file, XVECEXP (x, 0, 0));
5056 fprintf (file, "@GOT");
5057 return true;
5058 case UNSPEC_GOTOFF:
5059 output_addr_const (file, XVECEXP (x, 0, 0));
5060 fprintf (file, "@GOTOFF");
5061 return true;
5062 case UNSPEC_PLT:
5063 output_addr_const (file, XVECEXP (x, 0, 0));
5064 fprintf (file, "@PLT");
5065 return true;
5066 case UNSPEC_PLTOFF:
5067 output_addr_const (file, XVECEXP (x, 0, 0));
5068 fprintf (file, "@PLTOFF");
5069 return true;
5070 case UNSPEC_TLSGD:
5071 output_addr_const (file, XVECEXP (x, 0, 0));
5072 fprintf (file, "@TLSGD");
5073 return true;
5074 case UNSPEC_TLSLDM:
5075 assemble_name (file, get_some_local_dynamic_name ());
5076 fprintf (file, "@TLSLDM");
5077 return true;
5078 case UNSPEC_DTPOFF:
5079 output_addr_const (file, XVECEXP (x, 0, 0));
5080 fprintf (file, "@DTPOFF");
5081 return true;
5082 case UNSPEC_NTPOFF:
5083 output_addr_const (file, XVECEXP (x, 0, 0));
5084 fprintf (file, "@NTPOFF");
5085 return true;
5086 case UNSPEC_GOTNTPOFF:
5087 output_addr_const (file, XVECEXP (x, 0, 0));
5088 fprintf (file, "@GOTNTPOFF");
5089 return true;
5090 case UNSPEC_INDNTPOFF:
5091 output_addr_const (file, XVECEXP (x, 0, 0));
5092 fprintf (file, "@INDNTPOFF");
5093 return true;
5094 }
5095
5096 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 2)
5097 switch (XINT (x, 1))
5098 {
5099 case UNSPEC_POOL_OFFSET:
5100 x = gen_rtx_MINUS (GET_MODE (x), XVECEXP (x, 0, 0), XVECEXP (x, 0, 1));
5101 output_addr_const (file, x);
5102 return true;
5103 }
5104 return false;
5105 }
5106
5107 /* Output address operand ADDR in assembler syntax to
5108 stdio stream FILE. */
5109
5110 void
5111 print_operand_address (FILE *file, rtx addr)
5112 {
5113 struct s390_address ad;
5114
5115 if (s390_symref_operand_p (addr, NULL, NULL))
5116 {
5117 gcc_assert (TARGET_Z10);
5118 output_addr_const (file, addr);
5119 return;
5120 }
5121
5122 if (!s390_decompose_address (addr, &ad)
5123 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5124 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
5125 output_operand_lossage ("cannot decompose address");
5126
5127 if (ad.disp)
5128 output_addr_const (file, ad.disp);
5129 else
5130 fprintf (file, "0");
5131
5132 if (ad.base && ad.indx)
5133 fprintf (file, "(%s,%s)", reg_names[REGNO (ad.indx)],
5134 reg_names[REGNO (ad.base)]);
5135 else if (ad.base)
5136 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
5137 }
5138
5139 /* Output operand X in assembler syntax to stdio stream FILE.
5140 CODE specifies the format flag. The following format flags
5141 are recognized:
5142
5143 'C': print opcode suffix for branch condition.
5144 'D': print opcode suffix for inverse branch condition.
5145 'E': print opcode suffix for branch on index instruction.
5146 'J': print tls_load/tls_gdcall/tls_ldcall suffix
5147 'G': print the size of the operand in bytes.
5148 'O': print only the displacement of a memory reference.
5149 'R': print only the base register of a memory reference.
5150 'S': print S-type memory reference (base+displacement).
5151 'N': print the second word of a DImode operand.
5152 'M': print the second word of a TImode operand.
5153 'Y': print shift count operand.
5154
5155 'b': print integer X as if it's an unsigned byte.
5156 'c': print integer X as if it's a signed byte.
5157 'x': print integer X as if it's an unsigned halfword.
5158 'h': print integer X as if it's a signed halfword.
5159 'i': print the first nonzero HImode part of X.
5160 'j': print the first HImode part unequal to -1 of X.
5161 'k': print the first nonzero SImode part of X.
5162 'm': print the first SImode part unequal to -1 of X.
5163 'o': print integer X as if it's an unsigned 32-bit word. */
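/* Worked example for the integer codes above: for the CONST_INT -2,
   'b' prints 254, 'c' prints -2, 'x' prints 65534 and 'h' prints -2,
   i.e. the low byte or halfword taken unsigned or signed.  */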
5164
5165 void
5166 print_operand (FILE *file, rtx x, int code)
5167 {
5168 switch (code)
5169 {
5170 case 'C':
5171 fprintf (file, s390_branch_condition_mnemonic (x, FALSE));
5172 return;
5173
5174 case 'D':
5175 fprintf (file, s390_branch_condition_mnemonic (x, TRUE));
5176 return;
5177
5178 case 'E':
5179 if (GET_CODE (x) == LE)
5180 fprintf (file, "l");
5181 else if (GET_CODE (x) == GT)
5182 fprintf (file, "h");
5183 else
5184 gcc_unreachable ();
5185 return;
5186
5187 case 'J':
5188 if (GET_CODE (x) == SYMBOL_REF)
5189 {
5190 fprintf (file, "%s", ":tls_load:");
5191 output_addr_const (file, x);
5192 }
5193 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
5194 {
5195 fprintf (file, "%s", ":tls_gdcall:");
5196 output_addr_const (file, XVECEXP (x, 0, 0));
5197 }
5198 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM)
5199 {
5200 fprintf (file, "%s", ":tls_ldcall:");
5201 assemble_name (file, get_some_local_dynamic_name ());
5202 }
5203 else
5204 gcc_unreachable ();
5205 return;
5206
5207 case 'G':
5208 fprintf (file, "%u", GET_MODE_SIZE (GET_MODE (x)));
5209 return;
5210
5211 case 'O':
5212 {
5213 struct s390_address ad;
5214 int ret;
5215
5216 gcc_assert (GET_CODE (x) == MEM);
5217 ret = s390_decompose_address (XEXP (x, 0), &ad);
5218 gcc_assert (ret);
5219 gcc_assert (!ad.base || REGNO_OK_FOR_BASE_P (REGNO (ad.base)));
5220 gcc_assert (!ad.indx);
5221
5222 if (ad.disp)
5223 output_addr_const (file, ad.disp);
5224 else
5225 fprintf (file, "0");
5226 }
5227 return;
5228
5229 case 'R':
5230 {
5231 struct s390_address ad;
5232 int ret;
5233
5234 gcc_assert (GET_CODE (x) == MEM);
5235 ret = s390_decompose_address (XEXP (x, 0), &ad);
5236 gcc_assert (ret);
5237 gcc_assert (!ad.base || REGNO_OK_FOR_BASE_P (REGNO (ad.base)));
5238 gcc_assert (!ad.indx);
5239
5240 if (ad.base)
5241 fprintf (file, "%s", reg_names[REGNO (ad.base)]);
5242 else
5243 fprintf (file, "0");
5244 }
5245 return;
5246
5247 case 'S':
5248 {
5249 struct s390_address ad;
5250 int ret;
5251
5252 gcc_assert (GET_CODE (x) == MEM);
5253 ret = s390_decompose_address (XEXP (x, 0), &ad);
5254 gcc_assert (ret);
5255 gcc_assert (!ad.base || REGNO_OK_FOR_BASE_P (REGNO (ad.base)));
5256 gcc_assert (!ad.indx);
5257
5258 if (ad.disp)
5259 output_addr_const (file, ad.disp);
5260 else
5261 fprintf (file, "0");
5262
5263 if (ad.base)
5264 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
5265 }
5266 return;
5267
5268 case 'N':
5269 if (GET_CODE (x) == REG)
5270 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
5271 else if (GET_CODE (x) == MEM)
5272 x = change_address (x, VOIDmode, plus_constant (XEXP (x, 0), 4));
5273 else
5274 gcc_unreachable ();
5275 break;
5276
5277 case 'M':
5278 if (GET_CODE (x) == REG)
5279 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
5280 else if (GET_CODE (x) == MEM)
5281 x = change_address (x, VOIDmode, plus_constant (XEXP (x, 0), 8));
5282 else
5283 gcc_unreachable ();
5284 break;
5285
5286 case 'Y':
5287 print_shift_count_operand (file, x);
5288 return;
5289 }
5290
5291 switch (GET_CODE (x))
5292 {
5293 case REG:
5294 fprintf (file, "%s", reg_names[REGNO (x)]);
5295 break;
5296
5297 case MEM:
5298 output_address (XEXP (x, 0));
5299 break;
5300
5301 case CONST:
5302 case CODE_LABEL:
5303 case LABEL_REF:
5304 case SYMBOL_REF:
5305 output_addr_const (file, x);
5306 break;
5307
5308 case CONST_INT:
5309 if (code == 'b')
5310 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xff);
5311 else if (code == 'c')
5312 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ((INTVAL (x) & 0xff) ^ 0x80) - 0x80);
5313 else if (code == 'x')
5314 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xffff);
5315 else if (code == 'h')
5316 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
5317 else if (code == 'i')
5318 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5319 s390_extract_part (x, HImode, 0));
5320 else if (code == 'j')
5321 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5322 s390_extract_part (x, HImode, -1));
5323 else if (code == 'k')
5324 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5325 s390_extract_part (x, SImode, 0));
5326 else if (code == 'm')
5327 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5328 s390_extract_part (x, SImode, -1));
5329 else if (code == 'o')
5330 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xffffffff);
5331 else
5332 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
5333 break;
5334
5335 case CONST_DOUBLE:
5336 gcc_assert (GET_MODE (x) == VOIDmode);
5337 if (code == 'b')
5338 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xff);
5339 else if (code == 'x')
5340 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xffff);
5341 else if (code == 'h')
5342 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ((CONST_DOUBLE_LOW (x) & 0xffff) ^ 0x8000) - 0x8000);
5343 else
5344 gcc_unreachable ();
5345 break;
5346
5347 default:
5348 fatal_insn ("UNKNOWN in print_operand !?", x);
5349 break;
5350 }
5351 }
5352
5353 /* Target hook for assembling integer objects. We need to define it
5354 here to work around a bug in some versions of GAS, which couldn't
5355 handle values smaller than INT_MIN when printed in decimal. */
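/* For instance, a constant just below INT_MIN such as -2147483649 takes
   the hexadecimal .quad path below instead of being printed in decimal,
   which the affected assemblers mis-handled.  */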
5356
5357 static bool
5358 s390_assemble_integer (rtx x, unsigned int size, int aligned_p)
5359 {
5360 if (size == 8 && aligned_p
5361 && GET_CODE (x) == CONST_INT && INTVAL (x) < INT_MIN)
5362 {
5363 fprintf (asm_out_file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n",
5364 INTVAL (x));
5365 return true;
5366 }
5367 return default_assemble_integer (x, size, aligned_p);
5368 }
5369
5370 /* Returns true if register REGNO is used for forming
5371 a memory address in expression X. */
5372
5373 static bool
5374 reg_used_in_mem_p (int regno, rtx x)
5375 {
5376 enum rtx_code code = GET_CODE (x);
5377 int i, j;
5378 const char *fmt;
5379
5380 if (code == MEM)
5381 {
5382 if (refers_to_regno_p (regno, regno+1,
5383 XEXP (x, 0), 0))
5384 return true;
5385 }
5386 else if (code == SET
5387 && GET_CODE (SET_DEST (x)) == PC)
5388 {
5389 if (refers_to_regno_p (regno, regno+1,
5390 SET_SRC (x), 0))
5391 return true;
5392 }
5393
5394 fmt = GET_RTX_FORMAT (code);
5395 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5396 {
5397 if (fmt[i] == 'e'
5398 && reg_used_in_mem_p (regno, XEXP (x, i)))
5399 return true;
5400
5401 else if (fmt[i] == 'E')
5402 for (j = 0; j < XVECLEN (x, i); j++)
5403 if (reg_used_in_mem_p (regno, XVECEXP (x, i, j)))
5404 return true;
5405 }
5406 return false;
5407 }
5408
5409 /* Returns true if expression DEP_RTX sets an address register
5410 used by instruction INSN to address memory. */
5411
5412 static bool
5413 addr_generation_dependency_p (rtx dep_rtx, rtx insn)
5414 {
5415 rtx target, pat;
5416
5417 if (GET_CODE (dep_rtx) == INSN)
5418 dep_rtx = PATTERN (dep_rtx);
5419
5420 if (GET_CODE (dep_rtx) == SET)
5421 {
5422 target = SET_DEST (dep_rtx);
5423 if (GET_CODE (target) == STRICT_LOW_PART)
5424 target = XEXP (target, 0);
5425 while (GET_CODE (target) == SUBREG)
5426 target = SUBREG_REG (target);
5427
5428 if (GET_CODE (target) == REG)
5429 {
5430 int regno = REGNO (target);
5431
5432 if (s390_safe_attr_type (insn) == TYPE_LA)
5433 {
5434 pat = PATTERN (insn);
5435 if (GET_CODE (pat) == PARALLEL)
5436 {
5437 gcc_assert (XVECLEN (pat, 0) == 2);
5438 pat = XVECEXP (pat, 0, 0);
5439 }
5440 gcc_assert (GET_CODE (pat) == SET);
5441 return refers_to_regno_p (regno, regno+1, SET_SRC (pat), 0);
5442 }
5443 else if (get_attr_atype (insn) == ATYPE_AGEN)
5444 return reg_used_in_mem_p (regno, PATTERN (insn));
5445 }
5446 }
5447 return false;
5448 }
5449
5450 /* Return 1 if dep_insn sets a register used by insn in the agen unit. */
5451
5452 int
5453 s390_agen_dep_p (rtx dep_insn, rtx insn)
5454 {
5455 rtx dep_rtx = PATTERN (dep_insn);
5456 int i;
5457
5458 if (GET_CODE (dep_rtx) == SET
5459 && addr_generation_dependency_p (dep_rtx, insn))
5460 return 1;
5461 else if (GET_CODE (dep_rtx) == PARALLEL)
5462 {
5463 for (i = 0; i < XVECLEN (dep_rtx, 0); i++)
5464 {
5465 if (addr_generation_dependency_p (XVECEXP (dep_rtx, 0, i), insn))
5466 return 1;
5467 }
5468 }
5469 return 0;
5470 }
5471
5472
5473 /* A C statement (sans semicolon) to update the integer scheduling priority
5474 INSN_PRIORITY (INSN). Increase the priority to execute the INSN earlier,
5475 reduce the priority to execute INSN later. Do not define this macro if
5476 you do not need to adjust the scheduling priorities of insns.
5477
5478 A STD instruction should be scheduled earlier,
5479 in order to use the bypass. */
5480 static int
5481 s390_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
5482 {
5483 if (! INSN_P (insn))
5484 return priority;
5485
5486 if (s390_tune != PROCESSOR_2084_Z990
5487 && s390_tune != PROCESSOR_2094_Z9_109
5488 && s390_tune != PROCESSOR_2097_Z10
5489 && s390_tune != PROCESSOR_2817_Z196)
5490 return priority;
5491
5492 switch (s390_safe_attr_type (insn))
5493 {
5494 case TYPE_FSTOREDF:
5495 case TYPE_FSTORESF:
5496 priority = priority << 3;
5497 break;
5498 case TYPE_STORE:
5499 case TYPE_STM:
5500 priority = priority << 1;
5501 break;
5502 default:
5503 break;
5504 }
5505 return priority;
5506 }
5507
5508
5509 /* The number of instructions that can be issued per cycle. */
5510
5511 static int
5512 s390_issue_rate (void)
5513 {
5514 switch (s390_tune)
5515 {
5516 case PROCESSOR_2084_Z990:
5517 case PROCESSOR_2094_Z9_109:
5518 case PROCESSOR_2817_Z196:
5519 return 3;
5520 case PROCESSOR_2097_Z10:
5521 return 2;
5522 default:
5523 return 1;
5524 }
5525 }
5526
5527 static int
5528 s390_first_cycle_multipass_dfa_lookahead (void)
5529 {
5530 return 4;
5531 }
5532
5533 /* Annotate every literal pool reference in X by an UNSPEC_LTREF expression.
5534 Fix up MEMs as required. */
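/* Illustrative transformation (RTL sketch):

     (mem (symbol_ref))   becomes
     (mem (unspec [(symbol_ref) (base_reg)] UNSPEC_LTREF))

   so that later passes can see which base register a literal pool access
   depends on.  */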
5535
5536 static void
5537 annotate_constant_pool_refs (rtx *x)
5538 {
5539 int i, j;
5540 const char *fmt;
5541
5542 gcc_assert (GET_CODE (*x) != SYMBOL_REF
5543 || !CONSTANT_POOL_ADDRESS_P (*x));
5544
5545 /* Literal pool references can only occur inside a MEM ... */
5546 if (GET_CODE (*x) == MEM)
5547 {
5548 rtx memref = XEXP (*x, 0);
5549
5550 if (GET_CODE (memref) == SYMBOL_REF
5551 && CONSTANT_POOL_ADDRESS_P (memref))
5552 {
5553 rtx base = cfun->machine->base_reg;
5554 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, memref, base),
5555 UNSPEC_LTREF);
5556
5557 *x = replace_equiv_address (*x, addr);
5558 return;
5559 }
5560
5561 if (GET_CODE (memref) == CONST
5562 && GET_CODE (XEXP (memref, 0)) == PLUS
5563 && GET_CODE (XEXP (XEXP (memref, 0), 1)) == CONST_INT
5564 && GET_CODE (XEXP (XEXP (memref, 0), 0)) == SYMBOL_REF
5565 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (memref, 0), 0)))
5566 {
5567 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (memref, 0), 1));
5568 rtx sym = XEXP (XEXP (memref, 0), 0);
5569 rtx base = cfun->machine->base_reg;
5570 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5571 UNSPEC_LTREF);
5572
5573 *x = replace_equiv_address (*x, plus_constant (addr, off));
5574 return;
5575 }
5576 }
5577
5578 /* ... or a load-address type pattern. */
5579 if (GET_CODE (*x) == SET)
5580 {
5581 rtx addrref = SET_SRC (*x);
5582
5583 if (GET_CODE (addrref) == SYMBOL_REF
5584 && CONSTANT_POOL_ADDRESS_P (addrref))
5585 {
5586 rtx base = cfun->machine->base_reg;
5587 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addrref, base),
5588 UNSPEC_LTREF);
5589
5590 SET_SRC (*x) = addr;
5591 return;
5592 }
5593
5594 if (GET_CODE (addrref) == CONST
5595 && GET_CODE (XEXP (addrref, 0)) == PLUS
5596 && GET_CODE (XEXP (XEXP (addrref, 0), 1)) == CONST_INT
5597 && GET_CODE (XEXP (XEXP (addrref, 0), 0)) == SYMBOL_REF
5598 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (addrref, 0), 0)))
5599 {
5600 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (addrref, 0), 1));
5601 rtx sym = XEXP (XEXP (addrref, 0), 0);
5602 rtx base = cfun->machine->base_reg;
5603 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5604 UNSPEC_LTREF);
5605
5606 SET_SRC (*x) = plus_constant (addr, off);
5607 return;
5608 }
5609 }
5610
5611 /* Annotate LTREL_BASE as well. */
5612 if (GET_CODE (*x) == UNSPEC
5613 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
5614 {
5615 rtx base = cfun->machine->base_reg;
5616 *x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XVECEXP (*x, 0, 0), base),
5617 UNSPEC_LTREL_BASE);
5618 return;
5619 }
5620
5621 fmt = GET_RTX_FORMAT (GET_CODE (*x));
5622 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
5623 {
5624 if (fmt[i] == 'e')
5625 {
5626 annotate_constant_pool_refs (&XEXP (*x, i));
5627 }
5628 else if (fmt[i] == 'E')
5629 {
5630 for (j = 0; j < XVECLEN (*x, i); j++)
5631 annotate_constant_pool_refs (&XVECEXP (*x, i, j));
5632 }
5633 }
5634 }
5635
5636 /* Split all branches that exceed the maximum distance.
5637 Returns true if this created a new literal pool entry. */
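/* Illustrative effect: a conditional branch whose target lies more than
   64KB away is rewritten so that the target address is first loaded from
   the literal pool into the return register and the branch then goes
   through that register (directly for non-PIC code, base-relative for
   PIC code).  */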
5638
5639 static int
5640 s390_split_branches (void)
5641 {
5642 rtx temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
5643 int new_literal = 0, ret;
5644 rtx insn, pat, tmp, target;
5645 rtx *label;
5646
5647 /* We need correct insn addresses. */
5648
5649 shorten_branches (get_insns ());
5650
5651 /* Find all branches that exceed 64KB, and split them. */
5652
5653 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5654 {
5655 if (GET_CODE (insn) != JUMP_INSN)
5656 continue;
5657
5658 pat = PATTERN (insn);
5659 if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 2)
5660 pat = XVECEXP (pat, 0, 0);
5661 if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
5662 continue;
5663
5664 if (GET_CODE (SET_SRC (pat)) == LABEL_REF)
5665 {
5666 label = &SET_SRC (pat);
5667 }
5668 else if (GET_CODE (SET_SRC (pat)) == IF_THEN_ELSE)
5669 {
5670 if (GET_CODE (XEXP (SET_SRC (pat), 1)) == LABEL_REF)
5671 label = &XEXP (SET_SRC (pat), 1);
5672 else if (GET_CODE (XEXP (SET_SRC (pat), 2)) == LABEL_REF)
5673 label = &XEXP (SET_SRC (pat), 2);
5674 else
5675 continue;
5676 }
5677 else
5678 continue;
5679
5680 if (get_attr_length (insn) <= 4)
5681 continue;
5682
5683 /* We are going to use the return register as scratch register,
5684 make sure it will be saved/restored by the prologue/epilogue. */
5685 cfun_frame_layout.save_return_addr_p = 1;
5686
5687 if (!flag_pic)
5688 {
5689 new_literal = 1;
5690 tmp = force_const_mem (Pmode, *label);
5691 tmp = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, tmp), insn);
5692 INSN_ADDRESSES_NEW (tmp, -1);
5693 annotate_constant_pool_refs (&PATTERN (tmp));
5694
5695 target = temp_reg;
5696 }
5697 else
5698 {
5699 new_literal = 1;
5700 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, *label),
5701 UNSPEC_LTREL_OFFSET);
5702 target = gen_rtx_CONST (Pmode, target);
5703 target = force_const_mem (Pmode, target);
5704 tmp = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, target), insn);
5705 INSN_ADDRESSES_NEW (tmp, -1);
5706 annotate_constant_pool_refs (&PATTERN (tmp));
5707
5708 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XEXP (target, 0),
5709 cfun->machine->base_reg),
5710 UNSPEC_LTREL_BASE);
5711 target = gen_rtx_PLUS (Pmode, temp_reg, target);
5712 }
5713
5714 ret = validate_change (insn, label, target, 0);
5715 gcc_assert (ret);
5716 }
5717
5718 return new_literal;
5719 }
5720
5721
5722 /* Find an annotated literal pool symbol referenced in RTX X,
5723 and store it at REF. Will abort if X contains references to
5724 more than one such pool symbol; multiple references to the same
5725 symbol are allowed, however.
5726
5727 The rtx pointed to by REF must be initialized to NULL_RTX
5728 by the caller before calling this routine. */
5729
5730 static void
5731 find_constant_pool_ref (rtx x, rtx *ref)
5732 {
5733 int i, j;
5734 const char *fmt;
5735
5736 /* Ignore LTREL_BASE references. */
5737 if (GET_CODE (x) == UNSPEC
5738 && XINT (x, 1) == UNSPEC_LTREL_BASE)
5739 return;
5740 /* Likewise POOL_ENTRY insns. */
5741 if (GET_CODE (x) == UNSPEC_VOLATILE
5742 && XINT (x, 1) == UNSPECV_POOL_ENTRY)
5743 return;
5744
5745 gcc_assert (GET_CODE (x) != SYMBOL_REF
5746 || !CONSTANT_POOL_ADDRESS_P (x));
5747
5748 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_LTREF)
5749 {
5750 rtx sym = XVECEXP (x, 0, 0);
5751 gcc_assert (GET_CODE (sym) == SYMBOL_REF
5752 && CONSTANT_POOL_ADDRESS_P (sym));
5753
5754 if (*ref == NULL_RTX)
5755 *ref = sym;
5756 else
5757 gcc_assert (*ref == sym);
5758
5759 return;
5760 }
5761
5762 fmt = GET_RTX_FORMAT (GET_CODE (x));
5763 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5764 {
5765 if (fmt[i] == 'e')
5766 {
5767 find_constant_pool_ref (XEXP (x, i), ref);
5768 }
5769 else if (fmt[i] == 'E')
5770 {
5771 for (j = 0; j < XVECLEN (x, i); j++)
5772 find_constant_pool_ref (XVECEXP (x, i, j), ref);
5773 }
5774 }
5775 }
5776
5777 /* Replace every reference to the annotated literal pool
5778 symbol REF in X by its base plus OFFSET. */
5779
5780 static void
5781 replace_constant_pool_ref (rtx *x, rtx ref, rtx offset)
5782 {
5783 int i, j;
5784 const char *fmt;
5785
5786 gcc_assert (*x != ref);
5787
5788 if (GET_CODE (*x) == UNSPEC
5789 && XINT (*x, 1) == UNSPEC_LTREF
5790 && XVECEXP (*x, 0, 0) == ref)
5791 {
5792 *x = gen_rtx_PLUS (Pmode, XVECEXP (*x, 0, 1), offset);
5793 return;
5794 }
5795
5796 if (GET_CODE (*x) == PLUS
5797 && GET_CODE (XEXP (*x, 1)) == CONST_INT
5798 && GET_CODE (XEXP (*x, 0)) == UNSPEC
5799 && XINT (XEXP (*x, 0), 1) == UNSPEC_LTREF
5800 && XVECEXP (XEXP (*x, 0), 0, 0) == ref)
5801 {
5802 rtx addr = gen_rtx_PLUS (Pmode, XVECEXP (XEXP (*x, 0), 0, 1), offset);
5803 *x = plus_constant (addr, INTVAL (XEXP (*x, 1)));
5804 return;
5805 }
5806
5807 fmt = GET_RTX_FORMAT (GET_CODE (*x));
5808 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
5809 {
5810 if (fmt[i] == 'e')
5811 {
5812 replace_constant_pool_ref (&XEXP (*x, i), ref, offset);
5813 }
5814 else if (fmt[i] == 'E')
5815 {
5816 for (j = 0; j < XVECLEN (*x, i); j++)
5817 replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, offset);
5818 }
5819 }
5820 }
5821
5822 /* Check whether X contains an UNSPEC_LTREL_BASE.
5823 Return its constant pool symbol if found, NULL_RTX otherwise. */
5824
5825 static rtx
5826 find_ltrel_base (rtx x)
5827 {
5828 int i, j;
5829 const char *fmt;
5830
5831 if (GET_CODE (x) == UNSPEC
5832 && XINT (x, 1) == UNSPEC_LTREL_BASE)
5833 return XVECEXP (x, 0, 0);
5834
5835 fmt = GET_RTX_FORMAT (GET_CODE (x));
5836 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5837 {
5838 if (fmt[i] == 'e')
5839 {
5840 rtx fnd = find_ltrel_base (XEXP (x, i));
5841 if (fnd)
5842 return fnd;
5843 }
5844 else if (fmt[i] == 'E')
5845 {
5846 for (j = 0; j < XVECLEN (x, i); j++)
5847 {
5848 rtx fnd = find_ltrel_base (XVECEXP (x, i, j));
5849 if (fnd)
5850 return fnd;
5851 }
5852 }
5853 }
5854
5855 return NULL_RTX;
5856 }
5857
5858 /* Replace any occurrence of UNSPEC_LTREL_BASE in X with its base. */
5859
5860 static void
5861 replace_ltrel_base (rtx *x)
5862 {
5863 int i, j;
5864 const char *fmt;
5865
5866 if (GET_CODE (*x) == UNSPEC
5867 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
5868 {
5869 *x = XVECEXP (*x, 0, 1);
5870 return;
5871 }
5872
5873 fmt = GET_RTX_FORMAT (GET_CODE (*x));
5874 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
5875 {
5876 if (fmt[i] == 'e')
5877 {
5878 replace_ltrel_base (&XEXP (*x, i));
5879 }
5880 else if (fmt[i] == 'E')
5881 {
5882 for (j = 0; j < XVECLEN (*x, i); j++)
5883 replace_ltrel_base (&XVECEXP (*x, i, j));
5884 }
5885 }
5886 }
5887
5888
5889 /* We keep a list of constants which we have to add to internal
5890 constant tables in the middle of large functions. */
5891
5892 #define NR_C_MODES 11
5893 enum machine_mode constant_modes[NR_C_MODES] =
5894 {
5895 TFmode, TImode, TDmode,
5896 DFmode, DImode, DDmode,
5897 SFmode, SImode, SDmode,
5898 HImode,
5899 QImode
5900 };
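 
 /* The modes above are listed in order of decreasing size and hence
    decreasing alignment requirement; s390_dump_pool relies on this
    ordering when emitting the pool entries. */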
5901
5902 struct constant
5903 {
5904 struct constant *next;
5905 rtx value;
5906 rtx label;
5907 };
5908
5909 struct constant_pool
5910 {
5911 struct constant_pool *next;
5912 rtx first_insn;
5913 rtx pool_insn;
5914 bitmap insns;
5915 rtx emit_pool_after;
5916
5917 struct constant *constants[NR_C_MODES];
5918 struct constant *execute;
5919 rtx label;
5920 int size;
5921 };
5922
5923 /* Allocate new constant_pool structure. */
5924
5925 static struct constant_pool *
5926 s390_alloc_pool (void)
5927 {
5928 struct constant_pool *pool;
5929 int i;
5930
5931 pool = (struct constant_pool *) xmalloc (sizeof *pool);
5932 pool->next = NULL;
5933 for (i = 0; i < NR_C_MODES; i++)
5934 pool->constants[i] = NULL;
5935
5936 pool->execute = NULL;
5937 pool->label = gen_label_rtx ();
5938 pool->first_insn = NULL_RTX;
5939 pool->pool_insn = NULL_RTX;
5940 pool->insns = BITMAP_ALLOC (NULL);
5941 pool->size = 0;
5942 pool->emit_pool_after = NULL_RTX;
5943
5944 return pool;
5945 }
5946
5947 /* Create new constant pool covering instructions starting at INSN
5948 and chain it to the end of POOL_LIST. */
5949
5950 static struct constant_pool *
5951 s390_start_pool (struct constant_pool **pool_list, rtx insn)
5952 {
5953 struct constant_pool *pool, **prev;
5954
5955 pool = s390_alloc_pool ();
5956 pool->first_insn = insn;
5957
5958 for (prev = pool_list; *prev; prev = &(*prev)->next)
5959 ;
5960 *prev = pool;
5961
5962 return pool;
5963 }
5964
5965 /* End range of instructions covered by POOL at INSN and emit
5966 placeholder insn representing the pool. */
5967
5968 static void
5969 s390_end_pool (struct constant_pool *pool, rtx insn)
5970 {
5971 rtx pool_size = GEN_INT (pool->size + 8 /* alignment slop */);
5972
5973 if (!insn)
5974 insn = get_last_insn ();
5975
5976 pool->pool_insn = emit_insn_after (gen_pool (pool_size), insn);
5977 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
5978 }
5979
5980 /* Add INSN to the list of insns covered by POOL. */
5981
5982 static void
5983 s390_add_pool_insn (struct constant_pool *pool, rtx insn)
5984 {
5985 bitmap_set_bit (pool->insns, INSN_UID (insn));
5986 }
5987
5988 /* Return pool out of POOL_LIST that covers INSN. */
5989
5990 static struct constant_pool *
5991 s390_find_pool (struct constant_pool *pool_list, rtx insn)
5992 {
5993 struct constant_pool *pool;
5994
5995 for (pool = pool_list; pool; pool = pool->next)
5996 if (bitmap_bit_p (pool->insns, INSN_UID (insn)))
5997 break;
5998
5999 return pool;
6000 }
6001
6002 /* Add constant VAL of mode MODE to the constant pool POOL. */
6003
6004 static void
6005 s390_add_constant (struct constant_pool *pool, rtx val, enum machine_mode mode)
6006 {
6007 struct constant *c;
6008 int i;
6009
6010 for (i = 0; i < NR_C_MODES; i++)
6011 if (constant_modes[i] == mode)
6012 break;
6013 gcc_assert (i != NR_C_MODES);
6014
6015 for (c = pool->constants[i]; c != NULL; c = c->next)
6016 if (rtx_equal_p (val, c->value))
6017 break;
6018
6019 if (c == NULL)
6020 {
6021 c = (struct constant *) xmalloc (sizeof *c);
6022 c->value = val;
6023 c->label = gen_label_rtx ();
6024 c->next = pool->constants[i];
6025 pool->constants[i] = c;
6026 pool->size += GET_MODE_SIZE (mode);
6027 }
6028 }
6029
6030 /* Return an rtx that represents the offset of X from the start of
6031 pool POOL. */
6032
6033 static rtx
6034 s390_pool_offset (struct constant_pool *pool, rtx x)
6035 {
6036 rtx label;
6037
6038 label = gen_rtx_LABEL_REF (GET_MODE (x), pool->label);
6039 x = gen_rtx_UNSPEC (GET_MODE (x), gen_rtvec (2, x, label),
6040 UNSPEC_POOL_OFFSET);
6041 return gen_rtx_CONST (GET_MODE (x), x);
6042 }
6043
6044 /* Find constant VAL of mode MODE in the constant pool POOL.
6045 Return an RTX describing the distance from the start of
6046 the pool to the location of the new constant. */
6047
6048 static rtx
6049 s390_find_constant (struct constant_pool *pool, rtx val,
6050 enum machine_mode mode)
6051 {
6052 struct constant *c;
6053 int i;
6054
6055 for (i = 0; i < NR_C_MODES; i++)
6056 if (constant_modes[i] == mode)
6057 break;
6058 gcc_assert (i != NR_C_MODES);
6059
6060 for (c = pool->constants[i]; c != NULL; c = c->next)
6061 if (rtx_equal_p (val, c->value))
6062 break;
6063
6064 gcc_assert (c);
6065
6066 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
6067 }
6068
6069 /* Check whether INSN is an execute. Return the label_ref to its
6070 execute target template if so, NULL_RTX otherwise. */
6071
6072 static rtx
6073 s390_execute_label (rtx insn)
6074 {
6075 if (GET_CODE (insn) == INSN
6076 && GET_CODE (PATTERN (insn)) == PARALLEL
6077 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == UNSPEC
6078 && XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_EXECUTE)
6079 return XVECEXP (XVECEXP (PATTERN (insn), 0, 0), 0, 2);
6080
6081 return NULL_RTX;
6082 }
6083
6084 /* Add execute target for INSN to the constant pool POOL. */
6085
6086 static void
6087 s390_add_execute (struct constant_pool *pool, rtx insn)
6088 {
6089 struct constant *c;
6090
6091 for (c = pool->execute; c != NULL; c = c->next)
6092 if (INSN_UID (insn) == INSN_UID (c->value))
6093 break;
6094
6095 if (c == NULL)
6096 {
6097 c = (struct constant *) xmalloc (sizeof *c);
6098 c->value = insn;
6099 c->label = gen_label_rtx ();
6100 c->next = pool->execute;
6101 pool->execute = c;
6102 pool->size += 6;
6103 }
6104 }
6105
6106 /* Find execute target for INSN in the constant pool POOL.
6107 Return an RTX describing the distance from the start of
6108 the pool to the location of the execute target. */
6109
6110 static rtx
6111 s390_find_execute (struct constant_pool *pool, rtx insn)
6112 {
6113 struct constant *c;
6114
6115 for (c = pool->execute; c != NULL; c = c->next)
6116 if (INSN_UID (insn) == INSN_UID (c->value))
6117 break;
6118
6119 gcc_assert (c);
6120
6121 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
6122 }
6123
6124 /* For an execute INSN, extract the execute target template. */
6125
6126 static rtx
6127 s390_execute_target (rtx insn)
6128 {
6129 rtx pattern = PATTERN (insn);
6130 gcc_assert (s390_execute_label (insn));
6131
6132 if (XVECLEN (pattern, 0) == 2)
6133 {
6134 pattern = copy_rtx (XVECEXP (pattern, 0, 1));
6135 }
6136 else
6137 {
6138 rtvec vec = rtvec_alloc (XVECLEN (pattern, 0) - 1);
6139 int i;
6140
6141 for (i = 0; i < XVECLEN (pattern, 0) - 1; i++)
6142 RTVEC_ELT (vec, i) = copy_rtx (XVECEXP (pattern, 0, i + 1));
6143
6144 pattern = gen_rtx_PARALLEL (VOIDmode, vec);
6145 }
6146
6147 return pattern;
6148 }
6149
6150 /* Indicate that INSN cannot be duplicated. This is the case for
6151 execute insns that carry a unique label. */
6152
6153 static bool
6154 s390_cannot_copy_insn_p (rtx insn)
6155 {
6156 rtx label = s390_execute_label (insn);
6157 return label && label != const0_rtx;
6158 }
6159
6160 /* Dump out the constants in POOL. If REMOTE_LABEL is true,
6161 do not emit the pool base label. */
6162
6163 static void
6164 s390_dump_pool (struct constant_pool *pool, bool remote_label)
6165 {
6166 struct constant *c;
6167 rtx insn = pool->pool_insn;
6168 int i;
6169
6170 /* Switch to rodata section. */
6171 if (TARGET_CPU_ZARCH)
6172 {
6173 insn = emit_insn_after (gen_pool_section_start (), insn);
6174 INSN_ADDRESSES_NEW (insn, -1);
6175 }
6176
6177 /* Ensure minimum pool alignment. */
6178 if (TARGET_CPU_ZARCH)
6179 insn = emit_insn_after (gen_pool_align (GEN_INT (8)), insn);
6180 else
6181 insn = emit_insn_after (gen_pool_align (GEN_INT (4)), insn);
6182 INSN_ADDRESSES_NEW (insn, -1);
6183
6184 /* Emit pool base label. */
6185 if (!remote_label)
6186 {
6187 insn = emit_label_after (pool->label, insn);
6188 INSN_ADDRESSES_NEW (insn, -1);
6189 }
6190
6191 /* Dump constants in descending alignment requirement order,
6192 ensuring proper alignment for every constant. */
6193 for (i = 0; i < NR_C_MODES; i++)
6194 for (c = pool->constants[i]; c; c = c->next)
6195 {
6196 /* Convert UNSPEC_LTREL_OFFSET unspecs to pool-relative references. */
6197 rtx value = copy_rtx (c->value);
6198 if (GET_CODE (value) == CONST
6199 && GET_CODE (XEXP (value, 0)) == UNSPEC
6200 && XINT (XEXP (value, 0), 1) == UNSPEC_LTREL_OFFSET
6201 && XVECLEN (XEXP (value, 0), 0) == 1)
6202 value = s390_pool_offset (pool, XVECEXP (XEXP (value, 0), 0, 0));
6203
6204 insn = emit_label_after (c->label, insn);
6205 INSN_ADDRESSES_NEW (insn, -1);
6206
6207 value = gen_rtx_UNSPEC_VOLATILE (constant_modes[i],
6208 gen_rtvec (1, value),
6209 UNSPECV_POOL_ENTRY);
6210 insn = emit_insn_after (value, insn);
6211 INSN_ADDRESSES_NEW (insn, -1);
6212 }
6213
6214 /* Ensure minimum alignment for instructions. */
6215 insn = emit_insn_after (gen_pool_align (GEN_INT (2)), insn);
6216 INSN_ADDRESSES_NEW (insn, -1);
6217
6218 /* Output in-pool execute template insns. */
6219 for (c = pool->execute; c; c = c->next)
6220 {
6221 insn = emit_label_after (c->label, insn);
6222 INSN_ADDRESSES_NEW (insn, -1);
6223
6224 insn = emit_insn_after (s390_execute_target (c->value), insn);
6225 INSN_ADDRESSES_NEW (insn, -1);
6226 }
6227
6228 /* Switch back to previous section. */
6229 if (TARGET_CPU_ZARCH)
6230 {
6231 insn = emit_insn_after (gen_pool_section_end (), insn);
6232 INSN_ADDRESSES_NEW (insn, -1);
6233 }
6234
6235 insn = emit_barrier_after (insn);
6236 INSN_ADDRESSES_NEW (insn, -1);
6237
6238 /* Remove placeholder insn. */
6239 remove_insn (pool->pool_insn);
6240 }
6241
6242 /* Free all memory used by POOL. */
6243
6244 static void
6245 s390_free_pool (struct constant_pool *pool)
6246 {
6247 struct constant *c, *next;
6248 int i;
6249
6250 for (i = 0; i < NR_C_MODES; i++)
6251 for (c = pool->constants[i]; c; c = next)
6252 {
6253 next = c->next;
6254 free (c);
6255 }
6256
6257 for (c = pool->execute; c; c = next)
6258 {
6259 next = c->next;
6260 free (c);
6261 }
6262
6263 BITMAP_FREE (pool->insns);
6264 free (pool);
6265 }
6266
6267
6268 /* Collect main literal pool. Return NULL on overflow. */
6269
6270 static struct constant_pool *
6271 s390_mainpool_start (void)
6272 {
6273 struct constant_pool *pool;
6274 rtx insn;
6275
6276 pool = s390_alloc_pool ();
6277
6278 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6279 {
6280 if (GET_CODE (insn) == INSN
6281 && GET_CODE (PATTERN (insn)) == SET
6282 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC_VOLATILE
6283 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPECV_MAIN_POOL)
6284 {
6285 gcc_assert (!pool->pool_insn);
6286 pool->pool_insn = insn;
6287 }
6288
6289 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
6290 {
6291 s390_add_execute (pool, insn);
6292 }
6293 else if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6294 {
6295 rtx pool_ref = NULL_RTX;
6296 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6297 if (pool_ref)
6298 {
6299 rtx constant = get_pool_constant (pool_ref);
6300 enum machine_mode mode = get_pool_mode (pool_ref);
6301 s390_add_constant (pool, constant, mode);
6302 }
6303 }
6304
6305 /* If hot/cold partitioning is enabled we have to make sure that
6306 the literal pool is emitted in the same section where the
6307 initialization of the literal pool base pointer takes place.
6308 emit_pool_after is only used in the non-overflow case on
6309 non-zarch CPUs, where we can emit the literal pool at the end of the
6310 function body within the text section. */
6311 if (NOTE_P (insn)
6312 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS
6313 && !pool->emit_pool_after)
6314 pool->emit_pool_after = PREV_INSN (insn);
6315 }
6316
6317 gcc_assert (pool->pool_insn || pool->size == 0);
6318
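  /* The literal pool is addressed with 12-bit unsigned displacements from
     the pool base register, so only the first 4096 bytes are directly
     reachable.  Larger pools cannot use a single base and have to be
     split into chunks instead. */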
6319 if (pool->size >= 4096)
6320 {
6321 /* We're going to chunkify the pool, so remove the main
6322 pool placeholder insn. */
6323 remove_insn (pool->pool_insn);
6324
6325 s390_free_pool (pool);
6326 pool = NULL;
6327 }
6328
6329 /* If the function ends with the section where the literal pool
6330 should be emitted, set the marker to its end. */
6331 if (pool && !pool->emit_pool_after)
6332 pool->emit_pool_after = get_last_insn ();
6333
6334 return pool;
6335 }
6336
6337 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6338 Modify the current function to output the pool constants as well as
6339 the pool register setup instruction. */
6340
6341 static void
6342 s390_mainpool_finish (struct constant_pool *pool)
6343 {
6344 rtx base_reg = cfun->machine->base_reg;
6345 rtx insn;
6346
6347 /* If the pool is empty, we're done. */
6348 if (pool->size == 0)
6349 {
6350 /* We don't actually need a base register after all. */
6351 cfun->machine->base_reg = NULL_RTX;
6352
6353 if (pool->pool_insn)
6354 remove_insn (pool->pool_insn);
6355 s390_free_pool (pool);
6356 return;
6357 }
6358
6359 /* We need correct insn addresses. */
6360 shorten_branches (get_insns ());
6361
6362 /* On zSeries, we use a LARL to load the pool register. The pool is
6363 located in the .rodata section, so we emit it after the function. */
6364 if (TARGET_CPU_ZARCH)
6365 {
6366 insn = gen_main_base_64 (base_reg, pool->label);
6367 insn = emit_insn_after (insn, pool->pool_insn);
6368 INSN_ADDRESSES_NEW (insn, -1);
6369 remove_insn (pool->pool_insn);
6370
6371 insn = get_last_insn ();
6372 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6373 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6374
6375 s390_dump_pool (pool, 0);
6376 }
6377
6378 /* On S/390, if the total size of the function's code plus literal pool
6379 does not exceed 4096 bytes, we use BASR to set up a function base
6380 pointer, and emit the literal pool at the end of the function. */
6381 else if (INSN_ADDRESSES (INSN_UID (pool->emit_pool_after))
6382 + pool->size + 8 /* alignment slop */ < 4096)
6383 {
6384 insn = gen_main_base_31_small (base_reg, pool->label);
6385 insn = emit_insn_after (insn, pool->pool_insn);
6386 INSN_ADDRESSES_NEW (insn, -1);
6387 remove_insn (pool->pool_insn);
6388
6389 insn = emit_label_after (pool->label, insn);
6390 INSN_ADDRESSES_NEW (insn, -1);
6391
6392 /* emit_pool_after will be set by s390_mainpool_start to the
6393 last insn of the section where the literal pool should be
6394 emitted. */
6395 insn = pool->emit_pool_after;
6396
6397 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6398 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6399
6400 s390_dump_pool (pool, 1);
6401 }
6402
6403 /* Otherwise, we emit an inline literal pool and use BASR to branch
6404 over it, setting up the pool register at the same time. */
6405 else
6406 {
6407 rtx pool_end = gen_label_rtx ();
6408
6409 insn = gen_main_base_31_large (base_reg, pool->label, pool_end);
6410 insn = emit_insn_after (insn, pool->pool_insn);
6411 INSN_ADDRESSES_NEW (insn, -1);
6412 remove_insn (pool->pool_insn);
6413
6414 insn = emit_label_after (pool->label, insn);
6415 INSN_ADDRESSES_NEW (insn, -1);
6416
6417 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6418 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6419
6420 insn = emit_label_after (pool_end, pool->pool_insn);
6421 INSN_ADDRESSES_NEW (insn, -1);
6422
6423 s390_dump_pool (pool, 1);
6424 }
6425
6426
6427 /* Replace all literal pool references. */
6428
6429 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6430 {
6431 if (INSN_P (insn))
6432 replace_ltrel_base (&PATTERN (insn));
6433
6434 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6435 {
6436 rtx addr, pool_ref = NULL_RTX;
6437 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6438 if (pool_ref)
6439 {
6440 if (s390_execute_label (insn))
6441 addr = s390_find_execute (pool, insn);
6442 else
6443 addr = s390_find_constant (pool, get_pool_constant (pool_ref),
6444 get_pool_mode (pool_ref));
6445
6446 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
6447 INSN_CODE (insn) = -1;
6448 }
6449 }
6450 }
6451
6452
6453 /* Free the pool. */
6454 s390_free_pool (pool);
6455 }
6456
6457 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6458 We have decided we cannot use this pool, so revert all changes
6459 to the current function that were done by s390_mainpool_start. */
6460 static void
6461 s390_mainpool_cancel (struct constant_pool *pool)
6462 {
6463 /* We didn't actually change the instruction stream, so simply
6464 free the pool memory. */
6465 s390_free_pool (pool);
6466 }
6467
6468
6469 /* Chunkify the literal pool. */
6470
6471 #define S390_POOL_CHUNK_MIN 0xc00
6472 #define S390_POOL_CHUNK_MAX 0xe00
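 
 /* Chunks are kept between 0xc00 (3072) and 0xe00 (3584) bytes, which
    leaves headroom below the 4096-byte displacement limit for alignment
    padding and for execute templates added to a chunk. */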
6473
6474 static struct constant_pool *
6475 s390_chunkify_start (void)
6476 {
6477 struct constant_pool *curr_pool = NULL, *pool_list = NULL;
6478 int extra_size = 0;
6479 bitmap far_labels;
6480 rtx pending_ltrel = NULL_RTX;
6481 rtx insn;
6482
6483 rtx (*gen_reload_base) (rtx, rtx) =
6484 TARGET_CPU_ZARCH? gen_reload_base_64 : gen_reload_base_31;
6485
6486
6487 /* We need correct insn addresses. */
6488
6489 shorten_branches (get_insns ());
6490
6491 /* Scan all insns and move literals to pool chunks. */
6492
6493 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6494 {
6495 bool section_switch_p = false;
6496
6497 /* Check for pending LTREL_BASE. */
6498 if (INSN_P (insn))
6499 {
6500 rtx ltrel_base = find_ltrel_base (PATTERN (insn));
6501 if (ltrel_base)
6502 {
6503 gcc_assert (ltrel_base == pending_ltrel);
6504 pending_ltrel = NULL_RTX;
6505 }
6506 }
6507
6508 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
6509 {
6510 if (!curr_pool)
6511 curr_pool = s390_start_pool (&pool_list, insn);
6512
6513 s390_add_execute (curr_pool, insn);
6514 s390_add_pool_insn (curr_pool, insn);
6515 }
6516 else if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6517 {
6518 rtx pool_ref = NULL_RTX;
6519 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6520 if (pool_ref)
6521 {
6522 rtx constant = get_pool_constant (pool_ref);
6523 enum machine_mode mode = get_pool_mode (pool_ref);
6524
6525 if (!curr_pool)
6526 curr_pool = s390_start_pool (&pool_list, insn);
6527
6528 s390_add_constant (curr_pool, constant, mode);
6529 s390_add_pool_insn (curr_pool, insn);
6530
6531 /* Don't split the pool chunk between a LTREL_OFFSET load
6532 and the corresponding LTREL_BASE. */
6533 if (GET_CODE (constant) == CONST
6534 && GET_CODE (XEXP (constant, 0)) == UNSPEC
6535 && XINT (XEXP (constant, 0), 1) == UNSPEC_LTREL_OFFSET)
6536 {
6537 gcc_assert (!pending_ltrel);
6538 pending_ltrel = pool_ref;
6539 }
6540 }
6541 }
6542
6543 if (GET_CODE (insn) == JUMP_INSN || GET_CODE (insn) == CODE_LABEL)
6544 {
6545 if (curr_pool)
6546 s390_add_pool_insn (curr_pool, insn);
6547 /* An LTREL_BASE must follow within the same basic block. */
6548 gcc_assert (!pending_ltrel);
6549 }
6550
6551 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
6552 section_switch_p = true;
6553
6554 if (!curr_pool
6555 || INSN_ADDRESSES_SIZE () <= (size_t) INSN_UID (insn)
6556 || INSN_ADDRESSES (INSN_UID (insn)) == -1)
6557 continue;
6558
6559 if (TARGET_CPU_ZARCH)
6560 {
6561 if (curr_pool->size < S390_POOL_CHUNK_MAX)
6562 continue;
6563
6564 s390_end_pool (curr_pool, NULL_RTX);
6565 curr_pool = NULL;
6566 }
6567 else
6568 {
6569 int chunk_size = INSN_ADDRESSES (INSN_UID (insn))
6570 - INSN_ADDRESSES (INSN_UID (curr_pool->first_insn))
6571 + extra_size;
6572
6573 /* We will later have to insert base register reload insns.
6574 Those will have an effect on code size, which we need to
6575 consider here. This calculation makes rather pessimistic
6576 worst-case assumptions. */
6577 if (GET_CODE (insn) == CODE_LABEL)
6578 extra_size += 6;
6579
6580 if (chunk_size < S390_POOL_CHUNK_MIN
6581 && curr_pool->size < S390_POOL_CHUNK_MIN
6582 && !section_switch_p)
6583 continue;
6584
6585 /* Pool chunks can only be inserted after BARRIERs ... */
6586 if (GET_CODE (insn) == BARRIER)
6587 {
6588 s390_end_pool (curr_pool, insn);
6589 curr_pool = NULL;
6590 extra_size = 0;
6591 }
6592
6593 /* ... so if we don't find one in time, create one. */
6594 else if (chunk_size > S390_POOL_CHUNK_MAX
6595 || curr_pool->size > S390_POOL_CHUNK_MAX
6596 || section_switch_p)
6597 {
6598 rtx label, jump, barrier;
6599
6600 if (!section_switch_p)
6601 {
6602 /* We can insert the barrier only after a 'real' insn. */
6603 if (GET_CODE (insn) != INSN && GET_CODE (insn) != CALL_INSN)
6604 continue;
6605 if (get_attr_length (insn) == 0)
6606 continue;
6607 /* Don't separate LTREL_BASE from the corresponding
6608 LTREL_OFFSET load. */
6609 if (pending_ltrel)
6610 continue;
6611 }
6612 else
6613 {
6614 gcc_assert (!pending_ltrel);
6615
6616 /* The old pool has to end before the section switch
6617 note in order to make it part of the current
6618 section. */
6619 insn = PREV_INSN (insn);
6620 }
6621
6622 label = gen_label_rtx ();
6623 jump = emit_jump_insn_after (gen_jump (label), insn);
6624 barrier = emit_barrier_after (jump);
6625 insn = emit_label_after (label, barrier);
6626 JUMP_LABEL (jump) = label;
6627 LABEL_NUSES (label) = 1;
6628
6629 INSN_ADDRESSES_NEW (jump, -1);
6630 INSN_ADDRESSES_NEW (barrier, -1);
6631 INSN_ADDRESSES_NEW (insn, -1);
6632
6633 s390_end_pool (curr_pool, barrier);
6634 curr_pool = NULL;
6635 extra_size = 0;
6636 }
6637 }
6638 }
6639
6640 if (curr_pool)
6641 s390_end_pool (curr_pool, NULL_RTX);
6642 gcc_assert (!pending_ltrel);
6643
6644 /* Find all labels that are branched into
6645 from an insn belonging to a different chunk. */
6646
6647 far_labels = BITMAP_ALLOC (NULL);
6648
6649 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6650 {
6651 /* Labels marked with LABEL_PRESERVE_P can be target
6652 of non-local jumps, so we have to mark them.
6653 The same holds for named labels.
6654
6655 Don't do that, however, if it is the label before
6656 a jump table. */
6657
6658 if (GET_CODE (insn) == CODE_LABEL
6659 && (LABEL_PRESERVE_P (insn) || LABEL_NAME (insn)))
6660 {
6661 rtx vec_insn = next_real_insn (insn);
6662 rtx vec_pat = vec_insn && GET_CODE (vec_insn) == JUMP_INSN ?
6663 PATTERN (vec_insn) : NULL_RTX;
6664 if (!vec_pat
6665 || !(GET_CODE (vec_pat) == ADDR_VEC
6666 || GET_CODE (vec_pat) == ADDR_DIFF_VEC))
6667 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (insn));
6668 }
6669
6670 /* If we have a direct jump (conditional or unconditional)
6671 or a casesi jump, check all potential targets. */
6672 else if (GET_CODE (insn) == JUMP_INSN)
6673 {
6674 rtx pat = PATTERN (insn);
6675 if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 2)
6676 pat = XVECEXP (pat, 0, 0);
6677
6678 if (GET_CODE (pat) == SET)
6679 {
6680 rtx label = JUMP_LABEL (insn);
6681 if (label)
6682 {
6683 if (s390_find_pool (pool_list, label)
6684 != s390_find_pool (pool_list, insn))
6685 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
6686 }
6687 }
6688 else if (GET_CODE (pat) == PARALLEL
6689 && XVECLEN (pat, 0) == 2
6690 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
6691 && GET_CODE (XVECEXP (pat, 0, 1)) == USE
6692 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == LABEL_REF)
6693 {
6694 /* Find the jump table used by this casesi jump. */
6695 rtx vec_label = XEXP (XEXP (XVECEXP (pat, 0, 1), 0), 0);
6696 rtx vec_insn = next_real_insn (vec_label);
6697 rtx vec_pat = vec_insn && GET_CODE (vec_insn) == JUMP_INSN ?
6698 PATTERN (vec_insn) : NULL_RTX;
6699 if (vec_pat
6700 && (GET_CODE (vec_pat) == ADDR_VEC
6701 || GET_CODE (vec_pat) == ADDR_DIFF_VEC))
6702 {
6703 int i, diff_p = GET_CODE (vec_pat) == ADDR_DIFF_VEC;
6704
6705 for (i = 0; i < XVECLEN (vec_pat, diff_p); i++)
6706 {
6707 rtx label = XEXP (XVECEXP (vec_pat, diff_p, i), 0);
6708
6709 if (s390_find_pool (pool_list, label)
6710 != s390_find_pool (pool_list, insn))
6711 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
6712 }
6713 }
6714 }
6715 }
6716 }
6717
6718 /* Insert base register reload insns before every pool. */
6719
6720 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
6721 {
6722 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
6723 curr_pool->label);
6724 rtx insn = curr_pool->first_insn;
6725 INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
6726 }
6727
6728 /* Insert base register reload insns at every far label. */
6729
6730 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6731 if (GET_CODE (insn) == CODE_LABEL
6732 && bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
6733 {
6734 struct constant_pool *pool = s390_find_pool (pool_list, insn);
6735 if (pool)
6736 {
6737 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
6738 pool->label);
6739 INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
6740 }
6741 }
6742
6743
6744 BITMAP_FREE (far_labels);
6745
6746
6747 /* Recompute insn addresses. */
6748
6749 init_insn_lengths ();
6750 shorten_branches (get_insns ());
6751
6752 return pool_list;
6753 }
6754
6755 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
6756 After we have decided to use this list, finish implementing
6757 all changes to the current function as required. */
6758
6759 static void
6760 s390_chunkify_finish (struct constant_pool *pool_list)
6761 {
6762 struct constant_pool *curr_pool = NULL;
6763 rtx insn;
6764
6765
6766 /* Replace all literal pool references. */
6767
6768 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6769 {
6770 if (INSN_P (insn))
6771 replace_ltrel_base (&PATTERN (insn));
6772
6773 curr_pool = s390_find_pool (pool_list, insn);
6774 if (!curr_pool)
6775 continue;
6776
6777 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6778 {
6779 rtx addr, pool_ref = NULL_RTX;
6780 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6781 if (pool_ref)
6782 {
6783 if (s390_execute_label (insn))
6784 addr = s390_find_execute (curr_pool, insn);
6785 else
6786 addr = s390_find_constant (curr_pool,
6787 get_pool_constant (pool_ref),
6788 get_pool_mode (pool_ref));
6789
6790 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
6791 INSN_CODE (insn) = -1;
6792 }
6793 }
6794 }
6795
6796 /* Dump out all literal pools. */
6797
6798 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
6799 s390_dump_pool (curr_pool, 0);
6800
6801 /* Free pool list. */
6802
6803 while (pool_list)
6804 {
6805 struct constant_pool *next = pool_list->next;
6806 s390_free_pool (pool_list);
6807 pool_list = next;
6808 }
6809 }
6810
6811 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
6812 We have decided we cannot use this list, so revert all changes
6813 to the current function that were done by s390_chunkify_start. */
6814
6815 static void
6816 s390_chunkify_cancel (struct constant_pool *pool_list)
6817 {
6818 struct constant_pool *curr_pool = NULL;
6819 rtx insn;
6820
6821 /* Remove all pool placeholder insns. */
6822
6823 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
6824 {
6825 /* Did we insert an extra barrier? Remove it. */
6826 rtx barrier = PREV_INSN (curr_pool->pool_insn);
6827 rtx jump = barrier? PREV_INSN (barrier) : NULL_RTX;
6828 rtx label = NEXT_INSN (curr_pool->pool_insn);
6829
6830 if (jump && GET_CODE (jump) == JUMP_INSN
6831 && barrier && GET_CODE (barrier) == BARRIER
6832 && label && GET_CODE (label) == CODE_LABEL
6833 && GET_CODE (PATTERN (jump)) == SET
6834 && SET_DEST (PATTERN (jump)) == pc_rtx
6835 && GET_CODE (SET_SRC (PATTERN (jump))) == LABEL_REF
6836 && XEXP (SET_SRC (PATTERN (jump)), 0) == label)
6837 {
6838 remove_insn (jump);
6839 remove_insn (barrier);
6840 remove_insn (label);
6841 }
6842
6843 remove_insn (curr_pool->pool_insn);
6844 }
6845
6846 /* Remove all base register reload insns. */
6847
6848 for (insn = get_insns (); insn; )
6849 {
6850 rtx next_insn = NEXT_INSN (insn);
6851
6852 if (GET_CODE (insn) == INSN
6853 && GET_CODE (PATTERN (insn)) == SET
6854 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
6855 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_RELOAD_BASE)
6856 remove_insn (insn);
6857
6858 insn = next_insn;
6859 }
6860
6861 /* Free pool list. */
6862
6863 while (pool_list)
6864 {
6865 struct constant_pool *next = pool_list->next;
6866 s390_free_pool (pool_list);
6867 pool_list = next;
6868 }
6869 }
6870
6871 /* Output the constant pool entry EXP in mode MODE with alignment ALIGN. */
6872
6873 void
6874 s390_output_pool_entry (rtx exp, enum machine_mode mode, unsigned int align)
6875 {
6876 REAL_VALUE_TYPE r;
6877
6878 switch (GET_MODE_CLASS (mode))
6879 {
6880 case MODE_FLOAT:
6881 case MODE_DECIMAL_FLOAT:
6882 gcc_assert (GET_CODE (exp) == CONST_DOUBLE);
6883
6884 REAL_VALUE_FROM_CONST_DOUBLE (r, exp);
6885 assemble_real (r, mode, align);
6886 break;
6887
6888 case MODE_INT:
6889 assemble_integer (exp, GET_MODE_SIZE (mode), align, 1);
6890 mark_symbol_refs_as_used (exp);
6891 break;
6892
6893 default:
6894 gcc_unreachable ();
6895 }
6896 }
6897
6898
6899 /* Return an RTL expression representing the value of the return address
6900 for the frame COUNT steps up from the current frame. FRAME is the
6901 frame pointer of that frame. */
6902
6903 rtx
6904 s390_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
6905 {
6906 int offset;
6907 rtx addr;
6908
6909 /* Without backchain, we fail for all but the current frame. */
6910
6911 if (!TARGET_BACKCHAIN && count > 0)
6912 return NULL_RTX;
6913
6914 /* For the current frame, we need to make sure the initial
6915 value of RETURN_REGNUM is actually saved. */
6916
6917 if (count == 0)
6918 {
6919 /* On non-z architectures branch splitting could overwrite r14. */
6920 if (TARGET_CPU_ZARCH)
6921 return get_hard_reg_initial_val (Pmode, RETURN_REGNUM);
6922 else
6923 {
6924 cfun_frame_layout.save_return_addr_p = true;
6925 return gen_rtx_MEM (Pmode, return_address_pointer_rtx);
6926 }
6927 }
6928
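  /* Compute the offset of the r14 save slot relative to FRAME: two words
     below FRAME with the packed stack layout, otherwise slot RETURN_REGNUM
     of the standard register save area. */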
6929 if (TARGET_PACKED_STACK)
6930 offset = -2 * UNITS_PER_LONG;
6931 else
6932 offset = RETURN_REGNUM * UNITS_PER_LONG;
6933
6934 addr = plus_constant (frame, offset);
6935 addr = memory_address (Pmode, addr);
6936 return gen_rtx_MEM (Pmode, addr);
6937 }
6938
6939 /* Return an RTL expression representing the back chain stored in
6940 the current stack frame. */
6941
6942 rtx
6943 s390_back_chain_rtx (void)
6944 {
6945 rtx chain;
6946
6947 gcc_assert (TARGET_BACKCHAIN);
6948
6949 if (TARGET_PACKED_STACK)
6950 chain = plus_constant (stack_pointer_rtx,
6951 STACK_POINTER_OFFSET - UNITS_PER_LONG);
6952 else
6953 chain = stack_pointer_rtx;
6954
6955 chain = gen_rtx_MEM (Pmode, chain);
6956 return chain;
6957 }
6958
6959 /* Find first call clobbered register unused in a function.
6960 This could be used as base register in a leaf function
6961 or for holding the return address before epilogue. */
6962
6963 static int
6964 find_unused_clobbered_reg (void)
6965 {
6966 int i;
6967 for (i = 0; i < 6; i++)
6968 if (!df_regs_ever_live_p (i))
6969 return i;
6970 return 0;
6971 }
6972
6973
6974 /* Helper function for s390_regs_ever_clobbered. Sets the fields in DATA for all
6975 clobbered hard regs in SETREG. */
6976
6977 static void
6978 s390_reg_clobbered_rtx (rtx setreg, const_rtx set_insn ATTRIBUTE_UNUSED, void *data)
6979 {
6980 int *regs_ever_clobbered = (int *)data;
6981 unsigned int i, regno;
6982 enum machine_mode mode = GET_MODE (setreg);
6983
6984 if (GET_CODE (setreg) == SUBREG)
6985 {
6986 rtx inner = SUBREG_REG (setreg);
6987 if (!GENERAL_REG_P (inner))
6988 return;
6989 regno = subreg_regno (setreg);
6990 }
6991 else if (GENERAL_REG_P (setreg))
6992 regno = REGNO (setreg);
6993 else
6994 return;
6995
6996 for (i = regno;
6997 i < regno + HARD_REGNO_NREGS (regno, mode);
6998 i++)
6999 regs_ever_clobbered[i] = 1;
7000 }
7001
7002 /* Walks through all basic blocks of the current function looking
7003 for clobbered hard regs using s390_reg_clobbered_rtx. The fields
7004 of the passed integer array REGS_EVER_CLOBBERED are set to one for
7005 each of those regs. */
7006
7007 static void
7008 s390_regs_ever_clobbered (int *regs_ever_clobbered)
7009 {
7010 basic_block cur_bb;
7011 rtx cur_insn;
7012 unsigned int i;
7013
7014 memset (regs_ever_clobbered, 0, 16 * sizeof (int));
7015
7016 /* For non-leaf functions we have to consider all call clobbered regs to be
7017 clobbered. */
7018 if (!current_function_is_leaf)
7019 {
7020 for (i = 0; i < 16; i++)
7021 regs_ever_clobbered[i] = call_really_used_regs[i];
7022 }
7023
7024 /* Make the "magic" eh_return registers live if necessary. For regs_ever_live
7025 this work is done by liveness analysis (mark_regs_live_at_end).
7026 Special care is needed for functions containing landing pads. Landing pads
7027 may use the eh registers, but the code which sets these registers is not
7028 contained in that function. Hence s390_regs_ever_clobbered is not able to
7029 deal with this automatically. */
7030 if (crtl->calls_eh_return || cfun->machine->has_landing_pad_p)
7031 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM ; i++)
7032 if (crtl->calls_eh_return
7033 || (cfun->machine->has_landing_pad_p
7034 && df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
7035 regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;
7036
7037 /* For nonlocal gotos all call-saved registers have to be saved.
7038 This flag is also set for the unwinding code in libgcc.
7039 See expand_builtin_unwind_init. For regs_ever_live this is done by
7040 reload. */
7041 if (cfun->has_nonlocal_label)
7042 for (i = 0; i < 16; i++)
7043 if (!call_really_used_regs[i])
7044 regs_ever_clobbered[i] = 1;
7045
7046 FOR_EACH_BB (cur_bb)
7047 {
7048 FOR_BB_INSNS (cur_bb, cur_insn)
7049 {
7050 if (INSN_P (cur_insn))
7051 note_stores (PATTERN (cur_insn),
7052 s390_reg_clobbered_rtx,
7053 regs_ever_clobbered);
7054 }
7055 }
7056 }
7057
7058 /* Determine the frame area which actually has to be accessed
7059 in the function epilogue. The values are stored at the
7060 given pointers AREA_BOTTOM (address of the lowest used stack
7061 address) and AREA_TOP (address of the first item which does
7062 not belong to the stack frame). */
7063
7064 static void
7065 s390_frame_area (int *area_bottom, int *area_top)
7066 {
7067 int b, t;
7068 int i;
7069
7070 b = INT_MAX;
7071 t = INT_MIN;
7072
7073 if (cfun_frame_layout.first_restore_gpr != -1)
7074 {
7075 b = (cfun_frame_layout.gprs_offset
7076 + cfun_frame_layout.first_restore_gpr * UNITS_PER_LONG);
7077 t = b + (cfun_frame_layout.last_restore_gpr
7078 - cfun_frame_layout.first_restore_gpr + 1) * UNITS_PER_LONG;
7079 }
7080
7081 if (TARGET_64BIT && cfun_save_high_fprs_p)
7082 {
7083 b = MIN (b, cfun_frame_layout.f8_offset);
7084 t = MAX (t, (cfun_frame_layout.f8_offset
7085 + cfun_frame_layout.high_fprs * 8));
7086 }
7087
7088 if (!TARGET_64BIT)
7089 for (i = 2; i < 4; i++)
7090 if (cfun_fpr_bit_p (i))
7091 {
7092 b = MIN (b, cfun_frame_layout.f4_offset + (i - 2) * 8);
7093 t = MAX (t, cfun_frame_layout.f4_offset + (i - 1) * 8);
7094 }
7095
7096 *area_bottom = b;
7097 *area_top = t;
7098 }
7099
7100 /* Fill cfun->machine with info about register usage of current function.
7101 Return in CLOBBERED_REGS which GPRs are currently considered set. */
7102
7103 static void
7104 s390_register_info (int clobbered_regs[])
7105 {
7106 int i, j;
7107
7108 /* fprs 8 - 15 are call saved for 64 Bit ABI. */
7109 cfun_frame_layout.fpr_bitmap = 0;
7110 cfun_frame_layout.high_fprs = 0;
7111 if (TARGET_64BIT)
7112 for (i = 24; i < 32; i++)
7113 if (df_regs_ever_live_p (i) && !global_regs[i])
7114 {
7115 cfun_set_fpr_bit (i - 16);
7116 cfun_frame_layout.high_fprs++;
7117 }
7118
7119 /* Find first and last gpr to be saved. We trust regs_ever_live
7120 data, except that we don't save and restore global registers.
7121
7122 Also, all registers with special meaning to the compiler need
7123 to be handled specially. */
7124
7125 s390_regs_ever_clobbered (clobbered_regs);
7126
7127 for (i = 0; i < 16; i++)
7128 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i] && !fixed_regs[i];
7129
7130 if (frame_pointer_needed)
7131 clobbered_regs[HARD_FRAME_POINTER_REGNUM] = 1;
7132
7133 if (flag_pic)
7134 clobbered_regs[PIC_OFFSET_TABLE_REGNUM]
7135 |= df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM);
7136
7137 clobbered_regs[BASE_REGNUM]
7138 |= (cfun->machine->base_reg
7139 && REGNO (cfun->machine->base_reg) == BASE_REGNUM);
7140
7141 clobbered_regs[RETURN_REGNUM]
7142 |= (!current_function_is_leaf
7143 || TARGET_TPF_PROFILING
7144 || cfun->machine->split_branches_pending_p
7145 || cfun_frame_layout.save_return_addr_p
7146 || crtl->calls_eh_return
7147 || cfun->stdarg);
7148
7149 clobbered_regs[STACK_POINTER_REGNUM]
7150 |= (!current_function_is_leaf
7151 || TARGET_TPF_PROFILING
7152 || cfun_save_high_fprs_p
7153 || get_frame_size () > 0
7154 || cfun->calls_alloca
7155 || cfun->stdarg);
7156
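  /* Determine which GPRs in the range r6 .. r15 need save slots: the first
     and the last register that is live or clobbered delimit the slot range;
     within that range, the actually clobbered registers delimit the block
     saved and restored with store/load multiple. */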
7157 for (i = 6; i < 16; i++)
7158 if (df_regs_ever_live_p (i) || clobbered_regs[i])
7159 break;
7160 for (j = 15; j > i; j--)
7161 if (df_regs_ever_live_p (j) || clobbered_regs[j])
7162 break;
7163
7164 if (i == 16)
7165 {
7166 /* Nothing to save/restore. */
7167 cfun_frame_layout.first_save_gpr_slot = -1;
7168 cfun_frame_layout.last_save_gpr_slot = -1;
7169 cfun_frame_layout.first_save_gpr = -1;
7170 cfun_frame_layout.first_restore_gpr = -1;
7171 cfun_frame_layout.last_save_gpr = -1;
7172 cfun_frame_layout.last_restore_gpr = -1;
7173 }
7174 else
7175 {
7176 /* Save slots for gprs from i to j. */
7177 cfun_frame_layout.first_save_gpr_slot = i;
7178 cfun_frame_layout.last_save_gpr_slot = j;
7179
7180 for (i = cfun_frame_layout.first_save_gpr_slot;
7181 i < cfun_frame_layout.last_save_gpr_slot + 1;
7182 i++)
7183 if (clobbered_regs[i])
7184 break;
7185
7186 for (j = cfun_frame_layout.last_save_gpr_slot; j > i; j--)
7187 if (clobbered_regs[j])
7188 break;
7189
7190 if (i == cfun_frame_layout.last_save_gpr_slot + 1)
7191 {
7192 /* Nothing to save/restore. */
7193 cfun_frame_layout.first_save_gpr = -1;
7194 cfun_frame_layout.first_restore_gpr = -1;
7195 cfun_frame_layout.last_save_gpr = -1;
7196 cfun_frame_layout.last_restore_gpr = -1;
7197 }
7198 else
7199 {
7200 /* Save / Restore from gpr i to j. */
7201 cfun_frame_layout.first_save_gpr = i;
7202 cfun_frame_layout.first_restore_gpr = i;
7203 cfun_frame_layout.last_save_gpr = j;
7204 cfun_frame_layout.last_restore_gpr = j;
7205 }
7206 }
7207
7208 if (cfun->stdarg)
7209 {
7210 /* Varargs functions need to save gprs 2 to 6. */
7211 if (cfun->va_list_gpr_size
7212 && crtl->args.info.gprs < GP_ARG_NUM_REG)
7213 {
7214 int min_gpr = crtl->args.info.gprs;
7215 int max_gpr = min_gpr + cfun->va_list_gpr_size;
7216 if (max_gpr > GP_ARG_NUM_REG)
7217 max_gpr = GP_ARG_NUM_REG;
7218
7219 if (cfun_frame_layout.first_save_gpr == -1
7220 || cfun_frame_layout.first_save_gpr > 2 + min_gpr)
7221 {
7222 cfun_frame_layout.first_save_gpr = 2 + min_gpr;
7223 cfun_frame_layout.first_save_gpr_slot = 2 + min_gpr;
7224 }
7225
7226 if (cfun_frame_layout.last_save_gpr == -1
7227 || cfun_frame_layout.last_save_gpr < 2 + max_gpr - 1)
7228 {
7229 cfun_frame_layout.last_save_gpr = 2 + max_gpr - 1;
7230 cfun_frame_layout.last_save_gpr_slot = 2 + max_gpr - 1;
7231 }
7232 }
7233
7234 /* Mark f0, f2 for 31 bit and f0, f2, f4, f6 for 64 bit to be saved. */
7235 if (TARGET_HARD_FLOAT && cfun->va_list_fpr_size
7236 && crtl->args.info.fprs < FP_ARG_NUM_REG)
7237 {
7238 int min_fpr = crtl->args.info.fprs;
7239 int max_fpr = min_fpr + cfun->va_list_fpr_size;
7240 if (max_fpr > FP_ARG_NUM_REG)
7241 max_fpr = FP_ARG_NUM_REG;
7242
7243 /* ??? This is currently required to ensure proper location
7244 of the fpr save slots within the va_list save area. */
7245 if (TARGET_PACKED_STACK)
7246 min_fpr = 0;
7247
7248 for (i = min_fpr; i < max_fpr; i++)
7249 cfun_set_fpr_bit (i);
7250 }
7251 }
7252
7253 if (!TARGET_64BIT)
7254 for (i = 2; i < 4; i++)
7255 if (df_regs_ever_live_p (i + 16) && !global_regs[i + 16])
7256 cfun_set_fpr_bit (i);
7257 }
7258
7259 /* Fill cfun->machine with info about frame of current function. */
7260
7261 static void
7262 s390_frame_info (void)
7263 {
7264 int i;
7265
7266 cfun_frame_layout.frame_size = get_frame_size ();
7267 if (!TARGET_64BIT && cfun_frame_layout.frame_size > 0x7fff0000)
7268 fatal_error ("total size of local variables exceeds architecture limit");
7269
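  /* Lay out the register save area.  Three layouts are handled below: the
     standard ABI layout, the packed stack layout with back chain (kernel
     stack layout), and the packed stack layout without back chain. */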
7270 if (!TARGET_PACKED_STACK)
7271 {
7272 cfun_frame_layout.backchain_offset = 0;
7273 cfun_frame_layout.f0_offset = 16 * UNITS_PER_LONG;
7274 cfun_frame_layout.f4_offset = cfun_frame_layout.f0_offset + 2 * 8;
7275 cfun_frame_layout.f8_offset = -cfun_frame_layout.high_fprs * 8;
7276 cfun_frame_layout.gprs_offset = (cfun_frame_layout.first_save_gpr_slot
7277 * UNITS_PER_LONG);
7278 }
7279 else if (TARGET_BACKCHAIN) /* kernel stack layout */
7280 {
7281 cfun_frame_layout.backchain_offset = (STACK_POINTER_OFFSET
7282 - UNITS_PER_LONG);
7283 cfun_frame_layout.gprs_offset
7284 = (cfun_frame_layout.backchain_offset
7285 - (STACK_POINTER_REGNUM - cfun_frame_layout.first_save_gpr_slot + 1)
7286 * UNITS_PER_LONG);
7287
7288 if (TARGET_64BIT)
7289 {
7290 cfun_frame_layout.f4_offset
7291 = (cfun_frame_layout.gprs_offset
7292 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7293
7294 cfun_frame_layout.f0_offset
7295 = (cfun_frame_layout.f4_offset
7296 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7297 }
7298 else
7299 {
7300 /* On 31 bit we have to take care of the alignment of the
7301 floating point regs to provide fastest access. */
7302 cfun_frame_layout.f0_offset
7303 = ((cfun_frame_layout.gprs_offset
7304 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1))
7305 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7306
7307 cfun_frame_layout.f4_offset
7308 = (cfun_frame_layout.f0_offset
7309 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7310 }
7311 }
7312 else /* no backchain */
7313 {
7314 cfun_frame_layout.f4_offset
7315 = (STACK_POINTER_OFFSET
7316 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7317
7318 cfun_frame_layout.f0_offset
7319 = (cfun_frame_layout.f4_offset
7320 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7321
7322 cfun_frame_layout.gprs_offset
7323 = cfun_frame_layout.f0_offset - cfun_gprs_save_area_size;
7324 }
7325
7326 if (current_function_is_leaf
7327 && !TARGET_TPF_PROFILING
7328 && cfun_frame_layout.frame_size == 0
7329 && !cfun_save_high_fprs_p
7330 && !cfun->calls_alloca
7331 && !cfun->stdarg)
7332 return;
7333
7334 if (!TARGET_PACKED_STACK)
7335 cfun_frame_layout.frame_size += (STACK_POINTER_OFFSET
7336 + crtl->outgoing_args_size
7337 + cfun_frame_layout.high_fprs * 8);
7338 else
7339 {
7340 if (TARGET_BACKCHAIN)
7341 cfun_frame_layout.frame_size += UNITS_PER_LONG;
7342
7343 /* No alignment trouble here because f8-f15 are only saved under
7344 64 bit. */
7345 cfun_frame_layout.f8_offset = (MIN (MIN (cfun_frame_layout.f0_offset,
7346 cfun_frame_layout.f4_offset),
7347 cfun_frame_layout.gprs_offset)
7348 - cfun_frame_layout.high_fprs * 8);
7349
7350 cfun_frame_layout.frame_size += cfun_frame_layout.high_fprs * 8;
7351
7352 for (i = 0; i < 8; i++)
7353 if (cfun_fpr_bit_p (i))
7354 cfun_frame_layout.frame_size += 8;
7355
7356 cfun_frame_layout.frame_size += cfun_gprs_save_area_size;
7357
7358 /* If an odd number of gprs has to be saved under 31 bit, we have to adjust
7359 the frame size to sustain the 8-byte alignment of stack frames. */
7360 cfun_frame_layout.frame_size = ((cfun_frame_layout.frame_size +
7361 STACK_BOUNDARY / BITS_PER_UNIT - 1)
7362 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1));
7363
7364 cfun_frame_layout.frame_size += crtl->outgoing_args_size;
7365 }
7366 }
7367
7368 /* Generate frame layout. Fills in register and frame data for the current
7369 function in cfun->machine. This routine can be called multiple times;
7370 it will re-do the complete frame layout every time. */
7371
7372 static void
7373 s390_init_frame_layout (void)
7374 {
7375 HOST_WIDE_INT frame_size;
7376 int base_used;
7377 int clobbered_regs[16];
7378
7379 /* On S/390 machines, we may need to perform branch splitting, which
7380 will require both base and return address register. We have no
7381 choice but to assume we're going to need them until right at the
7382 end of the machine dependent reorg phase. */
7383 if (!TARGET_CPU_ZARCH)
7384 cfun->machine->split_branches_pending_p = true;
7385
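  /* Iterate until the frame size converges: whether the literal pool base
     register is needed depends on the frame size, while reserving it in
     turn changes the register save requirements and thus the frame size. */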
7386 do
7387 {
7388 frame_size = cfun_frame_layout.frame_size;
7389
7390 /* Try to predict whether we'll need the base register. */
7391 base_used = cfun->machine->split_branches_pending_p
7392 || crtl->uses_const_pool
7393 || (!DISP_IN_RANGE (frame_size)
7394 && !CONST_OK_FOR_K (frame_size));
7395
7396 /* Decide which register to use as literal pool base. In small
7397 leaf functions, try to use an unused call-clobbered register
7398 as base register to avoid save/restore overhead. */
7399 if (!base_used)
7400 cfun->machine->base_reg = NULL_RTX;
7401 else if (current_function_is_leaf && !df_regs_ever_live_p (5))
7402 cfun->machine->base_reg = gen_rtx_REG (Pmode, 5);
7403 else
7404 cfun->machine->base_reg = gen_rtx_REG (Pmode, BASE_REGNUM);
7405
7406 s390_register_info (clobbered_regs);
7407 s390_frame_info ();
7408 }
7409 while (frame_size != cfun_frame_layout.frame_size);
7410 }
7411
7412 /* Update frame layout. Recompute actual register save data based on
7413 current info and update regs_ever_live for the special registers.
7414 May be called multiple times, but may never cause *more* registers
7415 to be saved than s390_init_frame_layout allocated room for. */
7416
7417 static void
7418 s390_update_frame_layout (void)
7419 {
7420 int clobbered_regs[16];
7421
7422 s390_register_info (clobbered_regs);
7423
7424 df_set_regs_ever_live (BASE_REGNUM,
7425 clobbered_regs[BASE_REGNUM] ? true : false);
7426 df_set_regs_ever_live (RETURN_REGNUM,
7427 clobbered_regs[RETURN_REGNUM] ? true : false);
7428 df_set_regs_ever_live (STACK_POINTER_REGNUM,
7429 clobbered_regs[STACK_POINTER_REGNUM] ? true : false);
7430
7431 if (cfun->machine->base_reg)
7432 df_set_regs_ever_live (REGNO (cfun->machine->base_reg), true);
7433 }
7434
7435 /* Return true if it is legal to put a value with MODE into REGNO. */
7436
7437 bool
7438 s390_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
7439 {
7440 switch (REGNO_REG_CLASS (regno))
7441 {
7442 case FP_REGS:
7443 if (REGNO_PAIR_OK (regno, mode))
7444 {
7445 if (mode == SImode || mode == DImode)
7446 return true;
7447
7448 if (FLOAT_MODE_P (mode) && GET_MODE_CLASS (mode) != MODE_VECTOR_FLOAT)
7449 return true;
7450 }
7451 break;
7452 case ADDR_REGS:
7453 if (FRAME_REGNO_P (regno) && mode == Pmode)
7454 return true;
7455
7456 /* fallthrough */
7457 case GENERAL_REGS:
7458 if (REGNO_PAIR_OK (regno, mode))
7459 {
7460 if (TARGET_ZARCH
7461 || (mode != TFmode && mode != TCmode && mode != TDmode))
7462 return true;
7463 }
7464 break;
7465 case CC_REGS:
7466 if (GET_MODE_CLASS (mode) == MODE_CC)
7467 return true;
7468 break;
7469 case ACCESS_REGS:
7470 if (REGNO_PAIR_OK (regno, mode))
7471 {
7472 if (mode == SImode || mode == Pmode)
7473 return true;
7474 }
7475 break;
7476 default:
7477 return false;
7478 }
7479
7480 return false;
7481 }
7482
7483 /* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */
7484
7485 bool
7486 s390_hard_regno_rename_ok (unsigned int old_reg, unsigned int new_reg)
7487 {
7488 /* Once we've decided upon a register to use as base register, it must
7489 no longer be used for any other purpose. */
7490 if (cfun->machine->base_reg)
7491 if (REGNO (cfun->machine->base_reg) == old_reg
7492 || REGNO (cfun->machine->base_reg) == new_reg)
7493 return false;
7494
7495 return true;
7496 }
7497
7498 /* Maximum number of registers to represent a value of mode MODE
7499 in a register of class RCLASS. */
7500
7501 int
7502 s390_class_max_nregs (enum reg_class rclass, enum machine_mode mode)
7503 {
7504 switch (rclass)
7505 {
7506 case FP_REGS:
7507 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
7508 return 2 * ((GET_MODE_SIZE (mode) / 2 + 8 - 1) / 8);
7509 else
7510 return (GET_MODE_SIZE (mode) + 8 - 1) / 8;
7511 case ACCESS_REGS:
7512 return (GET_MODE_SIZE (mode) + 4 - 1) / 4;
7513 default:
7514 break;
7515 }
7516 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
7517 }
7518
7519 /* Return true if register FROM can be eliminated via register TO. */
7520
7521 static bool
7522 s390_can_eliminate (const int from, const int to)
7523 {
7524 /* On zSeries machines, we have not marked the base register as fixed.
7525 Instead, we have an elimination rule BASE_REGNUM -> BASE_REGNUM.
7526 If a function requires the base register, we say here that this
7527 elimination cannot be performed. This will cause reload to free
7528 up the base register (as if it were fixed). On the other hand,
7529 if the current function does *not* require the base register, we
7530 say here the elimination succeeds, which in turn allows reload
7531 to allocate the base register for any other purpose. */
7532 if (from == BASE_REGNUM && to == BASE_REGNUM)
7533 {
7534 if (TARGET_CPU_ZARCH)
7535 {
7536 s390_init_frame_layout ();
7537 return cfun->machine->base_reg == NULL_RTX;
7538 }
7539
7540 return false;
7541 }
7542
7543 /* Everything else must point into the stack frame. */
7544 gcc_assert (to == STACK_POINTER_REGNUM
7545 || to == HARD_FRAME_POINTER_REGNUM);
7546
7547 gcc_assert (from == FRAME_POINTER_REGNUM
7548 || from == ARG_POINTER_REGNUM
7549 || from == RETURN_ADDRESS_POINTER_REGNUM);
7550
7551 /* Make sure we actually saved the return address. */
7552 if (from == RETURN_ADDRESS_POINTER_REGNUM)
7553 if (!crtl->calls_eh_return
7554 && !cfun->stdarg
7555 && !cfun_frame_layout.save_return_addr_p)
7556 return false;
7557
7558 return true;
7559 }
7560
7561 /* Return offset between register FROM and TO initially after prolog. */
7562
7563 HOST_WIDE_INT
7564 s390_initial_elimination_offset (int from, int to)
7565 {
7566 HOST_WIDE_INT offset;
7567 int index;
7568
7569 /* ??? Why are we called for non-eliminable pairs? */
7570 if (!s390_can_eliminate (from, to))
7571 return 0;
7572
7573 switch (from)
7574 {
7575 case FRAME_POINTER_REGNUM:
7576 offset = (get_frame_size()
7577 + STACK_POINTER_OFFSET
7578 + crtl->outgoing_args_size);
7579 break;
7580
7581 case ARG_POINTER_REGNUM:
7582 s390_init_frame_layout ();
7583 offset = cfun_frame_layout.frame_size + STACK_POINTER_OFFSET;
7584 break;
7585
7586 case RETURN_ADDRESS_POINTER_REGNUM:
7587 s390_init_frame_layout ();
7588 index = RETURN_REGNUM - cfun_frame_layout.first_save_gpr_slot;
7589 gcc_assert (index >= 0);
7590 offset = cfun_frame_layout.frame_size + cfun_frame_layout.gprs_offset;
7591 offset += index * UNITS_PER_LONG;
7592 break;
7593
7594 case BASE_REGNUM:
7595 offset = 0;
7596 break;
7597
7598 default:
7599 gcc_unreachable ();
7600 }
7601
7602 return offset;
7603 }
7604
7605 /* Emit insn to save fpr REGNUM at offset OFFSET relative
7606 to register BASE. Return generated insn. */
7607
7608 static rtx
7609 save_fpr (rtx base, int offset, int regnum)
7610 {
7611 rtx addr;
7612 addr = gen_rtx_MEM (DFmode, plus_constant (base, offset));
7613
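  /* Save slots of the FPR argument registers get the varargs alias set,
     presumably so that accesses through va_list are handled correctly by
     alias analysis; all other FPR save slots belong to the frame alias
     set. */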
7614 if (regnum >= 16 && regnum <= (16 + FP_ARG_NUM_REG))
7615 set_mem_alias_set (addr, get_varargs_alias_set ());
7616 else
7617 set_mem_alias_set (addr, get_frame_alias_set ());
7618
7619 return emit_move_insn (addr, gen_rtx_REG (DFmode, regnum));
7620 }
7621
7622 /* Emit insn to restore fpr REGNUM from offset OFFSET relative
7623 to register BASE. Return generated insn. */
7624
7625 static rtx
7626 restore_fpr (rtx base, int offset, int regnum)
7627 {
7628 rtx addr;
7629 addr = gen_rtx_MEM (DFmode, plus_constant (base, offset));
7630 set_mem_alias_set (addr, get_frame_alias_set ());
7631
7632 return emit_move_insn (gen_rtx_REG (DFmode, regnum), addr);
7633 }
7634
7635 /* Return true if REGNO is a global register, but not one
7636 of the special ones that need to be saved/restored in anyway. */
7637
7638 static inline bool
7639 global_not_special_regno_p (int regno)
7640 {
7641 return (global_regs[regno]
7642 /* These registers are special and need to be
7643 restored in any case. */
7644 && !(regno == STACK_POINTER_REGNUM
7645 || regno == RETURN_REGNUM
7646 || regno == BASE_REGNUM
7647 || (flag_pic && regno == (int)PIC_OFFSET_TABLE_REGNUM)));
7648 }
7649
7650 /* Generate insn to save registers FIRST to LAST into
7651 the register save area located at offset OFFSET
7652 relative to register BASE. */
7653
7654 static rtx
7655 save_gprs (rtx base, int offset, int first, int last)
7656 {
7657 rtx addr, insn, note;
7658 int i;
7659
7660 addr = plus_constant (base, offset);
7661 addr = gen_rtx_MEM (Pmode, addr);
7662
7663 set_mem_alias_set (addr, get_frame_alias_set ());
7664
7665 /* Special-case single register. */
7666 if (first == last)
7667 {
7668 if (TARGET_64BIT)
7669 insn = gen_movdi (addr, gen_rtx_REG (Pmode, first));
7670 else
7671 insn = gen_movsi (addr, gen_rtx_REG (Pmode, first));
7672
7673 if (!global_not_special_regno_p (first))
7674 RTX_FRAME_RELATED_P (insn) = 1;
7675 return insn;
7676 }
7677
7678
7679 insn = gen_store_multiple (addr,
7680 gen_rtx_REG (Pmode, first),
7681 GEN_INT (last - first + 1));
7682
7683 if (first <= 6 && cfun->stdarg)
7684 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
7685 {
7686 rtx mem = XEXP (XVECEXP (PATTERN (insn), 0, i), 0);
7687
7688 if (first + i <= 6)
7689 set_mem_alias_set (mem, get_varargs_alias_set ());
7690 }
7691
7692 /* We need to set the FRAME_RELATED flag on all SETs
7693 inside the store-multiple pattern.
7694
7695 However, we must not emit DWARF records for registers 2..5
7696 if they are stored for use by variable arguments ...
7697
7698 ??? Unfortunately, it is not enough to simply not set the
7699 FRAME_RELATED flags for those SETs, because the first SET
7700 of the PARALLEL is always treated as if it had the flag
7701 set, even if it does not. Therefore we emit a new pattern
7702 without those registers as REG_FRAME_RELATED_EXPR note. */
7703
7704 if (first >= 6 && !global_not_special_regno_p (first))
7705 {
7706 rtx pat = PATTERN (insn);
7707
7708 for (i = 0; i < XVECLEN (pat, 0); i++)
7709 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
7710 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (pat,
7711 0, i)))))
7712 RTX_FRAME_RELATED_P (XVECEXP (pat, 0, i)) = 1;
7713
7714 RTX_FRAME_RELATED_P (insn) = 1;
7715 }
7716 else if (last >= 6)
7717 {
7718 int start;
7719
7720 for (start = first >= 6 ? first : 6; start <= last; start++)
7721 if (!global_not_special_regno_p (start))
7722 break;
7723
7724 if (start > last)
7725 return insn;
7726
7727 addr = plus_constant (base, offset + (start - first) * UNITS_PER_LONG);
7728 note = gen_store_multiple (gen_rtx_MEM (Pmode, addr),
7729 gen_rtx_REG (Pmode, start),
7730 GEN_INT (last - start + 1));
7731 note = PATTERN (note);
7732
7733 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
7734
7735 for (i = 0; i < XVECLEN (note, 0); i++)
7736 if (GET_CODE (XVECEXP (note, 0, i)) == SET
7737 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (note,
7738 0, i)))))
7739 RTX_FRAME_RELATED_P (XVECEXP (note, 0, i)) = 1;
7740
7741 RTX_FRAME_RELATED_P (insn) = 1;
7742 }
7743
7744 return insn;
7745 }
7746
7747 /* Generate insn to restore registers FIRST to LAST from
7748 the register save area located at offset OFFSET
7749 relative to register BASE. */
7750
7751 static rtx
7752 restore_gprs (rtx base, int offset, int first, int last)
7753 {
7754 rtx addr, insn;
7755
7756 addr = plus_constant (base, offset);
7757 addr = gen_rtx_MEM (Pmode, addr);
7758 set_mem_alias_set (addr, get_frame_alias_set ());
7759
7760 /* Special-case single register. */
7761 if (first == last)
7762 {
7763 if (TARGET_64BIT)
7764 insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
7765 else
7766 insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);
7767
7768 return insn;
7769 }
7770
7771 insn = gen_load_multiple (gen_rtx_REG (Pmode, first),
7772 addr,
7773 GEN_INT (last - first + 1));
7774 return insn;
7775 }
7776
7777 /* Return insn sequence to load the GOT register. */
7778
7779 static GTY(()) rtx got_symbol;
7780 rtx
7781 s390_load_got (void)
7782 {
7783 rtx insns;
7784
7785 if (!got_symbol)
7786 {
7787 got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
7788 SYMBOL_REF_FLAGS (got_symbol) = SYMBOL_FLAG_LOCAL;
7789 }
7790
7791 start_sequence ();
7792
7793 if (TARGET_CPU_ZARCH)
7794 {
7795 emit_move_insn (pic_offset_table_rtx, got_symbol);
7796 }
7797 else
7798 {
7799 rtx offset;
7800
7801 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got_symbol),
7802 UNSPEC_LTREL_OFFSET);
7803 offset = gen_rtx_CONST (Pmode, offset);
7804 offset = force_const_mem (Pmode, offset);
7805
7806 emit_move_insn (pic_offset_table_rtx, offset);
7807
7808 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (offset, 0)),
7809 UNSPEC_LTREL_BASE);
7810 offset = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, offset);
7811
7812 emit_move_insn (pic_offset_table_rtx, offset);
7813 }
7814
7815 insns = get_insns ();
7816 end_sequence ();
7817 return insns;
7818 }
7819
7820 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
7821 and the change to the stack pointer. */
7822
7823 static void
7824 s390_emit_stack_tie (void)
7825 {
7826 rtx mem = gen_frame_mem (BLKmode,
7827 gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
7828
7829 emit_insn (gen_stack_tie (mem));
7830 }
7831
7832 /* Expand the prologue into a bunch of separate insns. */
7833
7834 void
7835 s390_emit_prologue (void)
7836 {
7837 rtx insn, addr;
7838 rtx temp_reg;
7839 int i;
7840 int offset;
7841 int next_fpr = 0;
7842
7843 /* Complete frame layout. */
7844
7845 s390_update_frame_layout ();
7846
7847 /* Annotate all constant pool references to let the scheduler know
7848 they implicitly use the base register. */
7849
7850 push_topmost_sequence ();
7851
7852 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7853 if (INSN_P (insn))
7854 {
7855 annotate_constant_pool_refs (&PATTERN (insn));
7856 df_insn_rescan (insn);
7857 }
7858
7859 pop_topmost_sequence ();
7860
7861 /* Choose the best register to use as a temporary within the prologue.
7862 See below for why TPF must use register 1. */
7863
7864 if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
7865 && !current_function_is_leaf
7866 && !TARGET_TPF_PROFILING)
7867 temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
7868 else
7869 temp_reg = gen_rtx_REG (Pmode, 1);
7870
7871 /* Save call saved gprs. */
7872 if (cfun_frame_layout.first_save_gpr != -1)
7873 {
7874 insn = save_gprs (stack_pointer_rtx,
7875 cfun_frame_layout.gprs_offset +
7876 UNITS_PER_LONG * (cfun_frame_layout.first_save_gpr
7877 - cfun_frame_layout.first_save_gpr_slot),
7878 cfun_frame_layout.first_save_gpr,
7879 cfun_frame_layout.last_save_gpr);
7880 emit_insn (insn);
7881 }
7882
7883 /* Dummy insn to mark literal pool slot. */
7884
7885 if (cfun->machine->base_reg)
7886 emit_insn (gen_main_pool (cfun->machine->base_reg));
7887
7888 offset = cfun_frame_layout.f0_offset;
7889
7890 /* Save f0 and f2. */
7891 for (i = 0; i < 2; i++)
7892 {
7893 if (cfun_fpr_bit_p (i))
7894 {
7895 save_fpr (stack_pointer_rtx, offset, i + 16);
7896 offset += 8;
7897 }
7898 else if (!TARGET_PACKED_STACK)
7899 offset += 8;
7900 }
7901
7902 /* Save f4 and f6. */
7903 offset = cfun_frame_layout.f4_offset;
7904 for (i = 2; i < 4; i++)
7905 {
7906 if (cfun_fpr_bit_p (i))
7907 {
7908 insn = save_fpr (stack_pointer_rtx, offset, i + 16);
7909 offset += 8;
7910
7911 /* If f4 and f6 are call-clobbered they are saved only because of
7912 stdarg and therefore are not frame related. */
7913 if (!call_really_used_regs[i + 16])
7914 RTX_FRAME_RELATED_P (insn) = 1;
7915 }
7916 else if (!TARGET_PACKED_STACK)
7917 offset += 8;
7918 }
7919
7920 if (TARGET_PACKED_STACK
7921 && cfun_save_high_fprs_p
7922 && cfun_frame_layout.f8_offset + cfun_frame_layout.high_fprs * 8 > 0)
7923 {
7924 offset = (cfun_frame_layout.f8_offset
7925 + (cfun_frame_layout.high_fprs - 1) * 8);
7926
7927 for (i = 15; i > 7 && offset >= 0; i--)
7928 if (cfun_fpr_bit_p (i))
7929 {
7930 insn = save_fpr (stack_pointer_rtx, offset, i + 16);
7931
7932 RTX_FRAME_RELATED_P (insn) = 1;
7933 offset -= 8;
7934 }
7935 if (offset >= cfun_frame_layout.f8_offset)
7936 next_fpr = i + 16;
7937 }
7938
7939 if (!TARGET_PACKED_STACK)
7940 next_fpr = cfun_save_high_fprs_p ? 31 : 0;
7941
7942 /* Decrement stack pointer. */
7943
7944 if (cfun_frame_layout.frame_size > 0)
7945 {
7946 rtx frame_off = GEN_INT (-cfun_frame_layout.frame_size);
7947 rtx real_frame_off;
7948
7949 if (s390_stack_size)
7950 {
7951 HOST_WIDE_INT stack_guard;
7952
7953 if (s390_stack_guard)
7954 stack_guard = s390_stack_guard;
7955 else
7956 {
7957 /* If no value for the stack guard is provided, the smallest power of 2
7958 larger than the current frame size is chosen. */
7959 stack_guard = 1;
7960 while (stack_guard < cfun_frame_layout.frame_size)
7961 stack_guard <<= 1;
7962 }
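/* For illustration only (not part of the original sources): with a
   frame size of 5000 bytes the loop above ends up with a stack guard
   of 8192, the smallest power of 2 above the frame size.  */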
7963
7964 if (cfun_frame_layout.frame_size >= s390_stack_size)
7965 {
7966 warning (0, "frame size of function %qs is "
7967 HOST_WIDE_INT_PRINT_DEC
7968 " bytes exceeding user provided stack limit of "
7969 HOST_WIDE_INT_PRINT_DEC " bytes. "
7970 "An unconditional trap is added.",
7971 current_function_name(), cfun_frame_layout.frame_size,
7972 s390_stack_size);
7973 emit_insn (gen_trap ());
7974 }
7975 else
7976 {
7977 /* stack_guard has to be smaller than s390_stack_size.
7978 Otherwise we would emit an AND with zero, which would
7979 not match the test-under-mask pattern. */
7980 if (stack_guard >= s390_stack_size)
7981 {
7982 warning (0, "frame size of function %qs is "
7983 HOST_WIDE_INT_PRINT_DEC
7984 " bytes which is more than half the stack size. "
7985 "The dynamic check would not be reliable. "
7986 "No check emitted for this function.",
7987 current_function_name(),
7988 cfun_frame_layout.frame_size);
7989 }
7990 else
7991 {
7992 HOST_WIDE_INT stack_check_mask = ((s390_stack_size - 1)
7993 & ~(stack_guard - 1));
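/* Worked example (an illustration, not part of the original sources):
   with -mstack-size=65536 and -mstack-guard=4096 the mask becomes
   (65536 - 1) & ~(4096 - 1) = 0xf000; the conditional trap emitted
   below fires when all of those stack-pointer bits are zero.  */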
7994
7995 rtx t = gen_rtx_AND (Pmode, stack_pointer_rtx,
7996 GEN_INT (stack_check_mask));
7997 if (TARGET_64BIT)
7998 emit_insn (gen_ctrapdi4 (gen_rtx_EQ (VOIDmode,
7999 t, const0_rtx),
8000 t, const0_rtx, const0_rtx));
8001 else
8002 emit_insn (gen_ctrapsi4 (gen_rtx_EQ (VOIDmode,
8003 t, const0_rtx),
8004 t, const0_rtx, const0_rtx));
8005 }
8006 }
8007 }
8008
8009 if (s390_warn_framesize > 0
8010 && cfun_frame_layout.frame_size >= s390_warn_framesize)
8011 warning (0, "frame size of %qs is " HOST_WIDE_INT_PRINT_DEC " bytes",
8012 current_function_name (), cfun_frame_layout.frame_size);
8013
8014 if (s390_warn_dynamicstack_p && cfun->calls_alloca)
8015 warning (0, "%qs uses dynamic stack allocation", current_function_name ());
8016
8017 /* Save incoming stack pointer into temp reg. */
8018 if (TARGET_BACKCHAIN || next_fpr)
8019 insn = emit_insn (gen_move_insn (temp_reg, stack_pointer_rtx));
8020
8021 /* Subtract frame size from stack pointer. */
8022
8023 if (DISP_IN_RANGE (INTVAL (frame_off)))
8024 {
8025 insn = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8026 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8027 frame_off));
8028 insn = emit_insn (insn);
8029 }
8030 else
8031 {
8032 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
8033 frame_off = force_const_mem (Pmode, frame_off);
8034
8035 insn = emit_insn (gen_add2_insn (stack_pointer_rtx, frame_off));
8036 annotate_constant_pool_refs (&PATTERN (insn));
8037 }
8038
8039 RTX_FRAME_RELATED_P (insn) = 1;
8040 real_frame_off = GEN_INT (-cfun_frame_layout.frame_size);
8041 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
8042 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8043 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8044 real_frame_off)));
8045
8046 /* Set backchain. */
8047
8048 if (TARGET_BACKCHAIN)
8049 {
8050 if (cfun_frame_layout.backchain_offset)
8051 addr = gen_rtx_MEM (Pmode,
8052 plus_constant (stack_pointer_rtx,
8053 cfun_frame_layout.backchain_offset));
8054 else
8055 addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
8056 set_mem_alias_set (addr, get_frame_alias_set ());
8057 insn = emit_insn (gen_move_insn (addr, temp_reg));
8058 }
8059
8060 /* If we support non-call exceptions (e.g. for Java),
8061 we need to make sure the backchain pointer is set up
8062 before any possibly trapping memory access. */
8063 if (TARGET_BACKCHAIN && cfun->can_throw_non_call_exceptions)
8064 {
8065 addr = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode));
8066 emit_clobber (addr);
8067 }
8068 }
8069
8070 /* Save fprs 8 - 15 (64 bit ABI). */
8071
8072 if (cfun_save_high_fprs_p && next_fpr)
8073 {
8074 /* If the stack might be accessed through a different register
8075 we have to make sure that the stack pointer decrement is not
8076 moved below the use of the stack slots. */
8077 s390_emit_stack_tie ();
8078
8079 insn = emit_insn (gen_add2_insn (temp_reg,
8080 GEN_INT (cfun_frame_layout.f8_offset)));
8081
8082 offset = 0;
8083
8084 for (i = 24; i <= next_fpr; i++)
8085 if (cfun_fpr_bit_p (i - 16))
8086 {
8087 rtx addr = plus_constant (stack_pointer_rtx,
8088 cfun_frame_layout.frame_size
8089 + cfun_frame_layout.f8_offset
8090 + offset);
8091
8092 insn = save_fpr (temp_reg, offset, i);
8093 offset += 8;
8094 RTX_FRAME_RELATED_P (insn) = 1;
8095 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
8096 gen_rtx_SET (VOIDmode,
8097 gen_rtx_MEM (DFmode, addr),
8098 gen_rtx_REG (DFmode, i)));
8099 }
8100 }
8101
8102 /* Set frame pointer, if needed. */
8103
8104 if (frame_pointer_needed)
8105 {
8106 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
8107 RTX_FRAME_RELATED_P (insn) = 1;
8108 }
8109
8110 /* Set up got pointer, if needed. */
8111
8112 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
8113 {
8114 rtx insns = s390_load_got ();
8115
8116 for (insn = insns; insn; insn = NEXT_INSN (insn))
8117 annotate_constant_pool_refs (&PATTERN (insn));
8118
8119 emit_insn (insns);
8120 }
8121
8122 if (TARGET_TPF_PROFILING)
8123 {
8124 /* Generate a BAS instruction to serve as a function
8125 entry intercept to facilitate the use of tracing
8126 algorithms located at the branch target. */
8127 emit_insn (gen_prologue_tpf ());
8128
8129 /* Emit a blockage here so that all code
8130 lies between the profiling mechanisms. */
8131 emit_insn (gen_blockage ());
8132 }
8133 }
8134
8135 /* Expand the epilogue into a bunch of separate insns. */
8136
8137 void
8138 s390_emit_epilogue (bool sibcall)
8139 {
8140 rtx frame_pointer, return_reg, cfa_restores = NULL_RTX;
8141 int area_bottom, area_top, offset = 0;
8142 int next_offset;
8143 rtvec p;
8144 int i;
8145
8146 if (TARGET_TPF_PROFILING)
8147 {
8148
8149 /* Generate a BAS instruction to serve as a function
8150 entry intercept to facilitate the use of tracing
8151 algorithms located at the branch target. */
8152
8153 /* Emit a blockage here so that all code
8154 lies between the profiling mechanisms. */
8155 emit_insn (gen_blockage ());
8156
8157 emit_insn (gen_epilogue_tpf ());
8158 }
8159
8160 /* Check whether to use frame or stack pointer for restore. */
8161
8162 frame_pointer = (frame_pointer_needed
8163 ? hard_frame_pointer_rtx : stack_pointer_rtx);
8164
8165 s390_frame_area (&area_bottom, &area_top);
8166
8167 /* Check whether we can access the register save area.
8168 If not, increment the frame pointer as required. */
8169
8170 if (area_top <= area_bottom)
8171 {
8172 /* Nothing to restore. */
8173 }
8174 else if (DISP_IN_RANGE (cfun_frame_layout.frame_size + area_bottom)
8175 && DISP_IN_RANGE (cfun_frame_layout.frame_size + area_top - 1))
8176 {
8177 /* Area is in range. */
8178 offset = cfun_frame_layout.frame_size;
8179 }
8180 else
8181 {
8182 rtx insn, frame_off, cfa;
8183
8184 offset = area_bottom < 0 ? -area_bottom : 0;
8185 frame_off = GEN_INT (cfun_frame_layout.frame_size - offset);
8186
8187 cfa = gen_rtx_SET (VOIDmode, frame_pointer,
8188 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
8189 if (DISP_IN_RANGE (INTVAL (frame_off)))
8190 {
8191 insn = gen_rtx_SET (VOIDmode, frame_pointer,
8192 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
8193 insn = emit_insn (insn);
8194 }
8195 else
8196 {
8197 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
8198 frame_off = force_const_mem (Pmode, frame_off);
8199
8200 insn = emit_insn (gen_add2_insn (frame_pointer, frame_off));
8201 annotate_constant_pool_refs (&PATTERN (insn));
8202 }
8203 add_reg_note (insn, REG_CFA_ADJUST_CFA, cfa);
8204 RTX_FRAME_RELATED_P (insn) = 1;
8205 }
8206
8207 /* Restore call saved fprs. */
8208
8209 if (TARGET_64BIT)
8210 {
8211 if (cfun_save_high_fprs_p)
8212 {
8213 next_offset = cfun_frame_layout.f8_offset;
8214 for (i = 24; i < 32; i++)
8215 {
8216 if (cfun_fpr_bit_p (i - 16))
8217 {
8218 restore_fpr (frame_pointer,
8219 offset + next_offset, i);
8220 cfa_restores
8221 = alloc_reg_note (REG_CFA_RESTORE,
8222 gen_rtx_REG (DFmode, i), cfa_restores);
8223 next_offset += 8;
8224 }
8225 }
8226 }
8227
8228 }
8229 else
8230 {
8231 next_offset = cfun_frame_layout.f4_offset;
8232 for (i = 18; i < 20; i++)
8233 {
8234 if (cfun_fpr_bit_p (i - 16))
8235 {
8236 restore_fpr (frame_pointer,
8237 offset + next_offset, i);
8238 cfa_restores
8239 = alloc_reg_note (REG_CFA_RESTORE,
8240 gen_rtx_REG (DFmode, i), cfa_restores);
8241 next_offset += 8;
8242 }
8243 else if (!TARGET_PACKED_STACK)
8244 next_offset += 8;
8245 }
8246
8247 }
8248
8249 /* Return register. */
8250
8251 return_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
8252
8253 /* Restore call saved gprs. */
8254
8255 if (cfun_frame_layout.first_restore_gpr != -1)
8256 {
8257 rtx insn, addr;
8258 int i;
8259
8260 /* Check for global registers and save them to the stack
8261 locations from which they will subsequently be restored. */
8262
8263 for (i = cfun_frame_layout.first_restore_gpr;
8264 i <= cfun_frame_layout.last_restore_gpr;
8265 i++)
8266 {
8267 if (global_not_special_regno_p (i))
8268 {
8269 addr = plus_constant (frame_pointer,
8270 offset + cfun_frame_layout.gprs_offset
8271 + (i - cfun_frame_layout.first_save_gpr_slot)
8272 * UNITS_PER_LONG);
8273 addr = gen_rtx_MEM (Pmode, addr);
8274 set_mem_alias_set (addr, get_frame_alias_set ());
8275 emit_move_insn (addr, gen_rtx_REG (Pmode, i));
8276 }
8277 else
8278 cfa_restores
8279 = alloc_reg_note (REG_CFA_RESTORE,
8280 gen_rtx_REG (Pmode, i), cfa_restores);
8281 }
8282
8283 if (! sibcall)
8284 {
8285 /* Fetch the return address from the stack before the load
8286 multiple; this helps scheduling. */
8287
8288 if (cfun_frame_layout.save_return_addr_p
8289 || (cfun_frame_layout.first_restore_gpr < BASE_REGNUM
8290 && cfun_frame_layout.last_restore_gpr > RETURN_REGNUM))
8291 {
8292 int return_regnum = find_unused_clobbered_reg();
8293 if (!return_regnum)
8294 return_regnum = 4;
8295 return_reg = gen_rtx_REG (Pmode, return_regnum);
8296
8297 addr = plus_constant (frame_pointer,
8298 offset + cfun_frame_layout.gprs_offset
8299 + (RETURN_REGNUM
8300 - cfun_frame_layout.first_save_gpr_slot)
8301 * UNITS_PER_LONG);
8302 addr = gen_rtx_MEM (Pmode, addr);
8303 set_mem_alias_set (addr, get_frame_alias_set ());
8304 emit_move_insn (return_reg, addr);
8305 }
8306 }
8307
8308 insn = restore_gprs (frame_pointer,
8309 offset + cfun_frame_layout.gprs_offset
8310 + (cfun_frame_layout.first_restore_gpr
8311 - cfun_frame_layout.first_save_gpr_slot)
8312 * UNITS_PER_LONG,
8313 cfun_frame_layout.first_restore_gpr,
8314 cfun_frame_layout.last_restore_gpr);
8315 insn = emit_insn (insn);
8316 REG_NOTES (insn) = cfa_restores;
8317 add_reg_note (insn, REG_CFA_DEF_CFA,
8318 plus_constant (stack_pointer_rtx, STACK_POINTER_OFFSET));
8319 RTX_FRAME_RELATED_P (insn) = 1;
8320 }
8321
8322 if (! sibcall)
8323 {
8324
8325 /* Return to caller. */
8326
8327 p = rtvec_alloc (2);
8328
8329 RTVEC_ELT (p, 0) = gen_rtx_RETURN (VOIDmode);
8330 RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode, return_reg);
8331 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
8332 }
8333 }
8334
8335
8336 /* Return the size in bytes of a function argument of
8337 type TYPE and/or mode MODE. At least one of TYPE or
8338 MODE must be specified. */
8339
8340 static int
8341 s390_function_arg_size (enum machine_mode mode, const_tree type)
8342 {
8343 if (type)
8344 return int_size_in_bytes (type);
8345
8346 /* No type info available for some library calls ... */
8347 if (mode != BLKmode)
8348 return GET_MODE_SIZE (mode);
8349
8350 /* If we have neither type nor mode, abort. */
8351 gcc_unreachable ();
8352 }
8353
8354 /* Return true if a function argument of type TYPE and mode MODE
8355 is to be passed in a floating-point register, if available. */
8356
8357 static bool
8358 s390_function_arg_float (enum machine_mode mode, const_tree type)
8359 {
8360 int size = s390_function_arg_size (mode, type);
8361 if (size > 8)
8362 return false;
8363
8364 /* Soft-float changes the ABI: no floating-point registers are used. */
8365 if (TARGET_SOFT_FLOAT)
8366 return false;
8367
8368 /* No type info available for some library calls ... */
8369 if (!type)
8370 return mode == SFmode || mode == DFmode || mode == SDmode || mode == DDmode;
8371
8372 /* The ABI says that record types with a single member are treated
8373 just like that member would be. */
8374 while (TREE_CODE (type) == RECORD_TYPE)
8375 {
8376 tree field, single = NULL_TREE;
8377
8378 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
8379 {
8380 if (TREE_CODE (field) != FIELD_DECL)
8381 continue;
8382
8383 if (single == NULL_TREE)
8384 single = TREE_TYPE (field);
8385 else
8386 return false;
8387 }
8388
8389 if (single == NULL_TREE)
8390 return false;
8391 else
8392 type = single;
8393 }
8394
8395 return TREE_CODE (type) == REAL_TYPE;
8396 }
8397
8398 /* Return true if a function argument of type TYPE and mode MODE
8399 is to be passed in an integer register, or a pair of integer
8400 registers, if available. */
8401
8402 static bool
8403 s390_function_arg_integer (enum machine_mode mode, const_tree type)
8404 {
8405 int size = s390_function_arg_size (mode, type);
8406 if (size > 8)
8407 return false;
8408
8409 /* No type info available for some library calls ... */
8410 if (!type)
8411 return GET_MODE_CLASS (mode) == MODE_INT
8412 || (TARGET_SOFT_FLOAT && SCALAR_FLOAT_MODE_P (mode));
8413
8414 /* We accept small integral (and similar) types. */
8415 if (INTEGRAL_TYPE_P (type)
8416 || POINTER_TYPE_P (type)
8417 || TREE_CODE (type) == OFFSET_TYPE
8418 || (TARGET_SOFT_FLOAT && TREE_CODE (type) == REAL_TYPE))
8419 return true;
8420
8421 /* We also accept structs of size 1, 2, 4, 8 that are not
8422 passed in floating-point registers. */
8423 if (AGGREGATE_TYPE_P (type)
8424 && exact_log2 (size) >= 0
8425 && !s390_function_arg_float (mode, type))
8426 return true;
8427
8428 return false;
8429 }
8430
8431 /* Return 1 if a function argument of type TYPE and mode MODE
8432 is to be passed by reference. The ABI specifies that only
8433 structures of size 1, 2, 4, or 8 bytes are passed by value,
8434 all other structures (and complex numbers) are passed by
8435 reference. */
8436
8437 static bool
8438 s390_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
8439 enum machine_mode mode, const_tree type,
8440 bool named ATTRIBUTE_UNUSED)
8441 {
8442 int size = s390_function_arg_size (mode, type);
8443 if (size > 8)
8444 return true;
8445
8446 if (type)
8447 {
8448 if (AGGREGATE_TYPE_P (type) && exact_log2 (size) < 0)
8449 return 1;
8450
8451 if (TREE_CODE (type) == COMPLEX_TYPE
8452 || TREE_CODE (type) == VECTOR_TYPE)
8453 return 1;
8454 }
8455
8456 return 0;
8457 }
8458
8459 /* Update the data in CUM to advance over an argument of mode MODE and
8460 data type TYPE. (TYPE is null for libcalls where that information
8461 may not be available.). The boolean NAMED specifies whether the
8462 argument is a named argument (as opposed to an unnamed argument
8463 matching an ellipsis). */
8464
8465 static void
8466 s390_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
8467 const_tree type, bool named ATTRIBUTE_UNUSED)
8468 {
8469 if (s390_function_arg_float (mode, type))
8470 {
8471 cum->fprs += 1;
8472 }
8473 else if (s390_function_arg_integer (mode, type))
8474 {
8475 int size = s390_function_arg_size (mode, type);
8476 cum->gprs += ((size + UNITS_PER_LONG - 1) / UNITS_PER_LONG);
8477 }
8478 else
8479 gcc_unreachable ();
8480 }
8481
8482 /* Define where to put the arguments to a function.
8483 Value is zero to push the argument on the stack,
8484 or a hard register in which to store the argument.
8485
8486 MODE is the argument's machine mode.
8487 TYPE is the data type of the argument (as a tree).
8488 This is null for libcalls where that information may
8489 not be available.
8490 CUM is a variable of type CUMULATIVE_ARGS which gives info about
8491 the preceding args and about the function being called.
8492 NAMED is nonzero if this argument is a named parameter
8493 (otherwise it is an extra parameter matching an ellipsis).
8494
8495 On S/390, we use general purpose registers 2 through 6 to
8496 pass integer, pointer, and certain structure arguments, and
8497 floating point registers 0 and 2 (0, 2, 4, and 6 on 64-bit)
8498 to pass floating point arguments. All remaining arguments
8499 are pushed to the stack. */
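/* For illustration only (not part of the original sources): on a
   31-bit hard-float target a call such as

       int f (int a, double d, int b);

   passes A in %r2, D in %f0 and B in %r3; an 8-byte integer argument
   would consume two of the five available GPR slots.  */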
8500
8501 static rtx
8502 s390_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
8503 const_tree type, bool named ATTRIBUTE_UNUSED)
8504 {
8505 if (s390_function_arg_float (mode, type))
8506 {
8507 if (cum->fprs + 1 > FP_ARG_NUM_REG)
8508 return 0;
8509 else
8510 return gen_rtx_REG (mode, cum->fprs + 16);
8511 }
8512 else if (s390_function_arg_integer (mode, type))
8513 {
8514 int size = s390_function_arg_size (mode, type);
8515 int n_gprs = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
8516
8517 if (cum->gprs + n_gprs > GP_ARG_NUM_REG)
8518 return 0;
8519 else if (n_gprs == 1 || UNITS_PER_WORD == UNITS_PER_LONG)
8520 return gen_rtx_REG (mode, cum->gprs + 2);
8521 else if (n_gprs == 2)
8522 {
8523 rtvec p = rtvec_alloc (2);
8524
8525 RTVEC_ELT (p, 0)
8526 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 2),
8527 const0_rtx);
8528 RTVEC_ELT (p, 1)
8529 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 3),
8530 GEN_INT (4));
8531
8532 return gen_rtx_PARALLEL (mode, p);
8533 }
8534 }
8535
8536 /* After the real arguments, expand_call calls us once again
8537 with a void_type_node type. Whatever we return here is
8538 passed as operand 2 to the call expanders.
8539
8540 We don't need this feature ... */
8541 else if (type == void_type_node)
8542 return const0_rtx;
8543
8544 gcc_unreachable ();
8545 }
8546
8547 /* Return true if return values of type TYPE should be returned
8548 in a memory buffer whose address is passed by the caller as
8549 hidden first argument. */
8550
8551 static bool
8552 s390_return_in_memory (const_tree type, const_tree fundecl ATTRIBUTE_UNUSED)
8553 {
8554 /* We accept small integral (and similar) types. */
8555 if (INTEGRAL_TYPE_P (type)
8556 || POINTER_TYPE_P (type)
8557 || TREE_CODE (type) == OFFSET_TYPE
8558 || TREE_CODE (type) == REAL_TYPE)
8559 return int_size_in_bytes (type) > 8;
8560
8561 /* Aggregates and similar constructs are always returned
8562 in memory. */
8563 if (AGGREGATE_TYPE_P (type)
8564 || TREE_CODE (type) == COMPLEX_TYPE
8565 || TREE_CODE (type) == VECTOR_TYPE)
8566 return true;
8567
8568 /* ??? We get called on all sorts of random stuff from
8569 aggregate_value_p. We can't abort, but it's not clear
8570 what's safe to return. Pretend it's a struct I guess. */
8571 return true;
8572 }
8573
8574 /* Function arguments and return values are promoted to word size. */
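/* For example (an illustration, not part of the original sources): a QImode
   or HImode integer argument is widened to Pmode, i.e. SImode on 31-bit and
   DImode on 64-bit targets, with pointer arguments extended according to
   POINTERS_EXTEND_UNSIGNED.  */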
8575
8576 static enum machine_mode
8577 s390_promote_function_mode (const_tree type, enum machine_mode mode,
8578 int *punsignedp,
8579 const_tree fntype ATTRIBUTE_UNUSED,
8580 int for_return ATTRIBUTE_UNUSED)
8581 {
8582 if (INTEGRAL_MODE_P (mode)
8583 && GET_MODE_SIZE (mode) < UNITS_PER_LONG)
8584 {
8585 if (POINTER_TYPE_P (type))
8586 *punsignedp = POINTERS_EXTEND_UNSIGNED;
8587 return Pmode;
8588 }
8589
8590 return mode;
8591 }
8592
8593 /* Define where to return a (scalar) value of type TYPE.
8594 If TYPE is null, define where to return a (scalar)
8595 value of mode MODE from a libcall. */
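/* For illustration only (not part of the original sources): an int or a
   pointer comes back in %r2, a scalar float or double comes back in %f0
   when hard float is in use, and an 8-byte value on a 31-bit target
   occupies the %r2/%r3 pair.  */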
8596
8597 rtx
8598 s390_function_value (const_tree type, const_tree fn, enum machine_mode mode)
8599 {
8600 if (type)
8601 {
8602 int unsignedp = TYPE_UNSIGNED (type);
8603 mode = promote_function_mode (type, TYPE_MODE (type), &unsignedp, fn, 1);
8604 }
8605
8606 gcc_assert (GET_MODE_CLASS (mode) == MODE_INT || SCALAR_FLOAT_MODE_P (mode));
8607 gcc_assert (GET_MODE_SIZE (mode) <= 8);
8608
8609 if (TARGET_HARD_FLOAT && SCALAR_FLOAT_MODE_P (mode))
8610 return gen_rtx_REG (mode, 16);
8611 else if (GET_MODE_SIZE (mode) <= UNITS_PER_LONG
8612 || UNITS_PER_LONG == UNITS_PER_WORD)
8613 return gen_rtx_REG (mode, 2);
8614 else if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_LONG)
8615 {
8616 rtvec p = rtvec_alloc (2);
8617
8618 RTVEC_ELT (p, 0)
8619 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 2), const0_rtx);
8620 RTVEC_ELT (p, 1)
8621 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 3), GEN_INT (4));
8622
8623 return gen_rtx_PARALLEL (mode, p);
8624 }
8625
8626 gcc_unreachable ();
8627 }
8628
8629
8630 /* Create and return the va_list datatype.
8631
8632 On S/390, va_list is an array type equivalent to
8633
8634 typedef struct __va_list_tag
8635 {
8636 long __gpr;
8637 long __fpr;
8638 void *__overflow_arg_area;
8639 void *__reg_save_area;
8640 } va_list[1];
8641
8642 where __gpr and __fpr hold the number of general purpose
8643 or floating point arguments used up to now, respectively,
8644 __overflow_arg_area points to the stack location of the
8645 next argument passed on the stack, and __reg_save_area
8646 always points to the start of the register area in the
8647 call frame of the current function. The function prologue
8648 saves all registers used for argument passing into this
8649 area if the function uses variable arguments. */
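/* For illustration only (not part of the original sources): with the
   64-bit ABI the record above is 32 bytes (two longs plus two pointers),
   16 bytes with the 31-bit ABI; because va_list is an array of one such
   record it decays to a pointer when passed to a function.  */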
8650
8651 static tree
8652 s390_build_builtin_va_list (void)
8653 {
8654 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
8655
8656 record = lang_hooks.types.make_type (RECORD_TYPE);
8657
8658 type_decl =
8659 build_decl (BUILTINS_LOCATION,
8660 TYPE_DECL, get_identifier ("__va_list_tag"), record);
8661
8662 f_gpr = build_decl (BUILTINS_LOCATION,
8663 FIELD_DECL, get_identifier ("__gpr"),
8664 long_integer_type_node);
8665 f_fpr = build_decl (BUILTINS_LOCATION,
8666 FIELD_DECL, get_identifier ("__fpr"),
8667 long_integer_type_node);
8668 f_ovf = build_decl (BUILTINS_LOCATION,
8669 FIELD_DECL, get_identifier ("__overflow_arg_area"),
8670 ptr_type_node);
8671 f_sav = build_decl (BUILTINS_LOCATION,
8672 FIELD_DECL, get_identifier ("__reg_save_area"),
8673 ptr_type_node);
8674
8675 va_list_gpr_counter_field = f_gpr;
8676 va_list_fpr_counter_field = f_fpr;
8677
8678 DECL_FIELD_CONTEXT (f_gpr) = record;
8679 DECL_FIELD_CONTEXT (f_fpr) = record;
8680 DECL_FIELD_CONTEXT (f_ovf) = record;
8681 DECL_FIELD_CONTEXT (f_sav) = record;
8682
8683 TYPE_STUB_DECL (record) = type_decl;
8684 TYPE_NAME (record) = type_decl;
8685 TYPE_FIELDS (record) = f_gpr;
8686 DECL_CHAIN (f_gpr) = f_fpr;
8687 DECL_CHAIN (f_fpr) = f_ovf;
8688 DECL_CHAIN (f_ovf) = f_sav;
8689
8690 layout_type (record);
8691
8692 /* The correct type is an array type of one element. */
8693 return build_array_type (record, build_index_type (size_zero_node));
8694 }
8695
8696 /* Implement va_start by filling the va_list structure VALIST.
8697 STDARG_P is always true, and ignored.
8698 NEXTARG points to the first anonymous stack argument.
8699
8700 The following global variables are used to initialize
8701 the va_list structure:
8702
8703 crtl->args.info:
8704 holds number of gprs and fprs used for named arguments.
8705 crtl->args.arg_offset_rtx:
8706 holds the offset of the first anonymous stack argument
8707 (relative to the virtual arg pointer). */
8708
8709 static void
8710 s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
8711 {
8712 HOST_WIDE_INT n_gpr, n_fpr;
8713 int off;
8714 tree f_gpr, f_fpr, f_ovf, f_sav;
8715 tree gpr, fpr, ovf, sav, t;
8716
8717 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
8718 f_fpr = DECL_CHAIN (f_gpr);
8719 f_ovf = DECL_CHAIN (f_fpr);
8720 f_sav = DECL_CHAIN (f_ovf);
8721
8722 valist = build_simple_mem_ref (valist);
8723 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
8724 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
8725 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
8726 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
8727
8728 /* Count number of gp and fp argument registers used. */
8729
8730 n_gpr = crtl->args.info.gprs;
8731 n_fpr = crtl->args.info.fprs;
8732
8733 if (cfun->va_list_gpr_size)
8734 {
8735 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
8736 build_int_cst (NULL_TREE, n_gpr));
8737 TREE_SIDE_EFFECTS (t) = 1;
8738 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8739 }
8740
8741 if (cfun->va_list_fpr_size)
8742 {
8743 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
8744 build_int_cst (NULL_TREE, n_fpr));
8745 TREE_SIDE_EFFECTS (t) = 1;
8746 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8747 }
8748
8749 /* Find the overflow area. */
8750 if (n_gpr + cfun->va_list_gpr_size > GP_ARG_NUM_REG
8751 || n_fpr + cfun->va_list_fpr_size > FP_ARG_NUM_REG)
8752 {
8753 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
8754
8755 off = INTVAL (crtl->args.arg_offset_rtx);
8756 off = off < 0 ? 0 : off;
8757 if (TARGET_DEBUG_ARG)
8758 fprintf (stderr, "va_start: n_gpr = %d, n_fpr = %d off %d\n",
8759 (int)n_gpr, (int)n_fpr, off);
8760
8761 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovf), t, size_int (off));
8762
8763 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
8764 TREE_SIDE_EFFECTS (t) = 1;
8765 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8766 }
8767
8768 /* Find the register save area. */
8769 if ((cfun->va_list_gpr_size && n_gpr < GP_ARG_NUM_REG)
8770 || (cfun->va_list_fpr_size && n_fpr < FP_ARG_NUM_REG))
8771 {
8772 t = make_tree (TREE_TYPE (sav), return_address_pointer_rtx);
8773 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (sav), t,
8774 size_int (-RETURN_REGNUM * UNITS_PER_LONG));
8775
8776 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
8777 TREE_SIDE_EFFECTS (t) = 1;
8778 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8779 }
8780 }
8781
8782 /* Implement va_arg by updating the va_list structure
8783 VALIST as required to retrieve an argument of type
8784 TYPE, and returning that argument.
8785
8786 Generates code equivalent to:
8787
8788 if (integral value) {
8789 if (size <= 4 && args.gpr < 5 ||
8790 size > 4 && args.gpr < 4 )
8791 ret = args.reg_save_area[args.gpr+8]
8792 else
8793 ret = *args.overflow_arg_area++;
8794 } else if (float value) {
8795 if (args.fpr < 2)
8796 ret = args.reg_save_area[args.fpr+64]
8797 else
8798 ret = *args.overflow_arg_area++;
8799 } else if (aggregate value) {
8800 if (args.gpr < 5)
8801 ret = *args.reg_save_area[args.gpr]
8802 else
8803 ret = **args.overflow_arg_area++;
8804 } */
8805
8806 static tree
8807 s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
8808 gimple_seq *post_p ATTRIBUTE_UNUSED)
8809 {
8810 tree f_gpr, f_fpr, f_ovf, f_sav;
8811 tree gpr, fpr, ovf, sav, reg, t, u;
8812 int indirect_p, size, n_reg, sav_ofs, sav_scale, max_reg;
8813 tree lab_false, lab_over, addr;
8814
8815 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
8816 f_fpr = DECL_CHAIN (f_gpr);
8817 f_ovf = DECL_CHAIN (f_fpr);
8818 f_sav = DECL_CHAIN (f_ovf);
8819
8820 valist = build_va_arg_indirect_ref (valist);
8821 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
8822 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
8823 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
8824
8825 /* The tree for args* cannot be shared between gpr/fpr and ovf since
8826 both appear on an lhs. */
8827 valist = unshare_expr (valist);
8828 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
8829
8830 size = int_size_in_bytes (type);
8831
8832 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
8833 {
8834 if (TARGET_DEBUG_ARG)
8835 {
8836 fprintf (stderr, "va_arg: aggregate type");
8837 debug_tree (type);
8838 }
8839
8840 /* Aggregates are passed by reference. */
8841 indirect_p = 1;
8842 reg = gpr;
8843 n_reg = 1;
8844
8845 /* Kernel stack layout on 31 bit: it is assumed here that no padding
8846 will be added by s390_frame_info because for va_args an even number
8847 of gprs always has to be saved (r15-r2 = 14 regs). */
8848 sav_ofs = 2 * UNITS_PER_LONG;
8849 sav_scale = UNITS_PER_LONG;
8850 size = UNITS_PER_LONG;
8851 max_reg = GP_ARG_NUM_REG - n_reg;
8852 }
8853 else if (s390_function_arg_float (TYPE_MODE (type), type))
8854 {
8855 if (TARGET_DEBUG_ARG)
8856 {
8857 fprintf (stderr, "va_arg: float type");
8858 debug_tree (type);
8859 }
8860
8861 /* FP args go in FP registers, if present. */
8862 indirect_p = 0;
8863 reg = fpr;
8864 n_reg = 1;
8865 sav_ofs = 16 * UNITS_PER_LONG;
8866 sav_scale = 8;
8867 max_reg = FP_ARG_NUM_REG - n_reg;
8868 }
8869 else
8870 {
8871 if (TARGET_DEBUG_ARG)
8872 {
8873 fprintf (stderr, "va_arg: other type");
8874 debug_tree (type);
8875 }
8876
8877 /* Otherwise into GP registers. */
8878 indirect_p = 0;
8879 reg = gpr;
8880 n_reg = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
8881
8882 /* Kernel stack layout on 31 bit: it is assumed here that no padding
8883 will be added by s390_frame_info because for va_args an even number
8884 of gprs always has to be saved (r15-r2 = 14 regs). */
8885 sav_ofs = 2 * UNITS_PER_LONG;
8886
8887 if (size < UNITS_PER_LONG)
8888 sav_ofs += UNITS_PER_LONG - size;
8889
8890 sav_scale = UNITS_PER_LONG;
8891 max_reg = GP_ARG_NUM_REG - n_reg;
8892 }
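/* Worked example (an illustration, not part of the original sources):
   for a plain int on a 64-bit target the code above yields size = 4,
   n_reg = 1, sav_ofs = 16 + 4, sav_scale = 8 and max_reg = 4, so the
   register case below reads the value from
   __reg_save_area + 20 + __gpr * 8 as long as __gpr <= 4.  */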
8893
8894 /* Pull the value out of the saved registers ... */
8895
8896 lab_false = create_artificial_label (UNKNOWN_LOCATION);
8897 lab_over = create_artificial_label (UNKNOWN_LOCATION);
8898 addr = create_tmp_var (ptr_type_node, "addr");
8899
8900 t = fold_convert (TREE_TYPE (reg), size_int (max_reg));
8901 t = build2 (GT_EXPR, boolean_type_node, reg, t);
8902 u = build1 (GOTO_EXPR, void_type_node, lab_false);
8903 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
8904 gimplify_and_add (t, pre_p);
8905
8906 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav,
8907 size_int (sav_ofs));
8908 u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
8909 fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
8910 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t, fold_convert (sizetype, u));
8911
8912 gimplify_assign (addr, t, pre_p);
8913
8914 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
8915
8916 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
8917
8918
8919 /* ... Otherwise out of the overflow area. */
8920
8921 t = ovf;
8922 if (size < UNITS_PER_LONG)
8923 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
8924 size_int (UNITS_PER_LONG - size));
8925
8926 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
8927
8928 gimplify_assign (addr, t, pre_p);
8929
8930 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
8931 size_int (size));
8932 gimplify_assign (ovf, t, pre_p);
8933
8934 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
8935
8936
8937 /* Increment register save count. */
8938
8939 u = build2 (PREINCREMENT_EXPR, TREE_TYPE (reg), reg,
8940 fold_convert (TREE_TYPE (reg), size_int (n_reg)));
8941 gimplify_and_add (u, pre_p);
8942
8943 if (indirect_p)
8944 {
8945 t = build_pointer_type_for_mode (build_pointer_type (type),
8946 ptr_mode, true);
8947 addr = fold_convert (t, addr);
8948 addr = build_va_arg_indirect_ref (addr);
8949 }
8950 else
8951 {
8952 t = build_pointer_type_for_mode (type, ptr_mode, true);
8953 addr = fold_convert (t, addr);
8954 }
8955
8956 return build_va_arg_indirect_ref (addr);
8957 }
8958
8959
8960 /* Builtins. */
8961
8962 enum s390_builtin
8963 {
8964 S390_BUILTIN_THREAD_POINTER,
8965 S390_BUILTIN_SET_THREAD_POINTER,
8966
8967 S390_BUILTIN_max
8968 };
8969
8970 static enum insn_code const code_for_builtin_64[S390_BUILTIN_max] = {
8971 CODE_FOR_get_tp_64,
8972 CODE_FOR_set_tp_64
8973 };
8974
8975 static enum insn_code const code_for_builtin_31[S390_BUILTIN_max] = {
8976 CODE_FOR_get_tp_31,
8977 CODE_FOR_set_tp_31
8978 };
8979
8980 static void
8981 s390_init_builtins (void)
8982 {
8983 tree ftype;
8984
8985 ftype = build_function_type (ptr_type_node, void_list_node);
8986 add_builtin_function ("__builtin_thread_pointer", ftype,
8987 S390_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
8988 NULL, NULL_TREE);
8989
8990 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
8991 add_builtin_function ("__builtin_set_thread_pointer", ftype,
8992 S390_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
8993 NULL, NULL_TREE);
8994 }
8995
8996 /* Expand an expression EXP that calls a built-in function,
8997 with result going to TARGET if that's convenient
8998 (and in mode MODE if that's convenient).
8999 SUBTARGET may be used as the target for computing one of EXP's operands.
9000 IGNORE is nonzero if the value is to be ignored. */
9001
9002 static rtx
9003 s390_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
9004 enum machine_mode mode ATTRIBUTE_UNUSED,
9005 int ignore ATTRIBUTE_UNUSED)
9006 {
9007 #define MAX_ARGS 2
9008
9009 enum insn_code const *code_for_builtin =
9010 TARGET_64BIT ? code_for_builtin_64 : code_for_builtin_31;
9011
9012 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
9013 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
9014 enum insn_code icode;
9015 rtx op[MAX_ARGS], pat;
9016 int arity;
9017 bool nonvoid;
9018 tree arg;
9019 call_expr_arg_iterator iter;
9020
9021 if (fcode >= S390_BUILTIN_max)
9022 internal_error ("bad builtin fcode");
9023 icode = code_for_builtin[fcode];
9024 if (icode == 0)
9025 internal_error ("bad builtin fcode");
9026
9027 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
9028
9029 arity = 0;
9030 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
9031 {
9032 const struct insn_operand_data *insn_op;
9033
9034 if (arg == error_mark_node)
9035 return NULL_RTX;
9036 if (arity > MAX_ARGS)
9037 return NULL_RTX;
9038
9039 insn_op = &insn_data[icode].operand[arity + nonvoid];
9040
9041 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
9042
9043 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
9044 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
9045 arity++;
9046 }
9047
9048 if (nonvoid)
9049 {
9050 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9051 if (!target
9052 || GET_MODE (target) != tmode
9053 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
9054 target = gen_reg_rtx (tmode);
9055 }
9056
9057 switch (arity)
9058 {
9059 case 0:
9060 pat = GEN_FCN (icode) (target);
9061 break;
9062 case 1:
9063 if (nonvoid)
9064 pat = GEN_FCN (icode) (target, op[0]);
9065 else
9066 pat = GEN_FCN (icode) (op[0]);
9067 break;
9068 case 2:
9069 pat = GEN_FCN (icode) (target, op[0], op[1]);
9070 break;
9071 default:
9072 gcc_unreachable ();
9073 }
9074 if (!pat)
9075 return NULL_RTX;
9076 emit_insn (pat);
9077
9078 if (nonvoid)
9079 return target;
9080 else
9081 return const0_rtx;
9082 }
9083
9084
9085 /* Output assembly code for the trampoline template to
9086 stdio stream FILE.
9087
9088 On S/390, we use gpr 1 internally in the trampoline code;
9089 gpr 0 is used to hold the static chain. */
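/* For illustration only (not part of the original sources): on 64-bit the
   BASR in the template below sets %r1 to the address just past itself
   (trampoline offset 2), so LMG %r0,%r1,14(%r1) loads the static chain
   from offset 16 and the target address from offset 24 (the two slots
   that s390_trampoline_init fills in) before BR %r1 enters the target.
   The 31-bit variant works the same way with 4-byte slots.  */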
9090
9091 static void
9092 s390_asm_trampoline_template (FILE *file)
9093 {
9094 rtx op[2];
9095 op[0] = gen_rtx_REG (Pmode, 0);
9096 op[1] = gen_rtx_REG (Pmode, 1);
9097
9098 if (TARGET_64BIT)
9099 {
9100 output_asm_insn ("basr\t%1,0", op);
9101 output_asm_insn ("lmg\t%0,%1,14(%1)", op);
9102 output_asm_insn ("br\t%1", op);
9103 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 10));
9104 }
9105 else
9106 {
9107 output_asm_insn ("basr\t%1,0", op);
9108 output_asm_insn ("lm\t%0,%1,6(%1)", op);
9109 output_asm_insn ("br\t%1", op);
9110 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 8));
9111 }
9112 }
9113
9114 /* Emit RTL insns to initialize the variable parts of a trampoline.
9115 FNADDR is an RTX for the address of the function's pure code.
9116 CXT is an RTX for the static chain value for the function. */
9117
9118 static void
9119 s390_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
9120 {
9121 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
9122 rtx mem;
9123
9124 emit_block_move (m_tramp, assemble_trampoline_template (),
9125 GEN_INT (2*UNITS_PER_WORD), BLOCK_OP_NORMAL);
9126
9127 mem = adjust_address (m_tramp, Pmode, 2*UNITS_PER_WORD);
9128 emit_move_insn (mem, cxt);
9129 mem = adjust_address (m_tramp, Pmode, 3*UNITS_PER_WORD);
9130 emit_move_insn (mem, fnaddr);
9131 }
9132
9133 /* Output assembler code to FILE to increment profiler label # LABELNO
9134 for profiling a function entry. */
9135
9136 void
9137 s390_function_profiler (FILE *file, int labelno)
9138 {
9139 rtx op[7];
9140
9141 char label[128];
9142 ASM_GENERATE_INTERNAL_LABEL (label, "LP", labelno);
9143
9144 fprintf (file, "# function profiler \n");
9145
9146 op[0] = gen_rtx_REG (Pmode, RETURN_REGNUM);
9147 op[1] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
9148 op[1] = gen_rtx_MEM (Pmode, plus_constant (op[1], UNITS_PER_LONG));
9149
9150 op[2] = gen_rtx_REG (Pmode, 1);
9151 op[3] = gen_rtx_SYMBOL_REF (Pmode, label);
9152 SYMBOL_REF_FLAGS (op[3]) = SYMBOL_FLAG_LOCAL;
9153
9154 op[4] = gen_rtx_SYMBOL_REF (Pmode, "_mcount");
9155 if (flag_pic)
9156 {
9157 op[4] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[4]), UNSPEC_PLT);
9158 op[4] = gen_rtx_CONST (Pmode, op[4]);
9159 }
9160
9161 if (TARGET_64BIT)
9162 {
9163 output_asm_insn ("stg\t%0,%1", op);
9164 output_asm_insn ("larl\t%2,%3", op);
9165 output_asm_insn ("brasl\t%0,%4", op);
9166 output_asm_insn ("lg\t%0,%1", op);
9167 }
9168 else if (!flag_pic)
9169 {
9170 op[6] = gen_label_rtx ();
9171
9172 output_asm_insn ("st\t%0,%1", op);
9173 output_asm_insn ("bras\t%2,%l6", op);
9174 output_asm_insn (".long\t%4", op);
9175 output_asm_insn (".long\t%3", op);
9176 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
9177 output_asm_insn ("l\t%0,0(%2)", op);
9178 output_asm_insn ("l\t%2,4(%2)", op);
9179 output_asm_insn ("basr\t%0,%0", op);
9180 output_asm_insn ("l\t%0,%1", op);
9181 }
9182 else
9183 {
9184 op[5] = gen_label_rtx ();
9185 op[6] = gen_label_rtx ();
9186
9187 output_asm_insn ("st\t%0,%1", op);
9188 output_asm_insn ("bras\t%2,%l6", op);
9189 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[5]));
9190 output_asm_insn (".long\t%4-%l5", op);
9191 output_asm_insn (".long\t%3-%l5", op);
9192 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
9193 output_asm_insn ("lr\t%0,%2", op);
9194 output_asm_insn ("a\t%0,0(%2)", op);
9195 output_asm_insn ("a\t%2,4(%2)", op);
9196 output_asm_insn ("basr\t%0,%0", op);
9197 output_asm_insn ("l\t%0,%1", op);
9198 }
9199 }
9200
9201 /* Encode symbol attributes (local vs. global, tls model) of a SYMBOL_REF
9202 into its SYMBOL_REF_FLAGS. */
9203
9204 static void
9205 s390_encode_section_info (tree decl, rtx rtl, int first)
9206 {
9207 default_encode_section_info (decl, rtl, first);
9208
9209 if (TREE_CODE (decl) == VAR_DECL)
9210 {
9211 /* If a variable has a forced alignment to < 2 bytes, mark it
9212 with SYMBOL_FLAG_ALIGN1 to prevent it from being used as a
9213 LARL operand. */
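/* For illustration only (not part of the original sources): LARL can
   only produce even addresses because its immediate operand counts
   halfwords, so e.g. a variable declared with
   __attribute__ ((aligned (1))) must not be addressed through it.  */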
9214 if (DECL_USER_ALIGN (decl) && DECL_ALIGN (decl) < 16)
9215 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_ALIGN1;
9216 if (!DECL_SIZE (decl)
9217 || !DECL_ALIGN (decl)
9218 || !host_integerp (DECL_SIZE (decl), 0)
9219 || (DECL_ALIGN (decl) <= 64
9220 && DECL_ALIGN (decl) != tree_low_cst (DECL_SIZE (decl), 0)))
9221 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
9222 }
9223
9224 /* Literal pool references don't have a decl so they are handled
9225 differently here. We rely on the information in the MEM_ALIGN
9226 entry to decide upon natural alignment. */
9227 if (MEM_P (rtl)
9228 && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
9229 && TREE_CONSTANT_POOL_ADDRESS_P (XEXP (rtl, 0))
9230 && (MEM_ALIGN (rtl) == 0
9231 || GET_MODE_BITSIZE (GET_MODE (rtl)) == 0
9232 || MEM_ALIGN (rtl) < GET_MODE_BITSIZE (GET_MODE (rtl))))
9233 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
9234 }
9235
9236 /* Output thunk to FILE that implements a C++ virtual function call (with
9237 multiple inheritance) to FUNCTION. The thunk adjusts the this pointer
9238 by DELTA, and unless VCALL_OFFSET is zero, applies an additional adjustment
9239 stored at VCALL_OFFSET in the vtable whose address is located at offset 0
9240 relative to the resulting this pointer. */
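/* For illustration only (not part of the original sources): the code
   emitted below is roughly equivalent to

       this += DELTA;
       if (VCALL_OFFSET != 0)
         this += *(long *)(*(char **)this + VCALL_OFFSET);
       goto *FUNCTION;

   with gpr 1 serving as the scratch register for the vtable pointer and
   for constants that do not fit an immediate operand.  */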
9241
9242 static void
9243 s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
9244 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
9245 tree function)
9246 {
9247 rtx op[10];
9248 int nonlocal = 0;
9249
9250 /* Make sure unwind info is emitted for the thunk if needed. */
9251 final_start_function (emit_barrier (), file, 1);
9252
9253 /* Operand 0 is the target function. */
9254 op[0] = XEXP (DECL_RTL (function), 0);
9255 if (flag_pic && !SYMBOL_REF_LOCAL_P (op[0]))
9256 {
9257 nonlocal = 1;
9258 op[0] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[0]),
9259 TARGET_64BIT ? UNSPEC_PLT : UNSPEC_GOT);
9260 op[0] = gen_rtx_CONST (Pmode, op[0]);
9261 }
9262
9263 /* Operand 1 is the 'this' pointer. */
9264 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
9265 op[1] = gen_rtx_REG (Pmode, 3);
9266 else
9267 op[1] = gen_rtx_REG (Pmode, 2);
9268
9269 /* Operand 2 is the delta. */
9270 op[2] = GEN_INT (delta);
9271
9272 /* Operand 3 is the vcall_offset. */
9273 op[3] = GEN_INT (vcall_offset);
9274
9275 /* Operand 4 is the temporary register. */
9276 op[4] = gen_rtx_REG (Pmode, 1);
9277
9278 /* Operands 5 to 8 can be used as labels. */
9279 op[5] = NULL_RTX;
9280 op[6] = NULL_RTX;
9281 op[7] = NULL_RTX;
9282 op[8] = NULL_RTX;
9283
9284 /* Operand 9 can be used for temporary register. */
9285 op[9] = NULL_RTX;
9286
9287 /* Generate code. */
9288 if (TARGET_64BIT)
9289 {
9290 /* Setup literal pool pointer if required. */
9291 if ((!DISP_IN_RANGE (delta)
9292 && !CONST_OK_FOR_K (delta)
9293 && !CONST_OK_FOR_Os (delta))
9294 || (!DISP_IN_RANGE (vcall_offset)
9295 && !CONST_OK_FOR_K (vcall_offset)
9296 && !CONST_OK_FOR_Os (vcall_offset)))
9297 {
9298 op[5] = gen_label_rtx ();
9299 output_asm_insn ("larl\t%4,%5", op);
9300 }
9301
9302 /* Add DELTA to this pointer. */
9303 if (delta)
9304 {
9305 if (CONST_OK_FOR_J (delta))
9306 output_asm_insn ("la\t%1,%2(%1)", op);
9307 else if (DISP_IN_RANGE (delta))
9308 output_asm_insn ("lay\t%1,%2(%1)", op);
9309 else if (CONST_OK_FOR_K (delta))
9310 output_asm_insn ("aghi\t%1,%2", op);
9311 else if (CONST_OK_FOR_Os (delta))
9312 output_asm_insn ("agfi\t%1,%2", op);
9313 else
9314 {
9315 op[6] = gen_label_rtx ();
9316 output_asm_insn ("agf\t%1,%6-%5(%4)", op);
9317 }
9318 }
9319
9320 /* Perform vcall adjustment. */
9321 if (vcall_offset)
9322 {
9323 if (DISP_IN_RANGE (vcall_offset))
9324 {
9325 output_asm_insn ("lg\t%4,0(%1)", op);
9326 output_asm_insn ("ag\t%1,%3(%4)", op);
9327 }
9328 else if (CONST_OK_FOR_K (vcall_offset))
9329 {
9330 output_asm_insn ("lghi\t%4,%3", op);
9331 output_asm_insn ("ag\t%4,0(%1)", op);
9332 output_asm_insn ("ag\t%1,0(%4)", op);
9333 }
9334 else if (CONST_OK_FOR_Os (vcall_offset))
9335 {
9336 output_asm_insn ("lgfi\t%4,%3", op);
9337 output_asm_insn ("ag\t%4,0(%1)", op);
9338 output_asm_insn ("ag\t%1,0(%4)", op);
9339 }
9340 else
9341 {
9342 op[7] = gen_label_rtx ();
9343 output_asm_insn ("llgf\t%4,%7-%5(%4)", op);
9344 output_asm_insn ("ag\t%4,0(%1)", op);
9345 output_asm_insn ("ag\t%1,0(%4)", op);
9346 }
9347 }
9348
9349 /* Jump to target. */
9350 output_asm_insn ("jg\t%0", op);
9351
9352 /* Output literal pool if required. */
9353 if (op[5])
9354 {
9355 output_asm_insn (".align\t4", op);
9356 targetm.asm_out.internal_label (file, "L",
9357 CODE_LABEL_NUMBER (op[5]));
9358 }
9359 if (op[6])
9360 {
9361 targetm.asm_out.internal_label (file, "L",
9362 CODE_LABEL_NUMBER (op[6]));
9363 output_asm_insn (".long\t%2", op);
9364 }
9365 if (op[7])
9366 {
9367 targetm.asm_out.internal_label (file, "L",
9368 CODE_LABEL_NUMBER (op[7]));
9369 output_asm_insn (".long\t%3", op);
9370 }
9371 }
9372 else
9373 {
9374 /* Setup base pointer if required. */
9375 if (!vcall_offset
9376 || (!DISP_IN_RANGE (delta)
9377 && !CONST_OK_FOR_K (delta)
9378 && !CONST_OK_FOR_Os (delta))
9379 || (!DISP_IN_RANGE (delta)
9380 && !CONST_OK_FOR_K (vcall_offset)
9381 && !CONST_OK_FOR_Os (vcall_offset)))
9382 {
9383 op[5] = gen_label_rtx ();
9384 output_asm_insn ("basr\t%4,0", op);
9385 targetm.asm_out.internal_label (file, "L",
9386 CODE_LABEL_NUMBER (op[5]));
9387 }
9388
9389 /* Add DELTA to this pointer. */
9390 if (delta)
9391 {
9392 if (CONST_OK_FOR_J (delta))
9393 output_asm_insn ("la\t%1,%2(%1)", op);
9394 else if (DISP_IN_RANGE (delta))
9395 output_asm_insn ("lay\t%1,%2(%1)", op);
9396 else if (CONST_OK_FOR_K (delta))
9397 output_asm_insn ("ahi\t%1,%2", op);
9398 else if (CONST_OK_FOR_Os (delta))
9399 output_asm_insn ("afi\t%1,%2", op);
9400 else
9401 {
9402 op[6] = gen_label_rtx ();
9403 output_asm_insn ("a\t%1,%6-%5(%4)", op);
9404 }
9405 }
9406
9407 /* Perform vcall adjustment. */
9408 if (vcall_offset)
9409 {
9410 if (CONST_OK_FOR_J (vcall_offset))
9411 {
9412 output_asm_insn ("l\t%4,0(%1)", op);
9413 output_asm_insn ("a\t%1,%3(%4)", op);
9414 }
9415 else if (DISP_IN_RANGE (vcall_offset))
9416 {
9417 output_asm_insn ("l\t%4,0(%1)", op);
9418 output_asm_insn ("ay\t%1,%3(%4)", op);
9419 }
9420 else if (CONST_OK_FOR_K (vcall_offset))
9421 {
9422 output_asm_insn ("lhi\t%4,%3", op);
9423 output_asm_insn ("a\t%4,0(%1)", op);
9424 output_asm_insn ("a\t%1,0(%4)", op);
9425 }
9426 else if (CONST_OK_FOR_Os (vcall_offset))
9427 {
9428 output_asm_insn ("iilf\t%4,%3", op);
9429 output_asm_insn ("a\t%4,0(%1)", op);
9430 output_asm_insn ("a\t%1,0(%4)", op);
9431 }
9432 else
9433 {
9434 op[7] = gen_label_rtx ();
9435 output_asm_insn ("l\t%4,%7-%5(%4)", op);
9436 output_asm_insn ("a\t%4,0(%1)", op);
9437 output_asm_insn ("a\t%1,0(%4)", op);
9438 }
9439
9440 /* We had to clobber the base pointer register.
9441 Re-setup the base pointer (with a different base). */
9442 op[5] = gen_label_rtx ();
9443 output_asm_insn ("basr\t%4,0", op);
9444 targetm.asm_out.internal_label (file, "L",
9445 CODE_LABEL_NUMBER (op[5]));
9446 }
9447
9448 /* Jump to target. */
9449 op[8] = gen_label_rtx ();
9450
9451 if (!flag_pic)
9452 output_asm_insn ("l\t%4,%8-%5(%4)", op);
9453 else if (!nonlocal)
9454 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9455 /* We cannot call through .plt, since .plt requires %r12 loaded. */
9456 else if (flag_pic == 1)
9457 {
9458 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9459 output_asm_insn ("l\t%4,%0(%4)", op);
9460 }
9461 else if (flag_pic == 2)
9462 {
9463 op[9] = gen_rtx_REG (Pmode, 0);
9464 output_asm_insn ("l\t%9,%8-4-%5(%4)", op);
9465 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9466 output_asm_insn ("ar\t%4,%9", op);
9467 output_asm_insn ("l\t%4,0(%4)", op);
9468 }
9469
9470 output_asm_insn ("br\t%4", op);
9471
9472 /* Output literal pool. */
9473 output_asm_insn (".align\t4", op);
9474
9475 if (nonlocal && flag_pic == 2)
9476 output_asm_insn (".long\t%0", op);
9477 if (nonlocal)
9478 {
9479 op[0] = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
9480 SYMBOL_REF_FLAGS (op[0]) = SYMBOL_FLAG_LOCAL;
9481 }
9482
9483 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[8]));
9484 if (!flag_pic)
9485 output_asm_insn (".long\t%0", op);
9486 else
9487 output_asm_insn (".long\t%0-%5", op);
9488
9489 if (op[6])
9490 {
9491 targetm.asm_out.internal_label (file, "L",
9492 CODE_LABEL_NUMBER (op[6]));
9493 output_asm_insn (".long\t%2", op);
9494 }
9495 if (op[7])
9496 {
9497 targetm.asm_out.internal_label (file, "L",
9498 CODE_LABEL_NUMBER (op[7]));
9499 output_asm_insn (".long\t%3", op);
9500 }
9501 }
9502 final_end_function ();
9503 }
9504
9505 static bool
9506 s390_valid_pointer_mode (enum machine_mode mode)
9507 {
9508 return (mode == SImode || (TARGET_64BIT && mode == DImode));
9509 }
9510
9511 /* Checks whether the given CALL_EXPR would use a caller
9512 saved register. This is used to decide whether sibling call
9513 optimization could be performed on the respective function
9514 call. */
9515
9516 static bool
9517 s390_call_saved_register_used (tree call_expr)
9518 {
9519 CUMULATIVE_ARGS cum;
9520 tree parameter;
9521 enum machine_mode mode;
9522 tree type;
9523 rtx parm_rtx;
9524 int reg, i;
9525
9526 INIT_CUMULATIVE_ARGS (cum, NULL, NULL, 0, 0);
9527
9528 for (i = 0; i < call_expr_nargs (call_expr); i++)
9529 {
9530 parameter = CALL_EXPR_ARG (call_expr, i);
9531 gcc_assert (parameter);
9532
9533 /* For an undeclared variable passed as a parameter we will get
9534 an ERROR_MARK node here. */
9535 if (TREE_CODE (parameter) == ERROR_MARK)
9536 return true;
9537
9538 type = TREE_TYPE (parameter);
9539 gcc_assert (type);
9540
9541 mode = TYPE_MODE (type);
9542 gcc_assert (mode);
9543
9544 if (pass_by_reference (&cum, mode, type, true))
9545 {
9546 mode = Pmode;
9547 type = build_pointer_type (type);
9548 }
9549
9550 parm_rtx = s390_function_arg (&cum, mode, type, 0);
9551
9552 s390_function_arg_advance (&cum, mode, type, 0);
9553
9554 if (!parm_rtx)
9555 continue;
9556
9557 if (REG_P (parm_rtx))
9558 {
9559 for (reg = 0;
9560 reg < HARD_REGNO_NREGS (REGNO (parm_rtx), GET_MODE (parm_rtx));
9561 reg++)
9562 if (!call_used_regs[reg + REGNO (parm_rtx)])
9563 return true;
9564 }
9565
9566 if (GET_CODE (parm_rtx) == PARALLEL)
9567 {
9568 int i;
9569
9570 for (i = 0; i < XVECLEN (parm_rtx, 0); i++)
9571 {
9572 rtx r = XEXP (XVECEXP (parm_rtx, 0, i), 0);
9573
9574 gcc_assert (REG_P (r));
9575
9576 for (reg = 0;
9577 reg < HARD_REGNO_NREGS (REGNO (r), GET_MODE (r));
9578 reg++)
9579 if (!call_used_regs[reg + REGNO (r)])
9580 return true;
9581 }
9582 }
9583
9584 }
9585 return false;
9586 }
9587
9588 /* Return true if the given call expression can be
9589 turned into a sibling call.
9590 DECL holds the declaration of the function to be called whereas
9591 EXP is the call expression itself. */
9592
9593 static bool
9594 s390_function_ok_for_sibcall (tree decl, tree exp)
9595 {
9596 /* The TPF epilogue uses register 1. */
9597 if (TARGET_TPF_PROFILING)
9598 return false;
9599
9600 /* The 31 bit PLT code uses register 12 (GOT pointer - caller saved)
9601 which would have to be restored before the sibcall. */
9602 if (!TARGET_64BIT && flag_pic && decl && !targetm.binds_local_p (decl))
9603 return false;
9604
9605 /* Register 6 on s390 is available as an argument register but is
9606 unfortunately "call saved" (callee-saved). This makes functions needing
9607 this register for arguments not suitable for sibcalls. */
9608 return !s390_call_saved_register_used (exp);
9609 }
9610
9611 /* Return the fixed registers used for condition codes. */
9612
9613 static bool
9614 s390_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
9615 {
9616 *p1 = CC_REGNUM;
9617 *p2 = INVALID_REGNUM;
9618
9619 return true;
9620 }
9621
9622 /* This function is used by the call expanders of the machine description.
9623 It emits the call insn itself together with the necessary operations
9624 to adjust the target address and returns the emitted insn.
9625 ADDR_LOCATION is the target address rtx
9626 TLS_CALL the location of the thread-local symbol
9627 RESULT_REG the register where the result of the call should be stored
9628 RETADDR_REG the register where the return address should be stored
9629 If this parameter is NULL_RTX the call is considered
9630 to be a sibling call. */
9631
9632 rtx
9633 s390_emit_call (rtx addr_location, rtx tls_call, rtx result_reg,
9634 rtx retaddr_reg)
9635 {
9636 bool plt_call = false;
9637 rtx insn;
9638 rtx call;
9639 rtx clobber;
9640 rtvec vec;
9641
9642 /* Direct function calls need special treatment. */
9643 if (GET_CODE (addr_location) == SYMBOL_REF)
9644 {
9645 /* When calling a global routine in PIC mode, we must
9646 replace the symbol itself with the PLT stub. */
9647 if (flag_pic && !SYMBOL_REF_LOCAL_P (addr_location))
9648 {
9649 if (retaddr_reg != NULL_RTX)
9650 {
9651 addr_location = gen_rtx_UNSPEC (Pmode,
9652 gen_rtvec (1, addr_location),
9653 UNSPEC_PLT);
9654 addr_location = gen_rtx_CONST (Pmode, addr_location);
9655 plt_call = true;
9656 }
9657 else
9658 /* For -fpic code the PLT entries might use r12 which is
9659 call-saved. Therefore we cannot do a sibcall when
9660 calling directly using a symbol ref. When reaching
9661 this point we decided (in s390_function_ok_for_sibcall)
9662 to do a sibcall for a function pointer but one of the
9663 optimizers was able to get rid of the function pointer
9664 by propagating the symbol ref into the call. This
9665 optimization is illegal for S/390 so we turn the direct
9666 call into an indirect call again. */
9667 addr_location = force_reg (Pmode, addr_location);
9668 }
9669
9670 /* Unless we can use the bras(l) insn, force the
9671 routine address into a register. */
9672 if (!TARGET_SMALL_EXEC && !TARGET_CPU_ZARCH)
9673 {
9674 if (flag_pic)
9675 addr_location = legitimize_pic_address (addr_location, 0);
9676 else
9677 addr_location = force_reg (Pmode, addr_location);
9678 }
9679 }
9680
9681 /* If it is already an indirect call or the code above moved the
9682 SYMBOL_REF to somewhere else, make sure the address can be found in
9683 register 1. */
9684 if (retaddr_reg == NULL_RTX
9685 && GET_CODE (addr_location) != SYMBOL_REF
9686 && !plt_call)
9687 {
9688 emit_move_insn (gen_rtx_REG (Pmode, SIBCALL_REGNUM), addr_location);
9689 addr_location = gen_rtx_REG (Pmode, SIBCALL_REGNUM);
9690 }
9691
9692 addr_location = gen_rtx_MEM (QImode, addr_location);
9693 call = gen_rtx_CALL (VOIDmode, addr_location, const0_rtx);
9694
9695 if (result_reg != NULL_RTX)
9696 call = gen_rtx_SET (VOIDmode, result_reg, call);
9697
9698 if (retaddr_reg != NULL_RTX)
9699 {
9700 clobber = gen_rtx_CLOBBER (VOIDmode, retaddr_reg);
9701
9702 if (tls_call != NULL_RTX)
9703 vec = gen_rtvec (3, call, clobber,
9704 gen_rtx_USE (VOIDmode, tls_call));
9705 else
9706 vec = gen_rtvec (2, call, clobber);
9707
9708 call = gen_rtx_PARALLEL (VOIDmode, vec);
9709 }
9710
9711 insn = emit_call_insn (call);
9712
9713 /* 31-bit PLT stubs and tls calls use the GOT register implicitly. */
9714 if ((!TARGET_64BIT && plt_call) || tls_call != NULL_RTX)
9715 {
9716 /* s390_function_ok_for_sibcall should
9717 have denied sibcalls in this case. */
9718 gcc_assert (retaddr_reg != NULL_RTX);
9719
9720 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
9721 }
9722 return insn;
9723 }
9724
9725 /* Implement CONDITIONAL_REGISTER_USAGE. */
9726
9727 void
9728 s390_conditional_register_usage (void)
9729 {
9730 int i;
9731
9732 if (flag_pic)
9733 {
9734 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
9735 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
9736 }
9737 if (TARGET_CPU_ZARCH)
9738 {
9739 fixed_regs[BASE_REGNUM] = 0;
9740 call_used_regs[BASE_REGNUM] = 0;
9741 fixed_regs[RETURN_REGNUM] = 0;
9742 call_used_regs[RETURN_REGNUM] = 0;
9743 }
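  /* The 64-bit ABI keeps %f8-%f15 call-saved (hard regs 24-31 in this
     port's numbering), while the 31-bit ABI only preserves %f4 and %f6
     (hard regs 18-19); clear the call-clobbered flag accordingly.  */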
9744 if (TARGET_64BIT)
9745 {
9746 for (i = 24; i < 32; i++)
9747 call_used_regs[i] = call_really_used_regs[i] = 0;
9748 }
9749 else
9750 {
9751 for (i = 18; i < 20; i++)
9752 call_used_regs[i] = call_really_used_regs[i] = 0;
9753 }
9754
9755 if (TARGET_SOFT_FLOAT)
9756 {
9757 for (i = 16; i < 32; i++)
9758 call_used_regs[i] = fixed_regs[i] = 1;
9759 }
9760 }
9761
9762 /* Function corresponding to the eh_return expander. */
9763
9764 static GTY(()) rtx s390_tpf_eh_return_symbol;
9765 void
9766 s390_emit_tpf_eh_return (rtx target)
9767 {
9768 rtx insn, reg;
9769
9770 if (!s390_tpf_eh_return_symbol)
9771 s390_tpf_eh_return_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tpf_eh_return");
9772
9773 reg = gen_rtx_REG (Pmode, 2);
9774
9775 emit_move_insn (reg, target);
9776 insn = s390_emit_call (s390_tpf_eh_return_symbol, NULL_RTX, reg,
9777 gen_rtx_REG (Pmode, RETURN_REGNUM));
9778 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
9779
9780 emit_move_insn (EH_RETURN_HANDLER_RTX, reg);
9781 }
9782
9783 /* Rework the prologue/epilogue to avoid saving/restoring
9784 registers unnecessarily. */
9785
9786 static void
9787 s390_optimize_prologue (void)
9788 {
9789 rtx insn, new_insn, next_insn;
9790
9791 /* Do a final recompute of the frame-related data. */
9792
9793 s390_update_frame_layout ();
9794
9795 /* If all special registers are in fact used, there's nothing we
9796 can do, so no point in walking the insn list. */
9797
9798 if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
9799 && cfun_frame_layout.last_save_gpr >= BASE_REGNUM
9800 && (TARGET_CPU_ZARCH
9801 || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
9802 && cfun_frame_layout.last_save_gpr >= RETURN_REGNUM)))
9803 return;
9804
9805 /* Search for prologue/epilogue insns and replace them. */
9806
9807 for (insn = get_insns (); insn; insn = next_insn)
9808 {
9809 int first, last, off;
9810 rtx set, base, offset;
9811
9812 next_insn = NEXT_INSN (insn);
9813
9814 if (GET_CODE (insn) != INSN)
9815 continue;
9816
9817 if (GET_CODE (PATTERN (insn)) == PARALLEL
9818 && store_multiple_operation (PATTERN (insn), VOIDmode))
9819 {
9820 set = XVECEXP (PATTERN (insn), 0, 0);
9821 first = REGNO (SET_SRC (set));
9822 last = first + XVECLEN (PATTERN (insn), 0) - 1;
9823 offset = const0_rtx;
9824 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
9825 off = INTVAL (offset);
9826
9827 if (GET_CODE (base) != REG || off < 0)
9828 continue;
9829 if (cfun_frame_layout.first_save_gpr != -1
9830 && (cfun_frame_layout.first_save_gpr < first
9831 || cfun_frame_layout.last_save_gpr > last))
9832 continue;
9833 if (REGNO (base) != STACK_POINTER_REGNUM
9834 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
9835 continue;
9836 if (first > BASE_REGNUM || last < BASE_REGNUM)
9837 continue;
9838
9839 if (cfun_frame_layout.first_save_gpr != -1)
9840 {
9841 new_insn = save_gprs (base,
9842 off + (cfun_frame_layout.first_save_gpr
9843 - first) * UNITS_PER_LONG,
9844 cfun_frame_layout.first_save_gpr,
9845 cfun_frame_layout.last_save_gpr);
9846 new_insn = emit_insn_before (new_insn, insn);
9847 INSN_ADDRESSES_NEW (new_insn, -1);
9848 }
9849
9850 remove_insn (insn);
9851 continue;
9852 }
9853
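      /* A single store of the base or return register can be removed
         outright when the frame layout says no GPRs need saving at all.  */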
9854 if (cfun_frame_layout.first_save_gpr == -1
9855 && GET_CODE (PATTERN (insn)) == SET
9856 && GET_CODE (SET_SRC (PATTERN (insn))) == REG
9857 && (REGNO (SET_SRC (PATTERN (insn))) == BASE_REGNUM
9858 || (!TARGET_CPU_ZARCH
9859 && REGNO (SET_SRC (PATTERN (insn))) == RETURN_REGNUM))
9860 && GET_CODE (SET_DEST (PATTERN (insn))) == MEM)
9861 {
9862 set = PATTERN (insn);
9863 first = REGNO (SET_SRC (set));
9864 offset = const0_rtx;
9865 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
9866 off = INTVAL (offset);
9867
9868 if (GET_CODE (base) != REG || off < 0)
9869 continue;
9870 if (REGNO (base) != STACK_POINTER_REGNUM
9871 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
9872 continue;
9873
9874 remove_insn (insn);
9875 continue;
9876 }
9877
9878 if (GET_CODE (PATTERN (insn)) == PARALLEL
9879 && load_multiple_operation (PATTERN (insn), VOIDmode))
9880 {
9881 set = XVECEXP (PATTERN (insn), 0, 0);
9882 first = REGNO (SET_DEST (set));
9883 last = first + XVECLEN (PATTERN (insn), 0) - 1;
9884 offset = const0_rtx;
9885 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
9886 off = INTVAL (offset);
9887
9888 if (GET_CODE (base) != REG || off < 0)
9889 continue;
9890 if (cfun_frame_layout.first_restore_gpr != -1
9891 && (cfun_frame_layout.first_restore_gpr < first
9892 || cfun_frame_layout.last_restore_gpr > last))
9893 continue;
9894 if (REGNO (base) != STACK_POINTER_REGNUM
9895 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
9896 continue;
9897 if (first > BASE_REGNUM || last < BASE_REGNUM)
9898 continue;
9899
9900 if (cfun_frame_layout.first_restore_gpr != -1)
9901 {
9902 new_insn = restore_gprs (base,
9903 off + (cfun_frame_layout.first_restore_gpr
9904 - first) * UNITS_PER_LONG,
9905 cfun_frame_layout.first_restore_gpr,
9906 cfun_frame_layout.last_restore_gpr);
9907 new_insn = emit_insn_before (new_insn, insn);
9908 INSN_ADDRESSES_NEW (new_insn, -1);
9909 }
9910
9911 remove_insn (insn);
9912 continue;
9913 }
9914
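      /* Likewise, a single load of the base or return register is
         superfluous when no GPRs need restoring.  */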
9915 if (cfun_frame_layout.first_restore_gpr == -1
9916 && GET_CODE (PATTERN (insn)) == SET
9917 && GET_CODE (SET_DEST (PATTERN (insn))) == REG
9918 && (REGNO (SET_DEST (PATTERN (insn))) == BASE_REGNUM
9919 || (!TARGET_CPU_ZARCH
9920 && REGNO (SET_DEST (PATTERN (insn))) == RETURN_REGNUM))
9921 && GET_CODE (SET_SRC (PATTERN (insn))) == MEM)
9922 {
9923 set = PATTERN (insn);
9924 first = REGNO (SET_DEST (set));
9925 offset = const0_rtx;
9926 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
9927 off = INTVAL (offset);
9928
9929 if (GET_CODE (base) != REG || off < 0)
9930 continue;
9931 if (REGNO (base) != STACK_POINTER_REGNUM
9932 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
9933 continue;
9934
9935 remove_insn (insn);
9936 continue;
9937 }
9938 }
9939 }
9940
9941 /* On z10 and later the dynamic branch prediction must see the
9942 backward jump within a certain window. If not, it falls back to
9943 the static prediction. This function rearranges the loop backward
9944 branch in a way which makes the static prediction always correct.
9945 The function returns true if it added an instruction. */
9946 static bool
9947 s390_fix_long_loop_prediction (rtx insn)
9948 {
9949 rtx set = single_set (insn);
9950 rtx code_label, label_ref, new_label;
9951 rtx uncond_jump;
9952 rtx cur_insn;
9953 rtx tmp;
9954 int distance;
9955
9956 /* This will exclude branch on count and branch on index patterns
9957 since these are correctly statically predicted. */
9958 if (!set
9959 || SET_DEST (set) != pc_rtx
9960 || GET_CODE (SET_SRC(set)) != IF_THEN_ELSE)
9961 return false;
9962
9963 label_ref = (GET_CODE (XEXP (SET_SRC (set), 1)) == LABEL_REF ?
9964 XEXP (SET_SRC (set), 1) : XEXP (SET_SRC (set), 2));
9965
9966 gcc_assert (GET_CODE (label_ref) == LABEL_REF);
9967
9968 code_label = XEXP (label_ref, 0);
9969
9970 if (INSN_ADDRESSES (INSN_UID (code_label)) == -1
9971 || INSN_ADDRESSES (INSN_UID (insn)) == -1
9972 || (INSN_ADDRESSES (INSN_UID (insn))
9973 - INSN_ADDRESSES (INSN_UID (code_label)) < PREDICT_DISTANCE))
9974 return false;
9975
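  /* Make sure there is room for the new label and jump: scan backwards
     over roughly PREDICT_DISTANCE bytes worth of insns and give up if a
     jump or label (or the start of the insn stream) gets in the way.  */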
9976 for (distance = 0, cur_insn = PREV_INSN (insn);
9977 distance < PREDICT_DISTANCE - 6;
9978 distance += get_attr_length (cur_insn), cur_insn = PREV_INSN (cur_insn))
9979 if (!cur_insn || JUMP_P (cur_insn) || LABEL_P (cur_insn))
9980 return false;
9981
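  /* Turn the long backward conditional branch into a short forward branch
     around an unconditional backward jump to the original target: emit the
     jump and a new label after INSN, swap the THEN/ELSE arms of the
     condition, and redirect the conditional branch to the new label.  */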
9982 new_label = gen_label_rtx ();
9983 uncond_jump = emit_jump_insn_after (
9984 gen_rtx_SET (VOIDmode, pc_rtx,
9985 gen_rtx_LABEL_REF (VOIDmode, code_label)),
9986 insn);
9987 emit_label_after (new_label, uncond_jump);
9988
9989 tmp = XEXP (SET_SRC (set), 1);
9990 XEXP (SET_SRC (set), 1) = XEXP (SET_SRC (set), 2);
9991 XEXP (SET_SRC (set), 2) = tmp;
9992 INSN_CODE (insn) = -1;
9993
9994 XEXP (label_ref, 0) = new_label;
9995 JUMP_LABEL (insn) = new_label;
9996 JUMP_LABEL (uncond_jump) = code_label;
9997
9998 return true;
9999 }
10000
10001 /* Returns 1 if INSN reads the value of REG for purposes not related
10002 to addressing of memory, and 0 otherwise. */
10003 static int
10004 s390_non_addr_reg_read_p (rtx reg, rtx insn)
10005 {
10006 return reg_referenced_p (reg, PATTERN (insn))
10007 && !reg_used_in_mem_p (REGNO (reg), PATTERN (insn));
10008 }
10009
10010 /* Starting from INSN find_cond_jump looks downwards in the insn
10011 stream for a single jump insn which is the last user of the
10012 condition code set in INSN. */
10013 static rtx
10014 find_cond_jump (rtx insn)
10015 {
10016 for (; insn; insn = NEXT_INSN (insn))
10017 {
10018 rtx ite, cc;
10019
10020 if (LABEL_P (insn))
10021 break;
10022
10023 if (!JUMP_P (insn))
10024 {
10025 if (reg_mentioned_p (gen_rtx_REG (CCmode, CC_REGNUM), insn))
10026 break;
10027 continue;
10028 }
10029
10030 /* This will be triggered by a return. */
10031 if (GET_CODE (PATTERN (insn)) != SET)
10032 break;
10033
10034 gcc_assert (SET_DEST (PATTERN (insn)) == pc_rtx);
10035 ite = SET_SRC (PATTERN (insn));
10036
10037 if (GET_CODE (ite) != IF_THEN_ELSE)
10038 break;
10039
10040 cc = XEXP (XEXP (ite, 0), 0);
10041 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc)))
10042 break;
10043
10044 if (find_reg_note (insn, REG_DEAD, cc))
10045 return insn;
10046 break;
10047 }
10048
10049 return NULL_RTX;
10050 }
10051
10052 /* Swap the condition in COND and the operands in OP0 and OP1 so that
10053 the semantics does not change. If NULL_RTX is passed as COND the
10054 function tries to find the conditional jump starting with INSN. */
10055 static void
10056 s390_swap_cmp (rtx cond, rtx *op0, rtx *op1, rtx insn)
10057 {
10058 rtx tmp = *op0;
10059
10060 if (cond == NULL_RTX)
10061 {
10062 rtx jump = find_cond_jump (NEXT_INSN (insn));
10063 jump = jump ? single_set (jump) : NULL_RTX;
10064
10065 if (jump == NULL_RTX)
10066 return;
10067
10068 cond = XEXP (XEXP (jump, 1), 0);
10069 }
10070
10071 *op0 = *op1;
10072 *op1 = tmp;
10073 PUT_CODE (cond, swap_condition (GET_CODE (cond)));
10074 }
10075
10076 /* On z10, instructions of the compare-and-branch family have the
10077 property of accessing the register occurring as second operand with
10078 its bits complemented. If such a compare is grouped with a second
10079 instruction that accesses the same register non-complemented, and
10080 if that register's value is delivered via a bypass, then the
10081 pipeline recycles, thereby causing significant performance decline.
10082 This function locates such situations and exchanges the two
10083 operands of the compare. The function returns true whenever it
10084 added an insn. */
10085 static bool
10086 s390_z10_optimize_cmp (rtx insn)
10087 {
10088 rtx prev_insn, next_insn;
10089 bool insn_added_p = false;
10090 rtx cond, *op0, *op1;
10091
10092 if (GET_CODE (PATTERN (insn)) == PARALLEL)
10093 {
10094 /* Handle compare and branch and branch on count
10095 instructions. */
10096 rtx pattern = single_set (insn);
10097
10098 if (!pattern
10099 || SET_DEST (pattern) != pc_rtx
10100 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE)
10101 return false;
10102
10103 cond = XEXP (SET_SRC (pattern), 0);
10104 op0 = &XEXP (cond, 0);
10105 op1 = &XEXP (cond, 1);
10106 }
10107 else if (GET_CODE (PATTERN (insn)) == SET)
10108 {
10109 rtx src, dest;
10110
10111 /* Handle normal compare instructions. */
10112 src = SET_SRC (PATTERN (insn));
10113 dest = SET_DEST (PATTERN (insn));
10114
10115 if (!REG_P (dest)
10116 || !CC_REGNO_P (REGNO (dest))
10117 || GET_CODE (src) != COMPARE)
10118 return false;
10119
10120 /* s390_swap_cmp will try to find the conditional
10121 jump when passing NULL_RTX as condition. */
10122 cond = NULL_RTX;
10123 op0 = &XEXP (src, 0);
10124 op1 = &XEXP (src, 1);
10125 }
10126 else
10127 return false;
10128
10129 if (!REG_P (*op0) || !REG_P (*op1))
10130 return false;
10131
10132 if (GET_MODE_CLASS (GET_MODE (*op0)) != MODE_INT)
10133 return false;
10134
10135 /* Swap the COMPARE arguments and its mask if there is a
10136 conflicting access in the previous insn. */
10137 prev_insn = prev_active_insn (insn);
10138 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
10139 && reg_referenced_p (*op1, PATTERN (prev_insn)))
10140 s390_swap_cmp (cond, op0, op1, insn);
10141
10142 /* Check if there is a conflict with the next insn. If there
10143 was no conflict with the previous insn, then swap the
10144 COMPARE arguments and its mask. If we already swapped
10145 the operands, or if swapping them would cause a conflict
10146 with the previous insn, issue a NOP after the COMPARE in
10147 order to separate the two instructions. */
10148 next_insn = next_active_insn (insn);
10149 if (next_insn != NULL_RTX && INSN_P (next_insn)
10150 && s390_non_addr_reg_read_p (*op1, next_insn))
10151 {
10152 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
10153 && s390_non_addr_reg_read_p (*op0, prev_insn))
10154 {
10155 if (REGNO (*op1) == 0)
10156 emit_insn_after (gen_nop1 (), insn);
10157 else
10158 emit_insn_after (gen_nop (), insn);
10159 insn_added_p = true;
10160 }
10161 else
10162 s390_swap_cmp (cond, op0, op1, insn);
10163 }
10164 return insn_added_p;
10165 }
10166
10167 /* Perform machine-dependent processing. */
10168
10169 static void
10170 s390_reorg (void)
10171 {
10172 bool pool_overflow = false;
10173
10174 /* Make sure all splits have been performed; splits after
10175 machine_dependent_reorg might confuse insn length counts. */
10176 split_all_insns_noflow ();
10177
10178 /* Install the main literal pool and the associated base
10179 register load insns.
10180
10181 In addition, there are two problematic situations we need
10182 to correct:
10183
10184 - the literal pool might be > 4096 bytes in size, so that
10185 some of its elements cannot be directly accessed
10186
10187 - a branch target might be > 64K away from the branch, so that
10188 it is not possible to use a PC-relative instruction.
10189
10190 To fix those, we split the single literal pool into multiple
10191 pool chunks, reloading the pool base register at various
10192 points throughout the function to ensure it always points to
10193 the pool chunk the following code expects, and / or replace
10194 PC-relative branches by absolute branches.
10195
10196 However, the two problems are interdependent: splitting the
10197 literal pool can move a branch further away from its target,
10198 causing the 64K limit to overflow, and on the other hand,
10199 replacing a PC-relative branch by an absolute branch means
10200 we need to put the branch target address into the literal
10201 pool, possibly causing it to overflow.
10202
10203 So, we loop trying to fix up both problems until we manage
10204 to satisfy both conditions at the same time. Note that the
10205 loop is guaranteed to terminate as every pass of the loop
10206 strictly decreases the total number of PC-relative branches
10207 in the function. (This is not completely true as there
10208 might be branch-over-pool insns introduced by chunkify_start.
10209 Those never need to be split however.) */
10210
10211 for (;;)
10212 {
10213 struct constant_pool *pool = NULL;
10214
10215 /* Collect the literal pool. */
10216 if (!pool_overflow)
10217 {
10218 pool = s390_mainpool_start ();
10219 if (!pool)
10220 pool_overflow = true;
10221 }
10222
10223 /* If literal pool overflowed, start to chunkify it. */
10224 if (pool_overflow)
10225 pool = s390_chunkify_start ();
10226
10227 /* Split out-of-range branches. If this has created new
10228 literal pool entries, cancel current chunk list and
10229 recompute it. zSeries machines have large branch
10230 instructions, so we never need to split a branch. */
10231 if (!TARGET_CPU_ZARCH && s390_split_branches ())
10232 {
10233 if (pool_overflow)
10234 s390_chunkify_cancel (pool);
10235 else
10236 s390_mainpool_cancel (pool);
10237
10238 continue;
10239 }
10240
10241 /* If we made it up to here, both conditions are satisfied.
10242 Finish up literal pool related changes. */
10243 if (pool_overflow)
10244 s390_chunkify_finish (pool);
10245 else
10246 s390_mainpool_finish (pool);
10247
10248 /* We're done splitting branches. */
10249 cfun->machine->split_branches_pending_p = false;
10250 break;
10251 }
10252
10253 /* Generate out-of-pool execute target insns. */
10254 if (TARGET_CPU_ZARCH)
10255 {
10256 rtx insn, label, target;
10257
10258 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10259 {
10260 label = s390_execute_label (insn);
10261 if (!label)
10262 continue;
10263
10264 gcc_assert (label != const0_rtx);
10265
10266 target = emit_label (XEXP (label, 0));
10267 INSN_ADDRESSES_NEW (target, -1);
10268
10269 target = emit_insn (s390_execute_target (insn));
10270 INSN_ADDRESSES_NEW (target, -1);
10271 }
10272 }
10273
10274 /* Try to optimize prologue and epilogue further. */
10275 s390_optimize_prologue ();
10276
10277 /* Walk over the insns and do some >=z10 specific changes. */
10278 if (s390_tune == PROCESSOR_2097_Z10
10279 || s390_tune == PROCESSOR_2817_Z196)
10280 {
10281 rtx insn;
10282 bool insn_added_p = false;
10283
10284 /* The insn lengths and addresses have to be up to date for the
10285 following manipulations. */
10286 shorten_branches (get_insns ());
10287
10288 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10289 {
10290 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
10291 continue;
10292
10293 if (JUMP_P (insn))
10294 insn_added_p |= s390_fix_long_loop_prediction (insn);
10295
10296 if ((GET_CODE (PATTERN (insn)) == PARALLEL
10297 || GET_CODE (PATTERN (insn)) == SET)
10298 && s390_tune == PROCESSOR_2097_Z10)
10299 insn_added_p |= s390_z10_optimize_cmp (insn);
10300 }
10301
10302 /* Adjust branches if we added new instructions. */
10303 if (insn_added_p)
10304 shorten_branches (get_insns ());
10305 }
10306 }
10307
10308 /* Return true if INSN is a fp load insn writing register REGNO. */
10309 static inline bool
10310 s390_fpload_toreg (rtx insn, unsigned int regno)
10311 {
10312 rtx set;
10313 enum attr_type flag = s390_safe_attr_type (insn);
10314
10315 if (flag != TYPE_FLOADSF && flag != TYPE_FLOADDF)
10316 return false;
10317
10318 set = single_set (insn);
10319
10320 if (set == NULL_RTX)
10321 return false;
10322
10323 if (!REG_P (SET_DEST (set)) || !MEM_P (SET_SRC (set)))
10324 return false;
10325
10326 if (REGNO (SET_DEST (set)) != regno)
10327 return false;
10328
10329 return true;
10330 }
10331
10332 /* This value describes the distance to be avoided between an
10333 arithmetic fp instruction and an fp load writing the same register.
10334 Both Z10_EARLYLOAD_DISTANCE - 1 and Z10_EARLYLOAD_DISTANCE + 1 are
10335 fine; only the exact value has to be avoided. Otherwise the FP
10336 pipeline will throw an exception causing a major penalty. */
10337 #define Z10_EARLYLOAD_DISTANCE 7
10338
10339 /* Rearrange the ready list in order to avoid the situation described
10340 for Z10_EARLYLOAD_DISTANCE. A problematic load instruction is
10341 moved to the very end of the ready list. */
10342 static void
10343 s390_z10_prevent_earlyload_conflicts (rtx *ready, int *nready_p)
10344 {
10345 unsigned int regno;
10346 int nready = *nready_p;
10347 rtx tmp;
10348 int i;
10349 rtx insn;
10350 rtx set;
10351 enum attr_type flag;
10352 int distance;
10353
10354 /* Skip DISTANCE - 1 active insns. */
10355 for (insn = last_scheduled_insn, distance = Z10_EARLYLOAD_DISTANCE - 1;
10356 distance > 0 && insn != NULL_RTX;
10357 distance--, insn = prev_active_insn (insn))
10358 if (CALL_P (insn) || JUMP_P (insn))
10359 return;
10360
10361 if (insn == NULL_RTX)
10362 return;
10363
10364 set = single_set (insn);
10365
10366 if (set == NULL_RTX || !REG_P (SET_DEST (set))
10367 || GET_MODE_CLASS (GET_MODE (SET_DEST (set))) != MODE_FLOAT)
10368 return;
10369
10370 flag = s390_safe_attr_type (insn);
10371
10372 if (flag == TYPE_FLOADSF || flag == TYPE_FLOADDF)
10373 return;
10374
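  /* Search the ready list for an fp load writing the same register and,
     if one is found above slot 0, rotate it down to slot 0 so that it is
     issued as late as possible.  */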
10375 regno = REGNO (SET_DEST (set));
10376 i = nready - 1;
10377
10378 while (!s390_fpload_toreg (ready[i], regno) && i > 0)
10379 i--;
10380
10381 if (!i)
10382 return;
10383
10384 tmp = ready[i];
10385 memmove (&ready[1], &ready[0], sizeof (rtx) * i);
10386 ready[0] = tmp;
10387 }
10388
10389 /* This function is called via hook TARGET_SCHED_REORDER before
10390 issuing one insn from list READY which contains *NREADYP entries.
10391 For target z10 it reorders load instructions to avoid early load
10392 conflicts in the floating point pipeline. */
10393 static int
10394 s390_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
10395 rtx *ready, int *nreadyp, int clock ATTRIBUTE_UNUSED)
10396 {
10397 if (s390_tune == PROCESSOR_2097_Z10)
10398 if (reload_completed && *nreadyp > 1)
10399 s390_z10_prevent_earlyload_conflicts (ready, nreadyp);
10400
10401 return s390_issue_rate ();
10402 }
10403
10404 /* This function is called via hook TARGET_SCHED_VARIABLE_ISSUE after
10405 the scheduler has issued INSN. It stores the last issued insn into
10406 last_scheduled_insn in order to make it available for
10407 s390_sched_reorder. */
10408 static int
10409 s390_sched_variable_issue (FILE *file ATTRIBUTE_UNUSED,
10410 int verbose ATTRIBUTE_UNUSED,
10411 rtx insn, int more)
10412 {
10413 last_scheduled_insn = insn;
10414
10415 if (GET_CODE (PATTERN (insn)) != USE
10416 && GET_CODE (PATTERN (insn)) != CLOBBER)
10417 return more - 1;
10418 else
10419 return more;
10420 }
10421
10422 static void
10423 s390_sched_init (FILE *file ATTRIBUTE_UNUSED,
10424 int verbose ATTRIBUTE_UNUSED,
10425 int max_ready ATTRIBUTE_UNUSED)
10426 {
10427 last_scheduled_insn = NULL_RTX;
10428 }
10429
10430 /* This function checks the whole of insn X for memory references. The
10431 function always returns zero because the framework it is called
10432 from would stop recursively analyzing the insn upon a return value
10433 other than zero. The real result of this function is updating
10434 counter variable MEM_COUNT. */
10435 static int
10436 check_dpu (rtx *x, unsigned *mem_count)
10437 {
10438 if (*x != NULL_RTX && MEM_P (*x))
10439 (*mem_count)++;
10440 return 0;
10441 }
10442
10443 /* This target hook implementation for TARGET_LOOP_UNROLL_ADJUST calculates
10444 how many times struct loop *loop should be unrolled when tuning for cpus with
10445 a built-in stride prefetcher.
10446 The loop is analyzed for memory accesses by calling check_dpu for
10447 each rtx of the loop. Depending on the loop_depth and the number of
10448 memory accesses, a new count <= nunroll is returned to improve the
10449 behaviour of the hardware prefetch unit. */
10450 static unsigned
10451 s390_loop_unroll_adjust (unsigned nunroll, struct loop *loop)
10452 {
10453 basic_block *bbs;
10454 rtx insn;
10455 unsigned i;
10456 unsigned mem_count = 0;
10457
10458 if (s390_tune != PROCESSOR_2097_Z10 && s390_tune != PROCESSOR_2817_Z196)
10459 return nunroll;
10460
10461 /* Count the number of memory references within the loop body. */
10462 bbs = get_loop_body (loop);
10463 for (i = 0; i < loop->num_nodes; i++)
10464 {
10465 for (insn = BB_HEAD (bbs[i]); insn != BB_END (bbs[i]); insn = NEXT_INSN (insn))
10466 if (INSN_P (insn) && INSN_CODE (insn) != -1)
10467 for_each_rtx (&insn, (rtx_function) check_dpu, &mem_count);
10468 }
10469 free (bbs);
10470
10471 /* Prevent division by zero; nunroll does not need to be adjusted in this case. */
10472 if (mem_count == 0)
10473 return nunroll;
10474
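  /* Cap the unroll factor so that the number of memory references in the
     unrolled loop body stays within a per-nesting-depth budget chosen for
     the hardware prefetch unit mentioned above.  */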
10475 switch (loop_depth(loop))
10476 {
10477 case 1:
10478 return MIN (nunroll, 28 / mem_count);
10479 case 2:
10480 return MIN (nunroll, 22 / mem_count);
10481 default:
10482 return MIN (nunroll, 16 / mem_count);
10483 }
10484 }
10485
10486 /* Initialize GCC target structure. */
10487
10488 #undef TARGET_ASM_ALIGNED_HI_OP
10489 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
10490 #undef TARGET_ASM_ALIGNED_DI_OP
10491 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
10492 #undef TARGET_ASM_INTEGER
10493 #define TARGET_ASM_INTEGER s390_assemble_integer
10494
10495 #undef TARGET_ASM_OPEN_PAREN
10496 #define TARGET_ASM_OPEN_PAREN ""
10497
10498 #undef TARGET_ASM_CLOSE_PAREN
10499 #define TARGET_ASM_CLOSE_PAREN ""
10500
10501 #undef TARGET_DEFAULT_TARGET_FLAGS
10502 #define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | MASK_FUSED_MADD)
10503
10504 #undef TARGET_HANDLE_OPTION
10505 #define TARGET_HANDLE_OPTION s390_handle_option
10506
10507 #undef TARGET_OPTION_OVERRIDE
10508 #define TARGET_OPTION_OVERRIDE s390_option_override
10509
10510 #undef TARGET_OPTION_OPTIMIZATION_TABLE
10511 #define TARGET_OPTION_OPTIMIZATION_TABLE s390_option_optimization_table
10512
10513 #undef TARGET_OPTION_INIT_STRUCT
10514 #define TARGET_OPTION_INIT_STRUCT s390_option_init_struct
10515
10516 #undef TARGET_ENCODE_SECTION_INFO
10517 #define TARGET_ENCODE_SECTION_INFO s390_encode_section_info
10518
10519 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10520 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
10521
10522 #ifdef HAVE_AS_TLS
10523 #undef TARGET_HAVE_TLS
10524 #define TARGET_HAVE_TLS true
10525 #endif
10526 #undef TARGET_CANNOT_FORCE_CONST_MEM
10527 #define TARGET_CANNOT_FORCE_CONST_MEM s390_cannot_force_const_mem
10528
10529 #undef TARGET_DELEGITIMIZE_ADDRESS
10530 #define TARGET_DELEGITIMIZE_ADDRESS s390_delegitimize_address
10531
10532 #undef TARGET_LEGITIMIZE_ADDRESS
10533 #define TARGET_LEGITIMIZE_ADDRESS s390_legitimize_address
10534
10535 #undef TARGET_RETURN_IN_MEMORY
10536 #define TARGET_RETURN_IN_MEMORY s390_return_in_memory
10537
10538 #undef TARGET_INIT_BUILTINS
10539 #define TARGET_INIT_BUILTINS s390_init_builtins
10540 #undef TARGET_EXPAND_BUILTIN
10541 #define TARGET_EXPAND_BUILTIN s390_expand_builtin
10542
10543 #undef TARGET_ASM_OUTPUT_MI_THUNK
10544 #define TARGET_ASM_OUTPUT_MI_THUNK s390_output_mi_thunk
10545 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
10546 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
10547
10548 #undef TARGET_SCHED_ADJUST_PRIORITY
10549 #define TARGET_SCHED_ADJUST_PRIORITY s390_adjust_priority
10550 #undef TARGET_SCHED_ISSUE_RATE
10551 #define TARGET_SCHED_ISSUE_RATE s390_issue_rate
10552 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
10553 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD s390_first_cycle_multipass_dfa_lookahead
10554
10555 #undef TARGET_SCHED_VARIABLE_ISSUE
10556 #define TARGET_SCHED_VARIABLE_ISSUE s390_sched_variable_issue
10557 #undef TARGET_SCHED_REORDER
10558 #define TARGET_SCHED_REORDER s390_sched_reorder
10559 #undef TARGET_SCHED_INIT
10560 #define TARGET_SCHED_INIT s390_sched_init
10561
10562 #undef TARGET_CANNOT_COPY_INSN_P
10563 #define TARGET_CANNOT_COPY_INSN_P s390_cannot_copy_insn_p
10564 #undef TARGET_RTX_COSTS
10565 #define TARGET_RTX_COSTS s390_rtx_costs
10566 #undef TARGET_ADDRESS_COST
10567 #define TARGET_ADDRESS_COST s390_address_cost
10568
10569 #undef TARGET_MACHINE_DEPENDENT_REORG
10570 #define TARGET_MACHINE_DEPENDENT_REORG s390_reorg
10571
10572 #undef TARGET_VALID_POINTER_MODE
10573 #define TARGET_VALID_POINTER_MODE s390_valid_pointer_mode
10574
10575 #undef TARGET_BUILD_BUILTIN_VA_LIST
10576 #define TARGET_BUILD_BUILTIN_VA_LIST s390_build_builtin_va_list
10577 #undef TARGET_EXPAND_BUILTIN_VA_START
10578 #define TARGET_EXPAND_BUILTIN_VA_START s390_va_start
10579 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
10580 #define TARGET_GIMPLIFY_VA_ARG_EXPR s390_gimplify_va_arg
10581
10582 #undef TARGET_PROMOTE_FUNCTION_MODE
10583 #define TARGET_PROMOTE_FUNCTION_MODE s390_promote_function_mode
10584 #undef TARGET_PASS_BY_REFERENCE
10585 #define TARGET_PASS_BY_REFERENCE s390_pass_by_reference
10586
10587 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
10588 #define TARGET_FUNCTION_OK_FOR_SIBCALL s390_function_ok_for_sibcall
10589 #undef TARGET_FUNCTION_ARG
10590 #define TARGET_FUNCTION_ARG s390_function_arg
10591 #undef TARGET_FUNCTION_ARG_ADVANCE
10592 #define TARGET_FUNCTION_ARG_ADVANCE s390_function_arg_advance
10593
10594 #undef TARGET_FIXED_CONDITION_CODE_REGS
10595 #define TARGET_FIXED_CONDITION_CODE_REGS s390_fixed_condition_code_regs
10596
10597 #undef TARGET_CC_MODES_COMPATIBLE
10598 #define TARGET_CC_MODES_COMPATIBLE s390_cc_modes_compatible
10599
10600 #undef TARGET_INVALID_WITHIN_DOLOOP
10601 #define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_null
10602
10603 #ifdef HAVE_AS_TLS
10604 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
10605 #define TARGET_ASM_OUTPUT_DWARF_DTPREL s390_output_dwarf_dtprel
10606 #endif
10607
10608 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
10609 #undef TARGET_MANGLE_TYPE
10610 #define TARGET_MANGLE_TYPE s390_mangle_type
10611 #endif
10612
10613 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10614 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
10615
10616 #undef TARGET_SECONDARY_RELOAD
10617 #define TARGET_SECONDARY_RELOAD s390_secondary_reload
10618
10619 #undef TARGET_LIBGCC_CMP_RETURN_MODE
10620 #define TARGET_LIBGCC_CMP_RETURN_MODE s390_libgcc_cmp_return_mode
10621
10622 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
10623 #define TARGET_LIBGCC_SHIFT_COUNT_MODE s390_libgcc_shift_count_mode
10624
10625 #undef TARGET_LEGITIMATE_ADDRESS_P
10626 #define TARGET_LEGITIMATE_ADDRESS_P s390_legitimate_address_p
10627
10628 #undef TARGET_CAN_ELIMINATE
10629 #define TARGET_CAN_ELIMINATE s390_can_eliminate
10630
10631 #undef TARGET_LOOP_UNROLL_ADJUST
10632 #define TARGET_LOOP_UNROLL_ADJUST s390_loop_unroll_adjust
10633
10634 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
10635 #define TARGET_ASM_TRAMPOLINE_TEMPLATE s390_asm_trampoline_template
10636 #undef TARGET_TRAMPOLINE_INIT
10637 #define TARGET_TRAMPOLINE_INIT s390_trampoline_init
10638
10639 #undef TARGET_UNWIND_WORD_MODE
10640 #define TARGET_UNWIND_WORD_MODE s390_unwind_word_mode
10641
10642 struct gcc_target targetm = TARGET_INITIALIZER;
10643
10644 #include "gt-s390.h"