s390.c (print_operand_address): Replace assert with error message.
1 /* Subroutines used for code generation on IBM S/390 and zSeries
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006,
3 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
4 Contributed by Hartmut Penner (hpenner@de.ibm.com) and
5 Ulrich Weigand (uweigand@de.ibm.com) and
6 Andreas Krebbel (Andreas.Krebbel@de.ibm.com).
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "tm_p.h"
31 #include "regs.h"
32 #include "hard-reg-set.h"
33 #include "insn-config.h"
34 #include "conditions.h"
35 #include "output.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "except.h"
39 #include "function.h"
40 #include "recog.h"
41 #include "expr.h"
42 #include "reload.h"
43 #include "diagnostic-core.h"
44 #include "toplev.h"
45 #include "basic-block.h"
46 #include "integrate.h"
47 #include "ggc.h"
48 #include "target.h"
49 #include "target-def.h"
50 #include "debug.h"
51 #include "langhooks.h"
52 #include "optabs.h"
53 #include "gimple.h"
54 #include "df.h"
55 #include "params.h"
56 #include "cfgloop.h"
57
58
59 /* Define the specific costs for a given cpu. */
60
61 struct processor_costs
62 {
63 /* multiplication */
64 const int m; /* cost of an M instruction. */
65 const int mghi; /* cost of an MGHI instruction. */
66 const int mh; /* cost of an MH instruction. */
67 const int mhi; /* cost of an MHI instruction. */
68 const int ml; /* cost of an ML instruction. */
69 const int mr; /* cost of an MR instruction. */
70 const int ms; /* cost of an MS instruction. */
71 const int msg; /* cost of an MSG instruction. */
72 const int msgf; /* cost of an MSGF instruction. */
73 const int msgfr; /* cost of an MSGFR instruction. */
74 const int msgr; /* cost of an MSGR instruction. */
75 const int msr; /* cost of an MSR instruction. */
76 const int mult_df; /* cost of multiplication in DFmode. */
77 const int mxbr;
78 /* square root */
79 const int sqxbr; /* cost of square root in TFmode. */
80 const int sqdbr; /* cost of square root in DFmode. */
81 const int sqebr; /* cost of square root in SFmode. */
82 /* multiply and add */
83 const int madbr; /* cost of multiply and add in DFmode. */
84 const int maebr; /* cost of multiply and add in SFmode. */
85 /* division */
86 const int dxbr;
87 const int ddbr;
88 const int debr;
89 const int dlgr;
90 const int dlr;
91 const int dr;
92 const int dsgfr;
93 const int dsgr;
94 };
95
96 const struct processor_costs *s390_cost;
97
98 static const
99 struct processor_costs z900_cost =
100 {
101 COSTS_N_INSNS (5), /* M */
102 COSTS_N_INSNS (10), /* MGHI */
103 COSTS_N_INSNS (5), /* MH */
104 COSTS_N_INSNS (4), /* MHI */
105 COSTS_N_INSNS (5), /* ML */
106 COSTS_N_INSNS (5), /* MR */
107 COSTS_N_INSNS (4), /* MS */
108 COSTS_N_INSNS (15), /* MSG */
109 COSTS_N_INSNS (7), /* MSGF */
110 COSTS_N_INSNS (7), /* MSGFR */
111 COSTS_N_INSNS (10), /* MSGR */
112 COSTS_N_INSNS (4), /* MSR */
113 COSTS_N_INSNS (7), /* multiplication in DFmode */
114 COSTS_N_INSNS (13), /* MXBR */
115 COSTS_N_INSNS (136), /* SQXBR */
116 COSTS_N_INSNS (44), /* SQDBR */
117 COSTS_N_INSNS (35), /* SQEBR */
118 COSTS_N_INSNS (18), /* MADBR */
119 COSTS_N_INSNS (13), /* MAEBR */
120 COSTS_N_INSNS (134), /* DXBR */
121 COSTS_N_INSNS (30), /* DDBR */
122 COSTS_N_INSNS (27), /* DEBR */
123 COSTS_N_INSNS (220), /* DLGR */
124 COSTS_N_INSNS (34), /* DLR */
125 COSTS_N_INSNS (34), /* DR */
126 COSTS_N_INSNS (32), /* DSGFR */
127 COSTS_N_INSNS (32), /* DSGR */
128 };
129
130 static const
131 struct processor_costs z990_cost =
132 {
133 COSTS_N_INSNS (4), /* M */
134 COSTS_N_INSNS (2), /* MGHI */
135 COSTS_N_INSNS (2), /* MH */
136 COSTS_N_INSNS (2), /* MHI */
137 COSTS_N_INSNS (4), /* ML */
138 COSTS_N_INSNS (4), /* MR */
139 COSTS_N_INSNS (5), /* MS */
140 COSTS_N_INSNS (6), /* MSG */
141 COSTS_N_INSNS (4), /* MSGF */
142 COSTS_N_INSNS (4), /* MSGFR */
143 COSTS_N_INSNS (4), /* MSGR */
144 COSTS_N_INSNS (4), /* MSR */
145 COSTS_N_INSNS (1), /* multiplication in DFmode */
146 COSTS_N_INSNS (28), /* MXBR */
147 COSTS_N_INSNS (130), /* SQXBR */
148 COSTS_N_INSNS (66), /* SQDBR */
149 COSTS_N_INSNS (38), /* SQEBR */
150 COSTS_N_INSNS (1), /* MADBR */
151 COSTS_N_INSNS (1), /* MAEBR */
152 COSTS_N_INSNS (60), /* DXBR */
153 COSTS_N_INSNS (40), /* DDBR */
154 COSTS_N_INSNS (26), /* DEBR */
155 COSTS_N_INSNS (176), /* DLGR */
156 COSTS_N_INSNS (31), /* DLR */
157 COSTS_N_INSNS (31), /* DR */
158 COSTS_N_INSNS (31), /* DSGFR */
159 COSTS_N_INSNS (31), /* DSGR */
160 };
161
162 static const
163 struct processor_costs z9_109_cost =
164 {
165 COSTS_N_INSNS (4), /* M */
166 COSTS_N_INSNS (2), /* MGHI */
167 COSTS_N_INSNS (2), /* MH */
168 COSTS_N_INSNS (2), /* MHI */
169 COSTS_N_INSNS (4), /* ML */
170 COSTS_N_INSNS (4), /* MR */
171 COSTS_N_INSNS (5), /* MS */
172 COSTS_N_INSNS (6), /* MSG */
173 COSTS_N_INSNS (4), /* MSGF */
174 COSTS_N_INSNS (4), /* MSGFR */
175 COSTS_N_INSNS (4), /* MSGR */
176 COSTS_N_INSNS (4), /* MSR */
177 COSTS_N_INSNS (1), /* multiplication in DFmode */
178 COSTS_N_INSNS (28), /* MXBR */
179 COSTS_N_INSNS (130), /* SQXBR */
180 COSTS_N_INSNS (66), /* SQDBR */
181 COSTS_N_INSNS (38), /* SQEBR */
182 COSTS_N_INSNS (1), /* MADBR */
183 COSTS_N_INSNS (1), /* MAEBR */
184 COSTS_N_INSNS (60), /* DXBR */
185 COSTS_N_INSNS (40), /* DDBR */
186 COSTS_N_INSNS (26), /* DEBR */
187 COSTS_N_INSNS (30), /* DLGR */
188 COSTS_N_INSNS (23), /* DLR */
189 COSTS_N_INSNS (23), /* DR */
190 COSTS_N_INSNS (24), /* DSGFR */
191 COSTS_N_INSNS (24), /* DSGR */
192 };
193
194 static const
195 struct processor_costs z10_cost =
196 {
197 COSTS_N_INSNS (10), /* M */
198 COSTS_N_INSNS (10), /* MGHI */
199 COSTS_N_INSNS (10), /* MH */
200 COSTS_N_INSNS (10), /* MHI */
201 COSTS_N_INSNS (10), /* ML */
202 COSTS_N_INSNS (10), /* MR */
203 COSTS_N_INSNS (10), /* MS */
204 COSTS_N_INSNS (10), /* MSG */
205 COSTS_N_INSNS (10), /* MSGF */
206 COSTS_N_INSNS (10), /* MSGFR */
207 COSTS_N_INSNS (10), /* MSGR */
208 COSTS_N_INSNS (10), /* MSR */
209 COSTS_N_INSNS (1) , /* multiplication in DFmode */
210 COSTS_N_INSNS (50), /* MXBR */
211 COSTS_N_INSNS (120), /* SQXBR */
212 COSTS_N_INSNS (52), /* SQDBR */
213 COSTS_N_INSNS (38), /* SQEBR */
214 COSTS_N_INSNS (1), /* MADBR */
215 COSTS_N_INSNS (1), /* MAEBR */
216 COSTS_N_INSNS (111), /* DXBR */
217 COSTS_N_INSNS (39), /* DDBR */
218 COSTS_N_INSNS (32), /* DEBR */
219 COSTS_N_INSNS (160), /* DLGR */
220 COSTS_N_INSNS (71), /* DLR */
221 COSTS_N_INSNS (71), /* DR */
222 COSTS_N_INSNS (71), /* DSGFR */
223 COSTS_N_INSNS (71), /* DSGR */
224 };
225
226 static const
227 struct processor_costs z196_cost =
228 {
229 COSTS_N_INSNS (7), /* M */
230 COSTS_N_INSNS (5), /* MGHI */
231 COSTS_N_INSNS (5), /* MH */
232 COSTS_N_INSNS (5), /* MHI */
233 COSTS_N_INSNS (7), /* ML */
234 COSTS_N_INSNS (7), /* MR */
235 COSTS_N_INSNS (6), /* MS */
236 COSTS_N_INSNS (8), /* MSG */
237 COSTS_N_INSNS (6), /* MSGF */
238 COSTS_N_INSNS (6), /* MSGFR */
239 COSTS_N_INSNS (8), /* MSGR */
240 COSTS_N_INSNS (6), /* MSR */
241 COSTS_N_INSNS (1) , /* multiplication in DFmode */
242 COSTS_N_INSNS (40), /* MXBR B+40 */
243 COSTS_N_INSNS (100), /* SQXBR B+100 */
244 COSTS_N_INSNS (42), /* SQDBR B+42 */
245 COSTS_N_INSNS (28), /* SQEBR B+28 */
246 COSTS_N_INSNS (1), /* MADBR B */
247 COSTS_N_INSNS (1), /* MAEBR B */
248 COSTS_N_INSNS (101), /* DXBR B+101 */
249 COSTS_N_INSNS (29), /* DDBR */
250 COSTS_N_INSNS (22), /* DEBR */
251 COSTS_N_INSNS (160), /* DLGR cracked */
252 COSTS_N_INSNS (160), /* DLR cracked */
253 COSTS_N_INSNS (160), /* DR expanded */
254 COSTS_N_INSNS (160), /* DSGFR cracked */
255 COSTS_N_INSNS (160), /* DSGR cracked */
256 };
257
258 extern int reload_completed;
259
260 /* Kept up to date using the SCHED_VARIABLE_ISSUE hook. */
261 static rtx last_scheduled_insn;
262
263 /* Structure used to hold the components of an S/390 memory
264 address. A legitimate address on S/390 is of the general
265 form
266 base + index + displacement
267 where any of the components is optional.
268
269 base and index are registers of the class ADDR_REGS,
270 displacement is an unsigned 12-bit immediate constant. */
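/* For illustration, an address such as
     (plus (plus (reg 2) (reg 3)) (const_int 200))
   is decomposed by s390_decompose_address below into index (reg 2),
   base (reg 3) and displacement 200.  */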
271
272 struct s390_address
273 {
274 rtx base;
275 rtx indx;
276 rtx disp;
277 bool pointer;
278 bool literal_pool;
279 };
280
281 /* Which cpu are we tuning for. */
282 enum processor_type s390_tune = PROCESSOR_max;
283 int s390_tune_flags;
284 /* Which instruction set architecture to use. */
285 enum processor_type s390_arch;
286 int s390_arch_flags;
287
288 HOST_WIDE_INT s390_warn_framesize = 0;
289 HOST_WIDE_INT s390_stack_size = 0;
290 HOST_WIDE_INT s390_stack_guard = 0;
291
292 /* The following structure is embedded in the machine
293 specific part of struct function. */
294
295 struct GTY (()) s390_frame_layout
296 {
297 /* Offset within stack frame. */
298 HOST_WIDE_INT gprs_offset;
299 HOST_WIDE_INT f0_offset;
300 HOST_WIDE_INT f4_offset;
301 HOST_WIDE_INT f8_offset;
302 HOST_WIDE_INT backchain_offset;
303
304 /* Numbers of the first and last GPRs for which slots in the
305 register save area are reserved. */
306 int first_save_gpr_slot;
307 int last_save_gpr_slot;
308
309 /* Numbers of the first and last GPRs to be saved and restored. */
310 int first_save_gpr;
311 int first_restore_gpr;
312 int last_save_gpr;
313 int last_restore_gpr;
314
315 /* Bits standing for floating point registers. Set, if the
316 respective register has to be saved. Starting with reg 16 (f0)
317 at the rightmost bit.
318 Bit 15 - 8 7 6 5 4 3 2 1 0
319 fpr 15 - 8 7 5 3 1 6 4 2 0
320 reg 31 - 24 23 22 21 20 19 18 17 16 */
321 unsigned int fpr_bitmap;
322
323 /* Number of floating point registers f8-f15 which must be saved. */
324 int high_fprs;
325
326 /* Set if return address needs to be saved.
327 This flag is set by s390_return_addr_rtx if it could not use
328 the initial value of r14 and therefore depends on r14 saved
329 to the stack. */
330 bool save_return_addr_p;
331
332 /* Size of stack frame. */
333 HOST_WIDE_INT frame_size;
334 };
335
336 /* Define the structure for the machine field in struct function. */
337
338 struct GTY(()) machine_function
339 {
340 struct s390_frame_layout frame_layout;
341
342 /* Literal pool base register. */
343 rtx base_reg;
344
345 /* True if we may need to perform branch splitting. */
346 bool split_branches_pending_p;
347
348 /* Some local-dynamic TLS symbol name. */
349 const char *some_ld_name;
350
351 bool has_landing_pad_p;
352 };
353
354 /* Few accessor macros for struct cfun->machine->s390_frame_layout. */
355
356 #define cfun_frame_layout (cfun->machine->frame_layout)
357 #define cfun_save_high_fprs_p (!!cfun_frame_layout.high_fprs)
358 #define cfun_gprs_save_area_size ((cfun_frame_layout.last_save_gpr_slot - \
359 cfun_frame_layout.first_save_gpr_slot + 1) * UNITS_PER_LONG)
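/* E.g. with first_save_gpr_slot == 6 and last_save_gpr_slot == 15,
   cfun_gprs_save_area_size amounts to (15 - 6 + 1) * UNITS_PER_LONG bytes.  */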
360 #define cfun_set_fpr_bit(BITNUM) (cfun->machine->frame_layout.fpr_bitmap |= \
361 (1 << (BITNUM)))
362 #define cfun_fpr_bit_p(BITNUM) (!!(cfun->machine->frame_layout.fpr_bitmap & \
363 (1 << (BITNUM))))
364
365 /* Number of GPRs and FPRs used for argument passing. */
366 #define GP_ARG_NUM_REG 5
367 #define FP_ARG_NUM_REG (TARGET_64BIT? 4 : 2)
368
369 /* A couple of shortcuts. */
370 #define CONST_OK_FOR_J(x) \
371 CONST_OK_FOR_CONSTRAINT_P((x), 'J', "J")
372 #define CONST_OK_FOR_K(x) \
373 CONST_OK_FOR_CONSTRAINT_P((x), 'K', "K")
374 #define CONST_OK_FOR_Os(x) \
375 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
376 #define CONST_OK_FOR_Op(x) \
377 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
378 #define CONST_OK_FOR_On(x) \
379 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")
380
381 #define REGNO_PAIR_OK(REGNO, MODE) \
382 (HARD_REGNO_NREGS ((REGNO), (MODE)) == 1 || !((REGNO) & 1))
383
384 /* That's the read ahead of the dynamic branch prediction unit in
385 bytes on a z10 (or higher) CPU. */
386 #define PREDICT_DISTANCE (TARGET_Z10 ? 384 : 2048)
387
388 static enum machine_mode
389 s390_libgcc_cmp_return_mode (void)
390 {
391 return TARGET_64BIT ? DImode : SImode;
392 }
393
394 static enum machine_mode
395 s390_libgcc_shift_count_mode (void)
396 {
397 return TARGET_64BIT ? DImode : SImode;
398 }
399
400 static enum machine_mode
401 s390_unwind_word_mode (void)
402 {
403 return TARGET_64BIT ? DImode : SImode;
404 }
405
406 /* Return true if the back end supports mode MODE. */
407 static bool
408 s390_scalar_mode_supported_p (enum machine_mode mode)
409 {
410 /* In contrast to the default implementation, reject TImode constants on
411 31-bit TARGET_ZARCH for ABI compliance. */
412 if (!TARGET_64BIT && TARGET_ZARCH && mode == TImode)
413 return false;
414
415 if (DECIMAL_FLOAT_MODE_P (mode))
416 return default_decimal_float_supported_p ();
417
418 return default_scalar_mode_supported_p (mode);
419 }
420
421 /* Set the has_landing_pad_p flag in struct machine_function to VALUE. */
422
423 void
424 s390_set_has_landing_pad_p (bool value)
425 {
426 cfun->machine->has_landing_pad_p = value;
427 }
428
429 /* If two condition code modes are compatible, return a condition code
430 mode which is compatible with both. Otherwise, return
431 VOIDmode. */
432
433 static enum machine_mode
434 s390_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
435 {
436 if (m1 == m2)
437 return m1;
438
439 switch (m1)
440 {
441 case CCZmode:
442 if (m2 == CCUmode || m2 == CCTmode || m2 == CCZ1mode
443 || m2 == CCSmode || m2 == CCSRmode || m2 == CCURmode)
444 return m2;
445 return VOIDmode;
446
447 case CCSmode:
448 case CCUmode:
449 case CCTmode:
450 case CCSRmode:
451 case CCURmode:
452 case CCZ1mode:
453 if (m2 == CCZmode)
454 return m1;
455
456 return VOIDmode;
457
458 default:
459 return VOIDmode;
460 }
461 return VOIDmode;
462 }
463
464 /* Return true if SET either doesn't set the CC register, or else
465 the source and destination have matching CC modes and that
466 CC mode is at least as constrained as REQ_MODE. */
467
468 static bool
469 s390_match_ccmode_set (rtx set, enum machine_mode req_mode)
470 {
471 enum machine_mode set_mode;
472
473 gcc_assert (GET_CODE (set) == SET);
474
475 if (GET_CODE (SET_DEST (set)) != REG || !CC_REGNO_P (REGNO (SET_DEST (set))))
476 return 1;
477
478 set_mode = GET_MODE (SET_DEST (set));
479 switch (set_mode)
480 {
481 case CCSmode:
482 case CCSRmode:
483 case CCUmode:
484 case CCURmode:
485 case CCLmode:
486 case CCL1mode:
487 case CCL2mode:
488 case CCL3mode:
489 case CCT1mode:
490 case CCT2mode:
491 case CCT3mode:
492 if (req_mode != set_mode)
493 return 0;
494 break;
495
496 case CCZmode:
497 if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode
498 && req_mode != CCSRmode && req_mode != CCURmode)
499 return 0;
500 break;
501
502 case CCAPmode:
503 case CCANmode:
504 if (req_mode != CCAmode)
505 return 0;
506 break;
507
508 default:
509 gcc_unreachable ();
510 }
511
512 return (GET_MODE (SET_SRC (set)) == set_mode);
513 }
514
515 /* Return true if every SET in INSN that sets the CC register
516 has source and destination with matching CC modes and that
517 CC mode is at least as constrained as REQ_MODE.
518 If REQ_MODE is VOIDmode, always return false. */
519
520 bool
521 s390_match_ccmode (rtx insn, enum machine_mode req_mode)
522 {
523 int i;
524
525 /* s390_tm_ccmode returns VOIDmode to indicate failure. */
526 if (req_mode == VOIDmode)
527 return false;
528
529 if (GET_CODE (PATTERN (insn)) == SET)
530 return s390_match_ccmode_set (PATTERN (insn), req_mode);
531
532 if (GET_CODE (PATTERN (insn)) == PARALLEL)
533 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
534 {
535 rtx set = XVECEXP (PATTERN (insn), 0, i);
536 if (GET_CODE (set) == SET)
537 if (!s390_match_ccmode_set (set, req_mode))
538 return false;
539 }
540
541 return true;
542 }
543
544 /* If a test-under-mask instruction can be used to implement
545 (compare (and ... OP1) OP2), return the CC mode required
546 to do that. Otherwise, return VOIDmode.
547 MIXED is true if the instruction can distinguish between
548 CC1 and CC2 for mixed selected bits (TMxx); it is false
549 if the instruction cannot (TM). */
550
551 enum machine_mode
552 s390_tm_ccmode (rtx op1, rtx op2, bool mixed)
553 {
554 int bit0, bit1;
555
556 /* ??? Fixme: should work on CONST_DOUBLE as well. */
557 if (GET_CODE (op1) != CONST_INT || GET_CODE (op2) != CONST_INT)
558 return VOIDmode;
559
560 /* Selected bits all zero: CC0.
561 e.g.: int a; if ((a & (16 + 128)) == 0) */
562 if (INTVAL (op2) == 0)
563 return CCTmode;
564
565 /* Selected bits all one: CC3.
566 e.g.: int a; if ((a & (16 + 128)) == 16 + 128) */
567 if (INTVAL (op2) == INTVAL (op1))
568 return CCT3mode;
569
570 /* Exactly two bits selected, mixed zeroes and ones: CC1 or CC2. e.g.:
571 int a;
572 if ((a & (16 + 128)) == 16) -> CCT1
573 if ((a & (16 + 128)) == 128) -> CCT2 */
574 if (mixed)
575 {
576 bit1 = exact_log2 (INTVAL (op2));
577 bit0 = exact_log2 (INTVAL (op1) ^ INTVAL (op2));
578 if (bit0 != -1 && bit1 != -1)
579 return bit0 > bit1 ? CCT1mode : CCT2mode;
580 }
581
582 return VOIDmode;
583 }
584
585 /* Given a comparison code OP (EQ, NE, etc.) and the operands
586 OP0 and OP1 of a COMPARE, return the mode to be used for the
587 comparison. */
588
589 enum machine_mode
590 s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
591 {
592 switch (code)
593 {
594 case EQ:
595 case NE:
596 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
597 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
598 return CCAPmode;
599 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
600 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
601 return CCAPmode;
602 if ((GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
603 || GET_CODE (op1) == NEG)
604 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
605 return CCLmode;
606
607 if (GET_CODE (op0) == AND)
608 {
609 /* Check whether we can potentially do it via TM. */
610 enum machine_mode ccmode;
611 ccmode = s390_tm_ccmode (XEXP (op0, 1), op1, 1);
612 if (ccmode != VOIDmode)
613 {
614 /* Relax CCTmode to CCZmode to allow fall-back to AND
615 if that turns out to be beneficial. */
616 return ccmode == CCTmode ? CCZmode : ccmode;
617 }
618 }
619
620 if (register_operand (op0, HImode)
621 && GET_CODE (op1) == CONST_INT
622 && (INTVAL (op1) == -1 || INTVAL (op1) == 65535))
623 return CCT3mode;
624 if (register_operand (op0, QImode)
625 && GET_CODE (op1) == CONST_INT
626 && (INTVAL (op1) == -1 || INTVAL (op1) == 255))
627 return CCT3mode;
628
629 return CCZmode;
630
631 case LE:
632 case LT:
633 case GE:
634 case GT:
635 /* The only overflow condition of NEG and ABS happens when
636 -INT_MAX is used as parameter, which stays negative. So
637 we have an overflow from a positive value to a negative.
638 Using CCAP mode the resulting cc can be used for comparisons. */
639 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
640 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
641 return CCAPmode;
642
643 /* If constants are involved in an add instruction it is possible to use
644 the resulting cc for comparisons with zero. Knowing the sign of the
645 constant the overflow behavior gets predictable. e.g.:
646 int a, b; if ((b = a + c) > 0)
647 with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP */
648 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
649 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
650 {
651 if (INTVAL (XEXP((op0), 1)) < 0)
652 return CCANmode;
653 else
654 return CCAPmode;
655 }
656 /* Fall through. */
657 case UNORDERED:
658 case ORDERED:
659 case UNEQ:
660 case UNLE:
661 case UNLT:
662 case UNGE:
663 case UNGT:
664 case LTGT:
665 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
666 && GET_CODE (op1) != CONST_INT)
667 return CCSRmode;
668 return CCSmode;
669
670 case LTU:
671 case GEU:
672 if (GET_CODE (op0) == PLUS
673 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
674 return CCL1mode;
675
676 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
677 && GET_CODE (op1) != CONST_INT)
678 return CCURmode;
679 return CCUmode;
680
681 case LEU:
682 case GTU:
683 if (GET_CODE (op0) == MINUS
684 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
685 return CCL2mode;
686
687 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
688 && GET_CODE (op1) != CONST_INT)
689 return CCURmode;
690 return CCUmode;
691
692 default:
693 gcc_unreachable ();
694 }
695 }
696
697 /* Replace the comparison OP0 CODE OP1 by a semantically equivalent one
698 that we can implement more efficiently. */
699
700 void
701 s390_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1)
702 {
703 /* Convert ZERO_EXTRACT back to AND to enable TM patterns. */
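/* E.g. (zero_extract:SI x (const_int 1) (const_int 2)) compared against
   zero becomes (and:SI x (const_int 0x20000000)) == 0, i.e. the mask
   is ((1 << LEN) - 1) << (32 - POS - LEN).  */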
704 if ((*code == EQ || *code == NE)
705 && *op1 == const0_rtx
706 && GET_CODE (*op0) == ZERO_EXTRACT
707 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
708 && GET_CODE (XEXP (*op0, 2)) == CONST_INT
709 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
710 {
711 rtx inner = XEXP (*op0, 0);
712 HOST_WIDE_INT modesize = GET_MODE_BITSIZE (GET_MODE (inner));
713 HOST_WIDE_INT len = INTVAL (XEXP (*op0, 1));
714 HOST_WIDE_INT pos = INTVAL (XEXP (*op0, 2));
715
716 if (len > 0 && len < modesize
717 && pos >= 0 && pos + len <= modesize
718 && modesize <= HOST_BITS_PER_WIDE_INT)
719 {
720 unsigned HOST_WIDE_INT block;
721 block = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
722 block <<= modesize - pos - len;
723
724 *op0 = gen_rtx_AND (GET_MODE (inner), inner,
725 gen_int_mode (block, GET_MODE (inner)));
726 }
727 }
728
729 /* Narrow AND of memory against immediate to enable TM. */
730 if ((*code == EQ || *code == NE)
731 && *op1 == const0_rtx
732 && GET_CODE (*op0) == AND
733 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
734 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
735 {
736 rtx inner = XEXP (*op0, 0);
737 rtx mask = XEXP (*op0, 1);
738
739 /* Ignore paradoxical SUBREGs if all extra bits are masked out. */
740 if (GET_CODE (inner) == SUBREG
741 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (inner)))
742 && (GET_MODE_SIZE (GET_MODE (inner))
743 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
744 && ((INTVAL (mask)
745 & GET_MODE_MASK (GET_MODE (inner))
746 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
747 == 0))
748 inner = SUBREG_REG (inner);
749
750 /* Do not change volatile MEMs. */
751 if (MEM_P (inner) && !MEM_VOLATILE_P (inner))
752 {
753 int part = s390_single_part (XEXP (*op0, 1),
754 GET_MODE (inner), QImode, 0);
755 if (part >= 0)
756 {
757 mask = gen_int_mode (s390_extract_part (mask, QImode, 0), QImode);
758 inner = adjust_address_nv (inner, QImode, part);
759 *op0 = gen_rtx_AND (QImode, inner, mask);
760 }
761 }
762 }
763
764 /* Narrow comparisons against 0xffff to HImode if possible. */
765 if ((*code == EQ || *code == NE)
766 && GET_CODE (*op1) == CONST_INT
767 && INTVAL (*op1) == 0xffff
768 && SCALAR_INT_MODE_P (GET_MODE (*op0))
769 && (nonzero_bits (*op0, GET_MODE (*op0))
770 & ~(unsigned HOST_WIDE_INT) 0xffff) == 0)
771 {
772 *op0 = gen_lowpart (HImode, *op0);
773 *op1 = constm1_rtx;
774 }
775
776 /* Remove redundant UNSPEC_CCU_TO_INT conversions if possible. */
777 if (GET_CODE (*op0) == UNSPEC
778 && XINT (*op0, 1) == UNSPEC_CCU_TO_INT
779 && XVECLEN (*op0, 0) == 1
780 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCUmode
781 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
782 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
783 && *op1 == const0_rtx)
784 {
785 enum rtx_code new_code = UNKNOWN;
786 switch (*code)
787 {
788 case EQ: new_code = EQ; break;
789 case NE: new_code = NE; break;
790 case LT: new_code = GTU; break;
791 case GT: new_code = LTU; break;
792 case LE: new_code = GEU; break;
793 case GE: new_code = LEU; break;
794 default: break;
795 }
796
797 if (new_code != UNKNOWN)
798 {
799 *op0 = XVECEXP (*op0, 0, 0);
800 *code = new_code;
801 }
802 }
803
804 /* Remove redundant UNSPEC_CCZ_TO_INT conversions if possible. */
805 if (GET_CODE (*op0) == UNSPEC
806 && XINT (*op0, 1) == UNSPEC_CCZ_TO_INT
807 && XVECLEN (*op0, 0) == 1
808 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCZmode
809 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
810 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
811 && *op1 == const0_rtx)
812 {
813 enum rtx_code new_code = UNKNOWN;
814 switch (*code)
815 {
816 case EQ: new_code = EQ; break;
817 case NE: new_code = NE; break;
818 default: break;
819 }
820
821 if (new_code != UNKNOWN)
822 {
823 *op0 = XVECEXP (*op0, 0, 0);
824 *code = new_code;
825 }
826 }
827
828 /* Simplify cascaded EQ, NE with const0_rtx. */
829 if ((*code == NE || *code == EQ)
830 && (GET_CODE (*op0) == EQ || GET_CODE (*op0) == NE)
831 && GET_MODE (*op0) == SImode
832 && GET_MODE (XEXP (*op0, 0)) == CCZ1mode
833 && REG_P (XEXP (*op0, 0))
834 && XEXP (*op0, 1) == const0_rtx
835 && *op1 == const0_rtx)
836 {
837 if ((*code == EQ && GET_CODE (*op0) == NE)
838 || (*code == NE && GET_CODE (*op0) == EQ))
839 *code = EQ;
840 else
841 *code = NE;
842 *op0 = XEXP (*op0, 0);
843 }
844
845 /* Prefer register over memory as first operand. */
846 if (MEM_P (*op0) && REG_P (*op1))
847 {
848 rtx tem = *op0; *op0 = *op1; *op1 = tem;
849 *code = swap_condition (*code);
850 }
851 }
852
853 /* Emit a compare instruction suitable to implement the comparison
854 OP0 CODE OP1. Return the correct condition RTL to be placed in
855 the IF_THEN_ELSE of the conditional branch testing the result. */
856
857 rtx
858 s390_emit_compare (enum rtx_code code, rtx op0, rtx op1)
859 {
860 enum machine_mode mode = s390_select_ccmode (code, op0, op1);
861 rtx cc;
862
863 /* Do not output a redundant compare instruction if a compare_and_swap
864 pattern already computed the result and the machine modes are compatible. */
865 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
866 {
867 gcc_assert (s390_cc_modes_compatible (GET_MODE (op0), mode)
868 == GET_MODE (op0));
869 cc = op0;
870 }
871 else
872 {
873 cc = gen_rtx_REG (mode, CC_REGNUM);
874 emit_insn (gen_rtx_SET (VOIDmode, cc, gen_rtx_COMPARE (mode, op0, op1)));
875 }
876
877 return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
878 }
879
880 /* Emit a SImode compare and swap instruction setting MEM to NEW_RTX if OLD
881 matches CMP.
882 Return the correct condition RTL to be placed in the IF_THEN_ELSE of the
883 conditional branch testing the result. */
884
885 static rtx
886 s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem, rtx cmp, rtx new_rtx)
887 {
888 emit_insn (gen_sync_compare_and_swapsi (old, mem, cmp, new_rtx));
889 return s390_emit_compare (code, gen_rtx_REG (CCZ1mode, CC_REGNUM), const0_rtx);
890 }
891
892 /* Emit a jump instruction to TARGET. If COND is NULL_RTX, emit an
893 unconditional jump, else a conditional jump under condition COND. */
894
895 void
896 s390_emit_jump (rtx target, rtx cond)
897 {
898 rtx insn;
899
900 target = gen_rtx_LABEL_REF (VOIDmode, target);
901 if (cond)
902 target = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, target, pc_rtx);
903
904 insn = gen_rtx_SET (VOIDmode, pc_rtx, target);
905 emit_jump_insn (insn);
906 }
907
908 /* Return branch condition mask to implement a branch
909 specified by CODE. Return -1 for invalid comparisons. */
910
911 int
912 s390_branch_condition_mask (rtx code)
913 {
914 const int CC0 = 1 << 3;
915 const int CC1 = 1 << 2;
916 const int CC2 = 1 << 1;
917 const int CC3 = 1 << 0;
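/* E.g. in CCZmode an EQ branch tests condition code 0 only, so the
   mask is CC0 == 8, whereas NE uses CC1 | CC2 | CC3 == 7.  */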
918
919 gcc_assert (GET_CODE (XEXP (code, 0)) == REG);
920 gcc_assert (REGNO (XEXP (code, 0)) == CC_REGNUM);
921 gcc_assert (XEXP (code, 1) == const0_rtx);
922
923 switch (GET_MODE (XEXP (code, 0)))
924 {
925 case CCZmode:
926 case CCZ1mode:
927 switch (GET_CODE (code))
928 {
929 case EQ: return CC0;
930 case NE: return CC1 | CC2 | CC3;
931 default: return -1;
932 }
933 break;
934
935 case CCT1mode:
936 switch (GET_CODE (code))
937 {
938 case EQ: return CC1;
939 case NE: return CC0 | CC2 | CC3;
940 default: return -1;
941 }
942 break;
943
944 case CCT2mode:
945 switch (GET_CODE (code))
946 {
947 case EQ: return CC2;
948 case NE: return CC0 | CC1 | CC3;
949 default: return -1;
950 }
951 break;
952
953 case CCT3mode:
954 switch (GET_CODE (code))
955 {
956 case EQ: return CC3;
957 case NE: return CC0 | CC1 | CC2;
958 default: return -1;
959 }
960 break;
961
962 case CCLmode:
963 switch (GET_CODE (code))
964 {
965 case EQ: return CC0 | CC2;
966 case NE: return CC1 | CC3;
967 default: return -1;
968 }
969 break;
970
971 case CCL1mode:
972 switch (GET_CODE (code))
973 {
974 case LTU: return CC2 | CC3; /* carry */
975 case GEU: return CC0 | CC1; /* no carry */
976 default: return -1;
977 }
978 break;
979
980 case CCL2mode:
981 switch (GET_CODE (code))
982 {
983 case GTU: return CC0 | CC1; /* borrow */
984 case LEU: return CC2 | CC3; /* no borrow */
985 default: return -1;
986 }
987 break;
988
989 case CCL3mode:
990 switch (GET_CODE (code))
991 {
992 case EQ: return CC0 | CC2;
993 case NE: return CC1 | CC3;
994 case LTU: return CC1;
995 case GTU: return CC3;
996 case LEU: return CC1 | CC2;
997 case GEU: return CC2 | CC3;
998 default: return -1;
999 }
1000
1001 case CCUmode:
1002 switch (GET_CODE (code))
1003 {
1004 case EQ: return CC0;
1005 case NE: return CC1 | CC2 | CC3;
1006 case LTU: return CC1;
1007 case GTU: return CC2;
1008 case LEU: return CC0 | CC1;
1009 case GEU: return CC0 | CC2;
1010 default: return -1;
1011 }
1012 break;
1013
1014 case CCURmode:
1015 switch (GET_CODE (code))
1016 {
1017 case EQ: return CC0;
1018 case NE: return CC2 | CC1 | CC3;
1019 case LTU: return CC2;
1020 case GTU: return CC1;
1021 case LEU: return CC0 | CC2;
1022 case GEU: return CC0 | CC1;
1023 default: return -1;
1024 }
1025 break;
1026
1027 case CCAPmode:
1028 switch (GET_CODE (code))
1029 {
1030 case EQ: return CC0;
1031 case NE: return CC1 | CC2 | CC3;
1032 case LT: return CC1 | CC3;
1033 case GT: return CC2;
1034 case LE: return CC0 | CC1 | CC3;
1035 case GE: return CC0 | CC2;
1036 default: return -1;
1037 }
1038 break;
1039
1040 case CCANmode:
1041 switch (GET_CODE (code))
1042 {
1043 case EQ: return CC0;
1044 case NE: return CC1 | CC2 | CC3;
1045 case LT: return CC1;
1046 case GT: return CC2 | CC3;
1047 case LE: return CC0 | CC1;
1048 case GE: return CC0 | CC2 | CC3;
1049 default: return -1;
1050 }
1051 break;
1052
1053 case CCSmode:
1054 switch (GET_CODE (code))
1055 {
1056 case EQ: return CC0;
1057 case NE: return CC1 | CC2 | CC3;
1058 case LT: return CC1;
1059 case GT: return CC2;
1060 case LE: return CC0 | CC1;
1061 case GE: return CC0 | CC2;
1062 case UNORDERED: return CC3;
1063 case ORDERED: return CC0 | CC1 | CC2;
1064 case UNEQ: return CC0 | CC3;
1065 case UNLT: return CC1 | CC3;
1066 case UNGT: return CC2 | CC3;
1067 case UNLE: return CC0 | CC1 | CC3;
1068 case UNGE: return CC0 | CC2 | CC3;
1069 case LTGT: return CC1 | CC2;
1070 default: return -1;
1071 }
1072 break;
1073
1074 case CCSRmode:
1075 switch (GET_CODE (code))
1076 {
1077 case EQ: return CC0;
1078 case NE: return CC2 | CC1 | CC3;
1079 case LT: return CC2;
1080 case GT: return CC1;
1081 case LE: return CC0 | CC2;
1082 case GE: return CC0 | CC1;
1083 case UNORDERED: return CC3;
1084 case ORDERED: return CC0 | CC2 | CC1;
1085 case UNEQ: return CC0 | CC3;
1086 case UNLT: return CC2 | CC3;
1087 case UNGT: return CC1 | CC3;
1088 case UNLE: return CC0 | CC2 | CC3;
1089 case UNGE: return CC0 | CC1 | CC3;
1090 case LTGT: return CC2 | CC1;
1091 default: return -1;
1092 }
1093 break;
1094
1095 default:
1096 return -1;
1097 }
1098 }
1099
1100
1101 /* Return branch condition mask to implement a compare and branch
1102 specified by CODE. Return -1 for invalid comparisons. */
1103
1104 int
1105 s390_compare_and_branch_condition_mask (rtx code)
1106 {
1107 const int CC0 = 1 << 3;
1108 const int CC1 = 1 << 2;
1109 const int CC2 = 1 << 1;
1110
1111 switch (GET_CODE (code))
1112 {
1113 case EQ:
1114 return CC0;
1115 case NE:
1116 return CC1 | CC2;
1117 case LT:
1118 case LTU:
1119 return CC1;
1120 case GT:
1121 case GTU:
1122 return CC2;
1123 case LE:
1124 case LEU:
1125 return CC0 | CC1;
1126 case GE:
1127 case GEU:
1128 return CC0 | CC2;
1129 default:
1130 gcc_unreachable ();
1131 }
1132 return -1;
1133 }
1134
1135 /* If INV is false, return assembler mnemonic string to implement
1136 a branch specified by CODE. If INV is true, return mnemonic
1137 for the corresponding inverted branch. */
1138
1139 static const char *
1140 s390_branch_condition_mnemonic (rtx code, int inv)
1141 {
1142 int mask;
1143
1144 static const char *const mnemonic[16] =
1145 {
1146 NULL, "o", "h", "nle",
1147 "l", "nhe", "lh", "ne",
1148 "e", "nlh", "he", "nl",
1149 "le", "nh", "no", NULL
1150 };
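/* E.g. a CCZmode EQ comparison yields mask 8 and hence mnemonic "e";
   with INV set the mask becomes 7 and the mnemonic "ne".  */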
1151
1152 if (GET_CODE (XEXP (code, 0)) == REG
1153 && REGNO (XEXP (code, 0)) == CC_REGNUM
1154 && XEXP (code, 1) == const0_rtx)
1155 mask = s390_branch_condition_mask (code);
1156 else
1157 mask = s390_compare_and_branch_condition_mask (code);
1158
1159 gcc_assert (mask >= 0);
1160
1161 if (inv)
1162 mask ^= 15;
1163
1164 gcc_assert (mask >= 1 && mask <= 14);
1165
1166 return mnemonic[mask];
1167 }
1168
1169 /* Return the part of op which has a value different from def.
1170 The size of the part is determined by mode.
1171 Use this function only if you already know that op really
1172 contains such a part. */
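/* E.g. for OP == 0xff00, MODE == QImode and DEF == 0 the returned
   part is 0xff.  */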
1173
1174 unsigned HOST_WIDE_INT
1175 s390_extract_part (rtx op, enum machine_mode mode, int def)
1176 {
1177 unsigned HOST_WIDE_INT value = 0;
1178 int max_parts = HOST_BITS_PER_WIDE_INT / GET_MODE_BITSIZE (mode);
1179 int part_bits = GET_MODE_BITSIZE (mode);
1180 unsigned HOST_WIDE_INT part_mask
1181 = ((unsigned HOST_WIDE_INT)1 << part_bits) - 1;
1182 int i;
1183
1184 for (i = 0; i < max_parts; i++)
1185 {
1186 if (i == 0)
1187 value = (unsigned HOST_WIDE_INT) INTVAL (op);
1188 else
1189 value >>= part_bits;
1190
1191 if ((value & part_mask) != (def & part_mask))
1192 return value & part_mask;
1193 }
1194
1195 gcc_unreachable ();
1196 }
1197
1198 /* If OP is an integer constant of mode MODE with exactly one
1199 part of mode PART_MODE unequal to DEF, return the number of that
1200 part. Otherwise, return -1. */
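/* E.g. for OP == 0x00ff0000 in SImode with PART_MODE == QImode and
   DEF == 0 the result is 1; parts are numbered starting from the most
   significant one.  */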
1201
1202 int
1203 s390_single_part (rtx op,
1204 enum machine_mode mode,
1205 enum machine_mode part_mode,
1206 int def)
1207 {
1208 unsigned HOST_WIDE_INT value = 0;
1209 int n_parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (part_mode);
1210 unsigned HOST_WIDE_INT part_mask
1211 = ((unsigned HOST_WIDE_INT)1 << GET_MODE_BITSIZE (part_mode)) - 1;
1212 int i, part = -1;
1213
1214 if (GET_CODE (op) != CONST_INT)
1215 return -1;
1216
1217 for (i = 0; i < n_parts; i++)
1218 {
1219 if (i == 0)
1220 value = (unsigned HOST_WIDE_INT) INTVAL (op);
1221 else
1222 value >>= GET_MODE_BITSIZE (part_mode);
1223
1224 if ((value & part_mask) != (def & part_mask))
1225 {
1226 if (part != -1)
1227 return -1;
1228 else
1229 part = i;
1230 }
1231 }
1232 return part == -1 ? -1 : n_parts - 1 - part;
1233 }
1234
1235 /* Return true if IN contains a contiguous bitfield in the lower SIZE
1236 bits and no other bits are set in IN. POS and LENGTH can be used
1237 to obtain the start position and the length of the bitfield.
1238
1239 POS gives the position of the first bit of the bitfield counting
1240 from the lowest order bit starting with zero. In order to use this
1241 value for S/390 instructions this has to be converted to "bits big
1242 endian" style. */
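/* E.g. for IN == 0x00f0 and SIZE == 16 this returns true with
   *POS == 4 and *LENGTH == 4.  */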
1243
1244 bool
1245 s390_contiguous_bitmask_p (unsigned HOST_WIDE_INT in, int size,
1246 int *pos, int *length)
1247 {
1248 int tmp_pos = 0;
1249 int tmp_length = 0;
1250 int i;
1251 unsigned HOST_WIDE_INT mask = 1ULL;
1252 bool contiguous = false;
1253
1254 for (i = 0; i < size; mask <<= 1, i++)
1255 {
1256 if (contiguous)
1257 {
1258 if (mask & in)
1259 tmp_length++;
1260 else
1261 break;
1262 }
1263 else
1264 {
1265 if (mask & in)
1266 {
1267 contiguous = true;
1268 tmp_length++;
1269 }
1270 else
1271 tmp_pos++;
1272 }
1273 }
1274
1275 if (!tmp_length)
1276 return false;
1277
1278 /* Calculate a mask for all bits beyond the contiguous bits. */
1279 mask = (-1LL & ~(((1ULL << (tmp_length + tmp_pos - 1)) << 1) - 1));
1280
1281 if (mask & in)
1282 return false;
1283
1284 if (tmp_length + tmp_pos - 1 > size)
1285 return false;
1286
1287 if (length)
1288 *length = tmp_length;
1289
1290 if (pos)
1291 *pos = tmp_pos;
1292
1293 return true;
1294 }
1295
1296 /* Check whether we can (and want to) split a double-word
1297 move in mode MODE from SRC to DST into two single-word
1298 moves, moving the subword FIRST_SUBWORD first. */
1299
1300 bool
1301 s390_split_ok_p (rtx dst, rtx src, enum machine_mode mode, int first_subword)
1302 {
1303 /* Floating point registers cannot be split. */
1304 if (FP_REG_P (src) || FP_REG_P (dst))
1305 return false;
1306
1307 /* We don't need to split if operands are directly accessible. */
1308 if (s_operand (src, mode) || s_operand (dst, mode))
1309 return false;
1310
1311 /* Non-offsettable memory references cannot be split. */
1312 if ((GET_CODE (src) == MEM && !offsettable_memref_p (src))
1313 || (GET_CODE (dst) == MEM && !offsettable_memref_p (dst)))
1314 return false;
1315
1316 /* Moving the first subword must not clobber a register
1317 needed to move the second subword. */
1318 if (register_operand (dst, mode))
1319 {
1320 rtx subreg = operand_subword (dst, first_subword, 0, mode);
1321 if (reg_overlap_mentioned_p (subreg, src))
1322 return false;
1323 }
1324
1325 return true;
1326 }
1327
1328 /* Return true if it can be proven that [MEM1, MEM1 + SIZE]
1329 and [MEM2, MEM2 + SIZE] do overlap and false
1330 otherwise. */
1331
1332 bool
1333 s390_overlap_p (rtx mem1, rtx mem2, HOST_WIDE_INT size)
1334 {
1335 rtx addr1, addr2, addr_delta;
1336 HOST_WIDE_INT delta;
1337
1338 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
1339 return true;
1340
1341 if (size == 0)
1342 return false;
1343
1344 addr1 = XEXP (mem1, 0);
1345 addr2 = XEXP (mem2, 0);
1346
1347 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
1348
1349 /* This overlapping check is used by peepholes merging memory block operations.
1350 Overlapping operations would otherwise be recognized by the S/390 hardware
1351 and would fall back to a slower implementation. Allowing overlapping
1352 operations would lead to slow code but not to wrong code. Therefore we are
1353 somewhat optimistic if we cannot prove that the memory blocks are
1354 overlapping.
1355 That's why we return false here although this may accept operations on
1356 overlapping memory areas. */
1357 if (!addr_delta || GET_CODE (addr_delta) != CONST_INT)
1358 return false;
1359
1360 delta = INTVAL (addr_delta);
1361
1362 if (delta == 0
1363 || (delta > 0 && delta < size)
1364 || (delta < 0 && -delta < size))
1365 return true;
1366
1367 return false;
1368 }
1369
1370 /* Check whether the address of memory reference MEM2 equals exactly
1371 the address of memory reference MEM1 plus DELTA. Return true if
1372 we can prove this to be the case, false otherwise. */
1373
1374 bool
1375 s390_offset_p (rtx mem1, rtx mem2, rtx delta)
1376 {
1377 rtx addr1, addr2, addr_delta;
1378
1379 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
1380 return false;
1381
1382 addr1 = XEXP (mem1, 0);
1383 addr2 = XEXP (mem2, 0);
1384
1385 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
1386 if (!addr_delta || !rtx_equal_p (addr_delta, delta))
1387 return false;
1388
1389 return true;
1390 }
1391
1392 /* Expand logical operator CODE in mode MODE with operands OPERANDS. */
1393
1394 void
1395 s390_expand_logical_operator (enum rtx_code code, enum machine_mode mode,
1396 rtx *operands)
1397 {
1398 enum machine_mode wmode = mode;
1399 rtx dst = operands[0];
1400 rtx src1 = operands[1];
1401 rtx src2 = operands[2];
1402 rtx op, clob, tem;
1403
1404 /* If we cannot handle the operation directly, use a temp register. */
1405 if (!s390_logical_operator_ok_p (operands))
1406 dst = gen_reg_rtx (mode);
1407
1408 /* QImode and HImode patterns make sense only if we have a destination
1409 in memory. Otherwise perform the operation in SImode. */
1410 if ((mode == QImode || mode == HImode) && GET_CODE (dst) != MEM)
1411 wmode = SImode;
1412
1413 /* Widen operands if required. */
1414 if (mode != wmode)
1415 {
1416 if (GET_CODE (dst) == SUBREG
1417 && (tem = simplify_subreg (wmode, dst, mode, 0)) != 0)
1418 dst = tem;
1419 else if (REG_P (dst))
1420 dst = gen_rtx_SUBREG (wmode, dst, 0);
1421 else
1422 dst = gen_reg_rtx (wmode);
1423
1424 if (GET_CODE (src1) == SUBREG
1425 && (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)
1426 src1 = tem;
1427 else if (GET_MODE (src1) != VOIDmode)
1428 src1 = gen_rtx_SUBREG (wmode, force_reg (mode, src1), 0);
1429
1430 if (GET_CODE (src2) == SUBREG
1431 && (tem = simplify_subreg (wmode, src2, mode, 0)) != 0)
1432 src2 = tem;
1433 else if (GET_MODE (src2) != VOIDmode)
1434 src2 = gen_rtx_SUBREG (wmode, force_reg (mode, src2), 0);
1435 }
1436
1437 /* Emit the instruction. */
1438 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, wmode, src1, src2));
1439 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
1440 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
1441
1442 /* Fix up the destination if needed. */
1443 if (dst != operands[0])
1444 emit_move_insn (operands[0], gen_lowpart (mode, dst));
1445 }
1446
1447 /* Check whether OPERANDS are OK for a logical operation (AND, IOR, XOR). */
1448
1449 bool
1450 s390_logical_operator_ok_p (rtx *operands)
1451 {
1452 /* If the destination operand is in memory, it needs to coincide
1453 with one of the source operands. After reload, it has to be
1454 the first source operand. */
1455 if (GET_CODE (operands[0]) == MEM)
1456 return rtx_equal_p (operands[0], operands[1])
1457 || (!reload_completed && rtx_equal_p (operands[0], operands[2]));
1458
1459 return true;
1460 }
1461
1462 /* Narrow logical operation CODE of memory operand MEMOP with immediate
1463 operand IMMOP to switch from SS to SI type instructions. */
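/* E.g. an SImode AND of a memory operand with the immediate 0xffffff00
   is narrowed to a QImode AND with mask 0x00 of the byte at offset 3,
   i.e. the least significant byte on this big-endian target.  */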
1464
1465 void
1466 s390_narrow_logical_operator (enum rtx_code code, rtx *memop, rtx *immop)
1467 {
1468 int def = code == AND ? -1 : 0;
1469 HOST_WIDE_INT mask;
1470 int part;
1471
1472 gcc_assert (GET_CODE (*memop) == MEM);
1473 gcc_assert (!MEM_VOLATILE_P (*memop));
1474
1475 mask = s390_extract_part (*immop, QImode, def);
1476 part = s390_single_part (*immop, GET_MODE (*memop), QImode, def);
1477 gcc_assert (part >= 0);
1478
1479 *memop = adjust_address (*memop, QImode, part);
1480 *immop = gen_int_mode (mask, QImode);
1481 }
1482
1483
1484 /* How to allocate a 'struct machine_function'. */
1485
1486 static struct machine_function *
1487 s390_init_machine_status (void)
1488 {
1489 return ggc_alloc_cleared_machine_function ();
1490 }
1491
1492 /* Change optimizations to be performed, depending on the
1493 optimization level. */
1494
1495 static const struct default_options s390_option_optimization_table[] =
1496 {
1497 { OPT_LEVELS_1_PLUS, OPT_fomit_frame_pointer, NULL, 1 },
1498
1499 /* ??? There are apparently still problems with -fcaller-saves. */
1500 { OPT_LEVELS_ALL, OPT_fcaller_saves, NULL, 0 },
1501
1502 /* Use MVCLE instructions to decrease code size if requested. */
1503 { OPT_LEVELS_SIZE, OPT_mmvcle, NULL, 1 },
1504
1505 { OPT_LEVELS_NONE, 0, NULL, 0 }
1506 };
1507
1508 /* Implement TARGET_OPTION_INIT_STRUCT. */
1509
1510 static void
1511 s390_option_init_struct (struct gcc_options *opts)
1512 {
1513 /* By default, always emit DWARF-2 unwind info. This allows debugging
1514 without maintaining a stack frame back-chain. */
1515 opts->x_flag_asynchronous_unwind_tables = 1;
1516 }
1517
1518 /* Return true if ARG is the name of a processor. Set *TYPE and *FLAGS
1519 to the associated processor_type and processor_flags if so. */
1520
1521 static bool
1522 s390_handle_arch_option (const char *arg,
1523 enum processor_type *type,
1524 int *flags)
1525 {
1526 static struct pta
1527 {
1528 const char *const name; /* processor name or nickname. */
1529 const enum processor_type processor;
1530 const int flags; /* From enum processor_flags. */
1531 }
1532 const processor_alias_table[] =
1533 {
1534 {"g5", PROCESSOR_9672_G5, PF_IEEE_FLOAT},
1535 {"g6", PROCESSOR_9672_G6, PF_IEEE_FLOAT},
1536 {"z900", PROCESSOR_2064_Z900, PF_IEEE_FLOAT | PF_ZARCH},
1537 {"z990", PROCESSOR_2084_Z990, PF_IEEE_FLOAT | PF_ZARCH
1538 | PF_LONG_DISPLACEMENT},
1539 {"z9-109", PROCESSOR_2094_Z9_109, PF_IEEE_FLOAT | PF_ZARCH
1540 | PF_LONG_DISPLACEMENT | PF_EXTIMM},
1541 {"z9-ec", PROCESSOR_2094_Z9_109, PF_IEEE_FLOAT | PF_ZARCH
1542 | PF_LONG_DISPLACEMENT | PF_EXTIMM | PF_DFP },
1543 {"z10", PROCESSOR_2097_Z10, PF_IEEE_FLOAT | PF_ZARCH
1544 | PF_LONG_DISPLACEMENT | PF_EXTIMM | PF_DFP | PF_Z10},
1545 {"z196", PROCESSOR_2817_Z196, PF_IEEE_FLOAT | PF_ZARCH
1546 | PF_LONG_DISPLACEMENT | PF_EXTIMM | PF_DFP | PF_Z10 | PF_Z196 },
1547 };
1548 size_t i;
1549
1550 for (i = 0; i < ARRAY_SIZE (processor_alias_table); i++)
1551 if (strcmp (arg, processor_alias_table[i].name) == 0)
1552 {
1553 *type = processor_alias_table[i].processor;
1554 *flags = processor_alias_table[i].flags;
1555 return true;
1556 }
1557
1558 *type = PROCESSOR_max;
1559 *flags = 0;
1560 return false;
1561 }
1562
1563 /* Implement TARGET_HANDLE_OPTION. */
1564
1565 static bool
1566 s390_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
1567 {
1568 switch (code)
1569 {
1570 case OPT_march_:
1571 return s390_handle_arch_option (arg, &s390_arch, &s390_arch_flags);
1572
1573 case OPT_mstack_guard_:
1574 if (sscanf (arg, HOST_WIDE_INT_PRINT_DEC, &s390_stack_guard) != 1)
1575 return false;
1576 if (exact_log2 (s390_stack_guard) == -1)
1577 error ("stack guard value must be an exact power of 2");
1578 return true;
1579
1580 case OPT_mstack_size_:
1581 if (sscanf (arg, HOST_WIDE_INT_PRINT_DEC, &s390_stack_size) != 1)
1582 return false;
1583 if (exact_log2 (s390_stack_size) == -1)
1584 error ("stack size must be an exact power of 2");
1585 return true;
1586
1587 case OPT_mtune_:
1588 return s390_handle_arch_option (arg, &s390_tune, &s390_tune_flags);
1589
1590 case OPT_mwarn_framesize_:
1591 return sscanf (arg, HOST_WIDE_INT_PRINT_DEC, &s390_warn_framesize) == 1;
1592
1593 default:
1594 return true;
1595 }
1596 }
1597
1598 static void
1599 s390_option_override (void)
1600 {
1601 /* Set up function hooks. */
1602 init_machine_status = s390_init_machine_status;
1603
1604 /* Architecture mode defaults according to ABI. */
1605 if (!(target_flags_explicit & MASK_ZARCH))
1606 {
1607 if (TARGET_64BIT)
1608 target_flags |= MASK_ZARCH;
1609 else
1610 target_flags &= ~MASK_ZARCH;
1611 }
1612
1613 /* Determine processor architectural level. */
1614 if (!s390_arch_string)
1615 {
1616 s390_arch_string = TARGET_ZARCH? "z900" : "g5";
1617 s390_handle_arch_option (s390_arch_string, &s390_arch, &s390_arch_flags);
1618 }
1619
1620 /* This check is triggered when the user specified a wrong -march=
1621 string and prevents subsequent error messages from being
1622 issued. */
1623 if (s390_arch == PROCESSOR_max)
1624 return;
1625
1626 /* Determine processor to tune for. */
1627 if (s390_tune == PROCESSOR_max)
1628 {
1629 s390_tune = s390_arch;
1630 s390_tune_flags = s390_arch_flags;
1631 }
1632
1633 /* Sanity checks. */
1634 if (TARGET_ZARCH && !TARGET_CPU_ZARCH)
1635 error ("z/Architecture mode not supported on %s", s390_arch_string);
1636 if (TARGET_64BIT && !TARGET_ZARCH)
1637 error ("64-bit ABI not supported in ESA/390 mode");
1638
1639 if (TARGET_HARD_DFP && !TARGET_DFP)
1640 {
1641 if (target_flags_explicit & MASK_HARD_DFP)
1642 {
1643 if (!TARGET_CPU_DFP)
1644 error ("hardware decimal floating point instructions"
1645 " not available on %s", s390_arch_string);
1646 if (!TARGET_ZARCH)
1647 error ("hardware decimal floating point instructions"
1648 " not available in ESA/390 mode");
1649 }
1650 else
1651 target_flags &= ~MASK_HARD_DFP;
1652 }
1653
1654 if ((target_flags_explicit & MASK_SOFT_FLOAT) && TARGET_SOFT_FLOAT)
1655 {
1656 if ((target_flags_explicit & MASK_HARD_DFP) && TARGET_HARD_DFP)
1657 error ("-mhard-dfp can%'t be used in conjunction with -msoft-float");
1658
1659 target_flags &= ~MASK_HARD_DFP;
1660 }
1661
1662 /* Set processor cost function. */
1663 switch (s390_tune)
1664 {
1665 case PROCESSOR_2084_Z990:
1666 s390_cost = &z990_cost;
1667 break;
1668 case PROCESSOR_2094_Z9_109:
1669 s390_cost = &z9_109_cost;
1670 break;
1671 case PROCESSOR_2097_Z10:
1672 s390_cost = &z10_cost;
break;
1673 case PROCESSOR_2817_Z196:
1674 s390_cost = &z196_cost;
1675 break;
1676 default:
1677 s390_cost = &z900_cost;
1678 }
1679
1680 if (TARGET_BACKCHAIN && TARGET_PACKED_STACK && TARGET_HARD_FLOAT)
1681 error ("-mbackchain -mpacked-stack -mhard-float are not supported "
1682 "in combination");
1683
1684 if (s390_stack_size)
1685 {
1686 if (s390_stack_guard >= s390_stack_size)
1687 error ("stack size must be greater than the stack guard value");
1688 else if (s390_stack_size > 1 << 16)
1689 error ("stack size must not be greater than 64k");
1690 }
1691 else if (s390_stack_guard)
1692 error ("-mstack-guard implies use of -mstack-size");
1693
1694 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
1695 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
1696 target_flags |= MASK_LONG_DOUBLE_128;
1697 #endif
1698
1699 if (s390_tune == PROCESSOR_2097_Z10
1700 || s390_tune == PROCESSOR_2817_Z196)
1701 {
1702 maybe_set_param_value (PARAM_MAX_UNROLLED_INSNS, 100,
1703 global_options.x_param_values,
1704 global_options_set.x_param_values);
1705 maybe_set_param_value (PARAM_MAX_UNROLL_TIMES, 32,
1706 global_options.x_param_values,
1707 global_options_set.x_param_values);
1708 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 2000,
1709 global_options.x_param_values,
1710 global_options_set.x_param_values);
1711 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEEL_TIMES, 64,
1712 global_options.x_param_values,
1713 global_options_set.x_param_values);
1714 }
1715
1716 maybe_set_param_value (PARAM_MAX_PENDING_LIST_LENGTH, 256,
1717 global_options.x_param_values,
1718 global_options_set.x_param_values);
1719 /* Values for loop prefetching. */
1720 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, 256,
1721 global_options.x_param_values,
1722 global_options_set.x_param_values);
1723 maybe_set_param_value (PARAM_L1_CACHE_SIZE, 128,
1724 global_options.x_param_values,
1725 global_options_set.x_param_values);
1726 /* s390 has more than 2 levels and the size is much larger. Since
1727 we are always running virtualized, assume that we only get a small
1728 part of the caches above l1. */
1729 maybe_set_param_value (PARAM_L2_CACHE_SIZE, 1500,
1730 global_options.x_param_values,
1731 global_options_set.x_param_values);
1732 maybe_set_param_value (PARAM_PREFETCH_MIN_INSN_TO_MEM_RATIO, 2,
1733 global_options.x_param_values,
1734 global_options_set.x_param_values);
1735 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6,
1736 global_options.x_param_values,
1737 global_options_set.x_param_values);
1738
1739 /* This cannot reside in s390_option_optimization_table since HAVE_prefetch
1740 requires the arch flags to be evaluated already. Since prefetching
1741 is beneficial on s390, we enable it if available. */
1742 if (flag_prefetch_loop_arrays < 0 && HAVE_prefetch && optimize >= 3)
1743 flag_prefetch_loop_arrays = 1;
1744 }
1745
1746 /* Map for smallest class containing reg regno. */
1747
1748 const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
1749 { GENERAL_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1750 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1751 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1752 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1753 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1754 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1755 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1756 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1757 ADDR_REGS, CC_REGS, ADDR_REGS, ADDR_REGS,
1758 ACCESS_REGS, ACCESS_REGS
1759 };
1760
1761 /* Return attribute type of insn. */
1762
1763 static enum attr_type
1764 s390_safe_attr_type (rtx insn)
1765 {
1766 if (recog_memoized (insn) >= 0)
1767 return get_attr_type (insn);
1768 else
1769 return TYPE_NONE;
1770 }
1771
1772 /* Return true if DISP is a valid short displacement. */
1773
1774 static bool
1775 s390_short_displacement (rtx disp)
1776 {
1777 /* No displacement is OK. */
1778 if (!disp)
1779 return true;
1780
1781 /* Without the long displacement facility we don't need to
1782 distinguish between long and short displacement. */
1783 if (!TARGET_LONG_DISPLACEMENT)
1784 return true;
1785
1786 /* Integer displacement in range. */
1787 if (GET_CODE (disp) == CONST_INT)
1788 return INTVAL (disp) >= 0 && INTVAL (disp) < 4096;
1789
1790 /* GOT offset is not OK, the GOT can be large. */
1791 if (GET_CODE (disp) == CONST
1792 && GET_CODE (XEXP (disp, 0)) == UNSPEC
1793 && (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
1794 || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
1795 return false;
1796
1797 /* All other symbolic constants are literal pool references,
1798 which are OK as the literal pool must be small. */
1799 if (GET_CODE (disp) == CONST)
1800 return true;
1801
1802 return false;
1803 }
1804
1805 /* Decompose a RTL expression ADDR for a memory address into
1806 its components, returned in OUT.
1807
1808 Returns false if ADDR is not a valid memory address, true
1809 otherwise. If OUT is NULL, don't return the components,
1810 but check for validity only.
1811
1812 Note: Only addresses in canonical form are recognized.
1813 LEGITIMIZE_ADDRESS should convert non-canonical forms to the
1814 canonical form so that they will be recognized. */
1815
1816 static int
1817 s390_decompose_address (rtx addr, struct s390_address *out)
1818 {
1819 HOST_WIDE_INT offset = 0;
1820 rtx base = NULL_RTX;
1821 rtx indx = NULL_RTX;
1822 rtx disp = NULL_RTX;
1823 rtx orig_disp;
1824 bool pointer = false;
1825 bool base_ptr = false;
1826 bool indx_ptr = false;
1827 bool literal_pool = false;
1828
1829 /* We may need to substitute the literal pool base register into the address
1830 below. However, at this point we do not know which register is going to
1831 be used as base, so we substitute the arg pointer register. This is going
1832 to be treated as holding a pointer below -- it shouldn't be used for any
1833 other purpose. */
1834 rtx fake_pool_base = gen_rtx_REG (Pmode, ARG_POINTER_REGNUM);
1835
1836 /* Decompose address into base + index + displacement. */
1837
1838 if (GET_CODE (addr) == REG || GET_CODE (addr) == UNSPEC)
1839 base = addr;
1840
1841 else if (GET_CODE (addr) == PLUS)
1842 {
1843 rtx op0 = XEXP (addr, 0);
1844 rtx op1 = XEXP (addr, 1);
1845 enum rtx_code code0 = GET_CODE (op0);
1846 enum rtx_code code1 = GET_CODE (op1);
1847
1848 if (code0 == REG || code0 == UNSPEC)
1849 {
1850 if (code1 == REG || code1 == UNSPEC)
1851 {
1852 indx = op0; /* index + base */
1853 base = op1;
1854 }
1855
1856 else
1857 {
1858 base = op0; /* base + displacement */
1859 disp = op1;
1860 }
1861 }
1862
1863 else if (code0 == PLUS)
1864 {
1865 indx = XEXP (op0, 0); /* index + base + disp */
1866 base = XEXP (op0, 1);
1867 disp = op1;
1868 }
1869
1870 else
1871 {
1872 return false;
1873 }
1874 }
1875
1876 else
1877 disp = addr; /* displacement */
1878
1879 /* Extract integer part of displacement. */
1880 orig_disp = disp;
1881 if (disp)
1882 {
1883 if (GET_CODE (disp) == CONST_INT)
1884 {
1885 offset = INTVAL (disp);
1886 disp = NULL_RTX;
1887 }
1888 else if (GET_CODE (disp) == CONST
1889 && GET_CODE (XEXP (disp, 0)) == PLUS
1890 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
1891 {
1892 offset = INTVAL (XEXP (XEXP (disp, 0), 1));
1893 disp = XEXP (XEXP (disp, 0), 0);
1894 }
1895 }
1896
1897 /* Strip off CONST here to avoid special case tests later. */
1898 if (disp && GET_CODE (disp) == CONST)
1899 disp = XEXP (disp, 0);
1900
1901 /* We can convert literal pool addresses to
1902 displacements by basing them off the base register. */
1903 if (disp && GET_CODE (disp) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (disp))
1904 {
1905 /* Either base or index must be free to hold the base register. */
1906 if (!base)
1907 base = fake_pool_base, literal_pool = true;
1908 else if (!indx)
1909 indx = fake_pool_base, literal_pool = true;
1910 else
1911 return false;
1912
1913 /* Mark up the displacement. */
1914 disp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, disp),
1915 UNSPEC_LTREL_OFFSET);
1916 }
1917
1918 /* Validate base register. */
1919 if (base)
1920 {
1921 if (GET_CODE (base) == UNSPEC)
1922 switch (XINT (base, 1))
1923 {
1924 case UNSPEC_LTREF:
1925 if (!disp)
1926 disp = gen_rtx_UNSPEC (Pmode,
1927 gen_rtvec (1, XVECEXP (base, 0, 0)),
1928 UNSPEC_LTREL_OFFSET);
1929 else
1930 return false;
1931
1932 base = XVECEXP (base, 0, 1);
1933 break;
1934
1935 case UNSPEC_LTREL_BASE:
1936 if (XVECLEN (base, 0) == 1)
1937 base = fake_pool_base, literal_pool = true;
1938 else
1939 base = XVECEXP (base, 0, 1);
1940 break;
1941
1942 default:
1943 return false;
1944 }
1945
1946 if (!REG_P (base)
1947 || (GET_MODE (base) != SImode
1948 && GET_MODE (base) != Pmode))
1949 return false;
1950
1951 if (REGNO (base) == STACK_POINTER_REGNUM
1952 || REGNO (base) == FRAME_POINTER_REGNUM
1953 || ((reload_completed || reload_in_progress)
1954 && frame_pointer_needed
1955 && REGNO (base) == HARD_FRAME_POINTER_REGNUM)
1956 || REGNO (base) == ARG_POINTER_REGNUM
1957 || (flag_pic
1958 && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
1959 pointer = base_ptr = true;
1960
1961 if ((reload_completed || reload_in_progress)
1962 && base == cfun->machine->base_reg)
1963 pointer = base_ptr = literal_pool = true;
1964 }
1965
1966 /* Validate index register. */
1967 if (indx)
1968 {
1969 if (GET_CODE (indx) == UNSPEC)
1970 switch (XINT (indx, 1))
1971 {
1972 case UNSPEC_LTREF:
1973 if (!disp)
1974 disp = gen_rtx_UNSPEC (Pmode,
1975 gen_rtvec (1, XVECEXP (indx, 0, 0)),
1976 UNSPEC_LTREL_OFFSET);
1977 else
1978 return false;
1979
1980 indx = XVECEXP (indx, 0, 1);
1981 break;
1982
1983 case UNSPEC_LTREL_BASE:
1984 if (XVECLEN (indx, 0) == 1)
1985 indx = fake_pool_base, literal_pool = true;
1986 else
1987 indx = XVECEXP (indx, 0, 1);
1988 break;
1989
1990 default:
1991 return false;
1992 }
1993
1994 if (!REG_P (indx)
1995 || (GET_MODE (indx) != SImode
1996 && GET_MODE (indx) != Pmode))
1997 return false;
1998
1999 if (REGNO (indx) == STACK_POINTER_REGNUM
2000 || REGNO (indx) == FRAME_POINTER_REGNUM
2001 || ((reload_completed || reload_in_progress)
2002 && frame_pointer_needed
2003 && REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
2004 || REGNO (indx) == ARG_POINTER_REGNUM
2005 || (flag_pic
2006 && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
2007 pointer = indx_ptr = true;
2008
2009 if ((reload_completed || reload_in_progress)
2010 && indx == cfun->machine->base_reg)
2011 pointer = indx_ptr = literal_pool = true;
2012 }
2013
2014 /* Prefer to use pointer as base, not index. */
2015 if (base && indx && !base_ptr
2016 && (indx_ptr || (!REG_POINTER (base) && REG_POINTER (indx))))
2017 {
2018 rtx tmp = base;
2019 base = indx;
2020 indx = tmp;
2021 }
2022
2023 /* Validate displacement. */
2024 if (!disp)
2025 {
2026 /* If virtual registers are involved, the displacement will change later
2027 anyway as the virtual registers get eliminated. This could make a
2028 valid displacement invalid, but it is more likely to make an invalid
2029 displacement valid, because we sometimes access the register save area
2030 via negative offsets to one of those registers.
2031 Thus we don't check the displacement for validity here. If after
2032 elimination the displacement turns out to be invalid after all,
2033 this is fixed up by reload in any case. */
2034 if (base != arg_pointer_rtx
2035 && indx != arg_pointer_rtx
2036 && base != return_address_pointer_rtx
2037 && indx != return_address_pointer_rtx
2038 && base != frame_pointer_rtx
2039 && indx != frame_pointer_rtx
2040 && base != virtual_stack_vars_rtx
2041 && indx != virtual_stack_vars_rtx)
2042 if (!DISP_IN_RANGE (offset))
2043 return false;
2044 }
2045 else
2046 {
2047 /* All the special cases are pointers. */
2048 pointer = true;
2049
2050 /* In the small-PIC case, the linker converts @GOT
2051 and @GOTNTPOFF offsets to possible displacements. */
2052 if (GET_CODE (disp) == UNSPEC
2053 && (XINT (disp, 1) == UNSPEC_GOT
2054 || XINT (disp, 1) == UNSPEC_GOTNTPOFF)
2055 && flag_pic == 1)
2056 {
2057 ;
2058 }
2059
2060 /* Accept pool label offsets. */
2061 else if (GET_CODE (disp) == UNSPEC
2062 && XINT (disp, 1) == UNSPEC_POOL_OFFSET)
2063 ;
2064
2065 /* Accept literal pool references. */
2066 else if (GET_CODE (disp) == UNSPEC
2067 && XINT (disp, 1) == UNSPEC_LTREL_OFFSET)
2068 {
2069 orig_disp = gen_rtx_CONST (Pmode, disp);
2070 if (offset)
2071 {
2072 /* If we have an offset, make sure it does not
2073 exceed the size of the constant pool entry. */
2074 rtx sym = XVECEXP (disp, 0, 0);
2075 if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))
2076 return false;
2077
2078 orig_disp = plus_constant (orig_disp, offset);
2079 }
2080 }
2081
2082 else
2083 return false;
2084 }
2085
2086 if (!base && !indx)
2087 pointer = true;
2088
2089 if (out)
2090 {
2091 out->base = base;
2092 out->indx = indx;
2093 out->disp = orig_disp;
2094 out->pointer = pointer;
2095 out->literal_pool = literal_pool;
2096 }
2097
2098 return true;
2099 }
2100
2101 /* Decompose a RTL expression OP for a shift count into its components,
2102 and return the base register in BASE and the offset in OFFSET.
2103
2104 Return true if OP is a valid shift count, false if not. */
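/* For illustration, the forms accepted below are a plain CONST_INT, a
   (possibly SUBREG-wrapped) register, or the sum of the two, e.g.
   (plus (reg R) (const_int 3)) with R a placeholder register.  */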
2105
2106 bool
2107 s390_decompose_shift_count (rtx op, rtx *base, HOST_WIDE_INT *offset)
2108 {
2109 HOST_WIDE_INT off = 0;
2110
2111 /* We can have an integer constant, an address register,
2112 or a sum of the two. */
2113 if (GET_CODE (op) == CONST_INT)
2114 {
2115 off = INTVAL (op);
2116 op = NULL_RTX;
2117 }
2118 if (op && GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT)
2119 {
2120 off = INTVAL (XEXP (op, 1));
2121 op = XEXP (op, 0);
2122 }
2123 while (op && GET_CODE (op) == SUBREG)
2124 op = SUBREG_REG (op);
2125
2126 if (op && GET_CODE (op) != REG)
2127 return false;
2128
2129 if (offset)
2130 *offset = off;
2131 if (base)
2132 *base = op;
2133
2134 return true;
2135 }
2136
2137
2138 /* Return true if OP is a valid address without an index register.  */
2139
2140 bool
2141 s390_legitimate_address_without_index_p (rtx op)
2142 {
2143 struct s390_address addr;
2144
2145 if (!s390_decompose_address (XEXP (op, 0), &addr))
2146 return false;
2147 if (addr.indx)
2148 return false;
2149
2150 return true;
2151 }
2152
2153
2154 /* Return true if ADDR is of kind symbol_ref or symbol_ref + const_int
2155 and return these parts in SYMREF and ADDEND. You can pass NULL in
2156 SYMREF and/or ADDEND if you are not interested in these values.
2157 Literal pool references are *not* considered symbol references. */
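/* For example, (const (plus (symbol_ref "x") (const_int 8))) yields
   *SYMREF = (symbol_ref "x") and *ADDEND = 8, while a plain symbol_ref
   yields an addend of 0 ("x" being just an illustrative symbol).  */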
2158
2159 static bool
2160 s390_symref_operand_p (rtx addr, rtx *symref, HOST_WIDE_INT *addend)
2161 {
2162 HOST_WIDE_INT tmpaddend = 0;
2163
2164 if (GET_CODE (addr) == CONST)
2165 addr = XEXP (addr, 0);
2166
2167 if (GET_CODE (addr) == PLUS)
2168 {
2169 if (GET_CODE (XEXP (addr, 0)) == SYMBOL_REF
2170 && !CONSTANT_POOL_ADDRESS_P (XEXP (addr, 0))
2171 && CONST_INT_P (XEXP (addr, 1)))
2172 {
2173 tmpaddend = INTVAL (XEXP (addr, 1));
2174 addr = XEXP (addr, 0);
2175 }
2176 else
2177 return false;
2178 }
2179 else
2180 if (GET_CODE (addr) != SYMBOL_REF || CONSTANT_POOL_ADDRESS_P (addr))
2181 return false;
2182
2183 if (symref)
2184 *symref = addr;
2185 if (addend)
2186 *addend = tmpaddend;
2187
2188 return true;
2189 }
2190
2191
2192 /* Return true if the address in OP is valid for constraint letter C
2193 when wrapped in a MEM rtx.  Set LIT_POOL_OK to true if literal
2194 pool MEMs should be accepted.  Only the Q, R, S, T constraint
2195 letters are allowed for C.  */
2196
2197 static int
2198 s390_check_qrst_address (char c, rtx op, bool lit_pool_ok)
2199 {
2200 struct s390_address addr;
2201 bool decomposed = false;
2202
2203 /* This check makes sure that no symbolic addresses (except literal
2204 pool references) are accepted by the R or T constraints.  */
2205 if (s390_symref_operand_p (op, NULL, NULL))
2206 return 0;
2207
2208 /* Ensure literal pool references are only accepted if LIT_POOL_OK. */
2209 if (!lit_pool_ok)
2210 {
2211 if (!s390_decompose_address (op, &addr))
2212 return 0;
2213 if (addr.literal_pool)
2214 return 0;
2215 decomposed = true;
2216 }
2217
2218 switch (c)
2219 {
2220 case 'Q': /* no index short displacement */
2221 if (!decomposed && !s390_decompose_address (op, &addr))
2222 return 0;
2223 if (addr.indx)
2224 return 0;
2225 if (!s390_short_displacement (addr.disp))
2226 return 0;
2227 break;
2228
2229 case 'R': /* with index short displacement */
2230 if (TARGET_LONG_DISPLACEMENT)
2231 {
2232 if (!decomposed && !s390_decompose_address (op, &addr))
2233 return 0;
2234 if (!s390_short_displacement (addr.disp))
2235 return 0;
2236 }
2237 /* Any invalid address here will be fixed up by reload,
2238 so accept it for the most generic constraint. */
2239 break;
2240
2241 case 'S': /* no index long displacement */
2242 if (!TARGET_LONG_DISPLACEMENT)
2243 return 0;
2244 if (!decomposed && !s390_decompose_address (op, &addr))
2245 return 0;
2246 if (addr.indx)
2247 return 0;
2248 if (s390_short_displacement (addr.disp))
2249 return 0;
2250 break;
2251
2252 case 'T': /* with index long displacement */
2253 if (!TARGET_LONG_DISPLACEMENT)
2254 return 0;
2255 /* Any invalid address here will be fixed up by reload,
2256 so accept it for the most generic constraint. */
2257 if ((decomposed || s390_decompose_address (op, &addr))
2258 && s390_short_displacement (addr.disp))
2259 return 0;
2260 break;
2261 default:
2262 return 0;
2263 }
2264 return 1;
2265 }
2266
2267
2268 /* Evaluates constraint strings described by the regular expression
2269 ([A|B|Z](Q|R|S|T))|U|W|Y and returns 1 if OP is a valid operand for
2270 the constraint given in STR, and 0 otherwise.  */
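/* For instance, "AQ" first checks that OP is an offsettable MEM and then
   validates its address against the Q form (no index, short displacement),
   while "BR" rejects literal pool addresses before applying the R check.  */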
2271
2272 int
2273 s390_mem_constraint (const char *str, rtx op)
2274 {
2275 char c = str[0];
2276
2277 switch (c)
2278 {
2279 case 'A':
2280 /* Check for offsettable variants of memory constraints. */
2281 if (!MEM_P (op) || MEM_VOLATILE_P (op))
2282 return 0;
2283 if ((reload_completed || reload_in_progress)
2284 ? !offsettable_memref_p (op) : !offsettable_nonstrict_memref_p (op))
2285 return 0;
2286 return s390_check_qrst_address (str[1], XEXP (op, 0), true);
2287 case 'B':
2288 /* Check for non-literal-pool variants of memory constraints. */
2289 if (!MEM_P (op))
2290 return 0;
2291 return s390_check_qrst_address (str[1], XEXP (op, 0), false);
2292 case 'Q':
2293 case 'R':
2294 case 'S':
2295 case 'T':
2296 if (GET_CODE (op) != MEM)
2297 return 0;
2298 return s390_check_qrst_address (c, XEXP (op, 0), true);
2299 case 'U':
2300 return (s390_check_qrst_address ('Q', op, true)
2301 || s390_check_qrst_address ('R', op, true));
2302 case 'W':
2303 return (s390_check_qrst_address ('S', op, true)
2304 || s390_check_qrst_address ('T', op, true));
2305 case 'Y':
2306 /* Simply check for the basic form of a shift count. Reload will
2307 take care of making sure we have a proper base register. */
2308 if (!s390_decompose_shift_count (op, NULL, NULL))
2309 return 0;
2310 break;
2311 case 'Z':
2312 return s390_check_qrst_address (str[1], op, true);
2313 default:
2314 return 0;
2315 }
2316 return 1;
2317 }
2318
2319
2320 /* Evaluates constraint strings starting with letter O. Input
2321 parameter C is the second letter following the "O" in the constraint
2322 string. Returns 1 if VALUE meets the respective constraint and 0
2323 otherwise. */
2324
2325 int
2326 s390_O_constraint_str (const char c, HOST_WIDE_INT value)
2327 {
2328 if (!TARGET_EXTIMM)
2329 return 0;
2330
2331 switch (c)
2332 {
2333 case 's':
2334 return trunc_int_for_mode (value, SImode) == value;
2335
2336 case 'p':
2337 return value == 0
2338 || s390_single_part (GEN_INT (value), DImode, SImode, 0) == 1;
2339
2340 case 'n':
2341 return s390_single_part (GEN_INT (value - 1), DImode, SImode, -1) == 1;
2342
2343 default:
2344 gcc_unreachable ();
2345 }
2346 }
2347
2348
2349 /* Evaluates constraint strings starting with letter N. Parameter STR
2350 contains the letters following letter "N" in the constraint string.
2351 Returns true if VALUE matches the constraint. */
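/* The letters in STR encode, in order: which part may differ ('x' for any
   part, or a digit selecting one), the mode of that part (Q, H or S), the
   mode of the whole value (H, S or D), and the value of the remaining
   parts ('0' or 'F').  Roughly, "xQD0" matches a DImode value in which a
   single QImode part is nonzero and all other parts are zero.  */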
2352
2353 int
2354 s390_N_constraint_str (const char *str, HOST_WIDE_INT value)
2355 {
2356 enum machine_mode mode, part_mode;
2357 int def;
2358 int part, part_goal;
2359
2360
2361 if (str[0] == 'x')
2362 part_goal = -1;
2363 else
2364 part_goal = str[0] - '0';
2365
2366 switch (str[1])
2367 {
2368 case 'Q':
2369 part_mode = QImode;
2370 break;
2371 case 'H':
2372 part_mode = HImode;
2373 break;
2374 case 'S':
2375 part_mode = SImode;
2376 break;
2377 default:
2378 return 0;
2379 }
2380
2381 switch (str[2])
2382 {
2383 case 'H':
2384 mode = HImode;
2385 break;
2386 case 'S':
2387 mode = SImode;
2388 break;
2389 case 'D':
2390 mode = DImode;
2391 break;
2392 default:
2393 return 0;
2394 }
2395
2396 switch (str[3])
2397 {
2398 case '0':
2399 def = 0;
2400 break;
2401 case 'F':
2402 def = -1;
2403 break;
2404 default:
2405 return 0;
2406 }
2407
2408 if (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (part_mode))
2409 return 0;
2410
2411 part = s390_single_part (GEN_INT (value), mode, part_mode, def);
2412 if (part < 0)
2413 return 0;
2414 if (part_goal != -1 && part_goal != part)
2415 return 0;
2416
2417 return 1;
2418 }
2419
2420
2421 /* Returns true if the input parameter VALUE is a float zero. */
2422
2423 int
2424 s390_float_const_zero_p (rtx value)
2425 {
2426 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
2427 && value == CONST0_RTX (GET_MODE (value)));
2428 }
2429
2430
2431 /* Compute a (partial) cost for rtx X. Return true if the complete
2432 cost has been computed, and false if subexpressions should be
2433 scanned. In either case, *TOTAL contains the cost result.
2434 CODE contains GET_CODE (x), OUTER_CODE contains the code
2435 of the superexpression of x. */
2436
2437 static bool
2438 s390_rtx_costs (rtx x, int code, int outer_code, int *total,
2439 bool speed ATTRIBUTE_UNUSED)
2440 {
2441 switch (code)
2442 {
2443 case CONST:
2444 case CONST_INT:
2445 case LABEL_REF:
2446 case SYMBOL_REF:
2447 case CONST_DOUBLE:
2448 case MEM:
2449 *total = 0;
2450 return true;
2451
2452 case ASHIFT:
2453 case ASHIFTRT:
2454 case LSHIFTRT:
2455 case ROTATE:
2456 case ROTATERT:
2457 case AND:
2458 case IOR:
2459 case XOR:
2460 case NEG:
2461 case NOT:
2462 *total = COSTS_N_INSNS (1);
2463 return false;
2464
2465 case PLUS:
2466 case MINUS:
2467 *total = COSTS_N_INSNS (1);
2468 return false;
2469
2470 case MULT:
2471 switch (GET_MODE (x))
2472 {
2473 case SImode:
2474 {
2475 rtx left = XEXP (x, 0);
2476 rtx right = XEXP (x, 1);
2477 if (GET_CODE (right) == CONST_INT
2478 && CONST_OK_FOR_K (INTVAL (right)))
2479 *total = s390_cost->mhi;
2480 else if (GET_CODE (left) == SIGN_EXTEND)
2481 *total = s390_cost->mh;
2482 else
2483 *total = s390_cost->ms; /* msr, ms, msy */
2484 break;
2485 }
2486 case DImode:
2487 {
2488 rtx left = XEXP (x, 0);
2489 rtx right = XEXP (x, 1);
2490 if (TARGET_ZARCH)
2491 {
2492 if (GET_CODE (right) == CONST_INT
2493 && CONST_OK_FOR_K (INTVAL (right)))
2494 *total = s390_cost->mghi;
2495 else if (GET_CODE (left) == SIGN_EXTEND)
2496 *total = s390_cost->msgf;
2497 else
2498 *total = s390_cost->msg; /* msgr, msg */
2499 }
2500 else /* TARGET_31BIT */
2501 {
2502 if (GET_CODE (left) == SIGN_EXTEND
2503 && GET_CODE (right) == SIGN_EXTEND)
2504 /* mulsidi case: mr, m */
2505 *total = s390_cost->m;
2506 else if (GET_CODE (left) == ZERO_EXTEND
2507 && GET_CODE (right) == ZERO_EXTEND
2508 && TARGET_CPU_ZARCH)
2509 /* umulsidi case: ml, mlr */
2510 *total = s390_cost->ml;
2511 else
2512 /* Complex calculation is required. */
2513 *total = COSTS_N_INSNS (40);
2514 }
2515 break;
2516 }
2517 case SFmode:
2518 case DFmode:
2519 *total = s390_cost->mult_df;
2520 break;
2521 case TFmode:
2522 *total = s390_cost->mxbr;
2523 break;
2524 default:
2525 return false;
2526 }
2527 return false;
2528
2529 case FMA:
2530 switch (GET_MODE (x))
2531 {
2532 case DFmode:
2533 *total = s390_cost->madbr;
2534 break;
2535 case SFmode:
2536 *total = s390_cost->maebr;
2537 break;
2538 default:
2539 return false;
2540 }
2541 /* Negation of the third argument is free: FMSUB.  */
2542 if (GET_CODE (XEXP (x, 2)) == NEG)
2543 {
2544 *total += (rtx_cost (XEXP (x, 0), FMA, speed)
2545 + rtx_cost (XEXP (x, 1), FMA, speed)
2546 + rtx_cost (XEXP (XEXP (x, 2), 0), FMA, speed));
2547 return true;
2548 }
2549 return false;
2550
2551 case UDIV:
2552 case UMOD:
2553 if (GET_MODE (x) == TImode) /* 128 bit division */
2554 *total = s390_cost->dlgr;
2555 else if (GET_MODE (x) == DImode)
2556 {
2557 rtx right = XEXP (x, 1);
2558 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
2559 *total = s390_cost->dlr;
2560 else /* 64 by 64 bit division */
2561 *total = s390_cost->dlgr;
2562 }
2563 else if (GET_MODE (x) == SImode) /* 32 bit division */
2564 *total = s390_cost->dlr;
2565 return false;
2566
2567 case DIV:
2568 case MOD:
2569 if (GET_MODE (x) == DImode)
2570 {
2571 rtx right = XEXP (x, 1);
2572 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
2573 if (TARGET_ZARCH)
2574 *total = s390_cost->dsgfr;
2575 else
2576 *total = s390_cost->dr;
2577 else /* 64 by 64 bit division */
2578 *total = s390_cost->dsgr;
2579 }
2580 else if (GET_MODE (x) == SImode) /* 32 bit division */
2581 *total = s390_cost->dlr;
2582 else if (GET_MODE (x) == SFmode)
2583 {
2584 *total = s390_cost->debr;
2585 }
2586 else if (GET_MODE (x) == DFmode)
2587 {
2588 *total = s390_cost->ddbr;
2589 }
2590 else if (GET_MODE (x) == TFmode)
2591 {
2592 *total = s390_cost->dxbr;
2593 }
2594 return false;
2595
2596 case SQRT:
2597 if (GET_MODE (x) == SFmode)
2598 *total = s390_cost->sqebr;
2599 else if (GET_MODE (x) == DFmode)
2600 *total = s390_cost->sqdbr;
2601 else /* TFmode */
2602 *total = s390_cost->sqxbr;
2603 return false;
2604
2605 case SIGN_EXTEND:
2606 case ZERO_EXTEND:
2607 if (outer_code == MULT || outer_code == DIV || outer_code == MOD
2608 || outer_code == PLUS || outer_code == MINUS
2609 || outer_code == COMPARE)
2610 *total = 0;
2611 return false;
2612
2613 case COMPARE:
2614 *total = COSTS_N_INSNS (1);
2615 if (GET_CODE (XEXP (x, 0)) == AND
2616 && GET_CODE (XEXP (x, 1)) == CONST_INT
2617 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
2618 {
2619 rtx op0 = XEXP (XEXP (x, 0), 0);
2620 rtx op1 = XEXP (XEXP (x, 0), 1);
2621 rtx op2 = XEXP (x, 1);
2622
2623 if (memory_operand (op0, GET_MODE (op0))
2624 && s390_tm_ccmode (op1, op2, 0) != VOIDmode)
2625 return true;
2626 if (register_operand (op0, GET_MODE (op0))
2627 && s390_tm_ccmode (op1, op2, 1) != VOIDmode)
2628 return true;
2629 }
2630 return false;
2631
2632 default:
2633 return false;
2634 }
2635 }
2636
2637 /* Return the cost of an address rtx ADDR. */
2638
2639 static int
2640 s390_address_cost (rtx addr, bool speed ATTRIBUTE_UNUSED)
2641 {
2642 struct s390_address ad;
2643 if (!s390_decompose_address (addr, &ad))
2644 return 1000;
2645
2646 return ad.indx? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (1);
2647 }
2648
2649 /* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
2650 otherwise return 0. */
2651
2652 int
2653 tls_symbolic_operand (rtx op)
2654 {
2655 if (GET_CODE (op) != SYMBOL_REF)
2656 return 0;
2657 return SYMBOL_REF_TLS_MODEL (op);
2658 }
2659 \f
2660 /* Split DImode access register reference REG (on 64-bit) into its constituent
2661 low and high parts, and store them into LO and HI. Note that gen_lowpart/
2662 gen_highpart cannot be used as they assume all registers are word-sized,
2663 while our access registers have only half that size. */
2664
2665 void
2666 s390_split_access_reg (rtx reg, rtx *lo, rtx *hi)
2667 {
2668 gcc_assert (TARGET_64BIT);
2669 gcc_assert (ACCESS_REG_P (reg));
2670 gcc_assert (GET_MODE (reg) == DImode);
2671 gcc_assert (!(REGNO (reg) & 1));
2672
2673 *lo = gen_rtx_REG (SImode, REGNO (reg) + 1);
2674 *hi = gen_rtx_REG (SImode, REGNO (reg));
2675 }
2676
2677 /* Return true if OP contains a symbol reference.  */
2678
2679 bool
2680 symbolic_reference_mentioned_p (rtx op)
2681 {
2682 const char *fmt;
2683 int i;
2684
2685 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
2686 return 1;
2687
2688 fmt = GET_RTX_FORMAT (GET_CODE (op));
2689 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2690 {
2691 if (fmt[i] == 'E')
2692 {
2693 int j;
2694
2695 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2696 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2697 return 1;
2698 }
2699
2700 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
2701 return 1;
2702 }
2703
2704 return 0;
2705 }
2706
2707 /* Return true if OP contains a reference to a thread-local symbol. */
2708
2709 bool
2710 tls_symbolic_reference_mentioned_p (rtx op)
2711 {
2712 const char *fmt;
2713 int i;
2714
2715 if (GET_CODE (op) == SYMBOL_REF)
2716 return tls_symbolic_operand (op);
2717
2718 fmt = GET_RTX_FORMAT (GET_CODE (op));
2719 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2720 {
2721 if (fmt[i] == 'E')
2722 {
2723 int j;
2724
2725 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2726 if (tls_symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2727 return true;
2728 }
2729
2730 else if (fmt[i] == 'e' && tls_symbolic_reference_mentioned_p (XEXP (op, i)))
2731 return true;
2732 }
2733
2734 return false;
2735 }
2736
2737
2738 /* Return true if OP is a legitimate general operand when
2739 generating PIC code. It is given that flag_pic is on
2740 and that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2741
2742 int
2743 legitimate_pic_operand_p (rtx op)
2744 {
2745 /* Accept all non-symbolic constants. */
2746 if (!SYMBOLIC_CONST (op))
2747 return 1;
2748
2749 /* Reject everything else; must be handled
2750 via emit_symbolic_move. */
2751 return 0;
2752 }
2753
2754 /* Returns true if the constant value OP is a legitimate general operand.
2755 It is given that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2756
2757 int
2758 legitimate_constant_p (rtx op)
2759 {
2760 /* Accept all non-symbolic constants. */
2761 if (!SYMBOLIC_CONST (op))
2762 return 1;
2763
2764 /* Accept immediate LARL operands. */
2765 if (TARGET_CPU_ZARCH && larl_operand (op, VOIDmode))
2766 return 1;
2767
2768 /* Thread-local symbols are never legal constants. This is
2769 so that emit_call knows that computing such addresses
2770 might require a function call. */
2771 if (TLS_SYMBOLIC_CONST (op))
2772 return 0;
2773
2774 /* In the PIC case, symbolic constants must *not* be
2775 forced into the literal pool. We accept them here,
2776 so that they will be handled by emit_symbolic_move. */
2777 if (flag_pic)
2778 return 1;
2779
2780 /* All remaining non-PIC symbolic constants are
2781 forced into the literal pool. */
2782 return 0;
2783 }
2784
2785 /* Determine if it's legal to put X into the constant pool. This
2786 is not possible if X contains the address of a symbol that is
2787 not constant (TLS) or not known at final link time (PIC). */
2788
2789 static bool
2790 s390_cannot_force_const_mem (rtx x)
2791 {
2792 switch (GET_CODE (x))
2793 {
2794 case CONST_INT:
2795 case CONST_DOUBLE:
2796 /* Accept all non-symbolic constants. */
2797 return false;
2798
2799 case LABEL_REF:
2800 /* Labels are OK iff we are non-PIC. */
2801 return flag_pic != 0;
2802
2803 case SYMBOL_REF:
2804 /* 'Naked' TLS symbol references are never OK,
2805 non-TLS symbols are OK iff we are non-PIC. */
2806 if (tls_symbolic_operand (x))
2807 return true;
2808 else
2809 return flag_pic != 0;
2810
2811 case CONST:
2812 return s390_cannot_force_const_mem (XEXP (x, 0));
2813 case PLUS:
2814 case MINUS:
2815 return s390_cannot_force_const_mem (XEXP (x, 0))
2816 || s390_cannot_force_const_mem (XEXP (x, 1));
2817
2818 case UNSPEC:
2819 switch (XINT (x, 1))
2820 {
2821 /* Only lt-relative or GOT-relative UNSPECs are OK. */
2822 case UNSPEC_LTREL_OFFSET:
2823 case UNSPEC_GOT:
2824 case UNSPEC_GOTOFF:
2825 case UNSPEC_PLTOFF:
2826 case UNSPEC_TLSGD:
2827 case UNSPEC_TLSLDM:
2828 case UNSPEC_NTPOFF:
2829 case UNSPEC_DTPOFF:
2830 case UNSPEC_GOTNTPOFF:
2831 case UNSPEC_INDNTPOFF:
2832 return false;
2833
2834 /* If the literal pool shares the code section, execute template
2835 placeholders may be put into the pool as well.  */
2836 case UNSPEC_INSN:
2837 return TARGET_CPU_ZARCH;
2838
2839 default:
2840 return true;
2841 }
2842 break;
2843
2844 default:
2845 gcc_unreachable ();
2846 }
2847 }
2848
2849 /* Returns true if the constant value OP is a legitimate general
2850 operand during and after reload. The difference to
2851 legitimate_constant_p is that this function will not accept
2852 a constant that would need to be forced to the literal pool
2853 before it can be used as operand.
2854 This function accepts all constants which can be loaded directly
2855 into a GPR. */
2856
2857 bool
2858 legitimate_reload_constant_p (rtx op)
2859 {
2860 /* Accept la(y) operands. */
2861 if (GET_CODE (op) == CONST_INT
2862 && DISP_IN_RANGE (INTVAL (op)))
2863 return true;
2864
2865 /* Accept l(g)hi/l(g)fi operands. */
2866 if (GET_CODE (op) == CONST_INT
2867 && (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_Os (INTVAL (op))))
2868 return true;
2869
2870 /* Accept lliXX operands. */
2871 if (TARGET_ZARCH
2872 && GET_CODE (op) == CONST_INT
2873 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2874 && s390_single_part (op, word_mode, HImode, 0) >= 0)
2875 return true;
2876
2877 if (TARGET_EXTIMM
2878 && GET_CODE (op) == CONST_INT
2879 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2880 && s390_single_part (op, word_mode, SImode, 0) >= 0)
2881 return true;
2882
2883 /* Accept larl operands. */
2884 if (TARGET_CPU_ZARCH
2885 && larl_operand (op, VOIDmode))
2886 return true;
2887
2888 /* Accept floating-point zero operands that fit into a single GPR. */
2889 if (GET_CODE (op) == CONST_DOUBLE
2890 && s390_float_const_zero_p (op)
2891 && GET_MODE_SIZE (GET_MODE (op)) <= UNITS_PER_WORD)
2892 return true;
2893
2894 /* Accept double-word operands that can be split. */
2895 if (GET_CODE (op) == CONST_INT
2896 && trunc_int_for_mode (INTVAL (op), word_mode) != INTVAL (op))
2897 {
2898 enum machine_mode dword_mode = word_mode == SImode ? DImode : TImode;
2899 rtx hi = operand_subword (op, 0, 0, dword_mode);
2900 rtx lo = operand_subword (op, 1, 0, dword_mode);
2901 return legitimate_reload_constant_p (hi)
2902 && legitimate_reload_constant_p (lo);
2903 }
2904
2905 /* Everything else cannot be handled without reload. */
2906 return false;
2907 }
2908
2909 /* Returns true if the constant value OP is a legitimate fp operand
2910 during and after reload.
2911 This function accepts all constants which can be loaded directly
2912 into an FPR. */
2913
2914 static bool
2915 legitimate_reload_fp_constant_p (rtx op)
2916 {
2917 /* Accept floating-point zero operands if the load zero instruction
2918 can be used. */
2919 if (TARGET_Z196
2920 && GET_CODE (op) == CONST_DOUBLE
2921 && s390_float_const_zero_p (op))
2922 return true;
2923
2924 return false;
2925 }
2926
2927 /* Given an rtx OP being reloaded into a reg required to be in class RCLASS,
2928 return the class of reg to actually use. */
2929
2930 enum reg_class
2931 s390_preferred_reload_class (rtx op, enum reg_class rclass)
2932 {
2933 switch (GET_CODE (op))
2934 {
2935 /* Constants we cannot reload into general registers
2936 must be forced into the literal pool. */
2937 case CONST_DOUBLE:
2938 case CONST_INT:
2939 if (reg_class_subset_p (GENERAL_REGS, rclass)
2940 && legitimate_reload_constant_p (op))
2941 return GENERAL_REGS;
2942 else if (reg_class_subset_p (ADDR_REGS, rclass)
2943 && legitimate_reload_constant_p (op))
2944 return ADDR_REGS;
2945 else if (reg_class_subset_p (FP_REGS, rclass)
2946 && legitimate_reload_fp_constant_p (op))
2947 return FP_REGS;
2948 return NO_REGS;
2949
2950 /* If a symbolic constant or a PLUS is reloaded,
2951 it is most likely being used as an address, so
2952 prefer ADDR_REGS.  If RCLASS is not a superset
2953 of ADDR_REGS, e.g. FP_REGS, reject this reload.  */
2954 case PLUS:
2955 case LABEL_REF:
2956 case SYMBOL_REF:
2957 case CONST:
2958 if (reg_class_subset_p (ADDR_REGS, rclass))
2959 return ADDR_REGS;
2960 else
2961 return NO_REGS;
2962
2963 default:
2964 break;
2965 }
2966
2967 return rclass;
2968 }
2969
2970 /* Return true if ADDR is SYMBOL_REF + addend with addend being a
2971 multiple of ALIGNMENT and the SYMBOL_REF being naturally
2972 aligned. */
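/* E.g. with ALIGNMENT == 4, a naturally aligned symbol plus an addend of
   8 passes, while an addend of 6 fails the (addend & (alignment - 1))
   test below.  */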
2973
2974 bool
2975 s390_check_symref_alignment (rtx addr, HOST_WIDE_INT alignment)
2976 {
2977 HOST_WIDE_INT addend;
2978 rtx symref;
2979
2980 if (!s390_symref_operand_p (addr, &symref, &addend))
2981 return false;
2982
2983 return (!SYMBOL_REF_NOT_NATURALLY_ALIGNED_P (symref)
2984 && !(addend & (alignment - 1)));
2985 }
2986
2987 /* ADDR is moved into REG using larl.  If ADDR isn't a valid larl
2988 operand, SCRATCH is used to load the even part of the address,
2989 and one is then added.  */
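/* E.g. for an illustrative "sym + 5", larl cannot encode the odd offset
   directly, so the code below loads "sym + 4" into SCRATCH via larl and
   then adds one with la, leaving the result in REG.  */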
2990
2991 void
2992 s390_reload_larl_operand (rtx reg, rtx addr, rtx scratch)
2993 {
2994 HOST_WIDE_INT addend;
2995 rtx symref;
2996
2997 if (!s390_symref_operand_p (addr, &symref, &addend))
2998 gcc_unreachable ();
2999
3000 if (!(addend & 1))
3001 /* Easy case. The addend is even so larl will do fine. */
3002 emit_move_insn (reg, addr);
3003 else
3004 {
3005 /* We can leave the scratch register untouched if the target
3006 register is a valid base register. */
3007 if (REGNO (reg) < FIRST_PSEUDO_REGISTER
3008 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS)
3009 scratch = reg;
3010
3011 gcc_assert (REGNO (scratch) < FIRST_PSEUDO_REGISTER);
3012 gcc_assert (REGNO_REG_CLASS (REGNO (scratch)) == ADDR_REGS);
3013
3014 if (addend != 1)
3015 emit_move_insn (scratch,
3016 gen_rtx_CONST (Pmode,
3017 gen_rtx_PLUS (Pmode, symref,
3018 GEN_INT (addend - 1))));
3019 else
3020 emit_move_insn (scratch, symref);
3021
3022 /* Increment the address using la in order to avoid clobbering cc. */
3023 emit_move_insn (reg, gen_rtx_PLUS (Pmode, scratch, const1_rtx));
3024 }
3025 }
3026
3027 /* Generate what is necessary to move between REG and MEM using
3028 SCRATCH. The direction is given by TOMEM. */
3029
3030 void
3031 s390_reload_symref_address (rtx reg, rtx mem, rtx scratch, bool tomem)
3032 {
3033 /* Reload might have pulled a constant out of the literal pool.
3034 Force it back in. */
3035 if (CONST_INT_P (mem) || GET_CODE (mem) == CONST_DOUBLE
3036 || GET_CODE (mem) == CONST)
3037 mem = force_const_mem (GET_MODE (reg), mem);
3038
3039 gcc_assert (MEM_P (mem));
3040
3041 /* For a load from memory we can leave the scratch register
3042 untouched if the target register is a valid base register. */
3043 if (!tomem
3044 && REGNO (reg) < FIRST_PSEUDO_REGISTER
3045 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS
3046 && GET_MODE (reg) == GET_MODE (scratch))
3047 scratch = reg;
3048
3049 /* Load address into scratch register. Since we can't have a
3050 secondary reload for a secondary reload we have to cover the case
3051 where larl would need a secondary reload here as well. */
3052 s390_reload_larl_operand (scratch, XEXP (mem, 0), scratch);
3053
3054 /* Now we can use a standard load/store to do the move. */
3055 if (tomem)
3056 emit_move_insn (replace_equiv_address (mem, scratch), reg);
3057 else
3058 emit_move_insn (reg, replace_equiv_address (mem, scratch));
3059 }
3060
3061 /* Inform reload about cases where moving X with a mode MODE to a register in
3062 RCLASS requires an extra scratch or immediate register. Return the class
3063 needed for the immediate register. */
3064
3065 static reg_class_t
3066 s390_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
3067 enum machine_mode mode, secondary_reload_info *sri)
3068 {
3069 enum reg_class rclass = (enum reg_class) rclass_i;
3070
3071 /* Intermediate register needed. */
3072 if (reg_classes_intersect_p (CC_REGS, rclass))
3073 return GENERAL_REGS;
3074
3075 if (TARGET_Z10)
3076 {
3077 /* On z10 several optimizer steps may generate larl operands with
3078 an odd addend. */
3079 if (in_p
3080 && s390_symref_operand_p (x, NULL, NULL)
3081 && mode == Pmode
3082 && !s390_check_symref_alignment (x, 2))
3083 sri->icode = ((mode == DImode) ? CODE_FOR_reloaddi_larl_odd_addend_z10
3084 : CODE_FOR_reloadsi_larl_odd_addend_z10);
3085
3086 /* On z10 we need a scratch register when moving QI, TI or floating
3087 point mode values from or to a memory location with a SYMBOL_REF
3088 or if the symref addend of a SI or DI move is not aligned to the
3089 width of the access. */
3090 if (MEM_P (x)
3091 && s390_symref_operand_p (XEXP (x, 0), NULL, NULL)
3092 && (mode == QImode || mode == TImode || FLOAT_MODE_P (mode)
3093 || (!TARGET_ZARCH && mode == DImode)
3094 || ((mode == HImode || mode == SImode || mode == DImode)
3095 && (!s390_check_symref_alignment (XEXP (x, 0),
3096 GET_MODE_SIZE (mode))))))
3097 {
3098 #define __SECONDARY_RELOAD_CASE(M,m) \
3099 case M##mode: \
3100 if (TARGET_64BIT) \
3101 sri->icode = in_p ? CODE_FOR_reload##m##di_toreg_z10 : \
3102 CODE_FOR_reload##m##di_tomem_z10; \
3103 else \
3104 sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 : \
3105 CODE_FOR_reload##m##si_tomem_z10; \
3106 break;
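/* E.g. __SECONDARY_RELOAD_CASE (QI, qi) expands to a "case QImode:" that
   selects CODE_FOR_reloadqidi_toreg_z10 or CODE_FOR_reloadqidi_tomem_z10
   on 64-bit targets (the ...si... variants otherwise), depending on the
   reload direction IN_P.  */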
3107
3108 switch (GET_MODE (x))
3109 {
3110 __SECONDARY_RELOAD_CASE (QI, qi);
3111 __SECONDARY_RELOAD_CASE (HI, hi);
3112 __SECONDARY_RELOAD_CASE (SI, si);
3113 __SECONDARY_RELOAD_CASE (DI, di);
3114 __SECONDARY_RELOAD_CASE (TI, ti);
3115 __SECONDARY_RELOAD_CASE (SF, sf);
3116 __SECONDARY_RELOAD_CASE (DF, df);
3117 __SECONDARY_RELOAD_CASE (TF, tf);
3118 __SECONDARY_RELOAD_CASE (SD, sd);
3119 __SECONDARY_RELOAD_CASE (DD, dd);
3120 __SECONDARY_RELOAD_CASE (TD, td);
3121
3122 default:
3123 gcc_unreachable ();
3124 }
3125 #undef __SECONDARY_RELOAD_CASE
3126 }
3127 }
3128
3129 /* We need a scratch register when loading a PLUS expression which
3130 is not a legitimate operand of the LOAD ADDRESS instruction. */
3131 if (in_p && s390_plus_operand (x, mode))
3132 sri->icode = (TARGET_64BIT ?
3133 CODE_FOR_reloaddi_plus : CODE_FOR_reloadsi_plus);
3134
3135 /* When performing a multiword move from or to memory, we have to make
3136 sure the second chunk in memory is addressable without causing a
3137 displacement overflow.  If that would be the case, we calculate the
3138 address in a scratch register.  */
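/* For instance, without long displacements a 16-byte (TImode) access at
   displacement 4088 would have to reach byte 4088 + 15 = 4103, outside
   the 0..4095 short-displacement range, so the address is first computed
   into a scratch register.  */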
3139 if (MEM_P (x)
3140 && GET_CODE (XEXP (x, 0)) == PLUS
3141 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3142 && !DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1))
3143 + GET_MODE_SIZE (mode) - 1))
3144 {
3145 /* For GENERAL_REGS a displacement overflow is no problem if occurring
3146 in an s_operand address since we may fall back to lm/stm.  So we only
3147 have to care about overflows in the b+i+d case.  */
3148 if ((reg_classes_intersect_p (GENERAL_REGS, rclass)
3149 && s390_class_max_nregs (GENERAL_REGS, mode) > 1
3150 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
3151 /* For FP_REGS no lm/stm is available so this check is triggered
3152 for displacement overflows in b+i+d and b+d like addresses. */
3153 || (reg_classes_intersect_p (FP_REGS, rclass)
3154 && s390_class_max_nregs (FP_REGS, mode) > 1))
3155 {
3156 if (in_p)
3157 sri->icode = (TARGET_64BIT ?
3158 CODE_FOR_reloaddi_nonoffmem_in :
3159 CODE_FOR_reloadsi_nonoffmem_in);
3160 else
3161 sri->icode = (TARGET_64BIT ?
3162 CODE_FOR_reloaddi_nonoffmem_out :
3163 CODE_FOR_reloadsi_nonoffmem_out);
3164 }
3165 }
3166
3167 /* A scratch address register is needed when a symbolic constant is
3168 copied to r0 when compiling with -fPIC.  In other cases the target
3169 register might be used as a temporary (see legitimize_pic_address).  */
3170 if (in_p && SYMBOLIC_CONST (x) && flag_pic == 2 && rclass != ADDR_REGS)
3171 sri->icode = (TARGET_64BIT ?
3172 CODE_FOR_reloaddi_PIC_addr :
3173 CODE_FOR_reloadsi_PIC_addr);
3174
3175 /* Either scratch or no register needed. */
3176 return NO_REGS;
3177 }
3178
3179 /* Generate code to load SRC, which is PLUS that is not a
3180 legitimate operand for the LA instruction, into TARGET.
3181 SCRATCH may be used as scratch register. */
3182
3183 void
3184 s390_expand_plus_operand (rtx target, rtx src,
3185 rtx scratch)
3186 {
3187 rtx sum1, sum2;
3188 struct s390_address ad;
3189
3190 /* src must be a PLUS; get its two operands. */
3191 gcc_assert (GET_CODE (src) == PLUS);
3192 gcc_assert (GET_MODE (src) == Pmode);
3193
3194 /* Check if any of the two operands is already scheduled
3195 for replacement by reload. This can happen e.g. when
3196 float registers occur in an address. */
3197 sum1 = find_replacement (&XEXP (src, 0));
3198 sum2 = find_replacement (&XEXP (src, 1));
3199 src = gen_rtx_PLUS (Pmode, sum1, sum2);
3200
3201 /* If the address is already strictly valid, there's nothing to do. */
3202 if (!s390_decompose_address (src, &ad)
3203 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
3204 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
3205 {
3206 /* Otherwise, one of the operands cannot be an address register;
3207 we reload its value into the scratch register. */
3208 if (true_regnum (sum1) < 1 || true_regnum (sum1) > 15)
3209 {
3210 emit_move_insn (scratch, sum1);
3211 sum1 = scratch;
3212 }
3213 if (true_regnum (sum2) < 1 || true_regnum (sum2) > 15)
3214 {
3215 emit_move_insn (scratch, sum2);
3216 sum2 = scratch;
3217 }
3218
3219 /* According to the way these invalid addresses are generated
3220 in reload.c, it should never happen (at least on s390) that
3221 *neither* of the PLUS components, after find_replacements
3222 was applied, is an address register. */
3223 if (sum1 == scratch && sum2 == scratch)
3224 {
3225 debug_rtx (src);
3226 gcc_unreachable ();
3227 }
3228
3229 src = gen_rtx_PLUS (Pmode, sum1, sum2);
3230 }
3231
3232 /* Emit the LOAD ADDRESS pattern. Note that reload of PLUS
3233 is only ever performed on addresses, so we can mark the
3234 sum as legitimate for LA in any case. */
3235 s390_load_address (target, src);
3236 }
3237
3238
3239 /* Return true if ADDR is a valid memory address.
3240 STRICT specifies whether strict register checking applies. */
3241
3242 static bool
3243 s390_legitimate_address_p (enum machine_mode mode, rtx addr, bool strict)
3244 {
3245 struct s390_address ad;
3246
3247 if (TARGET_Z10
3248 && larl_operand (addr, VOIDmode)
3249 && (mode == VOIDmode
3250 || s390_check_symref_alignment (addr, GET_MODE_SIZE (mode))))
3251 return true;
3252
3253 if (!s390_decompose_address (addr, &ad))
3254 return false;
3255
3256 if (strict)
3257 {
3258 if (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
3259 return false;
3260
3261 if (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx)))
3262 return false;
3263 }
3264 else
3265 {
3266 if (ad.base
3267 && !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
3268 || REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
3269 return false;
3270
3271 if (ad.indx
3272 && !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
3273 || REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
3274 return false;
3275 }
3276 return true;
3277 }
3278
3279 /* Return true if OP is a valid operand for the LA instruction.
3280 In 31-bit, we need to prove that the result is used as an
3281 address, as LA performs only a 31-bit addition. */
3282
3283 bool
3284 legitimate_la_operand_p (rtx op)
3285 {
3286 struct s390_address addr;
3287 if (!s390_decompose_address (op, &addr))
3288 return false;
3289
3290 return (TARGET_64BIT || addr.pointer);
3291 }
3292
3293 /* Return true if it is valid *and* preferable to use LA to
3294 compute the sum of OP1 and OP2. */
3295
3296 bool
3297 preferred_la_operand_p (rtx op1, rtx op2)
3298 {
3299 struct s390_address addr;
3300
3301 if (op2 != const0_rtx)
3302 op1 = gen_rtx_PLUS (Pmode, op1, op2);
3303
3304 if (!s390_decompose_address (op1, &addr))
3305 return false;
3306 if (addr.base && !REGNO_OK_FOR_BASE_P (REGNO (addr.base)))
3307 return false;
3308 if (addr.indx && !REGNO_OK_FOR_INDEX_P (REGNO (addr.indx)))
3309 return false;
3310
3311 /* Avoid LA instructions with index register on z196; it is
3312 preferable to use regular add instructions when possible. */
3313 if (addr.indx && s390_tune == PROCESSOR_2817_Z196)
3314 return false;
3315
3316 if (!TARGET_64BIT && !addr.pointer)
3317 return false;
3318
3319 if (addr.pointer)
3320 return true;
3321
3322 if ((addr.base && REG_P (addr.base) && REG_POINTER (addr.base))
3323 || (addr.indx && REG_P (addr.indx) && REG_POINTER (addr.indx)))
3324 return true;
3325
3326 return false;
3327 }
3328
3329 /* Emit a forced load-address operation to load SRC into DST.
3330 This will use the LOAD ADDRESS instruction even in situations
3331 where legitimate_la_operand_p (SRC) returns false. */
3332
3333 void
3334 s390_load_address (rtx dst, rtx src)
3335 {
3336 if (TARGET_64BIT)
3337 emit_move_insn (dst, src);
3338 else
3339 emit_insn (gen_force_la_31 (dst, src));
3340 }
3341
3342 /* Return a legitimate reference for ORIG (an address) using the
3343 register REG. If REG is 0, a new pseudo is generated.
3344
3345 There are two types of references that must be handled:
3346
3347 1. Global data references must load the address from the GOT, via
3348 the PIC reg. An insn is emitted to do this load, and the reg is
3349 returned.
3350
3351 2. Static data references, constant pool addresses, and code labels
3352 compute the address as an offset from the GOT, whose base is in
3353 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
3354 differentiate them from global data objects. The returned
3355 address is the PIC reg + an unspec constant.
3356
3357 TARGET_LEGITIMATE_ADDRESS_P rejects symbolic references unless the PIC
3358 reg also appears in the address.  */
3359
3360 rtx
3361 legitimize_pic_address (rtx orig, rtx reg)
3362 {
3363 rtx addr = orig;
3364 rtx new_rtx = orig;
3365 rtx base;
3366
3367 gcc_assert (!TLS_SYMBOLIC_CONST (addr));
3368
3369 if (GET_CODE (addr) == LABEL_REF
3370 || (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (addr)))
3371 {
3372 /* This is a local symbol. */
3373 if (TARGET_CPU_ZARCH && larl_operand (addr, VOIDmode))
3374 {
3375 /* Access local symbols PC-relative via LARL.
3376 This is the same as in the non-PIC case, so it is
3377 handled automatically ... */
3378 }
3379 else
3380 {
3381 /* Access local symbols relative to the GOT. */
3382
3383 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3384
3385 if (reload_in_progress || reload_completed)
3386 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3387
3388 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
3389 addr = gen_rtx_CONST (Pmode, addr);
3390 addr = force_const_mem (Pmode, addr);
3391 emit_move_insn (temp, addr);
3392
3393 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3394 if (reg != 0)
3395 {
3396 s390_load_address (reg, new_rtx);
3397 new_rtx = reg;
3398 }
3399 }
3400 }
3401 else if (GET_CODE (addr) == SYMBOL_REF)
3402 {
3403 if (reg == 0)
3404 reg = gen_reg_rtx (Pmode);
3405
3406 if (flag_pic == 1)
3407 {
3408 /* Assume GOT offset < 4k. This is handled the same way
3409 in both 31- and 64-bit code (@GOT). */
3410
3411 if (reload_in_progress || reload_completed)
3412 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3413
3414 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3415 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3416 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
3417 new_rtx = gen_const_mem (Pmode, new_rtx);
3418 emit_move_insn (reg, new_rtx);
3419 new_rtx = reg;
3420 }
3421 else if (TARGET_CPU_ZARCH)
3422 {
3423 /* If the GOT offset might be >= 4k, we determine the position
3424 of the GOT entry via a PC-relative LARL (@GOTENT). */
3425
3426 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3427
3428 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
3429 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
3430
3431 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
3432 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3433 emit_move_insn (temp, new_rtx);
3434
3435 new_rtx = gen_const_mem (Pmode, temp);
3436 emit_move_insn (reg, new_rtx);
3437 new_rtx = reg;
3438 }
3439 else
3440 {
3441 /* If the GOT offset might be >= 4k, we have to load it
3442 from the literal pool (@GOT). */
3443
3444 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3445
3446 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
3447 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
3448
3449 if (reload_in_progress || reload_completed)
3450 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3451
3452 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3453 addr = gen_rtx_CONST (Pmode, addr);
3454 addr = force_const_mem (Pmode, addr);
3455 emit_move_insn (temp, addr);
3456
3457 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3458 new_rtx = gen_const_mem (Pmode, new_rtx);
3459 emit_move_insn (reg, new_rtx);
3460 new_rtx = reg;
3461 }
3462 }
3463 else
3464 {
3465 if (GET_CODE (addr) == CONST)
3466 {
3467 addr = XEXP (addr, 0);
3468 if (GET_CODE (addr) == UNSPEC)
3469 {
3470 gcc_assert (XVECLEN (addr, 0) == 1);
3471 switch (XINT (addr, 1))
3472 {
3473 /* If someone moved a GOT-relative UNSPEC
3474 out of the literal pool, force them back in. */
3475 case UNSPEC_GOTOFF:
3476 case UNSPEC_PLTOFF:
3477 new_rtx = force_const_mem (Pmode, orig);
3478 break;
3479
3480 /* @GOT is OK as is if small. */
3481 case UNSPEC_GOT:
3482 if (flag_pic == 2)
3483 new_rtx = force_const_mem (Pmode, orig);
3484 break;
3485
3486 /* @GOTENT is OK as is. */
3487 case UNSPEC_GOTENT:
3488 break;
3489
3490 /* @PLT is OK as is on 64-bit, must be converted to
3491 GOT-relative @PLTOFF on 31-bit. */
3492 case UNSPEC_PLT:
3493 if (!TARGET_CPU_ZARCH)
3494 {
3495 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3496
3497 if (reload_in_progress || reload_completed)
3498 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3499
3500 addr = XVECEXP (addr, 0, 0);
3501 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr),
3502 UNSPEC_PLTOFF);
3503 addr = gen_rtx_CONST (Pmode, addr);
3504 addr = force_const_mem (Pmode, addr);
3505 emit_move_insn (temp, addr);
3506
3507 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3508 if (reg != 0)
3509 {
3510 s390_load_address (reg, new_rtx);
3511 new_rtx = reg;
3512 }
3513 }
3514 break;
3515
3516 /* Everything else cannot happen. */
3517 default:
3518 gcc_unreachable ();
3519 }
3520 }
3521 else
3522 gcc_assert (GET_CODE (addr) == PLUS);
3523 }
3524 if (GET_CODE (addr) == PLUS)
3525 {
3526 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
3527
3528 gcc_assert (!TLS_SYMBOLIC_CONST (op0));
3529 gcc_assert (!TLS_SYMBOLIC_CONST (op1));
3530
3531 /* Check first to see if this is a constant offset
3532 from a local symbol reference. */
3533 if ((GET_CODE (op0) == LABEL_REF
3534 || (GET_CODE (op0) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (op0)))
3535 && GET_CODE (op1) == CONST_INT)
3536 {
3537 if (TARGET_CPU_ZARCH
3538 && larl_operand (op0, VOIDmode)
3539 && INTVAL (op1) < (HOST_WIDE_INT)1 << 31
3540 && INTVAL (op1) >= -((HOST_WIDE_INT)1 << 31))
3541 {
3542 if (INTVAL (op1) & 1)
3543 {
3544 /* LARL can't handle odd offsets, so emit a
3545 pair of LARL and LA. */
3546 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3547
3548 if (!DISP_IN_RANGE (INTVAL (op1)))
3549 {
3550 HOST_WIDE_INT even = INTVAL (op1) - 1;
3551 op0 = gen_rtx_PLUS (Pmode, op0, GEN_INT (even));
3552 op0 = gen_rtx_CONST (Pmode, op0);
3553 op1 = const1_rtx;
3554 }
3555
3556 emit_move_insn (temp, op0);
3557 new_rtx = gen_rtx_PLUS (Pmode, temp, op1);
3558
3559 if (reg != 0)
3560 {
3561 s390_load_address (reg, new_rtx);
3562 new_rtx = reg;
3563 }
3564 }
3565 else
3566 {
3567 /* If the offset is even, we can just use LARL.
3568 This will happen automatically. */
3569 }
3570 }
3571 else
3572 {
3573 /* Access local symbols relative to the GOT. */
3574
3575 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3576
3577 if (reload_in_progress || reload_completed)
3578 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3579
3580 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
3581 UNSPEC_GOTOFF);
3582 addr = gen_rtx_PLUS (Pmode, addr, op1);
3583 addr = gen_rtx_CONST (Pmode, addr);
3584 addr = force_const_mem (Pmode, addr);
3585 emit_move_insn (temp, addr);
3586
3587 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3588 if (reg != 0)
3589 {
3590 s390_load_address (reg, new_rtx);
3591 new_rtx = reg;
3592 }
3593 }
3594 }
3595
3596 /* Now, check whether it is a GOT relative symbol plus offset
3597 that was pulled out of the literal pool. Force it back in. */
3598
3599 else if (GET_CODE (op0) == UNSPEC
3600 && GET_CODE (op1) == CONST_INT
3601 && XINT (op0, 1) == UNSPEC_GOTOFF)
3602 {
3603 gcc_assert (XVECLEN (op0, 0) == 1);
3604
3605 new_rtx = force_const_mem (Pmode, orig);
3606 }
3607
3608 /* Otherwise, compute the sum. */
3609 else
3610 {
3611 base = legitimize_pic_address (XEXP (addr, 0), reg);
3612 new_rtx = legitimize_pic_address (XEXP (addr, 1),
3613 base == reg ? NULL_RTX : reg);
3614 if (GET_CODE (new_rtx) == CONST_INT)
3615 new_rtx = plus_constant (base, INTVAL (new_rtx));
3616 else
3617 {
3618 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
3619 {
3620 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
3621 new_rtx = XEXP (new_rtx, 1);
3622 }
3623 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
3624 }
3625
3626 if (GET_CODE (new_rtx) == CONST)
3627 new_rtx = XEXP (new_rtx, 0);
3628 new_rtx = force_operand (new_rtx, 0);
3629 }
3630 }
3631 }
3632 return new_rtx;
3633 }
3634
3635 /* Load the thread pointer into a register. */
3636
3637 rtx
3638 s390_get_thread_pointer (void)
3639 {
3640 rtx tp = gen_reg_rtx (Pmode);
3641
3642 emit_move_insn (tp, gen_rtx_REG (Pmode, TP_REGNUM));
3643 mark_reg_pointer (tp, BITS_PER_WORD);
3644
3645 return tp;
3646 }
3647
3648 /* Emit a TLS call insn.  The call target is the SYMBOL_REF stored
3649 in s390_tls_symbol which always refers to __tls_get_offset.
3650 The returned offset is written to RESULT_REG and a USE rtx is
3651 generated for TLS_CALL.  */
3652
3653 static GTY(()) rtx s390_tls_symbol;
3654
3655 static void
3656 s390_emit_tls_call_insn (rtx result_reg, rtx tls_call)
3657 {
3658 rtx insn;
3659
3660 gcc_assert (flag_pic);
3661
3662 if (!s390_tls_symbol)
3663 s390_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_offset");
3664
3665 insn = s390_emit_call (s390_tls_symbol, tls_call, result_reg,
3666 gen_rtx_REG (Pmode, RETURN_REGNUM));
3667
3668 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), result_reg);
3669 RTL_CONST_CALL_P (insn) = 1;
3670 }
3671
3672 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3673 this (thread-local) address. REG may be used as temporary. */
3674
3675 static rtx
3676 legitimize_tls_address (rtx addr, rtx reg)
3677 {
3678 rtx new_rtx, tls_call, temp, base, r2, insn;
3679
3680 if (GET_CODE (addr) == SYMBOL_REF)
3681 switch (tls_symbolic_operand (addr))
3682 {
3683 case TLS_MODEL_GLOBAL_DYNAMIC:
3684 start_sequence ();
3685 r2 = gen_rtx_REG (Pmode, 2);
3686 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_TLSGD);
3687 new_rtx = gen_rtx_CONST (Pmode, tls_call);
3688 new_rtx = force_const_mem (Pmode, new_rtx);
3689 emit_move_insn (r2, new_rtx);
3690 s390_emit_tls_call_insn (r2, tls_call);
3691 insn = get_insns ();
3692 end_sequence ();
3693
3694 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3695 temp = gen_reg_rtx (Pmode);
3696 emit_libcall_block (insn, temp, r2, new_rtx);
3697
3698 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3699 if (reg != 0)
3700 {
3701 s390_load_address (reg, new_rtx);
3702 new_rtx = reg;
3703 }
3704 break;
3705
3706 case TLS_MODEL_LOCAL_DYNAMIC:
3707 start_sequence ();
3708 r2 = gen_rtx_REG (Pmode, 2);
3709 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM);
3710 new_rtx = gen_rtx_CONST (Pmode, tls_call);
3711 new_rtx = force_const_mem (Pmode, new_rtx);
3712 emit_move_insn (r2, new_rtx);
3713 s390_emit_tls_call_insn (r2, tls_call);
3714 insn = get_insns ();
3715 end_sequence ();
3716
3717 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM_NTPOFF);
3718 temp = gen_reg_rtx (Pmode);
3719 emit_libcall_block (insn, temp, r2, new_rtx);
3720
3721 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3722 base = gen_reg_rtx (Pmode);
3723 s390_load_address (base, new_rtx);
3724
3725 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_DTPOFF);
3726 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3727 new_rtx = force_const_mem (Pmode, new_rtx);
3728 temp = gen_reg_rtx (Pmode);
3729 emit_move_insn (temp, new_rtx);
3730
3731 new_rtx = gen_rtx_PLUS (Pmode, base, temp);
3732 if (reg != 0)
3733 {
3734 s390_load_address (reg, new_rtx);
3735 new_rtx = reg;
3736 }
3737 break;
3738
3739 case TLS_MODEL_INITIAL_EXEC:
3740 if (flag_pic == 1)
3741 {
3742 /* Assume GOT offset < 4k. This is handled the same way
3743 in both 31- and 64-bit code. */
3744
3745 if (reload_in_progress || reload_completed)
3746 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3747
3748 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3749 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3750 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
3751 new_rtx = gen_const_mem (Pmode, new_rtx);
3752 temp = gen_reg_rtx (Pmode);
3753 emit_move_insn (temp, new_rtx);
3754 }
3755 else if (TARGET_CPU_ZARCH)
3756 {
3757 /* If the GOT offset might be >= 4k, we determine the position
3758 of the GOT entry via a PC-relative LARL. */
3759
3760 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3761 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3762 temp = gen_reg_rtx (Pmode);
3763 emit_move_insn (temp, new_rtx);
3764
3765 new_rtx = gen_const_mem (Pmode, temp);
3766 temp = gen_reg_rtx (Pmode);
3767 emit_move_insn (temp, new_rtx);
3768 }
3769 else if (flag_pic)
3770 {
3771 /* If the GOT offset might be >= 4k, we have to load it
3772 from the literal pool. */
3773
3774 if (reload_in_progress || reload_completed)
3775 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3776
3777 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3778 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3779 new_rtx = force_const_mem (Pmode, new_rtx);
3780 temp = gen_reg_rtx (Pmode);
3781 emit_move_insn (temp, new_rtx);
3782
3783 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3784 new_rtx = gen_const_mem (Pmode, new_rtx);
3785
3786 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
3787 temp = gen_reg_rtx (Pmode);
3788 emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
3789 }
3790 else
3791 {
3792 /* In position-dependent code, load the absolute address of
3793 the GOT entry from the literal pool. */
3794
3795 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3796 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3797 new_rtx = force_const_mem (Pmode, new_rtx);
3798 temp = gen_reg_rtx (Pmode);
3799 emit_move_insn (temp, new_rtx);
3800
3801 new_rtx = temp;
3802 new_rtx = gen_const_mem (Pmode, new_rtx);
3803 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
3804 temp = gen_reg_rtx (Pmode);
3805 emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
3806 }
3807
3808 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3809 if (reg != 0)
3810 {
3811 s390_load_address (reg, new_rtx);
3812 new_rtx = reg;
3813 }
3814 break;
3815
3816 case TLS_MODEL_LOCAL_EXEC:
3817 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3818 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3819 new_rtx = force_const_mem (Pmode, new_rtx);
3820 temp = gen_reg_rtx (Pmode);
3821 emit_move_insn (temp, new_rtx);
3822
3823 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3824 if (reg != 0)
3825 {
3826 s390_load_address (reg, new_rtx);
3827 new_rtx = reg;
3828 }
3829 break;
3830
3831 default:
3832 gcc_unreachable ();
3833 }
3834
3835 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == UNSPEC)
3836 {
3837 switch (XINT (XEXP (addr, 0), 1))
3838 {
3839 case UNSPEC_INDNTPOFF:
3840 gcc_assert (TARGET_CPU_ZARCH);
3841 new_rtx = addr;
3842 break;
3843
3844 default:
3845 gcc_unreachable ();
3846 }
3847 }
3848
3849 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
3850 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
3851 {
3852 new_rtx = XEXP (XEXP (addr, 0), 0);
3853 if (GET_CODE (new_rtx) != SYMBOL_REF)
3854 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3855
3856 new_rtx = legitimize_tls_address (new_rtx, reg);
3857 new_rtx = plus_constant (new_rtx, INTVAL (XEXP (XEXP (addr, 0), 1)));
3858 new_rtx = force_operand (new_rtx, 0);
3859 }
3860
3861 else
3862 gcc_unreachable (); /* for now ... */
3863
3864 return new_rtx;
3865 }
3866
3867 /* Emit insns making the address in operands[1] valid for a standard
3868 move to operands[0]. operands[1] is replaced by an address which
3869 should be used instead of the former RTX to emit the move
3870 pattern. */
3871
3872 void
3873 emit_symbolic_move (rtx *operands)
3874 {
3875 rtx temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
3876
3877 if (GET_CODE (operands[0]) == MEM)
3878 operands[1] = force_reg (Pmode, operands[1]);
3879 else if (TLS_SYMBOLIC_CONST (operands[1]))
3880 operands[1] = legitimize_tls_address (operands[1], temp);
3881 else if (flag_pic)
3882 operands[1] = legitimize_pic_address (operands[1], temp);
3883 }
3884
3885 /* Try machine-dependent ways of modifying an illegitimate address X
3886 to be legitimate. If we find one, return the new, valid address.
3887
3888 OLDX is the address as it was before break_out_memory_refs was called.
3889 In some cases it is useful to look at this to decide what needs to be done.
3890
3891 MODE is the mode of the operand pointed to by X.
3892
3893 When -fpic is used, special handling is needed for symbolic references.
3894 See comments by legitimize_pic_address for details. */
3895
3896 static rtx
3897 s390_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
3898 enum machine_mode mode ATTRIBUTE_UNUSED)
3899 {
3900 rtx constant_term = const0_rtx;
3901
3902 if (TLS_SYMBOLIC_CONST (x))
3903 {
3904 x = legitimize_tls_address (x, 0);
3905
3906 if (s390_legitimate_address_p (mode, x, FALSE))
3907 return x;
3908 }
3909 else if (GET_CODE (x) == PLUS
3910 && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
3911 || TLS_SYMBOLIC_CONST (XEXP (x, 1))))
3912 {
3913 return x;
3914 }
3915 else if (flag_pic)
3916 {
3917 if (SYMBOLIC_CONST (x)
3918 || (GET_CODE (x) == PLUS
3919 && (SYMBOLIC_CONST (XEXP (x, 0))
3920 || SYMBOLIC_CONST (XEXP (x, 1)))))
3921 x = legitimize_pic_address (x, 0);
3922
3923 if (s390_legitimate_address_p (mode, x, FALSE))
3924 return x;
3925 }
3926
3927 x = eliminate_constant_term (x, &constant_term);
3928
3929 /* Optimize loading of large displacements by splitting them
3930 into the multiple of 4K and the rest; this allows the
3931 former to be CSE'd if possible.
3932
3933 Don't do this if the displacement is added to a register
3934 pointing into the stack frame, as the offsets will
3935 change later anyway. */
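  /* For illustration (the value is invented, not from any test case):
     with a constant term of 0x12345, LOWER below becomes 0x345 and
     UPPER becomes 0x12000.  UPPER is forced into a temporary register,
     so nearby addresses sharing the same multiple of 4K can reuse it
     via CSE, while LOWER stays in the 12-bit displacement field.  */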
3936
3937 if (GET_CODE (constant_term) == CONST_INT
3938 && !TARGET_LONG_DISPLACEMENT
3939 && !DISP_IN_RANGE (INTVAL (constant_term))
3940 && !(REG_P (x) && REGNO_PTR_FRAME_P (REGNO (x))))
3941 {
3942 HOST_WIDE_INT lower = INTVAL (constant_term) & 0xfff;
3943 HOST_WIDE_INT upper = INTVAL (constant_term) ^ lower;
3944
3945 rtx temp = gen_reg_rtx (Pmode);
3946 rtx val = force_operand (GEN_INT (upper), temp);
3947 if (val != temp)
3948 emit_move_insn (temp, val);
3949
3950 x = gen_rtx_PLUS (Pmode, x, temp);
3951 constant_term = GEN_INT (lower);
3952 }
3953
3954 if (GET_CODE (x) == PLUS)
3955 {
3956 if (GET_CODE (XEXP (x, 0)) == REG)
3957 {
3958 rtx temp = gen_reg_rtx (Pmode);
3959 rtx val = force_operand (XEXP (x, 1), temp);
3960 if (val != temp)
3961 emit_move_insn (temp, val);
3962
3963 x = gen_rtx_PLUS (Pmode, XEXP (x, 0), temp);
3964 }
3965
3966 else if (GET_CODE (XEXP (x, 1)) == REG)
3967 {
3968 rtx temp = gen_reg_rtx (Pmode);
3969 rtx val = force_operand (XEXP (x, 0), temp);
3970 if (val != temp)
3971 emit_move_insn (temp, val);
3972
3973 x = gen_rtx_PLUS (Pmode, temp, XEXP (x, 1));
3974 }
3975 }
3976
3977 if (constant_term != const0_rtx)
3978 x = gen_rtx_PLUS (Pmode, x, constant_term);
3979
3980 return x;
3981 }
3982
3983 /* Try a machine-dependent way of reloading an illegitimate address AD
3984 operand. If we find one, push the reload and return the new address.
3985
3986 MODE is the mode of the enclosing MEM. OPNUM is the operand number
3987 and TYPE is the reload type of the current reload. */
3988
3989 rtx
3990 legitimize_reload_address (rtx ad, enum machine_mode mode ATTRIBUTE_UNUSED,
3991 int opnum, int type)
3992 {
3993 if (!optimize || TARGET_LONG_DISPLACEMENT)
3994 return NULL_RTX;
3995
3996 if (GET_CODE (ad) == PLUS)
3997 {
3998 rtx tem = simplify_binary_operation (PLUS, Pmode,
3999 XEXP (ad, 0), XEXP (ad, 1));
4000 if (tem)
4001 ad = tem;
4002 }
4003
4004 if (GET_CODE (ad) == PLUS
4005 && GET_CODE (XEXP (ad, 0)) == REG
4006 && GET_CODE (XEXP (ad, 1)) == CONST_INT
4007 && !DISP_IN_RANGE (INTVAL (XEXP (ad, 1))))
4008 {
4009 HOST_WIDE_INT lower = INTVAL (XEXP (ad, 1)) & 0xfff;
4010 HOST_WIDE_INT upper = INTVAL (XEXP (ad, 1)) ^ lower;
4011 rtx cst, tem, new_rtx;
4012
4013 cst = GEN_INT (upper);
4014 if (!legitimate_reload_constant_p (cst))
4015 cst = force_const_mem (Pmode, cst);
4016
4017 tem = gen_rtx_PLUS (Pmode, XEXP (ad, 0), cst);
4018 new_rtx = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));
4019
4020 push_reload (XEXP (tem, 1), 0, &XEXP (tem, 1), 0,
4021 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
4022 opnum, (enum reload_type) type);
4023 return new_rtx;
4024 }
4025
4026 return NULL_RTX;
4027 }
4028
4029 /* Emit code to move LEN bytes from SRC to DST. */
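/* Rough sketch of the strategy used below (the block size comes from the
   code, the example length is invented): an MVC-style move handles at most
   256 bytes, so for a variable or large LEN the loop body moves one
   256-byte block per iteration and the tail move uses the low byte of
   COUNT.  E.g. for LEN = 700: COUNT = 699, BLOCKS = 699 >> 8 = 2, so the
   loop moves 512 bytes and the final movmem_short moves the remaining
   188 bytes.  */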
4030
4031 void
4032 s390_expand_movmem (rtx dst, rtx src, rtx len)
4033 {
4034 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
4035 {
4036 if (INTVAL (len) > 0)
4037 emit_insn (gen_movmem_short (dst, src, GEN_INT (INTVAL (len) - 1)));
4038 }
4039
4040 else if (TARGET_MVCLE)
4041 {
4042 emit_insn (gen_movmem_long (dst, src, convert_to_mode (Pmode, len, 1)));
4043 }
4044
4045 else
4046 {
4047 rtx dst_addr, src_addr, count, blocks, temp;
4048 rtx loop_start_label = gen_label_rtx ();
4049 rtx loop_end_label = gen_label_rtx ();
4050 rtx end_label = gen_label_rtx ();
4051 enum machine_mode mode;
4052
4053 mode = GET_MODE (len);
4054 if (mode == VOIDmode)
4055 mode = Pmode;
4056
4057 dst_addr = gen_reg_rtx (Pmode);
4058 src_addr = gen_reg_rtx (Pmode);
4059 count = gen_reg_rtx (mode);
4060 blocks = gen_reg_rtx (mode);
4061
4062 convert_move (count, len, 1);
4063 emit_cmp_and_jump_insns (count, const0_rtx,
4064 EQ, NULL_RTX, mode, 1, end_label);
4065
4066 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
4067 emit_move_insn (src_addr, force_operand (XEXP (src, 0), NULL_RTX));
4068 dst = change_address (dst, VOIDmode, dst_addr);
4069 src = change_address (src, VOIDmode, src_addr);
4070
4071 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4072 OPTAB_DIRECT);
4073 if (temp != count)
4074 emit_move_insn (count, temp);
4075
4076 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4077 OPTAB_DIRECT);
4078 if (temp != blocks)
4079 emit_move_insn (blocks, temp);
4080
4081 emit_cmp_and_jump_insns (blocks, const0_rtx,
4082 EQ, NULL_RTX, mode, 1, loop_end_label);
4083
4084 emit_label (loop_start_label);
4085
4086 if (TARGET_Z10
4087 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 768))
4088 {
4089 rtx prefetch;
4090
4091 /* Issue a read prefetch for the +3 cache line. */
4092 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, src_addr, GEN_INT (768)),
4093 const0_rtx, const0_rtx);
4094 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4095 emit_insn (prefetch);
4096
4097 /* Issue a write prefetch for the +3 cache line. */
4098 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (768)),
4099 const1_rtx, const0_rtx);
4100 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4101 emit_insn (prefetch);
4102 }
4103
4104 emit_insn (gen_movmem_short (dst, src, GEN_INT (255)));
4105 s390_load_address (dst_addr,
4106 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
4107 s390_load_address (src_addr,
4108 gen_rtx_PLUS (Pmode, src_addr, GEN_INT (256)));
4109
4110 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4111 OPTAB_DIRECT);
4112 if (temp != blocks)
4113 emit_move_insn (blocks, temp);
4114
4115 emit_cmp_and_jump_insns (blocks, const0_rtx,
4116 EQ, NULL_RTX, mode, 1, loop_end_label);
4117
4118 emit_jump (loop_start_label);
4119 emit_label (loop_end_label);
4120
4121 emit_insn (gen_movmem_short (dst, src,
4122 convert_to_mode (Pmode, count, 1)));
4123 emit_label (end_label);
4124 }
4125 }
4126
4127 /* Emit code to set LEN bytes at DST to VAL.
4128 Make use of clrmem if VAL is zero. */
4129
4130 void
4131 s390_expand_setmem (rtx dst, rtx len, rtx val)
4132 {
4133 if (GET_CODE (len) == CONST_INT && INTVAL (len) == 0)
4134 return;
4135
4136 gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);
4137
4138 if (GET_CODE (len) == CONST_INT && INTVAL (len) > 0 && INTVAL (len) <= 257)
4139 {
4140 if (val == const0_rtx && INTVAL (len) <= 256)
4141 emit_insn (gen_clrmem_short (dst, GEN_INT (INTVAL (len) - 1)));
4142 else
4143 {
4144 /* Initialize memory by storing the first byte. */
4145 emit_move_insn (adjust_address (dst, QImode, 0), val);
4146
4147 if (INTVAL (len) > 1)
4148 {
4149 /* Initiate 1 byte overlap move.
4150 The first byte of DST is propagated through DSTP1.
4151 Prepare a movmem for: DST+1 = DST (length = LEN - 1).
4152 DST is set to size 1 so the rest of the memory location
4153 does not count as source operand. */
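	      /* Illustrative example (values invented): for LEN = 8 and
		 VAL = 0x41, the byte 0x41 is first stored at DST[0]; the
		 overlapping move below then copies 7 bytes from DST to
		 DST+1.  Since the move proceeds byte by byte from left
		 to right, each destination byte picks up the 0x41 that
		 was just written one position earlier.  */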
4154 rtx dstp1 = adjust_address (dst, VOIDmode, 1);
4155 set_mem_size (dst, const1_rtx);
4156
4157 emit_insn (gen_movmem_short (dstp1, dst,
4158 GEN_INT (INTVAL (len) - 2)));
4159 }
4160 }
4161 }
4162
4163 else if (TARGET_MVCLE)
4164 {
4165 val = force_not_mem (convert_modes (Pmode, QImode, val, 1));
4166 emit_insn (gen_setmem_long (dst, convert_to_mode (Pmode, len, 1), val));
4167 }
4168
4169 else
4170 {
4171 rtx dst_addr, count, blocks, temp, dstp1 = NULL_RTX;
4172 rtx loop_start_label = gen_label_rtx ();
4173 rtx loop_end_label = gen_label_rtx ();
4174 rtx end_label = gen_label_rtx ();
4175 enum machine_mode mode;
4176
4177 mode = GET_MODE (len);
4178 if (mode == VOIDmode)
4179 mode = Pmode;
4180
4181 dst_addr = gen_reg_rtx (Pmode);
4182 count = gen_reg_rtx (mode);
4183 blocks = gen_reg_rtx (mode);
4184
4185 convert_move (count, len, 1);
4186 emit_cmp_and_jump_insns (count, const0_rtx,
4187 EQ, NULL_RTX, mode, 1, end_label);
4188
4189 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
4190 dst = change_address (dst, VOIDmode, dst_addr);
4191
4192 if (val == const0_rtx)
4193 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4194 OPTAB_DIRECT);
4195 else
4196 {
4197 dstp1 = adjust_address (dst, VOIDmode, 1);
4198 set_mem_size (dst, const1_rtx);
4199
4200 /* Initialize memory by storing the first byte. */
4201 emit_move_insn (adjust_address (dst, QImode, 0), val);
4202
4203 /* If count is 1 we are done. */
4204 emit_cmp_and_jump_insns (count, const1_rtx,
4205 EQ, NULL_RTX, mode, 1, end_label);
4206
4207 temp = expand_binop (mode, add_optab, count, GEN_INT (-2), count, 1,
4208 OPTAB_DIRECT);
4209 }
4210 if (temp != count)
4211 emit_move_insn (count, temp);
4212
4213 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4214 OPTAB_DIRECT);
4215 if (temp != blocks)
4216 emit_move_insn (blocks, temp);
4217
4218 emit_cmp_and_jump_insns (blocks, const0_rtx,
4219 EQ, NULL_RTX, mode, 1, loop_end_label);
4220
4221 emit_label (loop_start_label);
4222
4223 if (TARGET_Z10
4224 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 1024))
4225 {
4226 /* Issue a write prefetch for the +4 cache line. */
4227 rtx prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr,
4228 GEN_INT (1024)),
4229 const1_rtx, const0_rtx);
4230 emit_insn (prefetch);
4231 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4232 }
4233
4234 if (val == const0_rtx)
4235 emit_insn (gen_clrmem_short (dst, GEN_INT (255)));
4236 else
4237 emit_insn (gen_movmem_short (dstp1, dst, GEN_INT (255)));
4238 s390_load_address (dst_addr,
4239 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
4240
4241 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4242 OPTAB_DIRECT);
4243 if (temp != blocks)
4244 emit_move_insn (blocks, temp);
4245
4246 emit_cmp_and_jump_insns (blocks, const0_rtx,
4247 EQ, NULL_RTX, mode, 1, loop_end_label);
4248
4249 emit_jump (loop_start_label);
4250 emit_label (loop_end_label);
4251
4252 if (val == const0_rtx)
4253 emit_insn (gen_clrmem_short (dst, convert_to_mode (Pmode, count, 1)));
4254 else
4255 emit_insn (gen_movmem_short (dstp1, dst, convert_to_mode (Pmode, count, 1)));
4256 emit_label (end_label);
4257 }
4258 }
4259
4260 /* Emit code to compare LEN bytes at OP0 with those at OP1,
4261 and return the result in TARGET. */
4262
4263 void
4264 s390_expand_cmpmem (rtx target, rtx op0, rtx op1, rtx len)
4265 {
4266 rtx ccreg = gen_rtx_REG (CCUmode, CC_REGNUM);
4267 rtx tmp;
4268
4269 /* As the result of CMPINT is inverted compared to what we need,
4270 we have to swap the operands. */
4271 tmp = op0; op0 = op1; op1 = tmp;
4272
4273 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
4274 {
4275 if (INTVAL (len) > 0)
4276 {
4277 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (INTVAL (len) - 1)));
4278 emit_insn (gen_cmpint (target, ccreg));
4279 }
4280 else
4281 emit_move_insn (target, const0_rtx);
4282 }
4283 else if (TARGET_MVCLE)
4284 {
4285 emit_insn (gen_cmpmem_long (op0, op1, convert_to_mode (Pmode, len, 1)));
4286 emit_insn (gen_cmpint (target, ccreg));
4287 }
4288 else
4289 {
4290 rtx addr0, addr1, count, blocks, temp;
4291 rtx loop_start_label = gen_label_rtx ();
4292 rtx loop_end_label = gen_label_rtx ();
4293 rtx end_label = gen_label_rtx ();
4294 enum machine_mode mode;
4295
4296 mode = GET_MODE (len);
4297 if (mode == VOIDmode)
4298 mode = Pmode;
4299
4300 addr0 = gen_reg_rtx (Pmode);
4301 addr1 = gen_reg_rtx (Pmode);
4302 count = gen_reg_rtx (mode);
4303 blocks = gen_reg_rtx (mode);
4304
4305 convert_move (count, len, 1);
4306 emit_cmp_and_jump_insns (count, const0_rtx,
4307 EQ, NULL_RTX, mode, 1, end_label);
4308
4309 emit_move_insn (addr0, force_operand (XEXP (op0, 0), NULL_RTX));
4310 emit_move_insn (addr1, force_operand (XEXP (op1, 0), NULL_RTX));
4311 op0 = change_address (op0, VOIDmode, addr0);
4312 op1 = change_address (op1, VOIDmode, addr1);
4313
4314 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4315 OPTAB_DIRECT);
4316 if (temp != count)
4317 emit_move_insn (count, temp);
4318
4319 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4320 OPTAB_DIRECT);
4321 if (temp != blocks)
4322 emit_move_insn (blocks, temp);
4323
4324 emit_cmp_and_jump_insns (blocks, const0_rtx,
4325 EQ, NULL_RTX, mode, 1, loop_end_label);
4326
4327 emit_label (loop_start_label);
4328
4329 if (TARGET_Z10
4330 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 512))
4331 {
4332 rtx prefetch;
4333
4334 /* Issue a read prefetch for the +2 cache line of operand 1. */
4335 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr0, GEN_INT (512)),
4336 const0_rtx, const0_rtx);
4337 emit_insn (prefetch);
4338 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4339
4340 /* Issue a read prefetch for the +2 cache line of operand 2. */
4341 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr1, GEN_INT (512)),
4342 const0_rtx, const0_rtx);
4343 emit_insn (prefetch);
4344 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4345 }
4346
4347 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (255)));
4348 temp = gen_rtx_NE (VOIDmode, ccreg, const0_rtx);
4349 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
4350 gen_rtx_LABEL_REF (VOIDmode, end_label), pc_rtx);
4351 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
4352 emit_jump_insn (temp);
4353
4354 s390_load_address (addr0,
4355 gen_rtx_PLUS (Pmode, addr0, GEN_INT (256)));
4356 s390_load_address (addr1,
4357 gen_rtx_PLUS (Pmode, addr1, GEN_INT (256)));
4358
4359 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4360 OPTAB_DIRECT);
4361 if (temp != blocks)
4362 emit_move_insn (blocks, temp);
4363
4364 emit_cmp_and_jump_insns (blocks, const0_rtx,
4365 EQ, NULL_RTX, mode, 1, loop_end_label);
4366
4367 emit_jump (loop_start_label);
4368 emit_label (loop_end_label);
4369
4370 emit_insn (gen_cmpmem_short (op0, op1,
4371 convert_to_mode (Pmode, count, 1)));
4372 emit_label (end_label);
4373
4374 emit_insn (gen_cmpint (target, ccreg));
4375 }
4376 }
4377
4378
4379 /* Expand conditional increment or decrement using alc/slb instructions.
4380 Should generate code setting DST to either SRC or SRC + INCREMENT,
4381 depending on the result of the comparison CMP_OP0 CMP_CODE CMP_OP1.
4382 Returns true if successful, false otherwise.
4383
4384 That makes it possible to implement some if-constructs without jumps e.g.:
4385 (borrow = CC0 | CC1 and carry = CC2 | CC3)
4386 unsigned int a, b, c;
4387 if (a < b) c++; -> CCU b > a -> CC2; c += carry;
4388 if (a < b) c--; -> CCL3 a - b -> borrow; c -= borrow;
4389 if (a <= b) c++; -> CCL3 b - a -> borrow; c += carry;
4390 if (a <= b) c--; -> CCU a <= b -> borrow; c -= borrow;
4391
4392 Checks for EQ and NE with a nonzero value need an additional xor e.g.:
4393 if (a == b) c++; -> CCL3 a ^= b; 0 - a -> borrow; c += carry;
4394 if (a == b) c--; -> CCU a ^= b; a <= 0 -> CC0 | CC1; c -= borrow;
4395 if (a != b) c++; -> CCU a ^= b; a > 0 -> CC2; c += carry;
4396 if (a != b) c--; -> CCL3 a ^= b; 0 - a -> borrow; c -= borrow; */
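/* For illustration, consider "if (a < b) c++;" with unsigned SImode
   operands already in registers: the LTU comparison is canonicalized
   below by swapping the operands into b >u a (CCUmode), and the ALC
   pattern then computes c = c + 0 + carry, where the carry is 1
   exactly when b >u a, i.e. exactly when the increment should
   happen.  */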
4397
4398 bool
4399 s390_expand_addcc (enum rtx_code cmp_code, rtx cmp_op0, rtx cmp_op1,
4400 rtx dst, rtx src, rtx increment)
4401 {
4402 enum machine_mode cmp_mode;
4403 enum machine_mode cc_mode;
4404 rtx op_res;
4405 rtx insn;
4406 rtvec p;
4407 int ret;
4408
4409 if ((GET_MODE (cmp_op0) == SImode || GET_MODE (cmp_op0) == VOIDmode)
4410 && (GET_MODE (cmp_op1) == SImode || GET_MODE (cmp_op1) == VOIDmode))
4411 cmp_mode = SImode;
4412 else if ((GET_MODE (cmp_op0) == DImode || GET_MODE (cmp_op0) == VOIDmode)
4413 && (GET_MODE (cmp_op1) == DImode || GET_MODE (cmp_op1) == VOIDmode))
4414 cmp_mode = DImode;
4415 else
4416 return false;
4417
4418 /* Try ADD LOGICAL WITH CARRY. */
4419 if (increment == const1_rtx)
4420 {
4421 /* Determine CC mode to use. */
4422 if (cmp_code == EQ || cmp_code == NE)
4423 {
4424 if (cmp_op1 != const0_rtx)
4425 {
4426 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
4427 NULL_RTX, 0, OPTAB_WIDEN);
4428 cmp_op1 = const0_rtx;
4429 }
4430
4431 cmp_code = cmp_code == EQ ? LEU : GTU;
4432 }
4433
4434 if (cmp_code == LTU || cmp_code == LEU)
4435 {
4436 rtx tem = cmp_op0;
4437 cmp_op0 = cmp_op1;
4438 cmp_op1 = tem;
4439 cmp_code = swap_condition (cmp_code);
4440 }
4441
4442 switch (cmp_code)
4443 {
4444 case GTU:
4445 cc_mode = CCUmode;
4446 break;
4447
4448 case GEU:
4449 cc_mode = CCL3mode;
4450 break;
4451
4452 default:
4453 return false;
4454 }
4455
4456 /* Emit comparison instruction pattern. */
4457 if (!register_operand (cmp_op0, cmp_mode))
4458 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4459
4460 insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
4461 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4462 /* We use insn_invalid_p here to add clobbers if required. */
4463 ret = insn_invalid_p (emit_insn (insn));
4464 gcc_assert (!ret);
4465
4466 /* Emit ALC instruction pattern. */
4467 op_res = gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4468 gen_rtx_REG (cc_mode, CC_REGNUM),
4469 const0_rtx);
4470
4471 if (src != const0_rtx)
4472 {
4473 if (!register_operand (src, GET_MODE (dst)))
4474 src = force_reg (GET_MODE (dst), src);
4475
4476 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, src);
4477 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, const0_rtx);
4478 }
4479
4480 p = rtvec_alloc (2);
4481 RTVEC_ELT (p, 0) =
4482 gen_rtx_SET (VOIDmode, dst, op_res);
4483 RTVEC_ELT (p, 1) =
4484 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4485 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
4486
4487 return true;
4488 }
4489
4490 /* Try SUBTRACT LOGICAL WITH BORROW. */
4491 if (increment == constm1_rtx)
4492 {
4493 /* Determine CC mode to use. */
4494 if (cmp_code == EQ || cmp_code == NE)
4495 {
4496 if (cmp_op1 != const0_rtx)
4497 {
4498 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
4499 NULL_RTX, 0, OPTAB_WIDEN);
4500 cmp_op1 = const0_rtx;
4501 }
4502
4503 cmp_code = cmp_code == EQ ? LEU : GTU;
4504 }
4505
4506 if (cmp_code == GTU || cmp_code == GEU)
4507 {
4508 rtx tem = cmp_op0;
4509 cmp_op0 = cmp_op1;
4510 cmp_op1 = tem;
4511 cmp_code = swap_condition (cmp_code);
4512 }
4513
4514 switch (cmp_code)
4515 {
4516 case LEU:
4517 cc_mode = CCUmode;
4518 break;
4519
4520 case LTU:
4521 cc_mode = CCL3mode;
4522 break;
4523
4524 default:
4525 return false;
4526 }
4527
4528 /* Emit comparison instruction pattern. */
4529 if (!register_operand (cmp_op0, cmp_mode))
4530 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4531
4532 insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
4533 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4534 /* We use insn_invalid_p here to add clobbers if required. */
4535 ret = insn_invalid_p (emit_insn (insn));
4536 gcc_assert (!ret);
4537
4538 /* Emit SLB instruction pattern. */
4539 if (!register_operand (src, GET_MODE (dst)))
4540 src = force_reg (GET_MODE (dst), src);
4541
4542 op_res = gen_rtx_MINUS (GET_MODE (dst),
4543 gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
4544 gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4545 gen_rtx_REG (cc_mode, CC_REGNUM),
4546 const0_rtx));
4547 p = rtvec_alloc (2);
4548 RTVEC_ELT (p, 0) =
4549 gen_rtx_SET (VOIDmode, dst, op_res);
4550 RTVEC_ELT (p, 1) =
4551 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4552 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
4553
4554 return true;
4555 }
4556
4557 return false;
4558 }
4559
4560 /* Expand code for the insv template. Return true if successful. */
4561
4562 bool
4563 s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
4564 {
4565 int bitsize = INTVAL (op1);
4566 int bitpos = INTVAL (op2);
4567
4568 /* On z10 we can use the risbg instruction to implement insv. */
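  /* Sketch of what this branch emits (a hypothetical example, not a
     required code sequence): for 64-bit DEST and SRC with BITSIZE = 8
     and BITPOS = 8, the parallel below is a
     (set (zero_extract DEST 8 8) SRC) plus a CC clobber, which the
     insv patterns can implement with a single rotate-and-insert
     (risbg) instruction.  */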
4569 if (TARGET_Z10
4570 && ((GET_MODE (dest) == DImode && GET_MODE (src) == DImode)
4571 || (GET_MODE (dest) == SImode && GET_MODE (src) == SImode)))
4572 {
4573 rtx op;
4574 rtx clobber;
4575
4576 op = gen_rtx_SET (GET_MODE(src),
4577 gen_rtx_ZERO_EXTRACT (GET_MODE (dest), dest, op1, op2),
4578 src);
4579 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4580 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
4581
4582 return true;
4583 }
4584
4585 /* We need byte alignment. */
4586 if (bitsize % BITS_PER_UNIT)
4587 return false;
4588
4589 if (bitpos == 0
4590 && memory_operand (dest, VOIDmode)
4591 && (register_operand (src, word_mode)
4592 || const_int_operand (src, VOIDmode)))
4593 {
4594 /* Emit standard pattern if possible. */
4595 enum machine_mode mode = smallest_mode_for_size (bitsize, MODE_INT);
4596 if (GET_MODE_BITSIZE (mode) == bitsize)
4597 emit_move_insn (adjust_address (dest, mode, 0), gen_lowpart (mode, src));
4598
4599 /* (set (ze (mem)) (const_int)). */
4600 else if (const_int_operand (src, VOIDmode))
4601 {
4602 int size = bitsize / BITS_PER_UNIT;
4603 rtx src_mem = adjust_address (force_const_mem (word_mode, src), BLKmode,
4604 GET_MODE_SIZE (word_mode) - size);
4605
4606 dest = adjust_address (dest, BLKmode, 0);
4607 set_mem_size (dest, GEN_INT (size));
4608 s390_expand_movmem (dest, src_mem, GEN_INT (size));
4609 }
4610
4611 /* (set (ze (mem)) (reg)). */
4612 else if (register_operand (src, word_mode))
4613 {
4614 if (bitsize <= GET_MODE_BITSIZE (SImode))
4615 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, op1,
4616 const0_rtx), src);
4617 else
4618 {
4619 /* Emit st,stcmh sequence. */
4620 int stcmh_width = bitsize - GET_MODE_BITSIZE (SImode);
4621 int size = stcmh_width / BITS_PER_UNIT;
4622
4623 emit_move_insn (adjust_address (dest, SImode, size),
4624 gen_lowpart (SImode, src));
4625 set_mem_size (dest, GEN_INT (size));
4626 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, GEN_INT
4627 (stcmh_width), const0_rtx),
4628 gen_rtx_LSHIFTRT (word_mode, src, GEN_INT
4629 (GET_MODE_BITSIZE (SImode))));
4630 }
4631 }
4632 else
4633 return false;
4634
4635 return true;
4636 }
4637
4638 /* (set (ze (reg)) (const_int)). */
4639 if (TARGET_ZARCH
4640 && register_operand (dest, word_mode)
4641 && (bitpos % 16) == 0
4642 && (bitsize % 16) == 0
4643 && const_int_operand (src, VOIDmode))
4644 {
4645 HOST_WIDE_INT val = INTVAL (src);
4646 int regpos = bitpos + bitsize;
4647
4648 while (regpos > bitpos)
4649 {
4650 enum machine_mode putmode;
4651 int putsize;
4652
4653 if (TARGET_EXTIMM && (regpos % 32 == 0) && (regpos >= bitpos + 32))
4654 putmode = SImode;
4655 else
4656 putmode = HImode;
4657
4658 putsize = GET_MODE_BITSIZE (putmode);
4659 regpos -= putsize;
4660 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
4661 GEN_INT (putsize),
4662 GEN_INT (regpos)),
4663 gen_int_mode (val, putmode));
4664 val >>= putsize;
4665 }
4666 gcc_assert (regpos == bitpos);
4667 return true;
4668 }
4669
4670 return false;
4671 }
4672
4673 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic which returns a
4674 register that holds VAL of mode MODE shifted by COUNT bits. */
4675
4676 static inline rtx
4677 s390_expand_mask_and_shift (rtx val, enum machine_mode mode, rtx count)
4678 {
4679 val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
4680 NULL_RTX, 1, OPTAB_DIRECT);
4681 return expand_simple_binop (SImode, ASHIFT, val, count,
4682 NULL_RTX, 1, OPTAB_DIRECT);
4683 }
4684
4685 /* Structure to hold the initial parameters for a compare_and_swap operation
4686 in HImode and QImode. */
4687
4688 struct alignment_context
4689 {
4690 rtx memsi; /* SI aligned memory location. */
4691 rtx shift; /* Bit offset with regard to lsb. */
4692 rtx modemask; /* Mask of the HQImode shifted by SHIFT bits. */
4693 rtx modemaski; /* ~modemask */
4694 bool aligned; /* True if memory is aligned, false otherwise. */
4695 };
4696
4697 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize
4698 structure AC for transparent simplification, if the memory alignment is known
4699 to be at least 32 bits. MEM is the memory location for the actual operation
4700 and MODE its mode. */
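/* Worked example for the unaligned path (the address is invented for
   illustration): for a QImode MEM at address 0x1001, the address is
   aligned down to 0x1000, BYTEOFFSET is 1, and SHIFT becomes
   (4 - 1 - 1) * 8 = 16, i.e. on this big-endian target the byte lives in
   bits 16..23 of the containing SImode word, and MODEMASK is
   0x00ff0000.  */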
4701
4702 static void
4703 init_alignment_context (struct alignment_context *ac, rtx mem,
4704 enum machine_mode mode)
4705 {
4706 ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
4707 ac->aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));
4708
4709 if (ac->aligned)
4710 ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned. */
4711 else
4712 {
4713 /* Alignment is unknown. */
4714 rtx byteoffset, addr, align;
4715
4716 /* Force the address into a register. */
4717 addr = force_reg (Pmode, XEXP (mem, 0));
4718
4719 /* Align it to SImode. */
4720 align = expand_simple_binop (Pmode, AND, addr,
4721 GEN_INT (-GET_MODE_SIZE (SImode)),
4722 NULL_RTX, 1, OPTAB_DIRECT);
4723 /* Generate MEM. */
4724 ac->memsi = gen_rtx_MEM (SImode, align);
4725 MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
4726 set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
4727 set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));
4728
4729 /* Calculate shiftcount. */
4730 byteoffset = expand_simple_binop (Pmode, AND, addr,
4731 GEN_INT (GET_MODE_SIZE (SImode) - 1),
4732 NULL_RTX, 1, OPTAB_DIRECT);
4733 /* As we already have some offset, evaluate the remaining distance. */
4734 ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
4735 NULL_RTX, 1, OPTAB_DIRECT);
4736
4737 }
4738 /* Shift is the byte count, but we need the bitcount. */
4739 ac->shift = expand_simple_binop (SImode, MULT, ac->shift, GEN_INT (BITS_PER_UNIT),
4740 NULL_RTX, 1, OPTAB_DIRECT);
4741 /* Calculate masks. */
4742 ac->modemask = expand_simple_binop (SImode, ASHIFT,
4743 GEN_INT (GET_MODE_MASK (mode)), ac->shift,
4744 NULL_RTX, 1, OPTAB_DIRECT);
4745 ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask, NULL_RTX, 1);
4746 }
4747
4748 /* Expand an atomic compare and swap operation for HImode and QImode. MEM is
4749 the memory location, CMP the old value to compare MEM with and NEW_RTX the value
4750 to set if CMP == MEM.
4751 CMP is never in memory for compare_and_swap_cc because
4752 expand_bool_compare_and_swap puts it into a register for later compare. */
4753
4754 void
4755 s390_expand_cs_hqi (enum machine_mode mode, rtx target, rtx mem, rtx cmp, rtx new_rtx)
4756 {
4757 struct alignment_context ac;
4758 rtx cmpv, newv, val, resv, cc;
4759 rtx res = gen_reg_rtx (SImode);
4760 rtx csloop = gen_label_rtx ();
4761 rtx csend = gen_label_rtx ();
4762
4763 gcc_assert (register_operand (target, VOIDmode));
4764 gcc_assert (MEM_P (mem));
4765
4766 init_alignment_context (&ac, mem, mode);
4767
4768 /* Shift the values to the correct bit positions. */
4769 if (!(ac.aligned && MEM_P (cmp)))
4770 cmp = s390_expand_mask_and_shift (cmp, mode, ac.shift);
4771 if (!(ac.aligned && MEM_P (new_rtx)))
4772 new_rtx = s390_expand_mask_and_shift (new_rtx, mode, ac.shift);
4773
4774 /* Load full word. Subsequent loads are performed by CS. */
4775 val = expand_simple_binop (SImode, AND, ac.memsi, ac.modemaski,
4776 NULL_RTX, 1, OPTAB_DIRECT);
4777
4778 /* Start CS loop. */
4779 emit_label (csloop);
4780 /* val = "<mem>00..0<mem>"
4781 * cmp = "00..0<cmp>00..0"
4782 * new = "00..0<new>00..0"
4783 */
4784
4785 /* Patch cmp and new with val at correct position. */
4786 if (ac.aligned && MEM_P (cmp))
4787 {
4788 cmpv = force_reg (SImode, val);
4789 store_bit_field (cmpv, GET_MODE_BITSIZE (mode), 0, SImode, cmp);
4790 }
4791 else
4792 cmpv = force_reg (SImode, expand_simple_binop (SImode, IOR, cmp, val,
4793 NULL_RTX, 1, OPTAB_DIRECT));
4794 if (ac.aligned && MEM_P (new_rtx))
4795 {
4796 newv = force_reg (SImode, val);
4797 store_bit_field (newv, GET_MODE_BITSIZE (mode), 0, SImode, new_rtx);
4798 }
4799 else
4800 newv = force_reg (SImode, expand_simple_binop (SImode, IOR, new_rtx, val,
4801 NULL_RTX, 1, OPTAB_DIRECT));
4802
4803 /* Jump to end if we're done (likely?). */
4804 s390_emit_jump (csend, s390_emit_compare_and_swap (EQ, res, ac.memsi,
4805 cmpv, newv));
4806
4807 /* Check for changes outside mode. */
4808 resv = expand_simple_binop (SImode, AND, res, ac.modemaski,
4809 NULL_RTX, 1, OPTAB_DIRECT);
4810 cc = s390_emit_compare (NE, resv, val);
4811 emit_move_insn (val, resv);
4812 /* Loop internal if so. */
4813 s390_emit_jump (csloop, cc);
4814
4815 emit_label (csend);
4816
4817 /* Return the correct part of the bitfield. */
4818 convert_move (target, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
4819 NULL_RTX, 1, OPTAB_DIRECT), 1);
4820 }
4821
4822 /* Expand an atomic operation CODE of mode MODE. MEM is the memory location
4823 and VAL the value to play with. If AFTER is true then store the value
4824 MEM holds after the operation, if AFTER is false then store the value MEM
4825 holds before the operation. If TARGET is zero then discard that value, else
4826 store it to TARGET. */
4827
4828 void
4829 s390_expand_atomic (enum machine_mode mode, enum rtx_code code,
4830 rtx target, rtx mem, rtx val, bool after)
4831 {
4832 struct alignment_context ac;
4833 rtx cmp;
4834 rtx new_rtx = gen_reg_rtx (SImode);
4835 rtx orig = gen_reg_rtx (SImode);
4836 rtx csloop = gen_label_rtx ();
4837
4838 gcc_assert (!target || register_operand (target, VOIDmode));
4839 gcc_assert (MEM_P (mem));
4840
4841 init_alignment_context (&ac, mem, mode);
4842
4843 /* Shift val to the correct bit positions.
4844 Preserve "icm", but prevent "ex icm". */
4845 if (!(ac.aligned && code == SET && MEM_P (val)))
4846 val = s390_expand_mask_and_shift (val, mode, ac.shift);
4847
4848 /* Further preparation insns. */
4849 if (code == PLUS || code == MINUS)
4850 emit_move_insn (orig, val);
4851 else if (code == MULT || code == AND) /* val = "11..1<val>11..1" */
4852 val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
4853 NULL_RTX, 1, OPTAB_DIRECT);
4854
4855 /* Load full word. Subsequent loads are performed by CS. */
4856 cmp = force_reg (SImode, ac.memsi);
4857
4858 /* Start CS loop. */
4859 emit_label (csloop);
4860 emit_move_insn (new_rtx, cmp);
4861
4862 /* Patch new with val at correct position. */
4863 switch (code)
4864 {
4865 case PLUS:
4866 case MINUS:
4867 val = expand_simple_binop (SImode, code, new_rtx, orig,
4868 NULL_RTX, 1, OPTAB_DIRECT);
4869 val = expand_simple_binop (SImode, AND, val, ac.modemask,
4870 NULL_RTX, 1, OPTAB_DIRECT);
4871 /* FALLTHRU */
4872 case SET:
4873 if (ac.aligned && MEM_P (val))
4874 store_bit_field (new_rtx, GET_MODE_BITSIZE (mode), 0, SImode, val);
4875 else
4876 {
4877 new_rtx = expand_simple_binop (SImode, AND, new_rtx, ac.modemaski,
4878 NULL_RTX, 1, OPTAB_DIRECT);
4879 new_rtx = expand_simple_binop (SImode, IOR, new_rtx, val,
4880 NULL_RTX, 1, OPTAB_DIRECT);
4881 }
4882 break;
4883 case AND:
4884 case IOR:
4885 case XOR:
4886 new_rtx = expand_simple_binop (SImode, code, new_rtx, val,
4887 NULL_RTX, 1, OPTAB_DIRECT);
4888 break;
4889 case MULT: /* NAND */
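      /* NAND within the field: VAL was widened above to carry 1s outside
	 the field, so the AND keeps the untouched bits of NEW_RTX intact,
	 and the XOR with the mode mask then flips only the bits inside the
	 field, yielding ~(new & val) there.  */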
4890 new_rtx = expand_simple_binop (SImode, AND, new_rtx, val,
4891 NULL_RTX, 1, OPTAB_DIRECT);
4892 new_rtx = expand_simple_binop (SImode, XOR, new_rtx, ac.modemask,
4893 NULL_RTX, 1, OPTAB_DIRECT);
4894 break;
4895 default:
4896 gcc_unreachable ();
4897 }
4898
4899 s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, cmp,
4900 ac.memsi, cmp, new_rtx));
4901
4902 /* Return the correct part of the bitfield. */
4903 if (target)
4904 convert_move (target, expand_simple_binop (SImode, LSHIFTRT,
4905 after ? new_rtx : cmp, ac.shift,
4906 NULL_RTX, 1, OPTAB_DIRECT), 1);
4907 }
4908
4909 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
4910 We need to emit DTP-relative relocations. */
4911
4912 static void s390_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
4913
4914 static void
4915 s390_output_dwarf_dtprel (FILE *file, int size, rtx x)
4916 {
4917 switch (size)
4918 {
4919 case 4:
4920 fputs ("\t.long\t", file);
4921 break;
4922 case 8:
4923 fputs ("\t.quad\t", file);
4924 break;
4925 default:
4926 gcc_unreachable ();
4927 }
4928 output_addr_const (file, x);
4929 fputs ("@DTPOFF", file);
4930 }
4931
4932 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
4933 /* Implement TARGET_MANGLE_TYPE. */
4934
4935 static const char *
4936 s390_mangle_type (const_tree type)
4937 {
4938 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
4939 && TARGET_LONG_DOUBLE_128)
4940 return "g";
4941
4942 /* For all other types, use normal C++ mangling. */
4943 return NULL;
4944 }
4945 #endif
4946
4947 /* In the name of slightly smaller debug output, and to cater to
4948 general assembler lossage, recognize various UNSPEC sequences
4949 and turn them back into a direct symbol reference. */
4950
4951 static rtx
4952 s390_delegitimize_address (rtx orig_x)
4953 {
4954 rtx x, y;
4955
4956 orig_x = delegitimize_mem_from_attrs (orig_x);
4957 x = orig_x;
4958 if (GET_CODE (x) != MEM)
4959 return orig_x;
4960
4961 x = XEXP (x, 0);
4962 if (GET_CODE (x) == PLUS
4963 && GET_CODE (XEXP (x, 1)) == CONST
4964 && GET_CODE (XEXP (x, 0)) == REG
4965 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
4966 {
4967 y = XEXP (XEXP (x, 1), 0);
4968 if (GET_CODE (y) == UNSPEC
4969 && XINT (y, 1) == UNSPEC_GOT)
4970 return XVECEXP (y, 0, 0);
4971 return orig_x;
4972 }
4973
4974 if (GET_CODE (x) == CONST)
4975 {
4976 y = XEXP (x, 0);
4977 if (GET_CODE (y) == UNSPEC
4978 && XINT (y, 1) == UNSPEC_GOTENT)
4979 return XVECEXP (y, 0, 0);
4980 return orig_x;
4981 }
4982
4983 return orig_x;
4984 }
4985
4986 /* Output operand OP to stdio stream FILE.
4987 OP is an address (register + offset) which is not used to address data;
4988 instead the rightmost bits are interpreted as the value. */
4989
4990 static void
4991 print_shift_count_operand (FILE *file, rtx op)
4992 {
4993 HOST_WIDE_INT offset;
4994 rtx base;
4995
4996 /* Extract base register and offset. */
4997 if (!s390_decompose_shift_count (op, &base, &offset))
4998 gcc_unreachable ();
4999
5000 /* Sanity check. */
5001 if (base)
5002 {
5003 gcc_assert (GET_CODE (base) == REG);
5004 gcc_assert (REGNO (base) < FIRST_PSEUDO_REGISTER);
5005 gcc_assert (REGNO_REG_CLASS (REGNO (base)) == ADDR_REGS);
5006 }
5007
5008 /* Offsets are restricted to twelve bits. */
5009 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset & ((1 << 12) - 1));
5010 if (base)
5011 fprintf (file, "(%s)", reg_names[REGNO (base)]);
5012 }
5013
5014 /* See 'get_some_local_dynamic_name'. */
5015
5016 static int
5017 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
5018 {
5019 rtx x = *px;
5020
5021 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
5022 {
5023 x = get_pool_constant (x);
5024 return for_each_rtx (&x, get_some_local_dynamic_name_1, 0);
5025 }
5026
5027 if (GET_CODE (x) == SYMBOL_REF
5028 && tls_symbolic_operand (x) == TLS_MODEL_LOCAL_DYNAMIC)
5029 {
5030 cfun->machine->some_ld_name = XSTR (x, 0);
5031 return 1;
5032 }
5033
5034 return 0;
5035 }
5036
5037 /* Locate some local-dynamic symbol still in use by this function
5038 so that we can print its name in local-dynamic base patterns. */
5039
5040 static const char *
5041 get_some_local_dynamic_name (void)
5042 {
5043 rtx insn;
5044
5045 if (cfun->machine->some_ld_name)
5046 return cfun->machine->some_ld_name;
5047
5048 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
5049 if (INSN_P (insn)
5050 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
5051 return cfun->machine->some_ld_name;
5052
5053 gcc_unreachable ();
5054 }
5055
5056 /* Output machine-dependent UNSPECs occurring in address constant X
5057 in assembler syntax to stdio stream FILE. Returns true if the
5058 constant X could be recognized, false otherwise. */
5059
5060 bool
5061 s390_output_addr_const_extra (FILE *file, rtx x)
5062 {
5063 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
5064 switch (XINT (x, 1))
5065 {
5066 case UNSPEC_GOTENT:
5067 output_addr_const (file, XVECEXP (x, 0, 0));
5068 fprintf (file, "@GOTENT");
5069 return true;
5070 case UNSPEC_GOT:
5071 output_addr_const (file, XVECEXP (x, 0, 0));
5072 fprintf (file, "@GOT");
5073 return true;
5074 case UNSPEC_GOTOFF:
5075 output_addr_const (file, XVECEXP (x, 0, 0));
5076 fprintf (file, "@GOTOFF");
5077 return true;
5078 case UNSPEC_PLT:
5079 output_addr_const (file, XVECEXP (x, 0, 0));
5080 fprintf (file, "@PLT");
5081 return true;
5082 case UNSPEC_PLTOFF:
5083 output_addr_const (file, XVECEXP (x, 0, 0));
5084 fprintf (file, "@PLTOFF");
5085 return true;
5086 case UNSPEC_TLSGD:
5087 output_addr_const (file, XVECEXP (x, 0, 0));
5088 fprintf (file, "@TLSGD");
5089 return true;
5090 case UNSPEC_TLSLDM:
5091 assemble_name (file, get_some_local_dynamic_name ());
5092 fprintf (file, "@TLSLDM");
5093 return true;
5094 case UNSPEC_DTPOFF:
5095 output_addr_const (file, XVECEXP (x, 0, 0));
5096 fprintf (file, "@DTPOFF");
5097 return true;
5098 case UNSPEC_NTPOFF:
5099 output_addr_const (file, XVECEXP (x, 0, 0));
5100 fprintf (file, "@NTPOFF");
5101 return true;
5102 case UNSPEC_GOTNTPOFF:
5103 output_addr_const (file, XVECEXP (x, 0, 0));
5104 fprintf (file, "@GOTNTPOFF");
5105 return true;
5106 case UNSPEC_INDNTPOFF:
5107 output_addr_const (file, XVECEXP (x, 0, 0));
5108 fprintf (file, "@INDNTPOFF");
5109 return true;
5110 }
5111
5112 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 2)
5113 switch (XINT (x, 1))
5114 {
5115 case UNSPEC_POOL_OFFSET:
5116 x = gen_rtx_MINUS (GET_MODE (x), XVECEXP (x, 0, 0), XVECEXP (x, 0, 1));
5117 output_addr_const (file, x);
5118 return true;
5119 }
5120 return false;
5121 }
5122
5123 /* Output address operand ADDR in assembler syntax to
5124 stdio stream FILE. */
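/* For example (register numbers invented): an address with base %r2,
   index %r3 and displacement 40 is printed as "40(%r3,%r2)"; with no
   index it would be "40(%r2)", and with no displacement a "0" is
   printed in its place.  */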
5125
5126 void
5127 print_operand_address (FILE *file, rtx addr)
5128 {
5129 struct s390_address ad;
5130
5131 if (s390_symref_operand_p (addr, NULL, NULL))
5132 {
5133 if (!TARGET_Z10)
5134 {
5135 error ("symbolic memory references are only supported on z10 or later");
5136 return;
5137 }
5138 output_addr_const (file, addr);
5139 return;
5140 }
5141
5142 if (!s390_decompose_address (addr, &ad)
5143 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5144 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
5145 output_operand_lossage ("cannot decompose address");
5146
5147 if (ad.disp)
5148 output_addr_const (file, ad.disp);
5149 else
5150 fprintf (file, "0");
5151
5152 if (ad.base && ad.indx)
5153 fprintf (file, "(%s,%s)", reg_names[REGNO (ad.indx)],
5154 reg_names[REGNO (ad.base)]);
5155 else if (ad.base)
5156 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
5157 }
5158
5159 /* Output operand X in assembler syntax to stdio stream FILE.
5160 CODE specifies the format flag. The following format flags
5161 are recognized:
5162
5163 'C': print opcode suffix for branch condition.
5164 'D': print opcode suffix for inverse branch condition.
5165 'E': print opcode suffix for branch on index instruction.
5166 'J': print tls_load/tls_gdcall/tls_ldcall suffix.
5167 'G': print the size of the operand in bytes.
5168 'O': print only the displacement of a memory reference.
5169 'R': print only the base register of a memory reference.
5170 'S': print S-type memory reference (base+displacement).
5171 'N': print the second word of a DImode operand.
5172 'M': print the second word of a TImode operand.
5173 'Y': print shift count operand.
5174
5175 'b': print integer X as if it's an unsigned byte.
5176 'c': print integer X as if it's a signed byte.
5177 'x': print integer X as if it's an unsigned halfword.
5178 'h': print integer X as if it's a signed halfword.
5179 'i': print the first nonzero HImode part of X.
5180 'j': print the first HImode part unequal to -1 of X.
5181 'k': print the first nonzero SImode part of X.
5182 'm': print the first SImode part unequal to -1 of X.
5183 'o': print integer X as if it's an unsigned 32bit word. */
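/* A few examples of the integer modifiers (values invented): with X = -1,
   'b' prints 255, 'x' prints 65535 and 'c' prints -1; with X = 0xffff,
   'h' prints -1 because the halfword is sign-extended.  */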
5184
5185 void
5186 print_operand (FILE *file, rtx x, int code)
5187 {
5188 switch (code)
5189 {
5190 case 'C':
5191 fprintf (file, s390_branch_condition_mnemonic (x, FALSE));
5192 return;
5193
5194 case 'D':
5195 fprintf (file, s390_branch_condition_mnemonic (x, TRUE));
5196 return;
5197
5198 case 'E':
5199 if (GET_CODE (x) == LE)
5200 fprintf (file, "l");
5201 else if (GET_CODE (x) == GT)
5202 fprintf (file, "h");
5203 else
5204 error ("invalid comparison operator for 'E' output modifier");
5205 return;
5206
5207 case 'J':
5208 if (GET_CODE (x) == SYMBOL_REF)
5209 {
5210 fprintf (file, "%s", ":tls_load:");
5211 output_addr_const (file, x);
5212 }
5213 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
5214 {
5215 fprintf (file, "%s", ":tls_gdcall:");
5216 output_addr_const (file, XVECEXP (x, 0, 0));
5217 }
5218 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM)
5219 {
5220 fprintf (file, "%s", ":tls_ldcall:");
5221 assemble_name (file, get_some_local_dynamic_name ());
5222 }
5223 else
5224 error ("invalid reference for 'J' output modifier");
5225 return;
5226
5227 case 'G':
5228 fprintf (file, "%u", GET_MODE_SIZE (GET_MODE (x)));
5229 return;
5230
5231 case 'O':
5232 {
5233 struct s390_address ad;
5234 int ret;
5235
5236 if (!MEM_P (x))
5237 {
5238 error ("memory reference expected for 'O' output modifier");
5239 return;
5240 }
5241
5242 ret = s390_decompose_address (XEXP (x, 0), &ad);
5243
5244 if (!ret
5245 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5246 || ad.indx)
5247 {
5248 error ("invalid address for 'O' output modifier");
5249 return;
5250 }
5251
5252 if (ad.disp)
5253 output_addr_const (file, ad.disp);
5254 else
5255 fprintf (file, "0");
5256 }
5257 return;
5258
5259 case 'R':
5260 {
5261 struct s390_address ad;
5262 int ret;
5263
5264 if (!MEM_P (x))
5265 {
5266 error ("memory reference expected for 'R' output modifier");
5267 return;
5268 }
5269
5270 ret = s390_decompose_address (XEXP (x, 0), &ad);
5271
5272 if (!ret
5273 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5274 || ad.indx)
5275 {
5276 error ("invalid address for 'R' output modifier");
5277 return;
5278 }
5279
5280 if (ad.base)
5281 fprintf (file, "%s", reg_names[REGNO (ad.base)]);
5282 else
5283 fprintf (file, "0");
5284 }
5285 return;
5286
5287 case 'S':
5288 {
5289 struct s390_address ad;
5290 int ret;
5291
5292 if (!MEM_P (x))
5293 {
5294 error ("memory reference expected for 'S' output modifier");
5295 return;
5296 }
5297 ret = s390_decompose_address (XEXP (x, 0), &ad);
5298
5299 if (!ret
5300 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5301 || ad.indx)
5302 {
5303 error ("invalid address for 'S' output modifier");
5304 return;
5305 }
5306
5307 if (ad.disp)
5308 output_addr_const (file, ad.disp);
5309 else
5310 fprintf (file, "0");
5311
5312 if (ad.base)
5313 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
5314 }
5315 return;
5316
5317 case 'N':
5318 if (GET_CODE (x) == REG)
5319 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
5320 else if (GET_CODE (x) == MEM)
5321 x = change_address (x, VOIDmode, plus_constant (XEXP (x, 0), 4));
5322 else
5323 error ("register or memory expression expected for 'N' output modifier");
5324 break;
5325
5326 case 'M':
5327 if (GET_CODE (x) == REG)
5328 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
5329 else if (GET_CODE (x) == MEM)
5330 x = change_address (x, VOIDmode, plus_constant (XEXP (x, 0), 8));
5331 else
5332 error ("register or memory expression expected for 'M' output modifier");
5333 break;
5334
5335 case 'Y':
5336 print_shift_count_operand (file, x);
5337 return;
5338 }
5339
5340 switch (GET_CODE (x))
5341 {
5342 case REG:
5343 fprintf (file, "%s", reg_names[REGNO (x)]);
5344 break;
5345
5346 case MEM:
5347 output_address (XEXP (x, 0));
5348 break;
5349
5350 case CONST:
5351 case CODE_LABEL:
5352 case LABEL_REF:
5353 case SYMBOL_REF:
5354 output_addr_const (file, x);
5355 break;
5356
5357 case CONST_INT:
5358 if (code == 'b')
5359 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xff);
5360 else if (code == 'c')
5361 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ((INTVAL (x) & 0xff) ^ 0x80) - 0x80);
5362 else if (code == 'x')
5363 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xffff);
5364 else if (code == 'h')
5365 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
5366 else if (code == 'i')
5367 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5368 s390_extract_part (x, HImode, 0));
5369 else if (code == 'j')
5370 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5371 s390_extract_part (x, HImode, -1));
5372 else if (code == 'k')
5373 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5374 s390_extract_part (x, SImode, 0));
5375 else if (code == 'm')
5376 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5377 s390_extract_part (x, SImode, -1));
5378 else if (code == 'o')
5379 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xffffffff);
5380 else
5381 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
5382 break;
5383
5384 case CONST_DOUBLE:
5385 gcc_assert (GET_MODE (x) == VOIDmode);
5386 if (code == 'b')
5387 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xff);
5388 else if (code == 'x')
5389 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xffff);
5390 else if (code == 'h')
5391 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ((CONST_DOUBLE_LOW (x) & 0xffff) ^ 0x8000) - 0x8000);
5392 else
5393 {
5394 if (code == 0)
5395 error ("invalid constant - try using an output modifier");
5396 else
5397 error ("invalid constant for output modifier '%c'", code);
5398 }
5399 break;
5400
5401 default:
5402 if (code == 0)
5403 error ("invalid expression - try using an output modifier");
5404 else
5405 error ("invalid expression for output modifier '%c'", code);
5406 break;
5407 }
5408 }
5409
5410 /* Target hook for assembling integer objects. We need to define it
5411 here to work around a bug in some versions of GAS, which couldn't
5412 handle values smaller than INT_MIN when printed in decimal. */
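/* For instance (value invented), INTVAL (x) == -2147483649 would be
   emitted as "\t.quad\t0xffffffff7fffffff" via the hex path below
   instead of being printed in decimal.  */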
5413
5414 static bool
5415 s390_assemble_integer (rtx x, unsigned int size, int aligned_p)
5416 {
5417 if (size == 8 && aligned_p
5418 && GET_CODE (x) == CONST_INT && INTVAL (x) < INT_MIN)
5419 {
5420 fprintf (asm_out_file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n",
5421 INTVAL (x));
5422 return true;
5423 }
5424 return default_assemble_integer (x, size, aligned_p);
5425 }
5426
5427 /* Returns true if register REGNO is used for forming
5428 a memory address in expression X. */
5429
5430 static bool
5431 reg_used_in_mem_p (int regno, rtx x)
5432 {
5433 enum rtx_code code = GET_CODE (x);
5434 int i, j;
5435 const char *fmt;
5436
5437 if (code == MEM)
5438 {
5439 if (refers_to_regno_p (regno, regno+1,
5440 XEXP (x, 0), 0))
5441 return true;
5442 }
5443 else if (code == SET
5444 && GET_CODE (SET_DEST (x)) == PC)
5445 {
5446 if (refers_to_regno_p (regno, regno+1,
5447 SET_SRC (x), 0))
5448 return true;
5449 }
5450
5451 fmt = GET_RTX_FORMAT (code);
5452 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5453 {
5454 if (fmt[i] == 'e'
5455 && reg_used_in_mem_p (regno, XEXP (x, i)))
5456 return true;
5457
5458 else if (fmt[i] == 'E')
5459 for (j = 0; j < XVECLEN (x, i); j++)
5460 if (reg_used_in_mem_p (regno, XVECEXP (x, i, j)))
5461 return true;
5462 }
5463 return false;
5464 }
5465
5466 /* Returns true if expression DEP_RTX sets an address register
5467 used by instruction INSN to address memory. */
5468
5469 static bool
5470 addr_generation_dependency_p (rtx dep_rtx, rtx insn)
5471 {
5472 rtx target, pat;
5473
5474 if (GET_CODE (dep_rtx) == INSN)
5475 dep_rtx = PATTERN (dep_rtx);
5476
5477 if (GET_CODE (dep_rtx) == SET)
5478 {
5479 target = SET_DEST (dep_rtx);
5480 if (GET_CODE (target) == STRICT_LOW_PART)
5481 target = XEXP (target, 0);
5482 while (GET_CODE (target) == SUBREG)
5483 target = SUBREG_REG (target);
5484
5485 if (GET_CODE (target) == REG)
5486 {
5487 int regno = REGNO (target);
5488
5489 if (s390_safe_attr_type (insn) == TYPE_LA)
5490 {
5491 pat = PATTERN (insn);
5492 if (GET_CODE (pat) == PARALLEL)
5493 {
5494 gcc_assert (XVECLEN (pat, 0) == 2);
5495 pat = XVECEXP (pat, 0, 0);
5496 }
5497 gcc_assert (GET_CODE (pat) == SET);
5498 return refers_to_regno_p (regno, regno+1, SET_SRC (pat), 0);
5499 }
5500 else if (get_attr_atype (insn) == ATYPE_AGEN)
5501 return reg_used_in_mem_p (regno, PATTERN (insn));
5502 }
5503 }
5504 return false;
5505 }
5506
5507 /* Return 1 if dep_insn sets a register used by insn in the agen unit. */
5508
5509 int
5510 s390_agen_dep_p (rtx dep_insn, rtx insn)
5511 {
5512 rtx dep_rtx = PATTERN (dep_insn);
5513 int i;
5514
5515 if (GET_CODE (dep_rtx) == SET
5516 && addr_generation_dependency_p (dep_rtx, insn))
5517 return 1;
5518 else if (GET_CODE (dep_rtx) == PARALLEL)
5519 {
5520 for (i = 0; i < XVECLEN (dep_rtx, 0); i++)
5521 {
5522 if (addr_generation_dependency_p (XVECEXP (dep_rtx, 0, i), insn))
5523 return 1;
5524 }
5525 }
5526 return 0;
5527 }
5528
5529
5530 /* A C statement (sans semicolon) to update the integer scheduling priority
5531 INSN_PRIORITY (INSN). Increase the priority to execute the INSN earlier,
5532 reduce the priority to execute INSN later. Do not define this macro if
5533 you do not need to adjust the scheduling priorities of insns.
5534
5535 A STD instruction should be scheduled earlier,
5536 in order to use the bypass. */
5537 static int
5538 s390_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
5539 {
5540 if (! INSN_P (insn))
5541 return priority;
5542
5543 if (s390_tune != PROCESSOR_2084_Z990
5544 && s390_tune != PROCESSOR_2094_Z9_109
5545 && s390_tune != PROCESSOR_2097_Z10
5546 && s390_tune != PROCESSOR_2817_Z196)
5547 return priority;
5548
5549 switch (s390_safe_attr_type (insn))
5550 {
5551 case TYPE_FSTOREDF:
5552 case TYPE_FSTORESF:
5553 priority = priority << 3;
5554 break;
5555 case TYPE_STORE:
5556 case TYPE_STM:
5557 priority = priority << 1;
5558 break;
5559 default:
5560 break;
5561 }
5562 return priority;
5563 }
5564
5565
5566 /* The number of instructions that can be issued per cycle. */
5567
5568 static int
5569 s390_issue_rate (void)
5570 {
5571 switch (s390_tune)
5572 {
5573 case PROCESSOR_2084_Z990:
5574 case PROCESSOR_2094_Z9_109:
5575 case PROCESSOR_2817_Z196:
5576 return 3;
5577 case PROCESSOR_2097_Z10:
5578 return 2;
5579 default:
5580 return 1;
5581 }
5582 }
5583
5584 static int
5585 s390_first_cycle_multipass_dfa_lookahead (void)
5586 {
5587 return 4;
5588 }
5589
5590 /* Annotate every literal pool reference in X by an UNSPEC_LTREF expression.
5591 Fix up MEMs as required. */
5592
5593 static void
5594 annotate_constant_pool_refs (rtx *x)
5595 {
5596 int i, j;
5597 const char *fmt;
5598
5599 gcc_assert (GET_CODE (*x) != SYMBOL_REF
5600 || !CONSTANT_POOL_ADDRESS_P (*x));
5601
5602 /* Literal pool references can only occur inside a MEM ... */
5603 if (GET_CODE (*x) == MEM)
5604 {
5605 rtx memref = XEXP (*x, 0);
5606
5607 if (GET_CODE (memref) == SYMBOL_REF
5608 && CONSTANT_POOL_ADDRESS_P (memref))
5609 {
5610 rtx base = cfun->machine->base_reg;
5611 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, memref, base),
5612 UNSPEC_LTREF);
5613
5614 *x = replace_equiv_address (*x, addr);
5615 return;
5616 }
5617
5618 if (GET_CODE (memref) == CONST
5619 && GET_CODE (XEXP (memref, 0)) == PLUS
5620 && GET_CODE (XEXP (XEXP (memref, 0), 1)) == CONST_INT
5621 && GET_CODE (XEXP (XEXP (memref, 0), 0)) == SYMBOL_REF
5622 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (memref, 0), 0)))
5623 {
5624 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (memref, 0), 1));
5625 rtx sym = XEXP (XEXP (memref, 0), 0);
5626 rtx base = cfun->machine->base_reg;
5627 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5628 UNSPEC_LTREF);
5629
5630 *x = replace_equiv_address (*x, plus_constant (addr, off));
5631 return;
5632 }
5633 }
5634
5635 /* ... or a load-address type pattern. */
5636 if (GET_CODE (*x) == SET)
5637 {
5638 rtx addrref = SET_SRC (*x);
5639
5640 if (GET_CODE (addrref) == SYMBOL_REF
5641 && CONSTANT_POOL_ADDRESS_P (addrref))
5642 {
5643 rtx base = cfun->machine->base_reg;
5644 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addrref, base),
5645 UNSPEC_LTREF);
5646
5647 SET_SRC (*x) = addr;
5648 return;
5649 }
5650
5651 if (GET_CODE (addrref) == CONST
5652 && GET_CODE (XEXP (addrref, 0)) == PLUS
5653 && GET_CODE (XEXP (XEXP (addrref, 0), 1)) == CONST_INT
5654 && GET_CODE (XEXP (XEXP (addrref, 0), 0)) == SYMBOL_REF
5655 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (addrref, 0), 0)))
5656 {
5657 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (addrref, 0), 1));
5658 rtx sym = XEXP (XEXP (addrref, 0), 0);
5659 rtx base = cfun->machine->base_reg;
5660 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5661 UNSPEC_LTREF);
5662
5663 SET_SRC (*x) = plus_constant (addr, off);
5664 return;
5665 }
5666 }
5667
5668 /* Annotate LTREL_BASE as well. */
5669 if (GET_CODE (*x) == UNSPEC
5670 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
5671 {
5672 rtx base = cfun->machine->base_reg;
5673 *x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XVECEXP (*x, 0, 0), base),
5674 UNSPEC_LTREL_BASE);
5675 return;
5676 }
5677
5678 fmt = GET_RTX_FORMAT (GET_CODE (*x));
5679 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
5680 {
5681 if (fmt[i] == 'e')
5682 {
5683 annotate_constant_pool_refs (&XEXP (*x, i));
5684 }
5685 else if (fmt[i] == 'E')
5686 {
5687 for (j = 0; j < XVECLEN (*x, i); j++)
5688 annotate_constant_pool_refs (&XVECEXP (*x, i, j));
5689 }
5690 }
5691 }
5692
5693 /* Split all branches that exceed the maximum distance.
5694 Returns true if this created a new literal pool entry. */
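/* Rough shape of the transformation for the non-PIC case (register and
   pool entry names are schematic): the out-of-range label is placed in
   the literal pool, an insn loading it into the return register is
   emitted in front of the branch, roughly
     (set (reg %r14) (mem (symbol_ref <pool entry>)))
   and the branch's label operand is then replaced by that register,
   turning it into an indirect jump.  */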
5695
5696 static int
5697 s390_split_branches (void)
5698 {
5699 rtx temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
5700 int new_literal = 0, ret;
5701 rtx insn, pat, tmp, target;
5702 rtx *label;
5703
5704 /* We need correct insn addresses. */
5705
5706 shorten_branches (get_insns ());
5707
5708 /* Find all branches that exceed 64KB, and split them. */
5709
5710 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5711 {
5712 if (GET_CODE (insn) != JUMP_INSN)
5713 continue;
5714
5715 pat = PATTERN (insn);
5716 if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 2)
5717 pat = XVECEXP (pat, 0, 0);
5718 if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
5719 continue;
5720
5721 if (GET_CODE (SET_SRC (pat)) == LABEL_REF)
5722 {
5723 label = &SET_SRC (pat);
5724 }
5725 else if (GET_CODE (SET_SRC (pat)) == IF_THEN_ELSE)
5726 {
5727 if (GET_CODE (XEXP (SET_SRC (pat), 1)) == LABEL_REF)
5728 label = &XEXP (SET_SRC (pat), 1);
5729 else if (GET_CODE (XEXP (SET_SRC (pat), 2)) == LABEL_REF)
5730 label = &XEXP (SET_SRC (pat), 2);
5731 else
5732 continue;
5733 }
5734 else
5735 continue;
5736
5737 if (get_attr_length (insn) <= 4)
5738 continue;
5739
5740 /* We are going to use the return register as a scratch register;
5741 make sure it will be saved/restored by the prologue/epilogue. */
5742 cfun_frame_layout.save_return_addr_p = 1;
5743
5744 if (!flag_pic)
5745 {
5746 new_literal = 1;
5747 tmp = force_const_mem (Pmode, *label);
5748 tmp = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, tmp), insn);
5749 INSN_ADDRESSES_NEW (tmp, -1);
5750 annotate_constant_pool_refs (&PATTERN (tmp));
5751
5752 target = temp_reg;
5753 }
5754 else
5755 {
5756 new_literal = 1;
5757 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, *label),
5758 UNSPEC_LTREL_OFFSET);
5759 target = gen_rtx_CONST (Pmode, target);
5760 target = force_const_mem (Pmode, target);
5761 tmp = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, target), insn);
5762 INSN_ADDRESSES_NEW (tmp, -1);
5763 annotate_constant_pool_refs (&PATTERN (tmp));
5764
5765 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XEXP (target, 0),
5766 cfun->machine->base_reg),
5767 UNSPEC_LTREL_BASE);
5768 target = gen_rtx_PLUS (Pmode, temp_reg, target);
5769 }
5770
5771 ret = validate_change (insn, label, target, 0);
5772 gcc_assert (ret);
5773 }
5774
5775 return new_literal;
5776 }
5777
5778
5779 /* Find an annotated literal pool symbol referenced in RTX X,
5780 and store it at REF. Will abort if X contains references to
5781 more than one such pool symbol; multiple references to the same
5782 symbol are allowed, however.
5783
5784 The rtx pointed to by REF must be initialized to NULL_RTX
5785 by the caller before calling this routine. */
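/* Note: as built by annotate_constant_pool_refs above, an annotated
   reference has roughly the shape
     (unspec [(symbol_ref ...) (reg <base>)] UNSPEC_LTREF)
   possibly wrapped in a (plus ... (const_int ...)); it is the
   SYMBOL_REF operand of that UNSPEC which is extracted into *REF.  */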
5786
5787 static void
5788 find_constant_pool_ref (rtx x, rtx *ref)
5789 {
5790 int i, j;
5791 const char *fmt;
5792
5793 /* Ignore LTREL_BASE references. */
5794 if (GET_CODE (x) == UNSPEC
5795 && XINT (x, 1) == UNSPEC_LTREL_BASE)
5796 return;
5797 /* Likewise POOL_ENTRY insns. */
5798 if (GET_CODE (x) == UNSPEC_VOLATILE
5799 && XINT (x, 1) == UNSPECV_POOL_ENTRY)
5800 return;
5801
5802 gcc_assert (GET_CODE (x) != SYMBOL_REF
5803 || !CONSTANT_POOL_ADDRESS_P (x));
5804
5805 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_LTREF)
5806 {
5807 rtx sym = XVECEXP (x, 0, 0);
5808 gcc_assert (GET_CODE (sym) == SYMBOL_REF
5809 && CONSTANT_POOL_ADDRESS_P (sym));
5810
5811 if (*ref == NULL_RTX)
5812 *ref = sym;
5813 else
5814 gcc_assert (*ref == sym);
5815
5816 return;
5817 }
5818
5819 fmt = GET_RTX_FORMAT (GET_CODE (x));
5820 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5821 {
5822 if (fmt[i] == 'e')
5823 {
5824 find_constant_pool_ref (XEXP (x, i), ref);
5825 }
5826 else if (fmt[i] == 'E')
5827 {
5828 for (j = 0; j < XVECLEN (x, i); j++)
5829 find_constant_pool_ref (XVECEXP (x, i, j), ref);
5830 }
5831 }
5832 }
5833
5834 /* Replace every reference to the annotated literal pool
5835 symbol REF in X by its base plus OFFSET. */
5836
5837 static void
5838 replace_constant_pool_ref (rtx *x, rtx ref, rtx offset)
5839 {
5840 int i, j;
5841 const char *fmt;
5842
5843 gcc_assert (*x != ref);
5844
5845 if (GET_CODE (*x) == UNSPEC
5846 && XINT (*x, 1) == UNSPEC_LTREF
5847 && XVECEXP (*x, 0, 0) == ref)
5848 {
5849 *x = gen_rtx_PLUS (Pmode, XVECEXP (*x, 0, 1), offset);
5850 return;
5851 }
5852
5853 if (GET_CODE (*x) == PLUS
5854 && GET_CODE (XEXP (*x, 1)) == CONST_INT
5855 && GET_CODE (XEXP (*x, 0)) == UNSPEC
5856 && XINT (XEXP (*x, 0), 1) == UNSPEC_LTREF
5857 && XVECEXP (XEXP (*x, 0), 0, 0) == ref)
5858 {
5859 rtx addr = gen_rtx_PLUS (Pmode, XVECEXP (XEXP (*x, 0), 0, 1), offset);
5860 *x = plus_constant (addr, INTVAL (XEXP (*x, 1)));
5861 return;
5862 }
5863
5864 fmt = GET_RTX_FORMAT (GET_CODE (*x));
5865 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
5866 {
5867 if (fmt[i] == 'e')
5868 {
5869 replace_constant_pool_ref (&XEXP (*x, i), ref, offset);
5870 }
5871 else if (fmt[i] == 'E')
5872 {
5873 for (j = 0; j < XVECLEN (*x, i); j++)
5874 replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, offset);
5875 }
5876 }
5877 }
5878
5879 /* Check whether X contains an UNSPEC_LTREL_BASE.
5880 Return its constant pool symbol if found, NULL_RTX otherwise. */
5881
5882 static rtx
5883 find_ltrel_base (rtx x)
5884 {
5885 int i, j;
5886 const char *fmt;
5887
5888 if (GET_CODE (x) == UNSPEC
5889 && XINT (x, 1) == UNSPEC_LTREL_BASE)
5890 return XVECEXP (x, 0, 0);
5891
5892 fmt = GET_RTX_FORMAT (GET_CODE (x));
5893 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5894 {
5895 if (fmt[i] == 'e')
5896 {
5897 rtx fnd = find_ltrel_base (XEXP (x, i));
5898 if (fnd)
5899 return fnd;
5900 }
5901 else if (fmt[i] == 'E')
5902 {
5903 for (j = 0; j < XVECLEN (x, i); j++)
5904 {
5905 rtx fnd = find_ltrel_base (XVECEXP (x, i, j));
5906 if (fnd)
5907 return fnd;
5908 }
5909 }
5910 }
5911
5912 return NULL_RTX;
5913 }
5914
5915 /* Replace any occurrence of UNSPEC_LTREL_BASE in X with its base. */
5916
5917 static void
5918 replace_ltrel_base (rtx *x)
5919 {
5920 int i, j;
5921 const char *fmt;
5922
5923 if (GET_CODE (*x) == UNSPEC
5924 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
5925 {
5926 *x = XVECEXP (*x, 0, 1);
5927 return;
5928 }
5929
5930 fmt = GET_RTX_FORMAT (GET_CODE (*x));
5931 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
5932 {
5933 if (fmt[i] == 'e')
5934 {
5935 replace_ltrel_base (&XEXP (*x, i));
5936 }
5937 else if (fmt[i] == 'E')
5938 {
5939 for (j = 0; j < XVECLEN (*x, i); j++)
5940 replace_ltrel_base (&XVECEXP (*x, i, j));
5941 }
5942 }
5943 }
5944
5945
5946 /* We keep a list of constants which we have to add to internal
5947 constant tables in the middle of large functions. */
5948
5949 #define NR_C_MODES 11
5950 enum machine_mode constant_modes[NR_C_MODES] =
5951 {
5952 TFmode, TImode, TDmode,
5953 DFmode, DImode, DDmode,
5954 SFmode, SImode, SDmode,
5955 HImode,
5956 QImode
5957 };
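/* Note: constant_modes is ordered by decreasing size and alignment
   requirement; s390_dump_pool below relies on this ordering to emit
   every pool entry with sufficient alignment.  */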
5958
5959 struct constant
5960 {
5961 struct constant *next;
5962 rtx value;
5963 rtx label;
5964 };
5965
5966 struct constant_pool
5967 {
5968 struct constant_pool *next;
5969 rtx first_insn;
5970 rtx pool_insn;
5971 bitmap insns;
5972 rtx emit_pool_after;
5973
5974 struct constant *constants[NR_C_MODES];
5975 struct constant *execute;
5976 rtx label;
5977 int size;
5978 };
5979
5980 /* Allocate new constant_pool structure. */
5981
5982 static struct constant_pool *
5983 s390_alloc_pool (void)
5984 {
5985 struct constant_pool *pool;
5986 int i;
5987
5988 pool = (struct constant_pool *) xmalloc (sizeof *pool);
5989 pool->next = NULL;
5990 for (i = 0; i < NR_C_MODES; i++)
5991 pool->constants[i] = NULL;
5992
5993 pool->execute = NULL;
5994 pool->label = gen_label_rtx ();
5995 pool->first_insn = NULL_RTX;
5996 pool->pool_insn = NULL_RTX;
5997 pool->insns = BITMAP_ALLOC (NULL);
5998 pool->size = 0;
5999 pool->emit_pool_after = NULL_RTX;
6000
6001 return pool;
6002 }
6003
6004 /* Create new constant pool covering instructions starting at INSN
6005 and chain it to the end of POOL_LIST. */
6006
6007 static struct constant_pool *
6008 s390_start_pool (struct constant_pool **pool_list, rtx insn)
6009 {
6010 struct constant_pool *pool, **prev;
6011
6012 pool = s390_alloc_pool ();
6013 pool->first_insn = insn;
6014
6015 for (prev = pool_list; *prev; prev = &(*prev)->next)
6016 ;
6017 *prev = pool;
6018
6019 return pool;
6020 }
6021
6022 /* End range of instructions covered by POOL at INSN and emit
6023 placeholder insn representing the pool. */
6024
6025 static void
6026 s390_end_pool (struct constant_pool *pool, rtx insn)
6027 {
6028 rtx pool_size = GEN_INT (pool->size + 8 /* alignment slop */);
6029
6030 if (!insn)
6031 insn = get_last_insn ();
6032
6033 pool->pool_insn = emit_insn_after (gen_pool (pool_size), insn);
6034 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6035 }
6036
6037 /* Add INSN to the list of insns covered by POOL. */
6038
6039 static void
6040 s390_add_pool_insn (struct constant_pool *pool, rtx insn)
6041 {
6042 bitmap_set_bit (pool->insns, INSN_UID (insn));
6043 }
6044
6045 /* Return pool out of POOL_LIST that covers INSN. */
6046
6047 static struct constant_pool *
6048 s390_find_pool (struct constant_pool *pool_list, rtx insn)
6049 {
6050 struct constant_pool *pool;
6051
6052 for (pool = pool_list; pool; pool = pool->next)
6053 if (bitmap_bit_p (pool->insns, INSN_UID (insn)))
6054 break;
6055
6056 return pool;
6057 }
6058
6059 /* Add constant VAL of mode MODE to the constant pool POOL. */
6060
6061 static void
6062 s390_add_constant (struct constant_pool *pool, rtx val, enum machine_mode mode)
6063 {
6064 struct constant *c;
6065 int i;
6066
6067 for (i = 0; i < NR_C_MODES; i++)
6068 if (constant_modes[i] == mode)
6069 break;
6070 gcc_assert (i != NR_C_MODES);
6071
6072 for (c = pool->constants[i]; c != NULL; c = c->next)
6073 if (rtx_equal_p (val, c->value))
6074 break;
6075
6076 if (c == NULL)
6077 {
6078 c = (struct constant *) xmalloc (sizeof *c);
6079 c->value = val;
6080 c->label = gen_label_rtx ();
6081 c->next = pool->constants[i];
6082 pool->constants[i] = c;
6083 pool->size += GET_MODE_SIZE (mode);
6084 }
6085 }
6086
6087 /* Return an rtx that represents the offset of X from the start of
6088 pool POOL. */
6089
6090 static rtx
6091 s390_pool_offset (struct constant_pool *pool, rtx x)
6092 {
6093 rtx label;
6094
6095 label = gen_rtx_LABEL_REF (GET_MODE (x), pool->label);
6096 x = gen_rtx_UNSPEC (GET_MODE (x), gen_rtvec (2, x, label),
6097 UNSPEC_POOL_OFFSET);
6098 return gen_rtx_CONST (GET_MODE (x), x);
6099 }
6100
6101 /* Find constant VAL of mode MODE in the constant pool POOL.
6102 Return an RTX describing the distance from the start of
6103 the pool to the location of the new constant. */
6104
6105 static rtx
6106 s390_find_constant (struct constant_pool *pool, rtx val,
6107 enum machine_mode mode)
6108 {
6109 struct constant *c;
6110 int i;
6111
6112 for (i = 0; i < NR_C_MODES; i++)
6113 if (constant_modes[i] == mode)
6114 break;
6115 gcc_assert (i != NR_C_MODES);
6116
6117 for (c = pool->constants[i]; c != NULL; c = c->next)
6118 if (rtx_equal_p (val, c->value))
6119 break;
6120
6121 gcc_assert (c);
6122
6123 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
6124 }
6125
6126 /* Check whether INSN is an execute. Return the label_ref to its
6127 execute target template if so, NULL_RTX otherwise. */
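/* An execute insn as matched here is a PARALLEL whose first element
   is an UNSPEC_EXECUTE; operand 2 of that UNSPEC holds the label_ref
   of the target template (or const0_rtx, see s390_cannot_copy_insn_p).  */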
6128
6129 static rtx
6130 s390_execute_label (rtx insn)
6131 {
6132 if (GET_CODE (insn) == INSN
6133 && GET_CODE (PATTERN (insn)) == PARALLEL
6134 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == UNSPEC
6135 && XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_EXECUTE)
6136 return XVECEXP (XVECEXP (PATTERN (insn), 0, 0), 0, 2);
6137
6138 return NULL_RTX;
6139 }
6140
6141 /* Add execute target for INSN to the constant pool POOL. */
6142
6143 static void
6144 s390_add_execute (struct constant_pool *pool, rtx insn)
6145 {
6146 struct constant *c;
6147
6148 for (c = pool->execute; c != NULL; c = c->next)
6149 if (INSN_UID (insn) == INSN_UID (c->value))
6150 break;
6151
6152 if (c == NULL)
6153 {
6154 c = (struct constant *) xmalloc (sizeof *c);
6155 c->value = insn;
6156 c->label = gen_label_rtx ();
6157 c->next = pool->execute;
6158 pool->execute = c;
6159 pool->size += 6;
6160 }
6161 }
6162
6163 /* Find execute target for INSN in the constant pool POOL.
6164 Return an RTX describing the distance from the start of
6165 the pool to the location of the execute target. */
6166
6167 static rtx
6168 s390_find_execute (struct constant_pool *pool, rtx insn)
6169 {
6170 struct constant *c;
6171
6172 for (c = pool->execute; c != NULL; c = c->next)
6173 if (INSN_UID (insn) == INSN_UID (c->value))
6174 break;
6175
6176 gcc_assert (c);
6177
6178 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
6179 }
6180
6181 /* For an execute INSN, extract the execute target template. */
6182
6183 static rtx
6184 s390_execute_target (rtx insn)
6185 {
6186 rtx pattern = PATTERN (insn);
6187 gcc_assert (s390_execute_label (insn));
6188
6189 if (XVECLEN (pattern, 0) == 2)
6190 {
6191 pattern = copy_rtx (XVECEXP (pattern, 0, 1));
6192 }
6193 else
6194 {
6195 rtvec vec = rtvec_alloc (XVECLEN (pattern, 0) - 1);
6196 int i;
6197
6198 for (i = 0; i < XVECLEN (pattern, 0) - 1; i++)
6199 RTVEC_ELT (vec, i) = copy_rtx (XVECEXP (pattern, 0, i + 1));
6200
6201 pattern = gen_rtx_PARALLEL (VOIDmode, vec);
6202 }
6203
6204 return pattern;
6205 }
6206
6207 /* Indicate that INSN cannot be duplicated. This is the case for
6208 execute insns that carry a unique label. */
6209
6210 static bool
6211 s390_cannot_copy_insn_p (rtx insn)
6212 {
6213 rtx label = s390_execute_label (insn);
6214 return label && label != const0_rtx;
6215 }
6216
6217 /* Dump out the constants in POOL. If REMOTE_LABEL is true,
6218 do not emit the pool base label. */
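/* The emitted layout is: optional rodata section start, pool
   alignment, optional base label, the constants in decreasing
   alignment order, 2-byte alignment, the execute target templates,
   optional section end, and a trailing barrier.  The placeholder
   insn is removed afterwards.  */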
6219
6220 static void
6221 s390_dump_pool (struct constant_pool *pool, bool remote_label)
6222 {
6223 struct constant *c;
6224 rtx insn = pool->pool_insn;
6225 int i;
6226
6227 /* Switch to rodata section. */
6228 if (TARGET_CPU_ZARCH)
6229 {
6230 insn = emit_insn_after (gen_pool_section_start (), insn);
6231 INSN_ADDRESSES_NEW (insn, -1);
6232 }
6233
6234 /* Ensure minimum pool alignment. */
6235 if (TARGET_CPU_ZARCH)
6236 insn = emit_insn_after (gen_pool_align (GEN_INT (8)), insn);
6237 else
6238 insn = emit_insn_after (gen_pool_align (GEN_INT (4)), insn);
6239 INSN_ADDRESSES_NEW (insn, -1);
6240
6241 /* Emit pool base label. */
6242 if (!remote_label)
6243 {
6244 insn = emit_label_after (pool->label, insn);
6245 INSN_ADDRESSES_NEW (insn, -1);
6246 }
6247
6248 /* Dump constants in descending alignment requirement order,
6249 ensuring proper alignment for every constant. */
6250 for (i = 0; i < NR_C_MODES; i++)
6251 for (c = pool->constants[i]; c; c = c->next)
6252 {
6253 /* Convert UNSPEC_LTREL_OFFSET unspecs to pool-relative references. */
6254 rtx value = copy_rtx (c->value);
6255 if (GET_CODE (value) == CONST
6256 && GET_CODE (XEXP (value, 0)) == UNSPEC
6257 && XINT (XEXP (value, 0), 1) == UNSPEC_LTREL_OFFSET
6258 && XVECLEN (XEXP (value, 0), 0) == 1)
6259 value = s390_pool_offset (pool, XVECEXP (XEXP (value, 0), 0, 0));
6260
6261 insn = emit_label_after (c->label, insn);
6262 INSN_ADDRESSES_NEW (insn, -1);
6263
6264 value = gen_rtx_UNSPEC_VOLATILE (constant_modes[i],
6265 gen_rtvec (1, value),
6266 UNSPECV_POOL_ENTRY);
6267 insn = emit_insn_after (value, insn);
6268 INSN_ADDRESSES_NEW (insn, -1);
6269 }
6270
6271 /* Ensure minimum alignment for instructions. */
6272 insn = emit_insn_after (gen_pool_align (GEN_INT (2)), insn);
6273 INSN_ADDRESSES_NEW (insn, -1);
6274
6275 /* Output in-pool execute template insns. */
6276 for (c = pool->execute; c; c = c->next)
6277 {
6278 insn = emit_label_after (c->label, insn);
6279 INSN_ADDRESSES_NEW (insn, -1);
6280
6281 insn = emit_insn_after (s390_execute_target (c->value), insn);
6282 INSN_ADDRESSES_NEW (insn, -1);
6283 }
6284
6285 /* Switch back to previous section. */
6286 if (TARGET_CPU_ZARCH)
6287 {
6288 insn = emit_insn_after (gen_pool_section_end (), insn);
6289 INSN_ADDRESSES_NEW (insn, -1);
6290 }
6291
6292 insn = emit_barrier_after (insn);
6293 INSN_ADDRESSES_NEW (insn, -1);
6294
6295 /* Remove placeholder insn. */
6296 remove_insn (pool->pool_insn);
6297 }
6298
6299 /* Free all memory used by POOL. */
6300
6301 static void
6302 s390_free_pool (struct constant_pool *pool)
6303 {
6304 struct constant *c, *next;
6305 int i;
6306
6307 for (i = 0; i < NR_C_MODES; i++)
6308 for (c = pool->constants[i]; c; c = next)
6309 {
6310 next = c->next;
6311 free (c);
6312 }
6313
6314 for (c = pool->execute; c; c = next)
6315 {
6316 next = c->next;
6317 free (c);
6318 }
6319
6320 BITMAP_FREE (pool->insns);
6321 free (pool);
6322 }
6323
6324
6325 /* Collect main literal pool. Return NULL on overflow. */
6326
6327 static struct constant_pool *
6328 s390_mainpool_start (void)
6329 {
6330 struct constant_pool *pool;
6331 rtx insn;
6332
6333 pool = s390_alloc_pool ();
6334
6335 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6336 {
6337 if (GET_CODE (insn) == INSN
6338 && GET_CODE (PATTERN (insn)) == SET
6339 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC_VOLATILE
6340 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPECV_MAIN_POOL)
6341 {
6342 gcc_assert (!pool->pool_insn);
6343 pool->pool_insn = insn;
6344 }
6345
6346 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
6347 {
6348 s390_add_execute (pool, insn);
6349 }
6350 else if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6351 {
6352 rtx pool_ref = NULL_RTX;
6353 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6354 if (pool_ref)
6355 {
6356 rtx constant = get_pool_constant (pool_ref);
6357 enum machine_mode mode = get_pool_mode (pool_ref);
6358 s390_add_constant (pool, constant, mode);
6359 }
6360 }
6361
6362 /* If hot/cold partitioning is enabled, we have to make sure that
6363 the literal pool is emitted in the same section where the
6364 initialization of the literal pool base pointer takes place.
6365 emit_pool_after is only used in the non-overflow case on
6366 non-Z CPUs where we can emit the literal pool at the end of the
6367 function body within the text section. */
6368 if (NOTE_P (insn)
6369 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS
6370 && !pool->emit_pool_after)
6371 pool->emit_pool_after = PREV_INSN (insn);
6372 }
6373
6374 gcc_assert (pool->pool_insn || pool->size == 0);
6375
6376 if (pool->size >= 4096)
6377 {
6378 /* We're going to chunkify the pool, so remove the main
6379 pool placeholder insn. */
6380 remove_insn (pool->pool_insn);
6381
6382 s390_free_pool (pool);
6383 pool = NULL;
6384 }
6385
6386 /* If the function ends with the section where the literal pool
6387 should be emitted, set the marker to its end. */
6388 if (pool && !pool->emit_pool_after)
6389 pool->emit_pool_after = get_last_insn ();
6390
6391 return pool;
6392 }
6393
6394 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6395 Modify the current function to output the pool constants as well as
6396 the pool register setup instruction. */
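/* Three strategies are used below: on z/Architecture the pool is
   addressed via LARL and emitted into .rodata after the function;
   on 31-bit targets it is emitted at the end of the function if
   code plus pool fit into 4096 bytes, and otherwise emitted inline
   with a branch around it.  */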
6397
6398 static void
6399 s390_mainpool_finish (struct constant_pool *pool)
6400 {
6401 rtx base_reg = cfun->machine->base_reg;
6402 rtx insn;
6403
6404 /* If the pool is empty, we're done. */
6405 if (pool->size == 0)
6406 {
6407 /* We don't actually need a base register after all. */
6408 cfun->machine->base_reg = NULL_RTX;
6409
6410 if (pool->pool_insn)
6411 remove_insn (pool->pool_insn);
6412 s390_free_pool (pool);
6413 return;
6414 }
6415
6416 /* We need correct insn addresses. */
6417 shorten_branches (get_insns ());
6418
6419 /* On zSeries, we use a LARL to load the pool register. The pool is
6420 located in the .rodata section, so we emit it after the function. */
6421 if (TARGET_CPU_ZARCH)
6422 {
6423 insn = gen_main_base_64 (base_reg, pool->label);
6424 insn = emit_insn_after (insn, pool->pool_insn);
6425 INSN_ADDRESSES_NEW (insn, -1);
6426 remove_insn (pool->pool_insn);
6427
6428 insn = get_last_insn ();
6429 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6430 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6431
6432 s390_dump_pool (pool, 0);
6433 }
6434
6435 /* On S/390, if the total size of the function's code plus literal pool
6436 does not exceed 4096 bytes, we use BASR to set up a function base
6437 pointer, and emit the literal pool at the end of the function. */
6438 else if (INSN_ADDRESSES (INSN_UID (pool->emit_pool_after))
6439 + pool->size + 8 /* alignment slop */ < 4096)
6440 {
6441 insn = gen_main_base_31_small (base_reg, pool->label);
6442 insn = emit_insn_after (insn, pool->pool_insn);
6443 INSN_ADDRESSES_NEW (insn, -1);
6444 remove_insn (pool->pool_insn);
6445
6446 insn = emit_label_after (pool->label, insn);
6447 INSN_ADDRESSES_NEW (insn, -1);
6448
6449 /* emit_pool_after will be set by s390_mainpool_start to the
6450 last insn of the section where the literal pool should be
6451 emitted. */
6452 insn = pool->emit_pool_after;
6453
6454 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6455 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6456
6457 s390_dump_pool (pool, 1);
6458 }
6459
6460 /* Otherwise, we emit an inline literal pool and use BASR to branch
6461 over it, setting up the pool register at the same time. */
6462 else
6463 {
6464 rtx pool_end = gen_label_rtx ();
6465
6466 insn = gen_main_base_31_large (base_reg, pool->label, pool_end);
6467 insn = emit_insn_after (insn, pool->pool_insn);
6468 INSN_ADDRESSES_NEW (insn, -1);
6469 remove_insn (pool->pool_insn);
6470
6471 insn = emit_label_after (pool->label, insn);
6472 INSN_ADDRESSES_NEW (insn, -1);
6473
6474 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6475 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6476
6477 insn = emit_label_after (pool_end, pool->pool_insn);
6478 INSN_ADDRESSES_NEW (insn, -1);
6479
6480 s390_dump_pool (pool, 1);
6481 }
6482
6483
6484 /* Replace all literal pool references. */
6485
6486 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6487 {
6488 if (INSN_P (insn))
6489 replace_ltrel_base (&PATTERN (insn));
6490
6491 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6492 {
6493 rtx addr, pool_ref = NULL_RTX;
6494 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6495 if (pool_ref)
6496 {
6497 if (s390_execute_label (insn))
6498 addr = s390_find_execute (pool, insn);
6499 else
6500 addr = s390_find_constant (pool, get_pool_constant (pool_ref),
6501 get_pool_mode (pool_ref));
6502
6503 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
6504 INSN_CODE (insn) = -1;
6505 }
6506 }
6507 }
6508
6509
6510 /* Free the pool. */
6511 s390_free_pool (pool);
6512 }
6513
6514 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6515 We have decided we cannot use this pool, so revert all changes
6516 to the current function that were done by s390_mainpool_start. */
6517 static void
6518 s390_mainpool_cancel (struct constant_pool *pool)
6519 {
6520 /* We didn't actually change the instruction stream, so simply
6521 free the pool memory. */
6522 s390_free_pool (pool);
6523 }
6524
6525
6526 /* Chunkify the literal pool. */
6527
6528 #define S390_POOL_CHUNK_MIN 0xc00
6529 #define S390_POOL_CHUNK_MAX 0xe00
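/* Note (assumption): these limits keep every chunk addressable from
   its base register with a short displacement; the hard limit is the
   4096-byte range checked in s390_mainpool_start, and the values
   below presumably leave slack for reload insns and alignment.  */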
6530
6531 static struct constant_pool *
6532 s390_chunkify_start (void)
6533 {
6534 struct constant_pool *curr_pool = NULL, *pool_list = NULL;
6535 int extra_size = 0;
6536 bitmap far_labels;
6537 rtx pending_ltrel = NULL_RTX;
6538 rtx insn;
6539
6540 rtx (*gen_reload_base) (rtx, rtx) =
6541 TARGET_CPU_ZARCH? gen_reload_base_64 : gen_reload_base_31;
6542
6543
6544 /* We need correct insn addresses. */
6545
6546 shorten_branches (get_insns ());
6547
6548 /* Scan all insns and move literals to pool chunks. */
6549
6550 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6551 {
6552 bool section_switch_p = false;
6553
6554 /* Check for pending LTREL_BASE. */
6555 if (INSN_P (insn))
6556 {
6557 rtx ltrel_base = find_ltrel_base (PATTERN (insn));
6558 if (ltrel_base)
6559 {
6560 gcc_assert (ltrel_base == pending_ltrel);
6561 pending_ltrel = NULL_RTX;
6562 }
6563 }
6564
6565 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
6566 {
6567 if (!curr_pool)
6568 curr_pool = s390_start_pool (&pool_list, insn);
6569
6570 s390_add_execute (curr_pool, insn);
6571 s390_add_pool_insn (curr_pool, insn);
6572 }
6573 else if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6574 {
6575 rtx pool_ref = NULL_RTX;
6576 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6577 if (pool_ref)
6578 {
6579 rtx constant = get_pool_constant (pool_ref);
6580 enum machine_mode mode = get_pool_mode (pool_ref);
6581
6582 if (!curr_pool)
6583 curr_pool = s390_start_pool (&pool_list, insn);
6584
6585 s390_add_constant (curr_pool, constant, mode);
6586 s390_add_pool_insn (curr_pool, insn);
6587
6588 /* Don't split the pool chunk between a LTREL_OFFSET load
6589 and the corresponding LTREL_BASE. */
6590 if (GET_CODE (constant) == CONST
6591 && GET_CODE (XEXP (constant, 0)) == UNSPEC
6592 && XINT (XEXP (constant, 0), 1) == UNSPEC_LTREL_OFFSET)
6593 {
6594 gcc_assert (!pending_ltrel);
6595 pending_ltrel = pool_ref;
6596 }
6597 }
6598 }
6599
6600 if (GET_CODE (insn) == JUMP_INSN || GET_CODE (insn) == CODE_LABEL)
6601 {
6602 if (curr_pool)
6603 s390_add_pool_insn (curr_pool, insn);
6604 /* An LTREL_BASE must follow within the same basic block. */
6605 gcc_assert (!pending_ltrel);
6606 }
6607
6608 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
6609 section_switch_p = true;
6610
6611 if (!curr_pool
6612 || INSN_ADDRESSES_SIZE () <= (size_t) INSN_UID (insn)
6613 || INSN_ADDRESSES (INSN_UID (insn)) == -1)
6614 continue;
6615
6616 if (TARGET_CPU_ZARCH)
6617 {
6618 if (curr_pool->size < S390_POOL_CHUNK_MAX)
6619 continue;
6620
6621 s390_end_pool (curr_pool, NULL_RTX);
6622 curr_pool = NULL;
6623 }
6624 else
6625 {
6626 int chunk_size = INSN_ADDRESSES (INSN_UID (insn))
6627 - INSN_ADDRESSES (INSN_UID (curr_pool->first_insn))
6628 + extra_size;
6629
6630 /* We will later have to insert base register reload insns.
6631 Those will have an effect on code size, which we need to
6632 consider here. This calculation makes rather pessimistic
6633 worst-case assumptions. */
6634 if (GET_CODE (insn) == CODE_LABEL)
6635 extra_size += 6;
6636
6637 if (chunk_size < S390_POOL_CHUNK_MIN
6638 && curr_pool->size < S390_POOL_CHUNK_MIN
6639 && !section_switch_p)
6640 continue;
6641
6642 /* Pool chunks can only be inserted after BARRIERs ... */
6643 if (GET_CODE (insn) == BARRIER)
6644 {
6645 s390_end_pool (curr_pool, insn);
6646 curr_pool = NULL;
6647 extra_size = 0;
6648 }
6649
6650 /* ... so if we don't find one in time, create one. */
6651 else if (chunk_size > S390_POOL_CHUNK_MAX
6652 || curr_pool->size > S390_POOL_CHUNK_MAX
6653 || section_switch_p)
6654 {
6655 rtx label, jump, barrier;
6656
6657 if (!section_switch_p)
6658 {
6659 /* We can insert the barrier only after a 'real' insn. */
6660 if (GET_CODE (insn) != INSN && GET_CODE (insn) != CALL_INSN)
6661 continue;
6662 if (get_attr_length (insn) == 0)
6663 continue;
6664 /* Don't separate LTREL_BASE from the corresponding
6665 LTREL_OFFSET load. */
6666 if (pending_ltrel)
6667 continue;
6668 }
6669 else
6670 {
6671 gcc_assert (!pending_ltrel);
6672
6673 /* The old pool has to end before the section switch
6674 note in order to make it part of the current
6675 section. */
6676 insn = PREV_INSN (insn);
6677 }
6678
6679 label = gen_label_rtx ();
6680 jump = emit_jump_insn_after (gen_jump (label), insn);
6681 barrier = emit_barrier_after (jump);
6682 insn = emit_label_after (label, barrier);
6683 JUMP_LABEL (jump) = label;
6684 LABEL_NUSES (label) = 1;
6685
6686 INSN_ADDRESSES_NEW (jump, -1);
6687 INSN_ADDRESSES_NEW (barrier, -1);
6688 INSN_ADDRESSES_NEW (insn, -1);
6689
6690 s390_end_pool (curr_pool, barrier);
6691 curr_pool = NULL;
6692 extra_size = 0;
6693 }
6694 }
6695 }
6696
6697 if (curr_pool)
6698 s390_end_pool (curr_pool, NULL_RTX);
6699 gcc_assert (!pending_ltrel);
6700
6701 /* Find all labels that are branched into
6702 from an insn belonging to a different chunk. */
6703
6704 far_labels = BITMAP_ALLOC (NULL);
6705
6706 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6707 {
6708 /* Labels marked with LABEL_PRESERVE_P can be target
6709 of non-local jumps, so we have to mark them.
6710 The same holds for named labels.
6711
6712 Don't do that, however, if it is the label before
6713 a jump table. */
6714
6715 if (GET_CODE (insn) == CODE_LABEL
6716 && (LABEL_PRESERVE_P (insn) || LABEL_NAME (insn)))
6717 {
6718 rtx vec_insn = next_real_insn (insn);
6719 rtx vec_pat = vec_insn && GET_CODE (vec_insn) == JUMP_INSN ?
6720 PATTERN (vec_insn) : NULL_RTX;
6721 if (!vec_pat
6722 || !(GET_CODE (vec_pat) == ADDR_VEC
6723 || GET_CODE (vec_pat) == ADDR_DIFF_VEC))
6724 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (insn));
6725 }
6726
6727 /* If we have a direct jump (conditional or unconditional)
6728 or a casesi jump, check all potential targets. */
6729 else if (GET_CODE (insn) == JUMP_INSN)
6730 {
6731 rtx pat = PATTERN (insn);
6732 if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 2)
6733 pat = XVECEXP (pat, 0, 0);
6734
6735 if (GET_CODE (pat) == SET)
6736 {
6737 rtx label = JUMP_LABEL (insn);
6738 if (label)
6739 {
6740 if (s390_find_pool (pool_list, label)
6741 != s390_find_pool (pool_list, insn))
6742 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
6743 }
6744 }
6745 else if (GET_CODE (pat) == PARALLEL
6746 && XVECLEN (pat, 0) == 2
6747 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
6748 && GET_CODE (XVECEXP (pat, 0, 1)) == USE
6749 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == LABEL_REF)
6750 {
6751 /* Find the jump table used by this casesi jump. */
6752 rtx vec_label = XEXP (XEXP (XVECEXP (pat, 0, 1), 0), 0);
6753 rtx vec_insn = next_real_insn (vec_label);
6754 rtx vec_pat = vec_insn && GET_CODE (vec_insn) == JUMP_INSN ?
6755 PATTERN (vec_insn) : NULL_RTX;
6756 if (vec_pat
6757 && (GET_CODE (vec_pat) == ADDR_VEC
6758 || GET_CODE (vec_pat) == ADDR_DIFF_VEC))
6759 {
6760 int i, diff_p = GET_CODE (vec_pat) == ADDR_DIFF_VEC;
6761
6762 for (i = 0; i < XVECLEN (vec_pat, diff_p); i++)
6763 {
6764 rtx label = XEXP (XVECEXP (vec_pat, diff_p, i), 0);
6765
6766 if (s390_find_pool (pool_list, label)
6767 != s390_find_pool (pool_list, insn))
6768 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
6769 }
6770 }
6771 }
6772 }
6773 }
6774
6775 /* Insert base register reload insns before every pool. */
6776
6777 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
6778 {
6779 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
6780 curr_pool->label);
6781 rtx insn = curr_pool->first_insn;
6782 INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
6783 }
6784
6785 /* Insert base register reload insns at every far label. */
6786
6787 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6788 if (GET_CODE (insn) == CODE_LABEL
6789 && bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
6790 {
6791 struct constant_pool *pool = s390_find_pool (pool_list, insn);
6792 if (pool)
6793 {
6794 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
6795 pool->label);
6796 INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
6797 }
6798 }
6799
6800
6801 BITMAP_FREE (far_labels);
6802
6803
6804 /* Recompute insn addresses. */
6805
6806 init_insn_lengths ();
6807 shorten_branches (get_insns ());
6808
6809 return pool_list;
6810 }
6811
6812 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
6813 After we have decided to use this list, finish implementing
6814 all changes to the current function as required. */
6815
6816 static void
6817 s390_chunkify_finish (struct constant_pool *pool_list)
6818 {
6819 struct constant_pool *curr_pool = NULL;
6820 rtx insn;
6821
6822
6823 /* Replace all literal pool references. */
6824
6825 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6826 {
6827 if (INSN_P (insn))
6828 replace_ltrel_base (&PATTERN (insn));
6829
6830 curr_pool = s390_find_pool (pool_list, insn);
6831 if (!curr_pool)
6832 continue;
6833
6834 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6835 {
6836 rtx addr, pool_ref = NULL_RTX;
6837 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6838 if (pool_ref)
6839 {
6840 if (s390_execute_label (insn))
6841 addr = s390_find_execute (curr_pool, insn);
6842 else
6843 addr = s390_find_constant (curr_pool,
6844 get_pool_constant (pool_ref),
6845 get_pool_mode (pool_ref));
6846
6847 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
6848 INSN_CODE (insn) = -1;
6849 }
6850 }
6851 }
6852
6853 /* Dump out all literal pools. */
6854
6855 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
6856 s390_dump_pool (curr_pool, 0);
6857
6858 /* Free pool list. */
6859
6860 while (pool_list)
6861 {
6862 struct constant_pool *next = pool_list->next;
6863 s390_free_pool (pool_list);
6864 pool_list = next;
6865 }
6866 }
6867
6868 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
6869 We have decided we cannot use this list, so revert all changes
6870 to the current function that were done by s390_chunkify_start. */
6871
6872 static void
6873 s390_chunkify_cancel (struct constant_pool *pool_list)
6874 {
6875 struct constant_pool *curr_pool = NULL;
6876 rtx insn;
6877
6878 /* Remove all pool placeholder insns. */
6879
6880 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
6881 {
6882 /* Did we insert an extra barrier? Remove it. */
6883 rtx barrier = PREV_INSN (curr_pool->pool_insn);
6884 rtx jump = barrier? PREV_INSN (barrier) : NULL_RTX;
6885 rtx label = NEXT_INSN (curr_pool->pool_insn);
6886
6887 if (jump && GET_CODE (jump) == JUMP_INSN
6888 && barrier && GET_CODE (barrier) == BARRIER
6889 && label && GET_CODE (label) == CODE_LABEL
6890 && GET_CODE (PATTERN (jump)) == SET
6891 && SET_DEST (PATTERN (jump)) == pc_rtx
6892 && GET_CODE (SET_SRC (PATTERN (jump))) == LABEL_REF
6893 && XEXP (SET_SRC (PATTERN (jump)), 0) == label)
6894 {
6895 remove_insn (jump);
6896 remove_insn (barrier);
6897 remove_insn (label);
6898 }
6899
6900 remove_insn (curr_pool->pool_insn);
6901 }
6902
6903 /* Remove all base register reload insns. */
6904
6905 for (insn = get_insns (); insn; )
6906 {
6907 rtx next_insn = NEXT_INSN (insn);
6908
6909 if (GET_CODE (insn) == INSN
6910 && GET_CODE (PATTERN (insn)) == SET
6911 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
6912 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_RELOAD_BASE)
6913 remove_insn (insn);
6914
6915 insn = next_insn;
6916 }
6917
6918 /* Free pool list. */
6919
6920 while (pool_list)
6921 {
6922 struct constant_pool *next = pool_list->next;
6923 s390_free_pool (pool_list);
6924 pool_list = next;
6925 }
6926 }
6927
6928 /* Output the constant pool entry EXP in mode MODE with alignment ALIGN. */
6929
6930 void
6931 s390_output_pool_entry (rtx exp, enum machine_mode mode, unsigned int align)
6932 {
6933 REAL_VALUE_TYPE r;
6934
6935 switch (GET_MODE_CLASS (mode))
6936 {
6937 case MODE_FLOAT:
6938 case MODE_DECIMAL_FLOAT:
6939 gcc_assert (GET_CODE (exp) == CONST_DOUBLE);
6940
6941 REAL_VALUE_FROM_CONST_DOUBLE (r, exp);
6942 assemble_real (r, mode, align);
6943 break;
6944
6945 case MODE_INT:
6946 assemble_integer (exp, GET_MODE_SIZE (mode), align, 1);
6947 mark_symbol_refs_as_used (exp);
6948 break;
6949
6950 default:
6951 gcc_unreachable ();
6952 }
6953 }
6954
6955
6956 /* Return an RTL expression representing the value of the return address
6957 for the frame COUNT steps up from the current frame. FRAME is the
6958 frame pointer of that frame. */
6959
6960 rtx
6961 s390_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
6962 {
6963 int offset;
6964 rtx addr;
6965
6966 /* Without backchain, we fail for all but the current frame. */
6967
6968 if (!TARGET_BACKCHAIN && count > 0)
6969 return NULL_RTX;
6970
6971 /* For the current frame, we need to make sure the initial
6972 value of RETURN_REGNUM is actually saved. */
6973
6974 if (count == 0)
6975 {
6976 /* On non-z architectures branch splitting could overwrite r14. */
6977 if (TARGET_CPU_ZARCH)
6978 return get_hard_reg_initial_val (Pmode, RETURN_REGNUM);
6979 else
6980 {
6981 cfun_frame_layout.save_return_addr_p = true;
6982 return gen_rtx_MEM (Pmode, return_address_pointer_rtx);
6983 }
6984 }
6985
6986 if (TARGET_PACKED_STACK)
6987 offset = -2 * UNITS_PER_LONG;
6988 else
6989 offset = RETURN_REGNUM * UNITS_PER_LONG;
6990
6991 addr = plus_constant (frame, offset);
6992 addr = memory_address (Pmode, addr);
6993 return gen_rtx_MEM (Pmode, addr);
6994 }
6995
6996 /* Return an RTL expression representing the back chain stored in
6997 the current stack frame. */
6998
6999 rtx
7000 s390_back_chain_rtx (void)
7001 {
7002 rtx chain;
7003
7004 gcc_assert (TARGET_BACKCHAIN);
7005
7006 if (TARGET_PACKED_STACK)
7007 chain = plus_constant (stack_pointer_rtx,
7008 STACK_POINTER_OFFSET - UNITS_PER_LONG);
7009 else
7010 chain = stack_pointer_rtx;
7011
7012 chain = gen_rtx_MEM (Pmode, chain);
7013 return chain;
7014 }
7015
7016 /* Find the first call-clobbered register unused in the current function.
7017 This could be used as a base register in a leaf function
7018 or for holding the return address before the epilogue. */
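/* Note: only GPRs 0 through 5 are considered here; they are
   call-clobbered in the s390 ABI, so using one of them avoids any
   save/restore.  If none of them is free, 0 is returned.  */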
7019
7020 static int
7021 find_unused_clobbered_reg (void)
7022 {
7023 int i;
7024 for (i = 0; i < 6; i++)
7025 if (!df_regs_ever_live_p (i))
7026 return i;
7027 return 0;
7028 }
7029
7030
7031 /* Helper function for s390_regs_ever_clobbered. Sets the fields in DATA for all
7032 clobbered hard regs in SETREG. */
7033
7034 static void
7035 s390_reg_clobbered_rtx (rtx setreg, const_rtx set_insn ATTRIBUTE_UNUSED, void *data)
7036 {
7037 int *regs_ever_clobbered = (int *)data;
7038 unsigned int i, regno;
7039 enum machine_mode mode = GET_MODE (setreg);
7040
7041 if (GET_CODE (setreg) == SUBREG)
7042 {
7043 rtx inner = SUBREG_REG (setreg);
7044 if (!GENERAL_REG_P (inner))
7045 return;
7046 regno = subreg_regno (setreg);
7047 }
7048 else if (GENERAL_REG_P (setreg))
7049 regno = REGNO (setreg);
7050 else
7051 return;
7052
7053 for (i = regno;
7054 i < regno + HARD_REGNO_NREGS (regno, mode);
7055 i++)
7056 regs_ever_clobbered[i] = 1;
7057 }
7058
7059 /* Walks through all basic blocks of the current function looking
7060 for clobbered hard regs using s390_reg_clobbered_rtx. The fields
7061 of the passed integer array REGS_EVER_CLOBBERED are set to one for
7062 each of those regs. */
7063
7064 static void
7065 s390_regs_ever_clobbered (int *regs_ever_clobbered)
7066 {
7067 basic_block cur_bb;
7068 rtx cur_insn;
7069 unsigned int i;
7070
7071 memset (regs_ever_clobbered, 0, 16 * sizeof (int));
7072
7073 /* For non-leaf functions we have to consider all call clobbered regs to be
7074 clobbered. */
7075 if (!current_function_is_leaf)
7076 {
7077 for (i = 0; i < 16; i++)
7078 regs_ever_clobbered[i] = call_really_used_regs[i];
7079 }
7080
7081 /* Make the "magic" eh_return registers live if necessary. For regs_ever_live
7082 this work is done by liveness analysis (mark_regs_live_at_end).
7083 Special care is needed for functions containing landing pads. Landing pads
7084 may use the eh registers, but the code which sets these registers is not
7085 contained in that function. Hence s390_regs_ever_clobbered is not able to
7086 deal with this automatically. */
7087 if (crtl->calls_eh_return || cfun->machine->has_landing_pad_p)
7088 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM ; i++)
7089 if (crtl->calls_eh_return
7090 || (cfun->machine->has_landing_pad_p
7091 && df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
7092 regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;
7093
7094 /* For nonlocal gotos all call-saved registers have to be saved.
7095 This flag is also set for the unwinding code in libgcc.
7096 See expand_builtin_unwind_init. For regs_ever_live this is done by
7097 reload. */
7098 if (cfun->has_nonlocal_label)
7099 for (i = 0; i < 16; i++)
7100 if (!call_really_used_regs[i])
7101 regs_ever_clobbered[i] = 1;
7102
7103 FOR_EACH_BB (cur_bb)
7104 {
7105 FOR_BB_INSNS (cur_bb, cur_insn)
7106 {
7107 if (INSN_P (cur_insn))
7108 note_stores (PATTERN (cur_insn),
7109 s390_reg_clobbered_rtx,
7110 regs_ever_clobbered);
7111 }
7112 }
7113 }
7114
7115 /* Determine the frame area which actually has to be accessed
7116 in the function epilogue. The values are stored at the
7117 given pointers AREA_BOTTOM (address of the lowest used stack
7118 address) and AREA_TOP (address of the first item which does
7119 not belong to the stack frame). */
7120
7121 static void
7122 s390_frame_area (int *area_bottom, int *area_top)
7123 {
7124 int b, t;
7125 int i;
7126
7127 b = INT_MAX;
7128 t = INT_MIN;
7129
7130 if (cfun_frame_layout.first_restore_gpr != -1)
7131 {
7132 b = (cfun_frame_layout.gprs_offset
7133 + cfun_frame_layout.first_restore_gpr * UNITS_PER_LONG);
7134 t = b + (cfun_frame_layout.last_restore_gpr
7135 - cfun_frame_layout.first_restore_gpr + 1) * UNITS_PER_LONG;
7136 }
7137
7138 if (TARGET_64BIT && cfun_save_high_fprs_p)
7139 {
7140 b = MIN (b, cfun_frame_layout.f8_offset);
7141 t = MAX (t, (cfun_frame_layout.f8_offset
7142 + cfun_frame_layout.high_fprs * 8));
7143 }
7144
7145 if (!TARGET_64BIT)
7146 for (i = 2; i < 4; i++)
7147 if (cfun_fpr_bit_p (i))
7148 {
7149 b = MIN (b, cfun_frame_layout.f4_offset + (i - 2) * 8);
7150 t = MAX (t, cfun_frame_layout.f4_offset + (i - 1) * 8);
7151 }
7152
7153 *area_bottom = b;
7154 *area_top = t;
7155 }
7156
7157 /* Fill cfun->machine with info about register usage of current function.
7158 Return in CLOBBERED_REGS which GPRs are currently considered set. */
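/* In this backend's hard register numbering, GPRs 0-15 occupy hard
   regs 0-15 and FPRs 0-15 occupy hard regs 16-31; hence the FPR
   loops below use indices offset by 16 (hard regs 24-31 are the
   call-saved FPRs f8-f15 under the 64-bit ABI).  */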
7159
7160 static void
7161 s390_register_info (int clobbered_regs[])
7162 {
7163 int i, j;
7164
7165 /* fprs 8 - 15 are call saved for 64 Bit ABI. */
7166 cfun_frame_layout.fpr_bitmap = 0;
7167 cfun_frame_layout.high_fprs = 0;
7168 if (TARGET_64BIT)
7169 for (i = 24; i < 32; i++)
7170 if (df_regs_ever_live_p (i) && !global_regs[i])
7171 {
7172 cfun_set_fpr_bit (i - 16);
7173 cfun_frame_layout.high_fprs++;
7174 }
7175
7176 /* Find first and last gpr to be saved. We trust regs_ever_live
7177 data, except that we don't save and restore global registers.
7178
7179 Also, all registers with special meaning to the compiler need
7180 to be handled specially. */
7181
7182 s390_regs_ever_clobbered (clobbered_regs);
7183
7184 for (i = 0; i < 16; i++)
7185 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i] && !fixed_regs[i];
7186
7187 if (frame_pointer_needed)
7188 clobbered_regs[HARD_FRAME_POINTER_REGNUM] = 1;
7189
7190 if (flag_pic)
7191 clobbered_regs[PIC_OFFSET_TABLE_REGNUM]
7192 |= df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM);
7193
7194 clobbered_regs[BASE_REGNUM]
7195 |= (cfun->machine->base_reg
7196 && REGNO (cfun->machine->base_reg) == BASE_REGNUM);
7197
7198 clobbered_regs[RETURN_REGNUM]
7199 |= (!current_function_is_leaf
7200 || TARGET_TPF_PROFILING
7201 || cfun->machine->split_branches_pending_p
7202 || cfun_frame_layout.save_return_addr_p
7203 || crtl->calls_eh_return
7204 || cfun->stdarg);
7205
7206 clobbered_regs[STACK_POINTER_REGNUM]
7207 |= (!current_function_is_leaf
7208 || TARGET_TPF_PROFILING
7209 || cfun_save_high_fprs_p
7210 || get_frame_size () > 0
7211 || cfun->calls_alloca
7212 || cfun->stdarg);
7213
7214 for (i = 6; i < 16; i++)
7215 if (df_regs_ever_live_p (i) || clobbered_regs[i])
7216 break;
7217 for (j = 15; j > i; j--)
7218 if (df_regs_ever_live_p (j) || clobbered_regs[j])
7219 break;
7220
7221 if (i == 16)
7222 {
7223 /* Nothing to save/restore. */
7224 cfun_frame_layout.first_save_gpr_slot = -1;
7225 cfun_frame_layout.last_save_gpr_slot = -1;
7226 cfun_frame_layout.first_save_gpr = -1;
7227 cfun_frame_layout.first_restore_gpr = -1;
7228 cfun_frame_layout.last_save_gpr = -1;
7229 cfun_frame_layout.last_restore_gpr = -1;
7230 }
7231 else
7232 {
7233 /* Save slots for gprs from i to j. */
7234 cfun_frame_layout.first_save_gpr_slot = i;
7235 cfun_frame_layout.last_save_gpr_slot = j;
7236
7237 for (i = cfun_frame_layout.first_save_gpr_slot;
7238 i < cfun_frame_layout.last_save_gpr_slot + 1;
7239 i++)
7240 if (clobbered_regs[i])
7241 break;
7242
7243 for (j = cfun_frame_layout.last_save_gpr_slot; j > i; j--)
7244 if (clobbered_regs[j])
7245 break;
7246
7247 if (i == cfun_frame_layout.last_save_gpr_slot + 1)
7248 {
7249 /* Nothing to save/restore. */
7250 cfun_frame_layout.first_save_gpr = -1;
7251 cfun_frame_layout.first_restore_gpr = -1;
7252 cfun_frame_layout.last_save_gpr = -1;
7253 cfun_frame_layout.last_restore_gpr = -1;
7254 }
7255 else
7256 {
7257 /* Save / Restore from gpr i to j. */
7258 cfun_frame_layout.first_save_gpr = i;
7259 cfun_frame_layout.first_restore_gpr = i;
7260 cfun_frame_layout.last_save_gpr = j;
7261 cfun_frame_layout.last_restore_gpr = j;
7262 }
7263 }
7264
7265 if (cfun->stdarg)
7266 {
7267 /* Varargs functions need to save gprs 2 to 6. */
7268 if (cfun->va_list_gpr_size
7269 && crtl->args.info.gprs < GP_ARG_NUM_REG)
7270 {
7271 int min_gpr = crtl->args.info.gprs;
7272 int max_gpr = min_gpr + cfun->va_list_gpr_size;
7273 if (max_gpr > GP_ARG_NUM_REG)
7274 max_gpr = GP_ARG_NUM_REG;
7275
7276 if (cfun_frame_layout.first_save_gpr == -1
7277 || cfun_frame_layout.first_save_gpr > 2 + min_gpr)
7278 {
7279 cfun_frame_layout.first_save_gpr = 2 + min_gpr;
7280 cfun_frame_layout.first_save_gpr_slot = 2 + min_gpr;
7281 }
7282
7283 if (cfun_frame_layout.last_save_gpr == -1
7284 || cfun_frame_layout.last_save_gpr < 2 + max_gpr - 1)
7285 {
7286 cfun_frame_layout.last_save_gpr = 2 + max_gpr - 1;
7287 cfun_frame_layout.last_save_gpr_slot = 2 + max_gpr - 1;
7288 }
7289 }
7290
7291 /* Mark f0, f2 for 31 bit and f0-f4 for 64 bit to be saved. */
7292 if (TARGET_HARD_FLOAT && cfun->va_list_fpr_size
7293 && crtl->args.info.fprs < FP_ARG_NUM_REG)
7294 {
7295 int min_fpr = crtl->args.info.fprs;
7296 int max_fpr = min_fpr + cfun->va_list_fpr_size;
7297 if (max_fpr > FP_ARG_NUM_REG)
7298 max_fpr = FP_ARG_NUM_REG;
7299
7300 /* ??? This is currently required to ensure proper location
7301 of the fpr save slots within the va_list save area. */
7302 if (TARGET_PACKED_STACK)
7303 min_fpr = 0;
7304
7305 for (i = min_fpr; i < max_fpr; i++)
7306 cfun_set_fpr_bit (i);
7307 }
7308 }
7309
7310 if (!TARGET_64BIT)
7311 for (i = 2; i < 4; i++)
7312 if (df_regs_ever_live_p (i + 16) && !global_regs[i + 16])
7313 cfun_set_fpr_bit (i);
7314 }
7315
7316 /* Fill cfun->machine with info about frame of current function. */
7317
7318 static void
7319 s390_frame_info (void)
7320 {
7321 int i;
7322
7323 cfun_frame_layout.frame_size = get_frame_size ();
7324 if (!TARGET_64BIT && cfun_frame_layout.frame_size > 0x7fff0000)
7325 fatal_error ("total size of local variables exceeds architecture limit");
7326
7327 if (!TARGET_PACKED_STACK)
7328 {
7329 cfun_frame_layout.backchain_offset = 0;
7330 cfun_frame_layout.f0_offset = 16 * UNITS_PER_LONG;
7331 cfun_frame_layout.f4_offset = cfun_frame_layout.f0_offset + 2 * 8;
7332 cfun_frame_layout.f8_offset = -cfun_frame_layout.high_fprs * 8;
7333 cfun_frame_layout.gprs_offset = (cfun_frame_layout.first_save_gpr_slot
7334 * UNITS_PER_LONG);
7335 }
7336 else if (TARGET_BACKCHAIN) /* kernel stack layout */
7337 {
7338 cfun_frame_layout.backchain_offset = (STACK_POINTER_OFFSET
7339 - UNITS_PER_LONG);
7340 cfun_frame_layout.gprs_offset
7341 = (cfun_frame_layout.backchain_offset
7342 - (STACK_POINTER_REGNUM - cfun_frame_layout.first_save_gpr_slot + 1)
7343 * UNITS_PER_LONG);
7344
7345 if (TARGET_64BIT)
7346 {
7347 cfun_frame_layout.f4_offset
7348 = (cfun_frame_layout.gprs_offset
7349 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7350
7351 cfun_frame_layout.f0_offset
7352 = (cfun_frame_layout.f4_offset
7353 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7354 }
7355 else
7356 {
7357 /* On 31 bit we have to take care of the alignment of the
7358 floating point regs to provide the fastest access. */
7359 cfun_frame_layout.f0_offset
7360 = ((cfun_frame_layout.gprs_offset
7361 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1))
7362 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7363
7364 cfun_frame_layout.f4_offset
7365 = (cfun_frame_layout.f0_offset
7366 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7367 }
7368 }
7369 else /* no backchain */
7370 {
7371 cfun_frame_layout.f4_offset
7372 = (STACK_POINTER_OFFSET
7373 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7374
7375 cfun_frame_layout.f0_offset
7376 = (cfun_frame_layout.f4_offset
7377 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7378
7379 cfun_frame_layout.gprs_offset
7380 = cfun_frame_layout.f0_offset - cfun_gprs_save_area_size;
7381 }
7382
7383 if (current_function_is_leaf
7384 && !TARGET_TPF_PROFILING
7385 && cfun_frame_layout.frame_size == 0
7386 && !cfun_save_high_fprs_p
7387 && !cfun->calls_alloca
7388 && !cfun->stdarg)
7389 return;
7390
7391 if (!TARGET_PACKED_STACK)
7392 cfun_frame_layout.frame_size += (STACK_POINTER_OFFSET
7393 + crtl->outgoing_args_size
7394 + cfun_frame_layout.high_fprs * 8);
7395 else
7396 {
7397 if (TARGET_BACKCHAIN)
7398 cfun_frame_layout.frame_size += UNITS_PER_LONG;
7399
7400 /* No alignment trouble here because f8-f15 are only saved under
7401 64 bit. */
7402 cfun_frame_layout.f8_offset = (MIN (MIN (cfun_frame_layout.f0_offset,
7403 cfun_frame_layout.f4_offset),
7404 cfun_frame_layout.gprs_offset)
7405 - cfun_frame_layout.high_fprs * 8);
7406
7407 cfun_frame_layout.frame_size += cfun_frame_layout.high_fprs * 8;
7408
7409 for (i = 0; i < 8; i++)
7410 if (cfun_fpr_bit_p (i))
7411 cfun_frame_layout.frame_size += 8;
7412
7413 cfun_frame_layout.frame_size += cfun_gprs_save_area_size;
7414
7415 /* If, under 31 bit, an odd number of GPRs has to be saved, we have to
7416 adjust the frame size to sustain 8-byte alignment of stack frames. */
7417 cfun_frame_layout.frame_size = ((cfun_frame_layout.frame_size +
7418 STACK_BOUNDARY / BITS_PER_UNIT - 1)
7419 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1));
7420
7421 cfun_frame_layout.frame_size += crtl->outgoing_args_size;
7422 }
7423 }
7424
7425 /* Generate frame layout. Fills in register and frame data for the current
7426 function in cfun->machine. This routine can be called multiple times;
7427 it will re-do the complete frame layout every time. */
7428
7429 static void
7430 s390_init_frame_layout (void)
7431 {
7432 HOST_WIDE_INT frame_size;
7433 int base_used;
7434 int clobbered_regs[16];
7435
7436 /* On S/390 machines, we may need to perform branch splitting, which
7437 will require both base and return address register. We have no
7438 choice but to assume we're going to need them until right at the
7439 end of the machine dependent reorg phase. */
7440 if (!TARGET_CPU_ZARCH)
7441 cfun->machine->split_branches_pending_p = true;
7442
7443 do
7444 {
7445 frame_size = cfun_frame_layout.frame_size;
7446
7447 /* Try to predict whether we'll need the base register. */
7448 base_used = cfun->machine->split_branches_pending_p
7449 || crtl->uses_const_pool
7450 || (!DISP_IN_RANGE (frame_size)
7451 && !CONST_OK_FOR_K (frame_size));
7452
7453 /* Decide which register to use as literal pool base. In small
7454 leaf functions, try to use an unused call-clobbered register
7455 as base register to avoid save/restore overhead. */
7456 if (!base_used)
7457 cfun->machine->base_reg = NULL_RTX;
7458 else if (current_function_is_leaf && !df_regs_ever_live_p (5))
7459 cfun->machine->base_reg = gen_rtx_REG (Pmode, 5);
7460 else
7461 cfun->machine->base_reg = gen_rtx_REG (Pmode, BASE_REGNUM);
7462
7463 s390_register_info (clobbered_regs);
7464 s390_frame_info ();
7465 }
7466 while (frame_size != cfun_frame_layout.frame_size);
7467 }
7468
7469 /* Update frame layout. Recompute actual register save data based on
7470 current info and update regs_ever_live for the special registers.
7471 May be called multiple times, but may never cause *more* registers
7472 to be saved than s390_init_frame_layout allocated room for. */
7473
7474 static void
7475 s390_update_frame_layout (void)
7476 {
7477 int clobbered_regs[16];
7478
7479 s390_register_info (clobbered_regs);
7480
7481 df_set_regs_ever_live (BASE_REGNUM,
7482 clobbered_regs[BASE_REGNUM] ? true : false);
7483 df_set_regs_ever_live (RETURN_REGNUM,
7484 clobbered_regs[RETURN_REGNUM] ? true : false);
7485 df_set_regs_ever_live (STACK_POINTER_REGNUM,
7486 clobbered_regs[STACK_POINTER_REGNUM] ? true : false);
7487
7488 if (cfun->machine->base_reg)
7489 df_set_regs_ever_live (REGNO (cfun->machine->base_reg), true);
7490 }
7491
7492 /* Return true if it is legal to put a value with MODE into REGNO. */
7493
7494 bool
7495 s390_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
7496 {
7497 switch (REGNO_REG_CLASS (regno))
7498 {
7499 case FP_REGS:
7500 if (REGNO_PAIR_OK (regno, mode))
7501 {
7502 if (mode == SImode || mode == DImode)
7503 return true;
7504
7505 if (FLOAT_MODE_P (mode) && GET_MODE_CLASS (mode) != MODE_VECTOR_FLOAT)
7506 return true;
7507 }
7508 break;
7509 case ADDR_REGS:
7510 if (FRAME_REGNO_P (regno) && mode == Pmode)
7511 return true;
7512
7513 /* fallthrough */
7514 case GENERAL_REGS:
7515 if (REGNO_PAIR_OK (regno, mode))
7516 {
7517 if (TARGET_ZARCH
7518 || (mode != TFmode && mode != TCmode && mode != TDmode))
7519 return true;
7520 }
7521 break;
7522 case CC_REGS:
7523 if (GET_MODE_CLASS (mode) == MODE_CC)
7524 return true;
7525 break;
7526 case ACCESS_REGS:
7527 if (REGNO_PAIR_OK (regno, mode))
7528 {
7529 if (mode == SImode || mode == Pmode)
7530 return true;
7531 }
7532 break;
7533 default:
7534 return false;
7535 }
7536
7537 return false;
7538 }
7539
7540 /* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */
7541
7542 bool
7543 s390_hard_regno_rename_ok (unsigned int old_reg, unsigned int new_reg)
7544 {
7545 /* Once we've decided upon a register to use as base register, it must
7546 no longer be used for any other purpose. */
7547 if (cfun->machine->base_reg)
7548 if (REGNO (cfun->machine->base_reg) == old_reg
7549 || REGNO (cfun->machine->base_reg) == new_reg)
7550 return false;
7551
7552 return true;
7553 }
7554
7555 /* Maximum number of registers to represent a value of mode MODE
7556 in a register of class RCLASS. */
7557
7558 int
7559 s390_class_max_nregs (enum reg_class rclass, enum machine_mode mode)
7560 {
7561 switch (rclass)
7562 {
7563 case FP_REGS:
7564 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
7565 return 2 * ((GET_MODE_SIZE (mode) / 2 + 8 - 1) / 8);
7566 else
7567 return (GET_MODE_SIZE (mode) + 8 - 1) / 8;
7568 case ACCESS_REGS:
7569 return (GET_MODE_SIZE (mode) + 4 - 1) / 4;
7570 default:
7571 break;
7572 }
7573 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
7574 }
7575
7576 /* Return true if register FROM can be eliminated via register TO. */
7577
7578 static bool
7579 s390_can_eliminate (const int from, const int to)
7580 {
7581 /* On zSeries machines, we have not marked the base register as fixed.
7582 Instead, we have an elimination rule BASE_REGNUM -> BASE_REGNUM.
7583 If a function requires the base register, we say here that this
7584 elimination cannot be performed. This will cause reload to free
7585 up the base register (as if it were fixed). On the other hand,
7586 if the current function does *not* require the base register, we
7587 say here the elimination succeeds, which in turn allows reload
7588 to allocate the base register for any other purpose. */
7589 if (from == BASE_REGNUM && to == BASE_REGNUM)
7590 {
7591 if (TARGET_CPU_ZARCH)
7592 {
7593 s390_init_frame_layout ();
7594 return cfun->machine->base_reg == NULL_RTX;
7595 }
7596
7597 return false;
7598 }
7599
7600 /* Everything else must point into the stack frame. */
7601 gcc_assert (to == STACK_POINTER_REGNUM
7602 || to == HARD_FRAME_POINTER_REGNUM);
7603
7604 gcc_assert (from == FRAME_POINTER_REGNUM
7605 || from == ARG_POINTER_REGNUM
7606 || from == RETURN_ADDRESS_POINTER_REGNUM);
7607
7608 /* Make sure we actually saved the return address. */
7609 if (from == RETURN_ADDRESS_POINTER_REGNUM)
7610 if (!crtl->calls_eh_return
7611 && !cfun->stdarg
7612 && !cfun_frame_layout.save_return_addr_p)
7613 return false;
7614
7615 return true;
7616 }
7617
7618 /* Return the offset between registers FROM and TO initially after the prologue. */
7619
7620 HOST_WIDE_INT
7621 s390_initial_elimination_offset (int from, int to)
7622 {
7623 HOST_WIDE_INT offset;
7624 int index;
7625
7626 /* ??? Why are we called for non-eliminable pairs? */
7627 if (!s390_can_eliminate (from, to))
7628 return 0;
7629
7630 switch (from)
7631 {
7632 case FRAME_POINTER_REGNUM:
7633 offset = (get_frame_size()
7634 + STACK_POINTER_OFFSET
7635 + crtl->outgoing_args_size);
7636 break;
7637
7638 case ARG_POINTER_REGNUM:
7639 s390_init_frame_layout ();
7640 offset = cfun_frame_layout.frame_size + STACK_POINTER_OFFSET;
7641 break;
7642
7643 case RETURN_ADDRESS_POINTER_REGNUM:
7644 s390_init_frame_layout ();
7645 index = RETURN_REGNUM - cfun_frame_layout.first_save_gpr_slot;
7646 gcc_assert (index >= 0);
7647 offset = cfun_frame_layout.frame_size + cfun_frame_layout.gprs_offset;
7648 offset += index * UNITS_PER_LONG;
7649 break;
7650
7651 case BASE_REGNUM:
7652 offset = 0;
7653 break;
7654
7655 default:
7656 gcc_unreachable ();
7657 }
7658
7659 return offset;
7660 }
7661
7662 /* Emit insn to save fpr REGNUM at offset OFFSET relative
7663 to register BASE. Return generated insn. */
7664
7665 static rtx
7666 save_fpr (rtx base, int offset, int regnum)
7667 {
7668 rtx addr;
7669 addr = gen_rtx_MEM (DFmode, plus_constant (base, offset));
7670
7671 if (regnum >= 16 && regnum <= (16 + FP_ARG_NUM_REG))
7672 set_mem_alias_set (addr, get_varargs_alias_set ());
7673 else
7674 set_mem_alias_set (addr, get_frame_alias_set ());
7675
7676 return emit_move_insn (addr, gen_rtx_REG (DFmode, regnum));
7677 }
7678
7679 /* Emit insn to restore fpr REGNUM from offset OFFSET relative
7680 to register BASE. Return generated insn. */
7681
7682 static rtx
7683 restore_fpr (rtx base, int offset, int regnum)
7684 {
7685 rtx addr;
7686 addr = gen_rtx_MEM (DFmode, plus_constant (base, offset));
7687 set_mem_alias_set (addr, get_frame_alias_set ());
7688
7689 return emit_move_insn (gen_rtx_REG (DFmode, regnum), addr);
7690 }
7691
7692 /* Return true if REGNO is a global register, but not one
7693 of the special ones that need to be saved/restored anyway. */
7694
7695 static inline bool
7696 global_not_special_regno_p (int regno)
7697 {
7698 return (global_regs[regno]
7699 /* These registers are special and need to be
7700 restored in any case. */
7701 && !(regno == STACK_POINTER_REGNUM
7702 || regno == RETURN_REGNUM
7703 || regno == BASE_REGNUM
7704 || (flag_pic && regno == (int)PIC_OFFSET_TABLE_REGNUM)));
7705 }
7706
7707 /* Generate insn to save registers FIRST to LAST into
7708 the register save area located at offset OFFSET
7709 relative to register BASE. */
7710
7711 static rtx
7712 save_gprs (rtx base, int offset, int first, int last)
7713 {
7714 rtx addr, insn, note;
7715 int i;
7716
7717 addr = plus_constant (base, offset);
7718 addr = gen_rtx_MEM (Pmode, addr);
7719
7720 set_mem_alias_set (addr, get_frame_alias_set ());
7721
7722 /* Special-case single register. */
7723 if (first == last)
7724 {
7725 if (TARGET_64BIT)
7726 insn = gen_movdi (addr, gen_rtx_REG (Pmode, first));
7727 else
7728 insn = gen_movsi (addr, gen_rtx_REG (Pmode, first));
7729
7730 if (!global_not_special_regno_p (first))
7731 RTX_FRAME_RELATED_P (insn) = 1;
7732 return insn;
7733 }
7734
7735
7736 insn = gen_store_multiple (addr,
7737 gen_rtx_REG (Pmode, first),
7738 GEN_INT (last - first + 1));
7739
7740 if (first <= 6 && cfun->stdarg)
7741 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
7742 {
7743 rtx mem = XEXP (XVECEXP (PATTERN (insn), 0, i), 0);
7744
7745 if (first + i <= 6)
7746 set_mem_alias_set (mem, get_varargs_alias_set ());
7747 }
7748
7749 /* We need to set the FRAME_RELATED flag on all SETs
7750 inside the store-multiple pattern.
7751
7752 However, we must not emit DWARF records for registers 2..5
7753 if they are stored for use by variable arguments ...
7754
7755 ??? Unfortunately, it is not enough to simply not set the
7756 FRAME_RELATED flags for those SETs, because the first SET
7757 of the PARALLEL is always treated as if it had the flag
7758 set, even if it does not. Therefore we emit a new pattern
7759 without those registers as REG_FRAME_RELATED_EXPR note. */
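
   /* For illustration only (a sketch of the effect, not additional logic):
      if a stdarg function stores r2..r15 with one store-multiple insn, the
      note built below covers only the r6..r15 stores, so no DWARF save
      records are emitted for the vararg slots holding r2..r5.  */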
7760
7761 if (first >= 6 && !global_not_special_regno_p (first))
7762 {
7763 rtx pat = PATTERN (insn);
7764
7765 for (i = 0; i < XVECLEN (pat, 0); i++)
7766 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
7767 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (pat,
7768 0, i)))))
7769 RTX_FRAME_RELATED_P (XVECEXP (pat, 0, i)) = 1;
7770
7771 RTX_FRAME_RELATED_P (insn) = 1;
7772 }
7773 else if (last >= 6)
7774 {
7775 int start;
7776
7777 for (start = first >= 6 ? first : 6; start <= last; start++)
7778 if (!global_not_special_regno_p (start))
7779 break;
7780
7781 if (start > last)
7782 return insn;
7783
7784 addr = plus_constant (base, offset + (start - first) * UNITS_PER_LONG);
7785 note = gen_store_multiple (gen_rtx_MEM (Pmode, addr),
7786 gen_rtx_REG (Pmode, start),
7787 GEN_INT (last - start + 1));
7788 note = PATTERN (note);
7789
7790 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
7791
7792 for (i = 0; i < XVECLEN (note, 0); i++)
7793 if (GET_CODE (XVECEXP (note, 0, i)) == SET
7794 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (note,
7795 0, i)))))
7796 RTX_FRAME_RELATED_P (XVECEXP (note, 0, i)) = 1;
7797
7798 RTX_FRAME_RELATED_P (insn) = 1;
7799 }
7800
7801 return insn;
7802 }
7803
7804 /* Generate insn to restore registers FIRST to LAST from
7805 the register save area located at offset OFFSET
7806 relative to register BASE. */
7807
7808 static rtx
7809 restore_gprs (rtx base, int offset, int first, int last)
7810 {
7811 rtx addr, insn;
7812
7813 addr = plus_constant (base, offset);
7814 addr = gen_rtx_MEM (Pmode, addr);
7815 set_mem_alias_set (addr, get_frame_alias_set ());
7816
7817 /* Special-case single register. */
7818 if (first == last)
7819 {
7820 if (TARGET_64BIT)
7821 insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
7822 else
7823 insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);
7824
7825 return insn;
7826 }
7827
7828 insn = gen_load_multiple (gen_rtx_REG (Pmode, first),
7829 addr,
7830 GEN_INT (last - first + 1));
7831 return insn;
7832 }
7833
7834 /* Return insn sequence to load the GOT register. */
7835
7836 static GTY(()) rtx got_symbol;
7837 rtx
7838 s390_load_got (void)
7839 {
7840 rtx insns;
7841
7842 if (!got_symbol)
7843 {
7844 got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
7845 SYMBOL_REF_FLAGS (got_symbol) = SYMBOL_FLAG_LOCAL;
7846 }
7847
7848 start_sequence ();
7849
7850 if (TARGET_CPU_ZARCH)
7851 {
7852 emit_move_insn (pic_offset_table_rtx, got_symbol);
7853 }
7854 else
7855 {
7856 rtx offset;
7857
7858 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got_symbol),
7859 UNSPEC_LTREL_OFFSET);
7860 offset = gen_rtx_CONST (Pmode, offset);
7861 offset = force_const_mem (Pmode, offset);
7862
7863 emit_move_insn (pic_offset_table_rtx, offset);
7864
7865 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (offset, 0)),
7866 UNSPEC_LTREL_BASE);
7867 offset = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, offset);
7868
7869 emit_move_insn (pic_offset_table_rtx, offset);
7870 }
7871
7872 insns = get_insns ();
7873 end_sequence ();
7874 return insns;
7875 }
7876
7877 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
7878 and the change to the stack pointer. */
7879
7880 static void
7881 s390_emit_stack_tie (void)
7882 {
7883 rtx mem = gen_frame_mem (BLKmode,
7884 gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
7885
7886 emit_insn (gen_stack_tie (mem));
7887 }
7888
7889 /* Expand the prologue into a bunch of separate insns. */
7890
7891 void
7892 s390_emit_prologue (void)
7893 {
7894 rtx insn, addr;
7895 rtx temp_reg;
7896 int i;
7897 int offset;
7898 int next_fpr = 0;
7899
7900 /* Complete frame layout. */
7901
7902 s390_update_frame_layout ();
7903
7904 /* Annotate all constant pool references to let the scheduler know
7905 they implicitly use the base register. */
7906
7907 push_topmost_sequence ();
7908
7909 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7910 if (INSN_P (insn))
7911 {
7912 annotate_constant_pool_refs (&PATTERN (insn));
7913 df_insn_rescan (insn);
7914 }
7915
7916 pop_topmost_sequence ();
7917
7918 /* Choose best register to use for temp use within prologue.
7919 See below for why TPF must use register 1. */
7920
7921 if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
7922 && !current_function_is_leaf
7923 && !TARGET_TPF_PROFILING)
7924 temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
7925 else
7926 temp_reg = gen_rtx_REG (Pmode, 1);
7927
7928 /* Save call saved gprs. */
7929 if (cfun_frame_layout.first_save_gpr != -1)
7930 {
7931 insn = save_gprs (stack_pointer_rtx,
7932 cfun_frame_layout.gprs_offset +
7933 UNITS_PER_LONG * (cfun_frame_layout.first_save_gpr
7934 - cfun_frame_layout.first_save_gpr_slot),
7935 cfun_frame_layout.first_save_gpr,
7936 cfun_frame_layout.last_save_gpr);
7937 emit_insn (insn);
7938 }
7939
7940 /* Dummy insn to mark literal pool slot. */
7941
7942 if (cfun->machine->base_reg)
7943 emit_insn (gen_main_pool (cfun->machine->base_reg));
7944
7945 offset = cfun_frame_layout.f0_offset;
7946
7947 /* Save f0 and f2. */
7948 for (i = 0; i < 2; i++)
7949 {
7950 if (cfun_fpr_bit_p (i))
7951 {
7952 save_fpr (stack_pointer_rtx, offset, i + 16);
7953 offset += 8;
7954 }
7955 else if (!TARGET_PACKED_STACK)
7956 offset += 8;
7957 }
7958
7959 /* Save f4 and f6. */
7960 offset = cfun_frame_layout.f4_offset;
7961 for (i = 2; i < 4; i++)
7962 {
7963 if (cfun_fpr_bit_p (i))
7964 {
7965 insn = save_fpr (stack_pointer_rtx, offset, i + 16);
7966 offset += 8;
7967
7968 /* If f4 and f6 are call clobbered they are saved due to stdargs and
7969 therefore are not frame related. */
7970 if (!call_really_used_regs[i + 16])
7971 RTX_FRAME_RELATED_P (insn) = 1;
7972 }
7973 else if (!TARGET_PACKED_STACK)
7974 offset += 8;
7975 }
7976
7977 if (TARGET_PACKED_STACK
7978 && cfun_save_high_fprs_p
7979 && cfun_frame_layout.f8_offset + cfun_frame_layout.high_fprs * 8 > 0)
7980 {
7981 offset = (cfun_frame_layout.f8_offset
7982 + (cfun_frame_layout.high_fprs - 1) * 8);
7983
7984 for (i = 15; i > 7 && offset >= 0; i--)
7985 if (cfun_fpr_bit_p (i))
7986 {
7987 insn = save_fpr (stack_pointer_rtx, offset, i + 16);
7988
7989 RTX_FRAME_RELATED_P (insn) = 1;
7990 offset -= 8;
7991 }
7992 if (offset >= cfun_frame_layout.f8_offset)
7993 next_fpr = i + 16;
7994 }
7995
7996 if (!TARGET_PACKED_STACK)
7997 next_fpr = cfun_save_high_fprs_p ? 31 : 0;
7998
7999 if (flag_stack_usage)
8000 current_function_static_stack_size = cfun_frame_layout.frame_size;
8001
8002 /* Decrement stack pointer. */
8003
8004 if (cfun_frame_layout.frame_size > 0)
8005 {
8006 rtx frame_off = GEN_INT (-cfun_frame_layout.frame_size);
8007 rtx real_frame_off;
8008
8009 if (s390_stack_size)
8010 {
8011 HOST_WIDE_INT stack_guard;
8012
8013 if (s390_stack_guard)
8014 stack_guard = s390_stack_guard;
8015 else
8016 {
8017 /* If no value for the stack guard is provided, the smallest power of 2
8018 larger than the current frame size is chosen. */
8019 stack_guard = 1;
8020 while (stack_guard < cfun_frame_layout.frame_size)
8021 stack_guard <<= 1;
8022 }
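
   /* Worked example (illustration only): with a frame size of 1200 bytes
      and no -mstack-guard value, the loop above leaves stack_guard at
      2048, the smallest power of two that is not smaller than the frame
      size.  */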
8023
8024 if (cfun_frame_layout.frame_size >= s390_stack_size)
8025 {
8026 warning (0, "frame size of function %qs is "
8027 HOST_WIDE_INT_PRINT_DEC
8028 " bytes exceeding user provided stack limit of "
8029 HOST_WIDE_INT_PRINT_DEC " bytes. "
8030 "An unconditional trap is added.",
8031 current_function_name(), cfun_frame_layout.frame_size,
8032 s390_stack_size);
8033 emit_insn (gen_trap ());
8034 }
8035 else
8036 {
8037 /* stack_guard has to be smaller than s390_stack_size.
8038 Otherwise we would emit an AND with zero which would
8039 not match the test under mask pattern. */
8040 if (stack_guard >= s390_stack_size)
8041 {
8042 warning (0, "frame size of function %qs is "
8043 HOST_WIDE_INT_PRINT_DEC
8044 " bytes which is more than half the stack size. "
8045 "The dynamic check would not be reliable. "
8046 "No check emitted for this function.",
8047 current_function_name(),
8048 cfun_frame_layout.frame_size);
8049 }
8050 else
8051 {
8052 HOST_WIDE_INT stack_check_mask = ((s390_stack_size - 1)
8053 & ~(stack_guard - 1));
8054
8055 rtx t = gen_rtx_AND (Pmode, stack_pointer_rtx,
8056 GEN_INT (stack_check_mask));
8057 if (TARGET_64BIT)
8058 emit_insn (gen_ctrapdi4 (gen_rtx_EQ (VOIDmode,
8059 t, const0_rtx),
8060 t, const0_rtx, const0_rtx));
8061 else
8062 emit_insn (gen_ctrapsi4 (gen_rtx_EQ (VOIDmode,
8063 t, const0_rtx),
8064 t, const0_rtx, const0_rtx));
8065 }
8066 }
8067 }
8068
8069 if (s390_warn_framesize > 0
8070 && cfun_frame_layout.frame_size >= s390_warn_framesize)
8071 warning (0, "frame size of %qs is " HOST_WIDE_INT_PRINT_DEC " bytes",
8072 current_function_name (), cfun_frame_layout.frame_size);
8073
8074 if (s390_warn_dynamicstack_p && cfun->calls_alloca)
8075 warning (0, "%qs uses dynamic stack allocation", current_function_name ());
8076
8077 /* Save incoming stack pointer into temp reg. */
8078 if (TARGET_BACKCHAIN || next_fpr)
8079 insn = emit_insn (gen_move_insn (temp_reg, stack_pointer_rtx));
8080
8081 /* Subtract frame size from stack pointer. */
8082
8083 if (DISP_IN_RANGE (INTVAL (frame_off)))
8084 {
8085 insn = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8086 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8087 frame_off));
8088 insn = emit_insn (insn);
8089 }
8090 else
8091 {
8092 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
8093 frame_off = force_const_mem (Pmode, frame_off);
8094
8095 insn = emit_insn (gen_add2_insn (stack_pointer_rtx, frame_off));
8096 annotate_constant_pool_refs (&PATTERN (insn));
8097 }
8098
8099 RTX_FRAME_RELATED_P (insn) = 1;
8100 real_frame_off = GEN_INT (-cfun_frame_layout.frame_size);
8101 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
8102 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8103 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8104 real_frame_off)));
8105
8106 /* Set backchain. */
8107
8108 if (TARGET_BACKCHAIN)
8109 {
8110 if (cfun_frame_layout.backchain_offset)
8111 addr = gen_rtx_MEM (Pmode,
8112 plus_constant (stack_pointer_rtx,
8113 cfun_frame_layout.backchain_offset));
8114 else
8115 addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
8116 set_mem_alias_set (addr, get_frame_alias_set ());
8117 insn = emit_insn (gen_move_insn (addr, temp_reg));
8118 }
8119
8120 /* If we support non-call exceptions (e.g. for Java),
8121 we need to make sure the backchain pointer is set up
8122 before any possibly trapping memory access. */
8123 if (TARGET_BACKCHAIN && cfun->can_throw_non_call_exceptions)
8124 {
8125 addr = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode));
8126 emit_clobber (addr);
8127 }
8128 }
8129
8130 /* Save fprs 8 - 15 (64 bit ABI). */
8131
8132 if (cfun_save_high_fprs_p && next_fpr)
8133 {
8134 /* If the stack might be accessed through a different register
8135 we have to make sure that the stack pointer decrement is not
8136 moved below the use of the stack slots. */
8137 s390_emit_stack_tie ();
8138
8139 insn = emit_insn (gen_add2_insn (temp_reg,
8140 GEN_INT (cfun_frame_layout.f8_offset)));
8141
8142 offset = 0;
8143
8144 for (i = 24; i <= next_fpr; i++)
8145 if (cfun_fpr_bit_p (i - 16))
8146 {
8147 rtx addr = plus_constant (stack_pointer_rtx,
8148 cfun_frame_layout.frame_size
8149 + cfun_frame_layout.f8_offset
8150 + offset);
8151
8152 insn = save_fpr (temp_reg, offset, i);
8153 offset += 8;
8154 RTX_FRAME_RELATED_P (insn) = 1;
8155 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
8156 gen_rtx_SET (VOIDmode,
8157 gen_rtx_MEM (DFmode, addr),
8158 gen_rtx_REG (DFmode, i)));
8159 }
8160 }
8161
8162 /* Set frame pointer, if needed. */
8163
8164 if (frame_pointer_needed)
8165 {
8166 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
8167 RTX_FRAME_RELATED_P (insn) = 1;
8168 }
8169
8170 /* Set up got pointer, if needed. */
8171
8172 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
8173 {
8174 rtx insns = s390_load_got ();
8175
8176 for (insn = insns; insn; insn = NEXT_INSN (insn))
8177 annotate_constant_pool_refs (&PATTERN (insn));
8178
8179 emit_insn (insns);
8180 }
8181
8182 if (TARGET_TPF_PROFILING)
8183 {
8184 /* Generate a BAS instruction to serve as a function
8185 entry intercept to facilitate the use of tracing
8186 algorithms located at the branch target. */
8187 emit_insn (gen_prologue_tpf ());
8188
8189 /* Emit a blockage here so that all code
8190 lies between the profiling mechanisms. */
8191 emit_insn (gen_blockage ());
8192 }
8193 }
8194
8195 /* Expand the epilogue into a bunch of separate insns. */
8196
8197 void
8198 s390_emit_epilogue (bool sibcall)
8199 {
8200 rtx frame_pointer, return_reg, cfa_restores = NULL_RTX;
8201 int area_bottom, area_top, offset = 0;
8202 int next_offset;
8203 rtvec p;
8204 int i;
8205
8206 if (TARGET_TPF_PROFILING)
8207 {
8208
8209 /* Generate a BAS instruction to serve as a function
8210 entry intercept to facilitate the use of tracing
8211 algorithms located at the branch target. */
8212
8213 /* Emit a blockage here so that all code
8214 lies between the profiling mechanisms. */
8215 emit_insn (gen_blockage ());
8216
8217 emit_insn (gen_epilogue_tpf ());
8218 }
8219
8220 /* Check whether to use frame or stack pointer for restore. */
8221
8222 frame_pointer = (frame_pointer_needed
8223 ? hard_frame_pointer_rtx : stack_pointer_rtx);
8224
8225 s390_frame_area (&area_bottom, &area_top);
8226
8227 /* Check whether we can access the register save area.
8228 If not, increment the frame pointer as required. */
8229
8230 if (area_top <= area_bottom)
8231 {
8232 /* Nothing to restore. */
8233 }
8234 else if (DISP_IN_RANGE (cfun_frame_layout.frame_size + area_bottom)
8235 && DISP_IN_RANGE (cfun_frame_layout.frame_size + area_top - 1))
8236 {
8237 /* Area is in range. */
8238 offset = cfun_frame_layout.frame_size;
8239 }
8240 else
8241 {
8242 rtx insn, frame_off, cfa;
8243
8244 offset = area_bottom < 0 ? -area_bottom : 0;
8245 frame_off = GEN_INT (cfun_frame_layout.frame_size - offset);
8246
8247 cfa = gen_rtx_SET (VOIDmode, frame_pointer,
8248 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
8249 if (DISP_IN_RANGE (INTVAL (frame_off)))
8250 {
8251 insn = gen_rtx_SET (VOIDmode, frame_pointer,
8252 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
8253 insn = emit_insn (insn);
8254 }
8255 else
8256 {
8257 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
8258 frame_off = force_const_mem (Pmode, frame_off);
8259
8260 insn = emit_insn (gen_add2_insn (frame_pointer, frame_off));
8261 annotate_constant_pool_refs (&PATTERN (insn));
8262 }
8263 add_reg_note (insn, REG_CFA_ADJUST_CFA, cfa);
8264 RTX_FRAME_RELATED_P (insn) = 1;
8265 }
8266
8267 /* Restore call saved fprs. */
8268
8269 if (TARGET_64BIT)
8270 {
8271 if (cfun_save_high_fprs_p)
8272 {
8273 next_offset = cfun_frame_layout.f8_offset;
8274 for (i = 24; i < 32; i++)
8275 {
8276 if (cfun_fpr_bit_p (i - 16))
8277 {
8278 restore_fpr (frame_pointer,
8279 offset + next_offset, i);
8280 cfa_restores
8281 = alloc_reg_note (REG_CFA_RESTORE,
8282 gen_rtx_REG (DFmode, i), cfa_restores);
8283 next_offset += 8;
8284 }
8285 }
8286 }
8287
8288 }
8289 else
8290 {
8291 next_offset = cfun_frame_layout.f4_offset;
8292 for (i = 18; i < 20; i++)
8293 {
8294 if (cfun_fpr_bit_p (i - 16))
8295 {
8296 restore_fpr (frame_pointer,
8297 offset + next_offset, i);
8298 cfa_restores
8299 = alloc_reg_note (REG_CFA_RESTORE,
8300 gen_rtx_REG (DFmode, i), cfa_restores);
8301 next_offset += 8;
8302 }
8303 else if (!TARGET_PACKED_STACK)
8304 next_offset += 8;
8305 }
8306
8307 }
8308
8309 /* Return register. */
8310
8311 return_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
8312
8313 /* Restore call saved gprs. */
8314
8315 if (cfun_frame_layout.first_restore_gpr != -1)
8316 {
8317 rtx insn, addr;
8318 int i;
8319
8320 /* Check for global registers and save them
8321 to the stack locations from which they get restored. */
8322
8323 for (i = cfun_frame_layout.first_restore_gpr;
8324 i <= cfun_frame_layout.last_restore_gpr;
8325 i++)
8326 {
8327 if (global_not_special_regno_p (i))
8328 {
8329 addr = plus_constant (frame_pointer,
8330 offset + cfun_frame_layout.gprs_offset
8331 + (i - cfun_frame_layout.first_save_gpr_slot)
8332 * UNITS_PER_LONG);
8333 addr = gen_rtx_MEM (Pmode, addr);
8334 set_mem_alias_set (addr, get_frame_alias_set ());
8335 emit_move_insn (addr, gen_rtx_REG (Pmode, i));
8336 }
8337 else
8338 cfa_restores
8339 = alloc_reg_note (REG_CFA_RESTORE,
8340 gen_rtx_REG (Pmode, i), cfa_restores);
8341 }
8342
8343 if (! sibcall)
8344 {
8345 /* Fetch the return address from the stack before the load multiple;
8346 this helps scheduling. */
8347
8348 if (cfun_frame_layout.save_return_addr_p
8349 || (cfun_frame_layout.first_restore_gpr < BASE_REGNUM
8350 && cfun_frame_layout.last_restore_gpr > RETURN_REGNUM))
8351 {
8352 int return_regnum = find_unused_clobbered_reg();
8353 if (!return_regnum)
8354 return_regnum = 4;
8355 return_reg = gen_rtx_REG (Pmode, return_regnum);
8356
8357 addr = plus_constant (frame_pointer,
8358 offset + cfun_frame_layout.gprs_offset
8359 + (RETURN_REGNUM
8360 - cfun_frame_layout.first_save_gpr_slot)
8361 * UNITS_PER_LONG);
8362 addr = gen_rtx_MEM (Pmode, addr);
8363 set_mem_alias_set (addr, get_frame_alias_set ());
8364 emit_move_insn (return_reg, addr);
8365 }
8366 }
8367
8368 insn = restore_gprs (frame_pointer,
8369 offset + cfun_frame_layout.gprs_offset
8370 + (cfun_frame_layout.first_restore_gpr
8371 - cfun_frame_layout.first_save_gpr_slot)
8372 * UNITS_PER_LONG,
8373 cfun_frame_layout.first_restore_gpr,
8374 cfun_frame_layout.last_restore_gpr);
8375 insn = emit_insn (insn);
8376 REG_NOTES (insn) = cfa_restores;
8377 add_reg_note (insn, REG_CFA_DEF_CFA,
8378 plus_constant (stack_pointer_rtx, STACK_POINTER_OFFSET));
8379 RTX_FRAME_RELATED_P (insn) = 1;
8380 }
8381
8382 if (! sibcall)
8383 {
8384
8385 /* Return to caller. */
8386
8387 p = rtvec_alloc (2);
8388
8389 RTVEC_ELT (p, 0) = gen_rtx_RETURN (VOIDmode);
8390 RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode, return_reg);
8391 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
8392 }
8393 }
8394
8395
8396 /* Return the size in bytes of a function argument of
8397 type TYPE and/or mode MODE. At least one of TYPE or
8398 MODE must be specified. */
8399
8400 static int
8401 s390_function_arg_size (enum machine_mode mode, const_tree type)
8402 {
8403 if (type)
8404 return int_size_in_bytes (type);
8405
8406 /* No type info available for some library calls ... */
8407 if (mode != BLKmode)
8408 return GET_MODE_SIZE (mode);
8409
8410 /* If we have neither type nor mode, abort. */
8411 gcc_unreachable ();
8412 }
8413
8414 /* Return true if a function argument of type TYPE and mode MODE
8415 is to be passed in a floating-point register, if available. */
8416
8417 static bool
8418 s390_function_arg_float (enum machine_mode mode, const_tree type)
8419 {
8420 int size = s390_function_arg_size (mode, type);
8421 if (size > 8)
8422 return false;
8423
8424 /* Soft-float changes the ABI: no floating-point registers are used. */
8425 if (TARGET_SOFT_FLOAT)
8426 return false;
8427
8428 /* No type info available for some library calls ... */
8429 if (!type)
8430 return mode == SFmode || mode == DFmode || mode == SDmode || mode == DDmode;
8431
8432 /* The ABI says that record types with a single member are treated
8433 just like that member would be. */
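
   /* Illustrative example (the type name is made up): a record such as

          struct wrapped_double { double d; };

      is unwrapped by the loop below and ends up being passed exactly like
      a plain double, i.e. in a floating-point register when one is
      available.  */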
8434 while (TREE_CODE (type) == RECORD_TYPE)
8435 {
8436 tree field, single = NULL_TREE;
8437
8438 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
8439 {
8440 if (TREE_CODE (field) != FIELD_DECL)
8441 continue;
8442
8443 if (single == NULL_TREE)
8444 single = TREE_TYPE (field);
8445 else
8446 return false;
8447 }
8448
8449 if (single == NULL_TREE)
8450 return false;
8451 else
8452 type = single;
8453 }
8454
8455 return TREE_CODE (type) == REAL_TYPE;
8456 }
8457
8458 /* Return true if a function argument of type TYPE and mode MODE
8459 is to be passed in an integer register, or a pair of integer
8460 registers, if available. */
8461
8462 static bool
8463 s390_function_arg_integer (enum machine_mode mode, const_tree type)
8464 {
8465 int size = s390_function_arg_size (mode, type);
8466 if (size > 8)
8467 return false;
8468
8469 /* No type info available for some library calls ... */
8470 if (!type)
8471 return GET_MODE_CLASS (mode) == MODE_INT
8472 || (TARGET_SOFT_FLOAT && SCALAR_FLOAT_MODE_P (mode));
8473
8474 /* We accept small integral (and similar) types. */
8475 if (INTEGRAL_TYPE_P (type)
8476 || POINTER_TYPE_P (type)
8477 || TREE_CODE (type) == NULLPTR_TYPE
8478 || TREE_CODE (type) == OFFSET_TYPE
8479 || (TARGET_SOFT_FLOAT && TREE_CODE (type) == REAL_TYPE))
8480 return true;
8481
8482 /* We also accept structs of size 1, 2, 4, 8 that are not
8483 passed in floating-point registers. */
8484 if (AGGREGATE_TYPE_P (type)
8485 && exact_log2 (size) >= 0
8486 && !s390_function_arg_float (mode, type))
8487 return true;
8488
8489 return false;
8490 }
8491
8492 /* Return 1 if a function argument of type TYPE and mode MODE
8493 is to be passed by reference. The ABI specifies that only
8494 structures of size 1, 2, 4, or 8 bytes are passed by value,
8495 all other structures (and complex numbers) are passed by
8496 reference. */
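
   /* Illustration (hypothetical user types): a 4-byte struct such as

          struct s4 { short a; short b; };

      is passed by value, while a 3-byte or a 12-byte struct is passed by
      reference via a pointer, as are complex and vector types.  */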
8497
8498 static bool
8499 s390_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
8500 enum machine_mode mode, const_tree type,
8501 bool named ATTRIBUTE_UNUSED)
8502 {
8503 int size = s390_function_arg_size (mode, type);
8504 if (size > 8)
8505 return true;
8506
8507 if (type)
8508 {
8509 if (AGGREGATE_TYPE_P (type) && exact_log2 (size) < 0)
8510 return 1;
8511
8512 if (TREE_CODE (type) == COMPLEX_TYPE
8513 || TREE_CODE (type) == VECTOR_TYPE)
8514 return 1;
8515 }
8516
8517 return 0;
8518 }
8519
8520 /* Update the data in CUM to advance over an argument of mode MODE and
8521 data type TYPE. (TYPE is null for libcalls where that information
8522 may not be available.) The boolean NAMED specifies whether the
8523 argument is a named argument (as opposed to an unnamed argument
8524 matching an ellipsis). */
8525
8526 static void
8527 s390_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
8528 const_tree type, bool named ATTRIBUTE_UNUSED)
8529 {
8530 if (s390_function_arg_float (mode, type))
8531 {
8532 cum->fprs += 1;
8533 }
8534 else if (s390_function_arg_integer (mode, type))
8535 {
8536 int size = s390_function_arg_size (mode, type);
8537 cum->gprs += ((size + UNITS_PER_LONG - 1) / UNITS_PER_LONG);
8538 }
8539 else
8540 gcc_unreachable ();
8541 }
8542
8543 /* Define where to put the arguments to a function.
8544 Value is zero to push the argument on the stack,
8545 or a hard register in which to store the argument.
8546
8547 MODE is the argument's machine mode.
8548 TYPE is the data type of the argument (as a tree).
8549 This is null for libcalls where that information may
8550 not be available.
8551 CUM is a variable of type CUMULATIVE_ARGS which gives info about
8552 the preceding args and about the function being called.
8553 NAMED is nonzero if this argument is a named parameter
8554 (otherwise it is an extra parameter matching an ellipsis).
8555
8556 On S/390, we use general purpose registers 2 through 6 to
8557 pass integer, pointer, and certain structure arguments, and
8558 floating point registers 0 and 2 (0, 2, 4, and 6 on 64-bit)
8559 to pass floating point arguments. All remaining arguments
8560 are pushed to the stack. */
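
   /* A rough example of the resulting assignment (64-bit target with
      hardware floating point assumed; the prototype is made up):

          void f (int a, double b, long c);

      a is passed in %r2, b in %f0 and c in %r3; further integer arguments
      would use %r4..%r6 and further FP arguments %f2, %f4 and %f6 before
      anything goes to the stack.  */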
8561
8562 static rtx
8563 s390_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
8564 const_tree type, bool named ATTRIBUTE_UNUSED)
8565 {
8566 if (s390_function_arg_float (mode, type))
8567 {
8568 if (cum->fprs + 1 > FP_ARG_NUM_REG)
8569 return 0;
8570 else
8571 return gen_rtx_REG (mode, cum->fprs + 16);
8572 }
8573 else if (s390_function_arg_integer (mode, type))
8574 {
8575 int size = s390_function_arg_size (mode, type);
8576 int n_gprs = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
8577
8578 if (cum->gprs + n_gprs > GP_ARG_NUM_REG)
8579 return 0;
8580 else if (n_gprs == 1 || UNITS_PER_WORD == UNITS_PER_LONG)
8581 return gen_rtx_REG (mode, cum->gprs + 2);
8582 else if (n_gprs == 2)
8583 {
8584 rtvec p = rtvec_alloc (2);
8585
8586 RTVEC_ELT (p, 0)
8587 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 2),
8588 const0_rtx);
8589 RTVEC_ELT (p, 1)
8590 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 3),
8591 GEN_INT (4));
8592
8593 return gen_rtx_PARALLEL (mode, p);
8594 }
8595 }
8596
8597 /* After the real arguments, expand_call calls us once again
8598 with a void_type_node type. Whatever we return here is
8599 passed as operand 2 to the call expanders.
8600
8601 We don't need this feature ... */
8602 else if (type == void_type_node)
8603 return const0_rtx;
8604
8605 gcc_unreachable ();
8606 }
8607
8608 /* Return true if return values of type TYPE should be returned
8609 in a memory buffer whose address is passed by the caller as
8610 hidden first argument. */
8611
8612 static bool
8613 s390_return_in_memory (const_tree type, const_tree fundecl ATTRIBUTE_UNUSED)
8614 {
8615 /* We accept small integral (and similar) types. */
8616 if (INTEGRAL_TYPE_P (type)
8617 || POINTER_TYPE_P (type)
8618 || TREE_CODE (type) == OFFSET_TYPE
8619 || TREE_CODE (type) == REAL_TYPE)
8620 return int_size_in_bytes (type) > 8;
8621
8622 /* Aggregates and similar constructs are always returned
8623 in memory. */
8624 if (AGGREGATE_TYPE_P (type)
8625 || TREE_CODE (type) == COMPLEX_TYPE
8626 || TREE_CODE (type) == VECTOR_TYPE)
8627 return true;
8628
8629 /* ??? We get called on all sorts of random stuff from
8630 aggregate_value_p. We can't abort, but it's not clear
8631 what's safe to return. Pretend it's a struct I guess. */
8632 return true;
8633 }
8634
8635 /* Function arguments and return values are promoted to word size. */
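
   /* Minimal illustration: a char or short argument is widened here and
      travels as a full word-size (Pmode) value, so on a 64-bit target it
      occupies a complete 64-bit register.  */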
8636
8637 static enum machine_mode
8638 s390_promote_function_mode (const_tree type, enum machine_mode mode,
8639 int *punsignedp,
8640 const_tree fntype ATTRIBUTE_UNUSED,
8641 int for_return ATTRIBUTE_UNUSED)
8642 {
8643 if (INTEGRAL_MODE_P (mode)
8644 && GET_MODE_SIZE (mode) < UNITS_PER_LONG)
8645 {
8646 if (POINTER_TYPE_P (type))
8647 *punsignedp = POINTERS_EXTEND_UNSIGNED;
8648 return Pmode;
8649 }
8650
8651 return mode;
8652 }
8653
8654 /* Define where to return a (scalar) value of type TYPE.
8655 If TYPE is null, define where to return a (scalar)
8656 value of mode MODE from a libcall. */
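
   /* Rough examples (illustration only): with hardware floating point a
      double result comes back in %f0 (register 16 below); an int or
      pointer result in %r2; and a result of twice the register width,
      e.g. a 64-bit integer on a 31-bit target, in the %r2/%r3 pair built
      as a PARALLEL below.  */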
8657
8658 rtx
8659 s390_function_value (const_tree type, const_tree fn, enum machine_mode mode)
8660 {
8661 if (type)
8662 {
8663 int unsignedp = TYPE_UNSIGNED (type);
8664 mode = promote_function_mode (type, TYPE_MODE (type), &unsignedp, fn, 1);
8665 }
8666
8667 gcc_assert (GET_MODE_CLASS (mode) == MODE_INT || SCALAR_FLOAT_MODE_P (mode));
8668 gcc_assert (GET_MODE_SIZE (mode) <= 8);
8669
8670 if (TARGET_HARD_FLOAT && SCALAR_FLOAT_MODE_P (mode))
8671 return gen_rtx_REG (mode, 16);
8672 else if (GET_MODE_SIZE (mode) <= UNITS_PER_LONG
8673 || UNITS_PER_LONG == UNITS_PER_WORD)
8674 return gen_rtx_REG (mode, 2);
8675 else if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_LONG)
8676 {
8677 rtvec p = rtvec_alloc (2);
8678
8679 RTVEC_ELT (p, 0)
8680 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 2), const0_rtx);
8681 RTVEC_ELT (p, 1)
8682 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 3), GEN_INT (4));
8683
8684 return gen_rtx_PARALLEL (mode, p);
8685 }
8686
8687 gcc_unreachable ();
8688 }
8689
8690
8691 /* Create and return the va_list datatype.
8692
8693 On S/390, va_list is an array type equivalent to
8694
8695 typedef struct __va_list_tag
8696 {
8697 long __gpr;
8698 long __fpr;
8699 void *__overflow_arg_area;
8700 void *__reg_save_area;
8701 } va_list[1];
8702
8703 where __gpr and __fpr hold the number of general purpose
8704 or floating point arguments used up to now, respectively,
8705 __overflow_arg_area points to the stack location of the
8706 next argument passed on the stack, and __reg_save_area
8707 always points to the start of the register area in the
8708 call frame of the current function. The function prologue
8709 saves all registers used for argument passing into this
8710 area if the function uses variable arguments. */
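
   /* A rough picture of how the fields end up being used (hypothetical
      prototype, hardware floating point assumed):

          int f (int a, double b, ...);

      After va_start, __gpr is 1 and __fpr is 1 (one named GPR and one
      named FPR argument consumed), __overflow_arg_area points at the
      first anonymous stack argument, and __reg_save_area points at the
      register save area in f's frame.  */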
8711
8712 static tree
8713 s390_build_builtin_va_list (void)
8714 {
8715 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
8716
8717 record = lang_hooks.types.make_type (RECORD_TYPE);
8718
8719 type_decl =
8720 build_decl (BUILTINS_LOCATION,
8721 TYPE_DECL, get_identifier ("__va_list_tag"), record);
8722
8723 f_gpr = build_decl (BUILTINS_LOCATION,
8724 FIELD_DECL, get_identifier ("__gpr"),
8725 long_integer_type_node);
8726 f_fpr = build_decl (BUILTINS_LOCATION,
8727 FIELD_DECL, get_identifier ("__fpr"),
8728 long_integer_type_node);
8729 f_ovf = build_decl (BUILTINS_LOCATION,
8730 FIELD_DECL, get_identifier ("__overflow_arg_area"),
8731 ptr_type_node);
8732 f_sav = build_decl (BUILTINS_LOCATION,
8733 FIELD_DECL, get_identifier ("__reg_save_area"),
8734 ptr_type_node);
8735
8736 va_list_gpr_counter_field = f_gpr;
8737 va_list_fpr_counter_field = f_fpr;
8738
8739 DECL_FIELD_CONTEXT (f_gpr) = record;
8740 DECL_FIELD_CONTEXT (f_fpr) = record;
8741 DECL_FIELD_CONTEXT (f_ovf) = record;
8742 DECL_FIELD_CONTEXT (f_sav) = record;
8743
8744 TYPE_STUB_DECL (record) = type_decl;
8745 TYPE_NAME (record) = type_decl;
8746 TYPE_FIELDS (record) = f_gpr;
8747 DECL_CHAIN (f_gpr) = f_fpr;
8748 DECL_CHAIN (f_fpr) = f_ovf;
8749 DECL_CHAIN (f_ovf) = f_sav;
8750
8751 layout_type (record);
8752
8753 /* The correct type is an array type of one element. */
8754 return build_array_type (record, build_index_type (size_zero_node));
8755 }
8756
8757 /* Implement va_start by filling the va_list structure VALIST.
8758 STDARG_P is always true, and ignored.
8759 NEXTARG points to the first anonymous stack argument.
8760
8761 The following global variables are used to initialize
8762 the va_list structure:
8763
8764 crtl->args.info:
8765 holds number of gprs and fprs used for named arguments.
8766 crtl->args.arg_offset_rtx:
8767 holds the offset of the first anonymous stack argument
8768 (relative to the virtual arg pointer). */
8769
8770 static void
8771 s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
8772 {
8773 HOST_WIDE_INT n_gpr, n_fpr;
8774 int off;
8775 tree f_gpr, f_fpr, f_ovf, f_sav;
8776 tree gpr, fpr, ovf, sav, t;
8777
8778 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
8779 f_fpr = DECL_CHAIN (f_gpr);
8780 f_ovf = DECL_CHAIN (f_fpr);
8781 f_sav = DECL_CHAIN (f_ovf);
8782
8783 valist = build_simple_mem_ref (valist);
8784 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
8785 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
8786 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
8787 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
8788
8789 /* Count number of gp and fp argument registers used. */
8790
8791 n_gpr = crtl->args.info.gprs;
8792 n_fpr = crtl->args.info.fprs;
8793
8794 if (cfun->va_list_gpr_size)
8795 {
8796 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
8797 build_int_cst (NULL_TREE, n_gpr));
8798 TREE_SIDE_EFFECTS (t) = 1;
8799 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8800 }
8801
8802 if (cfun->va_list_fpr_size)
8803 {
8804 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
8805 build_int_cst (NULL_TREE, n_fpr));
8806 TREE_SIDE_EFFECTS (t) = 1;
8807 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8808 }
8809
8810 /* Find the overflow area. */
8811 if (n_gpr + cfun->va_list_gpr_size > GP_ARG_NUM_REG
8812 || n_fpr + cfun->va_list_fpr_size > FP_ARG_NUM_REG)
8813 {
8814 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
8815
8816 off = INTVAL (crtl->args.arg_offset_rtx);
8817 off = off < 0 ? 0 : off;
8818 if (TARGET_DEBUG_ARG)
8819 fprintf (stderr, "va_start: n_gpr = %d, n_fpr = %d off %d\n",
8820 (int)n_gpr, (int)n_fpr, off);
8821
8822 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovf), t, size_int (off));
8823
8824 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
8825 TREE_SIDE_EFFECTS (t) = 1;
8826 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8827 }
8828
8829 /* Find the register save area. */
8830 if ((cfun->va_list_gpr_size && n_gpr < GP_ARG_NUM_REG)
8831 || (cfun->va_list_fpr_size && n_fpr < FP_ARG_NUM_REG))
8832 {
8833 t = make_tree (TREE_TYPE (sav), return_address_pointer_rtx);
8834 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (sav), t,
8835 size_int (-RETURN_REGNUM * UNITS_PER_LONG));
8836
8837 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
8838 TREE_SIDE_EFFECTS (t) = 1;
8839 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8840 }
8841 }
8842
8843 /* Implement va_arg by updating the va_list structure
8844 VALIST as required to retrieve an argument of type
8845 TYPE, and returning that argument.
8846
8847 Generates code equivalent to:
8848
8849 if (integral value) {
8850 if (size <= 4 && args.gpr < 5 ||
8851 size > 4 && args.gpr < 4 )
8852 ret = args.reg_save_area[args.gpr+8]
8853 else
8854 ret = *args.overflow_arg_area++;
8855 } else if (float value) {
8856 if (args.fpr < 2)
8857 ret = args.reg_save_area[args.fpr+64]
8858 else
8859 ret = *args.overflow_arg_area++;
8860 } else if (aggregate value) {
8861 if (args.gpr < 5)
8862 ret = *args.reg_save_area[args.gpr]
8863 else
8864 ret = **args.overflow_arg_area++;
8865 } */
8866
8867 static tree
8868 s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
8869 gimple_seq *post_p ATTRIBUTE_UNUSED)
8870 {
8871 tree f_gpr, f_fpr, f_ovf, f_sav;
8872 tree gpr, fpr, ovf, sav, reg, t, u;
8873 int indirect_p, size, n_reg, sav_ofs, sav_scale, max_reg;
8874 tree lab_false, lab_over, addr;
8875
8876 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
8877 f_fpr = DECL_CHAIN (f_gpr);
8878 f_ovf = DECL_CHAIN (f_fpr);
8879 f_sav = DECL_CHAIN (f_ovf);
8880
8881 valist = build_va_arg_indirect_ref (valist);
8882 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
8883 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
8884 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
8885
8886 /* The tree for args* cannot be shared between gpr/fpr and ovf since
8887 both appear on a lhs. */
8888 valist = unshare_expr (valist);
8889 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
8890
8891 size = int_size_in_bytes (type);
8892
8893 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
8894 {
8895 if (TARGET_DEBUG_ARG)
8896 {
8897 fprintf (stderr, "va_arg: aggregate type");
8898 debug_tree (type);
8899 }
8900
8901 /* Aggregates are passed by reference. */
8902 indirect_p = 1;
8903 reg = gpr;
8904 n_reg = 1;
8905
8906 /* Kernel stack layout on 31 bit: it is assumed here that no padding
8907 will be added by s390_frame_info because for va_args an even
8908 number of gprs always has to be saved (r15-r2 = 14 regs). */
8909 sav_ofs = 2 * UNITS_PER_LONG;
8910 sav_scale = UNITS_PER_LONG;
8911 size = UNITS_PER_LONG;
8912 max_reg = GP_ARG_NUM_REG - n_reg;
8913 }
8914 else if (s390_function_arg_float (TYPE_MODE (type), type))
8915 {
8916 if (TARGET_DEBUG_ARG)
8917 {
8918 fprintf (stderr, "va_arg: float type");
8919 debug_tree (type);
8920 }
8921
8922 /* FP args go in FP registers, if present. */
8923 indirect_p = 0;
8924 reg = fpr;
8925 n_reg = 1;
8926 sav_ofs = 16 * UNITS_PER_LONG;
8927 sav_scale = 8;
8928 max_reg = FP_ARG_NUM_REG - n_reg;
8929 }
8930 else
8931 {
8932 if (TARGET_DEBUG_ARG)
8933 {
8934 fprintf (stderr, "va_arg: other type");
8935 debug_tree (type);
8936 }
8937
8938 /* Otherwise into GP registers. */
8939 indirect_p = 0;
8940 reg = gpr;
8941 n_reg = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
8942
8943 /* Kernel stack layout on 31 bit: it is assumed here that no padding
8944 will be added by s390_frame_info because for va_args an even
8945 number of gprs always has to be saved (r15-r2 = 14 regs). */
8946 sav_ofs = 2 * UNITS_PER_LONG;
8947
8948 if (size < UNITS_PER_LONG)
8949 sav_ofs += UNITS_PER_LONG - size;
8950
8951 sav_scale = UNITS_PER_LONG;
8952 max_reg = GP_ARG_NUM_REG - n_reg;
8953 }
8954
8955 /* Pull the value out of the saved registers ... */
8956
8957 lab_false = create_artificial_label (UNKNOWN_LOCATION);
8958 lab_over = create_artificial_label (UNKNOWN_LOCATION);
8959 addr = create_tmp_var (ptr_type_node, "addr");
8960
8961 t = fold_convert (TREE_TYPE (reg), size_int (max_reg));
8962 t = build2 (GT_EXPR, boolean_type_node, reg, t);
8963 u = build1 (GOTO_EXPR, void_type_node, lab_false);
8964 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
8965 gimplify_and_add (t, pre_p);
8966
8967 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav,
8968 size_int (sav_ofs));
8969 u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
8970 fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
8971 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t, fold_convert (sizetype, u));
8972
8973 gimplify_assign (addr, t, pre_p);
8974
8975 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
8976
8977 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
8978
8979
8980 /* ... Otherwise out of the overflow area. */
8981
8982 t = ovf;
8983 if (size < UNITS_PER_LONG)
8984 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
8985 size_int (UNITS_PER_LONG - size));
8986
8987 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
8988
8989 gimplify_assign (addr, t, pre_p);
8990
8991 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
8992 size_int (size));
8993 gimplify_assign (ovf, t, pre_p);
8994
8995 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
8996
8997
8998 /* Increment register save count. */
8999
9000 u = build2 (PREINCREMENT_EXPR, TREE_TYPE (reg), reg,
9001 fold_convert (TREE_TYPE (reg), size_int (n_reg)));
9002 gimplify_and_add (u, pre_p);
9003
9004 if (indirect_p)
9005 {
9006 t = build_pointer_type_for_mode (build_pointer_type (type),
9007 ptr_mode, true);
9008 addr = fold_convert (t, addr);
9009 addr = build_va_arg_indirect_ref (addr);
9010 }
9011 else
9012 {
9013 t = build_pointer_type_for_mode (type, ptr_mode, true);
9014 addr = fold_convert (t, addr);
9015 }
9016
9017 return build_va_arg_indirect_ref (addr);
9018 }
9019
9020
9021 /* Builtins. */
9022
9023 enum s390_builtin
9024 {
9025 S390_BUILTIN_THREAD_POINTER,
9026 S390_BUILTIN_SET_THREAD_POINTER,
9027
9028 S390_BUILTIN_max
9029 };
9030
9031 static enum insn_code const code_for_builtin_64[S390_BUILTIN_max] = {
9032 CODE_FOR_get_tp_64,
9033 CODE_FOR_set_tp_64
9034 };
9035
9036 static enum insn_code const code_for_builtin_31[S390_BUILTIN_max] = {
9037 CODE_FOR_get_tp_31,
9038 CODE_FOR_set_tp_31
9039 };
9040
9041 static void
9042 s390_init_builtins (void)
9043 {
9044 tree ftype;
9045
9046 ftype = build_function_type (ptr_type_node, void_list_node);
9047 add_builtin_function ("__builtin_thread_pointer", ftype,
9048 S390_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
9049 NULL, NULL_TREE);
9050
9051 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
9052 add_builtin_function ("__builtin_set_thread_pointer", ftype,
9053 S390_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
9054 NULL, NULL_TREE);
9055 }
9056
9057 /* Expand an expression EXP that calls a built-in function,
9058 with result going to TARGET if that's convenient
9059 (and in mode MODE if that's convenient).
9060 SUBTARGET may be used as the target for computing one of EXP's operands.
9061 IGNORE is nonzero if the value is to be ignored. */
9062
9063 static rtx
9064 s390_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
9065 enum machine_mode mode ATTRIBUTE_UNUSED,
9066 int ignore ATTRIBUTE_UNUSED)
9067 {
9068 #define MAX_ARGS 2
9069
9070 enum insn_code const *code_for_builtin =
9071 TARGET_64BIT ? code_for_builtin_64 : code_for_builtin_31;
9072
9073 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
9074 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
9075 enum insn_code icode;
9076 rtx op[MAX_ARGS], pat;
9077 int arity;
9078 bool nonvoid;
9079 tree arg;
9080 call_expr_arg_iterator iter;
9081
9082 if (fcode >= S390_BUILTIN_max)
9083 internal_error ("bad builtin fcode");
9084 icode = code_for_builtin[fcode];
9085 if (icode == 0)
9086 internal_error ("bad builtin fcode");
9087
9088 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
9089
9090 arity = 0;
9091 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
9092 {
9093 const struct insn_operand_data *insn_op;
9094
9095 if (arg == error_mark_node)
9096 return NULL_RTX;
9097 if (arity > MAX_ARGS)
9098 return NULL_RTX;
9099
9100 insn_op = &insn_data[icode].operand[arity + nonvoid];
9101
9102 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
9103
9104 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
9105 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
9106 arity++;
9107 }
9108
9109 if (nonvoid)
9110 {
9111 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9112 if (!target
9113 || GET_MODE (target) != tmode
9114 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
9115 target = gen_reg_rtx (tmode);
9116 }
9117
9118 switch (arity)
9119 {
9120 case 0:
9121 pat = GEN_FCN (icode) (target);
9122 break;
9123 case 1:
9124 if (nonvoid)
9125 pat = GEN_FCN (icode) (target, op[0]);
9126 else
9127 pat = GEN_FCN (icode) (op[0]);
9128 break;
9129 case 2:
9130 pat = GEN_FCN (icode) (target, op[0], op[1]);
9131 break;
9132 default:
9133 gcc_unreachable ();
9134 }
9135 if (!pat)
9136 return NULL_RTX;
9137 emit_insn (pat);
9138
9139 if (nonvoid)
9140 return target;
9141 else
9142 return const0_rtx;
9143 }
9144
9145
9146 /* Output assembly code for the trampoline template to
9147 stdio stream FILE.
9148
9149 On S/390, we use gpr 1 internally in the trampoline code;
9150 gpr 0 is used to hold the static chain. */
9151
9152 static void
9153 s390_asm_trampoline_template (FILE *file)
9154 {
9155 rtx op[2];
9156 op[0] = gen_rtx_REG (Pmode, 0);
9157 op[1] = gen_rtx_REG (Pmode, 1);
9158
9159 if (TARGET_64BIT)
9160 {
9161 output_asm_insn ("basr\t%1,0", op);
9162 output_asm_insn ("lmg\t%0,%1,14(%1)", op);
9163 output_asm_insn ("br\t%1", op);
9164 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 10));
9165 }
9166 else
9167 {
9168 output_asm_insn ("basr\t%1,0", op);
9169 output_asm_insn ("lm\t%0,%1,6(%1)", op);
9170 output_asm_insn ("br\t%1", op);
9171 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 8));
9172 }
9173 }
9174
9175 /* Emit RTL insns to initialize the variable parts of a trampoline.
9176 FNADDR is an RTX for the address of the function's pure code.
9177 CXT is an RTX for the static chain value for the function. */
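
   /* Resulting layout, as an illustration (64-bit target assumed, so
      UNITS_PER_WORD is 8): bytes 0-15 hold the code template copied
      below, bytes 16-23 receive the static chain and bytes 24-31 the
      function address, which is exactly where the template's
      lmg %r0,%r1,14(%r1) picks them up into %r0 and %r1.  */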
9178
9179 static void
9180 s390_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
9181 {
9182 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
9183 rtx mem;
9184
9185 emit_block_move (m_tramp, assemble_trampoline_template (),
9186 GEN_INT (2*UNITS_PER_WORD), BLOCK_OP_NORMAL);
9187
9188 mem = adjust_address (m_tramp, Pmode, 2*UNITS_PER_WORD);
9189 emit_move_insn (mem, cxt);
9190 mem = adjust_address (m_tramp, Pmode, 3*UNITS_PER_WORD);
9191 emit_move_insn (mem, fnaddr);
9192 }
9193
9194 /* Output assembler code to FILE to increment profiler label # LABELNO
9195 for profiling a function entry. */
9196
9197 void
9198 s390_function_profiler (FILE *file, int labelno)
9199 {
9200 rtx op[7];
9201
9202 char label[128];
9203 ASM_GENERATE_INTERNAL_LABEL (label, "LP", labelno);
9204
9205 fprintf (file, "# function profiler \n");
9206
9207 op[0] = gen_rtx_REG (Pmode, RETURN_REGNUM);
9208 op[1] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
9209 op[1] = gen_rtx_MEM (Pmode, plus_constant (op[1], UNITS_PER_LONG));
9210
9211 op[2] = gen_rtx_REG (Pmode, 1);
9212 op[3] = gen_rtx_SYMBOL_REF (Pmode, label);
9213 SYMBOL_REF_FLAGS (op[3]) = SYMBOL_FLAG_LOCAL;
9214
9215 op[4] = gen_rtx_SYMBOL_REF (Pmode, "_mcount");
9216 if (flag_pic)
9217 {
9218 op[4] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[4]), UNSPEC_PLT);
9219 op[4] = gen_rtx_CONST (Pmode, op[4]);
9220 }
9221
9222 if (TARGET_64BIT)
9223 {
9224 output_asm_insn ("stg\t%0,%1", op);
9225 output_asm_insn ("larl\t%2,%3", op);
9226 output_asm_insn ("brasl\t%0,%4", op);
9227 output_asm_insn ("lg\t%0,%1", op);
9228 }
9229 else if (!flag_pic)
9230 {
9231 op[6] = gen_label_rtx ();
9232
9233 output_asm_insn ("st\t%0,%1", op);
9234 output_asm_insn ("bras\t%2,%l6", op);
9235 output_asm_insn (".long\t%4", op);
9236 output_asm_insn (".long\t%3", op);
9237 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
9238 output_asm_insn ("l\t%0,0(%2)", op);
9239 output_asm_insn ("l\t%2,4(%2)", op);
9240 output_asm_insn ("basr\t%0,%0", op);
9241 output_asm_insn ("l\t%0,%1", op);
9242 }
9243 else
9244 {
9245 op[5] = gen_label_rtx ();
9246 op[6] = gen_label_rtx ();
9247
9248 output_asm_insn ("st\t%0,%1", op);
9249 output_asm_insn ("bras\t%2,%l6", op);
9250 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[5]));
9251 output_asm_insn (".long\t%4-%l5", op);
9252 output_asm_insn (".long\t%3-%l5", op);
9253 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
9254 output_asm_insn ("lr\t%0,%2", op);
9255 output_asm_insn ("a\t%0,0(%2)", op);
9256 output_asm_insn ("a\t%2,4(%2)", op);
9257 output_asm_insn ("basr\t%0,%0", op);
9258 output_asm_insn ("l\t%0,%1", op);
9259 }
9260 }
9261
9262 /* Encode symbol attributes (local vs. global, tls model) of a SYMBOL_REF
9263 into its SYMBOL_REF_FLAGS. */
9264
9265 static void
9266 s390_encode_section_info (tree decl, rtx rtl, int first)
9267 {
9268 default_encode_section_info (decl, rtl, first);
9269
9270 if (TREE_CODE (decl) == VAR_DECL)
9271 {
9272 /* If a variable has a forced alignment to < 2 bytes, mark it
9273 with SYMBOL_FLAG_ALIGN1 to prevent it from being used as LARL
9274 operand. */
9275 if (DECL_USER_ALIGN (decl) && DECL_ALIGN (decl) < 16)
9276 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_ALIGN1;
9277 if (!DECL_SIZE (decl)
9278 || !DECL_ALIGN (decl)
9279 || !host_integerp (DECL_SIZE (decl), 0)
9280 || (DECL_ALIGN (decl) <= 64
9281 && DECL_ALIGN (decl) != tree_low_cst (DECL_SIZE (decl), 0)))
9282 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
9283 }
9284
9285 /* Literal pool references don't have a decl so they are handled
9286 differently here. We rely on the information in the MEM_ALIGN
9287 entry to decide upon natural alignment. */
9288 if (MEM_P (rtl)
9289 && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
9290 && TREE_CONSTANT_POOL_ADDRESS_P (XEXP (rtl, 0))
9291 && (MEM_ALIGN (rtl) == 0
9292 || GET_MODE_BITSIZE (GET_MODE (rtl)) == 0
9293 || MEM_ALIGN (rtl) < GET_MODE_BITSIZE (GET_MODE (rtl))))
9294 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
9295 }
9296
9297 /* Output thunk to FILE that implements a C++ virtual function call (with
9298 multiple inheritance) to FUNCTION. The thunk adjusts the this pointer
9299 by DELTA, and unless VCALL_OFFSET is zero, applies an additional adjustment
9300 stored at VCALL_OFFSET in the vtable whose address is located at offset 0
9301 relative to the resulting this pointer. */
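
   /* In rough pseudocode (illustration only), the emitted thunk does:

          this += delta;
          if (vcall_offset)
            this += *(long *) (*(char **) this + vcall_offset);
          tail-jump to FUNCTION;  */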
9302
9303 static void
9304 s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
9305 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
9306 tree function)
9307 {
9308 rtx op[10];
9309 int nonlocal = 0;
9310
9311 /* Make sure unwind info is emitted for the thunk if needed. */
9312 final_start_function (emit_barrier (), file, 1);
9313
9314 /* Operand 0 is the target function. */
9315 op[0] = XEXP (DECL_RTL (function), 0);
9316 if (flag_pic && !SYMBOL_REF_LOCAL_P (op[0]))
9317 {
9318 nonlocal = 1;
9319 op[0] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[0]),
9320 TARGET_64BIT ? UNSPEC_PLT : UNSPEC_GOT);
9321 op[0] = gen_rtx_CONST (Pmode, op[0]);
9322 }
9323
9324 /* Operand 1 is the 'this' pointer. */
9325 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
9326 op[1] = gen_rtx_REG (Pmode, 3);
9327 else
9328 op[1] = gen_rtx_REG (Pmode, 2);
9329
9330 /* Operand 2 is the delta. */
9331 op[2] = GEN_INT (delta);
9332
9333 /* Operand 3 is the vcall_offset. */
9334 op[3] = GEN_INT (vcall_offset);
9335
9336 /* Operand 4 is the temporary register. */
9337 op[4] = gen_rtx_REG (Pmode, 1);
9338
9339 /* Operands 5 to 8 can be used as labels. */
9340 op[5] = NULL_RTX;
9341 op[6] = NULL_RTX;
9342 op[7] = NULL_RTX;
9343 op[8] = NULL_RTX;
9344
9345 /* Operand 9 can be used for temporary register. */
9346 op[9] = NULL_RTX;
9347
9348 /* Generate code. */
9349 if (TARGET_64BIT)
9350 {
9351 /* Set up the literal pool pointer if required. */
9352 if ((!DISP_IN_RANGE (delta)
9353 && !CONST_OK_FOR_K (delta)
9354 && !CONST_OK_FOR_Os (delta))
9355 || (!DISP_IN_RANGE (vcall_offset)
9356 && !CONST_OK_FOR_K (vcall_offset)
9357 && !CONST_OK_FOR_Os (vcall_offset)))
9358 {
9359 op[5] = gen_label_rtx ();
9360 output_asm_insn ("larl\t%4,%5", op);
9361 }
9362
9363 /* Add DELTA to this pointer. */
9364 if (delta)
9365 {
9366 if (CONST_OK_FOR_J (delta))
9367 output_asm_insn ("la\t%1,%2(%1)", op);
9368 else if (DISP_IN_RANGE (delta))
9369 output_asm_insn ("lay\t%1,%2(%1)", op);
9370 else if (CONST_OK_FOR_K (delta))
9371 output_asm_insn ("aghi\t%1,%2", op);
9372 else if (CONST_OK_FOR_Os (delta))
9373 output_asm_insn ("agfi\t%1,%2", op);
9374 else
9375 {
9376 op[6] = gen_label_rtx ();
9377 output_asm_insn ("agf\t%1,%6-%5(%4)", op);
9378 }
9379 }
9380
9381 /* Perform vcall adjustment. */
9382 if (vcall_offset)
9383 {
9384 if (DISP_IN_RANGE (vcall_offset))
9385 {
9386 output_asm_insn ("lg\t%4,0(%1)", op);
9387 output_asm_insn ("ag\t%1,%3(%4)", op);
9388 }
9389 else if (CONST_OK_FOR_K (vcall_offset))
9390 {
9391 output_asm_insn ("lghi\t%4,%3", op);
9392 output_asm_insn ("ag\t%4,0(%1)", op);
9393 output_asm_insn ("ag\t%1,0(%4)", op);
9394 }
9395 else if (CONST_OK_FOR_Os (vcall_offset))
9396 {
9397 output_asm_insn ("lgfi\t%4,%3", op);
9398 output_asm_insn ("ag\t%4,0(%1)", op);
9399 output_asm_insn ("ag\t%1,0(%4)", op);
9400 }
9401 else
9402 {
9403 op[7] = gen_label_rtx ();
9404 output_asm_insn ("llgf\t%4,%7-%5(%4)", op);
9405 output_asm_insn ("ag\t%4,0(%1)", op);
9406 output_asm_insn ("ag\t%1,0(%4)", op);
9407 }
9408 }
9409
9410 /* Jump to target. */
9411 output_asm_insn ("jg\t%0", op);
9412
9413 /* Output literal pool if required. */
9414 if (op[5])
9415 {
9416 output_asm_insn (".align\t4", op);
9417 targetm.asm_out.internal_label (file, "L",
9418 CODE_LABEL_NUMBER (op[5]));
9419 }
9420 if (op[6])
9421 {
9422 targetm.asm_out.internal_label (file, "L",
9423 CODE_LABEL_NUMBER (op[6]));
9424 output_asm_insn (".long\t%2", op);
9425 }
9426 if (op[7])
9427 {
9428 targetm.asm_out.internal_label (file, "L",
9429 CODE_LABEL_NUMBER (op[7]));
9430 output_asm_insn (".long\t%3", op);
9431 }
9432 }
9433 else
9434 {
9435 /* Set up the base pointer if required. */
9436 if (!vcall_offset
9437 || (!DISP_IN_RANGE (delta)
9438 && !CONST_OK_FOR_K (delta)
9439 && !CONST_OK_FOR_Os (delta))
9440 || (!DISP_IN_RANGE (delta)
9441 && !CONST_OK_FOR_K (vcall_offset)
9442 && !CONST_OK_FOR_Os (vcall_offset)))
9443 {
9444 op[5] = gen_label_rtx ();
9445 output_asm_insn ("basr\t%4,0", op);
9446 targetm.asm_out.internal_label (file, "L",
9447 CODE_LABEL_NUMBER (op[5]));
9448 }
9449
9450 /* Add DELTA to this pointer. */
9451 if (delta)
9452 {
9453 if (CONST_OK_FOR_J (delta))
9454 output_asm_insn ("la\t%1,%2(%1)", op);
9455 else if (DISP_IN_RANGE (delta))
9456 output_asm_insn ("lay\t%1,%2(%1)", op);
9457 else if (CONST_OK_FOR_K (delta))
9458 output_asm_insn ("ahi\t%1,%2", op);
9459 else if (CONST_OK_FOR_Os (delta))
9460 output_asm_insn ("afi\t%1,%2", op);
9461 else
9462 {
9463 op[6] = gen_label_rtx ();
9464 output_asm_insn ("a\t%1,%6-%5(%4)", op);
9465 }
9466 }
9467
9468 /* Perform vcall adjustment. */
9469 if (vcall_offset)
9470 {
9471 if (CONST_OK_FOR_J (vcall_offset))
9472 {
9473 output_asm_insn ("l\t%4,0(%1)", op);
9474 output_asm_insn ("a\t%1,%3(%4)", op);
9475 }
9476 else if (DISP_IN_RANGE (vcall_offset))
9477 {
9478 output_asm_insn ("l\t%4,0(%1)", op);
9479 output_asm_insn ("ay\t%1,%3(%4)", op);
9480 }
9481 else if (CONST_OK_FOR_K (vcall_offset))
9482 {
9483 output_asm_insn ("lhi\t%4,%3", op);
9484 output_asm_insn ("a\t%4,0(%1)", op);
9485 output_asm_insn ("a\t%1,0(%4)", op);
9486 }
9487 else if (CONST_OK_FOR_Os (vcall_offset))
9488 {
9489 output_asm_insn ("iilf\t%4,%3", op);
9490 output_asm_insn ("a\t%4,0(%1)", op);
9491 output_asm_insn ("a\t%1,0(%4)", op);
9492 }
9493 else
9494 {
9495 op[7] = gen_label_rtx ();
9496 output_asm_insn ("l\t%4,%7-%5(%4)", op);
9497 output_asm_insn ("a\t%4,0(%1)", op);
9498 output_asm_insn ("a\t%1,0(%4)", op);
9499 }
9500
9501 /* We had to clobber the base pointer register.
9502 Set up the base pointer again (with a different base). */
9503 op[5] = gen_label_rtx ();
9504 output_asm_insn ("basr\t%4,0", op);
9505 targetm.asm_out.internal_label (file, "L",
9506 CODE_LABEL_NUMBER (op[5]));
9507 }
9508
9509 /* Jump to target. */
9510 op[8] = gen_label_rtx ();
9511
9512 if (!flag_pic)
9513 output_asm_insn ("l\t%4,%8-%5(%4)", op);
9514 else if (!nonlocal)
9515 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9516 /* We cannot call through .plt, since .plt requires %r12 loaded. */
9517 else if (flag_pic == 1)
9518 {
9519 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9520 output_asm_insn ("l\t%4,%0(%4)", op);
9521 }
9522 else if (flag_pic == 2)
9523 {
9524 op[9] = gen_rtx_REG (Pmode, 0);
9525 output_asm_insn ("l\t%9,%8-4-%5(%4)", op);
9526 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9527 output_asm_insn ("ar\t%4,%9", op);
9528 output_asm_insn ("l\t%4,0(%4)", op);
9529 }
9530
9531 output_asm_insn ("br\t%4", op);
9532
9533 /* Output literal pool. */
9534 output_asm_insn (".align\t4", op);
9535
9536 if (nonlocal && flag_pic == 2)
9537 output_asm_insn (".long\t%0", op);
9538 if (nonlocal)
9539 {
9540 op[0] = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
9541 SYMBOL_REF_FLAGS (op[0]) = SYMBOL_FLAG_LOCAL;
9542 }
9543
9544 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[8]));
9545 if (!flag_pic)
9546 output_asm_insn (".long\t%0", op);
9547 else
9548 output_asm_insn (".long\t%0-%5", op);
9549
9550 if (op[6])
9551 {
9552 targetm.asm_out.internal_label (file, "L",
9553 CODE_LABEL_NUMBER (op[6]));
9554 output_asm_insn (".long\t%2", op);
9555 }
9556 if (op[7])
9557 {
9558 targetm.asm_out.internal_label (file, "L",
9559 CODE_LABEL_NUMBER (op[7]));
9560 output_asm_insn (".long\t%3", op);
9561 }
9562 }
9563 final_end_function ();
9564 }
9565
9566 static bool
9567 s390_valid_pointer_mode (enum machine_mode mode)
9568 {
9569 return (mode == SImode || (TARGET_64BIT && mode == DImode));
9570 }
9571
9572 /* Checks whether the given CALL_EXPR would use a call
9573 saved register. This is used to decide whether sibling call
9574 optimization could be performed on the respective function
9575 call. */
9576
9577 static bool
9578 s390_call_saved_register_used (tree call_expr)
9579 {
9580 CUMULATIVE_ARGS cum;
9581 tree parameter;
9582 enum machine_mode mode;
9583 tree type;
9584 rtx parm_rtx;
9585 int reg, i;
9586
9587 INIT_CUMULATIVE_ARGS (cum, NULL, NULL, 0, 0);
9588
9589 for (i = 0; i < call_expr_nargs (call_expr); i++)
9590 {
9591 parameter = CALL_EXPR_ARG (call_expr, i);
9592 gcc_assert (parameter);
9593
9594 /* For an undeclared variable passed as a parameter we will get
9595 an ERROR_MARK node here. */
9596 if (TREE_CODE (parameter) == ERROR_MARK)
9597 return true;
9598
9599 type = TREE_TYPE (parameter);
9600 gcc_assert (type);
9601
9602 mode = TYPE_MODE (type);
9603 gcc_assert (mode);
9604
9605 if (pass_by_reference (&cum, mode, type, true))
9606 {
9607 mode = Pmode;
9608 type = build_pointer_type (type);
9609 }
9610
9611 parm_rtx = s390_function_arg (&cum, mode, type, 0);
9612
9613 s390_function_arg_advance (&cum, mode, type, 0);
9614
9615 if (!parm_rtx)
9616 continue;
9617
9618 if (REG_P (parm_rtx))
9619 {
9620 for (reg = 0;
9621 reg < HARD_REGNO_NREGS (REGNO (parm_rtx), GET_MODE (parm_rtx));
9622 reg++)
9623 if (!call_used_regs[reg + REGNO (parm_rtx)])
9624 return true;
9625 }
9626
9627 if (GET_CODE (parm_rtx) == PARALLEL)
9628 {
9629 int i;
9630
9631 for (i = 0; i < XVECLEN (parm_rtx, 0); i++)
9632 {
9633 rtx r = XEXP (XVECEXP (parm_rtx, 0, i), 0);
9634
9635 gcc_assert (REG_P (r));
9636
9637 for (reg = 0;
9638 reg < HARD_REGNO_NREGS (REGNO (r), GET_MODE (r));
9639 reg++)
9640 if (!call_used_regs[reg + REGNO (r)])
9641 return true;
9642 }
9643 }
9644
9645 }
9646 return false;
9647 }
9648
9649 /* Return true if the given call expression can be
9650 turned into a sibling call.
9651 DECL holds the declaration of the function to be called whereas
9652 EXP is the call expression itself. */
9653
9654 static bool
9655 s390_function_ok_for_sibcall (tree decl, tree exp)
9656 {
9657 /* The TPF epilogue uses register 1. */
9658 if (TARGET_TPF_PROFILING)
9659 return false;
9660
9661 /* The 31 bit PLT code uses register 12 (GOT pointer - call-saved)
9662 which would have to be restored before the sibcall. */
9663 if (!TARGET_64BIT && flag_pic && decl && !targetm.binds_local_p (decl))
9664 return false;
9665
9666 /* Register 6 on s390 is available as an argument register but unfortunately
9667 "caller saved". This makes functions needing this register for arguments
9668 not suitable for sibcalls. */
9669 return !s390_call_saved_register_used (exp);
9670 }
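
/* A sketch of the %r6 situation described above, assuming the usual
   assignment of the first five integer arguments to %r2 .. %r6 (the
   function names here are purely illustrative):

     extern int callee5 (int, int, int, int, int);

     int caller5 (int a, int b, int c, int d, int e)
     {
       return callee5 (a, b, c, d, e);   /* 5th argument lives in %r6.  */
     }

   Since %r6 is call-saved, s390_call_saved_register_used returns true
   for this call and no tail call is emitted; with four or fewer
   register arguments the sibcall remains possible.  */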
9671
9672 /* Return the fixed registers used for condition codes. */
9673
9674 static bool
9675 s390_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
9676 {
9677 *p1 = CC_REGNUM;
9678 *p2 = INVALID_REGNUM;
9679
9680 return true;
9681 }
9682
9683 /* This function is used by the call expanders of the machine description.
9684 It emits the call insn itself together with the necessary operations
9685 to adjust the target address and returns the emitted insn.
9686 ADDR_LOCATION is the target address rtx
9687 TLS_CALL the location of the thread-local symbol
9688 RESULT_REG the register where the result of the call should be stored
9689 RETADDR_REG the register where the return address should be stored
9690 If this parameter is NULL_RTX the call is considered
9691 to be a sibling call. */
9692
9693 rtx
9694 s390_emit_call (rtx addr_location, rtx tls_call, rtx result_reg,
9695 rtx retaddr_reg)
9696 {
9697 bool plt_call = false;
9698 rtx insn;
9699 rtx call;
9700 rtx clobber;
9701 rtvec vec;
9702
9703 /* Direct function calls need special treatment. */
9704 if (GET_CODE (addr_location) == SYMBOL_REF)
9705 {
9706 /* When calling a global routine in PIC mode, we must
9707 replace the symbol itself with the PLT stub. */
9708 if (flag_pic && !SYMBOL_REF_LOCAL_P (addr_location))
9709 {
9710 if (retaddr_reg != NULL_RTX)
9711 {
9712 addr_location = gen_rtx_UNSPEC (Pmode,
9713 gen_rtvec (1, addr_location),
9714 UNSPEC_PLT);
9715 addr_location = gen_rtx_CONST (Pmode, addr_location);
9716 plt_call = true;
9717 }
9718 else
9719 /* For -fpic code the PLT entries might use r12 which is
9720 call-saved. Therefore we cannot do a sibcall when
9721 calling directly using a symbol ref. When reaching
9722 this point we decided (in s390_function_ok_for_sibcall)
9723 to do a sibcall for a function pointer but one of the
9724 optimizers was able to get rid of the function pointer
9725 by propagating the symbol ref into the call. This
9726 optimization is illegal for S/390 so we turn the direct
9727 call into an indirect call again. */
9728 addr_location = force_reg (Pmode, addr_location);
9729 }
9730
9731 /* Unless we can use the bras(l) insn, force the
9732 routine address into a register. */
9733 if (!TARGET_SMALL_EXEC && !TARGET_CPU_ZARCH)
9734 {
9735 if (flag_pic)
9736 addr_location = legitimize_pic_address (addr_location, 0);
9737 else
9738 addr_location = force_reg (Pmode, addr_location);
9739 }
9740 }
9741
9742 /* If it is already an indirect call or the code above moved the
9743 SYMBOL_REF to somewhere else, make sure the address can be found in
9744 register 1. */
9745 if (retaddr_reg == NULL_RTX
9746 && GET_CODE (addr_location) != SYMBOL_REF
9747 && !plt_call)
9748 {
9749 emit_move_insn (gen_rtx_REG (Pmode, SIBCALL_REGNUM), addr_location);
9750 addr_location = gen_rtx_REG (Pmode, SIBCALL_REGNUM);
9751 }
9752
9753 addr_location = gen_rtx_MEM (QImode, addr_location);
9754 call = gen_rtx_CALL (VOIDmode, addr_location, const0_rtx);
9755
9756 if (result_reg != NULL_RTX)
9757 call = gen_rtx_SET (VOIDmode, result_reg, call);
9758
9759 if (retaddr_reg != NULL_RTX)
9760 {
9761 clobber = gen_rtx_CLOBBER (VOIDmode, retaddr_reg);
9762
9763 if (tls_call != NULL_RTX)
9764 vec = gen_rtvec (3, call, clobber,
9765 gen_rtx_USE (VOIDmode, tls_call));
9766 else
9767 vec = gen_rtvec (2, call, clobber);
9768
9769 call = gen_rtx_PARALLEL (VOIDmode, vec);
9770 }
9771
9772 insn = emit_call_insn (call);
9773
9774 /* 31-bit PLT stubs and tls calls use the GOT register implicitly. */
9775 if ((!TARGET_64BIT && plt_call) || tls_call != NULL_RTX)
9776 {
9777 /* s390_function_ok_for_sibcall should
9778 have denied sibcalls in this case. */
9779 gcc_assert (retaddr_reg != NULL_RTX);
9780
9781 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
9782 }
9783 return insn;
9784 }
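
/* Two simplified ways this routine is driven, shown only as a sketch
   (the real callers are the call expanders in s390.md and
   s390_emit_tpf_eh_return below; ADDR and RESULT stand for rtx values
   the caller already has):

     a regular call whose value ends up in RESULT, returning via %r14:
       s390_emit_call (addr, NULL_RTX, result,
                       gen_rtx_REG (Pmode, RETURN_REGNUM));

     a sibling call, marked as such by passing NULL_RTX as RETADDR_REG:
       s390_emit_call (addr, NULL_RTX, NULL_RTX, NULL_RTX);  */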
9785
9786 /* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
9787
9788 static void
9789 s390_conditional_register_usage (void)
9790 {
9791 int i;
9792
9793 if (flag_pic)
9794 {
9795 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
9796 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
9797 }
9798 if (TARGET_CPU_ZARCH)
9799 {
9800 fixed_regs[BASE_REGNUM] = 0;
9801 call_used_regs[BASE_REGNUM] = 0;
9802 fixed_regs[RETURN_REGNUM] = 0;
9803 call_used_regs[RETURN_REGNUM] = 0;
9804 }
9805 if (TARGET_64BIT)
9806 {
9807 for (i = 24; i < 32; i++)
9808 call_used_regs[i] = call_really_used_regs[i] = 0;
9809 }
9810 else
9811 {
9812 for (i = 18; i < 20; i++)
9813 call_used_regs[i] = call_really_used_regs[i] = 0;
9814 }
9815
9816 if (TARGET_SOFT_FLOAT)
9817 {
9818 for (i = 16; i < 32; i++)
9819 call_used_regs[i] = fixed_regs[i] = 1;
9820 }
9821 }
9822
9823 /* Corresponding function to eh_return expander. */
9824
9825 static GTY(()) rtx s390_tpf_eh_return_symbol;
9826 void
9827 s390_emit_tpf_eh_return (rtx target)
9828 {
9829 rtx insn, reg;
9830
9831 if (!s390_tpf_eh_return_symbol)
9832 s390_tpf_eh_return_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tpf_eh_return");
9833
9834 reg = gen_rtx_REG (Pmode, 2);
9835
9836 emit_move_insn (reg, target);
9837 insn = s390_emit_call (s390_tpf_eh_return_symbol, NULL_RTX, reg,
9838 gen_rtx_REG (Pmode, RETURN_REGNUM));
9839 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
9840
9841 emit_move_insn (EH_RETURN_HANDLER_RTX, reg);
9842 }
9843
9844 /* Rework the prologue/epilogue to avoid saving/restoring
9845 registers unnecessarily. */
9846
9847 static void
9848 s390_optimize_prologue (void)
9849 {
9850 rtx insn, new_insn, next_insn;
9851
9852 /* Do a final recompute of the frame-related data. */
9853
9854 s390_update_frame_layout ();
9855
9856 /* If all special registers are in fact used, there's nothing we
9857 can do, so no point in walking the insn list. */
9858
9859 if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
9860 && cfun_frame_layout.last_save_gpr >= BASE_REGNUM
9861 && (TARGET_CPU_ZARCH
9862 || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
9863 && cfun_frame_layout.last_save_gpr >= RETURN_REGNUM)))
9864 return;
9865
9866 /* Search for prologue/epilogue insns and replace them. */
9867
9868 for (insn = get_insns (); insn; insn = next_insn)
9869 {
9870 int first, last, off;
9871 rtx set, base, offset;
9872
9873 next_insn = NEXT_INSN (insn);
9874
9875 if (GET_CODE (insn) != INSN)
9876 continue;
9877
9878 if (GET_CODE (PATTERN (insn)) == PARALLEL
9879 && store_multiple_operation (PATTERN (insn), VOIDmode))
9880 {
9881 set = XVECEXP (PATTERN (insn), 0, 0);
9882 first = REGNO (SET_SRC (set));
9883 last = first + XVECLEN (PATTERN (insn), 0) - 1;
9884 offset = const0_rtx;
9885 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
9886 off = INTVAL (offset);
9887
9888 if (GET_CODE (base) != REG || off < 0)
9889 continue;
9890 if (cfun_frame_layout.first_save_gpr != -1
9891 && (cfun_frame_layout.first_save_gpr < first
9892 || cfun_frame_layout.last_save_gpr > last))
9893 continue;
9894 if (REGNO (base) != STACK_POINTER_REGNUM
9895 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
9896 continue;
9897 if (first > BASE_REGNUM || last < BASE_REGNUM)
9898 continue;
9899
9900 if (cfun_frame_layout.first_save_gpr != -1)
9901 {
9902 new_insn = save_gprs (base,
9903 off + (cfun_frame_layout.first_save_gpr
9904 - first) * UNITS_PER_LONG,
9905 cfun_frame_layout.first_save_gpr,
9906 cfun_frame_layout.last_save_gpr);
9907 new_insn = emit_insn_before (new_insn, insn);
9908 INSN_ADDRESSES_NEW (new_insn, -1);
9909 }
9910
9911 remove_insn (insn);
9912 continue;
9913 }
9914
9915 if (cfun_frame_layout.first_save_gpr == -1
9916 && GET_CODE (PATTERN (insn)) == SET
9917 && GET_CODE (SET_SRC (PATTERN (insn))) == REG
9918 && (REGNO (SET_SRC (PATTERN (insn))) == BASE_REGNUM
9919 || (!TARGET_CPU_ZARCH
9920 && REGNO (SET_SRC (PATTERN (insn))) == RETURN_REGNUM))
9921 && GET_CODE (SET_DEST (PATTERN (insn))) == MEM)
9922 {
9923 set = PATTERN (insn);
9924 first = REGNO (SET_SRC (set));
9925 offset = const0_rtx;
9926 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
9927 off = INTVAL (offset);
9928
9929 if (GET_CODE (base) != REG || off < 0)
9930 continue;
9931 if (REGNO (base) != STACK_POINTER_REGNUM
9932 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
9933 continue;
9934
9935 remove_insn (insn);
9936 continue;
9937 }
9938
9939 if (GET_CODE (PATTERN (insn)) == PARALLEL
9940 && load_multiple_operation (PATTERN (insn), VOIDmode))
9941 {
9942 set = XVECEXP (PATTERN (insn), 0, 0);
9943 first = REGNO (SET_DEST (set));
9944 last = first + XVECLEN (PATTERN (insn), 0) - 1;
9945 offset = const0_rtx;
9946 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
9947 off = INTVAL (offset);
9948
9949 if (GET_CODE (base) != REG || off < 0)
9950 continue;
9951 if (cfun_frame_layout.first_restore_gpr != -1
9952 && (cfun_frame_layout.first_restore_gpr < first
9953 || cfun_frame_layout.last_restore_gpr > last))
9954 continue;
9955 if (REGNO (base) != STACK_POINTER_REGNUM
9956 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
9957 continue;
9958 if (first > BASE_REGNUM || last < BASE_REGNUM)
9959 continue;
9960
9961 if (cfun_frame_layout.first_restore_gpr != -1)
9962 {
9963 new_insn = restore_gprs (base,
9964 off + (cfun_frame_layout.first_restore_gpr
9965 - first) * UNITS_PER_LONG,
9966 cfun_frame_layout.first_restore_gpr,
9967 cfun_frame_layout.last_restore_gpr);
9968 new_insn = emit_insn_before (new_insn, insn);
9969 INSN_ADDRESSES_NEW (new_insn, -1);
9970 }
9971
9972 remove_insn (insn);
9973 continue;
9974 }
9975
9976 if (cfun_frame_layout.first_restore_gpr == -1
9977 && GET_CODE (PATTERN (insn)) == SET
9978 && GET_CODE (SET_DEST (PATTERN (insn))) == REG
9979 && (REGNO (SET_DEST (PATTERN (insn))) == BASE_REGNUM
9980 || (!TARGET_CPU_ZARCH
9981 && REGNO (SET_DEST (PATTERN (insn))) == RETURN_REGNUM))
9982 && GET_CODE (SET_SRC (PATTERN (insn))) == MEM)
9983 {
9984 set = PATTERN (insn);
9985 first = REGNO (SET_DEST (set));
9986 offset = const0_rtx;
9987 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
9988 off = INTVAL (offset);
9989
9990 if (GET_CODE (base) != REG || off < 0)
9991 continue;
9992 if (REGNO (base) != STACK_POINTER_REGNUM
9993 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
9994 continue;
9995
9996 remove_insn (insn);
9997 continue;
9998 }
9999 }
10000 }
10001
10002 /* On z10 and later the dynamic branch prediction must see the
10003 backward jump within a certain window. If not, it falls back to
10004 the static prediction. This function rearranges the loop backward
10005 branch in a way which makes the static prediction always correct.
10006 The function returns true if it added an instruction. */
10007 static bool
10008 s390_fix_long_loop_prediction (rtx insn)
10009 {
10010 rtx set = single_set (insn);
10011 rtx code_label, label_ref, new_label;
10012 rtx uncond_jump;
10013 rtx cur_insn;
10014 rtx tmp;
10015 int distance;
10016
10017 /* This will exclude branch on count and branch on index patterns
10018 since these are correctly statically predicted. */
10019 if (!set
10020 || SET_DEST (set) != pc_rtx
10021 || GET_CODE (SET_SRC(set)) != IF_THEN_ELSE)
10022 return false;
10023
10024 label_ref = (GET_CODE (XEXP (SET_SRC (set), 1)) == LABEL_REF ?
10025 XEXP (SET_SRC (set), 1) : XEXP (SET_SRC (set), 2));
10026
10027 gcc_assert (GET_CODE (label_ref) == LABEL_REF);
10028
10029 code_label = XEXP (label_ref, 0);
10030
10031 if (INSN_ADDRESSES (INSN_UID (code_label)) == -1
10032 || INSN_ADDRESSES (INSN_UID (insn)) == -1
10033 || (INSN_ADDRESSES (INSN_UID (insn))
10034 - INSN_ADDRESSES (INSN_UID (code_label)) < PREDICT_DISTANCE))
10035 return false;
10036
10037 for (distance = 0, cur_insn = PREV_INSN (insn);
10038 distance < PREDICT_DISTANCE - 6;
10039 distance += get_attr_length (cur_insn), cur_insn = PREV_INSN (cur_insn))
10040 if (!cur_insn || JUMP_P (cur_insn) || LABEL_P (cur_insn))
10041 return false;
10042
10043 new_label = gen_label_rtx ();
10044 uncond_jump = emit_jump_insn_after (
10045 gen_rtx_SET (VOIDmode, pc_rtx,
10046 gen_rtx_LABEL_REF (VOIDmode, code_label)),
10047 insn);
10048 emit_label_after (new_label, uncond_jump);
10049
10050 tmp = XEXP (SET_SRC (set), 1);
10051 XEXP (SET_SRC (set), 1) = XEXP (SET_SRC (set), 2);
10052 XEXP (SET_SRC (set), 2) = tmp;
10053 INSN_CODE (insn) = -1;
10054
10055 XEXP (label_ref, 0) = new_label;
10056 JUMP_LABEL (insn) = new_label;
10057 JUMP_LABEL (uncond_jump) = code_label;
10058
10059 return true;
10060 }
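
/* The shape of the rewrite performed above, as a sketch (COND stands
   for whatever condition the original jump tested):

        before:                        after:
     .L1:                           .L1:
        loop body                      loop body
        ...                            ...
        jump-if-COND .L1               jump-if-not-COND .Lnew
                                       jump .L1
                                    .Lnew:

   The far backward branch is now unconditional, and the conditional
   branch has become a short forward branch to the new label.  */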
10061
10062 /* Returns 1 if INSN reads the value of REG for purposes not related
10063 to addressing of memory, and 0 otherwise. */
10064 static int
10065 s390_non_addr_reg_read_p (rtx reg, rtx insn)
10066 {
10067 return reg_referenced_p (reg, PATTERN (insn))
10068 && !reg_used_in_mem_p (REGNO (reg), PATTERN (insn));
10069 }
10070
10071 /* Starting from INSN find_cond_jump looks downwards in the insn
10072 stream for a single jump insn which is the last user of the
10073 condition code set in INSN. */
10074 static rtx
10075 find_cond_jump (rtx insn)
10076 {
10077 for (; insn; insn = NEXT_INSN (insn))
10078 {
10079 rtx ite, cc;
10080
10081 if (LABEL_P (insn))
10082 break;
10083
10084 if (!JUMP_P (insn))
10085 {
10086 if (reg_mentioned_p (gen_rtx_REG (CCmode, CC_REGNUM), insn))
10087 break;
10088 continue;
10089 }
10090
10091 /* This will be triggered by a return. */
10092 if (GET_CODE (PATTERN (insn)) != SET)
10093 break;
10094
10095 gcc_assert (SET_DEST (PATTERN (insn)) == pc_rtx);
10096 ite = SET_SRC (PATTERN (insn));
10097
10098 if (GET_CODE (ite) != IF_THEN_ELSE)
10099 break;
10100
10101 cc = XEXP (XEXP (ite, 0), 0);
10102 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc)))
10103 break;
10104
10105 if (find_reg_note (insn, REG_DEAD, cc))
10106 return insn;
10107 break;
10108 }
10109
10110 return NULL_RTX;
10111 }
10112
10113 /* Swap the condition in COND and the operands in OP0 and OP1 so that
10114 the semantics does not change. If NULL_RTX is passed as COND the
10115 function tries to find the conditional jump starting with INSN. */
10116 static void
10117 s390_swap_cmp (rtx cond, rtx *op0, rtx *op1, rtx insn)
10118 {
10119 rtx tmp = *op0;
10120
10121 if (cond == NULL_RTX)
10122 {
10123 rtx jump = find_cond_jump (NEXT_INSN (insn));
10124 jump = jump ? single_set (jump) : NULL_RTX;
10125
10126 if (jump == NULL_RTX)
10127 return;
10128
10129 cond = XEXP (XEXP (jump, 1), 0);
10130 }
10131
10132 *op0 = *op1;
10133 *op1 = tmp;
10134 PUT_CODE (cond, swap_condition (GET_CODE (cond)));
10135 }
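
/* For instance (a sketch): a jump condition (lt (reg A) (reg B)) is
   rewritten as (gt (reg B) (reg A)); swap_condition turns LT into GT,
   so the branch behaves exactly as before.  */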
10136
10137 /* On z10, instructions of the compare-and-branch family have the
10138 property of accessing the register occurring as second operand with
10139 its bits complemented. If such a compare is grouped with a second
10140 instruction that accesses the same register non-complemented, and
10141 if that register's value is delivered via a bypass, then the
10142 pipeline recycles, thereby causing significant performance decline.
10143 This function locates such situations and exchanges the two
10144 operands of the compare. The function returns true whenever it
10145 added an insn. */
10146 static bool
10147 s390_z10_optimize_cmp (rtx insn)
10148 {
10149 rtx prev_insn, next_insn;
10150 bool insn_added_p = false;
10151 rtx cond, *op0, *op1;
10152
10153 if (GET_CODE (PATTERN (insn)) == PARALLEL)
10154 {
10155 /* Handle compare and branch and branch on count
10156 instructions. */
10157 rtx pattern = single_set (insn);
10158
10159 if (!pattern
10160 || SET_DEST (pattern) != pc_rtx
10161 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE)
10162 return false;
10163
10164 cond = XEXP (SET_SRC (pattern), 0);
10165 op0 = &XEXP (cond, 0);
10166 op1 = &XEXP (cond, 1);
10167 }
10168 else if (GET_CODE (PATTERN (insn)) == SET)
10169 {
10170 rtx src, dest;
10171
10172 /* Handle normal compare instructions. */
10173 src = SET_SRC (PATTERN (insn));
10174 dest = SET_DEST (PATTERN (insn));
10175
10176 if (!REG_P (dest)
10177 || !CC_REGNO_P (REGNO (dest))
10178 || GET_CODE (src) != COMPARE)
10179 return false;
10180
10181 /* s390_swap_cmp will try to find the conditional
10182 jump when passing NULL_RTX as condition. */
10183 cond = NULL_RTX;
10184 op0 = &XEXP (src, 0);
10185 op1 = &XEXP (src, 1);
10186 }
10187 else
10188 return false;
10189
10190 if (!REG_P (*op0) || !REG_P (*op1))
10191 return false;
10192
10193 if (GET_MODE_CLASS (GET_MODE (*op0)) != MODE_INT)
10194 return false;
10195
10196 /* Swap the COMPARE arguments and its mask if there is a
10197 conflicting access in the previous insn. */
10198 prev_insn = prev_active_insn (insn);
10199 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
10200 && reg_referenced_p (*op1, PATTERN (prev_insn)))
10201 s390_swap_cmp (cond, op0, op1, insn);
10202
10203 /* Check if there is a conflict with the next insn. If there
10204 was no conflict with the previous insn, then swap the
10205 COMPARE arguments and its mask. If we already swapped
10206 the operands, or if swapping them would cause a conflict
10207 with the previous insn, issue a NOP after the COMPARE in
10208 order to separate the two instructions. */
10209 next_insn = next_active_insn (insn);
10210 if (next_insn != NULL_RTX && INSN_P (next_insn)
10211 && s390_non_addr_reg_read_p (*op1, next_insn))
10212 {
10213 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
10214 && s390_non_addr_reg_read_p (*op0, prev_insn))
10215 {
10216 if (REGNO (*op1) == 0)
10217 emit_insn_after (gen_nop1 (), insn);
10218 else
10219 emit_insn_after (gen_nop (), insn);
10220 insn_added_p = true;
10221 }
10222 else
10223 s390_swap_cmp (cond, op0, op1, insn);
10224 }
10225 return insn_added_p;
10226 }
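
/* A concrete shape of the hazard handled above, as a sketch (register
   numbers and mnemonics chosen purely for illustration):

     lr    %r3,%r5              ; %r3 arrives via a bypass
     crje  %r1,%r3,.Ltarget     ; second operand %r3 is read complemented

   If a neighbouring instruction also reads %r3 non-complemented, the
   compare is rewritten as "crje %r3,%r1,.Ltarget" (an ordered test such
   as crjl would become crjh, as done by s390_swap_cmp), or a nop is
   emitted right after the compare when swapping would conflict with
   the previous insn.  */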
10227
10228 /* Perform machine-dependent processing. */
10229
10230 static void
10231 s390_reorg (void)
10232 {
10233 bool pool_overflow = false;
10234
10235 /* Make sure all splits have been performed; splits after
10236 machine_dependent_reorg might confuse insn length counts. */
10237 split_all_insns_noflow ();
10238
10239 /* Install the main literal pool and the associated base
10240 register load insns.
10241
10242 In addition, there are two problematic situations we need
10243 to correct:
10244
10245 - the literal pool might be > 4096 bytes in size, so that
10246 some of its elements cannot be directly accessed
10247
10248 - a branch target might be > 64K away from the branch, so that
10249 it is not possible to use a PC-relative instruction.
10250
10251 To fix those, we split the single literal pool into multiple
10252 pool chunks, reloading the pool base register at various
10253 points throughout the function to ensure it always points to
10254 the pool chunk the following code expects, and / or replace
10255 PC-relative branches by absolute branches.
10256
10257 However, the two problems are interdependent: splitting the
10258 literal pool can move a branch further away from its target,
10259 causing the 64K limit to overflow, and on the other hand,
10260 replacing a PC-relative branch by an absolute branch means
10261 we need to put the branch target address into the literal
10262 pool, possibly causing it to overflow.
10263
10264 So, we loop trying to fix up both problems until we manage
10265 to satisfy both conditions at the same time. Note that the
10266 loop is guaranteed to terminate as every pass of the loop
10267 strictly decreases the total number of PC-relative branches
10268 in the function. (This is not completely true as there
10269 might be branch-over-pool insns introduced by chunkify_start.
10270 Those never need to be split however.) */
10271
10272 for (;;)
10273 {
10274 struct constant_pool *pool = NULL;
10275
10276 /* Collect the literal pool. */
10277 if (!pool_overflow)
10278 {
10279 pool = s390_mainpool_start ();
10280 if (!pool)
10281 pool_overflow = true;
10282 }
10283
10284 /* If literal pool overflowed, start to chunkify it. */
10285 if (pool_overflow)
10286 pool = s390_chunkify_start ();
10287
10288 /* Split out-of-range branches. If this has created new
10289 literal pool entries, cancel current chunk list and
10290 recompute it. zSeries machines have large branch
10291 instructions, so we never need to split a branch. */
10292 if (!TARGET_CPU_ZARCH && s390_split_branches ())
10293 {
10294 if (pool_overflow)
10295 s390_chunkify_cancel (pool);
10296 else
10297 s390_mainpool_cancel (pool);
10298
10299 continue;
10300 }
10301
10302 /* If we made it up to here, both conditions are satisfied.
10303 Finish up literal pool related changes. */
10304 if (pool_overflow)
10305 s390_chunkify_finish (pool);
10306 else
10307 s390_mainpool_finish (pool);
10308
10309 /* We're done splitting branches. */
10310 cfun->machine->split_branches_pending_p = false;
10311 break;
10312 }
10313
10314 /* Generate out-of-pool execute target insns. */
10315 if (TARGET_CPU_ZARCH)
10316 {
10317 rtx insn, label, target;
10318
10319 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10320 {
10321 label = s390_execute_label (insn);
10322 if (!label)
10323 continue;
10324
10325 gcc_assert (label != const0_rtx);
10326
10327 target = emit_label (XEXP (label, 0));
10328 INSN_ADDRESSES_NEW (target, -1);
10329
10330 target = emit_insn (s390_execute_target (insn));
10331 INSN_ADDRESSES_NEW (target, -1);
10332 }
10333 }
10334
10335 /* Try to optimize prologue and epilogue further. */
10336 s390_optimize_prologue ();
10337
10338 /* Walk over the insns and do some >=z10 specific changes. */
10339 if (s390_tune == PROCESSOR_2097_Z10
10340 || s390_tune == PROCESSOR_2817_Z196)
10341 {
10342 rtx insn;
10343 bool insn_added_p = false;
10344
10345 /* The insn lengths and addresses have to be up to date for the
10346 following manipulations. */
10347 shorten_branches (get_insns ());
10348
10349 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10350 {
10351 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
10352 continue;
10353
10354 if (JUMP_P (insn))
10355 insn_added_p |= s390_fix_long_loop_prediction (insn);
10356
10357 if ((GET_CODE (PATTERN (insn)) == PARALLEL
10358 || GET_CODE (PATTERN (insn)) == SET)
10359 && s390_tune == PROCESSOR_2097_Z10)
10360 insn_added_p |= s390_z10_optimize_cmp (insn);
10361 }
10362
10363 /* Adjust branches if we added new instructions. */
10364 if (insn_added_p)
10365 shorten_branches (get_insns ());
10366 }
10367 }
10368
10369 /* Return true if INSN is an fp load insn writing register REGNO. */
10370 static inline bool
10371 s390_fpload_toreg (rtx insn, unsigned int regno)
10372 {
10373 rtx set;
10374 enum attr_type flag = s390_safe_attr_type (insn);
10375
10376 if (flag != TYPE_FLOADSF && flag != TYPE_FLOADDF)
10377 return false;
10378
10379 set = single_set (insn);
10380
10381 if (set == NULL_RTX)
10382 return false;
10383
10384 if (!REG_P (SET_DEST (set)) || !MEM_P (SET_SRC (set)))
10385 return false;
10386
10387 if (REGNO (SET_DEST (set)) != regno)
10388 return false;
10389
10390 return true;
10391 }
10392
10393 /* This value describes the distance to be avoided between an
10394 arithmetic fp instruction and an fp load writing the same register.
10395 Z10_EARLYLOAD_DISTANCE - 1 as well as Z10_EARLYLOAD_DISTANCE + 1 are
10396 fine, but the exact value has to be avoided. Otherwise the FP
10397 pipeline will throw an exception causing a major penalty. */
10398 #define Z10_EARLYLOAD_DISTANCE 7
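
/* As an illustration of the value above (a sketch; the particular insns
   do not matter, only their type and distance): if an arithmetic insn
   such as "adbr %f0,%f2" is followed, exactly Z10_EARLYLOAD_DISTANCE
   active insns later, by a load such as "ld %f0,0(%r1)" writing the
   same register, the critical distance is hit; one insn closer or
   further away is harmless.  */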
10399
10400 /* Rearrange the ready list in order to avoid the situation described
10401 for Z10_EARLYLOAD_DISTANCE. A problematic load instruction is
10402 moved to the very end of the ready list. */
10403 static void
10404 s390_z10_prevent_earlyload_conflicts (rtx *ready, int *nready_p)
10405 {
10406 unsigned int regno;
10407 int nready = *nready_p;
10408 rtx tmp;
10409 int i;
10410 rtx insn;
10411 rtx set;
10412 enum attr_type flag;
10413 int distance;
10414
10415 /* Skip DISTANCE - 1 active insns. */
10416 for (insn = last_scheduled_insn, distance = Z10_EARLYLOAD_DISTANCE - 1;
10417 distance > 0 && insn != NULL_RTX;
10418 distance--, insn = prev_active_insn (insn))
10419 if (CALL_P (insn) || JUMP_P (insn))
10420 return;
10421
10422 if (insn == NULL_RTX)
10423 return;
10424
10425 set = single_set (insn);
10426
10427 if (set == NULL_RTX || !REG_P (SET_DEST (set))
10428 || GET_MODE_CLASS (GET_MODE (SET_DEST (set))) != MODE_FLOAT)
10429 return;
10430
10431 flag = s390_safe_attr_type (insn);
10432
10433 if (flag == TYPE_FLOADSF || flag == TYPE_FLOADDF)
10434 return;
10435
10436 regno = REGNO (SET_DEST (set));
10437 i = nready - 1;
10438
10439 while (!s390_fpload_toreg (ready[i], regno) && i > 0)
10440 i--;
10441
10442 if (!i)
10443 return;
10444
10445 tmp = ready[i];
10446 memmove (&ready[1], &ready[0], sizeof (rtx) * i);
10447 ready[0] = tmp;
10448 }
10449
10450 /* This function is called via hook TARGET_SCHED_REORDER before
10451 issuing one insn from list READY which contains *NREADYP entries.
10452 For target z10 it reorders load instructions to avoid early load
10453 conflicts in the floating point pipeline. */
10454 static int
10455 s390_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
10456 rtx *ready, int *nreadyp, int clock ATTRIBUTE_UNUSED)
10457 {
10458 if (s390_tune == PROCESSOR_2097_Z10)
10459 if (reload_completed && *nreadyp > 1)
10460 s390_z10_prevent_earlyload_conflicts (ready, nreadyp);
10461
10462 return s390_issue_rate ();
10463 }
10464
10465 /* This function is called via hook TARGET_SCHED_VARIABLE_ISSUE after
10466 the scheduler has issued INSN. It stores the last issued insn into
10467 last_scheduled_insn in order to make it available for
10468 s390_sched_reorder. */
10469 static int
10470 s390_sched_variable_issue (FILE *file ATTRIBUTE_UNUSED,
10471 int verbose ATTRIBUTE_UNUSED,
10472 rtx insn, int more)
10473 {
10474 last_scheduled_insn = insn;
10475
10476 if (GET_CODE (PATTERN (insn)) != USE
10477 && GET_CODE (PATTERN (insn)) != CLOBBER)
10478 return more - 1;
10479 else
10480 return more;
10481 }
10482
10483 static void
10484 s390_sched_init (FILE *file ATTRIBUTE_UNUSED,
10485 int verbose ATTRIBUTE_UNUSED,
10486 int max_ready ATTRIBUTE_UNUSED)
10487 {
10488 last_scheduled_insn = NULL_RTX;
10489 }
10490
10491 /* This function checks the whole of insn X for memory references. The
10492 function always returns zero because the framework it is called
10493 from would stop recursively analyzing the insn upon a return value
10494 other than zero. The real result of this function is updating
10495 counter variable MEM_COUNT. */
10496 static int
10497 check_dpu (rtx *x, unsigned *mem_count)
10498 {
10499 if (*x != NULL_RTX && MEM_P (*x))
10500 (*mem_count)++;
10501 return 0;
10502 }
10503
10504 /* This target hook implementation for TARGET_LOOP_UNROLL_ADJUST calculates
10505 the number of times the loop LOOP should be unrolled when tuning for CPUs
10506 with a built-in stride prefetcher.
10507 The loop is analyzed for memory accesses by calling check_dpu for
10508 each rtx of the loop. Depending on the loop depth and the number of
10509 memory accesses, a new unroll factor <= NUNROLL is returned to improve the
10510 behaviour of the hardware prefetch unit. */
10511 static unsigned
10512 s390_loop_unroll_adjust (unsigned nunroll, struct loop *loop)
10513 {
10514 basic_block *bbs;
10515 rtx insn;
10516 unsigned i;
10517 unsigned mem_count = 0;
10518
10519 if (s390_tune != PROCESSOR_2097_Z10 && s390_tune != PROCESSOR_2817_Z196)
10520 return nunroll;
10521
10522 /* Count the number of memory references within the loop body. */
10523 bbs = get_loop_body (loop);
10524 for (i = 0; i < loop->num_nodes; i++)
10525 {
10526 for (insn = BB_HEAD (bbs[i]); insn != BB_END (bbs[i]); insn = NEXT_INSN (insn))
10527 if (INSN_P (insn) && INSN_CODE (insn) != -1)
10528 for_each_rtx (&insn, (rtx_function) check_dpu, &mem_count);
10529 }
10530 free (bbs);
10531
10532 /* Prevent division by zero; nunroll need not be adjusted in this case. */
10533 if (mem_count == 0)
10534 return nunroll;
10535
10536 switch (loop_depth(loop))
10537 {
10538 case 1:
10539 return MIN (nunroll, 28 / mem_count);
10540 case 2:
10541 return MIN (nunroll, 22 / mem_count);
10542 default:
10543 return MIN (nunroll, 16 / mem_count);
10544 }
10545 }
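
/* A worked instance of the selection above (a sketch): for a depth-1
   loop containing 4 memory references and a requested unroll factor of
   8, the result is MIN (8, 28 / 4) = 7; with 12 references the loop
   would only be unrolled MIN (8, 28 / 12) = 2 times.  */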
10546
10547 /* Initialize GCC target structure. */
10548
10549 #undef TARGET_ASM_ALIGNED_HI_OP
10550 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
10551 #undef TARGET_ASM_ALIGNED_DI_OP
10552 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
10553 #undef TARGET_ASM_INTEGER
10554 #define TARGET_ASM_INTEGER s390_assemble_integer
10555
10556 #undef TARGET_ASM_OPEN_PAREN
10557 #define TARGET_ASM_OPEN_PAREN ""
10558
10559 #undef TARGET_ASM_CLOSE_PAREN
10560 #define TARGET_ASM_CLOSE_PAREN ""
10561
10562 #undef TARGET_DEFAULT_TARGET_FLAGS
10563 #define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT)
10564
10565 #undef TARGET_HANDLE_OPTION
10566 #define TARGET_HANDLE_OPTION s390_handle_option
10567
10568 #undef TARGET_OPTION_OVERRIDE
10569 #define TARGET_OPTION_OVERRIDE s390_option_override
10570
10571 #undef TARGET_OPTION_OPTIMIZATION_TABLE
10572 #define TARGET_OPTION_OPTIMIZATION_TABLE s390_option_optimization_table
10573
10574 #undef TARGET_OPTION_INIT_STRUCT
10575 #define TARGET_OPTION_INIT_STRUCT s390_option_init_struct
10576
10577 #undef TARGET_ENCODE_SECTION_INFO
10578 #define TARGET_ENCODE_SECTION_INFO s390_encode_section_info
10579
10580 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10581 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
10582
10583 #ifdef HAVE_AS_TLS
10584 #undef TARGET_HAVE_TLS
10585 #define TARGET_HAVE_TLS true
10586 #endif
10587 #undef TARGET_CANNOT_FORCE_CONST_MEM
10588 #define TARGET_CANNOT_FORCE_CONST_MEM s390_cannot_force_const_mem
10589
10590 #undef TARGET_DELEGITIMIZE_ADDRESS
10591 #define TARGET_DELEGITIMIZE_ADDRESS s390_delegitimize_address
10592
10593 #undef TARGET_LEGITIMIZE_ADDRESS
10594 #define TARGET_LEGITIMIZE_ADDRESS s390_legitimize_address
10595
10596 #undef TARGET_RETURN_IN_MEMORY
10597 #define TARGET_RETURN_IN_MEMORY s390_return_in_memory
10598
10599 #undef TARGET_INIT_BUILTINS
10600 #define TARGET_INIT_BUILTINS s390_init_builtins
10601 #undef TARGET_EXPAND_BUILTIN
10602 #define TARGET_EXPAND_BUILTIN s390_expand_builtin
10603
10604 #undef TARGET_ASM_OUTPUT_MI_THUNK
10605 #define TARGET_ASM_OUTPUT_MI_THUNK s390_output_mi_thunk
10606 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
10607 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
10608
10609 #undef TARGET_SCHED_ADJUST_PRIORITY
10610 #define TARGET_SCHED_ADJUST_PRIORITY s390_adjust_priority
10611 #undef TARGET_SCHED_ISSUE_RATE
10612 #define TARGET_SCHED_ISSUE_RATE s390_issue_rate
10613 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
10614 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD s390_first_cycle_multipass_dfa_lookahead
10615
10616 #undef TARGET_SCHED_VARIABLE_ISSUE
10617 #define TARGET_SCHED_VARIABLE_ISSUE s390_sched_variable_issue
10618 #undef TARGET_SCHED_REORDER
10619 #define TARGET_SCHED_REORDER s390_sched_reorder
10620 #undef TARGET_SCHED_INIT
10621 #define TARGET_SCHED_INIT s390_sched_init
10622
10623 #undef TARGET_CANNOT_COPY_INSN_P
10624 #define TARGET_CANNOT_COPY_INSN_P s390_cannot_copy_insn_p
10625 #undef TARGET_RTX_COSTS
10626 #define TARGET_RTX_COSTS s390_rtx_costs
10627 #undef TARGET_ADDRESS_COST
10628 #define TARGET_ADDRESS_COST s390_address_cost
10629
10630 #undef TARGET_MACHINE_DEPENDENT_REORG
10631 #define TARGET_MACHINE_DEPENDENT_REORG s390_reorg
10632
10633 #undef TARGET_VALID_POINTER_MODE
10634 #define TARGET_VALID_POINTER_MODE s390_valid_pointer_mode
10635
10636 #undef TARGET_BUILD_BUILTIN_VA_LIST
10637 #define TARGET_BUILD_BUILTIN_VA_LIST s390_build_builtin_va_list
10638 #undef TARGET_EXPAND_BUILTIN_VA_START
10639 #define TARGET_EXPAND_BUILTIN_VA_START s390_va_start
10640 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
10641 #define TARGET_GIMPLIFY_VA_ARG_EXPR s390_gimplify_va_arg
10642
10643 #undef TARGET_PROMOTE_FUNCTION_MODE
10644 #define TARGET_PROMOTE_FUNCTION_MODE s390_promote_function_mode
10645 #undef TARGET_PASS_BY_REFERENCE
10646 #define TARGET_PASS_BY_REFERENCE s390_pass_by_reference
10647
10648 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
10649 #define TARGET_FUNCTION_OK_FOR_SIBCALL s390_function_ok_for_sibcall
10650 #undef TARGET_FUNCTION_ARG
10651 #define TARGET_FUNCTION_ARG s390_function_arg
10652 #undef TARGET_FUNCTION_ARG_ADVANCE
10653 #define TARGET_FUNCTION_ARG_ADVANCE s390_function_arg_advance
10654
10655 #undef TARGET_FIXED_CONDITION_CODE_REGS
10656 #define TARGET_FIXED_CONDITION_CODE_REGS s390_fixed_condition_code_regs
10657
10658 #undef TARGET_CC_MODES_COMPATIBLE
10659 #define TARGET_CC_MODES_COMPATIBLE s390_cc_modes_compatible
10660
10661 #undef TARGET_INVALID_WITHIN_DOLOOP
10662 #define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_null
10663
10664 #ifdef HAVE_AS_TLS
10665 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
10666 #define TARGET_ASM_OUTPUT_DWARF_DTPREL s390_output_dwarf_dtprel
10667 #endif
10668
10669 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
10670 #undef TARGET_MANGLE_TYPE
10671 #define TARGET_MANGLE_TYPE s390_mangle_type
10672 #endif
10673
10674 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10675 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
10676
10677 #undef TARGET_SECONDARY_RELOAD
10678 #define TARGET_SECONDARY_RELOAD s390_secondary_reload
10679
10680 #undef TARGET_LIBGCC_CMP_RETURN_MODE
10681 #define TARGET_LIBGCC_CMP_RETURN_MODE s390_libgcc_cmp_return_mode
10682
10683 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
10684 #define TARGET_LIBGCC_SHIFT_COUNT_MODE s390_libgcc_shift_count_mode
10685
10686 #undef TARGET_LEGITIMATE_ADDRESS_P
10687 #define TARGET_LEGITIMATE_ADDRESS_P s390_legitimate_address_p
10688
10689 #undef TARGET_CAN_ELIMINATE
10690 #define TARGET_CAN_ELIMINATE s390_can_eliminate
10691
10692 #undef TARGET_CONDITIONAL_REGISTER_USAGE
10693 #define TARGET_CONDITIONAL_REGISTER_USAGE s390_conditional_register_usage
10694
10695 #undef TARGET_LOOP_UNROLL_ADJUST
10696 #define TARGET_LOOP_UNROLL_ADJUST s390_loop_unroll_adjust
10697
10698 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
10699 #define TARGET_ASM_TRAMPOLINE_TEMPLATE s390_asm_trampoline_template
10700 #undef TARGET_TRAMPOLINE_INIT
10701 #define TARGET_TRAMPOLINE_INIT s390_trampoline_init
10702
10703 #undef TARGET_UNWIND_WORD_MODE
10704 #define TARGET_UNWIND_WORD_MODE s390_unwind_word_mode
10705
10706 struct gcc_target targetm = TARGET_INITIALIZER;
10707
10708 #include "gt-s390.h"